Merge "usb: gadget: add u_qc_ether - QC Ethernet-over-USB link layer utilities." into msm-3.4
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
new file mode 100644
index 0000000..2ba7341
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
@@ -0,0 +1,98 @@
+Qualcomm's QPNP PMIC Voltage ADC Arbiter
+
+QPNP PMIC Voltage ADC (VADC) provides an interface for clients to read
+voltage. A 15-bit ADC is used for voltage measurements. There are multiple
+peripherals on the VADC, and the scope of the driver is to provide an
+interface for the USR peripheral of the VADC.
+
+VADC node
+
+Required properties:
+- compatible : should be "qcom,qpnp-vadc" for Voltage ADC driver.
+- reg : offset and length of the PMIC Arbiter register map.
+- interrupts : The USR bank peripheral VADC interrupt.
+- qcom,adc-bit-resolution : Bit resolution of the ADC.
+- qcom,adc-vdd-reference : Voltage reference used by the ADC.
+
+Channel nodes
+NOTE: At least one Channel node is required.
+
+Required properties:
+- label : Channel name used for sysfs entry.
+- qcom,channel-num : Channel number associated to the AMUX input.
+- qcom,decimation : Sampling rate to use for the individual channel measurement.
+ Select from the following unsigned int.
+ 0 : 512
+ 1 : 1K
+ 2 : 2K
+ 3 : 4K
+- qcom,pre-div-channel-scaling : Pre-div used for the channel before the signal
+ is being measured.
+- qcom,calibration-type : Reference voltage to use for channel calibration.
+ Channel calibration is dependent on the channel.
+ Certain channels like XO_THERM, BATT_THERM use ratiometric
+ calibration. Most other channels fall under absolute calibration.
+ Select from the following strings.
+ "absolute" : Uses the 625mv and 1.25V reference channels.
+ "ratiometric" : Uses the reference Voltage/GND for calibration.
+- qcom,scale-function : Scaling function used to convert raw ADC code to units specific to
+ a given channel.
+ Select from the following unsigned int.
+ 0 : Default scaling to convert raw ADC code to voltage.
+ 1 : Conversion to temperature based on BTM parameters.
+ 2 : Returns result in millidegrees Centigrade.
+ 3 : Returns current across a 0.1 ohm resistor.
+ 4 : Returns XO thermistor voltage in degrees Centigrade.
+- qcom,hw-settle-time : Settling period for the channel before ADC read.
+ Select from the following unsigned int.
+ 0 : 0us
+ 1 : 100us
+ 2 : 200us
+ 3 : 300us
+ 4 : 400us
+ 5 : 500us
+ 6 : 600us
+ 7 : 700us
+ 8 : 800us
+ 9 : 900us
+ 0xa : 1ms
+ 0xb : 2ms
+ 0xc : 4ms
+ 0xd : 6ms
+ 0xe : 8ms
+ 0xf : 10ms
+- qcom,fast-avg-setup : Average number of samples to be used for measurement. Fast averaging
+ provides the option to obtain a single measurement from the ADC that
+ is an average of multiple samples. The value selected is 2^(value).
+ Select from the following unsigned int.
+ 0 : 1
+ 1 : 2
+ 2 : 4
+ 3 : 8
+ 4 : 16
+ 5 : 32
+ 6 : 64
+ 7 : 128
+ 8 : 256
+
+Example:
+ /* Main Node */
+ qcom,vadc@3100 {
+ compatible = "qcom,qpnp-vadc";
+ reg = <0x3100 0x100>;
+ interrupts = <0x0 0x31 0x0>;
+ qcom,adc-bit-resolution = <15>;
+ qcom,adc-vdd-reference = <1800>;
+
+ /* Channel Node */
+ chan@0 {
+ label = "usb_in";
+ qcom,channel-num = <0>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <20>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iommu/msm_iommu.txt b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
index c198fe9..f5a2590 100644
--- a/Documentation/devicetree/bindings/iommu/msm_iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
@@ -12,10 +12,14 @@
- interrupts : should contain the context bank interrupt.
- qcom,iommu-ctx-sids : List of stream identifiers associated with this
translation context.
- - qcom,iommu-ctx-name : Name of the context bank
+ - label : Name of the context bank
- qcom,iommu-smt-size : Number of SMR entries in the SMT of this HW block
- vdd-supply : phandle to GDSC regulator controlling this IOMMU.
+Optional properties:
+- qcom,needs-alt-core-clk : boolean to enable the secondary core clock for
+ access to the IOMMU configuration registers
+
Example:
qcom,iommu@fda64000 {
@@ -27,12 +31,12 @@
reg = <0xfda6c000 0x1000>;
interrupts = <0 70 0>;
qcom,iommu-ctx-sids = <0 2>;
- qcom,iommu-ctx-name = "ctx_0";
+ label = "ctx_0";
};
qcom,iommu-ctx@fda6d000 {
reg = <0xfda6d000 0x1000>;
interrupts = <0 71 0>;
qcom,iommu-ctx-sids = <1>;
- qcom,iommu-ctx-name = "ctx_1";
+ label = "ctx_1";
};
};
diff --git a/Documentation/devicetree/bindings/mmc/msm_sdcc.txt b/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
index a8de90f..c674a13 100644
--- a/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
+++ b/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
@@ -16,10 +16,8 @@
Optional Properties:
- cell-index - defines slot ID.
- qcom,sdcc-bus-width - defines the bus I/O width that controller supports.
- - qcom,sdcc-wp-gpio - defines write protect switch gpio.
- - qcom,sdcc-wp-polarity - specifies the polarity of wp switch.
- - qcom,sdcc-cd-gpio - defines card detect gpio number.
- - qcom,sdcc-cd-polarity - specifies the polarity of cd gpio.
+ - wp-gpios - specify GPIO for write protect switch detection.
+ - cd-gpios - specify GPIO for card detection.
- qcom,sdcc-nonremovable - specifies whether the card in slot is
hot pluggable or hard wired.
- qcom,sdcc-disable_cmd23 - disable sending CMD23 to card when controller can't support it.
diff --git a/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
new file mode 100644
index 0000000..bddbbae
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
@@ -0,0 +1,51 @@
+Qualcomm Parallel Interface Controller (QPIC) for NAND devices
+
+Required properties:
+- compatible : "qcom,msm-nand".
+- reg : should specify QPIC NANDc and BAM physical address range.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should specify QPIC/BAM interrupt numbers.
+- interrupt-names : should specify relevant names to each interrupts property
+ defined.
+
+MTD flash partition layout for NAND devices -
+
+Each partition is represented as a sub-node of the qcom,mtd-partitions device.
+Each node's name represents the name of the corresponding partition.
+
+Required properties:
+- reg : The partition offset and size
+- label : The label / name for this partition.
+
+Optional properties:
+- read-only: This parameter, if present, indicates that this partition
+ should only be mounted read-only.
+
+Examples:
+
+ qcom,nand@f9af0000 {
+ compatible = "qcom,msm-nand";
+ reg = <0xf9af0000 0x1000>,
+ <0xf9ac4000 0x8000>;
+ reg-names = "nand_phys",
+ "bam_phys";
+ interrupts = <0 279 0>;
+ interrupt-names = "bam_irq";
+ };
+
+ qcom,mtd-partitions {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "boot";
+ reg = <0x00000000 0x1000>;
+ };
+ partition@00020000 {
+ label = "userdata";
+ reg = <0x00020000 0x1000>;
+ };
+ partition@00040000 {
+ label = "system";
+ reg = <0x00040000 0x1000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 16a5c77..84f0c24 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -1,4 +1,4 @@
-Qualcomm audio devices for ALSA sound soc
+Qualcomm audio devices for ALSA sound SoC
* msm-pcm
@@ -42,6 +42,22 @@
- compatible : "qcom,msm-dai-fe"
+* msm-dai-q6
+
+[First Level Nodes]
+
+Required properties:
+
+ - compatible : "msm-dai-q6"
+
+[Second Level Nodes]
+
+Required properties:
+
+ - compatible : "qcom,msm-dai-q6-dev"
+ - qcom,msm-dai-q6-dev-id : The SLIMbus multi-channel port ID.
+ Values range from 16384 to 16393.
+
* msm-auxpcm
[First Level Nodes]
@@ -87,6 +103,22 @@
- compatible : "qcom,msm-pcm-hostless"
+* msm-ocmem-audio
+
+Required properties:
+
+ - compatible : "qcom,msm-ocmem-audio"
+
+ - qcom,msm-ocmem-audio-src-id: Master port id
+
+ - qcom,msm-ocmem-audio-dst-id: Slave port id
+
+ - qcom,msm-ocmem-audio-ab: arbitrated bandwidth
+ in Bytes/s
+
+ - qcom,msm-ocmem-audio-ib: instantaneous bandwidth
+ in Bytes/s
+
Example:
qcom,msm-pcm {
@@ -117,6 +149,19 @@
compatible = "qcom,msm-dai-fe";
};
+ qcom,msm-dai-q6 {
+ compatible = "qcom,msm-dai-q6";
+ qcom,msm-dai-q6-sb-0-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <16384>;
+ };
+
+ qcom,msm-dai-q6-sb-0-tx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <16385>;
+ };
+ };
+
qcom,msm-auxpcm {
compatible = "qcom,msm-auxpcm-resource";
qcom,msm-cpudai-auxpcm-clk = "pcm_clk";
@@ -143,3 +188,11 @@
compatible = "qcom,msm-pcm-hostless";
};
+ qcom,msm-ocmem-audio {
+ compatible = "qcom,msm-ocmem-audio";
+ qcom,msm-ocmem-audio-src-id = <11>;
+ qcom,msm-ocmem-audio-dst-id = <604>;
+ qcom,msm-ocmem-audio-ab = <209715200>;
+ qcom,msm-ocmem-audio-ib = <471859200>;
+ };
+
diff --git a/Documentation/mtd/devices/msm_qpic_nand.txt b/Documentation/mtd/devices/msm_qpic_nand.txt
new file mode 100644
index 0000000..301e823
--- /dev/null
+++ b/Documentation/mtd/devices/msm_qpic_nand.txt
@@ -0,0 +1,296 @@
+Introduction
+============
+
+In MDM9x25, a new NAND controller (NANDc) has been added, and it has the
+following major changes compared to its previous version -
+
+1. It includes a secured BAM-Lite, and the support for ADM (Application Data
+Mover) has been removed.
+
+2. It includes 4-bit BCH ECC, and the support for 4-bit Reed-Solomon ECC has
+been removed.
+
+3. The support for dual NAND controllers has been removed, and thus software
+features like ping-pong mode and interleave mode are deprecated.
+
+4. It includes support for dual read buffers and one dedicated write buffer
+for each processor (Modem and Apps).
+
+This new NAND driver takes care of all the above hardware changes. In
+addition to the hardware changes, it also takes care of the software device
+tree changes.
+
+Hardware description
+====================
+
+The NANDc Core:
+---------------
+Qualcomm Parallel Interface Controller (QPIC), formerly named EBI2, is a
+wrapper module which integrates a NAND controller core and an LCD controller
+core and multiplexes their access to the shared parallel interface pins. Both
+controller cores are accessible to the processors (Modem and Apps), and share
+master access to the Peripheral NoC (Network on Chip) via a BAM module.
+
+In MDM9x25, QPIC is located on the peripheral NoC, connected via a 32-bit AHB
+Master Port and a 32-bit AHB Slave Port. The NANDc register interface goes
+through the AHB Slave Port, and data transfers using BAM go through the AHB
+Master Port. The NAND Controller (NANDc) is a hardware core which manages the
+access to an off-chip NAND device.
+
+BAM-Lite:
+---------
+BAM (Bus Access Manager) can transfer data between a peripheral and memory,
+or between two peripherals in a BAM to BAM mode. Each BAM contains multiple
+DMA channels, called pipes. A pipe provides a unidirectional data transfer
+engine, capable of either receiving data in consumer mode, or transmitting
+data in producer mode. The consumer fetches the data from the source system
+memory, and the producer writes data to the destination system memory.
+
+BAM-Lite's interface is similar to the BAM interface with slight changes to
+the sideband interface. BAM-Lite is an area-optimized version of BAM. BAM-Lite
+supports new features such as Notify-When-Done (NWD), pipe lock/unlock and
+command descriptors.
+
+NANDc has a secured BAM-Lite which provides DMA support for the NANDc and
+command support for accessing the NANDc registers. It is called secured
+because it has an integrated APU (Address Protection Unit) that validates
+every access to BAM and its peripheral registers.
+
+The NANDc has 6 BAM pipes in total - 3 pipes are dedicated to each processor
+(Modem and Apps) at the hardware level.
+
+Software description
+====================
+
+The NAND device is shared between two independent file systems, each running
+on a different processor - the application processor (Apps) and the Modem.
+The NAND driver uses the BAM driver to transfer NAND operation requests and
+data to/from the NAND Controller (NANDc) core through the BAM pipes. Every
+NANDc register read/write access must go through BAM, as it facilitates the
+security mechanisms that enable simultaneous access to the NAND device from
+both processors (Modem and Apps).
+
+The Apps NAND driver registers the NANDc BAM peripheral with the BAM driver,
+allocates endpoints and descriptor FIFO memory, and registers for completion
+event notification for the following pipes:
+
+ - system consumer pipe for data (pipe#0) : This BAM pipe will be used
+ for transferring data from system memory to NANDc i.e., during write.
+
+ - system producer pipe for data (pipe#1) : This BAM pipe will be used
+ for transferring data from NANDc to system memory i.e., during read.
+
+ - system consumer pipe for commands (pipe#2) : This BAM pipe will be
+ used for both reading and writing NANDc registers. It can be
+ configured either as a consumer pipe or a producer pipe, but per the
+ HW team's recommendation it is configured as a consumer pipe.
+
+Control path:
+-------------
+Each NAND operation can be described as a set of BAM command and/or data
+descriptors.
+
+A command descriptor (CD) points to the starting address of a command
+block. Each command block may contain a set of command elements, where
+each command element is a single NANDc register read/write. The NAND
+driver submits all command descriptors to its system consumer pipe#2.
+
+Data path:
+----------
+A Data Descriptor (DD) points to the start of a data block, which is a
+sequential chunk of data.
+
+For page write operations, the NAND driver submits data descriptors to system
+consumer pipe#0 and as per the descriptors submitted, the BAM reads data from
+the data block into the NANDc buffer.
+
+For page read operations, the NAND driver submits data descriptors to system
+producer pipe#1 and as per the descriptors submitted, the BAM reads data from
+the NANDc buffer into the data block.
+
+The driver submits a CD/DD using the BAM driver APIs sps_transfer_one()/
+sps_transfer(). These APIs take a flags argument; if SPS_IOVEC_FLAG_CMD is
+set, the descriptor is identified as a CD. Otherwise, it is identified as a
+DD. The other valid SPS flags for a CD/DD are listed below (a minimal usage
+sketch follows the list) -
+
+ - SPS_IOVEC_FLAG_INT : This flag tells the BAM driver to raise a BAM
+ interrupt after the current descriptor with this flag has been
+ processed by the BAM HW. This flag is applicable to both CDs and DDs.
+
+ - SPS_IOVEC_FLAG_NWD : This flag tells the BAM HW not to process
+ further descriptors until it receives an acknowledgement from the
+ NANDc that the current descriptor with this flag has completely
+ executed. This flag is applicable only to a CD.
+
+ - SPS_IOVEC_FLAG_LOCK: This flag marks the beginning of a series of
+ commands and it indicates that all the CDs submitted on this pipe
+ must be executed atomically without any interruption by commands
+ from other pipes. This is applicable only for a CD.
+
+ - SPS_IOVEC_FLAG_UNLOCK: This flag marks the end of a series of
+ commands and it indicates that the other pipe that was locked due to
+ SPS_IOVEC_FLAG_LOCK flag can be unblocked after the current CD
+ with this flag is executed. This is applicable only for a CD.
+
+ - SPS_IOVEC_FLAG_EOT - This flag indicates to BAM driver that the
+ current descriptor with this flag is the last descriptor submitted
+ during write operation. This is applicable only for a DD.
+
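+The following is a minimal, illustrative sketch of how a command descriptor
+could be submitted on the command pipe with these flags. It is not code from
+the driver itself; the pipe handle (cmd_pipe) and the physical address and
+size of the command block (cmd_block_phys, cmd_block_size) are assumed to
+have been set up earlier during BAM registration:
+
+	int ret;
+
+	/*
+	 * Submit one command block as a CD and request an interrupt once
+	 * the NANDc acknowledges that it has executed it completely.
+	 */
+	ret = sps_transfer_one(cmd_pipe,	/* system consumer pipe#2 */
+			cmd_block_phys,		/* start address of the command block */
+			cmd_block_size,		/* size of the command block in bytes */
+			NULL,			/* no per-descriptor cookie */
+			SPS_IOVEC_FLAG_CMD |	/* identify it as a CD */
+			SPS_IOVEC_FLAG_NWD |	/* wait for NANDc acknowledgement */
+			SPS_IOVEC_FLAG_INT);	/* raise a BAM interrupt on completion */
+	if (ret)
+		pr_err("CD submission failed (%d)\n", ret);
+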
+Error handling:
+---------------
+After a page read/write completion notification from BAM, the NAND driver
+validates the values read from the NANDc registers to confirm the
+success/failure of the page read/write operation. For example, after a page
+read/write is complete, the driver reads the NANDc status registers to check
+for any operational errors, protection violation errors, device status errors,
+and the number of correctable/uncorrectable errors reported by the controller.
+Based on the error conditions that are met, the driver reports appropriate
+error codes to the upper layers, which respond to these errors and take
+appropriate action.
+
+Design
+======
+
+The existing NAND driver (ADM based) cannot be reused due to the many major HW
+changes (see the Introduction section) in the new NANDc core. Some of the
+complex features (such as dual NAND controller support) are also deprecated in
+the new NANDc. Hence, a new NAND driver is written to take care of both the
+SPS/BAM changes and the other controller-specific changes. The rest of the
+interaction with MTD and YAFFS2 remains the same as in the previous version of
+the NAND driver, msm_nand.c.
+
+Power Management
+================
+
+Two clocks are supplied to the NANDc by the system's clock controller - an AHB
+clock and an interface clock. The interface clock drives some of the HW blocks
+within the NANDc. As of now, both of these clocks are always on. The NANDc
+provides clock gating if some of the QPIC clock control registers are
+configured, but clock gating is not yet enabled by the driver.
+
+SMP/Multi-Core
+==============
+
+The locking mechanism for page read/write operations is taken care of by the
+higher layers such as MTD/YAFFS2, and only a single page operation can happen
+at any time on a given partition. For a single page operation, there is always
+only one context associated within the driver, and thus no additional handling
+is required within the driver. But it is possible for the file system to issue
+one request on a partition and, at the same time, another request on a
+different partition, as each partition corresponds to a different MTD block
+device. This situation is handled within the driver by acquiring a mutex lock
+before submitting any command/data descriptors to any of the BAM pipes, as
+sketched below.
+
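+A minimal sketch of this serialization is shown below (the symbol names are
+illustrative only, not the driver's actual identifiers):
+
+	/* One lock per NANDc, shared by all partitions/MTD devices on it. */
+	static DEFINE_MUTEX(nandc_lock);
+
+	mutex_lock(&nandc_lock);
+	/* ...build and submit the CDs/DDs for this page operation... */
+	mutex_unlock(&nandc_lock);
+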
+
+Security
+========
+
+The same NAND device is accessible from both processors (Modem and Apps); thus,
+to avoid any configuration overwrite issues during a page operation, the
+driver on each processor (Modem and Apps) must explicitly use the BAM pipe
+lock/unlock mechanism. This is taken care of by the NAND driver. Partition
+violation issues are prevented by an MPU (Memory Protection Unit) that is
+attached to the NANDc.
+
+Performance
+===========
+
+None.
+
+Interface
+=========
+
+The NAND driver registers each partition on the NAND device as an MTD block
+device using mtd_device_register(). As part of this registration, the
+following ops (struct mtd_info *mtd) are registered with the MTD layer for
+each partition:
+
+mtd->_block_isbad = msm_nand_block_isbad;
+mtd->_block_markbad = msm_nand_block_markbad;
+mtd->_read = msm_nand_read;
+mtd->_write = msm_nand_write;
+mtd->_read_oob = msm_nand_read_oob;
+mtd->_write_oob = msm_nand_write_oob;
+mtd->_erase = msm_nand_erase;
+
+msm_nand_block_isbad() - This checks whether a block is bad by reading the bad
+block byte in the first page of the block. A block is considered bad if the
+bad block byte location contains any value other than 0xFF.
+
+msm_nand_block_markbad() - This marks a block as bad by writing 0 to the
+entire first page of the block, and thus writing 0 to the bad block byte
+location.
+
+msm_nand_read/write() - This is used to read/write only main data from/to
+single/multiple pages within the NAND device. The YAFFS2 file system can send
+read/write requests for two types of data -
+
+ - Main data : This is the actual data to be read/written from/to a
+ page during a read/write operation on this device. The size of this
+ data request is typically based on the page size of the device
+ (2K/4K).
+
+ - OOB (Out Of Band) data : This is the spare data that will be used by
+ the file system to keep track of its metadata/tags associated with the
+ actual data. As of now, the file system needs only 16 bytes to
+ accommodate this data. The NAND driver always writes this data
+ towards the end of the main data.
+
+It is up to the file system whether or not to send a read/write request for OOB
+data along with main data.
+
+msm_nand_read_oob()/write_oob() - This is used to read/write both main data
+and spare data from/to single/multiple pages within the NAND device (a usage
+sketch follows).
+
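+As an illustration only (this is assumed caller-side code, not part of the
+driver), an MTD user could request one page of main data together with 16
+bytes of spare data through the standard mtd_read_oob() helper, which invokes
+the registered _read_oob op; mtd, page_addr, data_buf and oob_buf are assumed
+to exist in the caller:
+
+	struct mtd_oob_ops ops = {
+		.mode	= MTD_OPS_AUTO_OOB,	/* let the driver place the OOB bytes */
+		.len	= mtd->writesize,	/* main data: one page (2K/4K) */
+		.ooblen	= 16,			/* spare data used for file system tags */
+		.datbuf	= data_buf,
+		.oobbuf	= oob_buf,
+	};
+	int ret;
+
+	ret = mtd_read_oob(mtd, page_addr, &ops);
+	if (ret)
+		pr_err("page + OOB read failed (%d)\n", ret);
+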
+msm_nand_erase() - This erases the complete block by sending erase command to
+the device.
+
+The YAFFS2 file system registers as a user of the MTD device and uses the ops
+exposed by the NAND driver to perform read/write/erase operations on the NAND
+device. As of now, the driver works only with the YAFFS2 file system. An
+attempt to use it with any other file system might demand additional changes
+in the driver.
+
+Driver parameters
+=================
+
+None.
+
+Config options
+==============
+
+The config option MTD_MSM_QPIC_NAND enables this driver.
+
+Dependencies
+============
+
+It depends on the following kernel components:
+
+- SPS/BAM driver
+- MTD core layer
+- Addition of the necessary NANDc and BAM resources to the .dts file
+
+It depends on the following non-kernel components:
+
+The partition information of the NAND device must be passed by the Modem
+subsystem to the Apps boot loader, and the Apps boot loader must update the
+.dts file with the partition information as per the defined MTD bindings.
+
+The detailed information on MTD bindings can be found at -
+Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
+
+User space utilities
+====================
+
+None.
+
+Other
+=====
+
+No changes other than device tree changes are anticipated.
+
+Known issues
+============
+
+None.
+
+To do
+=====
+
+The NANDc core supports clock gating, but this is not yet supported by the
+driver.
diff --git a/arch/arm/boot/dts/msm-iommu.dtsi b/arch/arm/boot/dts/msm-iommu.dtsi
new file mode 100755
index 0000000..0e2ddce9
--- /dev/null
+++ b/arch/arm/boot/dts/msm-iommu.dtsi
@@ -0,0 +1,161 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+ jpeg_iommu: qcom,iommu@fda64000 {
+ compatible = "qcom,msm-smmu-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0xfda64000 0x10000>;
+ vdd-supply = <&gdsc_jpeg>;
+ qcom,iommu-smt-size = <16>;
+ status = "disabled";
+
+ qcom,iommu-ctx@fda6c000 {
+ reg = <0xfda6c000 0x1000>;
+ interrupts = <0 69 0>;
+ qcom,iommu-ctx-sids = <0>;
+ label = "jpeg_enc0";
+ };
+
+ qcom,iommu-ctx@fda6d000 {
+ reg = <0xfda6d000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <1>;
+ label = "jpeg_enc1";
+ };
+
+ qcom,iommu-ctx@fda6e000 {
+ reg = <0xfda6e000 0x1000>;
+ interrupts = <0 71 0>;
+ qcom,iommu-ctx-sids = <2>;
+ label = "jpeg_dec";
+ };
+ };
+
+ mdp_iommu: qcom,iommu@fd928000 {
+ compatible = "qcom,msm-smmu-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0xfd928000 0x10000>;
+ vdd-supply = <&gdsc_mdss>;
+ qcom,iommu-smt-size = <16>;
+ status = "disabled";
+
+ qcom,iommu-ctx@fd930000 {
+ reg = <0xfd930000 0x1000>;
+ interrupts = <0 46 0>;
+ qcom,iommu-ctx-sids = <0>;
+ label = "mdp_0";
+ };
+
+ qcom,iommu-ctx@fd931000 {
+ reg = <0xfd931000 0x1000>;
+ interrupts = <0 47 0>;
+ qcom,iommu-ctx-sids = <1>;
+ label = "mdp_1";
+ };
+ };
+
+ venus_iommu: qcom,iommu@fdc84000 {
+ compatible = "qcom,msm-smmu-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0xfdc84000 0x10000>;
+ vdd-supply = <&gdsc_venus>;
+ qcom,iommu-smt-size = <16>;
+ qcom,needs-alt-core-clk;
+ status = "disabled";
+
+ qcom,iommu-ctx@fdc8c000 {
+ reg = <0xfdc8c000 0x1000>;
+ interrupts = <0 43 0>;
+ qcom,iommu-ctx-sids = <0 1 2 3 4 5>;
+ label = "venus_ns";
+ };
+
+ qcom,iommu-ctx@fdc8d000 {
+ reg = <0xfdc8d000 0x1000>;
+ interrupts = <0 42 0>;
+ qcom,iommu-ctx-sids = <0x80 0x81 0x82 0x83 0x84 0x85>;
+ label = "venus_cp";
+ };
+
+ qcom,iommu-ctx@fdc8e000 {
+ reg = <0xfdc8e000 0x1000>;
+ interrupts = <0 41 0>;
+ qcom,iommu-ctx-sids = <0xc0 0xc6>;
+ label = "venus_fw";
+ };
+ };
+
+ kgsl_iommu: qcom,iommu@fdb10000 {
+ compatible = "qcom,msm-smmu-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0xfdb10000 0x10000>;
+ vdd-supply = <&gdsc_oxili_cx>;
+ qcom,iommu-smt-size = <32>;
+ qcom,needs-alt-core-clk;
+ status = "disabled";
+
+ qcom,iommu-ctx@fdb18000 {
+ reg = <0xfdb18000 0x1000>;
+ interrupts = <0 240 0>;
+ qcom,iommu-ctx-sids = <0>;
+ label = "gfx3d_user";
+ };
+
+ qcom,iommu-ctx@fdb19000 {
+ reg = <0xfdb19000 0x1000>;
+ interrupts = <0 241 0>;
+ qcom,iommu-ctx-sids = <1>;
+ label = "gfx3d_priv";
+ };
+ };
+
+ vfe_iommu: qcom,iommu@fda44000 {
+ compatible = "qcom,msm-smmu-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0xfda44000 0x10000>;
+ vdd-supply = <&gdsc_vfe>;
+ qcom,iommu-smt-size = <32>;
+ status = "disabled";
+
+ qcom,iommu-ctx@fda4c000 {
+ reg = <0xfda4c000 0x1000>;
+ interrupts = <0 64 0>;
+ qcom,iommu-ctx-sids = <0>;
+ label = "vfe0";
+ };
+
+ qcom,iommu-ctx@fda4d000 {
+ reg = <0xfda4d000 0x1000>;
+ interrupts = <0 65 0>;
+ qcom,iommu-ctx-sids = <1>;
+ label = "vfe1";
+ };
+
+ qcom,iommu-ctx@fda4e000 {
+ reg = <0xfda4e000 0x1000>;
+ interrupts = <0 66 0>;
+ qcom,iommu-ctx-sids = <2>;
+ label = "cpp";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/msm8974-iommu.dtsi b/arch/arm/boot/dts/msm8974-iommu.dtsi
index a115fd8..184826e 100755
--- a/arch/arm/boot/dts/msm8974-iommu.dtsi
+++ b/arch/arm/boot/dts/msm8974-iommu.dtsi
@@ -10,108 +10,24 @@
* GNU General Public License for more details.
*/
-/ {
- jpeg: qcom,iommu@fda64000 {
- compatible = "qcom,msm-smmu-v2";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- reg = <0xfda64000 0x10000>;
- vdd-supply = <&gdsc_jpeg>;
- qcom,iommu-smt-size = <16>;
+/include/ "msm-iommu.dtsi"
- qcom,iommu-ctx@fda6c000 {
- reg = <0xfda6c000 0x1000>;
- interrupts = <0 69 0>;
- qcom,iommu-ctx-sids = <0>;
- qcom,iommu-ctx-name = "jpeg_enc0";
- };
- qcom,iommu-ctx@fda6d000 {
- reg = <0xfda6d000 0x1000>;
- interrupts = <0 70 0>;
- qcom,iommu-ctx-sids = <1>;
- qcom,iommu-ctx-name = "jpeg_enc1";
- };
- qcom,iommu-ctx@fda6e000 {
- reg = <0xfda6e000 0x1000>;
- interrupts = <0 71 0>;
- qcom,iommu-ctx-sids = <2>;
- qcom,iommu-ctx-name = "jpeg_dec";
- };
- };
+&jpeg_iommu {
+ status = "ok";
+};
- mdp: qcom,iommu@fd928000 {
- compatible = "qcom,msm-smmu-v2";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- reg = <0xfd928000 0x10000>;
- vdd-supply = <&gdsc_mdss>;
- qcom,iommu-smt-size = <16>;
+&mdp_iommu {
+ status = "ok";
+};
- qcom,iommu-ctx@fd930000 {
- reg = <0xfd930000 0x1000>;
- interrupts = <0 74 0>;
- qcom,iommu-ctx-sids = <0>;
- qcom,iommu-ctx-name = "mdp_0";
- };
- qcom,iommu-ctx@fd931000 {
- reg = <0xfd931000 0x1000>;
- interrupts = <0 75 0>;
- qcom,iommu-ctx-sids = <1>;
- qcom,iommu-ctx-name = "mdp_1";
- };
- };
+&venus_iommu {
+ status = "ok";
+};
- venus: qcom,iommu@fdc84000 {
- compatible = "qcom,msm-smmu-v2";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- reg = <0xfdc84000 0x10000>;
- vdd-supply = <&gdsc_venus>;
- qcom,iommu-smt-size = <16>;
+&kgsl_iommu {
+ status = "ok";
+};
- qcom,iommu-ctx@fdc8c000 {
- reg = <0xfdc8c000 0x1000>;
- interrupts = <0 43 0>;
- qcom,iommu-ctx-sids = <0 1 2 3 4 5>;
- qcom,iommu-ctx-name = "venus_ns";
- };
- qcom,iommu-ctx@fdc8d000 {
- reg = <0xfdc8d000 0x1000>;
- interrupts = <0 42 0>;
- qcom,iommu-ctx-sids = <0x80 0x81 0x82 0x83 0x84 0x85>;
- qcom,iommu-ctx-name = "venus_cp";
- };
- qcom,iommu-ctx@fdc8e000 {
- reg = <0xfdc8e000 0x1000>;
- interrupts = <0 41 0>;
- qcom,iommu-ctx-sids = <0xc0 0xc6>;
- qcom,iommu-ctx-name = "venus_fw";
- };
- };
-
- kgsl: qcom,iommu@fdb10000 {
- compatible = "qcom,msm-smmu-v2";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- reg = <0xfdb10000 0x10000>;
- vdd-supply = <&gdsc_oxili_cx>;
- qcom,iommu-smt-size = <32>;
-
- qcom,iommu-ctx@fdb18000 {
- reg = <0xfdb18000 0x1000>;
- interrupts = <0 240 0>;
- qcom,iommu-ctx-sids = <0>;
- qcom,iommu-ctx-name = "gfx3d_user";
- };
- qcom,iommu-ctx@fdb19000 {
- reg = <0xfdb19000 0x1000>;
- interrupts = <0 241 0>;
- qcom,iommu-ctx-sids = <1>;
- qcom,iommu-ctx-name = "gfx3d_priv";
- };
- };
+&vfe_iommu {
+ status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 719eb4e..f71f74c 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -400,6 +400,19 @@
compatible = "qcom,msm-dai-fe";
};
+ qcom,msm-dai-q6 {
+ compatible = "qcom,msm-dai-q6";
+ qcom,msm-dai-q6-sb-0-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <16384>;
+ };
+
+ qcom,msm-dai-q6-sb-0-tx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <16385>;
+ };
+ };
+
qcom,msm-auxpcm {
compatible = "qcom,msm-auxpcm-resource";
qcom,msm-cpudai-auxpcm-clk = "pcm_clk";
@@ -426,6 +439,14 @@
compatible = "qcom,msm-pcm-hostless";
};
+ qcom,msm-ocmem-audio {
+ compatible = "qcom,msm-ocmem-audio";
+ qcom,msm-ocmem-audio-src-id = <11>;
+ qcom,msm-ocmem-audio-dst-id = <604>;
+ qcom,msm-ocmem-audio-ab = <32505856>;
+ qcom,msm-ocmem-audio-ib = <32505856>;
+ };
+
qcom,mss@fc880000 {
compatible = "qcom,pil-q6v5-mss";
reg = <0xfc880000 0x100>,
@@ -557,6 +578,12 @@
qcom,firmware-max-paddr = <0xFA00000>;
};
+ qcom,cache_erp {
+ compatible = "qcom,cache_erp";
+ interrupts = <1 9 0>, <0 2 0>;
+ interrupt-names = "l1_irq", "l2_irq";
+ };
+
tsens@fc4a8000 {
compatible = "qcom,msm-tsens";
reg = <0xfc4a8000 0x2000>,
@@ -567,6 +594,12 @@
qcom,slope = <1134 1122 1142 1123 1176 1176 1176 1186 1176
1176 1176>;
};
+
+ qcom,msm-rtb {
+ compatible = "qcom,msm-rtb";
+ qcom,memory-reservation-type = "EBI1";
+ qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
+ };
};
/include/ "msm-pm8x41-rpm-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm9625.dts b/arch/arm/boot/dts/msm9625.dts
index 6c007fb..6b44be9 100644
--- a/arch/arm/boot/dts/msm9625.dts
+++ b/arch/arm/boot/dts/msm9625.dts
@@ -55,4 +55,14 @@
reg = <0xf991f000 0x1000>;
interrupts = <0 109 0>;
};
+
+ qcom,nand@f9ac0000 {
+ compatible = "qcom,msm-nand";
+ reg = <0xf9ac0000 0x1000>,
+ <0xf9ac4000 0x8000>;
+ reg-names = "nand_phys",
+ "bam_phys";
+ interrupts = <0 247 0>;
+ interrupt-names = "bam_irq";
+ };
};
diff --git a/arch/arm/configs/msm8660-perf_defconfig b/arch/arm/configs/msm8660-perf_defconfig
index 173dcca..f3b2219 100644
--- a/arch/arm/configs/msm8660-perf_defconfig
+++ b/arch/arm/configs/msm8660-perf_defconfig
@@ -340,6 +340,7 @@
CONFIG_FB_MSM_TRIPLE_BUFFER=y
CONFIG_FB_MSM_MDP40=y
CONFIG_FB_MSM_OVERLAY=y
+CONFIG_FB_MSM_NO_MDP_PIPE_CTRL=y
CONFIG_FB_MSM_OVERLAY0_WRITEBACK=y
CONFIG_FB_MSM_OVERLAY1_WRITEBACK=y
CONFIG_FB_MSM_WRITEBACK_MSM_PANEL=y
diff --git a/arch/arm/configs/msm8660_defconfig b/arch/arm/configs/msm8660_defconfig
index f1c0aaa..4748496 100644
--- a/arch/arm/configs/msm8660_defconfig
+++ b/arch/arm/configs/msm8660_defconfig
@@ -342,6 +342,7 @@
CONFIG_FB_MSM_TRIPLE_BUFFER=y
CONFIG_FB_MSM_MDP40=y
CONFIG_FB_MSM_OVERLAY=y
+CONFIG_FB_MSM_NO_MDP_PIPE_CTRL=y
CONFIG_FB_MSM_OVERLAY0_WRITEBACK=y
CONFIG_FB_MSM_OVERLAY1_WRITEBACK=y
CONFIG_FB_MSM_WRITEBACK_MSM_PANEL=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index c2f4702..66e71fc 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -83,6 +83,7 @@
CONFIG_MSM_GSS_SSR_8064=y
CONFIG_MSM_TZ_LOG=y
CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_RBCPR_STATS_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
CONFIG_MSM_BUS_SCALING=y
CONFIG_MSM_BUS_RPM_MULTI_TIER_ENABLED=y
@@ -367,6 +368,7 @@
CONFIG_FB_MSM_TRIPLE_BUFFER=y
CONFIG_FB_MSM_MDP40=y
CONFIG_FB_MSM_OVERLAY=y
+CONFIG_FB_MSM_NO_MDP_PIPE_CTRL=y
CONFIG_FB_MSM_OVERLAY0_WRITEBACK=y
CONFIG_FB_MSM_OVERLAY1_WRITEBACK=y
CONFIG_FB_MSM_WRITEBACK_MSM_PANEL=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index a50485d..3731845 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -83,6 +83,7 @@
CONFIG_MSM_TZ_LOG=y
CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_MSM_RPM_RBCPR_STATS_LOG=y
CONFIG_MSM_BUS_SCALING=y
CONFIG_MSM_BUS_RPM_MULTI_TIER_ENABLED=y
CONFIG_MSM_WATCHDOG=y
@@ -370,6 +371,7 @@
CONFIG_FB_MSM_TRIPLE_BUFFER=y
CONFIG_FB_MSM_MDP40=y
CONFIG_FB_MSM_OVERLAY=y
+CONFIG_FB_MSM_NO_MDP_PIPE_CTRL=y
CONFIG_FB_MSM_OVERLAY0_WRITEBACK=y
CONFIG_FB_MSM_OVERLAY1_WRITEBACK=y
CONFIG_FB_MSM_WRITEBACK_MSM_PANEL=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index 63d2ced..b02bd7c 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -171,6 +171,7 @@
CONFIG_SND=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSM8974=y
+CONFIG_WCD9320_CODEC=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_STORAGE=y
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 99ee2de..e32194f 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -2,6 +2,7 @@
#define __ASMARM_ARCH_TIMER_H
#include <linux/ioport.h>
+#include <linux/clocksource.h>
struct arch_timer {
struct resource res[3];
@@ -10,6 +11,7 @@
#ifdef CONFIG_ARM_ARCH_TIMER
int arch_timer_register(struct arch_timer *);
int arch_timer_of_register(void);
+cycle_t arch_counter_get_cntpct(void);
#else
static inline int arch_timer_register(struct arch_timer *at)
{
@@ -20,6 +22,11 @@
{
return -ENXIO;
}
+
+static inline cycle_t arch_counter_get_cntpct(void)
+{
+ return 0;
+}
#endif
#endif
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
index 562f13c..d341ea9 100644
--- a/arch/arm/include/asm/mach/mmc.h
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -146,7 +146,7 @@
unsigned int uhs_caps2;
void (*sdio_lpm_gpio_setup)(struct device *, unsigned int);
unsigned int status_irq;
- unsigned int status_gpio;
+ int status_gpio;
/* Indicates the polarity of the GPIO line when card is inserted */
bool is_status_gpio_active_low;
unsigned int sdiowakeup_irq;
@@ -158,7 +158,7 @@
unsigned int msmsdcc_fmax;
bool nonremovable;
unsigned int mpm_sdiowakeup_int;
- unsigned int wpswitch_gpio;
+ int wpswitch_gpio;
bool is_wpswitch_active_low;
struct msm_mmc_slot_reg_data *vreg_data;
int is_sdio_al_client;
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 87bb7d3..43c627d 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -22,6 +22,7 @@
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/export.h>
#include <asm/cputype.h>
#include <asm/localtimer.h>
@@ -315,10 +316,16 @@
return ((cycle_t) cvalh << 32) | cvall;
}
-static cycle_t arch_counter_read(struct clocksource *cs)
+cycle_t arch_counter_get_cntpct(void)
{
return arch_specific_timer->get_cntpct();
}
+EXPORT_SYMBOL(arch_counter_get_cntpct);
+
+static cycle_t arch_counter_read(struct clocksource *cs)
+{
+ return arch_counter_get_cntpct();
+}
#ifdef ARCH_HAS_READ_CURRENT_TIMER
int read_current_timer(unsigned long *timer_val)
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 4f14698..cc615ef 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -39,6 +39,7 @@
select MSM_RUN_QUEUE_STATS if MSM_SOC_REV_A
select DONT_MAP_HOLE_AFTER_MEMBANK0
select MIGHT_HAVE_CACHE_L2X0
+ select MSM_MODEM_RESTART
config ARCH_MSM7X30
bool "MSM7x30"
@@ -65,6 +66,7 @@
select MULTI_IRQ_HANDLER
select MSM_PM2 if PM
select HOLES_IN_ZONE if SPARSEMEM
+ select MSM_MODEM_RESTART
config ARCH_QSD8X50
bool "QSD8X50"
@@ -78,6 +80,7 @@
select MSM_GPIOMUX
select MSM_DALRPC
select MSM_PM2 if PM
+ select MSM_MODEM_RESTART
config ARCH_MSM8X60
bool "MSM8X60"
@@ -754,18 +757,6 @@
help
Support for the Qualcomm MSM8x60 Dragon board.
-config MACH_MSM8960_SIM
- depends on ARCH_MSM8960
- bool "MSM8960 Simulator"
- help
- Support for the Qualcomm MSM8960 simulator.
-
-config MACH_MSM8960_RUMI3
- depends on ARCH_MSM8960
- bool "MSM8960 RUMI3"
- help
- Support for the Qualcomm MSM8960 RUMI3 emulator.
-
config MACH_MSM8960_CDP
depends on ARCH_MSM8960
bool "MSM8960 CDP"
@@ -2064,6 +2055,16 @@
the low power modes that RPM enters. The drivers outputs the message
via a debugfs node.
+config MSM_RPM_RBCPR_STATS_LOG
+ tristate "MSM Resource Power Manager RPBCPR Stat Driver"
+ depends on DEBUG_FS
+ depends on MSM_RPM
+ help
+ This option enables a driver which reads RPM messages from a shared
+ memory location. These messages provide statistical information about
+ RBCPR (Rapid Bridge Core Power Reduction). The driver outputs the
+ messages via a debugfs node.
+
config MSM_DIRECT_SCLK_ACCESS
bool "Direct access to the SCLK timer"
default n
@@ -2080,6 +2081,9 @@
config MSM_NATIVE_RESTART
bool
+config MSM_MODEM_RESTART
+ bool
+
config MSM_PM2
depends on PM
bool
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index aff2251..50f811e 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -297,7 +297,7 @@
obj-$(CONFIG_ARCH_MSM8974) += clock-local2.o clock-pll.o clock-8974.o clock-rpm.o clock-voter.o
obj-$(CONFIG_ARCH_MSM8974) += gdsc.o
obj-$(CONFIG_ARCH_MSM9625) += board-9625.o board-9625-gpiomux.o
-obj-$(CONFIG_ARCH_MSM8930) += acpuclock-8930.o acpuclock-8627.o
+obj-$(CONFIG_ARCH_MSM8930) += acpuclock-8930.o acpuclock-8627.o acpuclock-8930aa.o
obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire.o board-sapphire-gpio.o
obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire-keypad.o board-sapphire-panel.o
@@ -328,6 +328,7 @@
obj-$(CONFIG_MSM_MPM_OF) += mpm-of.o
obj-$(CONFIG_MSM_MPM) += mpm.o
obj-$(CONFIG_MSM_RPM_STATS_LOG) += rpm_stats.o
+obj-$(CONFIG_MSM_RPM_RBCPR_STATS_LOG) += rpm_rbcpr_stats.o
obj-$(CONFIG_MSM_RPM_LOG) += rpm_log.o
obj-$(CONFIG_MSM_TZ_LOG) += tz_log.o
obj-$(CONFIG_MSM_XO) += msm_xo.o
@@ -362,6 +363,7 @@
obj-$(CONFIG_MSM_FAKE_BATTERY) += fish_battery.o
obj-$(CONFIG_MSM_RPC_VIBRATOR) += msm_vibrator.o
obj-$(CONFIG_MSM_NATIVE_RESTART) += restart.o
+obj-$(CONFIG_MSM_MODEM_RESTART) += restart_7k.o
obj-$(CONFIG_MSM_PROC_COMM_REGULATOR) += proccomm-regulator.o
ifdef CONFIG_MSM_PROC_COMM_REGULATOR
diff --git a/arch/arm/mach-msm/acpuclock-7627.c b/arch/arm/mach-msm/acpuclock-7627.c
index f9ff226..639cc94 100644
--- a/arch/arm/mach-msm/acpuclock-7627.c
+++ b/arch/arm/mach-msm/acpuclock-7627.c
@@ -38,11 +38,15 @@
#include "smd_private.h"
#include "acpuclock.h"
+#include "clock.h"
#define A11S_CLK_CNTL_ADDR (MSM_CSR_BASE + 0x100)
#define A11S_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104)
#define A11S_VDD_SVS_PLEVEL_ADDR (MSM_CSR_BASE + 0x124)
+#define PLL4_L_VAL_ADDR (MSM_CLK_CTL_BASE + 0x378)
+#define PLL4_M_VAL_ADDR (MSM_CLK_CTL_BASE + 0x37C)
+#define PLL4_N_VAL_ADDR (MSM_CLK_CTL_BASE + 0x380)
#define POWER_COLLAPSE_KHZ 19200
@@ -67,6 +71,12 @@
const char *name;
};
+struct pll_config {
+ unsigned int l;
+ unsigned int m;
+ unsigned int n;
+};
+
static struct acpu_clk_src pll_clk[ACPU_PLL_END] = {
[ACPU_PLL_0] = { .name = "pll0_clk" },
[ACPU_PLL_1] = { .name = "pll1_clk" },
@@ -74,6 +84,13 @@
[ACPU_PLL_4] = { .name = "pll4_clk" },
};
+static struct pll_config pll4_cfg_tbl[] = {
+ { 36, 1, 2 }, /* 700.8 MHz */
+ { 52, 1, 2 }, /* 1008 MHz */
+ { 63, 0, 1 }, /* 1209.6 MHz */
+ { 73, 0, 1 }, /* 1401.6 MHz */
+};
+
struct clock_state {
struct clkctl_acpu_speed *current_speed;
struct mutex lock;
@@ -91,15 +108,20 @@
unsigned int ahbclk_div;
int vdd;
unsigned int axiclk_khz;
+ struct pll_config *pll_rate;
unsigned long lpj; /* loops_per_jiffy */
/* Pointers in acpu_freq_tbl[] for max up/down steppings. */
struct clkctl_acpu_speed *down[ACPU_PLL_END];
struct clkctl_acpu_speed *up[ACPU_PLL_END];
};
+static bool dynamic_reprogram;
static struct clock_state drv_state = { 0 };
static struct clkctl_acpu_speed *acpu_freq_tbl;
+/* Switch to this when reprogramming PLL4 */
+static struct clkctl_acpu_speed *backup_s;
+
/*
* ACPU freq tables used for different PLLs frequency combinations. The
* correct table is selected during init.
@@ -119,7 +141,7 @@
{ 0, 400000, ACPU_PLL_2, 2, 2, 133333, 2, 5, 160000 },
{ 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 200000, 2, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627 with CDMA capable modem */
@@ -133,7 +155,7 @@
{ 0, 400000, ACPU_PLL_2, 2, 2, 133333, 2, 5, 160000 },
{ 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 200000, 2, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627 with GSM capable modem - PLL2 @ 800 */
@@ -147,7 +169,7 @@
{ 0, 400000, ACPU_PLL_2, 2, 1, 133333, 2, 5, 160000 },
{ 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 6, 160000 },
{ 1, 800000, ACPU_PLL_2, 2, 0, 200000, 3, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627 with CDMA capable modem - PLL2 @ 800 */
@@ -161,7 +183,7 @@
{ 0, 400000, ACPU_PLL_2, 2, 1, 133333, 2, 5, 160000 },
{ 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 6, 160000 },
{ 1, 800000, ACPU_PLL_2, 2, 0, 200000, 3, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627a PLL2 @ 1200MHz with GSM capable modem */
@@ -176,7 +198,7 @@
{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 800000, ACPU_PLL_4, 6, 0, 100000, 3, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627a PLL2 @ 1200MHz with CDMA capable modem */
@@ -191,7 +213,7 @@
{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 120000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 800000, ACPU_PLL_4, 6, 0, 100000, 3, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627aa PLL4 @ 1008MHz with GSM capable modem */
@@ -206,7 +228,7 @@
{ 0, 504000, ACPU_PLL_4, 6, 1, 63000, 3, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627aa PLL4 @ 1008MHz with CDMA capable modem */
@@ -221,7 +243,7 @@
{ 0, 504000, ACPU_PLL_4, 6, 1, 63000, 3, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 8625 PLL4 @ 1209MHz with GSM capable modem */
@@ -235,7 +257,7 @@
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 0, 604800, ACPU_PLL_4, 6, 1, 75600, 3, 6, 160000 },
{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 8625 PLL4 @ 1209MHz with CDMA capable modem */
@@ -249,7 +271,40 @@
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 0, 604800, ACPU_PLL_4, 6, 1, 75600, 3, 6, 160000 },
{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
+};
+
+/* 8625 PLL4 @ 1401.6MHz with GSM capable modem */
+static struct clkctl_acpu_speed pll0_960_pll1_245_pll2_1200_pll4_1401[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 30720 },
+ { 0, 61440, ACPU_PLL_1, 1, 3, 7680, 3, 0, 61440 },
+ { 0, 122880, ACPU_PLL_1, 1, 1, 15360, 3, 1, 61440 },
+ { 1, 245760, ACPU_PLL_1, 1, 0, 30720, 3, 1, 61440 },
+ { 0, 300000, ACPU_PLL_2, 2, 3, 37500, 3, 2, 122880 },
+ { 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 2, 122880 },
+ { 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 3, 122880 },
+ { 0, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 4, 160000 },
+ { 1, 700800, ACPU_PLL_4, 6, 0, 87500, 3, 4, 160000, &pll4_cfg_tbl[0]},
+ { 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 5, 200000, &pll4_cfg_tbl[1]},
+ { 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 6, 200000, &pll4_cfg_tbl[2]},
+ { 1, 1401600, ACPU_PLL_4, 6, 0, 175000, 3, 7, 200000, &pll4_cfg_tbl[3]},
+ { 0 }
+};
+
+/* 8625 PLL4 @ 1401.6MHz with CDMA capable modem */
+static struct clkctl_acpu_speed pll0_960_pll1_196_pll2_1200_pll4_1401[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 24576 },
+ { 0, 65536, ACPU_PLL_1, 1, 3, 8192, 3, 1, 49152 },
+ { 0, 98304, ACPU_PLL_1, 1, 1, 12288, 3, 2, 49152 },
+ { 1, 196608, ACPU_PLL_1, 1, 0, 24576, 3, 3, 98304 },
+ { 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 2, 122880 },
+ { 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 3, 122880 },
+ { 0, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 4, 160000 },
+ { 1, 700800, ACPU_PLL_4, 6, 0, 87500, 3, 4, 160000, &pll4_cfg_tbl[0]},
+ { 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 5, 200000, &pll4_cfg_tbl[1]},
+ { 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 6, 200000, &pll4_cfg_tbl[2]},
+ { 1, 1401600, ACPU_PLL_4, 6, 0, 175000, 3, 7, 200000, &pll4_cfg_tbl[3]},
+ { 0 }
};
/* 8625 PLL4 @ 1152MHz with GSM capable modem */
@@ -263,7 +318,7 @@
{ 0, 576000, ACPU_PLL_4, 6, 1, 72000, 3, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 1152000, ACPU_PLL_4, 6, 0, 144000, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 8625 PLL4 @ 1115MHz with CDMA capable modem */
@@ -277,7 +332,7 @@
{ 0, 576000, ACPU_PLL_4, 6, 1, 72000, 3, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 1152000, ACPU_PLL_4, 6, 0, 144000, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
@@ -292,7 +347,7 @@
{ 0, 400000, ACPU_PLL_2, 2, 2, 50000, 3, 4, 122880 },
{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627a PLL2 @ 1200MHz with GSM capable modem */
@@ -307,7 +362,7 @@
{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 800000, ACPU_PLL_4, 6, 0, 100000, 3, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627a PLL2 @ 1200MHz with CDMA capable modem */
@@ -322,7 +377,7 @@
{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 120000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 800000, ACPU_PLL_4, 6, 0, 100000, 3, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627aa PLL4 @ 1008MHz with GSM capable modem */
@@ -337,7 +392,7 @@
{ 0, 504000, ACPU_PLL_4, 6, 1, 63000, 3, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627aa PLL4 @ 1008MHz with CDMA capable modem */
@@ -352,7 +407,7 @@
{ 0, 504000, ACPU_PLL_4, 6, 1, 63000, 3, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7625a PLL2 @ 1200MHz with GSM capable modem */
@@ -366,7 +421,7 @@
{ 0, 400000, ACPU_PLL_2, 2, 2, 50000, 3, 4, 122880 },
{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
#define PLL_CONFIG(m0, m1, m2, m4) { \
@@ -399,6 +454,8 @@
PLL_CONFIG(960, 196, 1200, 1209),
PLL_CONFIG(960, 245, 1200, 1152),
PLL_CONFIG(960, 196, 1200, 1152),
+ PLL_CONFIG(960, 245, 1200, 1401),
+ PLL_CONFIG(960, 196, 1200, 1401),
{ 0, 0, 0, 0, 0 }
};
@@ -439,6 +496,31 @@
}
#endif
+static void update_jiffies(int cpu, unsigned long loops)
+{
+#ifdef CONFIG_SMP
+ for_each_possible_cpu(cpu) {
+ per_cpu(cpu_data, cpu).loops_per_jiffy =
+ loops;
+ }
+#endif
+ /* Adjust the global one */
+ loops_per_jiffy = loops;
+}
+
+/* Assumes PLL4 is off and the acpuclock isn't sourced from PLL4 */
+static void acpuclk_config_pll4(struct pll_config *pll)
+{
+ /* Make sure write to disable PLL_4 has completed
+ * before reconfiguring that PLL. */
+ mb();
+ writel_relaxed(pll->l, PLL4_L_VAL_ADDR);
+ writel_relaxed(pll->m, PLL4_M_VAL_ADDR);
+ writel_relaxed(pll->n, PLL4_N_VAL_ADDR);
+ /* Make sure PLL is programmed before returning. */
+ mb();
+}
+
static int acpuclk_set_vdd_level(int vdd)
{
uint32_t current_vdd;
@@ -524,6 +606,7 @@
struct clkctl_acpu_speed *cur_s, *tgt_s, *strt_s;
int res, rc = 0;
unsigned int plls_enabled = 0, pll;
+ int delta;
if (reason == SETRATE_CPUFREQ)
mutex_lock(&drv_state.lock);
@@ -592,6 +675,61 @@
pr_debug("Switching from ACPU rate %u KHz -> %u KHz\n",
strt_s->a11clk_khz, tgt_s->a11clk_khz);
+ delta = abs((int)(strt_s->a11clk_khz - tgt_s->a11clk_khz));
+
+ if (dynamic_reprogram) {
+ if (tgt_s->pll == ACPU_PLL_4) {
+ if (strt_s->pll == ACPU_PLL_4 ||
+ delta > drv_state.max_speed_delta_khz) {
+ /*
+ * Enable the backup PLL if required
+ * and switch to it.
+ */
+ clk_enable(pll_clk[backup_s->pll].clk);
+ acpuclk_set_div(backup_s);
+ }
+ /* Make sure PLL4 is off before reprogramming */
+ if ((plls_enabled & (1 << tgt_s->pll))) {
+ clk_disable(pll_clk[tgt_s->pll].clk);
+ plls_enabled &= (0 << tgt_s->pll);
+ }
+ acpuclk_config_pll4(tgt_s->pll_rate);
+ pll_clk[tgt_s->pll].clk->rate = tgt_s->a11clk_khz*1000;
+
+ } else if (strt_s->pll == ACPU_PLL_4) {
+ if (delta > drv_state.max_speed_delta_khz) {
+ /*
+ * Enable the backup PLL if required
+ * and switch to it.
+ */
+ clk_enable(pll_clk[backup_s->pll].clk);
+ acpuclk_set_div(backup_s);
+ }
+ }
+
+ if (!(plls_enabled & (1 << tgt_s->pll))) {
+ rc = clk_enable(pll_clk[tgt_s->pll].clk);
+ if (rc < 0) {
+ pr_err("PLL%d enable failed (%d)\n",
+ tgt_s->pll, rc);
+ goto out;
+ }
+ plls_enabled |= 1 << tgt_s->pll;
+ }
+ acpuclk_set_div(tgt_s);
+ drv_state.current_speed = tgt_s;
+ /* Re-adjust lpj for the new clock speed. */
+ update_jiffies(cpu, cur_s->lpj);
+
+ /* Disable the backup PLL */
+ if ((delta > drv_state.max_speed_delta_khz)
+ || (strt_s->pll == ACPU_PLL_4 &&
+ tgt_s->pll == ACPU_PLL_4))
+ clk_disable_unprepare(pll_clk[backup_s->pll].clk);
+
+ goto done;
+ }
+
while (cur_s != tgt_s) {
/*
* Always jump to target freq if within max_speed_delta_khz,
@@ -648,17 +786,10 @@
acpuclk_set_div(cur_s);
drv_state.current_speed = cur_s;
/* Re-adjust lpj for the new clock speed. */
-#ifdef CONFIG_SMP
- for_each_possible_cpu(cpu) {
- per_cpu(cpu_data, cpu).loops_per_jiffy =
- cur_s->lpj;
- }
-#endif
- /* Adjust the global one */
- loops_per_jiffy = cur_s->lpj;
+ update_jiffies(cpu, cur_s->lpj);
}
-
+done:
/* Nothing else to do for SWFI. */
if (reason == SETRATE_SWFI)
goto out;
@@ -781,7 +912,7 @@
static void __devinit select_freq_plan(void)
{
unsigned long pll_mhz[ACPU_PLL_END];
- struct pll_freq_tbl_map *t;
+ struct pll_freq_tbl_map *t = acpu_freq_tbl_list;
int i;
/* Get PLL clocks */
@@ -817,7 +948,7 @@
}
} else {
/* Select the right table to use. */
- for (t = acpu_freq_tbl_list; t->tbl != 0; t++) {
+ for (; t->tbl != 0; t++) {
if (t->pll0_rate == pll_mhz[ACPU_PLL_0]
&& t->pll1_rate == pll_mhz[ACPU_PLL_1]
&& t->pll2_rate == pll_mhz[ACPU_PLL_2]
@@ -828,6 +959,25 @@
}
}
+ /*
+ * When PLL4 can run max @ 1401.6MHz, we have to support
+ * dynamic reprogramming of PLL4.
+ *
+ * Also find the backup pll used during PLL4 reprogramming.
+ * We are using PLL2@600MHz as backup PLL, since 800MHz jump
+ * is fine.
+ */
+ if (t->pll4_rate == 1401) {
+ dynamic_reprogram = 1;
+ for ( ; t->tbl->a11clk_khz; t->tbl++) {
+ if (t->tbl->pll == ACPU_PLL_2 &&
+ t->tbl->a11clk_src_div == 1) {
+ backup_s = t->tbl;
+ break;
+ }
+ }
+ }
+
if (acpu_freq_tbl == NULL) {
pr_crit("Unknown PLL configuration!\n");
BUG();
@@ -988,3 +1138,4 @@
return platform_driver_register(&acpuclk_7627_driver);
}
postcore_initcall(acpuclk_7627_init);
+
diff --git a/arch/arm/mach-msm/acpuclock-8064.c b/arch/arm/mach-msm/acpuclock-8064.c
index d46d268..6f9960d 100644
--- a/arch/arm/mach-msm/acpuclock-8064.c
+++ b/arch/arm/mach-msm/acpuclock-8064.c
@@ -31,10 +31,12 @@
.has_droop_ctl = true,
.droop_offset = 0x14,
.droop_val = 0x0108C000,
- .low_vdd_l_max = 40,
- .vdd[HFPLL_VDD_NONE] = 0,
- .vdd[HFPLL_VDD_LOW] = 945000,
+ .low_vdd_l_max = 22,
+ .nom_vdd_l_max = 42,
+ .vdd[HFPLL_VDD_NONE] = 0,
+ .vdd[HFPLL_VDD_LOW] = 945000,
.vdd[HFPLL_VDD_NOM] = 1050000,
+ .vdd[HFPLL_VDD_HIGH] = 1150000,
};
static struct scalable scalable[] __initdata = {
@@ -43,7 +45,7 @@
.aux_clk_sel_phys = 0x02088014,
.aux_clk_sel = 3,
.l2cpmr_iaddr = 0x4501,
- .vreg[VREG_CORE] = { "krait0", 1300000, 1740000 },
+ .vreg[VREG_CORE] = { "krait0", 1300000 },
.vreg[VREG_MEM] = { "krait0_mem", 1150000 },
.vreg[VREG_DIG] = { "krait0_dig", 1150000 },
.vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
@@ -53,7 +55,7 @@
.aux_clk_sel_phys = 0x02098014,
.aux_clk_sel = 3,
.l2cpmr_iaddr = 0x5501,
- .vreg[VREG_CORE] = { "krait1", 1300000, 1740000 },
+ .vreg[VREG_CORE] = { "krait1", 1300000 },
.vreg[VREG_MEM] = { "krait1_mem", 1150000 },
.vreg[VREG_DIG] = { "krait1_dig", 1150000 },
.vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
@@ -63,7 +65,7 @@
.aux_clk_sel_phys = 0x020A8014,
.aux_clk_sel = 3,
.l2cpmr_iaddr = 0x6501,
- .vreg[VREG_CORE] = { "krait2", 1300000, 1740000 },
+ .vreg[VREG_CORE] = { "krait2", 1300000 },
.vreg[VREG_MEM] = { "krait2_mem", 1150000 },
.vreg[VREG_DIG] = { "krait2_dig", 1150000 },
.vreg[VREG_HFPLL_A] = { "krait2_hfpll", 1800000 },
@@ -73,7 +75,7 @@
.aux_clk_sel_phys = 0x020B8014,
.aux_clk_sel = 3,
.l2cpmr_iaddr = 0x7501,
- .vreg[VREG_CORE] = { "krait3", 1300000, 1740000 },
+ .vreg[VREG_CORE] = { "krait3", 1300000 },
.vreg[VREG_MEM] = { "krait3_mem", 1150000 },
.vreg[VREG_DIG] = { "krait3_dig", 1150000 },
.vreg[VREG_HFPLL_A] = { "krait3_hfpll", 1800000 },
@@ -104,111 +106,107 @@
};
static struct l2_level l2_freq_tbl[] __initdata __initdata = {
- [0] = { {STBY_KHZ, QSB, 0, 0, 0x00 }, 1050000, 1050000, 0 },
- [1] = { { 384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
- [2] = { { 432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 2 },
- [3] = { { 486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 2 },
- [4] = { { 540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 2 },
- [5] = { { 594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
- [6] = { { 648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 4 },
- [7] = { { 702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 4 },
- [8] = { { 756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 4 },
- [9] = { { 810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 4 },
- [10] = { { 864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 4 },
- [11] = { { 918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 5 },
- [12] = { { 972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 5 },
- [13] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 5 },
- [14] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 5 },
- [15] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 5 },
+ [0] = { { 384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
+ [1] = { { 432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 2 },
+ [2] = { { 486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 2 },
+ [3] = { { 540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 2 },
+ [4] = { { 594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
+ [5] = { { 648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 4 },
+ [6] = { { 702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 4 },
+ [7] = { { 756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 4 },
+ [8] = { { 810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 4 },
+ [9] = { { 864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 4 },
+ [10] = { { 918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 5 },
+ [11] = { { 972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 5 },
+ [12] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 5 },
+ [13] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 5 },
+ [14] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 5 },
};
static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 950000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 950000 },
- { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(7), 975000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(7), 975000 },
- { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(7), 1000000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(7), 1000000 },
- { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(7), 1025000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(7), 1025000 },
- { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(7), 1075000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(7), 1075000 },
- { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(7), 1100000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(7), 1100000 },
- { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(7), 1125000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(7), 1125000 },
- { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1175000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1175000 },
- { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1200000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1200000 },
- { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1225000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1225000 },
- { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1237500 },
- { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(15), 1237500 },
- { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(15), 1250000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 950000 },
+ { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 975000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 975000 },
+ { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 1000000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 1000000 },
+ { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 1025000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 1025000 },
+ { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(6), 1075000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(6), 1075000 },
+ { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(6), 1100000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(6), 1100000 },
+ { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(6), 1125000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(6), 1125000 },
+ { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(14), 1175000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(14), 1175000 },
+ { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(14), 1200000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(14), 1200000 },
+ { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(14), 1225000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(14), 1225000 },
+ { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(14), 1237500 },
+ { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(14), 1237500 },
+ { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(14), 1250000 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 900000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 900000 },
- { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(7), 925000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(7), 925000 },
- { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(7), 950000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(7), 950000 },
- { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(7), 975000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(7), 975000 },
- { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(7), 1025000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(7), 1025000 },
- { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(7), 1050000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(7), 1050000 },
- { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(7), 1075000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(7), 1075000 },
- { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1125000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1125000 },
- { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1150000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1150000 },
- { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1175000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1175000 },
- { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1187500 },
- { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(15), 1187500 },
- { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(15), 1200000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 900000 },
+ { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 925000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 925000 },
+ { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 950000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 950000 },
+ { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 975000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 975000 },
+ { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(6), 1025000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(6), 1025000 },
+ { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(6), 1050000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(6), 1050000 },
+ { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(6), 1075000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(6), 1075000 },
+ { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(14), 1125000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(14), 1125000 },
+ { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(14), 1150000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(14), 1150000 },
+ { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(14), 1175000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(14), 1175000 },
+ { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(14), 1187500 },
+ { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(14), 1187500 },
+ { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(14), 1200000 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 850000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 850000 },
- { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(7), 875000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(7), 875000 },
- { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(7), 900000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(7), 900000 },
- { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(7), 925000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(7), 925000 },
- { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(7), 975000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(7), 975000 },
- { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(7), 1000000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(7), 1000000 },
- { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(7), 1025000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(7), 1025000 },
- { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1075000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1075000 },
- { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1100000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1100000 },
- { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1125000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1125000 },
- { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1137500 },
- { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(15), 1137500 },
- { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(15), 1150000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 850000 },
+ { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 875000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 875000 },
+ { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 900000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 900000 },
+ { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 925000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 925000 },
+ { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(6), 975000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(6), 975000 },
+ { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(6), 1000000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(6), 1000000 },
+ { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(6), 1025000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(6), 1025000 },
+ { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(14), 1075000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(14), 1075000 },
+ { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(14), 1100000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(14), 1100000 },
+ { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(14), 1125000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(14), 1125000 },
+ { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(14), 1137500 },
+ { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(14), 1137500 },
+ { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(14), 1150000 },
{ 0, { 0 } }
};
static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
- [PVS_SLOW] = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow) },
- [PVS_NOMINAL] = { acpu_freq_tbl_nom, sizeof(acpu_freq_tbl_nom) },
- [PVS_FAST] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast) },
- /* TODO: update the faster table when data is available */
- [PVS_FASTER] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast) },
+ [PVS_SLOW] = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow), 0 },
+ [PVS_NOMINAL] = { acpu_freq_tbl_nom, sizeof(acpu_freq_tbl_nom), 25000 },
+ [PVS_FAST] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
+ /* TODO: update the faster table when data is available */
+ [PVS_FASTER] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
};
static struct acpuclk_krait_params acpuclk_8064_params __initdata = {
@@ -220,6 +218,7 @@
.l2_freq_tbl_size = sizeof(l2_freq_tbl),
.bus_scale = &bus_scale_data,
.qfprom_phys_base = 0x00700000,
+ .stby_khz = 384000,
};
static int __init acpuclk_8064_probe(struct platform_device *pdev)
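Editor's note, not part of the patch: the pvs_table initializers above gain a third member, and the STBY_KHZ/QSB rows disappear from every frequency table in favour of the new .stby_khz parameter. The third member is evidently a per-PVS-bin boost voltage in microvolts (0 for slow parts, 25000 for nominal and fast). A minimal standalone sketch of the assumed boost arithmetic, mirroring calculate_vdd_core() in the acpuclock-krait.c hunks further down; the helper name and the sample values are illustrative only:

#include <stdio.h>

/* Sketch, not kernel code: the per-bin boost (third pvs_table member) is
 * assumed to be added on top of each acpu_level's vdd_core when boosting
 * is enabled, as calculate_vdd_core() below does with drv.boost_uv. */
static int calc_vdd_core_uv(int vdd_core_uv, int boost_uv, int boost_enabled)
{
	return vdd_core_uv + (boost_enabled ? boost_uv : 0);
}

int main(void)
{
	/* A 1150000 uV level on a nominal-bin part with a 25000 uV boost. */
	printf("%d uV\n", calc_vdd_core_uv(1150000, 25000, 1)); /* 1175000 uV */
	return 0;
}

The "boost" module parameter added to acpuclock-krait.c later in this patch gates that addition at runtime.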
diff --git a/arch/arm/mach-msm/acpuclock-8627.c b/arch/arm/mach-msm/acpuclock-8627.c
index 8060803..1642dae 100644
--- a/arch/arm/mach-msm/acpuclock-8627.c
+++ b/arch/arm/mach-msm/acpuclock-8627.c
@@ -37,10 +37,12 @@
.has_droop_ctl = true,
.droop_offset = 0x14,
.droop_val = 0x0108C000,
- .low_vdd_l_max = 40,
+ .low_vdd_l_max = 22,
+ .nom_vdd_l_max = 42,
.vdd[HFPLL_VDD_NONE] = LVL_NONE,
.vdd[HFPLL_VDD_LOW] = LVL_LOW,
.vdd[HFPLL_VDD_NOM] = LVL_NOM,
+ .vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
};
static struct scalable scalable[] __initdata = {
@@ -49,7 +51,7 @@
.aux_clk_sel_phys = 0x02088014,
.aux_clk_sel = 3,
.l2cpmr_iaddr = 0x4501,
- .vreg[VREG_CORE] = { "krait0", 1300000, 1740000 },
+ .vreg[VREG_CORE] = { "krait0", 1300000 },
.vreg[VREG_MEM] = { "krait0_mem", 1150000 },
.vreg[VREG_DIG] = { "krait0_dig", 1150000 },
.vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
@@ -59,7 +61,7 @@
.aux_clk_sel_phys = 0x02098014,
.aux_clk_sel = 3,
.l2cpmr_iaddr = 0x5501,
- .vreg[VREG_CORE] = { "krait1", 1300000, 1740000 },
+ .vreg[VREG_CORE] = { "krait1", 1300000 },
.vreg[VREG_MEM] = { "krait1_mem", 1150000 },
.vreg[VREG_DIG] = { "krait1_dig", 1150000 },
.vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
@@ -90,43 +92,41 @@
/* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
static struct l2_level l2_freq_tbl[] __initdata = {
- [0] = { {STBY_KHZ, QSB, 0, 0, 0x00 }, LVL_NOM, 1050000, 0 },
- [1] = { { 384000, PLL_8, 0, 2, 0x00 }, LVL_NOM, 1050000, 1 },
- [2] = { { 432000, HFPLL, 2, 0, 0x20 }, LVL_NOM, 1050000, 1 },
- [3] = { { 486000, HFPLL, 2, 0, 0x24 }, LVL_NOM, 1050000, 1 },
- [4] = { { 540000, HFPLL, 2, 0, 0x28 }, LVL_NOM, 1050000, 2 },
- [5] = { { 594000, HFPLL, 1, 0, 0x16 }, LVL_NOM, 1050000, 2 },
- [6] = { { 648000, HFPLL, 1, 0, 0x18 }, LVL_NOM, 1050000, 2 },
- [7] = { { 702000, HFPLL, 1, 0, 0x1A }, LVL_NOM, 1050000, 3 },
- [8] = { { 756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 3 },
- [9] = { { 810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 3 },
- [10] = { { 864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
- [11] = { { 918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 4 },
- [12] = { { 972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 4 },
+ [0] = { { 384000, PLL_8, 0, 2, 0x00 }, LVL_NOM, 1050000, 1 },
+ [1] = { { 432000, HFPLL, 2, 0, 0x20 }, LVL_NOM, 1050000, 1 },
+ [2] = { { 486000, HFPLL, 2, 0, 0x24 }, LVL_NOM, 1050000, 1 },
+ [3] = { { 540000, HFPLL, 2, 0, 0x28 }, LVL_NOM, 1050000, 2 },
+ [4] = { { 594000, HFPLL, 1, 0, 0x16 }, LVL_NOM, 1050000, 2 },
+ [5] = { { 648000, HFPLL, 1, 0, 0x18 }, LVL_NOM, 1050000, 2 },
+ [6] = { { 702000, HFPLL, 1, 0, 0x1A }, LVL_NOM, 1050000, 3 },
+ [7] = { { 756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 3 },
+ [8] = { { 810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 3 },
+ [9] = { { 864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
+ [10] = { { 918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 4 },
+ [11] = { { 972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 4 },
};
/* TODO: Update core voltages when data is available. */
static struct acpu_level acpu_freq_tbl[] __initdata = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 900000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 900000 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 925000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 925000 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 937500 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 962500 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(9), 987500 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(9), 1000000 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(9), 1025000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(9), 1062500 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(12), 1062500 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(12), 1087500 },
- { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(12), 1100000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 900000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(4), 925000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(4), 925000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(4), 937500 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(4), 962500 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(8), 987500 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(8), 1000000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(8), 1025000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(8), 1062500 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1062500 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1087500 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(11), 1100000 },
{ 0, { 0 } }
};
static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
- [PVS_SLOW] = { acpu_freq_tbl, sizeof(acpu_freq_tbl) },
- [PVS_NOMINAL] = { acpu_freq_tbl, sizeof(acpu_freq_tbl) },
- [PVS_FAST] = { acpu_freq_tbl, sizeof(acpu_freq_tbl) },
+ [PVS_SLOW] = { acpu_freq_tbl, sizeof(acpu_freq_tbl), 0 },
+ [PVS_NOMINAL] = { acpu_freq_tbl, sizeof(acpu_freq_tbl), 25000 },
+ [PVS_FAST] = { acpu_freq_tbl, sizeof(acpu_freq_tbl), 25000 },
};
static struct acpuclk_krait_params acpuclk_8627_params __initdata = {
@@ -138,6 +138,7 @@
.l2_freq_tbl_size = sizeof(l2_freq_tbl),
.bus_scale = &bus_scale_data,
.qfprom_phys_base = 0x00700000,
+ .stby_khz = 384000,
};
static int __init acpuclk_8627_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/acpuclock-8930.c b/arch/arm/mach-msm/acpuclock-8930.c
index d04ce03..5647d14 100644
--- a/arch/arm/mach-msm/acpuclock-8930.c
+++ b/arch/arm/mach-msm/acpuclock-8930.c
@@ -37,10 +37,12 @@
.has_droop_ctl = true,
.droop_offset = 0x14,
.droop_val = 0x0108C000,
- .low_vdd_l_max = 40,
+ .low_vdd_l_max = 22,
+ .nom_vdd_l_max = 42,
.vdd[HFPLL_VDD_NONE] = LVL_NONE,
.vdd[HFPLL_VDD_LOW] = LVL_LOW,
.vdd[HFPLL_VDD_NOM] = LVL_NOM,
+ .vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
};
static struct scalable scalable[] __initdata = {
@@ -49,7 +51,7 @@
.aux_clk_sel_phys = 0x02088014,
.aux_clk_sel = 3,
.l2cpmr_iaddr = 0x4501,
- .vreg[VREG_CORE] = { "krait0", 1300000, 1740000 },
+ .vreg[VREG_CORE] = { "krait0", 1300000 },
.vreg[VREG_MEM] = { "krait0_mem", 1150000 },
.vreg[VREG_DIG] = { "krait0_dig", 1150000 },
.vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
@@ -59,7 +61,7 @@
.aux_clk_sel_phys = 0x02098014,
.aux_clk_sel = 3,
.l2cpmr_iaddr = 0x5501,
- .vreg[VREG_CORE] = { "krait1", 1300000, 1740000 },
+ .vreg[VREG_CORE] = { "krait1", 1300000 },
.vreg[VREG_MEM] = { "krait1_mem", 1150000 },
.vreg[VREG_DIG] = { "krait1_dig", 1150000 },
.vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
@@ -93,92 +95,88 @@
/* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
static struct l2_level l2_freq_tbl[] __initdata = {
- [0] = { {STBY_KHZ, QSB, 0, 0, 0x00 }, LVL_NOM, 1050000, 0 },
- [1] = { { 384000, PLL_8, 0, 2, 0x00 }, LVL_NOM, 1050000, 1 },
- [2] = { { 432000, HFPLL, 2, 0, 0x20 }, LVL_NOM, 1050000, 2 },
- [3] = { { 486000, HFPLL, 2, 0, 0x24 }, LVL_NOM, 1050000, 2 },
- [4] = { { 540000, HFPLL, 2, 0, 0x28 }, LVL_NOM, 1050000, 2 },
- [5] = { { 594000, HFPLL, 1, 0, 0x16 }, LVL_NOM, 1050000, 2 },
- [6] = { { 648000, HFPLL, 1, 0, 0x18 }, LVL_NOM, 1050000, 4 },
- [7] = { { 702000, HFPLL, 1, 0, 0x1A }, LVL_NOM, 1050000, 4 },
- [8] = { { 756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 4 },
- [9] = { { 810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 4 },
- [10] = { { 864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
- [11] = { { 918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 7 },
- [12] = { { 972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 7 },
- [13] = { { 1026000, HFPLL, 1, 0, 0x26 }, LVL_HIGH, 1150000, 7 },
- [14] = { { 1080000, HFPLL, 1, 0, 0x28 }, LVL_HIGH, 1150000, 7 },
- [15] = { { 1134000, HFPLL, 1, 0, 0x2A }, LVL_HIGH, 1150000, 7 },
- [16] = { { 1188000, HFPLL, 1, 0, 0x2C }, LVL_HIGH, 1150000, 7 },
+ [0] = { { 384000, PLL_8, 0, 2, 0x00 }, LVL_NOM, 1050000, 1 },
+ [1] = { { 432000, HFPLL, 2, 0, 0x20 }, LVL_NOM, 1050000, 2 },
+ [2] = { { 486000, HFPLL, 2, 0, 0x24 }, LVL_NOM, 1050000, 2 },
+ [3] = { { 540000, HFPLL, 2, 0, 0x28 }, LVL_NOM, 1050000, 2 },
+ [4] = { { 594000, HFPLL, 1, 0, 0x16 }, LVL_NOM, 1050000, 2 },
+ [5] = { { 648000, HFPLL, 1, 0, 0x18 }, LVL_NOM, 1050000, 4 },
+ [6] = { { 702000, HFPLL, 1, 0, 0x1A }, LVL_NOM, 1050000, 4 },
+ [7] = { { 756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 4 },
+ [8] = { { 810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 4 },
+ [9] = { { 864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
+ [10] = { { 918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 7 },
+ [11] = { { 972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 7 },
+ [12] = { { 1026000, HFPLL, 1, 0, 0x26 }, LVL_HIGH, 1150000, 7 },
+ [13] = { { 1080000, HFPLL, 1, 0, 0x28 }, LVL_HIGH, 1150000, 7 },
+ [14] = { { 1134000, HFPLL, 1, 0, 0x2A }, LVL_HIGH, 1150000, 7 },
+ [15] = { { 1188000, HFPLL, 1, 0, 0x2C }, LVL_HIGH, 1150000, 7 },
};
static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 950000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 950000 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 975000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 975000 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 1000000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 1000000 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 1025000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 1025000 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1075000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1075000 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1100000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1100000 },
- { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(11), 1125000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1125000 },
- { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1175000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1175000 },
- { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1200000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 950000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 975000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 975000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 1000000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 1000000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(5), 1025000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(5), 1025000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(10), 1075000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(10), 1075000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(10), 1100000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(10), 1100000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(10), 1125000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1125000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1175000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1175000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1200000 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 925000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 925000 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 950000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 950000 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 975000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 975000 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 1000000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 1000000 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1050000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1050000 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1075000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1075000 },
- { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(11), 1100000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1100000 },
- { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1150000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1150000 },
- { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1175000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 925000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 950000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 950000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 975000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 975000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(5), 1000000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(5), 1000000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(10), 1050000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(10), 1050000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(10), 1075000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(10), 1075000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(10), 1100000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1100000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1150000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1150000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1175000 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 900000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 900000 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 900000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 900000 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 925000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 925000 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 950000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 950000 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1000000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1000000 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1025000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1025000 },
- { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(11), 1050000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1050000 },
- { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1100000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1100000 },
- { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1125000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 900000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 900000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 900000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 925000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 925000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(5), 950000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(5), 950000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(10), 1000000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(10), 1000000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(10), 1025000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(10), 1025000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(10), 1050000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1050000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1100000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1100000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1125000 },
{ 0, { 0 } }
};
static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
- [PVS_SLOW] = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow) },
- [PVS_NOMINAL] = { acpu_freq_tbl_nom, sizeof(acpu_freq_tbl_nom) },
- [PVS_FAST] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast) },
+ [PVS_SLOW] = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow), 0 },
+ [PVS_NOMINAL] = { acpu_freq_tbl_nom, sizeof(acpu_freq_tbl_nom), 25000 },
+ [PVS_FAST] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
};
static struct acpuclk_krait_params acpuclk_8930_params __initdata = {
@@ -190,6 +188,7 @@
.l2_freq_tbl_size = sizeof(l2_freq_tbl),
.bus_scale = &bus_scale_data,
.qfprom_phys_base = 0x00700000,
+ .stby_khz = 384000,
};
static int __init acpuclk_8930_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/acpuclock-8930aa.c b/arch/arm/mach-msm/acpuclock-8930aa.c
new file mode 100644
index 0000000..34ba1da
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-8930aa.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <mach/rpm-regulator.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+
+#include "acpuclock.h"
+#include "acpuclock-krait.h"
+
+/* Corner type vreg VDD values */
+#define LVL_NONE RPM_VREG_CORNER_NONE
+#define LVL_LOW RPM_VREG_CORNER_LOW
+#define LVL_NOM RPM_VREG_CORNER_NOMINAL
+#define LVL_HIGH RPM_VREG_CORNER_HIGH
+
+static struct hfpll_data hfpll_data __initdata = {
+ .mode_offset = 0x00,
+ .l_offset = 0x08,
+ .m_offset = 0x0C,
+ .n_offset = 0x10,
+ .config_offset = 0x04,
+ .config_val = 0x7845C665,
+ .has_droop_ctl = true,
+ .droop_offset = 0x14,
+ .droop_val = 0x0108C000,
+ .low_vdd_l_max = 22,
+ .nom_vdd_l_max = 42,
+ .vdd[HFPLL_VDD_NONE] = LVL_NONE,
+ .vdd[HFPLL_VDD_LOW] = LVL_LOW,
+ .vdd[HFPLL_VDD_NOM] = LVL_NOM,
+ .vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
+};
+
+static struct scalable scalable[] __initdata = {
+ [CPU0] = {
+ .hfpll_phys_base = 0x00903200,
+ .aux_clk_sel_phys = 0x02088014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x4501,
+ .vreg[VREG_CORE] = { "krait0", 1300000 },
+ .vreg[VREG_MEM] = { "krait0_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait0_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
+ },
+ [CPU1] = {
+ .hfpll_phys_base = 0x00903300,
+ .aux_clk_sel_phys = 0x02098014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x5501,
+ .vreg[VREG_CORE] = { "krait1", 1300000 },
+ .vreg[VREG_MEM] = { "krait1_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait1_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
+ },
+ [L2] = {
+ .hfpll_phys_base = 0x00903400,
+ .aux_clk_sel_phys = 0x02011028,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x0500,
+ .vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
+ },
+};
+
+static struct msm_bus_paths bw_level_tbl[] __initdata = {
+ [0] = BW_MBPS(640), /* At least 80 MHz on bus. */
+ [1] = BW_MBPS(1064), /* At least 133 MHz on bus. */
+ [2] = BW_MBPS(1600), /* At least 200 MHz on bus. */
+ [3] = BW_MBPS(2128), /* At least 266 MHz on bus. */
+ [4] = BW_MBPS(3200), /* At least 400 MHz on bus. */
+ [5] = BW_MBPS(3600), /* At least 450 MHz on bus. */
+ [6] = BW_MBPS(3936), /* At least 492 MHz on bus. */
+ [7] = BW_MBPS(4264), /* At least 533 MHz on bus. */
+};
+
+static struct msm_bus_scale_pdata bus_scale_data __initdata = {
+ .usecase = bw_level_tbl,
+ .num_usecases = ARRAY_SIZE(bw_level_tbl),
+ .active_only = 1,
+ .name = "acpuclk-8930aa",
+};
+
+/* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
+static struct l2_level l2_freq_tbl[] __initdata = {
+ [0] = { { 384000, PLL_8, 0, 2, 0x00 }, LVL_NOM, 1050000, 1 },
+ [1] = { { 432000, HFPLL, 2, 0, 0x20 }, LVL_NOM, 1050000, 2 },
+ [2] = { { 486000, HFPLL, 2, 0, 0x24 }, LVL_NOM, 1050000, 2 },
+ [3] = { { 540000, HFPLL, 2, 0, 0x28 }, LVL_NOM, 1050000, 2 },
+ [4] = { { 594000, HFPLL, 1, 0, 0x16 }, LVL_NOM, 1050000, 2 },
+ [5] = { { 648000, HFPLL, 1, 0, 0x18 }, LVL_NOM, 1050000, 4 },
+ [6] = { { 702000, HFPLL, 1, 0, 0x1A }, LVL_NOM, 1050000, 4 },
+ [7] = { { 756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 4 },
+ [8] = { { 810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 4 },
+ [9] = { { 864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
+ [10] = { { 918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 7 },
+ [11] = { { 972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 7 },
+ [12] = { { 1026000, HFPLL, 1, 0, 0x26 }, LVL_HIGH, 1150000, 7 },
+ [13] = { { 1080000, HFPLL, 1, 0, 0x28 }, LVL_HIGH, 1150000, 7 },
+ [14] = { { 1134000, HFPLL, 1, 0, 0x2A }, LVL_HIGH, 1150000, 7 },
+ [15] = { { 1188000, HFPLL, 1, 0, 0x2C }, LVL_HIGH, 1150000, 7 },
+};
+
+static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 950000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 975000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 975000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 1000000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 1000000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(5), 1025000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(5), 1025000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(10), 1075000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(10), 1075000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(10), 1100000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(10), 1100000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(10), 1125000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1125000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1175000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1175000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1200000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1200000 },
+ { 1, { 1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1225000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1225000 },
+ { 1, { 1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1237500 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 925000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 950000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 950000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 975000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 975000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(5), 1000000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(5), 1000000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(10), 1050000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(10), 1050000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(10), 1075000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(10), 1075000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(10), 1100000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1100000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1150000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1150000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1175000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1175000 },
+ { 1, { 1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1200000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1200000 },
+ { 1, { 1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1212500 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 900000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 900000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 900000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 925000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 925000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(5), 950000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(5), 950000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(10), 1000000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(10), 1000000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(10), 1025000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(10), 1025000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(10), 1050000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1050000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1100000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1100000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1125000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1125000 },
+ { 1, { 1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1150000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1150000 },
+ { 1, { 1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1162500 },
+ { 0, { 0 } }
+};
+
+static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
+ [PVS_SLOW] = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow), 0 },
+ [PVS_NOMINAL] = { acpu_freq_tbl_nom, sizeof(acpu_freq_tbl_nom), 25000 },
+ [PVS_FAST] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
+};
+
+static struct acpuclk_krait_params acpuclk_8930aa_params __initdata = {
+ .scalable = scalable,
+ .scalable_size = sizeof(scalable),
+ .hfpll_data = &hfpll_data,
+ .pvs_tables = pvs_tables,
+ .l2_freq_tbl = l2_freq_tbl,
+ .l2_freq_tbl_size = sizeof(l2_freq_tbl),
+ .bus_scale = &bus_scale_data,
+ .qfprom_phys_base = 0x00700000,
+ .stby_khz = 384000,
+};
+
+static int __init acpuclk_8930aa_probe(struct platform_device *pdev)
+{
+ return acpuclk_krait_init(&pdev->dev, &acpuclk_8930aa_params);
+}
+
+static struct platform_driver acpuclk_8930aa_driver = {
+ .driver = {
+ .name = "acpuclk-8930aa",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init acpuclk_8930aa_init(void)
+{
+ return platform_driver_probe(&acpuclk_8930aa_driver,
+ acpuclk_8930aa_probe);
+}
+device_initcall(acpuclk_8930aa_init);
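Editor's note, not part of the patch: the new acpuclock-8930aa.c follows the same pattern as the other targets: static frequency/voltage tables plus an acpuclk_krait_params block handed to acpuclk_krait_init() from a platform_driver probe. The probe above only runs if board code registers a matching platform_device; a hypothetical registration (assumed, not included in this patch) would look like:

/* Hypothetical board-file counterpart: the device name must match the
 * "acpuclk-8930aa" driver name above. */
static struct platform_device acpuclk_8930aa_device = {
	.name	= "acpuclk-8930aa",
	.id	= -1,
};

/* e.g. from the 8930aa board init code: */
/* platform_device_register(&acpuclk_8930aa_device); */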
diff --git a/arch/arm/mach-msm/acpuclock-8960.c b/arch/arm/mach-msm/acpuclock-8960.c
index 8623c2b..8cc4b13 100644
--- a/arch/arm/mach-msm/acpuclock-8960.c
+++ b/arch/arm/mach-msm/acpuclock-8960.c
@@ -31,10 +31,12 @@
.has_droop_ctl = true,
.droop_offset = 0x14,
.droop_val = 0x0108C000,
- .low_vdd_l_max = 40,
- .vdd[HFPLL_VDD_NONE] = 0,
- .vdd[HFPLL_VDD_LOW] = 850000,
+ .low_vdd_l_max = 22,
+ .nom_vdd_l_max = 42,
+ .vdd[HFPLL_VDD_NONE] = 0,
+ .vdd[HFPLL_VDD_LOW] = 945000,
.vdd[HFPLL_VDD_NOM] = 1050000,
+ .vdd[HFPLL_VDD_HIGH] = 1150000,
};
static struct scalable scalable[] __initdata = {
@@ -43,7 +45,7 @@
.aux_clk_sel_phys = 0x02088014,
.aux_clk_sel = 3,
.l2cpmr_iaddr = 0x4501,
- .vreg[VREG_CORE] = { "krait0", 1300000, 3200000 },
+ .vreg[VREG_CORE] = { "krait0", 1300000 },
.vreg[VREG_MEM] = { "krait0_mem", 1150000 },
.vreg[VREG_DIG] = { "krait0_dig", 1150000 },
.vreg[VREG_HFPLL_A] = { "krait0_s8", 2050000 },
@@ -54,7 +56,7 @@
.aux_clk_sel_phys = 0x02098014,
.aux_clk_sel = 3,
.l2cpmr_iaddr = 0x5501,
- .vreg[VREG_CORE] = { "krait1", 1300000, 3200000 },
+ .vreg[VREG_CORE] = { "krait1", 1300000 },
.vreg[VREG_MEM] = { "krait1_mem", 1150000 },
.vreg[VREG_DIG] = { "krait1_dig", 1150000 },
.vreg[VREG_HFPLL_A] = { "krait1_s8", 2050000 },
@@ -88,113 +90,109 @@
};
static struct l2_level l2_freq_tbl[] __initdata = {
- [0] = { {STBY_KHZ, QSB, 0, 0, 0x00 }, 1050000, 1050000, 0 },
- [1] = { { 384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
- [2] = { { 432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 2 },
- [3] = { { 486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 2 },
- [4] = { { 540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 2 },
- [5] = { { 594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
- [6] = { { 648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 4 },
- [7] = { { 702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 4 },
- [8] = { { 756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 4 },
- [9] = { { 810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 4 },
- [10] = { { 864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 4 },
- [11] = { { 918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 6 },
- [12] = { { 972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 6 },
- [13] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 6 },
- [14] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 6 },
- [15] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 6 },
- [16] = { { 1188000, HFPLL, 1, 0, 0x2C }, 1150000, 1150000, 6 },
- [17] = { { 1242000, HFPLL, 1, 0, 0x2E }, 1150000, 1150000, 6 },
- [18] = { { 1296000, HFPLL, 1, 0, 0x30 }, 1150000, 1150000, 6 },
- [19] = { { 1350000, HFPLL, 1, 0, 0x32 }, 1150000, 1150000, 6 },
+ [0] = { { 384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
+ [1] = { { 432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 2 },
+ [2] = { { 486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 2 },
+ [3] = { { 540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 2 },
+ [4] = { { 594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
+ [5] = { { 648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 4 },
+ [6] = { { 702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 4 },
+ [7] = { { 756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 4 },
+ [8] = { { 810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 4 },
+ [9] = { { 864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 4 },
+ [10] = { { 918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 6 },
+ [11] = { { 972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 6 },
+ [12] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 6 },
+ [13] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 6 },
+ [14] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 6 },
+ [15] = { { 1188000, HFPLL, 1, 0, 0x2C }, 1150000, 1150000, 6 },
+ [16] = { { 1242000, HFPLL, 1, 0, 0x2E }, 1150000, 1150000, 6 },
+ [17] = { { 1296000, HFPLL, 1, 0, 0x30 }, 1150000, 1150000, 6 },
+ [18] = { { 1350000, HFPLL, 1, 0, 0x32 }, 1150000, 1150000, 6 },
};
static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 950000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 950000 },
- { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(7), 975000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(7), 975000 },
- { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(7), 1000000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(7), 1000000 },
- { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(7), 1025000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(7), 1025000 },
- { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(7), 1075000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(7), 1075000 },
- { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(7), 1100000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(7), 1100000 },
- { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(7), 1125000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(7), 1125000 },
- { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(19), 1175000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(19), 1175000 },
- { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(19), 1200000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(19), 1200000 },
- { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(19), 1225000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(19), 1225000 },
- { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(19), 1237500 },
- { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(19), 1237500 },
- { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(19), 1250000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 950000 },
+ { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 975000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 975000 },
+ { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 1000000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 1000000 },
+ { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 1025000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 1025000 },
+ { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(6), 1075000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(6), 1075000 },
+ { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(6), 1100000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(6), 1100000 },
+ { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(6), 1125000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(6), 1125000 },
+ { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(18), 1175000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(18), 1175000 },
+ { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(18), 1200000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(18), 1200000 },
+ { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(18), 1225000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(18), 1225000 },
+ { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(18), 1237500 },
+ { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(18), 1237500 },
+ { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(18), 1250000 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 900000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 900000 },
- { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(7), 925000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(7), 925000 },
- { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(7), 950000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(7), 950000 },
- { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(7), 975000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(7), 975000 },
- { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(7), 1025000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(7), 1025000 },
- { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(7), 1050000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(7), 1050000 },
- { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(7), 1075000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(7), 1075000 },
- { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(19), 1125000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(19), 1125000 },
- { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(19), 1150000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(19), 1150000 },
- { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(19), 1175000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(19), 1175000 },
- { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(19), 1187500 },
- { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(19), 1187500 },
- { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(19), 1200000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 900000 },
+ { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 925000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 925000 },
+ { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 950000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 950000 },
+ { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 975000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 975000 },
+ { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(6), 1025000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(6), 1025000 },
+ { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(6), 1050000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(6), 1050000 },
+ { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(6), 1075000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(6), 1075000 },
+ { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(18), 1125000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(18), 1125000 },
+ { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(18), 1150000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(18), 1150000 },
+ { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(18), 1175000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(18), 1175000 },
+ { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(18), 1187500 },
+ { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(18), 1187500 },
+ { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(18), 1200000 },
{ 0, { 0 } }
};
static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 850000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 850000 },
- { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(7), 875000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(7), 875000 },
- { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(7), 900000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(7), 900000 },
- { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(7), 925000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(7), 925000 },
- { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(7), 975000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(7), 975000 },
- { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(7), 1000000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(7), 1000000 },
- { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(7), 1025000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(7), 1025000 },
- { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(19), 1075000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(19), 1075000 },
- { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(19), 1100000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(19), 1100000 },
- { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(19), 1125000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(19), 1125000 },
- { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(19), 1137500 },
- { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(19), 1137500 },
- { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(19), 1150000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 850000 },
+ { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 875000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 875000 },
+ { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 900000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 900000 },
+ { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 925000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 925000 },
+ { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(6), 975000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(6), 975000 },
+ { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(6), 1000000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(6), 1000000 },
+ { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(6), 1025000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(6), 1025000 },
+ { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(18), 1075000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(18), 1075000 },
+ { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(18), 1100000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(18), 1100000 },
+ { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(18), 1125000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(18), 1125000 },
+ { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(18), 1137500 },
+ { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(18), 1137500 },
+ { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(18), 1150000 },
{ 0, { 0 } }
};
static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
- [PVS_SLOW] = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow) },
- [PVS_NOMINAL] = { acpu_freq_tbl_nom, sizeof(acpu_freq_tbl_nom) },
- [PVS_FAST] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast) },
+ [PVS_SLOW] = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow), 0 },
+ [PVS_NOMINAL] = { acpu_freq_tbl_nom, sizeof(acpu_freq_tbl_nom), 25000 },
+ [PVS_FAST] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
};
static struct acpuclk_krait_params acpuclk_8960_params __initdata = {
@@ -206,6 +204,7 @@
.l2_freq_tbl_size = sizeof(l2_freq_tbl),
.bus_scale = &bus_scale_data,
.qfprom_phys_base = 0x00700000,
+ .stby_khz = 384000,
};
static int __init acpuclk_8960_probe(struct platform_device *pdev)
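Editor's note, not part of the patch: in the acpuclock-8974.c hunks below, every acpu_level row picks up a trailing 3200000. Judging from the acpuclock-krait.c changes further down (vdd_data.ua_core = tgt->ua_core feeding regulator_set_optimum_mode()), this is a per-level current vote in microamps; the similar values dropped from the VREG_CORE initializers earlier in this patch appear to move here. A sketch of the assumed row layout; the real definition lives in acpuclock-krait.h, which is not part of these hunks:

/* Field layout assumed from the initializers and from the tgt->ua_core /
 * regulator_set_optimum_mode() usage in acpuclock-krait.c. */
struct acpu_level_row {
	int use_for_scaling;	/* 0/1 flag in the first column */
	/* struct core_speed speed;   { khz, src, pri_div, sec_sel, l_val } */
	unsigned int l2_level;	/* L2(x) index into l2_freq_tbl */
	int vdd_core;		/* uV */
	int ua_core;		/* uA current vote, e.g. 3200000 */
};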
diff --git a/arch/arm/mach-msm/acpuclock-8974.c b/arch/arm/mach-msm/acpuclock-8974.c
index 9ed038b..8c89014 100644
--- a/arch/arm/mach-msm/acpuclock-8974.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -33,20 +33,26 @@
.l_offset = 0x04,
.m_offset = 0x08,
.n_offset = 0x0C,
+ .has_user_reg = true,
+ .user_offset = 0x10,
.config_offset = 0x14,
- /* TODO: Verify magic number for 8974 when available. */
- .config_val = 0x7845C665,
+ /* TODO: Verify magic numbers when final values are available. */
+ .user_val = 0x8,
+ .config_val = 0x04D0405D,
+ .low_vco_l_max = 65,
.low_vdd_l_max = 52,
+ .nom_vdd_l_max = 104,
.vdd[HFPLL_VDD_NONE] = LVL_NONE,
.vdd[HFPLL_VDD_LOW] = LVL_LOW,
.vdd[HFPLL_VDD_NOM] = LVL_NOM,
+ .vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
};
static struct scalable scalable[] __initdata = {
[CPU0] = {
.hfpll_phys_base = 0xF908A000,
.l2cpmr_iaddr = 0x4501,
- .vreg[VREG_CORE] = { "krait0", 1050000, 3200000 },
+ .vreg[VREG_CORE] = { "krait0", 1050000 },
.vreg[VREG_MEM] = { "krait0_mem", 1050000 },
.vreg[VREG_DIG] = { "krait0_dig", LVL_HIGH },
.vreg[VREG_HFPLL_A] = { "krait0_hfpll_a", 2150000 },
@@ -55,7 +61,7 @@
[CPU1] = {
.hfpll_phys_base = 0xF909A000,
.l2cpmr_iaddr = 0x5501,
- .vreg[VREG_CORE] = { "krait1", 1050000, 3200000 },
+ .vreg[VREG_CORE] = { "krait1", 1050000 },
.vreg[VREG_MEM] = { "krait1_mem", 1050000 },
.vreg[VREG_DIG] = { "krait1_dig", LVL_HIGH },
.vreg[VREG_HFPLL_A] = { "krait1_hfpll_a", 2150000 },
@@ -64,7 +70,7 @@
[CPU2] = {
.hfpll_phys_base = 0xF90AA000,
.l2cpmr_iaddr = 0x6501,
- .vreg[VREG_CORE] = { "krait2", 1050000, 3200000 },
+ .vreg[VREG_CORE] = { "krait2", 1050000 },
.vreg[VREG_MEM] = { "krait2_mem", 1050000 },
.vreg[VREG_DIG] = { "krait2_dig", LVL_HIGH },
.vreg[VREG_HFPLL_A] = { "krait2_hfpll_a", 2150000 },
@@ -73,7 +79,7 @@
[CPU3] = {
.hfpll_phys_base = 0xF90BA000,
.l2cpmr_iaddr = 0x7501,
- .vreg[VREG_CORE] = { "krait3", 1050000, 3200000 },
+ .vreg[VREG_CORE] = { "krait3", 1050000 },
.vreg[VREG_MEM] = { "krait3_mem", 1050000 },
.vreg[VREG_DIG] = { "krait3_dig", LVL_HIGH },
.vreg[VREG_HFPLL_A] = { "krait3_hfpll_a", 2150000 },
@@ -103,33 +109,31 @@
};
static struct l2_level l2_freq_tbl[] __initdata = {
- [0] = { {STBY_KHZ, QSB, 0, 0, 0 }, LVL_LOW, 1050000, 0 },
- [1] = { { 300000, PLL_0, 0, 2, 0 }, LVL_LOW, 1050000, 2 },
- [2] = { { 384000, HFPLL, 2, 0, 40 }, LVL_NOM, 1050000, 2 },
- [3] = { { 460800, HFPLL, 2, 0, 48 }, LVL_NOM, 1050000, 2 },
- [4] = { { 537600, HFPLL, 1, 0, 28 }, LVL_NOM, 1050000, 2 },
- [5] = { { 576000, HFPLL, 1, 0, 30 }, LVL_NOM, 1050000, 3 },
- [6] = { { 652800, HFPLL, 1, 0, 34 }, LVL_NOM, 1050000, 3 },
- [7] = { { 729600, HFPLL, 1, 0, 38 }, LVL_NOM, 1050000, 3 },
- [8] = { { 806400, HFPLL, 1, 0, 42 }, LVL_NOM, 1050000, 3 },
- [9] = { { 883200, HFPLL, 1, 0, 46 }, LVL_NOM, 1050000, 4 },
- [10] = { { 960000, HFPLL, 1, 0, 50 }, LVL_NOM, 1050000, 4 },
- [11] = { { 1036800, HFPLL, 1, 0, 54 }, LVL_NOM, 1050000, 4 },
+ [0] = { { 300000, PLL_0, 0, 2, 0 }, LVL_LOW, 1050000, 2 },
+ [1] = { { 384000, HFPLL, 2, 0, 40 }, LVL_NOM, 1050000, 2 },
+ [2] = { { 460800, HFPLL, 2, 0, 48 }, LVL_NOM, 1050000, 2 },
+ [3] = { { 537600, HFPLL, 1, 0, 28 }, LVL_NOM, 1050000, 2 },
+ [4] = { { 576000, HFPLL, 1, 0, 30 }, LVL_NOM, 1050000, 3 },
+ [5] = { { 652800, HFPLL, 1, 0, 34 }, LVL_NOM, 1050000, 3 },
+ [6] = { { 729600, HFPLL, 1, 0, 38 }, LVL_NOM, 1050000, 3 },
+ [7] = { { 806400, HFPLL, 1, 0, 42 }, LVL_NOM, 1050000, 3 },
+ [8] = { { 883200, HFPLL, 1, 0, 46 }, LVL_NOM, 1050000, 4 },
+ [9] = { { 960000, HFPLL, 1, 0, 50 }, LVL_NOM, 1050000, 4 },
+ [10] = { { 1036800, HFPLL, 1, 0, 54 }, LVL_NOM, 1050000, 4 },
};
static struct acpu_level acpu_freq_tbl[] __initdata = {
- { 0, {STBY_KHZ, QSB, 0, 0, 0 }, L2(0), 1050000 },
- { 1, { 300000, PLL_0, 0, 2, 0 }, L2(1), 1050000 },
- { 1, { 384000, HFPLL, 2, 0, 40 }, L2(2), 1050000 },
- { 1, { 460800, HFPLL, 2, 0, 48 }, L2(3), 1050000 },
- { 1, { 537600, HFPLL, 1, 0, 28 }, L2(4), 1050000 },
- { 1, { 576000, HFPLL, 1, 0, 30 }, L2(5), 1050000 },
- { 1, { 652800, HFPLL, 1, 0, 34 }, L2(6), 1050000 },
- { 1, { 729600, HFPLL, 1, 0, 38 }, L2(7), 1050000 },
- { 1, { 806400, HFPLL, 1, 0, 42 }, L2(8), 1050000 },
- { 1, { 883200, HFPLL, 1, 0, 46 }, L2(9), 1050000 },
- { 1, { 960000, HFPLL, 1, 0, 50 }, L2(10), 1050000 },
- { 1, { 1036800, HFPLL, 1, 0, 54 }, L2(11), 1050000 },
+ { 1, { 300000, PLL_0, 0, 2, 0 }, L2(0), 1050000, 3200000 },
+ { 1, { 384000, HFPLL, 2, 0, 40 }, L2(1), 1050000, 3200000 },
+ { 1, { 460800, HFPLL, 2, 0, 48 }, L2(2), 1050000, 3200000 },
+ { 1, { 537600, HFPLL, 1, 0, 28 }, L2(3), 1050000, 3200000 },
+ { 1, { 576000, HFPLL, 1, 0, 30 }, L2(4), 1050000, 3200000 },
+ { 1, { 652800, HFPLL, 1, 0, 34 }, L2(5), 1050000, 3200000 },
+ { 1, { 729600, HFPLL, 1, 0, 38 }, L2(6), 1050000, 3200000 },
+ { 1, { 806400, HFPLL, 1, 0, 42 }, L2(7), 1050000, 3200000 },
+ { 1, { 883200, HFPLL, 1, 0, 46 }, L2(8), 1050000, 3200000 },
+ { 1, { 960000, HFPLL, 1, 0, 50 }, L2(9), 1050000, 3200000 },
+ { 1, { 1036800, HFPLL, 1, 0, 54 }, L2(10), 1050000, 3200000 },
{ 0, { 0 } }
};
@@ -148,6 +152,7 @@
.l2_freq_tbl_size = sizeof(l2_freq_tbl),
.bus_scale = &bus_scale_data,
.qfprom_phys_base = 0xFC4A8000,
+ .stby_khz = 300000,
};
static int __init acpuclk_8974_probe(struct platform_device *pdev)
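Editor's note, not part of the patch: the acpuclock-krait.c changes below split the old two-tier HFPLL vdd_dig choice into three tiers (LOW/NOM/HIGH) keyed off the PLL L-value, and evaluate it for both the CPU and the L2 clock source. A self-contained sketch of that selection using the 22/42 low/nominal limits the 8960-class targets set above; the helper name is illustrative:

#include <stdio.h>

enum hfpll_vdd { VDD_NONE, VDD_LOW, VDD_NOM, VDD_HIGH };

/* Mirrors get_src_dig() in the hunks below: non-HFPLL sources need no PLL
 * vote; otherwise the L-value is checked against nom_vdd_l_max first, then
 * low_vdd_l_max. */
static enum hfpll_vdd hfpll_vdd_tier(int is_hfpll, unsigned int l_val,
				     unsigned int low_max, unsigned int nom_max)
{
	if (!is_hfpll)
		return VDD_NONE;
	if (l_val > nom_max)
		return VDD_HIGH;
	if (l_val > low_max)
		return VDD_NOM;
	return VDD_LOW;
}

int main(void)
{
	/* 1.188 GHz uses L = 0x2C (44): above nom_vdd_l_max = 42 -> HIGH. */
	printf("%d\n", hfpll_vdd_tier(1, 0x2C, 22, 42));
	return 0;
}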
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index fd43f57..33396e5 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -11,9 +11,8 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
@@ -42,7 +41,6 @@
#define PRI_SRC_SEL_SEC_SRC 0
#define PRI_SRC_SEL_HFPLL 1
#define PRI_SRC_SEL_HFPLL_DIV2 2
-#define SEC_SRC_SEL_QSB 0
#define SEC_SRC_SEL_L2PLL 1
#define SEC_SRC_SEL_AUX 2
@@ -54,12 +52,12 @@
static struct drv_data {
struct acpu_level *acpu_freq_tbl;
- const struct acpu_level *max_acpu_lvl;
const struct l2_level *l2_freq_tbl;
struct scalable *scalable;
struct hfpll_data *hfpll_data;
u32 bus_perf_client;
struct msm_bus_scale_pdata *bus_scale;
+ int boost_uv;
struct device *dev;
} drv;
@@ -171,8 +169,19 @@
/* Program the HFPLL rate. Assumes HFPLL is already disabled. */
static void hfpll_set_rate(struct scalable *sc, const struct core_speed *tgt_s)
{
- writel_relaxed(tgt_s->pll_l_val,
- sc->hfpll_base + drv.hfpll_data->l_offset);
+ void __iomem *base = sc->hfpll_base;
+ u32 regval;
+
+ writel_relaxed(tgt_s->pll_l_val, base + drv.hfpll_data->l_offset);
+
+ if (drv.hfpll_data->has_user_reg) {
+ regval = readl_relaxed(base + drv.hfpll_data->user_offset);
+ if (tgt_s->pll_l_val <= drv.hfpll_data->low_vco_l_max)
+ regval &= ~drv.hfpll_data->user_vco_mask;
+ else
+ regval |= drv.hfpll_data->user_vco_mask;
+ writel_relaxed(regval, base + drv.hfpll_data->user_offset);
+ }
}
/* Return the L2 speed that should be applied. */
@@ -235,40 +244,59 @@
sc->cur_speed = tgt_s;
}
+struct vdd_data {
+ int vdd_mem;
+ int vdd_dig;
+ int vdd_core;
+ int ua_core;
+};
+
/* Apply any per-cpu voltage increases. */
-static int increase_vdd(int cpu, int vdd_core, int vdd_mem, int vdd_dig,
+static int increase_vdd(int cpu, struct vdd_data *data,
enum setrate_reason reason)
{
struct scalable *sc = &drv.scalable[cpu];
- int rc = 0;
+ int rc;
/*
* Increase vdd_mem active-set before vdd_dig.
* vdd_mem should be >= vdd_dig.
*/
- if (vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
+ if (data->vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
- vdd_mem, sc->vreg[VREG_MEM].max_vdd);
+ data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
if (rc) {
dev_err(drv.dev,
"vdd_mem (cpu%d) increase failed (%d)\n",
cpu, rc);
return rc;
}
- sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
+ sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
}
/* Increase vdd_dig active-set vote. */
- if (vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
+ if (data->vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
- vdd_dig, sc->vreg[VREG_DIG].max_vdd);
+ data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
if (rc) {
dev_err(drv.dev,
"vdd_dig (cpu%d) increase failed (%d)\n",
cpu, rc);
return rc;
}
- sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
+ sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
+ }
+
+ /* Increase current request. */
+ if (data->ua_core > sc->vreg[VREG_CORE].cur_ua) {
+ rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
+ data->ua_core);
+ if (rc < 0) {
+ dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
+ sc->vreg[VREG_CORE].name, rc);
+ return rc;
+ }
+ sc->vreg[VREG_CORE].cur_ua = data->ua_core;
}
/*
@@ -277,25 +305,25 @@
* because we don't know what CPU we are running on at this point, but
* the CPU regulator API requires we call it from the affected CPU.
*/
- if (vdd_core > sc->vreg[VREG_CORE].cur_vdd
+ if (data->vdd_core > sc->vreg[VREG_CORE].cur_vdd
&& reason != SETRATE_HOTPLUG) {
- rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
- sc->vreg[VREG_CORE].max_vdd);
+ rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
+ data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
if (rc) {
dev_err(drv.dev,
"vdd_core (cpu%d) increase failed (%d)\n",
cpu, rc);
return rc;
}
- sc->vreg[VREG_CORE].cur_vdd = vdd_core;
+ sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
}
- return rc;
+ return 0;
}
/* Apply any per-cpu voltage decreases. */
-static void decrease_vdd(int cpu, int vdd_core, int vdd_mem, int vdd_dig,
- enum setrate_reason reason)
+static void decrease_vdd(int cpu, struct vdd_data *data,
+ enum setrate_reason reason)
{
struct scalable *sc = &drv.scalable[cpu];
int ret;
@@ -305,46 +333,58 @@
* that's being affected. Don't do this in the hotplug remove path,
* where the rail is off and we're executing on the other CPU.
*/
- if (vdd_core < sc->vreg[VREG_CORE].cur_vdd
+ if (data->vdd_core < sc->vreg[VREG_CORE].cur_vdd
&& reason != SETRATE_HOTPLUG) {
- ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
- sc->vreg[VREG_CORE].max_vdd);
+ ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
+ data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
if (ret) {
dev_err(drv.dev,
"vdd_core (cpu%d) decrease failed (%d)\n",
cpu, ret);
return;
}
- sc->vreg[VREG_CORE].cur_vdd = vdd_core;
+ sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
+ }
+
+ /* Decrease current request. */
+ if (data->ua_core < sc->vreg[VREG_CORE].cur_ua) {
+ ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
+ data->ua_core);
+ if (ret < 0) {
+ dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
+ sc->vreg[VREG_CORE].name, ret);
+ return;
+ }
+ sc->vreg[VREG_CORE].cur_ua = data->ua_core;
}
/* Decrease vdd_dig active-set vote. */
- if (vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
+ if (data->vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
- vdd_dig, sc->vreg[VREG_DIG].max_vdd);
+ data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
if (ret) {
dev_err(drv.dev,
"vdd_dig (cpu%d) decrease failed (%d)\n",
cpu, ret);
return;
}
- sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
+ sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
}
/*
* Decrease vdd_mem active-set after vdd_dig.
* vdd_mem should be >= vdd_dig.
*/
- if (vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
+ if (data->vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
- vdd_mem, sc->vreg[VREG_MEM].max_vdd);
+ data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
if (ret) {
dev_err(drv.dev,
"vdd_mem (cpu%d) decrease failed (%d)\n",
cpu, ret);
return;
}
- sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
+ sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
}
}
@@ -353,25 +393,39 @@
return drv.l2_freq_tbl[tgt->l2_level].vdd_mem;
}
-static int calculate_vdd_dig(const struct acpu_level *tgt)
+static int get_src_dig(const struct core_speed *s)
{
- int pll_vdd_dig;
const int *hfpll_vdd = drv.hfpll_data->vdd;
const u32 low_vdd_l_max = drv.hfpll_data->low_vdd_l_max;
+ const u32 nom_vdd_l_max = drv.hfpll_data->nom_vdd_l_max;
- if (drv.l2_freq_tbl[tgt->l2_level].speed.src != HFPLL)
- pll_vdd_dig = hfpll_vdd[HFPLL_VDD_NONE];
- else if (drv.l2_freq_tbl[tgt->l2_level].speed.pll_l_val > low_vdd_l_max)
- pll_vdd_dig = hfpll_vdd[HFPLL_VDD_NOM];
+ if (s->src != HFPLL)
+ return hfpll_vdd[HFPLL_VDD_NONE];
+ else if (s->pll_l_val > nom_vdd_l_max)
+ return hfpll_vdd[HFPLL_VDD_HIGH];
+ else if (s->pll_l_val > low_vdd_l_max)
+ return hfpll_vdd[HFPLL_VDD_NOM];
else
- pll_vdd_dig = hfpll_vdd[HFPLL_VDD_LOW];
-
- return max(drv.l2_freq_tbl[tgt->l2_level].vdd_dig, pll_vdd_dig);
+ return hfpll_vdd[HFPLL_VDD_LOW];
}
+static int calculate_vdd_dig(const struct acpu_level *tgt)
+{
+ int l2_pll_vdd_dig, cpu_pll_vdd_dig;
+
+ l2_pll_vdd_dig = get_src_dig(&drv.l2_freq_tbl[tgt->l2_level].speed);
+ cpu_pll_vdd_dig = get_src_dig(&tgt->speed);
+
+ return max(drv.l2_freq_tbl[tgt->l2_level].vdd_dig,
+ max(l2_pll_vdd_dig, cpu_pll_vdd_dig));
+}
+
+static bool enable_boost = true;
+module_param_named(boost, enable_boost, bool, S_IRUGO | S_IWUSR);
+
static int calculate_vdd_core(const struct acpu_level *tgt)
{
- return tgt->vdd_core;
+ return tgt->vdd_core + (enable_boost ? drv.boost_uv : 0);
}
/* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */
@@ -381,7 +435,7 @@
const struct core_speed *strt_acpu_s, *tgt_acpu_s;
const struct acpu_level *tgt;
int tgt_l2_l;
- int vdd_mem, vdd_dig, vdd_core;
+ struct vdd_data vdd_data;
unsigned long flags;
int rc = 0;
@@ -410,19 +464,20 @@
}
/* Calculate voltage requirements for the current CPU. */
- vdd_mem = calculate_vdd_mem(tgt);
- vdd_dig = calculate_vdd_dig(tgt);
- vdd_core = calculate_vdd_core(tgt);
+ vdd_data.vdd_mem = calculate_vdd_mem(tgt);
+ vdd_data.vdd_dig = calculate_vdd_dig(tgt);
+ vdd_data.vdd_core = calculate_vdd_core(tgt);
+ vdd_data.ua_core = tgt->ua_core;
/* Increase VDD levels if needed. */
if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
- rc = increase_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);
+ rc = increase_vdd(cpu, &vdd_data, reason);
if (rc)
goto out;
}
- pr_debug("Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
- cpu, strt_acpu_s->khz, tgt_acpu_s->khz);
+ dev_dbg(drv.dev, "Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
+ cpu, strt_acpu_s->khz, tgt_acpu_s->khz);
/* Set the new CPU speed. */
set_speed(&drv.scalable[cpu], tgt_acpu_s);
@@ -447,9 +502,9 @@
set_bus_bw(drv.l2_freq_tbl[tgt_l2_l].bw_level);
/* Drop VDD levels if we can. */
- decrease_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);
+ decrease_vdd(cpu, &vdd_data, reason);
- pr_debug("ACPU%d speed change complete\n", cpu);
+ dev_dbg(drv.dev, "ACPU%d speed change complete\n", cpu);
out:
if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
@@ -457,11 +512,16 @@
return rc;
}
+static struct acpuclk_data acpuclk_krait_data = {
+ .set_rate = acpuclk_krait_set_rate,
+ .get_rate = acpuclk_krait_get_rate,
+};
+
/* Initialize a HFPLL at a given rate and enable it. */
static void __init hfpll_init(struct scalable *sc,
const struct core_speed *tgt_s)
{
- pr_debug("Initializing HFPLL%d\n", sc - drv.scalable);
+ dev_dbg(drv.dev, "Initializing HFPLL%d\n", sc - drv.scalable);
/* Disable the PLL for re-programming. */
hfpll_disable(sc, true);
@@ -471,6 +531,9 @@
sc->hfpll_base + drv.hfpll_data->config_offset);
writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->m_offset);
writel_relaxed(1, sc->hfpll_base + drv.hfpll_data->n_offset);
+ if (drv.hfpll_data->has_user_reg)
+ writel_relaxed(drv.hfpll_data->user_val,
+ sc->hfpll_base + drv.hfpll_data->user_offset);
/* Program droop controller, if supported */
if (drv.hfpll_data->has_droop_ctl)
@@ -533,19 +596,21 @@
}
/* Voltage regulator initialization. */
-static int __cpuinit regulator_init(struct scalable *sc)
+static int __cpuinit regulator_init(struct scalable *sc,
+ const struct acpu_level *acpu_level)
{
int ret, vdd_mem, vdd_dig, vdd_core;
- vdd_mem = calculate_vdd_mem(drv.max_acpu_lvl);
- vdd_dig = calculate_vdd_dig(drv.max_acpu_lvl);
-
+ vdd_mem = calculate_vdd_mem(acpu_level);
ret = rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
if (ret)
goto err_mem;
+
+ vdd_dig = calculate_vdd_dig(acpu_level);
ret = rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
if (ret)
goto err_dig;
+
ret = rpm_regulator_init(sc, VREG_HFPLL_A,
sc->vreg[VREG_HFPLL_A].max_vdd, false);
if (ret)
@@ -564,7 +629,15 @@
sc->vreg[VREG_CORE].name, ret);
goto err_core_get;
}
- vdd_core = calculate_vdd_core(drv.max_acpu_lvl);
+ ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
+ acpu_level->ua_core);
+ if (ret < 0) {
+ dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
+ sc->vreg[VREG_CORE].name, ret);
+ goto err_core_conf;
+ }
+ sc->vreg[VREG_CORE].cur_ua = acpu_level->ua_core;
+ vdd_core = calculate_vdd_core(acpu_level);
ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
sc->vreg[VREG_CORE].max_vdd);
if (ret) {
@@ -573,13 +646,6 @@
goto err_core_conf;
}
sc->vreg[VREG_CORE].cur_vdd = vdd_core;
- ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
- sc->vreg[VREG_CORE].peak_ua);
- if (ret < 0) {
- dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
- sc->vreg[VREG_CORE].name, ret);
- goto err_core_conf;
- }
ret = regulator_enable(sc->vreg[VREG_CORE].reg);
if (ret) {
dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
@@ -647,9 +713,63 @@
return 0;
}
+static void __cpuinit fill_cur_core_speed(struct core_speed *s,
+ struct scalable *sc)
+{
+ s->pri_src_sel = get_l2_indirect_reg(sc->l2cpmr_iaddr) & 0x3;
+ s->sec_src_sel = (get_l2_indirect_reg(sc->l2cpmr_iaddr) >> 2) & 0x3;
+ s->pll_l_val = readl_relaxed(sc->hfpll_base + drv.hfpll_data->l_offset);
+}
+
+static bool __cpuinit speed_equal(const struct core_speed *s1,
+ const struct core_speed *s2)
+{
+ return (s1->pri_src_sel == s2->pri_src_sel &&
+ s1->sec_src_sel == s2->sec_src_sel &&
+ s1->pll_l_val == s2->pll_l_val);
+}
+
+static const struct acpu_level __cpuinit *find_cur_acpu_level(int cpu)
+{
+ struct scalable *sc = &drv.scalable[cpu];
+ const struct acpu_level *l;
+ struct core_speed cur_speed;
+
+ fill_cur_core_speed(&cur_speed, sc);
+ for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
+ if (speed_equal(&l->speed, &cur_speed))
+ return l;
+ return NULL;
+}
+
+static const struct l2_level __init *find_cur_l2_level(void)
+{
+ struct scalable *sc = &drv.scalable[L2];
+ const struct l2_level *l;
+ struct core_speed cur_speed;
+
+ fill_cur_core_speed(&cur_speed, sc);
+ for (l = drv.l2_freq_tbl; l->speed.khz != 0; l++)
+ if (speed_equal(&l->speed, &cur_speed))
+ return l;
+ return NULL;
+}
+
+static const struct acpu_level __cpuinit *find_min_acpu_level(void)
+{
+ struct acpu_level *l;
+
+ for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
+ if (l->use_for_scaling)
+ return l;
+
+ return NULL;
+}
+
static int __cpuinit per_cpu_init(int cpu)
{
struct scalable *sc = &drv.scalable[cpu];
+ const struct acpu_level *acpu_level;
int ret;
sc->hfpll_base = ioremap(sc->hfpll_phys_base, SZ_32);
@@ -658,14 +778,29 @@
goto err_ioremap;
}
- ret = regulator_init(sc);
+ acpu_level = find_cur_acpu_level(cpu);
+ if (!acpu_level) {
+ acpu_level = find_min_acpu_level();
+ if (!acpu_level) {
+ ret = -ENODEV;
+ goto err_table;
+ }
+ dev_dbg(drv.dev, "CPU%d is running at an unknown rate. Defaulting to %lu KHz.\n",
+ cpu, acpu_level->speed.khz);
+ } else {
+ dev_dbg(drv.dev, "CPU%d is running at %lu KHz\n", cpu,
+ acpu_level->speed.khz);
+ }
+
+ ret = regulator_init(sc, acpu_level);
if (ret)
goto err_regulators;
- ret = init_clock_sources(sc, &drv.max_acpu_lvl->speed);
+ ret = init_clock_sources(sc, &acpu_level->speed);
if (ret)
goto err_clocks;
- sc->l2_vote = drv.max_acpu_lvl->l2_level;
+
+ sc->l2_vote = acpu_level->l2_level;
sc->initialized = true;
return 0;
@@ -673,13 +808,14 @@
err_clocks:
regulator_cleanup(sc);
err_regulators:
+err_table:
iounmap(sc->hfpll_base);
err_ioremap:
return ret;
}
/* Register with bus driver. */
-static void __init bus_init(void)
+static void __init bus_init(const struct l2_level *l2_level)
{
int ret;
@@ -690,7 +826,7 @@
}
ret = msm_bus_scale_client_update_request(drv.bus_perf_client,
- drv.l2_freq_tbl[drv.max_acpu_lvl->l2_level].bw_level);
+ l2_level->bw_level);
if (ret)
dev_err(drv.dev, "initial bandwidth req failed (%d)\n", ret);
}
@@ -731,20 +867,20 @@
static void __init cpufreq_table_init(void) {}
#endif
-#define HOT_UNPLUG_KHZ STBY_KHZ
static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
static int prev_khz[NR_CPUS];
int rc, cpu = (int)hcpu;
struct scalable *sc = &drv.scalable[cpu];
+ unsigned long hot_unplug_khz = acpuclk_krait_data.power_collapse_khz;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DEAD:
prev_khz[cpu] = acpuclk_krait_get_rate(cpu);
/* Fall through. */
case CPU_UP_CANCELED:
- acpuclk_krait_set_rate(cpu, HOT_UNPLUG_KHZ, SETRATE_HOTPLUG);
+ acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);
regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
break;
case CPU_UP_PREPARE:
@@ -757,7 +893,7 @@
if (WARN_ON(!prev_khz[cpu]))
return NOTIFY_BAD;
rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
- sc->vreg[VREG_CORE].peak_ua);
+ sc->vreg[VREG_CORE].cur_ua);
if (rc < 0)
return NOTIFY_BAD;
acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
@@ -840,27 +976,6 @@
return tbl_idx;
}
-static const struct acpu_level __init *find_max_acpu_lvl(struct acpu_level *tbl)
-{
- struct acpu_level *l, *max_lvl = NULL;
-
- for (l = tbl; l->speed.khz != 0; l++)
- if (l->use_for_scaling)
- max_lvl = l;
-
- BUG_ON(!max_lvl);
- dev_info(drv.dev, "Max CPU freq: %lu KHz\n", max_lvl->speed.khz);
-
- return max_lvl;
-}
-
-static struct acpuclk_data acpuclk_krait_data = {
- .set_rate = acpuclk_krait_set_rate,
- .get_rate = acpuclk_krait_get_rate,
- .power_collapse_khz = STBY_KHZ,
- .wait_for_irq_khz = STBY_KHZ,
-};
-
static void __init drv_data_init(struct device *dev,
const struct acpuclk_krait_params *params)
{
@@ -892,20 +1007,21 @@
params->pvs_tables[tbl_idx].size,
GFP_KERNEL);
BUG_ON(!drv.acpu_freq_tbl);
+ drv.boost_uv = params->pvs_tables[tbl_idx].boost_uv;
- drv.max_acpu_lvl = find_max_acpu_lvl(drv.acpu_freq_tbl);
+ acpuclk_krait_data.power_collapse_khz = params->stby_khz;
+ acpuclk_krait_data.wait_for_irq_khz = params->stby_khz;
}
static void __init hw_init(void)
{
struct scalable *l2 = &drv.scalable[L2];
+ const struct l2_level *l2_level;
int cpu, rc;
if (krait_needs_vmin())
krait_apply_vmin(drv.acpu_freq_tbl);
- bus_init();
-
l2->hfpll_base = ioremap(l2->hfpll_phys_base, SZ_32);
BUG_ON(!l2->hfpll_base);
@@ -915,14 +1031,26 @@
rc = rpm_regulator_init(l2, VREG_HFPLL_B,
l2->vreg[VREG_HFPLL_B].max_vdd, false);
BUG_ON(rc);
- rc = init_clock_sources(l2,
- &drv.l2_freq_tbl[drv.max_acpu_lvl->l2_level].speed);
+
+ l2_level = find_cur_l2_level();
+ if (!l2_level) {
+ l2_level = drv.l2_freq_tbl;
+ dev_dbg(drv.dev, "L2 is running at an unknown rate. Defaulting to %lu KHz.\n",
+ l2_level->speed.khz);
+ } else {
+ dev_dbg(drv.dev, "L2 is running at %lu KHz\n",
+ l2_level->speed.khz);
+ }
+
+ rc = init_clock_sources(l2, &l2_level->speed);
BUG_ON(rc);
for_each_online_cpu(cpu) {
rc = per_cpu_init(cpu);
BUG_ON(rc);
}
+
+ bus_init(l2_level);
}
int __init acpuclk_krait_init(struct device *dev,
diff --git a/arch/arm/mach-msm/acpuclock-krait.h b/arch/arm/mach-msm/acpuclock-krait.h
index f92aaf3..5a95e76 100644
--- a/arch/arm/mach-msm/acpuclock-krait.h
+++ b/arch/arm/mach-msm/acpuclock-krait.h
@@ -14,7 +14,6 @@
#ifndef __ARCH_ARM_MACH_MSM_ACPUCLOCK_KRAIT_H
#define __ARCH_ARM_MACH_MSM_ACPUCLOCK_KRAIT_H
-#define STBY_KHZ 1
#define L2(x) (x)
#define BW_MBPS(_bw) \
{ \
@@ -39,7 +38,6 @@
enum src_id {
PLL_0 = 0,
HFPLL,
- QSB,
PLL_8,
};
@@ -74,6 +72,7 @@
HFPLL_VDD_NONE,
HFPLL_VDD_LOW,
HFPLL_VDD_NOM,
+ HFPLL_VDD_HIGH,
NUM_HFPLL_VDD
};
@@ -96,15 +95,15 @@
* @reg: Regulator handle.
* @rpm_reg: RPM Regulator handle.
* @cur_vdd: Last-set voltage in uV.
- * @peak_ua: Maximum current draw expected in uA.
+ * @cur_ua: Last-set current in uA.
*/
struct vreg {
const char *name;
const int max_vdd;
- const int peak_ua;
struct regulator *reg;
struct rpm_regulator *rpm_reg;
int cur_vdd;
+ int cur_ua;
};
/**
@@ -116,11 +115,11 @@
* @pll_l_val: HFPLL "L" value to be applied when an HFPLL source is selected.
*/
struct core_speed {
- const unsigned long khz;
- const int src;
- const u32 pri_src_sel;
- const u32 sec_src_sel;
- const u32 pll_l_val;
+ unsigned long khz;
+ int src;
+ u32 pri_src_sel;
+ u32 sec_src_sel;
+ u32 pll_l_val;
};
/**
@@ -143,12 +142,14 @@
* @speed: CPU clock configuration.
* @l2_level: L2 configuration to use.
* @vdd_core: CPU core voltage in uV.
+ * @ua_core: CPU core current consumption in uA.
*/
struct acpu_level {
const int use_for_scaling;
const struct core_speed speed;
const unsigned int l2_level;
int vdd_core;
+ int ua_core;
};
/**
@@ -159,10 +160,16 @@
* @n_offset: "N" value register offset from base address.
* @config_offset: Configuration register offset from base address.
* @config_val: Value to initialize the @config_offset register to.
+ * @has_user_reg: Indicates the presence of an additional config register.
+ * @user_offset: User register offset from base address, if applicable.
+ * @user_val: Value to initialize the @user_offset register to.
+ * @user_vco_mask: Bit in the @user_offset to enable high-frequency VCO mode.
* @has_droop_ctl: Indicates the presence of a voltage droop controller.
* @droop_offset: Droop controller register offset from base address.
 * @droop_val: Value to initialize the @droop_offset register to.
* @low_vdd_l_max: Maximum "L" value supported at HFPLL_VDD_LOW.
+ * @nom_vdd_l_max: Maximum "L" value supported at HFPLL_VDD_NOM.
+ * @low_vco_l_max: Maximum "L" value supported in low-frequency VCO mode.
* @vdd: voltage requirements for each VDD level for the L2 PLL.
*/
struct hfpll_data {
@@ -172,10 +179,16 @@
const u32 n_offset;
const u32 config_offset;
const u32 config_val;
+ const bool has_user_reg;
+ const u32 user_offset;
+ const u32 user_val;
+ const u32 user_vco_mask;
const bool has_droop_ctl;
const u32 droop_offset;
const u32 droop_val;
const u32 low_vdd_l_max;
+ const u32 nom_vdd_l_max;
+ const u32 low_vco_l_max;
const int vdd[NUM_HFPLL_VDD];
};
@@ -207,10 +220,12 @@
* struct pvs_table - CPU performance level table and size.
* @table: CPU performance level table
* @size: sizeof(@table)
+ * @boost_uv: Voltage boost amount in uV
*/
struct pvs_table {
struct acpu_level *table;
size_t size;
+ int boost_uv;
};
/**
@@ -223,6 +238,7 @@
* @l2_freq_tbl_size: Size of @l2_freq_tbl.
* @qfprom_phys_base: Physical base address of QFPROM.
* @bus_scale: MSM bus driver parameters.
+ * @stby_khz: KHz value corresponding to an always-on clock source.
*/
struct acpuclk_krait_params {
struct scalable *scalable;
@@ -233,6 +249,7 @@
size_t l2_freq_tbl_size;
phys_addr_t qfprom_phys_base;
struct msm_bus_scale_pdata *bus_scale;
+ unsigned long stby_khz;
};
/**
diff --git a/arch/arm/mach-msm/acpuclock.h b/arch/arm/mach-msm/acpuclock.h
index e73a2af..841f717 100644
--- a/arch/arm/mach-msm/acpuclock.h
+++ b/arch/arm/mach-msm/acpuclock.h
@@ -90,4 +90,4 @@
*/
void acpuclk_register(struct acpuclk_data *data);
-#endif
+#endif /*__ARCH_ARM_MACH_MSM_ACPUCLOCK_H*/
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index 5aea0ed..b35e949 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -283,6 +283,9 @@
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;
+static void bam_dmux_log(const char *fmt, ...)
+ __printf(1, 2);
+
#define DMUX_LOG_KERR(fmt...) \
do { \
@@ -569,7 +572,8 @@
rx_hdr->ch_id);
handle_bam_mux_cmd_open(rx_hdr);
if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
- bam_dmux_log("%s: deactivating disconnect ack\n");
+ bam_dmux_log("%s: deactivating disconnect ack\n",
+ __func__);
disconnect_ack = 0;
}
dev_kfree_skb_any(rx_skb);
diff --git a/arch/arm/mach-msm/board-8064-gpu.c b/arch/arm/mach-msm/board-8064-gpu.c
index eb36a81e..122505e 100644
--- a/arch/arm/mach-msm/board-8064-gpu.c
+++ b/arch/arm/mach-msm/board-8064-gpu.c
@@ -225,6 +225,7 @@
.set_grp_async = NULL,
.idle_timeout = HZ/10,
.nap_allowed = true,
+ .strtstp_sleepwake = true,
.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &grp3d_bus_scale_pdata,
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 3e07833..ce1c829 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -1751,10 +1751,10 @@
static struct msm_thermal_data msm_thermal_pdata = {
.sensor_id = 7,
- .poll_ms = 1000,
- .limit_temp = 60,
- .temp_hysteresis = 10,
- .limit_freq = 918000,
+ .poll_ms = 250,
+ .limit_temp_degC = 60,
+ .temp_hysteresis_degC = 10,
+ .freq_step = 2,
};
#define MSM_SHARED_RAM_PHYS 0x80000000
@@ -2253,7 +2253,6 @@
&msm_gss,
&apq8064_rtb_device,
&apq8064_cpu_idle_device,
- &apq8064_msm_gov_device,
&apq8064_device_cache_erp,
&msm8960_device_ebi1_ch0_erp,
&msm8960_device_ebi1_ch1_erp,
diff --git a/arch/arm/mach-msm/board-8930-display.c b/arch/arm/mach-msm/board-8930-display.c
index 292c031..d975997 100644
--- a/arch/arm/mach-msm/board-8930-display.c
+++ b/arch/arm/mach-msm/board-8930-display.c
@@ -482,16 +482,16 @@
static struct mipi_dsi_phy_ctrl dsi_novatek_cmd_mode_phy_db = {
/* DSI_BIT_CLK at 500MHz, 2 lane, RGB888 */
- {0x0F, 0x0a, 0x04, 0x00, 0x20}, /* regulator */
+ {0x09, 0x08, 0x05, 0x00, 0x20}, /* regulator */
/* timing */
{0xab, 0x8a, 0x18, 0x00, 0x92, 0x97, 0x1b, 0x8c,
0x0c, 0x03, 0x04, 0xa0},
{0x5f, 0x00, 0x00, 0x10}, /* phy ctrl */
{0xff, 0x00, 0x06, 0x00}, /* strength */
/* pll control */
- {0x40, 0xf9, 0x30, 0xda, 0x00, 0x40, 0x03, 0x62,
+ {0x0, 0xe, 0x30, 0xda, 0x00, 0x10, 0x0f, 0x61,
0x40, 0x07, 0x03,
- 0x00, 0x1a, 0x00, 0x00, 0x02, 0x00, 0x20, 0x00, 0x01},
+ 0x00, 0x1a, 0x00, 0x00, 0x02, 0x00, 0x20, 0x00, 0x02},
};
static struct mipi_dsi_panel_platform_data novatek_pdata = {
diff --git a/arch/arm/mach-msm/board-8930-gpu.c b/arch/arm/mach-msm/board-8930-gpu.c
index bd343c1..99a5a34 100644
--- a/arch/arm/mach-msm/board-8930-gpu.c
+++ b/arch/arm/mach-msm/board-8930-gpu.c
@@ -140,6 +140,7 @@
.set_grp_async = NULL,
.idle_timeout = HZ/12,
.nap_allowed = true,
+ .strtstp_sleepwake = true,
.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &grp3d_bus_scale_pdata,
diff --git a/arch/arm/mach-msm/board-8930-regulator.c b/arch/arm/mach-msm/board-8930-regulator.c
index af91089..d3a4960 100644
--- a/arch/arm/mach-msm/board-8930-regulator.c
+++ b/arch/arm/mach-msm/board-8930-regulator.c
@@ -174,11 +174,13 @@
REGULATOR_SUPPLY("8038_s5", NULL),
REGULATOR_SUPPLY("krait0", "acpuclk-8627"),
REGULATOR_SUPPLY("krait0", "acpuclk-8930"),
+ REGULATOR_SUPPLY("krait0", "acpuclk-8930aa"),
};
VREG_CONSUMERS(S6) = {
REGULATOR_SUPPLY("8038_s6", NULL),
REGULATOR_SUPPLY("krait1", "acpuclk-8627"),
REGULATOR_SUPPLY("krait1", "acpuclk-8930"),
+ REGULATOR_SUPPLY("krait1", "acpuclk-8930aa"),
};
VREG_CONSUMERS(LVS1) = {
REGULATOR_SUPPLY("8038_lvs1", NULL),
@@ -538,6 +540,14 @@
RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8627"),
RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig", "acpuclk-8627"),
RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig", "acpuclk-8627"),
+
+ RPM_REG_MAP(L23, 0, 1, "krait0_hfpll", "acpuclk-8930aa"),
+ RPM_REG_MAP(L23, 0, 2, "krait1_hfpll", "acpuclk-8930aa"),
+ RPM_REG_MAP(L23, 0, 6, "l2_hfpll", "acpuclk-8930aa"),
+ RPM_REG_MAP(L24, 0, 1, "krait0_mem", "acpuclk-8930aa"),
+ RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8930aa"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig", "acpuclk-8930aa"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig", "acpuclk-8930aa"),
};
struct rpm_regulator_platform_data msm8930_rpm_regulator_pdata __devinitdata = {
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index d7a077c..ed0cc82 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -2095,10 +2095,10 @@
static struct msm_thermal_data msm_thermal_pdata = {
.sensor_id = 9,
- .poll_ms = 1000,
- .limit_temp = 60,
- .temp_hysteresis = 10,
- .limit_freq = 918000,
+ .poll_ms = 250,
+ .limit_temp_degC = 60,
+ .temp_hysteresis_degC = 10,
+ .freq_step = 2,
};
#ifdef CONFIG_MSM_FAKE_BATTERY
@@ -2226,6 +2226,7 @@
#endif
&msm8930_rpm_device,
&msm8930_rpm_log_device,
+ &msm8930_rpm_rbcpr_device,
&msm8930_rpm_stat_device,
#ifdef CONFIG_ION_MSM
&msm8930_ion_dev,
@@ -2571,8 +2572,10 @@
platform_add_devices(msm8930_footswitch, msm8930_num_footswitch);
if (cpu_is_msm8627())
platform_device_register(&msm8627_device_acpuclk);
- else if (cpu_is_msm8930() || cpu_is_msm8930aa())
+ else if (cpu_is_msm8930())
platform_device_register(&msm8930_device_acpuclk);
+ else if (cpu_is_msm8930aa())
+ platform_device_register(&msm8930aa_device_acpuclk);
platform_add_devices(common_devices, ARRAY_SIZE(common_devices));
msm8930_add_vidc_device();
/*
diff --git a/arch/arm/mach-msm/board-8960-display.c b/arch/arm/mach-msm/board-8960-display.c
index 88827ab..27aeaf4 100644
--- a/arch/arm/mach-msm/board-8960-display.c
+++ b/arch/arm/mach-msm/board-8960-display.c
@@ -615,16 +615,6 @@
return mdp_pdata.cont_splash_enabled;
}
-static struct platform_device mipi_dsi_renesas_panel_device = {
- .name = "mipi_renesas",
- .id = 0,
-};
-
-static struct platform_device mipi_dsi_simulator_panel_device = {
- .name = "mipi_simulator",
- .id = 0,
-};
-
#define LPM_CHANNEL0 0
static int toshiba_gpio[] = {LPM_CHANNEL0};
@@ -1015,20 +1005,12 @@
platform_device_register(&wfd_device);
#endif
- if (machine_is_msm8960_sim())
- platform_device_register(&mipi_dsi_simulator_panel_device);
-
- if (machine_is_msm8960_rumi3())
- platform_device_register(&mipi_dsi_renesas_panel_device);
-
- if (!machine_is_msm8960_sim() && !machine_is_msm8960_rumi3()) {
- platform_device_register(&mipi_dsi_novatek_panel_device);
- platform_device_register(&mipi_dsi_orise_panel_device);
+ platform_device_register(&mipi_dsi_novatek_panel_device);
+ platform_device_register(&mipi_dsi_orise_panel_device);
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL
- platform_device_register(&hdmi_msm_device);
+ platform_device_register(&hdmi_msm_device);
#endif
- }
if (machine_is_msm8960_liquid())
platform_device_register(&mipi_dsi2lvds_bridge_device);
diff --git a/arch/arm/mach-msm/board-8960-pmic.c b/arch/arm/mach-msm/board-8960-pmic.c
index 17b0b6f..5950026 100644
--- a/arch/arm/mach-msm/board-8960-pmic.c
+++ b/arch/arm/mach-msm/board-8960-pmic.c
@@ -386,25 +386,6 @@
KEY(0, 3, KEY_CAMERA_FOCUS),
};
-static struct matrix_keymap_data keymap_data_sim = {
- .keymap_size = ARRAY_SIZE(keymap_sim),
- .keymap = keymap_sim,
-};
-
-static struct pm8xxx_keypad_platform_data keypad_data_sim = {
- .input_name = "keypad_8960",
- .input_phys_device = "keypad_8960/input0",
- .num_rows = 12,
- .num_cols = 8,
- .rows_gpio_start = PM8921_GPIO_PM_TO_SYS(9),
- .cols_gpio_start = PM8921_GPIO_PM_TO_SYS(1),
- .debounce_ms = 15,
- .scan_delay_ms = 32,
- .row_hold_ns = 91500,
- .wakeup = 1,
- .keymap_data = &keymap_data_sim,
-};
-
static int pm8921_therm_mitigation[] = {
1100,
700,
@@ -613,10 +594,6 @@
&msm8960_ssbi_pm8921_pdata;
pm8921_platform_data.num_regulators = msm_pm8921_regulator_pdata_len;
- /* Simulator supports a QWERTY keypad */
- if (machine_is_msm8960_sim())
- pm8921_platform_data.keypad_pdata = &keypad_data_sim;
-
if (machine_is_msm8960_liquid()) {
pm8921_platform_data.keypad_pdata = &keypad_data_liquid;
pm8921_platform_data.leds_pdata = &pm8xxx_leds_pdata_liquid;
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index d56bdbd..63eef4a 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -1407,11 +1407,18 @@
msm_bus_rpm_set_mt_mask();
msm_bus_8960_apps_fabric_pdata.rpm_enabled = 1;
msm_bus_8960_sys_fabric_pdata.rpm_enabled = 1;
- msm_bus_8960_mm_fabric_pdata.rpm_enabled = 1;
msm_bus_apps_fabric.dev.platform_data =
&msm_bus_8960_apps_fabric_pdata;
msm_bus_sys_fabric.dev.platform_data = &msm_bus_8960_sys_fabric_pdata;
- msm_bus_mm_fabric.dev.platform_data = &msm_bus_8960_mm_fabric_pdata;
+ if (cpu_is_msm8960ab()) {
+ msm_bus_8960_sg_mm_fabric_pdata.rpm_enabled = 1;
+ msm_bus_mm_fabric.dev.platform_data =
+ &msm_bus_8960_sg_mm_fabric_pdata;
+ } else {
+ msm_bus_8960_mm_fabric_pdata.rpm_enabled = 1;
+ msm_bus_mm_fabric.dev.platform_data =
+ &msm_bus_8960_mm_fabric_pdata;
+ }
msm_bus_sys_fpb.dev.platform_data = &msm_bus_8960_sys_fpb_pdata;
msm_bus_cpss_fpb.dev.platform_data = &msm_bus_8960_cpss_fpb_pdata;
#endif
@@ -2375,10 +2382,10 @@
static struct msm_thermal_data msm_thermal_pdata = {
.sensor_id = 0,
- .poll_ms = 1000,
- .limit_temp = 60,
- .temp_hysteresis = 10,
- .limit_freq = 918000,
+ .poll_ms = 250,
+ .limit_temp_degC = 60,
+ .temp_hysteresis_degC = 10,
+ .freq_step = 2,
};
#ifdef CONFIG_MSM_FAKE_BATTERY
@@ -2576,67 +2583,6 @@
&msm_tsens_device,
};
-static struct platform_device *sim_devices[] __initdata = {
- &msm8960_device_uart_gsbi5,
- &msm8960_device_otg,
- &msm8960_device_gadget_peripheral,
- &msm_device_hsusb_host,
- &msm_device_hsic_host,
- &android_usb_device,
- &msm_device_vidc,
- &msm_bus_apps_fabric,
- &msm_bus_sys_fabric,
- &msm_bus_mm_fabric,
- &msm_bus_sys_fpb,
- &msm_bus_cpss_fpb,
- &msm_pcm,
- &msm_multi_ch_pcm,
- &msm_pcm_routing,
- &msm_cpudai0,
- &msm_cpudai1,
- &msm8960_cpudai_slimbus_2_rx,
- &msm8960_cpudai_slimbus_2_tx,
- &msm_cpudai_hdmi_rx,
- &msm_cpudai_bt_rx,
- &msm_cpudai_bt_tx,
- &msm_cpudai_fm_rx,
- &msm_cpudai_fm_tx,
- &msm_cpudai_auxpcm_rx,
- &msm_cpudai_auxpcm_tx,
- &msm_cpu_fe,
- &msm_stub_codec,
- &msm_voice,
- &msm_voip,
- &msm_lpa_pcm,
- &msm_compr_dsp,
- &msm_cpudai_incall_music_rx,
- &msm_cpudai_incall_record_rx,
- &msm_cpudai_incall_record_tx,
-
-#if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \
- defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE)
- &qcrypto_device,
-#endif
-
-#if defined(CONFIG_CRYPTO_DEV_QCEDEV) || \
- defined(CONFIG_CRYPTO_DEV_QCEDEV_MODULE)
- &qcedev_device,
-#endif
-};
-
-static struct platform_device *rumi3_devices[] __initdata = {
- &msm8960_device_uart_gsbi5,
- &msm_kgsl_3d0,
- &msm_kgsl_2d0,
- &msm_kgsl_2d1,
-#ifdef CONFIG_MSM_GEMINI
- &msm8960_gemini_device,
-#endif
-#ifdef CONFIG_MSM_MERCURY
- &msm8960_mercury_device,
-#endif
-};
-
static struct platform_device *cdp_devices[] __initdata = {
&msm_8960_q6_lpass,
&msm_8960_q6_mss_fw,
@@ -2994,10 +2940,6 @@
/* Build the matching 'supported_machs' bitmask */
if (machine_is_msm8960_cdp())
mach_mask = I2C_SURF;
- else if (machine_is_msm8960_rumi3())
- mach_mask = I2C_RUMI;
- else if (machine_is_msm8960_sim())
- mach_mask = I2C_SIM;
else if (machine_is_msm8960_fluid())
mach_mask = I2C_FLUID;
else if (machine_is_msm8960_liquid())
@@ -3036,71 +2978,6 @@
#endif
}
-static void __init msm8960_sim_init(void)
-{
- struct msm_watchdog_pdata *wdog_pdata = (struct msm_watchdog_pdata *)
- &msm8960_device_watchdog.dev.platform_data;
-
- wdog_pdata->bark_time = 15000;
- msm_tsens_early_init(&msm_tsens_pdata);
- msm_thermal_init(&msm_thermal_pdata);
- BUG_ON(msm_rpm_init(&msm8960_rpm_data));
- BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
- regulator_suppress_info_printing();
- platform_device_register(&msm8960_device_rpm_regulator);
- msm_clock_init(&msm8960_clock_init_data);
- msm8960_init_pmic();
-
- msm8960_device_otg.dev.platform_data = &msm_otg_pdata;
- msm8960_init_gpiomux();
- msm8960_i2c_init();
- msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
- msm_spm_l2_init(msm_spm_l2_data);
- msm8960_init_buses();
- platform_add_devices(common_devices, ARRAY_SIZE(common_devices));
- msm8960_pm8921_gpio_mpp_init();
- platform_add_devices(sim_devices, ARRAY_SIZE(sim_devices));
-
- msm8960_device_qup_spi_gsbi1.dev.platform_data =
- &msm8960_qup_spi_gsbi1_pdata;
- spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
-
- msm8960_init_mmc();
- msm8960_init_fb();
- slim_register_board_info(msm_slim_devices,
- ARRAY_SIZE(msm_slim_devices));
- BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
-}
-
-static void __init msm8960_rumi3_init(void)
-{
- msm_tsens_early_init(&msm_tsens_pdata);
- msm_thermal_init(&msm_thermal_pdata);
- BUG_ON(msm_rpm_init(&msm8960_rpm_data));
- BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
- regulator_suppress_info_printing();
- platform_device_register(&msm8960_device_rpm_regulator);
- msm8960_init_gpiomux();
- msm8960_init_pmic();
- msm8960_device_qup_spi_gsbi1.dev.platform_data =
- &msm8960_qup_spi_gsbi1_pdata;
- spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
- msm8960_i2c_init();
- msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
- msm_spm_l2_init(msm_spm_l2_data);
- platform_add_devices(common_devices, ARRAY_SIZE(common_devices));
- msm8960_pm8921_gpio_mpp_init();
- platform_add_devices(rumi3_devices, ARRAY_SIZE(rumi3_devices));
- msm8960_init_mmc();
- register_i2c_devices();
-
-
- msm8960_init_fb();
- slim_register_board_info(msm_slim_devices,
- ARRAY_SIZE(msm_slim_devices));
- BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
-}
-
static void __init msm8960_cdp_init(void)
{
if (meminfo_init(SYS_MEMORY, SZ_256M) < 0)
@@ -3193,30 +3070,6 @@
}
}
-MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
- .map_io = msm8960_map_io,
- .reserve = msm8960_reserve,
- .init_irq = msm8960_init_irq,
- .handle_irq = gic_handle_irq,
- .timer = &msm_timer,
- .init_machine = msm8960_sim_init,
- .init_early = msm8960_allocate_memory_regions,
- .init_very_early = msm8960_early_memory,
- .restart = msm_restart,
-MACHINE_END
-
-MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
- .map_io = msm8960_map_io,
- .reserve = msm8960_reserve,
- .init_irq = msm8960_init_irq,
- .handle_irq = gic_handle_irq,
- .timer = &msm_timer,
- .init_machine = msm8960_rumi3_init,
- .init_early = msm8960_allocate_memory_regions,
- .init_very_early = msm8960_early_memory,
- .restart = msm_restart,
-MACHINE_END
-
MACHINE_START(MSM8960_CDP, "QCT MSM8960 CDP")
.map_io = msm8960_map_io,
.reserve = msm8960_reserve,
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index 30b44bd..74aa837 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -59,7 +59,7 @@
#endif
#define MSM_ION_MM_FW_SIZE 0xa00000 /* (10MB) */
#define MSM_ION_MM_SIZE 0x7800000 /* (120MB) */
-#define MSM_ION_QSECOM_SIZE 0x100000 /* (1MB) */
+#define MSM_ION_QSECOM_SIZE 0x600000 /* (6MB) */
#define MSM_ION_MFC_SIZE SZ_8K
#define MSM_ION_AUDIO_SIZE 0x2B4000
#define MSM_ION_HEAP_NUM 8
diff --git a/arch/arm/mach-msm/board-msm7x27a.c b/arch/arm/mach-msm/board-msm7x27a.c
index ba4e098..3bd7eeb 100644
--- a/arch/arm/mach-msm/board-msm7x27a.c
+++ b/arch/arm/mach-msm/board-msm7x27a.c
@@ -883,6 +883,7 @@
*/
static struct ion_platform_data ion_pdata = {
.nr = MSM_ION_HEAP_NUM,
+ .has_outer_cache = 1,
.heaps = {
{
.id = ION_SYSTEM_HEAP_ID,
@@ -896,7 +897,6 @@
.type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_CAMERA_HEAP_NAME,
.memory_type = ION_EBI_TYPE,
- .has_outer_cache = 1,
.extra_data = (void *)&co_ion_pdata,
},
/* PMEM_AUDIO */
@@ -905,7 +905,6 @@
.type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_AUDIO_HEAP_NAME,
.memory_type = ION_EBI_TYPE,
- .has_outer_cache = 1,
.extra_data = (void *)&co_ion_pdata,
},
/* PMEM_MDP = SF */
@@ -914,7 +913,6 @@
.type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_SF_HEAP_NAME,
.memory_type = ION_EBI_TYPE,
- .has_outer_cache = 1,
.extra_data = (void *)&co_ion_pdata,
},
#endif
diff --git a/arch/arm/mach-msm/board-qrd7627a.c b/arch/arm/mach-msm/board-qrd7627a.c
index a694557..1921cc3 100644
--- a/arch/arm/mach-msm/board-qrd7627a.c
+++ b/arch/arm/mach-msm/board-qrd7627a.c
@@ -772,6 +772,7 @@
*/
static struct ion_platform_data ion_pdata = {
.nr = MSM_ION_HEAP_NUM,
+ .has_outer_cache = 1,
.heaps = {
{
.id = ION_SYSTEM_HEAP_ID,
@@ -786,7 +787,6 @@
.name = ION_CAMERA_HEAP_NAME,
.size = MSM_ION_CAMERA_SIZE,
.memory_type = ION_EBI_TYPE,
- .has_outer_cache = 1,
.extra_data = (void *)&co_ion_pdata,
},
/* PMEM_AUDIO */
@@ -796,7 +796,6 @@
.name = ION_AUDIO_HEAP_NAME,
.size = MSM_ION_AUDIO_SIZE,
.memory_type = ION_EBI_TYPE,
- .has_outer_cache = 1,
.extra_data = (void *)&co_ion_pdata,
},
/* PMEM_MDP = SF */
@@ -806,7 +805,6 @@
.name = ION_SF_HEAP_NAME,
.size = MSM_ION_SF_SIZE,
.memory_type = ION_EBI_TYPE,
- .has_outer_cache = 1,
.extra_data = (void *)&co_ion_pdata,
},
#endif
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 7948143..73fd8ef 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -4765,8 +4765,8 @@
CLK_LOOKUP("core_clk", camss_phy1_csi1phytimer_clk.c, ""),
CLK_LOOKUP("core_clk", camss_phy2_csi2phytimer_clk.c, ""),
CLK_LOOKUP("iface_clk", camss_top_ahb_clk.c, ""),
- CLK_LOOKUP("iface_clk", camss_vfe_cpp_ahb_clk.c, ""),
- CLK_LOOKUP("core_clk", camss_vfe_cpp_clk.c, ""),
+ CLK_LOOKUP("iface_clk", camss_vfe_cpp_ahb_clk.c, "fda44000.qcom,iommu"),
+ CLK_LOOKUP("core_clk", camss_vfe_cpp_clk.c, "fda44000.qcom,iommu"),
CLK_LOOKUP("camss_vfe_vfe0_clk", camss_vfe_vfe0_clk.c, ""),
CLK_LOOKUP("camss_vfe_vfe1_clk", camss_vfe_vfe1_clk.c, ""),
CLK_LOOKUP("vfe0_clk_src", vfe0_clk_src.c, ""),
@@ -4782,7 +4782,9 @@
CLK_LOOKUP("iface_clk", oxilicx_ahb_clk.c, "fdb00000.qcom,kgsl-3d0"),
CLK_LOOKUP("core_clk", oxilicx_axi_clk.c, "fdb10000.qcom,iommu"),
CLK_LOOKUP("iface_clk", oxilicx_ahb_clk.c, "fdb10000.qcom,iommu"),
+ CLK_LOOKUP("alt_core_clk", oxili_gfx3d_clk.c, "fdb10000.qcom,iommu"),
CLK_LOOKUP("iface_clk", venus0_ahb_clk.c, "fdc84000.qcom,iommu"),
+ CLK_LOOKUP("alt_core_clk", venus0_vcodec0_clk.c, "fdc84000.qcom,iommu"),
CLK_LOOKUP("core_clk", venus0_axi_clk.c, "fdc84000.qcom,iommu"),
CLK_LOOKUP("bus_clk", venus0_axi_clk.c, ""),
CLK_LOOKUP("src_clk", vcodec0_clk_src.c, "fdce0000.qcom,venus"),
diff --git a/arch/arm/mach-msm/clock-local.c b/arch/arm/mach-msm/clock-local.c
index ca913dc..51e5703 100644
--- a/arch/arm/mach-msm/clock-local.c
+++ b/arch/arm/mach-msm/clock-local.c
@@ -355,7 +355,7 @@
{
u32 reg_val;
- reg_val = readl_relaxed(b->ctl_reg);
+ reg_val = b->ctl_reg ? readl_relaxed(b->ctl_reg) : 0;
if (b->en_mask) {
reg_val &= ~(b->en_mask);
writel_relaxed(reg_val, b->ctl_reg);
@@ -564,7 +564,7 @@
if (!branch_in_hwcg_mode(b)) {
b->hwcg_mask = 0;
c->flags &= ~CLKFLAG_HWCG;
- if (readl_relaxed(b->ctl_reg) & b->en_mask)
+ if (b->ctl_reg && readl_relaxed(b->ctl_reg) & b->en_mask)
return HANDOFF_ENABLED_CLK;
} else {
c->flags |= CLKFLAG_HWCG;
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index 2938135..d5831e2 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -319,6 +319,7 @@
{60, 1152000000},
{62, 1200000000},
{63, 1209600000},
+ {73, 1401600000},
{0, 0},
};
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index aa67690..b2e4208 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -2236,6 +2236,7 @@
RIVA_APPS_WLAN_SMSM_IRQ,
RIVA_APPS_WLAN_RX_DATA_AVAIL_IRQ,
RIVA_APPS_WLAN_DATA_XFER_DONE_IRQ,
+ PM8821_SEC_IRQ_N,
};
struct msm_mpm_device_data apq8064_mpm_dev_data __initdata = {
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index 1f954c8..ffa3c38 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -30,6 +30,7 @@
#include "devices.h"
#include "rpm_log.h"
#include "rpm_stats.h"
+#include "rpm_rbcpr_stats.h"
#include "footswitch.h"
#ifdef CONFIG_MSM_MPM
@@ -287,6 +288,31 @@
},
};
+static struct resource msm_rpm_rbcpr_resource = {
+ .start = 0x0010CB00,
+ .end = 0x0010CB00 + SZ_8K - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct msm_rpmrbcpr_platform_data msm_rpm_rbcpr_pdata = {
+ .rbcpr_data = {
+ .upside_steps = 1,
+ .downside_steps = 2,
+ .svs_voltage = 1050000,
+ .nominal_voltage = 1162500,
+ .turbo_voltage = 1287500,
+ },
+};
+
+struct platform_device msm8930_rpm_rbcpr_device = {
+ .name = "msm_rpm_rbcpr",
+ .id = -1,
+ .dev = {
+ .platform_data = &msm_rpm_rbcpr_pdata,
+ },
+ .resource = &msm_rpm_rbcpr_resource,
+};
+
static int msm8930_LPM_latency = 1000; /* >100 usec for WFI */
struct platform_device msm8930_cpu_idle_device = {
@@ -365,6 +391,11 @@
.id = -1,
};
+struct platform_device msm8930aa_device_acpuclk = {
+ .name = "acpuclk-8930aa",
+ .id = -1,
+};
+
static struct fs_driver_data gfx3d_fs_data = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk", .reset_rate = 27000000 },
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index 8912e96..96984fb 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -236,12 +236,22 @@
.max_speed_delta_khz = 604800,
};
+static struct acpuclk_pdata msm8625ab_acpuclk_pdata = {
+ .max_speed_delta_khz = 801600,
+};
+
struct platform_device msm8625_device_acpuclk = {
.name = "acpuclk-7627",
.id = -1,
.dev.platform_data = &msm8625_acpuclk_pdata,
};
+struct platform_device msm8625ab_device_acpuclk = {
+ .name = "acpuclk-7627",
+ .id = -1,
+ .dev.platform_data = &msm8625ab_acpuclk_pdata,
+};
+
struct platform_device msm_device_smd = {
.name = "msm_smd",
.id = -1,
@@ -1623,6 +1633,7 @@
enum {
MSM8625,
MSM8625A,
+ MSM8625AB,
};
static int __init msm8625_cpu_id(void)
@@ -1643,6 +1654,11 @@
case 0x781:
cpu = MSM8625A;
break;
+ case 0x775:
+ case 0x776:
+ case 0x782:
+ cpu = MSM8625AB;
+ break;
default:
pr_err("Invalid Raw ID\n");
return -ENODEV;
@@ -1665,10 +1681,11 @@
platform_device_register(&msm7x27aa_device_acpuclk);
else if (msm8625_cpu_id() == MSM8625A)
platform_device_register(&msm8625_device_acpuclk);
+ else if (msm8625_cpu_id() == MSM8625AB)
+ platform_device_register(&msm8625ab_device_acpuclk);
} else {
platform_device_register(&msm7x27a_device_acpuclk);
}
-
return 0;
}
diff --git a/arch/arm/mach-msm/devices-msm7x2xa.h b/arch/arm/mach-msm/devices-msm7x2xa.h
index 4184a86..8febe26 100644
--- a/arch/arm/mach-msm/devices-msm7x2xa.h
+++ b/arch/arm/mach-msm/devices-msm7x2xa.h
@@ -33,4 +33,5 @@
void __init msm8x25_spm_device_init(void);
void __init msm8x25_kgsl_3d0_init(void);
void __iomem *core1_reset_base(void);
+extern void setup_mm_for_reboot(void);
#endif
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index daf70a8..d48a801 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -328,6 +328,7 @@
extern struct platform_device msm8930_rpm_device;
extern struct platform_device msm8930_rpm_stat_device;
extern struct platform_device msm8930_rpm_log_device;
+extern struct platform_device msm8930_rpm_rbcpr_device;
extern struct platform_device msm8660_rpm_device;
extern struct platform_device msm8660_rpm_stat_device;
@@ -423,8 +424,10 @@
extern struct platform_device apq8064_device_acpuclk;
extern struct platform_device msm8625_device_acpuclk;
extern struct platform_device msm8627_device_acpuclk;
+extern struct platform_device msm8625ab_device_acpuclk;
extern struct platform_device msm8x50_device_acpuclk;
extern struct platform_device msm8x60_device_acpuclk;
extern struct platform_device msm8930_device_acpuclk;
+extern struct platform_device msm8930aa_device_acpuclk;
extern struct platform_device msm8960_device_acpuclk;
extern struct platform_device msm9615_device_acpuclk;
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index 28c53db..b14f145 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -72,6 +72,7 @@
* @irq: Interrupt number
* @clk: The bus clock for this IOMMU hardware instance
* @pclk: The clock for the IOMMU bus interconnect
+ * @aclk: Alternate clock for this IOMMU core, if any
* @name: Human-readable name of this IOMMU device
* @gdsc: Regulator needed to power this HW block (v2 only)
* @nsmr: Size of the SMT on this HW block (v2 only)
@@ -85,6 +86,7 @@
int ttbr_split;
struct clk *clk;
struct clk *pclk;
+ struct clk *aclk;
const char *name;
struct regulator *gdsc;
unsigned int nsmr;
diff --git a/arch/arm/mach-msm/include/mach/msm_bus_board.h b/arch/arm/mach-msm/include/mach/msm_bus_board.h
index d95e4a4..0a53b46 100644
--- a/arch/arm/mach-msm/include/mach/msm_bus_board.h
+++ b/arch/arm/mach-msm/include/mach/msm_bus_board.h
@@ -65,6 +65,7 @@
extern struct msm_bus_fabric_registration msm_bus_8960_apps_fabric_pdata;
extern struct msm_bus_fabric_registration msm_bus_8960_sys_fabric_pdata;
extern struct msm_bus_fabric_registration msm_bus_8960_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata;
extern struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata;
extern struct msm_bus_fabric_registration msm_bus_8960_cpss_fpb_pdata;
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 2c3d395..f7ba507 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -330,4 +330,16 @@
#endif
}
+static inline int cpu_is_msm8974(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974;
+#else
+ return 0;
+#endif
+}
+
#endif
diff --git a/arch/arm/mach-msm/lpm_resources.c b/arch/arm/mach-msm/lpm_resources.c
index e5be352..ebcbd26 100644
--- a/arch/arm/mach-msm/lpm_resources.c
+++ b/arch/arm/mach-msm/lpm_resources.c
@@ -46,7 +46,7 @@
static bool msm_lpm_get_rpm_notif = true;
/*Macros*/
-#define VDD_DIG_ACTIVE (950000)
+#define VDD_DIG_ACTIVE (5)
#define VDD_MEM_ACTIVE (1050000)
#define MAX_RS_NAME (16)
#define MAX_RS_SIZE (4)
@@ -264,7 +264,7 @@
return ret;
}
- ret = msm_rpm_wait_for_ack(msg_id);
+ ret = msm_rpm_wait_for_ack_noirq(msg_id);
if (ret < 0) {
pr_err("%s: Couldn't get ACK from RPM for Msg %d Error %d",
__func__, msg_id, ret);
diff --git a/arch/arm/mach-msm/mpm-of.c b/arch/arm/mach-msm/mpm-of.c
index 1832301..f98c0f2 100644
--- a/arch/arm/mach-msm/mpm-of.c
+++ b/arch/arm/mach-msm/mpm-of.c
@@ -554,7 +554,15 @@
pr_info("%s(): request_irq failed errno: %d\n", __func__, ret);
goto failed_irq_get;
}
- msm_mpm_initialized &= MSM_MPM_DEVICE_PROBED;
+ ret = irq_set_irq_wake(dev->mpm_ipc_irq, 1);
+
+ if (ret) {
+ pr_err("%s: failed to set wakeup irq %u: %d\n",
+ __func__, dev->mpm_ipc_irq, ret);
+ goto failed_irq_get;
+
+ }
+ msm_mpm_initialized |= MSM_MPM_DEVICE_PROBED;
return 0;
@@ -701,9 +709,10 @@
}
}
- msm_mpm_initialized &= MSM_MPM_IRQ_MAPPING_DONE;
+ msm_mpm_initialized |= MSM_MPM_IRQ_MAPPING_DONE;
return;
+
failed_malloc:
for (i = 0; i < MSM_MPM_NR_MPM_IRQS; i++) {
mpm_of_map[i].chip->irq_mask = NULL;
diff --git a/arch/arm/mach-msm/msm-buspm-dev.c b/arch/arm/mach-msm/msm-buspm-dev.c
index 296418d..a818eed 100644
--- a/arch/arm/mach-msm/msm-buspm-dev.c
+++ b/arch/arm/mach-msm/msm-buspm-dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -40,6 +40,13 @@
return (dev) ? dev->vaddr : NULL;
}
+static inline unsigned int msm_buspm_dev_get_buflen(struct file *filp)
+{
+ struct msm_buspm_map_dev *dev = filp->private_data;
+
+ return dev ? dev->buflen : 0;
+}
+
static inline unsigned long msm_buspm_dev_get_paddr(struct file *filp)
{
struct msm_buspm_map_dev *dev = filp->private_data;
@@ -114,6 +121,7 @@
unsigned long paddr;
int retval = 0;
void *buf = msm_buspm_dev_get_vaddr(filp);
+ unsigned int buflen = msm_buspm_dev_get_buflen(filp);
unsigned char *dbgbuf = buf;
switch (cmd) {
@@ -156,7 +164,7 @@
break;
}
- if ((xfer.size <= sizeof(buf)) &&
+ if ((xfer.size <= buflen) &&
(copy_to_user((void __user *)xfer.data, buf,
xfer.size))) {
retval = -EFAULT;
@@ -177,7 +185,7 @@
break;
}
- if ((sizeof(buf) <= xfer.size) &&
+ if ((buflen <= xfer.size) &&
(copy_from_user(buf, (void __user *)xfer.data,
xfer.size))) {
retval = -EFAULT;
diff --git a/arch/arm/mach-msm/msm-krait-l2-accessors.c b/arch/arm/mach-msm/msm-krait-l2-accessors.c
index 41a2490..3da155a 100644
--- a/arch/arm/mach-msm/msm-krait-l2-accessors.c
+++ b/arch/arm/mach-msm/msm-krait-l2-accessors.c
@@ -82,10 +82,6 @@
u32 uninitialized_var(l2cpuvrf8_val), l2cpuvrf8_addr = 0;
u32 ret_val;
- /* CP15 registers are not emulated on RUMI3. */
- if (machine_is_msm8960_rumi3())
- return 0;
-
raw_spin_lock_irqsave(&l2_access_lock, flags);
if (l2cpuvrf8_needs_fix(reg_addr))
@@ -115,10 +111,6 @@
unsigned long flags;
u32 uninitialized_var(l2cpuvrf8_val), l2cpuvrf8_addr = 0;
- /* CP15 registers are not emulated on RUMI3. */
- if (machine_is_msm8960_rumi3())
- return;
-
raw_spin_lock_irqsave(&l2_access_lock, flags);
if (l2cpuvrf8_needs_fix(reg_addr))
@@ -144,9 +136,6 @@
{
u32 val;
unsigned long flags;
- /* CP15 registers are not emulated on RUMI3. */
- if (machine_is_msm8960_rumi3())
- return 0;
raw_spin_lock_irqsave(&l2_access_lock, flags);
asm volatile ("mcr p15, 3, %[l2cpselr], c15, c0, 6\n\t"
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
index 7ede23d..9ba9f7b1 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,12 @@
MSM_BUS_TIERED_SLAVE_KMPSS_L2,
};
+enum msm_bus_sg_tiered_slaves_type {
+ SG_TIERED_SLAVE_MM_IMEM = 1,
+ SG_MMSS_TIERED_SLAVE_FAB_APPS_0,
+ SG_MMSS_TIERED_SLAVE_FAB_APPS_1,
+};
+
enum msm_bus_8960_master_ports_type {
MSM_BUS_SYSTEM_MASTER_PORT_APPSS_FAB = 0,
MSM_BUS_MASTER_PORT_SPS,
@@ -106,6 +112,23 @@
MSM_BUS_SLAVE_PORT_RIVA,
};
+enum msm_bus_8960_sg_master_ports_type {
+ MSM_BUS_MASTER_PORT_GRAPHICS_3D_PORT0 =
+ MSM_BUS_MMSS_MASTER_PORT_UNUSED_2,
+ MSM_BUS_MASTER_PORT_VIDEO_CAP =
+ MSM_BUS_MASTER_PORT_GRAPHICS_2D_CORE0,
+ MSM_BUS_MASTER_PORT_VIDEO_DEC =
+ MSM_BUS_MASTER_PORT_GRAPHICS_2D_CORE1,
+ MSM_BUS_MASTER_PORT_VIDEO_ENC =
+ MSM_BUS_MASTER_PORT_HD_CODEC_PORT0,
+};
+
+enum msm_bus_8960_sg_slave_ports_type {
+ SG_SLAVE_PORT_MM_IMEM = 0,
+ SG_MMSS_SLAVE_PORT_APPS_FAB_0,
+ SG_MMSS_SLAVE_PORT_APPS_FAB_1,
+};
+
static int tier2[] = {MSM_BUS_BW_TIER2,};
static uint32_t master_iids[NMASTERS];
static uint32_t slave_iids[NSLAVES];
@@ -424,6 +447,10 @@
static int mport_mdp1[] = {MSM_BUS_MASTER_PORT_MDP_PORT1,};
static int mport_rotator[] = {MSM_BUS_MASTER_PORT_ROTATOR,};
static int mport_graphics_3d[] = {MSM_BUS_MASTER_PORT_GRAPHICS_3D,};
+static int pro_mport_graphics_3d[] = {
+ MSM_BUS_MASTER_PORT_GRAPHICS_3D_PORT0,
+ MSM_BUS_MASTER_PORT_GRAPHICS_3D,
+};
static int mport_jpeg_dec[] = {MSM_BUS_MASTER_PORT_JPEG_DEC,};
static int mport_graphics_2d_core0[] = {MSM_BUS_MASTER_PORT_GRAPHICS_2D_CORE0,};
static int mport_vfe[] = {MSM_BUS_MASTER_PORT_VFE,};
@@ -432,6 +459,9 @@
static int mport_graphics_2d_core1[] = {MSM_BUS_MASTER_PORT_GRAPHICS_2D_CORE1,};
static int mport_hd_codec_port0[] = {MSM_BUS_MASTER_PORT_HD_CODEC_PORT0,};
static int mport_hd_codec_port1[] = {MSM_BUS_MASTER_PORT_HD_CODEC_PORT1,};
+static int mport_video_cap[] = {MSM_BUS_MASTER_PORT_VIDEO_CAP};
+static int mport_video_enc[] = {MSM_BUS_MASTER_PORT_VIDEO_ENC};
+static int mport_video_dec[] = {MSM_BUS_MASTER_PORT_VIDEO_DEC};
static int appss_mport_fab_mmss[] = {
MSM_BUS_APPSS_MASTER_PORT_FAB_MMSS_0,
MSM_BUS_APPSS_MASTER_PORT_FAB_MMSS_1
@@ -439,15 +469,25 @@
static int mmss_sport_apps_fab[] = {
MSM_BUS_MMSS_SLAVE_PORT_APPS_FAB_0,
- MSM_BUS_MMSS_SLAVE_PORT_APPS_FAB_1
+ MSM_BUS_MMSS_SLAVE_PORT_APPS_FAB_1,
+};
+static int sg_sport_apps_fab[] = {
+ SG_MMSS_SLAVE_PORT_APPS_FAB_0,
+ SG_MMSS_SLAVE_PORT_APPS_FAB_1,
};
static int sport_mm_imem[] = {MSM_BUS_SLAVE_PORT_MM_IMEM,};
+static int sg_sport_mm_imem[] = {SG_SLAVE_PORT_MM_IMEM,};
static int mmss_tiered_slave_fab_apps[] = {
MSM_BUS_MMSS_TIERED_SLAVE_FAB_APPS_0,
MSM_BUS_MMSS_TIERED_SLAVE_FAB_APPS_1,
};
+static int sg_tiered_slave_fab_apps[] = {
+ SG_MMSS_TIERED_SLAVE_FAB_APPS_0,
+ SG_MMSS_TIERED_SLAVE_FAB_APPS_1,
+};
static int tiered_slave_mm_imem[] = {MSM_BUS_TIERED_SLAVE_MM_IMEM,};
+static int sg_tiered_slave_mm_imem[] = {SG_TIERED_SLAVE_MM_IMEM,};
static struct msm_bus_node_info mmss_fabric_info[] = {
@@ -557,6 +597,106 @@
},
};
+static struct msm_bus_node_info sg_mmss_fabric_info[] = {
+ {
+ .id = MSM_BUS_MASTER_MDP_PORT0,
+ .masterp = mport_mdp,
+ .num_mports = ARRAY_SIZE(mport_mdp),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_MDP_PORT1,
+ .masterp = mport_mdp1,
+ .num_mports = ARRAY_SIZE(mport_mdp1),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_ROTATOR,
+ .masterp = mport_rotator,
+ .num_mports = ARRAY_SIZE(mport_rotator),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_GRAPHICS_3D,
+ .masterp = pro_mport_graphics_3d,
+ .num_mports = ARRAY_SIZE(pro_mport_graphics_3d),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_JPEG_DEC,
+ .masterp = mport_jpeg_dec,
+ .num_mports = ARRAY_SIZE(mport_jpeg_dec),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_VIDEO_CAP,
+ .masterp = mport_video_cap,
+ .num_mports = ARRAY_SIZE(mport_video_cap),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_VFE,
+ .masterp = mport_vfe,
+ .num_mports = ARRAY_SIZE(mport_vfe),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_VPE,
+ .masterp = mport_vpe,
+ .num_mports = ARRAY_SIZE(mport_vpe),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_JPEG_ENC,
+ .masterp = mport_jpeg_enc,
+ .num_mports = ARRAY_SIZE(mport_jpeg_enc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ /* This port has been added for V2. It is absent in V1 */
+ {
+ .id = MSM_BUS_MASTER_VIDEO_DEC,
+ .masterp = mport_video_dec,
+ .num_mports = ARRAY_SIZE(mport_video_dec),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_VIDEO_ENC,
+ .masterp = mport_video_enc,
+ .num_mports = ARRAY_SIZE(mport_video_enc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_FAB_APPSS,
+ .gateway = 1,
+ .slavep = sg_sport_apps_fab,
+ .num_sports = ARRAY_SIZE(sg_sport_apps_fab),
+ .masterp = appss_mport_fab_mmss,
+ .num_mports = ARRAY_SIZE(appss_mport_fab_mmss),
+ .tier = sg_tiered_slave_fab_apps,
+ .num_tiers = ARRAY_SIZE(sg_tiered_slave_fab_apps),
+ .buswidth = 16,
+ },
+ {
+ .id = MSM_BUS_SLAVE_MM_IMEM,
+ .slavep = sg_sport_mm_imem,
+ .num_sports = ARRAY_SIZE(sg_sport_mm_imem),
+ .tier = sg_tiered_slave_mm_imem,
+ .num_tiers = ARRAY_SIZE(sg_tiered_slave_mm_imem),
+ .buswidth = 8,
+ },
+};
+
static struct msm_bus_node_info sys_fpb_fabric_info[] = {
{
.id = MSM_BUS_FAB_SYSTEM,
@@ -919,6 +1059,22 @@
.board_algo = &msm_bus_board_algo,
};
+struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata = {
+ .id = MSM_BUS_FAB_MMSS,
+ .name = "msm_mm_fab",
+ sg_mmss_fabric_info,
+ ARRAY_SIZE(sg_mmss_fabric_info),
+ .ahb = 0,
+ .fabclk[DUAL_CTX] = "bus_clk",
+ .fabclk[ACTIVE_CTX] = "bus_a_clk",
+ .haltid = MSM_RPM_ID_MMSS_FABRIC_CFG_HALT_0,
+ .offset = MSM_RPM_ID_MM_FABRIC_ARB_0,
+ .nmasters = 13,
+ .nslaves = 3,
+ .ntieredslaves = 3,
+ .board_algo = &msm_bus_board_algo,
+};
+
struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata = {
.id = MSM_BUS_FAB_SYSTEM_FPB,
.name = "msm_sys_fpb",
diff --git a/arch/arm/mach-msm/msm_rtb.c b/arch/arm/mach-msm/msm_rtb.c
index 9dbf9c1..a60c213 100644
--- a/arch/arm/mach-msm/msm_rtb.c
+++ b/arch/arm/mach-msm/msm_rtb.c
@@ -16,11 +16,13 @@
#include <linux/kernel.h>
#include <linux/memory_alloc.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/atomic.h>
+#include <linux/of.h>
#include <asm/io.h>
#include <asm-generic/sizes.h>
#include <mach/memory.h>
@@ -31,6 +33,8 @@
#define SENTINEL_BYTE_2 0xAA
#define SENTINEL_BYTE_3 0xFF
+#define RTB_COMPAT_STR "qcom,msm-rtb"
+
/* Write
* 1) 3 bytes sentinel
 * 2) 1 byte of log type
@@ -227,8 +231,22 @@
#if defined(CONFIG_MSM_RTB_SEPARATE_CPUS)
unsigned int cpu;
#endif
+ int ret;
- msm_rtb.size = d->size;
+ if (!pdev->dev.of_node) {
+ msm_rtb.size = d->size;
+ } else {
+ int size;
+
+ ret = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,memory-reservation-size",
+ &size);
+
+ if (ret < 0)
+ return ret;
+
+ msm_rtb.size = size;
+ }
if (msm_rtb.size <= 0 || msm_rtb.size > SZ_1M)
return -EINVAL;
@@ -275,10 +293,17 @@
return 0;
}
+static struct of_device_id msm_match_table[] = {
+ {.compatible = RTB_COMPAT_STR},
+ {},
+};
+EXPORT_COMPAT(RTB_COMPAT_STR);
+
static struct platform_driver msm_rtb_driver = {
.driver = {
.name = "msm_rtb",
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .of_match_table = msm_match_table
},
};
diff --git a/arch/arm/mach-msm/pcie.c b/arch/arm/mach-msm/pcie.c
index f105356..d954b53 100644
--- a/arch/arm/mach-msm/pcie.c
+++ b/arch/arm/mach-msm/pcie.c
@@ -200,6 +200,16 @@
static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
+ /*
+ * Attempting to reset the secondary bus causes the PCIe core to reset.
+ * Disable the secondary bus reset functionality.
+ */
+ if ((bus->number == 0) && (where == PCI_BRIDGE_CONTROL) &&
+ (val & PCI_BRIDGE_CTL_BUS_RESET)) {
+ pr_info("PCIE secondary bus reset not supported\n");
+ val &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ }
+
return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
}
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index e279f99..62685ca 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -155,11 +155,14 @@
goto err_clks;
/* Program Image Address */
- if (drv->self_auth)
+ if (drv->self_auth) {
writel_relaxed(drv->start_addr, drv->rmb_base + RMB_MBA_IMAGE);
- else
+ /* Ensure write to RMB base occurs before reset is released. */
+ mb();
+ } else {
writel_relaxed((drv->start_addr >> 4) & 0x0FFFFFF0,
drv->reg_base + QDSP6SS_RST_EVB);
+ }
ret = pil_q6v5_reset(pil);
if (ret)
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 5e063a1..08cbc34 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -83,11 +83,6 @@
if (!base_ptr)
return -ENODEV;
- if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3()) {
- writel_relaxed(0x10, base_ptr+0x04);
- writel_relaxed(0x80, base_ptr+0x04);
- }
-
if (machine_is_apq8064_sim())
writel_relaxed(0xf0000, base_ptr+0x04);
@@ -135,8 +130,7 @@
if (cpu_is_msm8x60())
return scorpion_release_secondary();
- if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3() ||
- machine_is_apq8064_sim())
+ if (machine_is_apq8064_sim())
return krait_release_secondary_sim(0x02088000, cpu);
if (machine_is_msm8974_sim())
diff --git a/arch/arm/mach-msm/pm2.c b/arch/arm/mach-msm/pm2.c
index 7a8e4c3..8fccda4 100644
--- a/arch/arm/mach-msm/pm2.c
+++ b/arch/arm/mach-msm/pm2.c
@@ -21,10 +21,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/pm.h>
#include <linux/pm_qos.h>
#include <linux/suspend.h>
-#include <linux/reboot.h>
#include <linux/io.h>
#include <linux/tick.h>
#include <linux/memory.h>
@@ -34,7 +32,6 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#endif
-#include <asm/system_misc.h>
#ifdef CONFIG_CACHE_L2X0
#include <asm/hardware/cache-l2x0.h>
#endif
@@ -1577,55 +1574,6 @@
}
}
-/******************************************************************************
- * Restart Definitions
- *****************************************************************************/
-
-static uint32_t restart_reason = 0x776655AA;
-
-static void msm_pm_power_off(void)
-{
- msm_rpcrouter_close();
- msm_proc_comm(PCOM_POWER_DOWN, 0, 0);
- for (;;)
- ;
-}
-
-static void msm_pm_restart(char str, const char *cmd)
-{
- msm_rpcrouter_close();
- msm_proc_comm(PCOM_RESET_CHIP, &restart_reason, 0);
-
- for (;;)
- ;
-}
-
-static int msm_reboot_call
- (struct notifier_block *this, unsigned long code, void *_cmd)
-{
- if ((code == SYS_RESTART) && _cmd) {
- char *cmd = _cmd;
- if (!strcmp(cmd, "bootloader")) {
- restart_reason = 0x77665500;
- } else if (!strcmp(cmd, "recovery")) {
- restart_reason = 0x77665502;
- } else if (!strcmp(cmd, "eraseflash")) {
- restart_reason = 0x776655EF;
- } else if (!strncmp(cmd, "oem-", 4)) {
- unsigned code = simple_strtoul(cmd + 4, 0, 16) & 0xff;
- restart_reason = 0x6f656d00 | code;
- } else {
- restart_reason = 0x77665501;
- }
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block msm_reboot_notifier = {
- .notifier_call = msm_reboot_call,
-};
-
-
/*
* Initialize the power management subsystem.
*
@@ -1693,10 +1641,6 @@
virt_to_phys(&msm_pm_pc_pgd));
#endif
- pm_power_off = msm_pm_power_off;
- arm_pm_restart = msm_pm_restart;
- register_reboot_notifier(&msm_reboot_notifier);
-
msm_pm_smem_data = smem_alloc(SMEM_APPS_DEM_SLAVE_DATA,
sizeof(*msm_pm_smem_data));
if (msm_pm_smem_data == NULL) {
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
index a8773ea..a973b92 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
@@ -1131,13 +1131,12 @@
{
uint16_t ind = 0;
+ usf_unregister_conflicting_events(
+ usf->conflicting_event_types);
+ usf->conflicting_event_types = 0;
for (ind = 0; ind < USF_MAX_EVENT_IND; ++ind) {
if (usf->input_ifs[ind] == NULL)
continue;
-
- usf_unregister_conflicting_events(
- usf->conflicting_event_types);
- usf->conflicting_event_types = 0;
input_unregister_device(usf->input_ifs[ind]);
usf->input_ifs[ind] = NULL;
pr_debug("%s input_unregister_device[%s]\n",
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c
index b99a9b0..f566e82 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c
@@ -39,9 +39,22 @@
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT |
INPUT_DEVICE_ID_MATCH_ABSBIT,
- .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) },
+ .evbit = { BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
- .absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+ /* assumption: ABS_X & ABS_Y are in the same long */
+ .absbit = { [BIT_WORD(ABS_X)] = BIT_MASK(ABS_X) |
+ BIT_MASK(ABS_Y) },
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_KEYBIT |
+ INPUT_DEVICE_ID_MATCH_ABSBIT,
+ .evbit = { BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+ /* assumption: MT_.._X & MT_.._Y are in the same long */
+ .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+ BIT_MASK(ABS_MT_POSITION_X) |
+ BIT_MASK(ABS_MT_POSITION_Y) },
},
{ } /* Terminating entry */
};
@@ -76,12 +89,12 @@
int ind = handler->minor;
pr_debug("%s: name=[%s]; ind=%d\n", __func__, dev->name, ind);
+
if (s_usfcdev_events[ind].registered_event &&
s_usfcdev_events[ind].match_cb) {
rc = (*s_usfcdev_events[ind].match_cb)((uint16_t)ind, dev);
pr_debug("%s: [%s]; rc=%d\n", __func__, dev->name, rc);
}
-
return rc;
}
@@ -128,10 +141,12 @@
{
uint16_t ind = (uint16_t)handle->handler->minor;
- pr_debug("%s: event_type=%d; filter=%d\n",
+ pr_debug("%s: event_type=%d; filter=%d; abs_xy=%ld; abs_y_mt[]=%ld\n",
__func__,
ind,
- s_usfcdev_events[ind].filter);
+ s_usfcdev_events[ind].filter,
+ usfc_tsc_ids[0].absbit[0],
+ usfc_tsc_ids[1].absbit[1]);
return s_usfcdev_events[ind].filter;
}
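
The usfc_tsc_ids entries above rely on the kernel's BIT_WORD()/BIT_MASK() helpers, and the comments record the assumption that both axis codes fall into the same long. A small, hypothetical userspace check mirroring those macros (ABS_X/ABS_Y are 0x00/0x01 and ABS_MT_POSITION_X/Y are 0x35/0x36) shows why one absbit array entry per pair is enough:

    /* Hypothetical userspace check mirroring <linux/bitops.h>. */
    #include <stdio.h>

    #define BITS_PER_LONG       (8 * sizeof(long))
    #define BIT_MASK(nr)        (1UL << ((nr) % BITS_PER_LONG))
    #define BIT_WORD(nr)        ((nr) / BITS_PER_LONG)

    #define ABS_X               0x00
    #define ABS_Y               0x01
    #define ABS_MT_POSITION_X   0x35
    #define ABS_MT_POSITION_Y   0x36

    int main(void)
    {
        /* A single .absbit entry works only if both codes share a word. */
        printf("ABS_X/Y words: %lu %lu\n",
               (unsigned long)BIT_WORD(ABS_X),
               (unsigned long)BIT_WORD(ABS_Y));
        printf("ABS_MT_POSITION_X/Y words: %lu %lu\n",
               (unsigned long)BIT_WORD(ABS_MT_POSITION_X),
               (unsigned long)BIT_WORD(ABS_MT_POSITION_Y));
        printf("combined X|Y mask: %#lx\n",
               BIT_MASK(ABS_X) | BIT_MASK(ABS_Y));
        return 0;
    }
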
diff --git a/arch/arm/mach-msm/restart_7k.c b/arch/arm/mach-msm/restart_7k.c
new file mode 100644
index 0000000..dc9edf4
--- /dev/null
+++ b/arch/arm/mach-msm/restart_7k.c
@@ -0,0 +1,101 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/pm.h>
+#include <asm/system_misc.h>
+#include <mach/proc_comm.h>
+
+#include "devices-msm7x2xa.h"
+#include "smd_rpcrouter.h"
+
+static uint32_t restart_reason = 0x776655AA;
+
+static void msm_pm_power_off(void)
+{
+ msm_rpcrouter_close();
+ msm_proc_comm(PCOM_POWER_DOWN, 0, 0);
+ for (;;)
+ ;
+}
+
+static void msm_pm_restart(char str, const char *cmd)
+{
+ msm_rpcrouter_close();
+ pr_debug("The reset reason is %x\n", restart_reason);
+
+ /* Disable interrupts */
+ local_irq_disable();
+ local_fiq_disable();
+
+ /*
+ * Set up a flat 1:1 memory mapping in place of
+ * the user-mode pages to ensure predictable results.
+ * This function also takes care of flushing the
+ * caches and the TLB.
+ */
+ setup_mm_for_reboot();
+
+ msm_proc_comm(PCOM_RESET_CHIP, &restart_reason, 0);
+
+ for (;;)
+ ;
+}
+
+static int msm_reboot_call
+ (struct notifier_block *this, unsigned long code, void *_cmd)
+{
+ if ((code == SYS_RESTART) && _cmd) {
+ char *cmd = _cmd;
+ if (!strncmp(cmd, "bootloader", 10)) {
+ restart_reason = 0x77665500;
+ } else if (!strncmp(cmd, "recovery", 8)) {
+ restart_reason = 0x77665502;
+ } else if (!strncmp(cmd, "eraseflash", 10)) {
+ restart_reason = 0x776655EF;
+ } else if (!strncmp(cmd, "oem-", 4)) {
+ unsigned long code;
+ int res;
+ res = kstrtoul(cmd + 4, 16, &code);
+ code &= 0xff;
+ restart_reason = 0x6f656d00 | code;
+ } else {
+ restart_reason = 0x77665501;
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block msm_reboot_notifier = {
+ .notifier_call = msm_reboot_call,
+};
+
+static int __init msm_pm_restart_init(void)
+{
+ int ret;
+
+ pm_power_off = msm_pm_power_off;
+ arm_pm_restart = msm_pm_restart;
+
+ ret = register_reboot_notifier(&msm_reboot_notifier);
+ if (ret)
+ pr_err("Failed to register reboot notifier\n");
+
+ return ret;
+}
+late_initcall(msm_pm_restart_init);
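
A hypothetical userspace model of the command-to-reason mapping used by the notifier above (the kernel side receives the string passed to the reboot syscall, e.g. "reboot bootloader"); 0x6f656d00 is simply the ASCII bytes 'o', 'e', 'm' with the OEM code in the low byte:

    /* Hypothetical userspace model of the mapping above (not kernel code). */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static unsigned int reason_for(const char *cmd)
    {
        if (!strncmp(cmd, "bootloader", 10))
            return 0x77665500;
        if (!strncmp(cmd, "recovery", 8))
            return 0x77665502;
        if (!strncmp(cmd, "eraseflash", 10))
            return 0x776655EF;
        if (!strncmp(cmd, "oem-", 4))
            return 0x6f656d00 | (strtoul(cmd + 4, NULL, 16) & 0xff);
        return 0x77665501;      /* plain "reboot" */
    }

    int main(void)
    {
        printf("%#x\n", reason_for("oem-4f"));  /* prints 0x6f656d4f */
        return 0;
    }
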
diff --git a/arch/arm/mach-msm/rpm_rbcpr_stats.c b/arch/arm/mach-msm/rpm_rbcpr_stats.c
new file mode 100644
index 0000000..7f27efc
--- /dev/null
+++ b/arch/arm/mach-msm/rpm_rbcpr_stats.c
@@ -0,0 +1,415 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/sort.h>
+#include <asm/uaccess.h>
+#include <mach/msm_iomap.h>
+#include "timer.h"
+#include "rpm_rbcpr_stats.h"
+
+#define RBCPR_USER_BUF (2000)
+#define STR(a) (#a)
+#define GETFIELD(a) ((strnstr(STR(a), "->", 80) + 2))
+#define PRINTFIELD(buf, buf_size, pos, format, ...) \
+ ((pos < buf_size) ? snprintf((buf + pos), (buf_size - pos), format,\
+ ## __VA_ARGS__) : 0)
+
+enum {
+ RBCPR_CORNER_SVS = 0,
+ RBCPR_CORNER_NOMINAL,
+ RBCPR_CORNER_TURBO,
+ RBCPR_CORNERS_COUNT,
+ RBCPR_CORNER_INVALID = 0x7FFFFFFF,
+};
+
+struct msm_rpmrbcpr_recmnd {
+ uint32_t voltage;
+ uint32_t timestamp;
+};
+
+struct msm_rpmrbcpr_corners {
+ int efuse_adjustment;
+ struct msm_rpmrbcpr_recmnd *rpm_rcmnd;
+ uint32_t programmed_voltage;
+ uint32_t isr_counter;
+ uint32_t min_counter;
+ uint32_t max_counter;
+};
+
+struct msm_rpmrbcpr_stats {
+ uint32_t status_count;
+ uint32_t num_corners;
+ uint32_t num_latest_recommends;
+ struct msm_rpmrbcpr_corners *rbcpr_corners;
+ uint32_t current_corner;
+ uint32_t railway_voltage;
+ uint32_t enable;
+};
+
+struct msm_rpmrbcpr_stats_internal {
+ void __iomem *regbase;
+ uint32_t len;
+ char buf[RBCPR_USER_BUF];
+};
+
+static DEFINE_SPINLOCK(rpm_rbcpr_lock);
+static struct msm_rpmrbcpr_design_data rbcpr_design_data;
+static struct msm_rpmrbcpr_stats rbcpr_stats;
+static struct msm_rpmrbcpr_stats_internal pvtdata;
+
+static inline unsigned long msm_rpmrbcpr_read_data(void __iomem *regbase,
+ int offset)
+{
+ return readl_relaxed(regbase + (offset * 4));
+}
+
+static int msm_rpmrbcpr_cmp_func(const void *a, const void *b)
+{
+ struct msm_rpmrbcpr_recmnd *pa = (struct msm_rpmrbcpr_recmnd *)(a);
+ struct msm_rpmrbcpr_recmnd *pb = (struct msm_rpmrbcpr_recmnd *)(b);
+ return pa->timestamp - pb->timestamp;
+}
+
+static char *msm_rpmrbcpr_corner_string(uint32_t corner)
+{
+ switch (corner) {
+ case RBCPR_CORNER_SVS:
+ return STR(RBCPR_CORNER_SVS);
+ break;
+ case RBCPR_CORNER_NOMINAL:
+ return STR(RBCPR_CORNER_NOMINAL);
+ break;
+ case RBCPR_CORNER_TURBO:
+ return STR(RBCPR_CORNER_TURBO);
+ break;
+ case RBCPR_CORNERS_COUNT:
+ case RBCPR_CORNER_INVALID:
+ default:
+ return STR(RBCPR_CORNER_INVALID);
+ break;
+ }
+}
+
+static int msm_rpmrbcpr_print_buf(struct msm_rpmrbcpr_stats *pdata,
+ struct msm_rpmrbcpr_design_data *pdesdata,
+ char *buf)
+{
+ int pos = 0;
+ struct msm_rpmrbcpr_corners *corners;
+ struct msm_rpmrbcpr_recmnd *rcmnd;
+ int i, j;
+ int current_timestamp = msm_timer_get_sclk_ticks();
+
+ if (!pdata->enable) {
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ "RBCPR Stats not enabled at RPM");
+ return pos;
+ }
+
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ ":RBCPR Platform Data");
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %u)", GETFIELD(pdesdata->upside_steps),
+ pdesdata->upside_steps);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s:%u)", GETFIELD(pdesdata->downside_steps),
+ pdesdata->downside_steps);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %d)", GETFIELD(pdesdata->svs_voltage),
+ pdesdata->svs_voltage);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %d)", GETFIELD(pdesdata->nominal_voltage),
+ pdesdata->nominal_voltage);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %d)\n", GETFIELD(pdesdata->turbo_voltage),
+ pdesdata->turbo_voltage);
+
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ ":RBCPR Stats");
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %u)", GETFIELD(pdata->status_counter),
+ pdata->status_count);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %s)", GETFIELD(pdata->current_corner),
+ msm_rpmrbcpr_corner_string(pdata->current_corner));
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (current_timestamp: 0x%x)",
+ current_timestamp);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %u)\n", GETFIELD(pdata->railway_voltage),
+ pdata->railway_voltage);
+
+ for (i = 0; i < pdata->num_corners; i++) {
+ corners = &pdata->rbcpr_corners[i];
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ ":\tRBCPR Corner Data");
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (name: %s)", msm_rpmrbcpr_corner_string(i));
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %d)", GETFIELD(corners->efuse_adjustment),
+ corners->efuse_adjustment);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %u)", GETFIELD(corners->programmed_voltage),
+ corners->programmed_voltage);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %u)", GETFIELD(corners->isr_counter),
+ corners->isr_counter);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ "(%s: %u)", GETFIELD(corners->min_counter),
+ corners->min_counter);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ "(%s:%u)\n", GETFIELD(corners->max_counter),
+ corners->max_counter);
+ for (j = 0; j < pdata->num_latest_recommends; j++) {
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ ":\t\tVoltage History[%d]", j);
+ rcmnd = &corners->rpm_rcmnd[j];
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %u)", GETFIELD(rcmnd->voltage),
+ rcmnd->voltage);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: 0x%x)\n", GETFIELD(rcmnd->timestamp),
+ rcmnd->timestamp);
+ }
+ }
+ return pos;
+}
+
+
+static void msm_rpmrbcpr_copy_data(struct msm_rpmrbcpr_stats_internal *pdata,
+ struct msm_rpmrbcpr_stats *prbcpr_stats)
+{
+ struct msm_rpmrbcpr_corners *corners;
+ struct msm_rpmrbcpr_recmnd *rcmnd;
+ int i, j;
+ int offset = (offsetof(struct msm_rpmrbcpr_stats, rbcpr_corners) / 4);
+
+ if (!prbcpr_stats)
+ return;
+
+ for (i = 0; i < prbcpr_stats->num_corners; i++) {
+ corners = &prbcpr_stats->rbcpr_corners[i];
+ corners->efuse_adjustment = msm_rpmrbcpr_read_data(
+ pdata->regbase, offset++);
+ for (j = 0; j < prbcpr_stats->num_latest_recommends; j++) {
+ rcmnd = &corners->rpm_rcmnd[j];
+ rcmnd->voltage = msm_rpmrbcpr_read_data(
+ pdata->regbase, offset++);
+ rcmnd->timestamp = msm_rpmrbcpr_read_data(
+ pdata->regbase, offset++);
+ }
+ sort(&corners->rpm_rcmnd[0],
+ prbcpr_stats->num_latest_recommends,
+ sizeof(struct msm_rpmrbcpr_recmnd),
+ msm_rpmrbcpr_cmp_func, NULL);
+ corners->programmed_voltage = msm_rpmrbcpr_read_data(
+ pdata->regbase, offset++);
+ corners->isr_counter = msm_rpmrbcpr_read_data(pdata->regbase,
+ offset++);
+ corners->min_counter = msm_rpmrbcpr_read_data(pdata->regbase,
+ offset++);
+ corners->max_counter = msm_rpmrbcpr_read_data(pdata->regbase,
+ offset++);
+ }
+ prbcpr_stats->current_corner = msm_rpmrbcpr_read_data(pdata->regbase,
+ offset++);
+ prbcpr_stats->railway_voltage = msm_rpmrbcpr_read_data
+ (pdata->regbase, offset++);
+ prbcpr_stats->enable = msm_rpmrbcpr_read_data(pdata->regbase, offset++);
+}
+
+static int msm_rpmrbcpr_file_read(struct file *file, char __user *bufu,
+ size_t count, loff_t *ppos)
+{
+ struct msm_rpmrbcpr_stats_internal *pdata = file->private_data;
+ int ret;
+ int status_counter;
+
+ if (!pdata) {
+ pr_info("%s pdata is null", __func__);
+ return -EINVAL;
+ }
+
+ if (!bufu || count < 0) {
+ pr_info("%s count %d ", __func__, count);
+ return -EINVAL;
+ }
+
+ if (*ppos > pdata->len || !pdata->len) {
+ /* Read RPM stats */
+ status_counter = readl_relaxed(pdata->regbase +
+ offsetof(struct msm_rpmrbcpr_stats, status_count));
+ if (status_counter != rbcpr_stats.status_count) {
+ spin_lock(&rpm_rbcpr_lock);
+ msm_rpmrbcpr_copy_data(pdata, &rbcpr_stats);
+ rbcpr_stats.status_count = status_counter;
+ spin_unlock(&rpm_rbcpr_lock);
+ }
+ pdata->len = msm_rpmrbcpr_print_buf(&rbcpr_stats,
+ &rbcpr_design_data, pdata->buf);
+ *ppos = 0;
+ }
+ /* copy to user data */
+ ret = simple_read_from_buffer(bufu, count, ppos, pdata->buf,
+ pdata->len);
+ return ret;
+}
+
+static void msm_rpmrbcpr_free_mem(struct msm_rpmrbcpr_stats_internal *pvtdata,
+ struct msm_rpmrbcpr_stats *prbcpr_stats)
+{
+ int i;
+ if (pvtdata->regbase)
+ iounmap(pvtdata->regbase);
+
+
+ if (prbcpr_stats) {
+ for (i = 0; i < prbcpr_stats->num_corners; i++) {
+ kfree(prbcpr_stats->rbcpr_corners[i].rpm_rcmnd);
+ prbcpr_stats->rbcpr_corners[i].rpm_rcmnd = NULL;
+ }
+
+ kfree(prbcpr_stats->rbcpr_corners);
+ prbcpr_stats->rbcpr_corners = NULL;
+ }
+}
+
+static int msm_rpmrbcpr_allocate_mem(struct msm_rpmrbcpr_platform_data *pdata,
+ struct resource *res)
+{
+ int i;
+
+ pvtdata.regbase = ioremap(res->start, (res->end - res->start + 1));
+ memcpy(&rbcpr_design_data, &pdata->rbcpr_data,
+ sizeof(struct msm_rpmrbcpr_design_data));
+
+
+ rbcpr_stats.num_corners = readl_relaxed(pvtdata.regbase +
+ offsetof(struct msm_rpmrbcpr_stats, num_corners));
+ rbcpr_stats.num_latest_recommends = readl_relaxed(pvtdata.regbase +
+ offsetof(struct msm_rpmrbcpr_stats,
+ num_latest_recommends));
+
+ rbcpr_stats.rbcpr_corners = kzalloc(
+ sizeof(struct msm_rpmrbcpr_corners)
+ * rbcpr_stats.num_corners, GFP_KERNEL);
+
+ if (!rbcpr_stats.rbcpr_corners) {
+ msm_rpmrbcpr_free_mem(&pvtdata, &rbcpr_stats);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < rbcpr_stats.num_corners; i++) {
+ rbcpr_stats.rbcpr_corners[i].rpm_rcmnd =
+ kzalloc(sizeof(struct msm_rpmrbcpr_recmnd)
+ * rbcpr_stats.num_latest_recommends,
+ GFP_KERNEL);
+
+ if (!rbcpr_stats.rbcpr_corners[i].rpm_rcmnd) {
+ msm_rpmrbcpr_free_mem(&pvtdata, &rbcpr_stats);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static int msm_rpmrbcpr_file_open(struct inode *inode, struct file *file)
+{
+ file->private_data = &pvtdata;
+ pvtdata.len = 0;
+
+ if (!pvtdata.regbase)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int msm_rpmrbcpr_file_close(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations msm_rpmrbcpr_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_rpmrbcpr_file_open,
+ .read = msm_rpmrbcpr_file_read,
+ .release = msm_rpmrbcpr_file_close,
+ .llseek = no_llseek,
+};
+
+static int __devinit msm_rpmrbcpr_probe(struct platform_device *pdev)
+{
+ struct dentry *dent;
+ struct msm_rpmrbcpr_platform_data *pdata;
+ int ret = 0;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata)
+ return -EINVAL;
+ dent = debugfs_create_file("rpm_rbcpr", S_IRUGO, NULL,
+ pdev->dev.platform_data, &msm_rpmrbcpr_fops);
+
+ if (!dent) {
+ pr_err("%s: ERROR debugfs_create_file failed\n", __func__);
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, dent);
+ ret = msm_rpmrbcpr_allocate_mem(pdata, pdev->resource);
+ return ret;
+}
+
+static int __devexit msm_rpmrbcpr_remove(struct platform_device *pdev)
+{
+ struct dentry *dent;
+
+ msm_rpmrbcpr_free_mem(&pvtdata, &rbcpr_stats);
+ dent = platform_get_drvdata(pdev);
+ debugfs_remove(dent);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver msm_rpmrbcpr_driver = {
+ .probe = msm_rpmrbcpr_probe,
+ .remove = __devexit_p(msm_rpmrbcpr_remove),
+ .driver = {
+ .name = "msm_rpm_rbcpr",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msm_rpmrbcpr_init(void)
+{
+ return platform_driver_register(&msm_rpmrbcpr_driver);
+}
+
+static void __exit msm_rpmrbcpr_exit(void)
+{
+ platform_driver_unregister(&msm_rpmrbcpr_driver);
+}
+
+module_init(msm_rpmrbcpr_init);
+module_exit(msm_rpmrbcpr_exit);
diff --git a/arch/arm/mach-msm/rpm_rbcpr_stats.h b/arch/arm/mach-msm/rpm_rbcpr_stats.h
new file mode 100644
index 0000000..55644d0
--- /dev/null
+++ b/arch/arm/mach-msm/rpm_rbcpr_stats.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_RBCPR_STATS_H
+#define __ARCH_ARM_MACH_MSM_RPM_RBCPR_STATS_H
+
+#include <linux/types.h>
+
+struct msm_rpmrbcpr_design_data {
+ u32 upside_steps;
+ u32 downside_steps;
+ int svs_voltage;
+ int nominal_voltage;
+ int turbo_voltage;
+};
+
+struct msm_rpmrbcpr_platform_data {
+ struct msm_rpmrbcpr_design_data rbcpr_data;
+};
+#endif
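
The rpm_rbcpr driver above binds purely by platform-device name and reads the stats layout out of the RPM-shared region handed to it as a memory resource. A hypothetical board-file hookup, with illustrative addresses and voltages that are not taken from this patch:

    /* Hypothetical registration sketch for the "msm_rpm_rbcpr" driver. */
    #include <linux/platform_device.h>
    #include <linux/ioport.h>
    #include "rpm_rbcpr_stats.h"

    static struct msm_rpmrbcpr_platform_data rbcpr_pdata = {
        .rbcpr_data = {
            .upside_steps    = 1,
            .downside_steps  = 2,
            .svs_voltage     = 1050000,     /* uV, illustrative */
            .nominal_voltage = 1162500,
            .turbo_voltage   = 1287500,
        },
    };

    static struct resource rbcpr_res = {
        .start = 0x001089F4,                /* illustrative message RAM offset */
        .end   = 0x001089F4 + 0x19C - 1,
        .flags = IORESOURCE_MEM,
    };

    static struct platform_device rbcpr_stats_device = {
        .name          = "msm_rpm_rbcpr",   /* must match the driver name */
        .id            = -1,
        .num_resources = 1,
        .resource      = &rbcpr_res,
        .dev           = { .platform_data = &rbcpr_pdata },
    };

    /* Board init code would then call platform_device_register(&rbcpr_stats_device). */
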
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 817c2dc..bea567b 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -628,8 +628,7 @@
static void * __init setup_dummy_socinfo(void)
{
- if (machine_is_msm8960_rumi3() || machine_is_msm8960_sim() ||
- machine_is_msm8960_cdp())
+ if (machine_is_msm8960_cdp())
dummy_socinfo.id = 87;
else if (machine_is_apq8064_rumi3() || machine_is_apq8064_sim())
dummy_socinfo.id = 109;
@@ -758,7 +757,6 @@
return 1;
if (read_cpuid_mpidr() & BIT(30) &&
- !machine_is_msm8960_sim() &&
!machine_is_apq8064_sim())
return 1;
@@ -768,9 +766,6 @@
const int read_msm_cpu_type(void)
{
- if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3())
- return MSM_CPU_8960;
-
if (socinfo_get_msm_cpu() != MSM_CPU_UNKNOWN)
return socinfo_get_msm_cpu();
@@ -793,6 +788,10 @@
case 0x510F06F0:
return MSM_CPU_8064;
+ case 0x511F06F1:
+ case 0x512F06F0:
+ return MSM_CPU_8974;
+
default:
return MSM_CPU_UNKNOWN;
};
diff --git a/arch/arm/mach-msm/wcnss-ssr-8960.c b/arch/arm/mach-msm/wcnss-ssr-8960.c
index 4295d9b..318523b 100644
--- a/arch/arm/mach-msm/wcnss-ssr-8960.c
+++ b/arch/arm/mach-msm/wcnss-ssr-8960.c
@@ -132,6 +132,7 @@
{
pil_force_shutdown("wcnss");
flush_delayed_work(&cancel_vote_work);
+ wcnss_flush_delayed_boot_votes();
disable_irq_nosync(RIVA_APSS_WDOG_BITE_RESET_RDY_IRQ);
return 0;
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 9107231..94b1e61 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -856,8 +856,10 @@
case 0x08000000: /* ldm or stm, or thumb-2 32bit instruction */
if (thumb2_32b)
handler = do_alignment_t32_to_handler(&instr, regs, &offset);
- else
+ else {
handler = do_alignment_ldmstm;
+ offset.un = 0;
+ }
break;
default:
diff --git a/drivers/bluetooth/hci_ibs.c b/drivers/bluetooth/hci_ibs.c
index 6845020..fb084f5 100644
--- a/drivers/bluetooth/hci_ibs.c
+++ b/drivers/bluetooth/hci_ibs.c
@@ -37,9 +37,7 @@
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
-#include <linux/ftrace.h>
#include <linux/poll.h>
-#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/tty.h>
@@ -94,15 +92,6 @@
HCI_IBS_RX_VOTE_CLOCK_OFF,
};
-/* HCI_IBS state for the WorkQueue */
-enum hci_ibs_wq_state_e {
- HCI_IBS_WQ_INIT_STATE = 0,
- HCI_IBS_WQ_TX_VOTE_OFF,
- HCI_IBS_WQ_RX_VOTE_OFF,
- HCI_IBS_WQ_AWAKE_RX,
- HCI_IBS_WQ_AWAKE_DEVICE,
-};
-
static unsigned long wake_retrans = 1;
static unsigned long tx_idle_delay = (HZ * 2);
@@ -123,11 +112,6 @@
unsigned long rx_vote; /* clock must be on for RX */
struct timer_list tx_idle_timer;
struct timer_list wake_retrans_timer;
- struct workqueue_struct *workqueue;
- struct work_struct ws_ibs;
- unsigned long ibs_wq_state;
- void *ibs_hu; /* keeps the hci_uart pointer for reference */
-
/* debug */
unsigned long ibs_sent_wacks;
unsigned long ibs_sent_slps;
@@ -258,56 +242,6 @@
return err;
}
-static void ibs_wq(struct work_struct *work)
-{
- unsigned long flags = 0;
- struct ibs_struct *ibs = container_of(work, struct ibs_struct,
- ws_ibs);
- struct hci_uart *hu = (struct hci_uart *)ibs->ibs_hu;
-
- BT_DBG("hu %p, ibs_wq state: %lu\n", hu, ibs->ibs_wq_state);
-
- /* lock hci_ibs state */
- spin_lock_irqsave(&ibs->hci_ibs_lock, flags);
-
- switch (ibs->ibs_wq_state) {
- case HCI_IBS_WQ_AWAKE_DEVICE:
- /* Vote for serial clock */
- ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
-
- /* send wake indication to device */
- if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
- BT_ERR("cannot send WAKE to device");
-
- ibs->ibs_sent_wakes++; /* debug */
-
- /* start retransmit timer */
- mod_timer(&ibs->wake_retrans_timer, jiffies + wake_retrans);
- break;
- case HCI_IBS_WQ_AWAKE_RX:
- ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
- ibs->rx_ibs_state = HCI_IBS_RX_AWAKE;
-
- if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
- BT_ERR("cannot acknowledge device wake up");
-
- ibs->ibs_sent_wacks++; /* debug */
- /* actually send the packets */
- hci_uart_tx_wakeup(hu);
- break;
- case HCI_IBS_WQ_RX_VOTE_OFF:
- ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
- break;
- case HCI_IBS_WQ_TX_VOTE_OFF:
- ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
- break;
- default:
- BT_DBG("Invalid state in ibs workqueue");
- break;
- }
- spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
-}
-
static void hci_ibs_tx_idle_timeout(unsigned long arg)
{
struct hci_uart *hu = (struct hci_uart *) arg;
@@ -348,9 +282,7 @@
spin_lock_irqsave_nested(&ibs->hci_ibs_lock,
flags, SINGLE_DEPTH_NESTING);
- /* vote off tx clock */
- ibs->ibs_wq_state = HCI_IBS_WQ_TX_VOTE_OFF;
- queue_work(ibs->workqueue, &ibs->ws_ibs);
+ ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
out:
spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
}
@@ -404,16 +336,6 @@
skb_queue_head_init(&ibs->txq);
skb_queue_head_init(&ibs->tx_wait_q);
spin_lock_init(&ibs->hci_ibs_lock);
- ibs->workqueue = create_singlethread_workqueue("ibs_wq");
- if (!ibs->workqueue) {
- BT_ERR("IBS Workqueue not initialized properly");
- kfree(ibs);
- return -ENOMEM;
- }
-
- INIT_WORK(&ibs->ws_ibs, ibs_wq);
- ibs->ibs_hu = (void *)hu;
- ibs->ibs_wq_state = HCI_IBS_WQ_INIT_STATE;
/* Assume we start with both sides asleep -- extra wakes OK */
ibs->tx_ibs_state = HCI_IBS_TX_ASLEEP;
@@ -510,8 +432,6 @@
skb_queue_purge(&ibs->txq);
del_timer(&ibs->tx_idle_timer);
del_timer(&ibs->wake_retrans_timer);
- destroy_workqueue(ibs->workqueue);
- ibs->ibs_hu = NULL;
kfree_skb(ibs->rx_skb);
@@ -543,11 +463,9 @@
/* Make sure clock is on - we may have turned clock off since
* receiving the wake up indicator
*/
- /* awake rx clock */
- ibs->ibs_wq_state = HCI_IBS_WQ_AWAKE_RX;
- queue_work(ibs->workqueue, &ibs->ws_ibs);
- spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
- return;
+ ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
+ ibs->rx_ibs_state = HCI_IBS_RX_AWAKE;
+ /* deliberate fall-through */
case HCI_IBS_RX_AWAKE:
/* Always acknowledge device wake up,
* sending IBS message doesn't count as TX ON.
@@ -592,9 +510,7 @@
case HCI_IBS_RX_AWAKE:
/* update state */
ibs->rx_ibs_state = HCI_IBS_RX_ASLEEP;
- /* vote off rx clock under workqueue */
- ibs->ibs_wq_state = HCI_IBS_WQ_RX_VOTE_OFF;
- queue_work(ibs->workqueue, &ibs->ws_ibs);
+ ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
break;
case HCI_IBS_RX_ASLEEP:
/* deliberate fall-through */
@@ -679,12 +595,20 @@
case HCI_IBS_TX_ASLEEP:
BT_DBG("device asleep, waking up and queueing packet");
+ ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
/* save packet for later */
skb_queue_tail(&ibs->tx_wait_q, skb);
+ /* awake device */
+ if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
+ BT_ERR("cannot send WAKE to device");
+ break;
+ }
+ ibs->ibs_sent_wakes++; /* debug */
+
+ /* start retransmit timer */
+ mod_timer(&ibs->wake_retrans_timer, jiffies + wake_retrans);
+
ibs->tx_ibs_state = HCI_IBS_TX_WAKING;
- /* schedule a work queue to wake up device */
- ibs->ibs_wq_state = HCI_IBS_WQ_AWAKE_DEVICE;
- queue_work(ibs->workqueue, &ibs->ws_ibs);
break;
case HCI_IBS_TX_WAKING:
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index c0b82df..b70efe3 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -28,11 +28,6 @@
int signal_type;
};
-#define DIAG_CON_APSS (0x0001) /* Bit mask for APSS */
-#define DIAG_CON_MPSS (0x0002) /* Bit mask for MPSS */
-#define DIAG_CON_LPASS (0x0004) /* Bit mask for LPASS */
-#define DIAG_CON_WCNSS (0x0008) /* Bit mask for WCNSS */
-
enum {
DIAG_DCI_NO_ERROR = 1001, /* No error */
DIAG_DCI_NO_REG, /* Could not register */
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 2f356f0..95a85f2a 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -58,6 +58,11 @@
#define DIAG_CTRL_MSG_F3_MASK 11
#define CONTROL_CHAR 0x7E
+#define DIAG_CON_APSS (0x0001) /* Bit mask for APSS */
+#define DIAG_CON_MPSS (0x0002) /* Bit mask for MPSS */
+#define DIAG_CON_LPASS (0x0004) /* Bit mask for LPASS */
+#define DIAG_CON_WCNSS (0x0008) /* Bit mask for WCNSS */
+
/* Maximum number of pkt reg supported at initialization*/
extern unsigned int diag_max_reg;
extern unsigned int diag_threshold_reg;
@@ -224,6 +229,9 @@
struct work_struct diag_qdsp_mask_update_work;
struct work_struct diag_wcnss_mask_update_work;
struct work_struct diag_read_smd_dci_work;
+ struct work_struct diag_clean_modem_reg_work;
+ struct work_struct diag_clean_lpass_reg_work;
+ struct work_struct diag_clean_wcnss_reg_work;
uint8_t *msg_masks;
uint8_t *log_masks;
int log_masks_length;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 547f42f..240a514 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -1268,6 +1268,12 @@
diag_read_smd_wcnss_cntl_work_fn);
INIT_WORK(&(driver->diag_read_smd_dci_work),
diag_read_smd_dci_work_fn);
+ INIT_WORK(&(driver->diag_clean_modem_reg_work),
+ diag_clean_modem_reg_fn);
+ INIT_WORK(&(driver->diag_clean_lpass_reg_work),
+ diag_clean_lpass_reg_fn);
+ INIT_WORK(&(driver->diag_clean_wcnss_reg_work),
+ diag_clean_wcnss_reg_fn);
diag_debugfs_init();
diagfwd_init();
diagfwd_cntl_init();
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 384c1bf..b228276 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1735,8 +1735,8 @@
static void diag_smd_notify(void *ctxt, unsigned event)
{
if (event == SMD_EVENT_CLOSE) {
- pr_info("diag: clean modem registration\n");
- diag_clear_reg(MODEM_PROC);
+ queue_work(driver->diag_cntl_wq,
+ &(driver->diag_clean_modem_reg_work));
driver->ch = 0;
return;
} else if (event == SMD_EVENT_OPEN) {
@@ -1750,8 +1750,8 @@
static void diag_smd_qdsp_notify(void *ctxt, unsigned event)
{
if (event == SMD_EVENT_CLOSE) {
- pr_info("diag: clean lpass registration\n");
- diag_clear_reg(QDSP_PROC);
+ queue_work(driver->diag_cntl_wq,
+ &(driver->diag_clean_lpass_reg_work));
driver->chqdsp = 0;
return;
} else if (event == SMD_EVENT_OPEN) {
@@ -1765,8 +1765,8 @@
static void diag_smd_wcnss_notify(void *ctxt, unsigned event)
{
if (event == SMD_EVENT_CLOSE) {
- pr_info("diag: clean wcnss registration\n");
- diag_clear_reg(WCNSS_PROC);
+ queue_work(driver->diag_cntl_wq,
+ &(driver->diag_clean_wcnss_reg_work));
driver->ch_wcnss = 0;
return;
} else if (event == SMD_EVENT_OPEN) {
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index de1a5b5..95abd21 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -20,9 +20,34 @@
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
-
+/* tracks which peripheral is undergoing SSR */
+static uint16_t reg_dirty;
#define HDR_SIZ 8
+void diag_clean_modem_reg_fn(struct work_struct *work)
+{
+ pr_debug("diag: clean modem registration\n");
+ reg_dirty |= DIAG_CON_MPSS;
+ diag_clear_reg(MODEM_PROC);
+ reg_dirty ^= DIAG_CON_MPSS;
+}
+
+void diag_clean_lpass_reg_fn(struct work_struct *work)
+{
+ pr_debug("diag: clean lpass registration\n");
+ reg_dirty |= DIAG_CON_LPASS;
+ diag_clear_reg(QDSP_PROC);
+ reg_dirty ^= DIAG_CON_LPASS;
+}
+
+void diag_clean_wcnss_reg_fn(struct work_struct *work)
+{
+ pr_debug("diag: clean wcnss registration\n");
+ reg_dirty |= DIAG_CON_WCNSS;
+ diag_clear_reg(WCNSS_PROC);
+ reg_dirty ^= DIAG_CON_WCNSS;
+}
+
void diag_smd_cntl_notify(void *ctxt, unsigned event)
{
int r1, r2;
@@ -105,6 +130,8 @@
struct bindpkt_params *temp;
void *buf = NULL;
smd_channel_t *smd_ch = NULL;
+ /* tracks which peripheral is sending registration */
+ uint16_t reg_mask = 0;
if (pkt_params == NULL) {
pr_alert("diag: Memory allocation failure\n");
@@ -114,12 +141,15 @@
if (proc_num == MODEM_PROC) {
buf = driver->buf_in_cntl;
smd_ch = driver->ch_cntl;
+ reg_mask = DIAG_CON_MPSS;
} else if (proc_num == QDSP_PROC) {
buf = driver->buf_in_qdsp_cntl;
smd_ch = driver->chqdsp_cntl;
+ reg_mask = DIAG_CON_LPASS;
} else if (proc_num == WCNSS_PROC) {
buf = driver->buf_in_wcnss_cntl;
smd_ch = driver->ch_wcnss_cntl;
+ reg_mask = DIAG_CON_WCNSS;
}
if (!smd_ch || !buf) {
@@ -180,8 +210,16 @@
temp -= pkt_params->count;
pkt_params->params = temp;
flag = 1;
- diagchar_ioctl(NULL, DIAG_IOCTL_COMMAND_REG,
- (unsigned long)pkt_params);
+ /* peripheral undergoing SSR should not
+ * record new registration
+ */
+ if (!(reg_dirty & reg_mask))
+ diagchar_ioctl(NULL,
+ DIAG_IOCTL_COMMAND_REG, (unsigned long)
+ pkt_params);
+ else
+ pr_err("diag: drop reg proc %d\n",
+ proc_num);
kfree(temp);
}
buf = buf + HDR_SIZ + data_len;
@@ -275,6 +313,7 @@
void diagfwd_cntl_init(void)
{
+ reg_dirty = 0;
driver->polling_reg_flag = 0;
driver->diag_cntl_wq = create_singlethread_workqueue("diag_cntl_wq");
if (driver->buf_in_cntl == NULL) {
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index 743ddc1..59e5e6b 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -85,7 +85,9 @@
void diag_smd_cntl_notify(void *ctxt, unsigned event);
void diag_smd_qdsp_cntl_notify(void *ctxt, unsigned event);
void diag_smd_wcnss_cntl_notify(void *ctxt, unsigned event);
-
+void diag_clean_modem_reg_fn(struct work_struct *);
+void diag_clean_lpass_reg_fn(struct work_struct *);
+void diag_clean_wcnss_reg_fn(struct work_struct *);
void diag_debugfs_init(void);
void diag_debugfs_cleanup(void);
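
The reg_dirty mask added in diagfwd_cntl.c is a per-peripheral busy flag: each clean-up work function sets its DIAG_CON_* bit, clears the stale table entries, then toggles the bit back, while the registration path drops any entry whose reg_mask bit is still set. A minimal model of the pattern, assuming, as the patch does, that all three work items are serialized on the single-threaded diag_cntl_wq; that serialization is the only reason the XOR toggle is safe, and with concurrent workers an explicit "&= ~mask" under a lock would be needed instead:

    #include <linux/types.h>

    /* Minimal model of the reg_dirty gating above (illustrative only). */
    static uint16_t reg_dirty;

    static void clean_peripheral(uint16_t mask)
    {
        reg_dirty |= mask;      /* peripheral entering SSR clean-up */
        /* ... clear that peripheral's command registrations ... */
        reg_dirty ^= mask;      /* clean-up done, registrations accepted again */
    }

    static int may_register(uint16_t reg_mask)
    {
        /* Registrations arriving while their owner is dirty are dropped. */
        return !(reg_dirty & reg_mask);
    }
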
diff --git a/drivers/coresight/coresight-tpiu.c b/drivers/coresight/coresight-tpiu.c
index 4b52c4d..c0bcfdd 100644
--- a/drivers/coresight/coresight-tpiu.c
+++ b/drivers/coresight/coresight-tpiu.c
@@ -119,9 +119,17 @@
if (ret)
goto err_clk_rate;
+ /* Disable tpiu to support older targets that need this */
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ goto err_clk_enable;
+ __tpiu_disable();
+ clk_disable_unprepare(drvdata->clk);
+
dev_info(drvdata->dev, "TPIU initialized\n");
return 0;
+err_clk_enable:
err_clk_rate:
clk_put(drvdata->clk);
err_clk_get:
diff --git a/drivers/coresight/coresight.c b/drivers/coresight/coresight.c
index 055ef55..a17ac9a 100644
--- a/drivers/coresight/coresight.c
+++ b/drivers/coresight/coresight.c
@@ -20,71 +20,351 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
-#include <linux/mutex.h>
+#include <linux/semaphore.h>
#include <linux/clk.h>
#include <linux/coresight.h>
#include "coresight-priv.h"
+
+#define NO_SINK (-1)
#define MAX_STR_LEN (65535)
+static int curr_sink = NO_SINK;
static LIST_HEAD(coresight_orph_conns);
-static DEFINE_MUTEX(coresight_conns_mutex);
static LIST_HEAD(coresight_devs);
-static DEFINE_MUTEX(coresight_devs_mutex);
+static DEFINE_SEMAPHORE(coresight_mutex);
-int coresight_enable(struct coresight_device *csdev, int port)
+static int coresight_find_link_inport(struct coresight_device *csdev)
{
int i;
- int ret;
+ struct coresight_device *parent;
struct coresight_connection *conn;
- mutex_lock(&csdev->mutex);
- if (csdev->refcnt[port] == 0) {
- for (i = 0; i < csdev->nr_conns; i++) {
- conn = &csdev->conns[i];
- ret = coresight_enable(conn->child_dev,
- conn->child_port);
- if (ret)
- goto err_enable_child;
- }
- if (csdev->ops->enable)
- ret = csdev->ops->enable(csdev, port);
- if (ret)
- goto err_enable;
+ parent = container_of(csdev->path_link.next, struct coresight_device,
+ path_link);
+ for (i = 0; i < parent->nr_conns; i++) {
+ conn = &parent->conns[i];
+ if (conn->child_dev == csdev)
+ return conn->child_port;
}
- csdev->refcnt[port]++;
- mutex_unlock(&csdev->mutex);
+
+ pr_err("coresight: couldn't find inport, parent: %d, child: %d\n",
+ parent->id, csdev->id);
return 0;
-err_enable_child:
- while (i) {
- conn = &csdev->conns[--i];
- coresight_disable(conn->child_dev, conn->child_port);
+}
+
+static int coresight_find_link_outport(struct coresight_device *csdev)
+{
+ int i;
+ struct coresight_device *child;
+ struct coresight_connection *conn;
+
+ child = container_of(csdev->path_link.prev, struct coresight_device,
+ path_link);
+ for (i = 0; i < csdev->nr_conns; i++) {
+ conn = &csdev->conns[i];
+ if (conn->child_dev == child)
+ return conn->outport;
}
-err_enable:
- mutex_unlock(&csdev->mutex);
+
+ pr_err("coresight: couldn't find outport, parent: %d, child: %d\n",
+ csdev->id, child->id);
+ return 0;
+}
+
+static int coresight_enable_sink(struct coresight_device *csdev)
+{
+ int ret;
+
+ if (csdev->refcnt.sink_refcnt == 0) {
+ if (csdev->ops->sink_ops->enable) {
+ ret = csdev->ops->sink_ops->enable(csdev);
+ if (ret)
+ goto err;
+ csdev->enable = true;
+ }
+ }
+ csdev->refcnt.sink_refcnt++;
+
+ return 0;
+err:
+ return ret;
+}
+
+static void coresight_disable_sink(struct coresight_device *csdev)
+{
+ if (csdev->refcnt.sink_refcnt == 1) {
+ if (csdev->ops->sink_ops->disable) {
+ csdev->ops->sink_ops->disable(csdev);
+ csdev->enable = false;
+ }
+ }
+ csdev->refcnt.sink_refcnt--;
+}
+
+static int coresight_enable_link(struct coresight_device *csdev)
+{
+ int ret;
+ int link_subtype;
+ int refport, inport, outport;
+
+ inport = coresight_find_link_inport(csdev);
+ outport = coresight_find_link_outport(csdev);
+
+ link_subtype = csdev->subtype.link_subtype;
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
+ refport = inport;
+ else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
+ refport = outport;
+ else
+ refport = 0;
+
+ if (csdev->refcnt.link_refcnts[refport] == 0) {
+ if (csdev->ops->link_ops->enable) {
+ ret = csdev->ops->link_ops->enable(csdev, inport,
+ outport);
+ if (ret)
+ goto err;
+ csdev->enable = true;
+ }
+ }
+ csdev->refcnt.link_refcnts[refport]++;
+
+ return 0;
+err:
+ return ret;
+}
+
+static void coresight_disable_link(struct coresight_device *csdev)
+{
+ int link_subtype;
+ int refport, inport, outport;
+
+ inport = coresight_find_link_inport(csdev);
+ outport = coresight_find_link_outport(csdev);
+
+ link_subtype = csdev->subtype.link_subtype;
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
+ refport = inport;
+ else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
+ refport = outport;
+ else
+ refport = 0;
+
+ if (csdev->refcnt.link_refcnts[refport] == 1) {
+ if (csdev->ops->link_ops->disable) {
+ csdev->ops->link_ops->disable(csdev, inport, outport);
+ csdev->enable = false;
+ }
+ }
+ csdev->refcnt.link_refcnts[refport]--;
+}
+
+static int coresight_enable_source(struct coresight_device *csdev)
+{
+ int ret;
+
+ if (csdev->refcnt.source_refcnt == 0) {
+ if (csdev->ops->source_ops->enable) {
+ ret = csdev->ops->source_ops->enable(csdev);
+ if (ret)
+ goto err;
+ csdev->enable = true;
+ }
+ }
+ csdev->refcnt.source_refcnt++;
+
+ return 0;
+err:
+ return ret;
+}
+
+static void coresight_disable_source(struct coresight_device *csdev)
+{
+ if (csdev->refcnt.source_refcnt == 1) {
+ if (csdev->ops->source_ops->disable) {
+ csdev->ops->source_ops->disable(csdev);
+ csdev->enable = false;
+ }
+ }
+ csdev->refcnt.source_refcnt--;
+}
+
+static struct list_head *coresight_build_path(struct coresight_device *csdev,
+ struct list_head *path)
+{
+ int i;
+ struct list_head *p;
+ struct coresight_connection *conn;
+
+ if (csdev->id == curr_sink) {
+ list_add_tail(&csdev->path_link, path);
+ return path;
+ }
+
+ for (i = 0; i < csdev->nr_conns; i++) {
+ conn = &csdev->conns[i];
+ p = coresight_build_path(conn->child_dev, path);
+ if (p) {
+ list_add_tail(&csdev->path_link, p);
+ return p;
+ }
+ }
+ return NULL;
+}
+
+static void coresight_release_path(struct list_head *path)
+{
+ struct coresight_device *cd, *temp;
+
+ list_for_each_entry_safe(cd, temp, path, path_link)
+ list_del(&cd->path_link);
+}
+
+static int coresight_enable_path(struct list_head *path, bool incl_source)
+{
+ int ret = 0;
+ struct coresight_device *cd;
+
+ list_for_each_entry(cd, path, path_link) {
+ if (cd == list_first_entry(path, struct coresight_device,
+ path_link)) {
+ ret = coresight_enable_sink(cd);
+ } else if (list_is_last(&cd->path_link, path)) {
+ if (incl_source)
+ ret = coresight_enable_source(cd);
+ } else {
+ ret = coresight_enable_link(cd);
+ }
+ if (ret)
+ goto err;
+ }
+ return 0;
+err:
+ list_for_each_entry_continue_reverse(cd, path, path_link) {
+ if (cd == list_first_entry(path, struct coresight_device,
+ path_link)) {
+ coresight_disable_sink(cd);
+ } else if (list_is_last(&cd->path_link, path)) {
+ if (incl_source)
+ coresight_disable_source(cd);
+ } else {
+ coresight_disable_link(cd);
+ }
+ }
+ return ret;
+}
+
+static void coresight_disable_path(struct list_head *path, bool incl_source)
+{
+ struct coresight_device *cd;
+
+ list_for_each_entry(cd, path, path_link) {
+ if (cd == list_first_entry(path, struct coresight_device,
+ path_link)) {
+ coresight_disable_sink(cd);
+ } else if (list_is_last(&cd->path_link, path)) {
+ if (incl_source)
+ coresight_disable_source(cd);
+ } else {
+ coresight_disable_link(cd);
+ }
+ }
+}
+
+static int coresight_switch_sink(struct coresight_device *csdev)
+{
+ int ret = 0;
+ LIST_HEAD(path);
+ struct coresight_device *cd;
+
+ if (IS_ERR_OR_NULL(csdev))
+ return -EINVAL;
+
+ down(&coresight_mutex);
+ if (csdev->id == curr_sink)
+ goto out;
+
+ list_for_each_entry(cd, &coresight_devs, dev_link) {
+ if (cd->type == CORESIGHT_DEV_TYPE_SOURCE && cd->enable) {
+ coresight_build_path(cd, &path);
+ coresight_disable_path(&path, false);
+ coresight_release_path(&path);
+ }
+ }
+ curr_sink = csdev->id;
+ list_for_each_entry(cd, &coresight_devs, dev_link) {
+ if (cd->type == CORESIGHT_DEV_TYPE_SOURCE && cd->enable) {
+ coresight_build_path(cd, &path);
+ ret = coresight_enable_path(&path, false);
+ coresight_release_path(&path);
+ if (ret)
+ goto err;
+ }
+ }
+out:
+ up(&coresight_mutex);
+ return 0;
+err:
+ list_for_each_entry(cd, &coresight_devs, dev_link) {
+ if (cd->type == CORESIGHT_DEV_TYPE_SOURCE && cd->enable)
+ coresight_disable_source(cd);
+ }
+ pr_err("coresight: sink switch failed, sources disabled; try again\n");
+ return ret;
+}
+
+int coresight_enable(struct coresight_device *csdev)
+{
+ int ret = 0;
+ LIST_HEAD(path);
+
+ if (IS_ERR_OR_NULL(csdev))
+ return -EINVAL;
+
+ down(&coresight_mutex);
+ if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
+ ret = -EINVAL;
+ pr_err("coresight: wrong device type in %s\n", __func__);
+ goto out;
+ }
+ if (csdev->enable)
+ goto out;
+
+ coresight_build_path(csdev, &path);
+ ret = coresight_enable_path(&path, true);
+ coresight_release_path(&path);
+ if (ret)
+ pr_err("coresight: enable failed\n");
+out:
+ up(&coresight_mutex);
return ret;
}
EXPORT_SYMBOL(coresight_enable);
-void coresight_disable(struct coresight_device *csdev, int port)
+void coresight_disable(struct coresight_device *csdev)
{
- int i;
- struct coresight_connection *conn;
+ LIST_HEAD(path);
- mutex_lock(&csdev->mutex);
- if (csdev->refcnt[port] == 1) {
- if (csdev->ops->disable)
- csdev->ops->disable(csdev, port);
- for (i = 0; i < csdev->nr_conns; i++) {
- conn = &csdev->conns[i];
- coresight_disable(conn->child_dev, conn->child_port);
- }
+ if (IS_ERR_OR_NULL(csdev))
+ return;
+
+ down(&coresight_mutex);
+ if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
+ pr_err("coresight: wrong device type in %s\n", __func__);
+ goto out;
}
- csdev->refcnt[port]--;
- mutex_unlock(&csdev->mutex);
+ if (!csdev->enable)
+ goto out;
+
+ coresight_build_path(csdev, &path);
+ coresight_disable_path(&path, true);
+ coresight_release_path(&path);
+out:
+ up(&coresight_mutex);
}
EXPORT_SYMBOL(coresight_disable);
@@ -104,6 +384,39 @@
.dev_attrs = coresight_dev_attrs,
};
+static ssize_t coresight_show_curr_sink(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ csdev->id == curr_sink ? 1 : 0);
+}
+
+static ssize_t coresight_store_curr_sink(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = 0;
+ unsigned long val;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ if (val)
+ ret = coresight_switch_sink(csdev);
+ else
+ ret = -EINVAL;
+
+ if (ret)
+ return ret;
+ return size;
+}
+static DEVICE_ATTR(curr_sink, S_IRUGO | S_IWUSR, coresight_show_curr_sink,
+ coresight_store_curr_sink);
+
static ssize_t coresight_show_enable(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -124,9 +437,9 @@
return -EINVAL;
if (val)
- ret = coresight_enable(csdev, 0);
+ ret = coresight_enable(csdev);
else
- coresight_disable(csdev, 0);
+ coresight_disable(csdev);
if (ret)
return ret;
@@ -135,38 +448,55 @@
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, coresight_show_enable,
coresight_store_enable);
-static struct attribute *coresight_attrs[] = {
+static struct attribute *coresight_attrs_sink[] = {
+ &dev_attr_curr_sink.attr,
+ NULL,
+};
+
+static struct attribute_group coresight_attr_grp_sink = {
+ .attrs = coresight_attrs_sink,
+};
+
+static const struct attribute_group *coresight_attr_grps_sink[] = {
+ &coresight_attr_grp_sink,
+ NULL,
+};
+
+static struct attribute *coresight_attrs_source[] = {
&dev_attr_enable.attr,
NULL,
};
-static struct attribute_group coresight_attr_grp = {
- .attrs = coresight_attrs,
+static struct attribute_group coresight_attr_grp_source = {
+ .attrs = coresight_attrs_source,
};
-static const struct attribute_group *coresight_attr_grps[] = {
- &coresight_attr_grp,
+static const struct attribute_group *coresight_attr_grps_source[] = {
+ &coresight_attr_grp_source,
NULL,
};
-static struct device_type coresight_dev_type[CORESIGHT_DEV_TYPE_MAX] = {
+static struct device_type coresight_dev_type[] = {
{
- .name = "source",
- .groups = coresight_attr_grps,
+ .name = "sink",
+ .groups = coresight_attr_grps_sink,
},
{
.name = "link",
},
{
- .name = "sink",
- .groups = coresight_attr_grps,
+ .name = "linksink",
+ .groups = coresight_attr_grps_sink,
+ },
+ {
+ .name = "source",
+ .groups = coresight_attr_grps_source,
},
};
static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
- mutex_destroy(&csdev->mutex);
kfree(csdev);
}
@@ -174,14 +504,12 @@
{
struct coresight_connection *conn, *temp;
- mutex_lock(&coresight_conns_mutex);
list_for_each_entry_safe(conn, temp, &coresight_orph_conns, link) {
if (conn->child_id == csdev->id) {
conn->child_dev = csdev;
list_del(&conn->link);
}
}
- mutex_unlock(&coresight_conns_mutex);
}
static void coresight_fixup_device_conns(struct coresight_device *csdev)
@@ -192,21 +520,16 @@
for (i = 0; i < csdev->nr_conns; i++) {
found = false;
- mutex_lock(&coresight_devs_mutex);
- list_for_each_entry(cd, &coresight_devs, link) {
+ list_for_each_entry(cd, &coresight_devs, dev_link) {
if (csdev->conns[i].child_id == cd->id) {
csdev->conns[i].child_dev = cd;
found = true;
break;
}
}
- mutex_unlock(&coresight_devs_mutex);
- if (!found) {
- mutex_lock(&coresight_conns_mutex);
+ if (!found)
list_add_tail(&csdev->conns[i].link,
&coresight_orph_conns);
- mutex_unlock(&coresight_conns_mutex);
- }
}
}
@@ -214,7 +537,9 @@
{
int i;
int ret;
- int *refcnt;
+ int link_subtype;
+ int nr_refcnts;
+ int *refcnts = NULL;
struct coresight_device *csdev;
struct coresight_connection *conns;
@@ -224,28 +549,41 @@
goto err_kzalloc_csdev;
}
- mutex_init(&csdev->mutex);
csdev->id = desc->pdata->id;
- refcnt = kzalloc(sizeof(*refcnt) * desc->pdata->nr_ports, GFP_KERNEL);
- if (!refcnt) {
- ret = -ENOMEM;
- goto err_kzalloc_refcnt;
- }
- csdev->refcnt = refcnt;
+ if (desc->type == CORESIGHT_DEV_TYPE_LINK ||
+ desc->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+ link_subtype = desc->subtype.link_subtype;
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
+ nr_refcnts = desc->pdata->nr_inports;
+ else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
+ nr_refcnts = desc->pdata->nr_outports;
+ else
+ nr_refcnts = 1;
- csdev->nr_conns = desc->pdata->nr_children;
+ refcnts = kzalloc(sizeof(*refcnts) * nr_refcnts, GFP_KERNEL);
+ if (!refcnts) {
+ ret = -ENOMEM;
+ goto err_kzalloc_refcnts;
+ }
+ csdev->refcnt.link_refcnts = refcnts;
+ }
+
+ csdev->nr_conns = desc->pdata->nr_outports;
conns = kzalloc(sizeof(*conns) * csdev->nr_conns, GFP_KERNEL);
if (!conns) {
ret = -ENOMEM;
goto err_kzalloc_conns;
}
for (i = 0; i < csdev->nr_conns; i++) {
+ conns[i].outport = desc->pdata->outports[i];
conns[i].child_id = desc->pdata->child_ids[i];
conns[i].child_port = desc->pdata->child_ports[i];
}
csdev->conns = conns;
+ csdev->type = desc->type;
+ csdev->subtype = desc->subtype;
csdev->ops = desc->ops;
csdev->owner = desc->owner;
@@ -256,24 +594,34 @@
csdev->dev.release = coresight_device_release;
dev_set_name(&csdev->dev, "%s", desc->pdata->name);
+ down(&coresight_mutex);
+ if (desc->pdata->default_sink) {
+ if (curr_sink == NO_SINK) {
+ curr_sink = csdev->id;
+ } else {
+ ret = -EINVAL;
+ goto err_default_sink;
+ }
+ }
+
coresight_fixup_device_conns(csdev);
ret = device_register(&csdev->dev);
if (ret)
goto err_dev_reg;
coresight_fixup_orphan_conns(csdev);
- mutex_lock(&coresight_devs_mutex);
- list_add_tail(&csdev->link, &coresight_devs);
- mutex_unlock(&coresight_devs_mutex);
+ list_add_tail(&csdev->dev_link, &coresight_devs);
+ up(&coresight_mutex);
return csdev;
err_dev_reg:
put_device(&csdev->dev);
+err_default_sink:
+ up(&coresight_mutex);
kfree(conns);
err_kzalloc_conns:
- kfree(refcnt);
-err_kzalloc_refcnt:
- mutex_destroy(&csdev->mutex);
+ kfree(refcnts);
+err_kzalloc_refcnts:
kfree(csdev);
err_kzalloc_csdev:
return ERR_PTR(ret);
@@ -286,9 +634,7 @@
return;
if (get_device(&csdev->dev)) {
- mutex_lock(&csdev->mutex);
device_unregister(&csdev->dev);
- mutex_unlock(&csdev->mutex);
put_device(&csdev->dev);
}
}
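
The coresight_enable_sink/link/source helpers above all follow the same refcount-on-transition pattern: the hardware is touched only on the 0 -> 1 enable and the 1 -> 0 disable, with callers serialized by coresight_mutex. A standalone sketch of that pattern (illustrative, not driver code):

    /* Refcount-on-transition sketch: hardware ops only at the edges. */
    struct refcounted_block {
        int refcnt;
        int (*hw_enable)(void);
        void (*hw_disable)(void);
    };

    /* Callers must serialize these two functions (e.g. under a mutex). */
    static int block_enable(struct refcounted_block *b)
    {
        int ret = 0;

        if (b->refcnt == 0 && b->hw_enable)
            ret = b->hw_enable();       /* first user powers the block on */
        if (!ret)
            b->refcnt++;
        return ret;
    }

    static void block_disable(struct refcounted_block *b)
    {
        if (b->refcnt == 1 && b->hw_disable)
            b->hw_disable();            /* last user powers the block off */
        b->refcnt--;
    }
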
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 14070a7..fc51970 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -303,8 +303,8 @@
device->mmu.setstate_memory.gpuaddr +
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ pt_val = kgsl_mmu_pt_get_base_addr(device->mmu.hwpagetable);
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
- pt_val = kgsl_mmu_pt_get_base_addr(device->mmu.hwpagetable);
/*
* We need to perform the following operations for all
* IOMMU units
@@ -338,24 +338,6 @@
reg_pt_val,
device->mmu.setstate_memory.gpuaddr +
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
-
- /* set the asid */
- *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
- *cmds++ = reg_map_desc[i]->gpuaddr +
- (KGSL_IOMMU_CONTEXT_USER <<
- KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_CONTEXTIDR;
- *cmds++ = kgsl_mmu_get_hwpagetable_asid(&device->mmu);
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0x00000000;
-
- /* Read back asid to ensure above write completes */
- cmds += adreno_add_read_cmds(device, cmds,
- reg_map_desc[i]->gpuaddr +
- (KGSL_IOMMU_CONTEXT_USER <<
- KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_CONTEXTIDR,
- kgsl_mmu_get_hwpagetable_asid(&device->mmu),
- device->mmu.setstate_memory.gpuaddr +
- KGSL_IOMMU_SETSTATE_NOP_OFFSET);
}
/* invalidate all base pointers */
*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
@@ -367,15 +349,21 @@
}
if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
/*
- * tlb flush based on asid, no need to flush entire tlb
+ * tlb flush
*/
for (i = 0; i < num_iommu_units; i++) {
+ reg_pt_val = (pt_val &
+ (KGSL_IOMMU_TTBR0_PA_MASK <<
+ KGSL_IOMMU_TTBR0_PA_SHIFT)) +
+ kgsl_mmu_get_pt_lsb(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER);
+
*cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
*cmds++ = (reg_map_desc[i]->gpuaddr +
(KGSL_IOMMU_CONTEXT_USER <<
KGSL_IOMMU_CTX_SHIFT) +
- KGSL_IOMMU_CTX_TLBIASID);
- *cmds++ = kgsl_mmu_get_hwpagetable_asid(&device->mmu);
+ KGSL_IOMMU_CTX_TLBIALL);
+ *cmds++ = 1;
cmds += __adreno_add_idle_indirect_cmds(cmds,
device->mmu.setstate_memory.gpuaddr +
@@ -384,9 +372,8 @@
cmds += adreno_add_read_cmds(device, cmds,
reg_map_desc[i]->gpuaddr +
(KGSL_IOMMU_CONTEXT_USER <<
- KGSL_IOMMU_CTX_SHIFT) +
- KGSL_IOMMU_CONTEXTIDR,
- kgsl_mmu_get_hwpagetable_asid(&device->mmu),
+ KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_TTBR0,
+ reg_pt_val,
device->mmu.setstate_memory.gpuaddr +
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
}
@@ -948,6 +935,15 @@
kgsl_mmu_setstate(&device->mmu, adreno_context->pagetable,
KGSL_MEMSTORE_GLOBAL);
+ /* If iommu is used then we need to make sure that the iommu clocks
+ * are on since there could be commands in the pipeline that touch the iommu */
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+ ret = kgsl_mmu_enable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER);
+ if (ret)
+ goto done;
+ }
+
/* Do not try the bad commands if recovery has failed bad commands
* once already */
if (!try_bad_commands)
@@ -973,6 +969,18 @@
"Device start failed in recovery\n");
goto done;
}
+ if (context)
+ kgsl_mmu_setstate(&device->mmu,
+ adreno_context->pagetable,
+ KGSL_MEMSTORE_GLOBAL);
+
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+ ret = kgsl_mmu_enable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER);
+ if (ret)
+ goto done;
+ }
+
ret = idle_ret;
KGSL_DRV_ERR(device,
"Bad context commands hung in recovery\n");
@@ -1008,6 +1016,9 @@
}
}
done:
+ /* Turn off iommu clocks */
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
return ret;
}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index d54ce6b..86a349a 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -120,6 +120,8 @@
goto err;
}
+ continue;
+
err:
if (!adreno_dump_and_recover(rb->device))
wait_time = jiffies + wait_timeout;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 2a9f564..278be99 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -897,6 +897,9 @@
{
struct rb_node *node = private->mem_rb.rb_node;
+ if (!kgsl_mmu_gpuaddr_in_range(gpuaddr))
+ return NULL;
+
while (node != NULL) {
struct kgsl_mem_entry *entry;
@@ -1112,6 +1115,19 @@
goto done;
}
+ /*
+ * Put a reasonable upper limit on the number of IBs that can be
+ * submitted
+ */
+
+ if (param->numibs > 10000) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "Too many IBs submitted. count: %d max 10000\n",
+ param->numibs);
+ result = -EINVAL;
+ goto done;
+ }
+
ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
GFP_KERNEL);
if (!ibdesc) {
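
The numibs check above bounds a user-controlled count before it reaches kzalloc(); a short sketch of the general pattern, with a hypothetical MAX_IBS constant standing in for the hard-coded 10000:

    #include <linux/slab.h>

    #define MAX_IBS 10000       /* hypothetical cap, mirrors the check above */

    static void *alloc_ibdesc(unsigned int numibs, size_t elem_size)
    {
        /* Reject absurd counts before they multiply into a huge allocation. */
        if (numibs == 0 || numibs > MAX_IBS)
            return NULL;

        return kzalloc(numibs * elem_size, GFP_KERNEL);
    }
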
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 998eaab..edccff1 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -732,7 +732,6 @@
.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
.mmu_enable_clk = NULL,
.mmu_disable_clk_on_ts = NULL,
- .mmu_get_hwpagetable_asid = NULL,
.mmu_get_pt_lsb = NULL,
.mmu_get_reg_map_desc = NULL,
};
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 25d0463..016771b 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -292,14 +292,6 @@
struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
if (iommu_pt->domain)
iommu_domain_free(iommu_pt->domain);
- if (iommu_pt->iommu) {
- if ((KGSL_IOMMU_ASID_REUSE == iommu_pt->asid) &&
- iommu_pt->iommu->asid_reuse)
- iommu_pt->iommu->asid_reuse--;
- if (!iommu_pt->iommu->asid_reuse ||
- (KGSL_IOMMU_ASID_REUSE != iommu_pt->asid))
- clear_bit(iommu_pt->asid, iommu_pt->iommu->asids);
- }
kfree(iommu_pt);
}
@@ -621,20 +613,15 @@
unsigned int context_id)
{
if (mmu->flags & KGSL_FLAGS_STARTED) {
- struct kgsl_iommu *iommu = mmu->priv;
- struct kgsl_iommu_pt *iommu_pt = pagetable->priv;
/* page table not current, then setup mmu to use new
* specified page table
*/
if (mmu->hwpagetable != pagetable) {
unsigned int flags = 0;
mmu->hwpagetable = pagetable;
- /* force tlb flush if asid is reused */
- if (iommu->asid_reuse &&
- (KGSL_IOMMU_ASID_REUSE == iommu_pt->asid))
- flags |= KGSL_MMUFLAGS_TLBFLUSH;
flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
- mmu->device->id);
+ mmu->device->id) |
+ KGSL_MMUFLAGS_TLBFLUSH;
kgsl_setstate(mmu, context_id,
KGSL_MMUFLAGS_PTUPDATE | flags);
}
@@ -657,14 +644,6 @@
sizeof(struct kgsl_iommu));
return -ENOMEM;
}
- iommu->asids = kzalloc(BITS_TO_LONGS(KGSL_IOMMU_MAX_ASIDS) *
- sizeof(unsigned long), GFP_KERNEL);
- if (!iommu->asids) {
- KGSL_CORE_ERR("kzalloc(%d) failed\n",
- sizeof(struct kgsl_iommu));
- status = -ENOMEM;
- goto done;
- }
mmu->priv = iommu;
status = kgsl_get_iommu_ctxt(mmu);
@@ -684,7 +663,6 @@
__func__);
done:
if (status) {
- kfree(iommu->asids);
kfree(iommu);
mmu->priv = NULL;
}
@@ -718,7 +696,6 @@
goto err;
}
iommu_pt = mmu->priv_bank_table->priv;
- iommu_pt->asid = 1;
}
mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
/* Return error if the default pagetable doesn't exist */
@@ -740,14 +717,6 @@
goto err;
}
}
- /*
- * The dafault pagetable always has asid 0 assigned by the iommu driver
- * and asid 1 is assigned to the private context.
- */
- iommu_pt = mmu->defaultpagetable->priv;
- iommu_pt->asid = 0;
- set_bit(0, iommu->asids);
- set_bit(1, iommu->asids);
return status;
err:
for (i--; i >= 0; i--) {
@@ -818,12 +787,6 @@
*/
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
- /* Make sure that the ASID of the priv bank is set to 1.
- * When we a different pagetable for the priv bank then the
- * iommu driver sets the ASID to 0 instead of 1 */
- KGSL_IOMMU_SET_IOMMU_REG(iommu->iommu_units[i].reg_map.hostptr,
- KGSL_IOMMU_CONTEXT_PRIV,
- CONTEXTIDR, 1);
for (j = 0; j < iommu_unit->dev_count; j++)
iommu_unit->dev[j].pt_lsb = KGSL_IOMMMU_PT_LSB(
KGSL_IOMMU_GET_IOMMU_REG(
@@ -831,10 +794,6 @@
iommu_unit->dev[j].ctx_id,
TTBR0));
}
- iommu->asid = KGSL_IOMMU_GET_IOMMU_REG(
- iommu->iommu_units[0].reg_map.hostptr,
- KGSL_IOMMU_CONTEXT_USER,
- CONTEXTIDR);
kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
mmu->flags |= KGSL_FLAGS_STARTED;
@@ -955,7 +914,6 @@
kgsl_mmu_putpagetable(mmu->priv_bank_table);
if (mmu->defaultpagetable)
kgsl_mmu_putpagetable(mmu->defaultpagetable);
- kfree(iommu->asids);
kfree(iommu);
return 0;
@@ -981,47 +939,6 @@
}
/*
- * kgsl_iommu_get_hwpagetable_asid - Returns asid(application space ID) for a
- * pagetable
- * @mmu - Pointer to mmu structure
- *
- * Allocates an asid to a IOMMU domain if it does not already have one. asid's
- * are unique identifiers for pagetable that can be used to selectively flush
- * tlb entries of the IOMMU unit.
- * Return - asid to be used with the IOMMU domain
- */
-static int kgsl_iommu_get_hwpagetable_asid(struct kgsl_mmu *mmu)
-{
- struct kgsl_iommu *iommu = mmu->priv;
- struct kgsl_iommu_pt *iommu_pt = mmu->hwpagetable->priv;
-
- /*
- * If the iommu pagetable does not have any asid assigned and is not the
- * default pagetable then assign asid.
- */
- if (!iommu_pt->asid && iommu_pt != mmu->defaultpagetable->priv) {
- iommu_pt->asid = find_first_zero_bit(iommu->asids,
- KGSL_IOMMU_MAX_ASIDS);
- /* No free bits means reuse asid */
- if (iommu_pt->asid >= KGSL_IOMMU_MAX_ASIDS) {
- iommu_pt->asid = KGSL_IOMMU_ASID_REUSE;
- iommu->asid_reuse++;
- }
- set_bit(iommu_pt->asid, iommu->asids);
- /*
- * Store pointer to asids list so that during pagetable destroy
- * the asid assigned to this pagetable may be cleared
- */
- iommu_pt->iommu = iommu;
- }
- /* Return the asid + the constant part of asid that never changes */
- return (iommu_pt->asid & (KGSL_IOMMU_CONTEXTIDR_ASID_MASK <<
- KGSL_IOMMU_CONTEXTIDR_ASID_SHIFT)) +
- (iommu->asid & ~(KGSL_IOMMU_CONTEXTIDR_ASID_MASK <<
- KGSL_IOMMU_CONTEXTIDR_ASID_SHIFT));
-}
-
-/*
* kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush IOMMU tlb
* of the primary context bank
* @mmu - Pointer to mmu structure
@@ -1066,15 +983,6 @@
temp = KGSL_IOMMU_GET_IOMMU_REG(
iommu->iommu_units[i].reg_map.hostptr,
KGSL_IOMMU_CONTEXT_USER, TTBR0);
- /* Set asid */
- KGSL_IOMMU_SET_IOMMU_REG(
- iommu->iommu_units[i].reg_map.hostptr,
- KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR,
- kgsl_iommu_get_hwpagetable_asid(mmu));
- mb();
- temp = KGSL_IOMMU_GET_IOMMU_REG(
- iommu->iommu_units[i].reg_map.hostptr,
- KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR);
}
}
/* Flush tlb */
@@ -1082,8 +990,8 @@
for (i = 0; i < iommu->unit_count; i++) {
KGSL_IOMMU_SET_IOMMU_REG(
iommu->iommu_units[i].reg_map.hostptr,
- KGSL_IOMMU_CONTEXT_USER, CTX_TLBIASID,
- kgsl_iommu_get_hwpagetable_asid(mmu));
+ KGSL_IOMMU_CONTEXT_USER, CTX_TLBIALL,
+ 1);
mb();
}
}
@@ -1139,7 +1047,6 @@
.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
.mmu_enable_clk = kgsl_iommu_enable_clk,
.mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
- .mmu_get_hwpagetable_asid = kgsl_iommu_get_hwpagetable_asid,
.mmu_get_pt_lsb = kgsl_iommu_get_pt_lsb,
.mmu_get_reg_map_desc = kgsl_iommu_get_reg_map_desc,
};
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index 354a5cf..f14db93 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -23,15 +23,8 @@
#define KGSL_IOMMU_TTBR0_PA_MASK 0x0003FFFF
#define KGSL_IOMMU_TTBR0_PA_SHIFT 14
#define KGSL_IOMMU_CTX_TLBIALL 0x800
-#define KGSL_IOMMU_CONTEXTIDR 0x8
-#define KGSL_IOMMU_CONTEXTIDR_ASID_MASK 0xFF
-#define KGSL_IOMMU_CONTEXTIDR_ASID_SHIFT 0
-#define KGSL_IOMMU_CTX_TLBIASID 0x804
#define KGSL_IOMMU_CTX_SHIFT 12
-#define KGSL_IOMMU_MAX_ASIDS 256
-#define KGSL_IOMMU_ASID_REUSE 2
-
/*
* Max number of iommu units that the gpu core can have
* On APQ8064, KGSL can control a maximum of 2 IOMMU units.
@@ -106,10 +99,6 @@
* @clk_event_queued: Indicates whether an event to disable clocks
* is already queued or not
* @device: Pointer to kgsl device
- * @asids: A bit structure indicating which id's are presently used
- * @asid: Contains the initial value of IOMMU_CONTEXTIDR when a domain
- * is first attached
- * asid_reuse: Holds the number of times the reuse asid is reused
*/
struct kgsl_iommu {
struct kgsl_iommu_unit iommu_units[KGSL_IOMMU_MAX_UNITS];
@@ -117,21 +106,16 @@
unsigned int iommu_last_cmd_ts;
bool clk_event_queued;
struct kgsl_device *device;
- unsigned long *asids;
- unsigned int asid;
- unsigned int asid_reuse;
};
/*
* struct kgsl_iommu_pt - Iommu pagetable structure private to kgsl driver
* @domain: Pointer to the iommu domain that contains the iommu pagetable
* @iommu: Pointer to iommu structure
- * @asid: The asid assigned to this domain
*/
struct kgsl_iommu_pt {
struct iommu_domain *domain;
struct kgsl_iommu *iommu;
- unsigned int asid;
};
#endif
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index d06ce45..bc6ec8e 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -136,7 +136,6 @@
(struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
int (*mmu_enable_clk)
(struct kgsl_mmu *mmu, int ctx_id);
- int (*mmu_get_hwpagetable_asid)(struct kgsl_mmu *mmu);
int (*mmu_get_pt_lsb)(struct kgsl_mmu *mmu,
unsigned int unit_id,
enum kgsl_iommu_context_id ctx_id);
@@ -278,14 +277,6 @@
return 0;
}
-static inline int kgsl_mmu_get_hwpagetable_asid(struct kgsl_mmu *mmu)
-{
- if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_hwpagetable_asid)
- return mmu->mmu_ops->mmu_get_hwpagetable_asid(mmu);
- else
- return 0;
-}
-
static inline int kgsl_mmu_enable_clk(struct kgsl_mmu *mmu,
int ctx_id)
{
@@ -302,4 +293,10 @@
mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ts_valid);
}
+static inline int kgsl_mmu_gpuaddr_in_range(unsigned int gpuaddr)
+{
+ return ((gpuaddr >= KGSL_PAGETABLE_BASE) &&
+ (gpuaddr < (KGSL_PAGETABLE_BASE + kgsl_mmu_get_ptsize())));
+}
+
#endif /* __KGSL_MMU_H */
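Editor's note: the new kgsl_mmu_gpuaddr_in_range() helper is a plain bounds check against the GPU pagetable window. A minimal sketch of a hypothetical caller (assuming the usual kgsl_mmu.h definitions; example_validate_gpuaddr is illustrative only, not part of the patch):

/* Hypothetical caller: reject GPU addresses outside the managed window. */
static int example_validate_gpuaddr(unsigned int gpuaddr)
{
	if (!kgsl_mmu_gpuaddr_in_range(gpuaddr))
		return -ERANGE;	/* outside [KGSL_PAGETABLE_BASE, base + ptsize) */
	return 0;
}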
diff --git a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h b/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
index 7034cb0..070222e 100644
--- a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
+++ b/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
@@ -21,7 +21,7 @@
#define _MC_DRV_PLATFORM_H_
/** MobiCore Interrupt for Qualcomm */
-#define MC_INTR_SSIQ 218
+#define MC_INTR_SSIQ 280
/** Use SMC for fastcalls */
#define MC_SMC_FASTCALL
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index b050db2..53fec5b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -868,6 +868,16 @@
Provides interface for measuring the current on specific power rails
through the channels on ADC1158 ADC
+config SENSORS_QPNP_ADC_VOLTAGE
+ tristate "Support for Qualcomm QPNP Voltage ADC"
+ depends on SPMI
+ help
+	  This is the VADC arbiter driver for the Qualcomm QPNP ADC chip.
+
+	  The driver reads the HKADC and XOADC channels through the ADC AMUX
+	  arbiter, supports the conversion sequencer, and can read AMUX
+	  channels that have external pull-ups.
+
config SENSORS_PC87360
tristate "National Semiconductor PC87360 family"
depends on !PPC
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 228c4e9..2ff9454 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -130,6 +130,7 @@
obj-$(CONFIG_SENSORS_MSM_ADC) += msm_adc.o m_adcproc.o
obj-$(CONFIG_SENSORS_PM8XXX_ADC) += pm8xxx-adc.o pm8xxx-adc-scale.o
obj-$(CONFIG_SENSORS_EPM_ADC) += epm_adc.o
+obj-$(CONFIG_SENSORS_QPNP_ADC_VOLTAGE) += qpnp-adc-voltage.o qpnp-adc-common.o
obj-$(CONFIG_PMBUS) += pmbus/
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
new file mode 100644
index 0000000..c8fe798
--- /dev/null
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -0,0 +1,258 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/platform_device.h>
+
+/* Min ADC code represents 0V */
+#define QPNP_VADC_MIN_ADC_CODE 0x6000
+/* Max ADC code represents full-scale range of 1.8V */
+#define QPNP_VADC_MAX_ADC_CODE 0xA800
+
+int32_t qpnp_adc_scale_default(int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ bool negative_rawfromoffset = 0, negative_offset = 0;
+ int64_t scale_voltage = 0;
+
+ if (!chan_properties || !chan_properties->offset_gain_numerator ||
+ !chan_properties->offset_gain_denominator || !adc_properties
+ || !adc_chan_result)
+ return -EINVAL;
+
+ scale_voltage = (adc_code -
+ chan_properties->adc_graph[CALIB_ABSOLUTE].adc_gnd)
+ * chan_properties->adc_graph[CALIB_ABSOLUTE].dx;
+ if (scale_voltage < 0) {
+ negative_offset = 1;
+ scale_voltage = -scale_voltage;
+ }
+ do_div(scale_voltage,
+ chan_properties->adc_graph[CALIB_ABSOLUTE].dy);
+ if (negative_offset)
+ scale_voltage = -scale_voltage;
+ scale_voltage += chan_properties->adc_graph[CALIB_ABSOLUTE].dx;
+
+ if (scale_voltage < 0) {
+ if (adc_properties->bipolar) {
+ scale_voltage = -scale_voltage;
+ negative_rawfromoffset = 1;
+ } else {
+ scale_voltage = 0;
+ }
+ }
+
+ adc_chan_result->measurement = scale_voltage *
+ chan_properties->offset_gain_denominator;
+
+	/* do_div only performs positive integer division! */
+ do_div(adc_chan_result->measurement,
+ chan_properties->offset_gain_numerator);
+
+ if (negative_rawfromoffset)
+ adc_chan_result->measurement = -adc_chan_result->measurement;
+
+ /*
+ * Note: adc_chan_result->measurement is in the unit of
+ * adc_properties.adc_reference. For generic channel processing,
+ * channel measurement is a scale/ratio relative to the adc
+ * reference input
+ */
+ adc_chan_result->physical = adc_chan_result->measurement;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qpnp_adc_scale_default);
+
+int32_t qpnp_vadc_check_result(int32_t *data)
+{
+ if (*data < QPNP_VADC_MIN_ADC_CODE)
+ *data = QPNP_VADC_MIN_ADC_CODE;
+ else if (*data > QPNP_VADC_MAX_ADC_CODE)
+ *data = QPNP_VADC_MAX_ADC_CODE;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qpnp_vadc_check_result);
+
+int32_t qpnp_adc_get_devicetree_data(struct spmi_device *spmi,
+ struct qpnp_adc_drv *adc_qpnp)
+{
+ struct device_node *node = spmi->dev.of_node;
+ struct resource *res;
+ struct device_node *child;
+ struct qpnp_vadc_amux *adc_channel_list;
+ struct qpnp_adc_properties *adc_prop;
+ struct qpnp_vadc_amux_properties *amux_prop;
+	int count_adc_channel_list = 0, decimation, i = 0, rc = 0;
+
+ if (!node)
+ return -EINVAL;
+
+ for_each_child_of_node(node, child)
+ count_adc_channel_list++;
+
+ if (!count_adc_channel_list) {
+ pr_err("No channel listing\n");
+ return -EINVAL;
+ }
+
+ adc_qpnp->spmi = spmi;
+
+ adc_prop = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_properties),
+ GFP_KERNEL);
+ if (!adc_prop) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+ adc_channel_list = devm_kzalloc(&spmi->dev,
+ (sizeof(struct qpnp_vadc_amux) * count_adc_channel_list),
+ GFP_KERNEL);
+ if (!adc_channel_list) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ amux_prop = devm_kzalloc(&spmi->dev,
+ sizeof(struct qpnp_vadc_amux_properties) +
+ sizeof(struct qpnp_vadc_chan_properties), GFP_KERNEL);
+ if (!amux_prop) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(node, child) {
+		int channel_num, scaling, post_scaling, hw_settle_time;
+		int fast_avg_setup, calib_type, rc;
+ const char *calibration_param, *channel_name;
+
+ channel_name = of_get_property(child,
+ "label", NULL) ? : child->name;
+ if (!channel_name) {
+ pr_err("Invalid channel name\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(child, "qcom,channel-num",
+ &channel_num);
+ if (rc) {
+ pr_err("Invalid channel num\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(child, "qcom,decimation",
+ &decimation);
+ if (rc) {
+ pr_err("Invalid channel decimation property\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(child,
+ "qcom,pre-div-channel-scaling", &scaling);
+ if (rc) {
+ pr_err("Invalid channel scaling property\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(child,
+ "qcom,scale-function", &post_scaling);
+ if (rc) {
+ pr_err("Invalid channel post scaling property\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(child,
+ "qcom,hw-settle-time", &hw_settle_time);
+ if (rc) {
+ pr_err("Invalid channel hw settle time property\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(child,
+ "qcom,fast-avg-setup", &fast_avg_setup);
+ if (rc) {
+ pr_err("Invalid channel fast average setup\n");
+ return -EINVAL;
+ }
+ calibration_param = of_get_property(child,
+ "qcom,calibration-type", NULL);
+ if (!strncmp(calibration_param, "absolute", 8))
+ calib_type = CALIB_ABSOLUTE;
+		else if (!strncmp(calibration_param, "ratiometric", 11))
+ calib_type = CALIB_RATIOMETRIC;
+ else {
+ pr_err("%s: Invalid calibration property\n", __func__);
+ return -EINVAL;
+ }
+ /* Individual channel properties */
+ adc_channel_list[i].name = (char *)channel_name;
+ adc_channel_list[i].channel_num = channel_num;
+ adc_channel_list[i].chan_path_prescaling = scaling;
+ adc_channel_list[i].adc_decimation = decimation;
+ adc_channel_list[i].adc_scale_fn = post_scaling;
+ adc_channel_list[i].hw_settle_time = hw_settle_time;
+ adc_channel_list[i].fast_avg_setup = fast_avg_setup;
+ i++;
+ }
+ adc_qpnp->adc_channels = adc_channel_list;
+ adc_qpnp->amux_prop = amux_prop;
+
+ /* Get the ADC VDD reference voltage and ADC bit resolution */
+ rc = of_property_read_u32(node, "qcom,adc-vdd-reference",
+ &adc_prop->adc_vdd_reference);
+ if (rc) {
+ pr_err("Invalid adc vdd reference property\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(node, "qcom,adc-bit-resolution",
+ &adc_prop->bitresolution);
+ if (rc) {
+ pr_err("Invalid adc bit resolution property\n");
+ return -EINVAL;
+ }
+ adc_qpnp->adc_prop = adc_prop;
+
+ /* Get the peripheral address */
+ res = spmi_get_resource(spmi, 0, IORESOURCE_MEM, 0);
+ if (!res) {
+ pr_err("No base address definition\n");
+ return -EINVAL;
+ }
+
+ adc_qpnp->slave = spmi->sid;
+ adc_qpnp->offset = res->start;
+
+ /* Register the ADC peripheral interrupt */
+ adc_qpnp->adc_irq = spmi_get_irq(spmi, 0, 0);
+ if (adc_qpnp->adc_irq < 0) {
+ pr_err("Invalid irq\n");
+ return -ENXIO;
+ }
+
+ mutex_init(&adc_qpnp->adc_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_get_devicetree_data);
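Editor's note: qpnp_adc_scale_default() above is a straight linear interpolation against the absolute-calibration graph, followed by the channel pre-divider correction. A simplified sketch of that arithmetic with made-up numbers (adc_gnd, dy, dx and the pre-divider ratio are placeholders, not values taken from the driver):

/* Simplified model of the default scaling, with hypothetical constants. */
static long example_scale_default(long adc_code)
{
	long adc_gnd = 0x6000;		/* code measured at ground (assumed)     */
	long dy = 0x4000;		/* code delta across the 625mV reference */
	long dx = 625000;		/* reference delta in microvolts         */
	long pre_div_num = 1, pre_div_den = 3;	/* channel pre-divider (assumed) */
	long uv;

	uv = (adc_code - adc_gnd) * dx / dy;	/* raw code -> microvolts        */
	return uv * pre_div_den / pre_div_num;	/* undo the channel pre-divider  */
}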
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
new file mode 100644
index 0000000..8b2cb97
--- /dev/null
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -0,0 +1,784 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/platform_device.h>
+
+/* QPNP VADC register definition */
+#define QPNP_VADC_STATUS1 0x8
+#define QPNP_VADC_STATUS1_OP_MODE 4
+#define QPNP_VADC_STATUS1_MEAS_INTERVAL_EN_STS BIT(2)
+#define QPNP_VADC_STATUS1_REQ_STS BIT(1)
+#define QPNP_VADC_STATUS1_EOC BIT(0)
+#define QPNP_VADC_STATUS2 0x9
+#define QPNP_VADC_STATUS2_CONV_SEQ_STATE 6
+#define QPNP_VADC_STATUS2_FIFO_NOT_EMPTY_FLAG BIT(1)
+#define QPNP_VADC_STATUS2_CONV_SEQ_TIMEOUT_STS BIT(0)
+#define QPNP_VADC_STATUS2_CONV_SEQ_STATE_SHIFT 4
+#define QPNP_VADC_CONV_TIMEOUT_ERR 2
+
+#define QPNP_VADC_INT_SET_TYPE 0x11
+#define QPNP_VADC_INT_POLARITY_HIGH 0x12
+#define QPNP_VADC_INT_POLARITY_LOW 0x13
+#define QPNP_VADC_INT_LATCHED_CLR 0x14
+#define QPNP_VADC_INT_EN_SET 0x15
+#define QPNP_VADC_INT_CLR 0x16
+#define QPNP_VADC_INT_LOW_THR_BIT BIT(4)
+#define QPNP_VADC_INT_HIGH_THR_BIT BIT(3)
+#define QPNP_VADC_INT_CONV_SEQ_TIMEOUT_BIT BIT(2)
+#define QPNP_VADC_INT_FIFO_NOT_EMPTY_BIT BIT(1)
+#define QPNP_VADC_INT_EOC_BIT BIT(0)
+#define QPNP_VADC_INT_CLR_MASK 0x1f
+#define QPNP_VADC_MODE_CTL 0x40
+#define QPNP_VADC_OP_MODE_SHIFT 4
+#define QPNP_VADC_VREF_XO_THM_FORCE BIT(2)
+#define QPNP_VADC_AMUX_TRIM_EN BIT(1)
+#define QPNP_VADC_ADC_TRIM_EN BIT(0)
+#define QPNP_VADC_EN_CTL1 0x46
+#define QPNP_VADC_ADC_EN BIT(7)
+#define QPNP_VADC_ADC_CH_SEL_CTL 0x48
+#define QPNP_VADC_ADC_DIG_PARAM 0x50
+#define QPNP_VADC_ADC_DIG_DEC_RATIO_SEL_SHIFT 3
+#define QPNP_VADC_HW_SETTLE_DELAY 0x51
+#define QPNP_VADC_CONV_REQ 0x52
+#define QPNP_VADC_CONV_REQ_SET BIT(7)
+#define QPNP_VADC_CONV_SEQ_CTL 0x54
+#define QPNP_VADC_CONV_SEQ_HOLDOFF_SHIFT 4
+#define QPNP_VADC_CONV_SEQ_TRIG_CTL 0x55
+#define QPNP_VADC_CONV_SEQ_FALLING_EDGE 0x0
+#define QPNP_VADC_CONV_SEQ_RISING_EDGE 0x1
+#define QPNP_VADC_CONV_SEQ_EDGE_SHIFT 7
+#define QPNP_VADC_FAST_AVG_CTL 0x5a
+
+#define QPNP_VADC_M0_LOW_THR_LSB 0x5c
+#define QPNP_VADC_M0_LOW_THR_MSB 0x5d
+#define QPNP_VADC_M0_HIGH_THR_LSB 0x5e
+#define QPNP_VADC_M0_HIGH_THR_MSB 0x5f
+#define QPNP_VADC_M1_LOW_THR_LSB 0x69
+#define QPNP_VADC_M1_LOW_THR_MSB 0x6a
+#define QPNP_VADC_M1_HIGH_THR_LSB 0x6b
+#define QPNP_VADC_M1_HIGH_THR_MSB 0x6c
+
+#define QPNP_VADC_DATA0 0x60
+#define QPNP_VADC_DATA1 0x61
+#define QPNP_VADC_CONV_TIMEOUT_ERR 2
+#define QPNP_VADC_CONV_TIME_MIN 2000
+#define QPNP_VADC_CONV_TIME_MAX 2100
+
+#define QPNP_ADC_HWMON_NAME_LENGTH 16
+
+struct qpnp_vadc_drv {
+ struct qpnp_adc_drv *adc;
+ struct dentry *dent;
+ struct device *vadc_hwmon;
+ bool vadc_init_calib;
+ struct sensor_device_attribute sens_attr[0];
+};
+
+struct qpnp_vadc_drv *qpnp_vadc;
+
+static struct qpnp_vadc_scale_fn vadc_scale_fn[] = {
+ [SCALE_DEFAULT] = {qpnp_adc_scale_default},
+};
+
+static int32_t qpnp_vadc_read_reg(int16_t reg, u8 *data)
+{
+ struct qpnp_vadc_drv *vadc = qpnp_vadc;
+ int rc;
+
+ rc = spmi_ext_register_readl(vadc->adc->spmi->ctrl, vadc->adc->slave,
+ reg, data, 1);
+ if (rc < 0) {
+ pr_err("qpnp adc read reg %d failed with %d\n", reg, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_vadc_write_reg(int16_t reg, u8 data)
+{
+ struct qpnp_vadc_drv *vadc = qpnp_vadc;
+ int rc;
+ u8 *buf;
+
+ buf = &data;
+
+ rc = spmi_ext_register_writel(vadc->adc->spmi->ctrl, vadc->adc->slave,
+ reg, buf, 1);
+ if (rc < 0) {
+ pr_err("qpnp adc write reg %d failed with %d\n", reg, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_vadc_configure_interrupt(void)
+{
+ int rc = 0;
+ u8 data = 0;
+
+ /* Configure interrupt as an Edge trigger */
+ rc = qpnp_vadc_write_reg(QPNP_VADC_INT_SET_TYPE,
+ QPNP_VADC_INT_CLR_MASK);
+ if (rc < 0) {
+ pr_err("%s Interrupt configure failed\n", __func__);
+ return rc;
+ }
+
+ /* Configure interrupt for rising edge trigger */
+ rc = qpnp_vadc_write_reg(QPNP_VADC_INT_POLARITY_HIGH,
+ QPNP_VADC_INT_CLR_MASK);
+ if (rc < 0) {
+ pr_err("%s Rising edge trigger configure failed\n", __func__);
+ return rc;
+ }
+
+ /* Disable low level interrupt triggering */
+ data = QPNP_VADC_INT_CLR_MASK;
+ rc = qpnp_vadc_write_reg(QPNP_VADC_INT_POLARITY_LOW,
+ (~data & QPNP_VADC_INT_CLR_MASK));
+ if (rc < 0) {
+ pr_err("%s Setting level low to disable failed\n", __func__);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_vadc_enable(bool state)
+{
+ int rc = 0;
+ u8 data = 0;
+
+ data = QPNP_VADC_ADC_EN;
+ if (state) {
+ rc = qpnp_vadc_write_reg(QPNP_VADC_EN_CTL1,
+ data);
+ if (rc < 0) {
+ pr_err("VADC enable failed\n");
+ return rc;
+ }
+ } else {
+ rc = qpnp_vadc_write_reg(QPNP_VADC_EN_CTL1,
+ (~data & QPNP_VADC_ADC_EN));
+ if (rc < 0) {
+ pr_err("VADC disable failed\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int32_t qpnp_vadc_configure(
+ struct qpnp_vadc_amux_properties *chan_prop)
+{
+ u8 decimation = 0, conv_sequence = 0, conv_sequence_trig = 0;
+ int rc = 0;
+
+ rc = qpnp_vadc_write_reg(QPNP_VADC_INT_EN_SET,
+ QPNP_VADC_INT_EOC_BIT);
+ if (rc < 0) {
+ pr_err("qpnp adc configure error for interrupt setup\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_write_reg(QPNP_VADC_MODE_CTL, chan_prop->mode_sel);
+ if (rc < 0) {
+ pr_err("qpnp adc configure error for mode selection\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_write_reg(QPNP_VADC_ADC_CH_SEL_CTL,
+ chan_prop->amux_channel);
+ if (rc < 0) {
+ pr_err("qpnp adc configure error for channel selection\n");
+ return rc;
+ }
+
+ decimation |= chan_prop->decimation <<
+ QPNP_VADC_ADC_DIG_DEC_RATIO_SEL_SHIFT;
+ rc = qpnp_vadc_write_reg(QPNP_VADC_ADC_DIG_PARAM, decimation);
+ if (rc < 0) {
+ pr_err("qpnp adc configure error for digital parameter setup\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_write_reg(QPNP_VADC_HW_SETTLE_DELAY,
+ chan_prop->hw_settle_time);
+ if (rc < 0) {
+ pr_err("qpnp adc configure error for hw settling time setup\n");
+ return rc;
+ }
+
+ if (chan_prop->mode_sel == (ADC_OP_NORMAL_MODE <<
+ QPNP_VADC_OP_MODE_SHIFT)) {
+ rc = qpnp_vadc_write_reg(QPNP_VADC_FAST_AVG_CTL,
+ chan_prop->fast_avg_setup);
+ if (rc < 0) {
+ pr_err("qpnp adc fast averaging configure error\n");
+ return rc;
+ }
+ } else if (chan_prop->mode_sel == (ADC_OP_CONVERSION_SEQUENCER <<
+ QPNP_VADC_OP_MODE_SHIFT)) {
+ conv_sequence = ((ADC_SEQ_HOLD_100US <<
+ QPNP_VADC_CONV_SEQ_HOLDOFF_SHIFT) |
+ ADC_CONV_SEQ_TIMEOUT_5MS);
+ rc = qpnp_vadc_write_reg(QPNP_VADC_CONV_SEQ_CTL,
+ conv_sequence);
+ if (rc < 0) {
+ pr_err("qpnp adc conversion sequence error\n");
+ return rc;
+ }
+
+ conv_sequence_trig = ((QPNP_VADC_CONV_SEQ_RISING_EDGE <<
+ QPNP_VADC_CONV_SEQ_EDGE_SHIFT) |
+ chan_prop->trigger_channel);
+ rc = qpnp_vadc_write_reg(QPNP_VADC_CONV_SEQ_TRIG_CTL,
+ conv_sequence_trig);
+ if (rc < 0) {
+ pr_err("qpnp adc conversion trigger error\n");
+ return rc;
+ }
+ }
+
+ rc = qpnp_vadc_write_reg(QPNP_VADC_CONV_REQ, QPNP_VADC_CONV_REQ_SET);
+ if (rc < 0) {
+ pr_err("qpnp adc request conversion failed\n");
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_vadc_configure);
+
+static int32_t qpnp_vadc_read_conversion_result(int32_t *data)
+{
+ uint8_t rslt_lsb, rslt_msb;
+ int rc = 0;
+
+ rc = qpnp_vadc_read_reg(QPNP_VADC_DATA0, &rslt_lsb);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed for data0 with %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_vadc_read_reg(QPNP_VADC_DATA1, &rslt_msb);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed for data1 with %d\n", rc);
+ return rc;
+ }
+
+ *data = (rslt_msb << 8) | rslt_lsb;
+
+ rc = qpnp_vadc_check_result(data);
+ if (rc < 0) {
+ pr_err("VADC data check failed\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_vadc_read_status(int mode_sel)
+{
+ u8 status1, status2, status2_conv_seq_state;
+ u8 status_err = QPNP_VADC_CONV_TIMEOUT_ERR;
+ int rc;
+
+ switch (mode_sel) {
+ case (ADC_OP_CONVERSION_SEQUENCER << QPNP_VADC_OP_MODE_SHIFT):
+ rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+ if (rc) {
+ pr_err("qpnp_vadc read mask interrupt failed\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS2, &status2);
+ if (rc) {
+ pr_err("qpnp_vadc read mask interrupt failed\n");
+ return rc;
+ }
+
+ if (!(status2 & ~QPNP_VADC_STATUS2_CONV_SEQ_TIMEOUT_STS) &&
+ (status1 & (~QPNP_VADC_STATUS1_REQ_STS |
+ QPNP_VADC_STATUS1_EOC))) {
+ rc = status_err;
+ return rc;
+ }
+
+ status2_conv_seq_state = status2 >>
+ QPNP_VADC_STATUS2_CONV_SEQ_STATE_SHIFT;
+ if (status2_conv_seq_state != ADC_CONV_SEQ_IDLE) {
+ pr_err("qpnp vadc seq error with status %d\n",
+ status2);
+ rc = -EINVAL;
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void qpnp_vadc_work(struct work_struct *work)
+{
+ struct qpnp_vadc_drv *vadc = qpnp_vadc;
+ int rc;
+
+ rc = qpnp_vadc_write_reg(QPNP_VADC_INT_CLR, QPNP_VADC_INT_EOC_BIT);
+ if (rc)
+ pr_err("qpnp_vadc clear mask interrupt failed with %d\n", rc);
+
+ complete(&vadc->adc->adc_rslt_completion);
+
+ return;
+}
+DECLARE_WORK(trigger_completion_work, qpnp_vadc_work);
+
+static irqreturn_t qpnp_vadc_isr(int irq, void *dev_id)
+{
+ schedule_work(&trigger_completion_work);
+
+ return IRQ_HANDLED;
+}
+
+static int32_t qpnp_vadc_calib_device(void)
+{
+ struct qpnp_vadc_drv *vadc = qpnp_vadc;
+ struct qpnp_vadc_amux_properties conv;
+ int rc, calib_read_1, calib_read_2;
+ u8 status1 = 0;
+
+ conv.amux_channel = REF_125V;
+ conv.decimation = DECIMATION_TYPE2;
+ conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+ conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+ conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+
+ rc = qpnp_vadc_configure(&conv);
+ if (rc) {
+ pr_err("qpnp_vadc configure failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ while (status1 != (~QPNP_VADC_STATUS1_REQ_STS |
+ QPNP_VADC_STATUS1_EOC)) {
+ rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+ if (rc < 0)
+ return rc;
+ usleep_range(QPNP_VADC_CONV_TIME_MIN,
+ QPNP_VADC_CONV_TIME_MAX);
+ }
+
+ rc = qpnp_vadc_read_conversion_result(&calib_read_1);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ conv.amux_channel = REF_625MV;
+ conv.decimation = DECIMATION_TYPE2;
+ conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+ conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+ conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+ rc = qpnp_vadc_configure(&conv);
+ if (rc) {
+ pr_err("qpnp adc configure failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ while (status1 != (~QPNP_VADC_STATUS1_REQ_STS |
+ QPNP_VADC_STATUS1_EOC)) {
+ rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+ if (rc < 0)
+ return rc;
+ usleep_range(QPNP_VADC_CONV_TIME_MIN,
+ QPNP_VADC_CONV_TIME_MAX);
+ }
+
+	rc = qpnp_vadc_read_conversion_result(&calib_read_2);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dy =
+ (calib_read_1 - calib_read_2);
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dx
+ = QPNP_ADC_625_UV;
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].adc_vref =
+ calib_read_1;
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].adc_gnd =
+ calib_read_2;
+ /* Ratiometric Calibration */
+ conv.amux_channel = VDD_VADC;
+ conv.decimation = DECIMATION_TYPE2;
+ conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+ conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+ conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+ rc = qpnp_vadc_configure(&conv);
+ if (rc) {
+ pr_err("qpnp adc configure failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ while (status1 != (~QPNP_VADC_STATUS1_REQ_STS |
+ QPNP_VADC_STATUS1_EOC)) {
+ rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+ if (rc < 0)
+ return rc;
+ usleep_range(QPNP_VADC_CONV_TIME_MIN,
+ QPNP_VADC_CONV_TIME_MAX);
+ }
+
+ rc = qpnp_vadc_read_conversion_result(&calib_read_1);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ conv.amux_channel = VDD_VADC;
+ conv.decimation = DECIMATION_TYPE2;
+ conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+ conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+ conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+ rc = qpnp_vadc_configure(&conv);
+ if (rc) {
+ pr_err("qpnp adc configure failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ while (status1 != (~QPNP_VADC_STATUS1_REQ_STS |
+ QPNP_VADC_STATUS1_EOC)) {
+ rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+ if (rc < 0)
+ return rc;
+ usleep_range(QPNP_VADC_CONV_TIME_MIN,
+ QPNP_VADC_CONV_TIME_MAX);
+ }
+
+	rc = qpnp_vadc_read_conversion_result(&calib_read_2);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dy =
+ (calib_read_1 - calib_read_2);
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dx =
+ vadc->adc->adc_prop->adc_vdd_reference;
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_vref =
+ calib_read_1;
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_gnd =
+ calib_read_2;
+
+calib_fail:
+ return rc;
+}
+
+int32_t qpnp_vadc_conv_seq_request(enum qpnp_vadc_trigger trigger_channel,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result)
+{
+ struct qpnp_vadc_drv *vadc = qpnp_vadc;
+ int rc, scale_type, amux_prescaling;
+
+ if (!vadc->vadc_init_calib) {
+ rc = qpnp_vadc_calib_device();
+ if (rc) {
+ pr_err("Calibration failed\n");
+ return rc;
+ } else
+ vadc->vadc_init_calib = true;
+ }
+
+ mutex_lock(&vadc->adc->adc_lock);
+
+ rc = qpnp_vadc_enable(true);
+ if (rc)
+ goto fail_unlock;
+
+ vadc->adc->amux_prop->amux_channel = channel;
+ vadc->adc->amux_prop->decimation =
+ vadc->adc->adc_channels[channel].adc_decimation;
+ vadc->adc->amux_prop->hw_settle_time =
+ vadc->adc->adc_channels[channel].hw_settle_time;
+ vadc->adc->amux_prop->fast_avg_setup =
+ vadc->adc->adc_channels[channel].fast_avg_setup;
+
+ if (trigger_channel < ADC_SEQ_NONE)
+ vadc->adc->amux_prop->mode_sel = (ADC_OP_CONVERSION_SEQUENCER
+ << QPNP_VADC_OP_MODE_SHIFT);
+ else if (trigger_channel == ADC_SEQ_NONE)
+ vadc->adc->amux_prop->mode_sel = (ADC_OP_NORMAL_MODE
+ << QPNP_VADC_OP_MODE_SHIFT);
+ else {
+ pr_err("Invalid trigger channel:%d\n", trigger_channel);
+ goto fail;
+ }
+
+ vadc->adc->amux_prop->trigger_channel = trigger_channel;
+
+ rc = qpnp_vadc_configure(vadc->adc->amux_prop);
+ if (rc) {
+ pr_info("qpnp vadc configure failed with %d\n", rc);
+ goto fail;
+ }
+
+ wait_for_completion(&vadc->adc->adc_rslt_completion);
+
+ if (trigger_channel < ADC_SEQ_NONE) {
+ rc = qpnp_vadc_read_status(vadc->adc->amux_prop->mode_sel);
+ if (rc)
+ pr_info("Conversion sequence timed out - %d\n", rc);
+ }
+
+ rc = qpnp_vadc_read_conversion_result(&result->adc_code);
+ if (rc) {
+ pr_info("qpnp vadc read adc code failed with %d\n", rc);
+ goto fail;
+ }
+
+ amux_prescaling = vadc->adc->adc_channels[channel].chan_path_prescaling;
+
+ vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
+ qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+ vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
+ qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+
+ scale_type = vadc->adc->adc_channels[channel].adc_scale_fn;
+ if (scale_type >= SCALE_NONE) {
+ rc = -EBADF;
+ goto fail;
+ }
+
+ vadc_scale_fn[scale_type].chan(result->adc_code,
+ vadc->adc->adc_prop, vadc->adc->amux_prop->chan_prop, result);
+
+fail:
+ rc = qpnp_vadc_enable(false);
+ if (rc)
+ pr_err("Disable ADC failed during configuration\n");
+
+fail_unlock:
+ mutex_unlock(&vadc->adc->adc_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_conv_seq_request);
+
+int32_t qpnp_vadc_read(enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result)
+{
+ return qpnp_vadc_conv_seq_request(ADC_SEQ_NONE,
+ channel, result);
+}
+EXPORT_SYMBOL_GPL(qpnp_vadc_read);
+
+static ssize_t qpnp_adc_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct qpnp_vadc_result result;
+ int rc = -1;
+
+ rc = qpnp_vadc_read(attr->index, &result);
+
+ if (rc)
+ return 0;
+
+ return snprintf(buf, QPNP_ADC_HWMON_NAME_LENGTH,
+ "Result:%lld Raw:%d\n", result.physical, result.adc_code);
+}
+
+static struct sensor_device_attribute qpnp_adc_attr =
+ SENSOR_ATTR(NULL, S_IRUGO, qpnp_adc_show, NULL, 0);
+
+static int32_t qpnp_vadc_init_hwmon(struct spmi_device *spmi)
+{
+ struct qpnp_vadc_drv *vadc = qpnp_vadc;
+ struct device_node *child;
+ struct device_node *node = spmi->dev.of_node;
+ int rc = 0, i = 0, channel;
+
+ for_each_child_of_node(node, child) {
+ channel = vadc->adc->adc_channels[i].channel_num;
+ qpnp_adc_attr.index = vadc->adc->adc_channels[i].channel_num;
+ qpnp_adc_attr.dev_attr.attr.name =
+ vadc->adc->adc_channels[i].name;
+ sysfs_attr_init(&vadc->sens_attr[i].dev_attr.attr);
+ memcpy(&vadc->sens_attr[i], &qpnp_adc_attr,
+ sizeof(qpnp_adc_attr));
+ rc = device_create_file(&spmi->dev,
+ &vadc->sens_attr[i].dev_attr);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "device_create_file failed for dev %s\n",
+ vadc->adc->adc_channels[i].name);
+ goto hwmon_err_sens;
+ }
+ i++;
+ }
+
+ return 0;
+hwmon_err_sens:
+ pr_info("Init HWMON failed for qpnp_adc with %d\n", rc);
+ return rc;
+}
+
+static int __devinit qpnp_vadc_probe(struct spmi_device *spmi)
+{
+ struct qpnp_vadc_drv *vadc;
+ struct qpnp_adc_drv *adc_qpnp;
+ struct device_node *node = spmi->dev.of_node;
+ struct device_node *child;
+ int rc, count_adc_channel_list = 0;
+
+ if (!node)
+ return -EINVAL;
+
+ if (qpnp_vadc) {
+ pr_err("VADC already in use\n");
+ return -EBUSY;
+ }
+
+ for_each_child_of_node(node, child)
+ count_adc_channel_list++;
+
+ if (!count_adc_channel_list) {
+ pr_err("No channel listing\n");
+ return -EINVAL;
+ }
+
+ vadc = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_vadc_drv) +
+ (sizeof(struct sensor_device_attribute) *
+ count_adc_channel_list), GFP_KERNEL);
+ if (!vadc) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ adc_qpnp = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_drv),
+ GFP_KERNEL);
+ if (!adc_qpnp) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ vadc->adc = adc_qpnp;
+
+ rc = qpnp_adc_get_devicetree_data(spmi, vadc->adc);
+ if (rc) {
+ dev_err(&spmi->dev, "failed to read device tree\n");
+ return rc;
+ }
+
+ rc = devm_request_irq(&spmi->dev, vadc->adc->adc_irq,
+ qpnp_vadc_isr, IRQF_TRIGGER_RISING,
+ "qpnp_vadc_interrupt", vadc);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "failed to request adc irq with error %d\n", rc);
+ return rc;
+ }
+
+ qpnp_vadc = vadc;
+ dev_set_drvdata(&spmi->dev, vadc);
+ rc = qpnp_vadc_init_hwmon(spmi);
+ if (rc) {
+ dev_err(&spmi->dev, "failed to initialize qpnp hwmon adc\n");
+ goto fail_free_irq;
+ }
+ vadc->vadc_hwmon = hwmon_device_register(&vadc->adc->spmi->dev);
+ vadc->vadc_init_calib = false;
+
+ rc = qpnp_vadc_configure_interrupt();
+ if (rc) {
+ dev_err(&spmi->dev, "failed to configure interrupt");
+ goto fail_free_irq;
+ }
+
+ return 0;
+
+fail_free_irq:
+ free_irq(vadc->adc->adc_irq, vadc);
+
+ return rc;
+}
+
+static int __devexit qpnp_vadc_remove(struct spmi_device *spmi)
+{
+ struct qpnp_vadc_drv *vadc = dev_get_drvdata(&spmi->dev);
+ struct device_node *node = spmi->dev.of_node;
+ struct device_node *child;
+ int i = 0;
+
+ for_each_child_of_node(node, child) {
+ device_remove_file(&spmi->dev,
+ &vadc->sens_attr[i].dev_attr);
+ i++;
+ }
+ free_irq(vadc->adc->adc_irq, vadc);
+ dev_set_drvdata(&spmi->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id qpnp_vadc_match_table[] = {
+ { .compatible = "qcom,qpnp-vadc",
+ },
+ {}
+};
+
+static struct spmi_driver qpnp_vadc_driver = {
+ .driver = {
+ .name = "qcom,qpnp-vadc",
+ .of_match_table = qpnp_vadc_match_table,
+ },
+ .probe = qpnp_vadc_probe,
+ .remove = qpnp_vadc_remove,
+};
+
+static int __init qpnp_vadc_init(void)
+{
+ return spmi_driver_register(&qpnp_vadc_driver);
+}
+module_init(qpnp_vadc_init);
+
+static void __exit qpnp_vadc_exit(void)
+{
+ spmi_driver_unregister(&qpnp_vadc_driver);
+}
+module_exit(qpnp_vadc_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC Voltage ADC driver");
+MODULE_LICENSE("GPL v2");
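Editor's note: a minimal sketch of an in-kernel client of the exported API (hypothetical function; REF_625MV is one of the existing qpnp_vadc_channels values, and qpnp_vadc_read() blocks until the conversion completes):

#include <linux/qpnp/qpnp-adc.h>

static void example_read_reference_channel(void)
{
	struct qpnp_vadc_result result;
	int rc;

	rc = qpnp_vadc_read(REF_625MV, &result);
	if (rc) {
		pr_err("vadc read failed: %d\n", rc);
		return;
	}
	/* result.physical is the scaled value, result.adc_code the raw code */
	pr_info("vadc: %lld (raw 0x%x)\n", result.physical, result.adc_code);
}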
diff --git a/drivers/input/touchscreen/msm_ts.c b/drivers/input/touchscreen/msm_ts.c
index eb2e73b..e66120e 100644
--- a/drivers/input/touchscreen/msm_ts.c
+++ b/drivers/input/touchscreen/msm_ts.c
@@ -387,6 +387,7 @@
input_set_capability(ts->input_dev, EV_KEY, BTN_TOUCH);
set_bit(EV_ABS, ts->input_dev->evbit);
+ set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
input_set_abs_params(ts->input_dev, ABS_X, pdata->min_x, pdata->max_x,
0, 0);
diff --git a/drivers/iommu/msm_iommu-v2.c b/drivers/iommu/msm_iommu-v2.c
index 26e967d..28ad0ff 100644
--- a/drivers/iommu/msm_iommu-v2.c
+++ b/drivers/iommu/msm_iommu-v2.c
@@ -51,10 +51,16 @@
if (ret)
goto fail;
- if (drvdata->clk) {
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ if (drvdata->aclk) {
+ ret = clk_prepare_enable(drvdata->aclk);
+ if (ret) {
+ clk_disable_unprepare(drvdata->clk);
clk_disable_unprepare(drvdata->pclk);
+ }
}
fail:
return ret;
@@ -62,11 +68,23 @@
static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
- if (drvdata->clk)
- clk_disable_unprepare(drvdata->clk);
+ if (drvdata->aclk)
+ clk_disable_unprepare(drvdata->aclk);
+ clk_disable_unprepare(drvdata->clk);
clk_disable_unprepare(drvdata->pclk);
}
+static void __sync_tlb(void __iomem *base, int ctx)
+{
+ SET_TLBSYNC(base, ctx, 0);
+
+ /* No barrier needed due to register proximity */
+ while (GET_CB_TLBSTATUS_SACTIVE(base, ctx))
+ cpu_relax();
+
+ /* No barrier needed due to read dependency */
+}
+
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
struct msm_priv *priv = domain->priv;
@@ -92,6 +110,7 @@
SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
asid | (va & CB_TLBIVA_VA));
mb();
+ __sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
__disable_clocks(iommu_drvdata);
}
fail:
@@ -121,6 +140,7 @@
SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
mb();
+ __sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
__disable_clocks(iommu_drvdata);
}
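Editor's note: both flush paths now follow the same invalidate, barrier, poll sequence. A hypothetical wrapper (not part of the patch, assuming the SET_TLBIASID and __sync_tlb definitions above) makes the ordering explicit:

/* Hypothetical wrapper around the invalidate-then-drain sequence above. */
static void example_flush_asid(void __iomem *base, int ctx, u32 asid)
{
	SET_TLBIASID(base, ctx, asid);	/* start the per-ASID invalidate      */
	mb();				/* make sure the write is posted      */
	__sync_tlb(base, ctx);		/* wait for TLBSTATUS.SACTIVE to drop */
}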
diff --git a/drivers/iommu/msm_iommu_dev-v2.c b/drivers/iommu/msm_iommu_dev-v2.c
index d3a088a..8c26f95 100644
--- a/drivers/iommu/msm_iommu_dev-v2.c
+++ b/drivers/iommu/msm_iommu_dev-v2.c
@@ -69,7 +69,7 @@
{
struct msm_iommu_drvdata *drvdata;
struct resource *r;
- int ret;
+ int ret, needs_alt_core_clk;
if (msm_iommu_root_dev == pdev)
return 0;
@@ -93,55 +93,42 @@
if (IS_ERR(drvdata->gdsc))
return -EINVAL;
- drvdata->pclk = clk_get(&pdev->dev, "iface_clk");
+ drvdata->pclk = devm_clk_get(&pdev->dev, "iface_clk");
if (IS_ERR(drvdata->pclk))
return PTR_ERR(drvdata->pclk);
- ret = clk_prepare_enable(drvdata->pclk);
- if (ret)
- goto fail_enable;
+ drvdata->clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(drvdata->clk))
+ return PTR_ERR(drvdata->clk);
- drvdata->clk = clk_get(&pdev->dev, "core_clk");
- if (!IS_ERR(drvdata->clk)) {
- if (clk_get_rate(drvdata->clk) == 0) {
- ret = clk_round_rate(drvdata->clk, 1);
- clk_set_rate(drvdata->clk, ret);
- }
+ needs_alt_core_clk = of_property_read_bool(pdev->dev.of_node,
+ "qcom,needs-alt-core-clk");
+ if (needs_alt_core_clk) {
+ drvdata->aclk = devm_clk_get(&pdev->dev, "alt_core_clk");
+ if (IS_ERR(drvdata->aclk))
+ return PTR_ERR(drvdata->aclk);
+ }
- ret = clk_prepare_enable(drvdata->clk);
- if (ret) {
- clk_put(drvdata->clk);
- goto fail_pclk;
- }
- } else
- drvdata->clk = NULL;
+ if (clk_get_rate(drvdata->clk) == 0) {
+ ret = clk_round_rate(drvdata->clk, 1);
+ clk_set_rate(drvdata->clk, ret);
+ }
+
+ if (drvdata->aclk && clk_get_rate(drvdata->aclk) == 0) {
+ ret = clk_round_rate(drvdata->aclk, 1);
+ clk_set_rate(drvdata->aclk, ret);
+ }
ret = msm_iommu_parse_dt(pdev, drvdata);
if (ret)
- goto fail_clk;
+ return ret;
pr_info("device %s mapped at %p, with %d ctx banks\n",
drvdata->name, drvdata->base, drvdata->ncb);
platform_set_drvdata(pdev, drvdata);
- if (drvdata->clk)
- clk_disable_unprepare(drvdata->clk);
-
- clk_disable_unprepare(drvdata->pclk);
-
return 0;
-
-fail_clk:
- if (drvdata->clk) {
- clk_disable_unprepare(drvdata->clk);
- clk_put(drvdata->clk);
- }
-fail_pclk:
- clk_disable_unprepare(drvdata->pclk);
-fail_enable:
- clk_put(drvdata->pclk);
- return ret;
}
static int __devexit msm_iommu_remove(struct platform_device *pdev)
@@ -192,7 +179,7 @@
*/
ctx_drvdata->num = ((r->start - rp.start) >> CTX_SHIFT) - 8;
- if (of_property_read_string(pdev->dev.of_node, "qcom,iommu-ctx-name",
+ if (of_property_read_string(pdev->dev.of_node, "label",
&ctx_drvdata->name))
ctx_drvdata->name = dev_name(&pdev->dev);
@@ -232,7 +219,7 @@
ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata);
if (!ret)
dev_info(&pdev->dev, "context %s using bank %d\n",
- dev_name(&pdev->dev), ctx_drvdata->num);
+ ctx_drvdata->name, ctx_drvdata->num);
return ret;
}
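Editor's note: switching to devm_clk_get() is what lets the fail_clk/fail_pclk/fail_enable labels go away, since the clocks are released automatically when probe fails or the device is unbound. A compressed sketch of the resulting shape (names follow the driver above; error handling abbreviated):

/* Sketch only: resource-managed clock lookup with an optional alt clock. */
static int example_get_iommu_clocks(struct platform_device *pdev,
				    struct msm_iommu_drvdata *drvdata)
{
	drvdata->pclk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(drvdata->pclk))
		return PTR_ERR(drvdata->pclk);

	drvdata->clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(drvdata->clk))
		return PTR_ERR(drvdata->clk);

	/* alt_core_clk is optional, requested only when the DT asks for it */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,needs-alt-core-clk")) {
		drvdata->aclk = devm_clk_get(&pdev->dev, "alt_core_clk");
		if (IS_ERR(drvdata->aclk))
			return PTR_ERR(drvdata->aclk);
	}
	return 0;	/* no cleanup labels needed: devm releases the clocks */
}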
diff --git a/drivers/media/video/msm/msm_isp.c b/drivers/media/video/msm/msm_isp.c
index 67e7c02..9ddde15 100644
--- a/drivers/media/video/msm/msm_isp.c
+++ b/drivers/media/video/msm/msm_isp.c
@@ -424,10 +424,12 @@
stats.buf_idx = isp_stats->buf_idx;
switch (isp_stats->id) {
case MSG_ID_STATS_AEC:
+ case MSG_ID_STATS_BG:
stats.aec.buff = stats.buffer;
stats.aec.fd = stats.fd;
break;
case MSG_ID_STATS_AF:
+ case MSG_ID_STATS_BF:
stats.af.buff = stats.buffer;
stats.af.fd = stats.fd;
break;
@@ -447,6 +449,10 @@
stats.cs.buff = stats.buffer;
stats.cs.fd = stats.fd;
break;
+ case MSG_ID_STATS_BHIST:
+ stats.skin.buff = stats.buffer;
+ stats.skin.fd = stats.fd;
+ break;
case MSG_ID_STATS_AWB_AEC:
break;
default:
@@ -537,6 +543,9 @@
memset(&axi_data, 0, sizeof(axi_data));
CDBG("%s: cmd_type %d\n", __func__, cfgcmd.cmd_type);
switch (cfgcmd.cmd_type) {
+ case CMD_STATS_BG_ENABLE:
+ case CMD_STATS_BF_ENABLE:
+ case CMD_STATS_BHIST_ENABLE:
case CMD_STATS_AF_ENABLE:
case CMD_STATS_AEC_ENABLE:
case CMD_STATS_AWB_ENABLE:
@@ -629,6 +638,12 @@
cfgcmd.cmd_type = CMD_STATS_CS_BUF_RELEASE;
else if (buf.type == STAT_AEAW)
cfgcmd.cmd_type = CMD_STATS_BUF_RELEASE;
+ else if (buf.type == STAT_BG)
+ cfgcmd.cmd_type = CMD_STATS_BG_BUF_RELEASE;
+ else if (buf.type == STAT_BF)
+ cfgcmd.cmd_type = CMD_STATS_BF_BUF_RELEASE;
+ else if (buf.type == STAT_BHIST)
+ cfgcmd.cmd_type = CMD_STATS_BHIST_BUF_RELEASE;
else {
pr_err("%s: invalid buf type %d\n",
@@ -673,7 +688,6 @@
}
case MSM_CAM_IOCTL_STATS_ENQUEUEBUF: {
struct msm_stats_buf_info buf_info;
-
if (copy_from_user(&buf_info, arg,
sizeof(struct msm_stats_buf_info))) {
ERR_COPY_FROM_USER();
@@ -687,18 +701,30 @@
}
case MSM_CAM_IOCTL_STATS_FLUSH_BUFQ: {
struct msm_stats_flush_bufq bufq_info;
-
if (copy_from_user(&bufq_info, arg,
sizeof(struct msm_stats_flush_bufq))) {
ERR_COPY_FROM_USER();
return -EFAULT;
- }
+ }
cfgcmd.cmd_type = VFE_CMD_STATS_FLUSH_BUFQ;
cfgcmd.value = (void *)&bufq_info;
cfgcmd.length = sizeof(struct msm_stats_flush_bufq);
rc = msm_isp_subdev_ioctl(sd, &cfgcmd, NULL);
break;
}
+ case MSM_CAM_IOCTL_STATS_UNREG_BUF: {
+ struct msm_stats_reqbuf reqbuf;
+ if (copy_from_user(&reqbuf, arg,
+ sizeof(struct msm_stats_reqbuf))) {
+ ERR_COPY_FROM_USER();
+ return -EFAULT;
+ }
+ cfgcmd.cmd_type = VFE_CMD_STATS_UNREGBUF;
+ cfgcmd.value = (void *)&reqbuf;
+ cfgcmd.length = sizeof(struct msm_stats_reqbuf);
+ rc = msm_isp_subdev_ioctl(sd, &cfgcmd, (void *)mctl->client);
+ break;
+ }
default:
rc = -1;
break;
@@ -734,6 +760,7 @@
case MSM_CAM_IOCTL_STATS_REQBUF:
case MSM_CAM_IOCTL_STATS_ENQUEUEBUF:
case MSM_CAM_IOCTL_STATS_FLUSH_BUFQ:
+ case MSM_CAM_IOCTL_STATS_UNREG_BUF:
rc = msm_vfe_stats_buf_ioctl(sd, cmd, pmctl, argp);
break;
diff --git a/drivers/media/video/msm/msm_mctl_buf.c b/drivers/media/video/msm/msm_mctl_buf.c
index befd213..3cd6a25 100644
--- a/drivers/media/video/msm/msm_mctl_buf.c
+++ b/drivers/media/video/msm/msm_mctl_buf.c
@@ -721,11 +721,8 @@
uint32_t buf_phyaddr = 0;
int rc = -EINVAL;
- if (!free_buf)
- return rc;
-
- if (!pcam_inst) {
- pr_err("%s Invalid instance, buffer not released\n",
+ if (!pcam_inst || !free_buf) {
+ pr_err("%s Invalid argument, buffer will not be returned\n",
__func__);
return rc;
}
@@ -735,17 +732,19 @@
buf_phyaddr =
(uint32_t) videobuf2_to_pmem_contig(&buf->vidbuf, 0);
if (free_buf->ch_paddr[0] == buf_phyaddr) {
- D("%s buf = 0x%x \n", __func__, free_buf->ch_paddr[0]);
- buf->state = MSM_BUFFER_STATE_UNUSED;
+ D("%s Return buffer %d and mark it as QUEUED\n",
+ __func__, buf->vidbuf.v4l2_buf.index);
+ buf->state = MSM_BUFFER_STATE_QUEUED;
rc = 0;
break;
}
}
-
- if (rc != 0)
- pr_err("%s invalid buffer address ", __func__);
-
spin_unlock_irqrestore(&pcam_inst->vq_irqlock, flags);
+
+ if (rc)
+ pr_err("%s Cannot find buffer %x", __func__,
+ free_buf->ch_paddr[0]);
+
return rc;
}
diff --git a/drivers/media/video/msm/msm_mem.c b/drivers/media/video/msm/msm_mem.c
index 5d412db..e2e9d1b 100644
--- a/drivers/media/video/msm/msm_mem.c
+++ b/drivers/media/video/msm/msm_mem.c
@@ -208,6 +208,9 @@
case MSM_PMEM_IHIST:
case MSM_PMEM_SKIN:
case MSM_PMEM_AEC_AWB:
+ case MSM_PMEM_BAYER_GRID:
+ case MSM_PMEM_BAYER_FOCUS:
+ case MSM_PMEM_BAYER_HIST:
rc = msm_pmem_table_add(ptype, pinfo, client);
break;
@@ -235,6 +238,9 @@
case MSM_PMEM_IHIST:
case MSM_PMEM_SKIN:
case MSM_PMEM_AEC_AWB:
+ case MSM_PMEM_BAYER_GRID:
+ case MSM_PMEM_BAYER_FOCUS:
+ case MSM_PMEM_BAYER_HIST:
hlist_for_each_entry_safe(region, node, n,
ptype, list) {
diff --git a/drivers/media/video/msm/msm_vfe31_v4l2.c b/drivers/media/video/msm/msm_vfe31_v4l2.c
index 18168ee..a22a09f 100644
--- a/drivers/media/video/msm/msm_vfe31_v4l2.c
+++ b/drivers/media/video/msm/msm_vfe31_v4l2.c
@@ -417,6 +417,25 @@
return 0L;
}
+static unsigned long vfe31_stats_unregbuf(
+ struct msm_stats_reqbuf *req_buf)
+{
+ int i = 0, rc = 0;
+
+ for (i = 0; i < req_buf->num_buf; i++) {
+ rc = vfe31_ctrl->stats_ops.buf_unprepare(
+ vfe31_ctrl->stats_ops.stats_ctrl,
+ req_buf->stats_type, i,
+ vfe31_ctrl->stats_ops.client);
+ if (rc < 0) {
+ pr_err("%s: unreg stats buf (type = %d) err = %d",
+ __func__, req_buf->stats_type, rc);
+ return rc;
+ }
+ }
+ return 0L;
+}
+
static int vfe_stats_awb_buf_init(
struct vfe_cmd_stats_buf *in)
{
@@ -3334,6 +3353,22 @@
vfe31_ctrl->stats_ops.client);
}
break;
+ case VFE_CMD_STATS_UNREGBUF:
+ {
+ struct msm_stats_reqbuf *req_buf = NULL;
+ req_buf = (struct msm_stats_reqbuf *)cmd->value;
+ if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+			/* error: length does not match the expected struct size */
+			pr_err("%s: stats reqbuf input size = %d,\n"
+				"struct size = %d, mismatch\n",
+				__func__, cmd->length,
+				sizeof(struct msm_stats_reqbuf));
+			rc = -EINVAL;
+ goto end;
+ }
+ rc = vfe31_stats_unregbuf(req_buf);
+ }
+ break;
default:
rc = -1;
pr_err("%s: cmd_type %d not supported", __func__,
@@ -3583,6 +3618,7 @@
case VFE_CMD_STATS_REQBUF:
case VFE_CMD_STATS_ENQUEUEBUF:
case VFE_CMD_STATS_FLUSH_BUFQ:
+ case VFE_CMD_STATS_UNREGBUF:
/* for easy porting put in one envelope */
rc = vfe_stats_bufq_sub_ioctl(cmd, vfe_params->data);
return rc;
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/msm_vfe32.c
index aa2b19d..c4bdad2 100644
--- a/drivers/media/video/msm/msm_vfe32.c
+++ b/drivers/media/video/msm/msm_vfe32.c
@@ -216,6 +216,27 @@
{VFE_CMD_GET_RGB_G_TABLE},
{VFE_CMD_GET_LA_TABLE},
{VFE_CMD_DEMOSAICV3_UPDATE},
+ {VFE_CMD_ACTIVE_REGION_CFG},
+/*130*/ {VFE_CMD_COLOR_PROCESSING_CONFIG},
+ {VFE_CMD_STATS_WB_AEC_CONFIG},
+ {VFE_CMD_STATS_WB_AEC_UPDATE},
+ {VFE_CMD_Y_GAMMA_CONFIG},
+ {VFE_CMD_SCALE_OUTPUT1_CONFIG},
+/*135*/ {VFE_CMD_SCALE_OUTPUT2_CONFIG},
+ {VFE_CMD_CAPTURE_RAW},
+ {VFE_CMD_STOP_LIVESHOT},
+ {VFE_CMD_RECONFIG_VFE},
+ {VFE_CMD_STATS_REQBUF},
+/*140*/ {VFE_CMD_STATS_ENQUEUEBUF},
+ {VFE_CMD_STATS_FLUSH_BUFQ},
+ {VFE_CMD_STATS_UNREGBUF},
+ {VFE_CMD_STATS_BG_START, V32_STATS_BG_LEN, V32_STATS_BG_OFF},
+ {VFE_CMD_STATS_BG_STOP},
+ {VFE_CMD_STATS_BF_START, V32_STATS_BF_LEN, V32_STATS_BF_OFF},
+/*145*/ {VFE_CMD_STATS_BF_STOP},
+ {VFE_CMD_STATS_BHIST_START, V32_STATS_BHIST_LEN,
+ V32_STATS_BHIST_OFF},
+/*147*/ {VFE_CMD_STATS_BHIST_STOP},
};
uint32_t vfe32_AXI_WM_CFG[] = {
@@ -358,8 +379,24 @@
"GET_RGB_G_TABLE",
"GET_LA_TABLE",
"DEMOSAICV3_UPDATE",
+ "STATS_BG_START",
+ "STATS_BG_STOP",
+ "STATS_BF_START",
+ "STATS_BF_STOP",
+ "STATS_BHIST_START",
+ "STATS_BHIST_STOP",
};
+uint8_t vfe32_use_bayer_stats(struct vfe32_ctrl_type *vfe32_ctrl)
+{
+ if (vfe32_ctrl->ver_num.main >= 4) {
+ /* VFE 4 or above uses bayer stats */
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
static void vfe32_stop(struct vfe32_ctrl_type *vfe32_ctrl)
{
unsigned long flags;
@@ -375,7 +412,7 @@
msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+ vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
/* clear all pending interrupts*/
msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
@@ -530,13 +567,16 @@
/* this is unsigned 32 bit integer. */
vfe32_ctrl->share_ctrl->vfeFrameId = 0;
/* Stats control variables. */
- memset(&(vfe32_ctrl->afStatsControl), 0,
+ memset(&(vfe32_ctrl->afbfStatsControl), 0,
sizeof(struct vfe_stats_control));
memset(&(vfe32_ctrl->awbStatsControl), 0,
sizeof(struct vfe_stats_control));
- memset(&(vfe32_ctrl->aecStatsControl), 0,
+ memset(&(vfe32_ctrl->aecbgStatsControl), 0,
+ sizeof(struct vfe_stats_control));
+
+ memset(&(vfe32_ctrl->bhistStatsControl), 0,
sizeof(struct vfe_stats_control));
memset(&(vfe32_ctrl->ihistStatsControl), 0,
@@ -553,6 +593,62 @@
vfe32_ctrl->snapshot_frame_cnt = 0;
}
+static void vfe32_program_dmi_cfg(
+ enum VFE32_DMI_RAM_SEL bankSel,
+ struct vfe32_ctrl_type *vfe32_ctrl)
+{
+ /* set bit 8 for auto increment. */
+ uint32_t value = VFE_DMI_CFG_DEFAULT;
+ value += (uint32_t)bankSel;
+ CDBG("%s: banksel = %d\n", __func__, bankSel);
+
+ msm_camera_io_w(value, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_CFG);
+ /* by default, always starts with offset 0.*/
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_ADDR);
+}
+
+static void vfe32_reset_dmi_tables(
+ struct vfe32_ctrl_type *vfe32_ctrl)
+{
+ int i = 0;
+
+ /* Reset Histogram LUTs */
+ CDBG("Reset Bayer histogram LUT : 0\n");
+ vfe32_program_dmi_cfg(STATS_BHIST_RAM0, vfe32_ctrl);
+ /* Loop for configuring LUT */
+ for (i = 0; i < 256; i++) {
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_HI);
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_LO);
+ }
+ vfe32_program_dmi_cfg(NO_MEM_SELECTED, vfe32_ctrl);
+
+ CDBG("Reset Bayer Histogram LUT: 1\n");
+ vfe32_program_dmi_cfg(STATS_BHIST_RAM1, vfe32_ctrl);
+ /* Loop for configuring LUT */
+ for (i = 0; i < 256; i++) {
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_HI);
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_LO);
+ }
+ vfe32_program_dmi_cfg(NO_MEM_SELECTED, vfe32_ctrl);
+
+ CDBG("Reset IHistogram LUT\n");
+ vfe32_program_dmi_cfg(STATS_IHIST_RAM, vfe32_ctrl);
+ /* Loop for configuring LUT */
+ for (i = 0; i < 256; i++) {
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_HI);
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_LO);
+ }
+ vfe32_program_dmi_cfg(NO_MEM_SELECTED, vfe32_ctrl);
+}
+
static void vfe32_reset(struct vfe32_ctrl_type *vfe32_ctrl)
{
vfe32_reset_internal_variables(vfe32_ctrl);
@@ -572,7 +668,8 @@
/* Ensure the write order while writing
to the command register using the barrier */
- msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
+ msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_IRQ_CMD);
/* enable reset_ack interrupt. */
msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
@@ -677,6 +774,26 @@
return 0L;
}
+
+static unsigned long vfe32_stats_unregbuf(
+ struct vfe32_ctrl_type *vfe32_ctrl,
+ struct msm_stats_reqbuf *req_buf)
+{
+ int i = 0, rc = 0;
+
+ for (i = 0; i < req_buf->num_buf; i++) {
+ rc = vfe32_ctrl->stats_ops.buf_unprepare(
+ vfe32_ctrl->stats_ops.stats_ctrl,
+ req_buf->stats_type, i,
+ vfe32_ctrl->stats_ops.client);
+ if (rc < 0) {
+ pr_err("%s: unreg stats buf (type = %d) err = %d",
+ __func__, req_buf->stats_type, rc);
+ return rc;
+ }
+ }
+ return 0L;
+}
static int vfe_stats_awb_buf_init(
struct vfe32_ctrl_type *vfe32_ctrl,
struct vfe_cmd_stats_buf *in)
@@ -708,14 +825,18 @@
return 0;
}
-static int vfe_stats_aec_buf_init(
- struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_buf *in)
+static int32_t vfe_stats_aec_bg_buf_init(
+ struct vfe32_ctrl_type *vfe32_ctrl)
{
uint32_t addr;
unsigned long flags;
+ uint32_t stats_type;
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AEC
+ : MSM_STATS_TYPE_BG;
spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
- addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AEC);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
if (!addr) {
pr_err("%s: dq aec ping buf from free buf queue",
@@ -724,9 +845,9 @@
}
msm_camera_io_w(addr,
vfe32_ctrl->share_ctrl->vfebase +
- VFE_BUS_STATS_AEC_WR_PING_ADDR);
+ VFE_BUS_STATS_AEC_BG_WR_PING_ADDR);
spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
- addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AEC);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
if (!addr) {
pr_err("%s: dq aec pong buf from free buf queue",
@@ -735,26 +856,31 @@
}
msm_camera_io_w(addr,
vfe32_ctrl->share_ctrl->vfebase +
- VFE_BUS_STATS_AEC_WR_PONG_ADDR);
+ VFE_BUS_STATS_AEC_BG_WR_PONG_ADDR);
return 0;
}
-static int vfe_stats_af_buf_init(
- struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_buf *in)
+static int vfe_stats_af_bf_buf_init(
+ struct vfe32_ctrl_type *vfe32_ctrl)
{
uint32_t addr;
unsigned long flags;
int rc = 0;
+ uint32_t stats_type;
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AF
+ : MSM_STATS_TYPE_BF;
+
spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
- rc = vfe32_stats_flush_enqueue(vfe32_ctrl, MSM_STATS_TYPE_AF);
+ rc = vfe32_stats_flush_enqueue(vfe32_ctrl, stats_type);
if (rc < 0) {
pr_err("%s: dq stats buf err = %d",
__func__, rc);
spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
return -EINVAL;
}
- addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AF);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
if (!addr) {
pr_err("%s: dq af ping buf from free buf queue", __func__);
@@ -762,9 +888,9 @@
}
msm_camera_io_w(addr,
vfe32_ctrl->share_ctrl->vfebase +
- VFE_BUS_STATS_AF_WR_PING_ADDR);
+ VFE_BUS_STATS_AF_BF_WR_PING_ADDR);
spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
- addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AF);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
if (!addr) {
pr_err("%s: dq af pong buf from free buf queue", __func__);
@@ -772,14 +898,44 @@
}
msm_camera_io_w(addr,
vfe32_ctrl->share_ctrl->vfebase +
- VFE_BUS_STATS_AF_WR_PONG_ADDR);
+ VFE_BUS_STATS_AF_BF_WR_PONG_ADDR);
+ return 0;
+}
+
+static int32_t vfe_stats_bhist_buf_init(
+ struct vfe32_ctrl_type *vfe32_ctrl)
+{
+ uint32_t addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_BHIST);
+ spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+		pr_err("%s: dq bhist ping buf from free buf queue",
+			__func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_SKIN_BHIST_WR_PING_ADDR);
+ spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_BHIST);
+ spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+		pr_err("%s: dq bhist pong buf from free buf queue",
+			__func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_SKIN_BHIST_WR_PONG_ADDR);
return 0;
}
static int vfe_stats_ihist_buf_init(
- struct vfe32_ctrl_type *vfe32_ctrl,
- struct vfe_cmd_stats_buf *in)
+ struct vfe32_ctrl_type *vfe32_ctrl)
{
uint32_t addr;
unsigned long flags;
@@ -811,8 +967,7 @@
}
static int vfe_stats_rs_buf_init(
- struct vfe32_ctrl_type *vfe32_ctrl,
- struct vfe_cmd_stats_buf *in)
+ struct vfe32_ctrl_type *vfe32_ctrl)
{
uint32_t addr;
unsigned long flags;
@@ -841,8 +996,7 @@
}
static int vfe_stats_cs_buf_init(
- struct vfe32_ctrl_type *vfe32_ctrl,
- struct vfe_cmd_stats_buf *in)
+ struct vfe32_ctrl_type *vfe32_ctrl)
{
uint32_t addr;
unsigned long flags;
@@ -918,6 +1072,9 @@
msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
VFE_CAMIF_COMMAND);
}
+ msm_camera_io_dump(vfe32_ctrl->share_ctrl->vfebase,
+ vfe32_ctrl->share_ctrl->register_total * 4);
+
/* Ensure the write order while writing
to the command register using the barrier */
atomic_set(&vfe32_ctrl->share_ctrl->vstate, 1);
@@ -1372,19 +1529,7 @@
vfe32_ctrl->share_ctrl->vfebase + V32_TIMER_SELECT_OFF);
}
-static void vfe32_program_dmi_cfg(
- enum VFE32_DMI_RAM_SEL bankSel,
- struct vfe32_ctrl_type *vfe32_ctrl)
-{
- /* set bit 8 for auto increment. */
- uint32_t value = VFE_DMI_CFG_DEFAULT;
- value += (uint32_t)bankSel;
- CDBG("%s: banksel = %d\n", __func__, bankSel);
- msm_camera_io_w(value, vfe32_ctrl->share_ctrl->vfebase + VFE_DMI_CFG);
- /* by default, always starts with offset 0.*/
- msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase + VFE_DMI_ADDR);
-}
static void vfe32_write_gamma_cfg(
enum VFE32_DMI_RAM_SEL channel_sel,
const uint32_t *tbl,
@@ -1582,7 +1727,7 @@
struct vfe32_ctrl_type *vfe32_ctrl)
{
int i , rc = 0;
- uint32_t old_val = 0 , new_val = 0;
+ uint32_t old_val = 0 , new_val = 0, module_val = 0;
uint32_t *cmdp = NULL;
uint32_t *cmdp_local = NULL;
uint32_t snapshot_cnt = 0;
@@ -1762,7 +1907,12 @@
break;
case VFE_CMD_STATS_AE_START: {
- rc = vfe_stats_aec_buf_init(vfe32_ctrl, NULL);
+ if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ rc = vfe_stats_aec_bg_buf_init(vfe32_ctrl);
if (rc < 0) {
pr_err("%s: cannot config ping/pong address of AEC",
__func__);
@@ -1791,7 +1941,12 @@
}
break;
case VFE_CMD_STATS_AF_START: {
- rc = vfe_stats_af_buf_init(vfe32_ctrl, NULL);
+ if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ rc = vfe_stats_af_bf_buf_init(vfe32_ctrl);
if (rc < 0) {
pr_err("%s: cannot config ping/pong address of AF",
__func__);
@@ -1820,6 +1975,11 @@
}
break;
case VFE_CMD_STATS_AWB_START: {
+ if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
rc = vfe_stats_awb_buf_init(vfe32_ctrl, NULL);
if (rc < 0) {
pr_err("%s: cannot config ping/pong address of AWB",
@@ -1850,7 +2010,7 @@
break;
case VFE_CMD_STATS_IHIST_START: {
- rc = vfe_stats_ihist_buf_init(vfe32_ctrl, NULL);
+ rc = vfe_stats_ihist_buf_init(vfe32_ctrl);
if (rc < 0) {
pr_err("%s: cannot config ping/pong address of IHIST",
__func__);
@@ -1881,7 +2041,7 @@
case VFE_CMD_STATS_RS_START: {
- rc = vfe_stats_rs_buf_init(vfe32_ctrl, NULL);
+ rc = vfe_stats_rs_buf_init(vfe32_ctrl);
if (rc < 0) {
pr_err("%s: cannot config ping/pong address of RS",
__func__);
@@ -1906,7 +2066,7 @@
break;
case VFE_CMD_STATS_CS_START: {
- rc = vfe_stats_cs_buf_init(vfe32_ctrl, NULL);
+ rc = vfe_stats_cs_buf_init(vfe32_ctrl);
if (rc < 0) {
pr_err("%s: cannot config ping/pong address of CS",
__func__);
@@ -1930,6 +2090,67 @@
}
break;
+ case VFE_CMD_STATS_BG_START:
+ case VFE_CMD_STATS_BF_START:
+ case VFE_CMD_STATS_BHIST_START: {
+ if (!vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ old_val = msm_camera_io_r(
+ vfe32_ctrl->share_ctrl->vfebase + VFE_STATS_CFG);
+ module_val = msm_camera_io_r(
+ vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ if (VFE_CMD_STATS_BG_START == cmd->id) {
+ module_val |= AE_BG_ENABLE_MASK;
+ old_val |= STATS_BG_ENABLE_MASK;
+ rc = vfe_stats_aec_bg_buf_init(vfe32_ctrl);
+ if (rc < 0) {
+ pr_err("%s: cannot config ping/pong address of BG",
+ __func__);
+ goto proc_general_done;
+ }
+ } else if (VFE_CMD_STATS_BF_START == cmd->id) {
+ module_val |= AF_BF_ENABLE_MASK;
+ old_val |= STATS_BF_ENABLE_MASK;
+ rc = vfe_stats_af_bf_buf_init(vfe32_ctrl);
+ if (rc < 0) {
+ pr_err("%s: cannot config ping/pong address of BF",
+ __func__);
+ goto proc_general_done;
+ }
+ } else {
+ module_val |= SKIN_BHIST_ENABLE_MASK;
+ old_val |= STATS_BHIST_ENABLE_MASK;
+ rc = vfe_stats_bhist_buf_init(vfe32_ctrl);
+ if (rc < 0) {
+ pr_err("%s: cannot config ping/pong address of BHIST",
+ __func__);
+ goto proc_general_done;
+ }
+ }
+ msm_camera_io_w(old_val, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_STATS_CFG);
+ msm_camera_io_w(module_val,
+ vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ msm_camera_io_memcpy(
+ vfe32_ctrl->share_ctrl->vfebase +
+ vfe32_cmd[cmd->id].offset,
+ cmdp, (vfe32_cmd[cmd->id].length));
+ }
+ break;
case VFE_CMD_MCE_UPDATE:
case VFE_CMD_MCE_CFG:{
cmdp = kmalloc(cmd->length, GFP_ATOMIC);
@@ -2601,6 +2822,11 @@
break;
case VFE_CMD_STATS_AWB_STOP: {
+ if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
old_val = msm_camera_io_r(
vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
old_val &= ~AWB_ENABLE_MASK;
@@ -2609,6 +2835,11 @@
}
break;
case VFE_CMD_STATS_AE_STOP: {
+ if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
old_val = msm_camera_io_r(
vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
old_val &= ~AE_BG_ENABLE_MASK;
@@ -2617,17 +2848,16 @@
}
break;
case VFE_CMD_STATS_AF_STOP: {
+ if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
old_val = msm_camera_io_r(
vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
old_val &= ~AF_BF_ENABLE_MASK;
msm_camera_io_w(old_val,
vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
- rc = vfe32_stats_flush_enqueue(vfe32_ctrl, MSM_STATS_TYPE_AF);
- if (rc < 0) {
- pr_err("%s: dq stats buf err = %d",
- __func__, rc);
- return -EINVAL;
- }
}
break;
@@ -2657,6 +2887,37 @@
vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
}
break;
+
+ case VFE_CMD_STATS_BG_STOP:
+ case VFE_CMD_STATS_BF_STOP:
+ case VFE_CMD_STATS_BHIST_STOP: {
+ if (!vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ old_val = msm_camera_io_r(
+ vfe32_ctrl->share_ctrl->vfebase + VFE_STATS_CFG);
+ if (VFE_CMD_STATS_BG_STOP == cmd->id)
+ old_val &= ~STATS_BG_ENABLE_MASK;
+ else if (VFE_CMD_STATS_BF_STOP == cmd->id)
+ old_val &= ~STATS_BF_ENABLE_MASK;
+ else
+ old_val &= ~STATS_BHIST_ENABLE_MASK;
+ msm_camera_io_w(old_val,
+ vfe32_ctrl->share_ctrl->vfebase + VFE_STATS_CFG);
+ if (VFE_CMD_STATS_BF_STOP == cmd->id) {
+ rc = vfe32_stats_flush_enqueue(vfe32_ctrl,
+ MSM_STATS_TYPE_BF);
+ if (rc < 0) {
+ pr_err("%s: dq stats buf err = %d",
+ __func__, rc);
+ return -EINVAL;
+ }
+ }
+ }
+ break;
+
case VFE_CMD_STOP:
pr_info("vfe32_proc_general: cmdID = %s\n",
vfe32_general_cmd[cmd->id]);
@@ -3301,18 +3562,48 @@
vfe32_ctrl->share_ctrl->vfebase + VFE_CLAMP_MAX);
/* stats UB config */
- msm_camera_io_w(0x3980007,
- vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AEC_UB_CFG);
- msm_camera_io_w(0x3A00007,
- vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AF_UB_CFG);
- msm_camera_io_w(0x3A8000F,
- vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AWB_UB_CFG);
- msm_camera_io_w(0x3B80007,
- vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_RS_UB_CFG);
- msm_camera_io_w(0x3C0001F,
- vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_CS_UB_CFG);
- msm_camera_io_w(0x3E0001F,
- vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_HIST_UB_CFG);
+ CDBG("%s: Use bayer stats = %d\n", __func__,
+ vfe32_use_bayer_stats(vfe32_ctrl));
+ if (!vfe32_use_bayer_stats(vfe32_ctrl)) {
+ msm_camera_io_w(0x3980007,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AEC_BG_UB_CFG);
+ msm_camera_io_w(0x3A00007,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AF_BF_UB_CFG);
+ msm_camera_io_w(0x3A8000F,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AWB_UB_CFG);
+ msm_camera_io_w(0x3B80007,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_RS_UB_CFG);
+ msm_camera_io_w(0x3C0001F,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_CS_UB_CFG);
+ msm_camera_io_w(0x3E0001F,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_HIST_UB_CFG);
+ } else {
+ msm_camera_io_w(0x350001F,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_HIST_UB_CFG);
+ msm_camera_io_w(0x370002F,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AEC_BG_UB_CFG);
+ msm_camera_io_w(0x3A0002F,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AF_BF_UB_CFG);
+ msm_camera_io_w(0x3D00007,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_RS_UB_CFG);
+ msm_camera_io_w(0x3D8001F,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_CS_UB_CFG);
+ msm_camera_io_w(0x3F80007,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_SKIN_BHIST_UB_CFG);
+ }
+ vfe32_reset_dmi_tables(vfe32_ctrl);
}
static void vfe32_process_reset_irq(
@@ -3785,25 +4076,36 @@
/* @todo This is causing issues, needs further investigation */
/* spin_lock_irqsave(&ctrl->state_lock, flags); */
struct isp_msg_stats msgStats;
+ uint32_t stats_type;
msgStats.frameCounter = vfe32_ctrl->share_ctrl->vfeFrameId;
if (vfe32_ctrl->simultaneous_sof_stat)
msgStats.frameCounter--;
msgStats.buffer = bufAddress;
switch (statsNum) {
case statsAeNum:{
- msgStats.id = MSG_ID_STATS_AEC;
+ msgStats.id =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSG_ID_STATS_AEC
+ : MSG_ID_STATS_BG;
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ?
+ MSM_STATS_TYPE_AEC : MSM_STATS_TYPE_BG;
rc = vfe32_ctrl->stats_ops.dispatch(
vfe32_ctrl->stats_ops.stats_ctrl,
- MSM_STATS_TYPE_AEC, bufAddress,
+ stats_type, bufAddress,
&msgStats.buf_idx, &vaddr, &msgStats.fd,
vfe32_ctrl->stats_ops.client);
}
break;
case statsAfNum:{
- msgStats.id = MSG_ID_STATS_AF;
+ msgStats.id =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSG_ID_STATS_AF
+ : MSG_ID_STATS_BF;
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AF
+ : MSM_STATS_TYPE_BF;
rc = vfe32_ctrl->stats_ops.dispatch(
vfe32_ctrl->stats_ops.stats_ctrl,
- MSM_STATS_TYPE_AF, bufAddress,
+ stats_type, bufAddress,
&msgStats.buf_idx, &vaddr, &msgStats.fd,
vfe32_ctrl->stats_ops.client);
}
@@ -3845,6 +4147,15 @@
vfe32_ctrl->stats_ops.client);
}
break;
+ case statsSkinNum: {
+ msgStats.id = MSG_ID_STATS_BHIST;
+ rc = vfe32_ctrl->stats_ops.dispatch(
+ vfe32_ctrl->stats_ops.stats_ctrl,
+ MSM_STATS_TYPE_BHIST, bufAddress,
+ &msgStats.buf_idx, &vaddr, &msgStats.fd,
+ vfe32_ctrl->stats_ops.client);
+ }
+ break;
default:
goto stats_done;
@@ -3875,9 +4186,9 @@
msgStats.status_bits = status_bits;
- msgStats.aec.buff = vfe32_ctrl->aecStatsControl.bufToRender;
+ msgStats.aec.buff = vfe32_ctrl->aecbgStatsControl.bufToRender;
msgStats.awb.buff = vfe32_ctrl->awbStatsControl.bufToRender;
- msgStats.af.buff = vfe32_ctrl->afStatsControl.bufToRender;
+ msgStats.af.buff = vfe32_ctrl->afbfStatsControl.bufToRender;
msgStats.ihist.buff = vfe32_ctrl->ihistStatsControl.bufToRender;
msgStats.rs.buff = vfe32_ctrl->rsStatsControl.bufToRender;
@@ -3892,24 +4203,28 @@
&msgStats);
}
-static void vfe32_process_stats_ae_irq(struct vfe32_ctrl_type *vfe32_ctrl)
+static void vfe32_process_stats_ae_bg_irq(struct vfe32_ctrl_type *vfe32_ctrl)
{
unsigned long flags;
uint32_t addr;
+ uint32_t stats_type;
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AEC
+ : MSM_STATS_TYPE_BG;
spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
- addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AEC);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
if (addr) {
- vfe32_ctrl->aecStatsControl.bufToRender =
+ vfe32_ctrl->aecbgStatsControl.bufToRender =
vfe32_process_stats_irq_common(vfe32_ctrl, statsAeNum,
addr);
vfe_send_stats_msg(vfe32_ctrl,
- vfe32_ctrl->aecStatsControl.bufToRender, statsAeNum);
+ vfe32_ctrl->aecbgStatsControl.bufToRender, statsAeNum);
} else{
- vfe32_ctrl->aecStatsControl.droppedStatsFrameCount++;
+ vfe32_ctrl->aecbgStatsControl.droppedStatsFrameCount++;
CDBG("%s: droppedStatsFrameCount = %d", __func__,
- vfe32_ctrl->aecStatsControl.droppedStatsFrameCount);
+ vfe32_ctrl->aecbgStatsControl.droppedStatsFrameCount);
}
}
@@ -3934,24 +4249,50 @@
}
}
-static void vfe32_process_stats_af_irq(struct vfe32_ctrl_type *vfe32_ctrl)
+static void vfe32_process_stats_af_bf_irq(struct vfe32_ctrl_type *vfe32_ctrl)
{
unsigned long flags;
uint32_t addr;
+ uint32_t stats_type;
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AF
+ : MSM_STATS_TYPE_BF;
spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
- addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AF);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
if (addr) {
- vfe32_ctrl->afStatsControl.bufToRender =
+ vfe32_ctrl->afbfStatsControl.bufToRender =
vfe32_process_stats_irq_common(vfe32_ctrl, statsAfNum,
addr);
vfe_send_stats_msg(vfe32_ctrl,
- vfe32_ctrl->afStatsControl.bufToRender, statsAfNum);
+ vfe32_ctrl->afbfStatsControl.bufToRender, statsAfNum);
} else{
- vfe32_ctrl->afStatsControl.droppedStatsFrameCount++;
+ vfe32_ctrl->afbfStatsControl.droppedStatsFrameCount++;
CDBG("%s: droppedStatsFrameCount = %d", __func__,
- vfe32_ctrl->afStatsControl.droppedStatsFrameCount);
+ vfe32_ctrl->afbfStatsControl.droppedStatsFrameCount);
+ }
+}
+
+static void vfe32_process_stats_bhist_irq(struct vfe32_ctrl_type *vfe32_ctrl)
+{
+ unsigned long flags;
+ uint32_t addr;
+ spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_BHIST);
+ spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+ if (addr) {
+ vfe32_ctrl->bhistStatsControl.bufToRender =
+ vfe32_process_stats_irq_common(vfe32_ctrl,
+ statsSkinNum, addr);
+
+ vfe_send_stats_msg(vfe32_ctrl,
+ vfe32_ctrl->bhistStatsControl.bufToRender,
+ statsSkinNum);
+ } else{
+ vfe32_ctrl->bhistStatsControl.droppedStatsFrameCount++;
+ CDBG("%s: droppedStatsFrameCount = %d", __func__,
+ vfe32_ctrl->bhistStatsControl.droppedStatsFrameCount);
}
}
@@ -4026,23 +4367,28 @@
unsigned long flags;
int32_t process_stats = false;
uint32_t addr;
+ uint32_t stats_type;
CDBG("%s, stats = 0x%x\n", __func__, status_bits);
spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
- if (status_bits & VFE_IRQ_STATUS0_STATS_AEC) {
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AEC
+ : MSM_STATS_TYPE_BG;
+
+ if (status_bits & VFE_IRQ_STATUS0_STATS_AEC_BG) {
addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl,
- MSM_STATS_TYPE_AEC);
+ stats_type);
if (addr) {
- vfe32_ctrl->aecStatsControl.bufToRender =
+ vfe32_ctrl->aecbgStatsControl.bufToRender =
vfe32_process_stats_irq_common(
vfe32_ctrl, statsAeNum, addr);
process_stats = true;
} else{
- vfe32_ctrl->aecStatsControl.bufToRender = 0;
- vfe32_ctrl->aecStatsControl.droppedStatsFrameCount++;
+ vfe32_ctrl->aecbgStatsControl.bufToRender = 0;
+ vfe32_ctrl->aecbgStatsControl.droppedStatsFrameCount++;
}
} else {
- vfe32_ctrl->aecStatsControl.bufToRender = 0;
+ vfe32_ctrl->aecbgStatsControl.bufToRender = 0;
}
if (status_bits & VFE_IRQ_STATUS0_STATS_AWB) {
@@ -4062,21 +4408,24 @@
vfe32_ctrl->awbStatsControl.bufToRender = 0;
}
- if (status_bits & VFE_IRQ_STATUS0_STATS_AF) {
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AF
+ : MSM_STATS_TYPE_BF;
+ if (status_bits & VFE_IRQ_STATUS0_STATS_AF_BF) {
addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl,
- MSM_STATS_TYPE_AF);
+ stats_type);
if (addr) {
- vfe32_ctrl->afStatsControl.bufToRender =
+ vfe32_ctrl->afbfStatsControl.bufToRender =
vfe32_process_stats_irq_common(
vfe32_ctrl, statsAfNum,
addr);
process_stats = true;
} else {
- vfe32_ctrl->afStatsControl.bufToRender = 0;
- vfe32_ctrl->afStatsControl.droppedStatsFrameCount++;
+ vfe32_ctrl->afbfStatsControl.bufToRender = 0;
+ vfe32_ctrl->afbfStatsControl.droppedStatsFrameCount++;
}
} else {
- vfe32_ctrl->afStatsControl.bufToRender = 0;
+ vfe32_ctrl->afbfStatsControl.bufToRender = 0;
}
if (status_bits & VFE_IRQ_STATUS0_STATS_IHIST) {
@@ -4182,17 +4531,21 @@
CDBG("irq resetAckIrq\n");
vfe32_process_reset_irq(vfe32_ctrl);
break;
- case VFE_IRQ_STATUS0_STATS_AEC:
+ case VFE_IRQ_STATUS0_STATS_AEC_BG:
CDBG("Stats AEC irq occured.\n");
- vfe32_process_stats_ae_irq(vfe32_ctrl);
+ vfe32_process_stats_ae_bg_irq(vfe32_ctrl);
break;
case VFE_IRQ_STATUS0_STATS_AWB:
CDBG("Stats AWB irq occured.\n");
vfe32_process_stats_awb_irq(vfe32_ctrl);
break;
- case VFE_IRQ_STATUS0_STATS_AF:
+ case VFE_IRQ_STATUS0_STATS_AF_BF:
CDBG("Stats AF irq occured.\n");
- vfe32_process_stats_af_irq(vfe32_ctrl);
+ vfe32_process_stats_af_bf_irq(vfe32_ctrl);
+ break;
+ case VFE_IRQ_STATUS0_STATS_SK_BHIST:
+ CDBG("Stats BHIST irq occured.\n");
+ vfe32_process_stats_bhist_irq(vfe32_ctrl);
break;
case VFE_IRQ_STATUS0_STATS_IHIST:
CDBG("Stats IHIST irq occured.\n");
@@ -4261,11 +4614,11 @@
} else {
stat_interrupt =
(qcmd->vfeInterruptStatus0 &
- VFE_IRQ_STATUS0_STATS_AEC) |
+ VFE_IRQ_STATUS0_STATS_AEC_BG) |
(qcmd->vfeInterruptStatus0 &
VFE_IRQ_STATUS0_STATS_AWB) |
(qcmd->vfeInterruptStatus0 &
- VFE_IRQ_STATUS0_STATS_AF) |
+ VFE_IRQ_STATUS0_STATS_AF_BF) |
(qcmd->vfeInterruptStatus0 &
VFE_IRQ_STATUS0_STATS_IHIST) |
(qcmd->vfeInterruptStatus0 &
@@ -4333,10 +4686,10 @@
} else {
/* process individual stats interrupt. */
if (qcmd->vfeInterruptStatus0 &
- VFE_IRQ_STATUS0_STATS_AEC)
+ VFE_IRQ_STATUS0_STATS_AEC_BG)
v4l2_subdev_notify(&axi_ctrl->subdev,
NOTIFY_VFE_IRQ,
- (void *)VFE_IRQ_STATUS0_STATS_AEC);
+ (void *)VFE_IRQ_STATUS0_STATS_AEC_BG);
if (qcmd->vfeInterruptStatus0 &
VFE_IRQ_STATUS0_STATS_AWB)
@@ -4345,10 +4698,15 @@
(void *)VFE_IRQ_STATUS0_STATS_AWB);
if (qcmd->vfeInterruptStatus0 &
- VFE_IRQ_STATUS0_STATS_AF)
+ VFE_IRQ_STATUS0_STATS_AF_BF)
v4l2_subdev_notify(&axi_ctrl->subdev,
NOTIFY_VFE_IRQ,
- (void *)VFE_IRQ_STATUS0_STATS_AF);
+ (void *)VFE_IRQ_STATUS0_STATS_AF_BF);
+ if (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_SK_BHIST)
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_VFE_IRQ,
+ (void *)VFE_IRQ_STATUS0_STATS_SK_BHIST);
if (qcmd->vfeInterruptStatus0 &
VFE_IRQ_STATUS0_STATS_IHIST)
@@ -4521,6 +4879,22 @@
vfe_ctrl->stats_ops.client);
}
break;
+ case VFE_CMD_STATS_UNREGBUF:
+ {
+ struct msm_stats_reqbuf *req_buf = NULL;
+ req_buf = (struct msm_stats_reqbuf *)cmd->value;
+ if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+ /* error: the length does not match */
+ pr_err("%s: stats reqbuf input size = %d,\n"
+ "struct size = %d, mismatch\n",
+ __func__, cmd->length,
+ sizeof(struct msm_stats_reqbuf));
+ rc = -EINVAL;
+ goto end;
+ }
+ rc = vfe32_stats_unregbuf(vfe_ctrl, req_buf);
+ }
+ break;
default:
rc = -1;
pr_err("%s: cmd_type %d not supported", __func__,
@@ -4570,27 +4944,31 @@
case VFE_CMD_STATS_REQBUF:
case VFE_CMD_STATS_ENQUEUEBUF:
case VFE_CMD_STATS_FLUSH_BUFQ:
+ case VFE_CMD_STATS_UNREGBUF:
/* for easy porting put in one envelope */
rc = vfe_stats_bufq_sub_ioctl(vfe32_ctrl,
cmd, vfe_params->data);
return rc;
default:
if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
- cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
- cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR &&
- cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
- cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
- cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
- cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
- cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
- cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
- if (copy_from_user(&vfecmd,
+ cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
+ cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR &&
+ cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_BG_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_BF_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_BHIST_BUF_RELEASE) {
+ if (copy_from_user(&vfecmd,
(void __user *)(cmd->value),
sizeof(vfecmd))) {
- pr_err("%s %d: copy_from_user failed\n",
- __func__, __LINE__);
- return -EFAULT;
- }
+ pr_err("%s %d: copy_from_user failed\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
} else {
/* here either stats release or frame release. */
if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
@@ -4612,6 +4990,25 @@
sack->nextStatsBuf = *(uint32_t *)data;
}
}
+ }
+
+ CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
+
+ if ((cmd->cmd_type == CMD_STATS_AF_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_AWB_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_IHIST_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_RS_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_CS_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_AEC_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_BG_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_BF_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_BHIST_ENABLE)) {
+ struct axidata *axid;
+ axid = data;
+ if (!axid) {
+ rc = -EFAULT;
+ goto vfe32_config_done;
+ }
CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
if ((cmd->cmd_type == CMD_STATS_AF_ENABLE) ||
@@ -4625,38 +5022,56 @@
goto vfe32_config_done;
}
switch (cmd->cmd_type) {
- case CMD_GENERAL:
- rc = vfe32_proc_general(pmctl, &vfecmd, vfe32_ctrl);
- break;
- case CMD_CONFIG_PING_ADDR: {
- int path = *((int *)cmd->value);
- struct vfe32_output_ch *outch =
- vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
- outch->ping = *((struct msm_free_buf *)data);
+ case CMD_STATS_AEC_ENABLE:
+ case CMD_STATS_BG_ENABLE:
+ case CMD_STATS_BF_ENABLE:
+ case CMD_STATS_BHIST_ENABLE:
+ case CMD_STATS_AWB_ENABLE:
+ case CMD_STATS_IHIST_ENABLE:
+ case CMD_STATS_RS_ENABLE:
+ case CMD_STATS_CS_ENABLE:
+ default:
+ pr_err("%s Unsupported cmd type %d",
+ __func__, cmd->cmd_type);
+ break;
}
- break;
- case CMD_CONFIG_PONG_ADDR: {
- int path = *((int *)cmd->value);
- struct vfe32_output_ch *outch =
- vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
- outch->pong = *((struct msm_free_buf *)data);
- }
+ goto vfe32_config_done;
+ }
+ switch (cmd->cmd_type) {
+ case CMD_GENERAL:
+ rc = vfe32_proc_general(pmctl, &vfecmd, vfe32_ctrl);
+ break;
+ case CMD_CONFIG_PING_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe32_output_ch *outch =
+ vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+ outch->ping = *((struct msm_free_buf *)data);
+ }
+ break;
+
+ case CMD_CONFIG_PONG_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe32_output_ch *outch =
+ vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+ outch->pong = *((struct msm_free_buf *)data);
+ }
+ break;
+
+ case CMD_CONFIG_FREE_BUF_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe32_output_ch *outch =
+ vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+ outch->free_buf = *((struct msm_free_buf *)data);
+ }
+ break;
+
+ case CMD_SNAP_BUF_RELEASE:
break;
- case CMD_CONFIG_FREE_BUF_ADDR: {
- int path = *((int *)cmd->value);
- struct vfe32_output_ch *outch =
- vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
- outch->free_buf = *((struct msm_free_buf *)data);
- }
- break;
- case CMD_SNAP_BUF_RELEASE:
- break;
- default:
- pr_err("%s Unsupported AXI configuration %x ", __func__,
- cmd->cmd_type);
- break;
- }
+ default:
+ pr_err("%s Unsupported AXI configuration %x ", __func__,
+ cmd->cmd_type);
+ break;
}
vfe32_config_done:
kfree(scfg);
@@ -5424,6 +5839,8 @@
axi32_do_tasklet, (unsigned long)axi_ctrl);
vfe32_ctrl->pdev = pdev;
+ /*disable bayer stats by default*/
+ vfe32_ctrl->ver_num.main = 0;
return 0;
vfe32_no_resource:
diff --git a/drivers/media/video/msm/msm_vfe32.h b/drivers/media/video/msm/msm_vfe32.h
index 9336cfb..0b685e1 100644
--- a/drivers/media/video/msm/msm_vfe32.h
+++ b/drivers/media/video/msm/msm_vfe32.h
@@ -104,12 +104,13 @@
#define VFE_IRQ_STATUS1_RESET_AXI_HALT_ACK_MASK 0x00800000
#define VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK 0x01000000
-#define VFE_IRQ_STATUS0_STATS_AEC 0x2000 /* bit 13 */
-#define VFE_IRQ_STATUS0_STATS_AF 0x4000 /* bit 14 */
-#define VFE_IRQ_STATUS0_STATS_AWB 0x8000 /* bit 15 */
-#define VFE_IRQ_STATUS0_STATS_RS 0x10000 /* bit 16 */
-#define VFE_IRQ_STATUS0_STATS_CS 0x20000 /* bit 17 */
-#define VFE_IRQ_STATUS0_STATS_IHIST 0x40000 /* bit 18 */
+#define VFE_IRQ_STATUS0_STATS_AEC_BG 0x2000 /* bit 13 */
+#define VFE_IRQ_STATUS0_STATS_AF_BF 0x4000 /* bit 14 */
+#define VFE_IRQ_STATUS0_STATS_AWB 0x8000 /* bit 15 */
+#define VFE_IRQ_STATUS0_STATS_RS 0x10000 /* bit 16 */
+#define VFE_IRQ_STATUS0_STATS_CS 0x20000 /* bit 17 */
+#define VFE_IRQ_STATUS0_STATS_IHIST 0x40000 /* bit 18 */
+#define VFE_IRQ_STATUS0_STATS_SK_BHIST 0x80000 /* bit 19 */
#define VFE_IRQ_STATUS0_SYNC_TIMER0 0x2000000 /* bit 25 */
#define VFE_IRQ_STATUS0_SYNC_TIMER1 0x4000000 /* bit 26 */
@@ -174,8 +175,13 @@
#define RS_CS_ENABLE_MASK 0x00000300 /* bit 8,9 */
#define CLF_ENABLE_MASK 0x00002000 /* bit 13 */
#define IHIST_ENABLE_MASK 0x00010000 /* bit 16 */
+#define SKIN_BHIST_ENABLE_MASK 0x00080000 /* bit 19 */
#define STATS_ENABLE_MASK 0x000903E0 /* bit 19,16,9,8,7,6,5*/
+#define STATS_BG_ENABLE_MASK 0x00000002 /* bit 1 */
+#define STATS_BF_ENABLE_MASK 0x00000004 /* bit 2 */
+#define STATS_BHIST_ENABLE_MASK 0x00000008 /* bit 3 */
+
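
For illustration, a minimal standalone sketch of how the VFE_CMD_STATS_BHIST_START branch earlier in this patch combines these masks; the fake_* variables are placeholders for the memory-mapped VFE_MODULE_CFG and VFE_STATS_CFG registers that the driver accesses with msm_camera_io_r()/msm_camera_io_w():

	#include <stdint.h>
	#include <stdio.h>

	#define SKIN_BHIST_ENABLE_MASK   0x00080000	/* bit 19 of VFE_MODULE_CFG */
	#define STATS_BHIST_ENABLE_MASK  0x00000008	/* bit 3 of VFE_STATS_CFG */

	int main(void)
	{
		/* Placeholders for the two memory-mapped registers. */
		uint32_t fake_module_cfg = 0;
		uint32_t fake_stats_cfg = 0;

		/* Same read-modify-write that the start handler performs. */
		fake_module_cfg |= SKIN_BHIST_ENABLE_MASK;
		fake_stats_cfg |= STATS_BHIST_ENABLE_MASK;

		printf("MODULE_CFG=0x%08x STATS_CFG=0x%08x\n",
		       fake_module_cfg, fake_stats_cfg);
		return 0;
	}
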
#define VFE_REG_UPDATE_TRIGGER 1
#define VFE_PM_BUF_MAX_CNT_MASK 0xFF
#define VFE_DMI_CFG_DEFAULT 0x00000100
@@ -378,6 +384,15 @@
#define V32_CLF_CHROMA_UPDATE_OFF 0x000006F0
#define V32_CLF_CHROMA_UPDATE_LEN 8
+#define V32_STATS_BG_OFF 0x00000700
+#define V32_STATS_BG_LEN 12
+
+#define V32_STATS_BF_OFF 0x0000070c
+#define V32_STATS_BF_LEN 24
+
+#define V32_STATS_BHIST_OFF 0x00000724
+#define V32_STATS_BHIST_LEN 8
+
struct vfe_cmd_hw_version {
uint32_t minorVersion;
uint32_t majorVersion;
@@ -845,12 +860,12 @@
#define VFE_AXI_STATUS 0x000001DC
#define VFE_BUS_STATS_PING_PONG_BASE 0x000000F4
-#define VFE_BUS_STATS_AEC_WR_PING_ADDR 0x000000F4
-#define VFE_BUS_STATS_AEC_WR_PONG_ADDR 0x000000F8
-#define VFE_BUS_STATS_AEC_UB_CFG 0x000000FC
-#define VFE_BUS_STATS_AF_WR_PING_ADDR 0x00000100
-#define VFE_BUS_STATS_AF_WR_PONG_ADDR 0x00000104
-#define VFE_BUS_STATS_AF_UB_CFG 0x00000108
+#define VFE_BUS_STATS_AEC_BG_WR_PING_ADDR 0x000000F4
+#define VFE_BUS_STATS_AEC_BG_WR_PONG_ADDR 0x000000F8
+#define VFE_BUS_STATS_AEC_BG_UB_CFG 0x000000FC
+#define VFE_BUS_STATS_AF_BF_WR_PING_ADDR 0x00000100
+#define VFE_BUS_STATS_AF_BF_WR_PONG_ADDR 0x00000104
+#define VFE_BUS_STATS_AF_BF_UB_CFG 0x00000108
#define VFE_BUS_STATS_AWB_WR_PING_ADDR 0x0000010C
#define VFE_BUS_STATS_AWB_WR_PONG_ADDR 0x00000110
#define VFE_BUS_STATS_AWB_UB_CFG 0x00000114
@@ -864,9 +879,9 @@
#define VFE_BUS_STATS_HIST_WR_PING_ADDR 0x00000130
#define VFE_BUS_STATS_HIST_WR_PONG_ADDR 0x00000134
#define VFE_BUS_STATS_HIST_UB_CFG 0x00000138
-#define VFE_BUS_STATS_SKIN_WR_PING_ADDR 0x0000013C
-#define VFE_BUS_STATS_SKIN_WR_PONG_ADDR 0x00000140
-#define VFE_BUS_STATS_SKIN_UB_CFG 0x00000144
+#define VFE_BUS_STATS_SKIN_BHIST_WR_PING_ADDR 0x0000013C
+#define VFE_BUS_STATS_SKIN_BHIST_WR_PONG_ADDR 0x00000140
+#define VFE_BUS_STATS_SKIN_BHIST_UB_CFG 0x00000144
#define VFE_CAMIF_COMMAND 0x000001E0
#define VFE_CAMIF_STATUS 0x00000204
#define VFE_REG_UPDATE_CMD 0x00000260
@@ -888,6 +903,7 @@
#define VFE_STATS_AWB_SGW_CFG 0x00000554
#define VFE_DMI_CFG 0x00000598
#define VFE_DMI_ADDR 0x0000059C
+#define VFE_DMI_DATA_HI 0x000005A0
#define VFE_DMI_DATA_LO 0x000005A4
#define VFE_BUS_IO_FORMAT_CFG 0x000006F8
#define VFE_PIXEL_IF_CFG 0x000006FC
@@ -990,12 +1006,14 @@
uint32_t output2Period;
uint32_t vfeFrameSkipCount;
uint32_t vfeFrameSkipPeriod;
- struct vfe_stats_control afStatsControl;
+ struct msm_ver_num_info ver_num;
+ struct vfe_stats_control afbfStatsControl;
struct vfe_stats_control awbStatsControl;
- struct vfe_stats_control aecStatsControl;
+ struct vfe_stats_control aecbgStatsControl;
struct vfe_stats_control ihistStatsControl;
struct vfe_stats_control rsStatsControl;
struct vfe_stats_control csStatsControl;
+ struct vfe_stats_control bhistStatsControl;
/* v4l2 subdev */
struct v4l2_subdev subdev;
diff --git a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c b/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
index 64e0385..32b33e2 100644
--- a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
+++ b/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
@@ -404,6 +404,25 @@
return 0L;
}
+static unsigned long vfe2x_stats_unregbuf(
+ struct msm_stats_reqbuf *req_buf)
+{
+ int i = 0, rc = 0;
+
+ for (i = 0; i < req_buf->num_buf; i++) {
+ rc = vfe2x_ctrl->stats_ops.buf_unprepare(
+ vfe2x_ctrl->stats_ops.stats_ctrl,
+ req_buf->stats_type, i,
+ vfe2x_ctrl->stats_ops.client);
+ if (rc < 0) {
+ pr_err("%s: unreg stats buf (type = %d) err = %d",
+ __func__, req_buf->stats_type, rc);
+ return rc;
+ }
+ }
+ return 0L;
+}
+
static int vfe2x_stats_buf_init(enum msm_stats_enum_type type)
{
unsigned long flags;
@@ -556,6 +575,22 @@
vfe2x_ctrl->stats_ops.client);
}
break;
+ case VFE_CMD_STATS_UNREGBUF:
+ {
+ struct msm_stats_reqbuf *req_buf = NULL;
+ req_buf = (struct msm_stats_reqbuf *)cmd->value;
+ if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+ /* error: the length does not match */
+ pr_err("%s: stats reqbuf input size = %d,\n"
+ "struct size = %d, mismatch\n",
+ __func__, cmd->length,
+ sizeof(struct msm_stats_reqbuf));
+ rc = -EINVAL;
+ goto end;
+ }
+ rc = vfe2x_stats_unregbuf(req_buf);
+ }
+ break;
default:
rc = -1;
pr_err("%s: cmd_type %d not supported",
@@ -659,29 +694,63 @@
switch (id) {
case MSG_SNAPSHOT:
msm_camio_set_perf_lvl(S_PREVIEW);
- vfe_7x_ops(driver_data, MSG_OUTPUT_S, len, getevent);
- if (!raw_mode)
- vfe_7x_ops(driver_data, MSG_OUTPUT_T,
+ while (vfe2x_ctrl->snap.frame_cnt <
+ vfe2x_ctrl->num_snap) {
+ vfe_7x_ops(driver_data, MSG_OUTPUT_S, len,
+ getevent);
+ if (!raw_mode)
+ vfe_7x_ops(driver_data, MSG_OUTPUT_T,
len, getevent);
+ }
vfe2x_send_isp_msg(vfe2x_ctrl, MSG_ID_SNAPSHOT_DONE);
kfree(data);
return;
case MSG_OUTPUT_S:
outch = &vfe2x_ctrl->snap;
- y_phy = outch->ping.ch_paddr[0];
- cbcr_phy = outch->ping.ch_paddr[1];
- CDBG("MSG_OUTPUT_S: %x %x\n",
- (unsigned int)y_phy, (unsigned int)cbcr_phy);
+ if (outch->frame_cnt == 0) {
+ y_phy = outch->ping.ch_paddr[0];
+ cbcr_phy = outch->ping.ch_paddr[1];
+ } else if (outch->frame_cnt == 1) {
+ y_phy = outch->pong.ch_paddr[0];
+ cbcr_phy = outch->pong.ch_paddr[1];
+ } else if (outch->frame_cnt == 2) {
+ y_phy = outch->free_buf.ch_paddr[0];
+ cbcr_phy = outch->free_buf.ch_paddr[1];
+ } else {
+ y_phy = outch->free_buf_arr[outch->frame_cnt
+ - 3].ch_paddr[0];
+ cbcr_phy = outch->free_buf_arr[outch->frame_cnt
+ - 3].ch_paddr[1];
+ }
+ outch->frame_cnt++;
+ CDBG("MSG_OUTPUT_S: %x %x %d\n",
+ (unsigned int)y_phy, (unsigned int)cbcr_phy,
+ outch->frame_cnt);
vfe_send_outmsg(&vfe2x_ctrl->subdev,
MSG_ID_OUTPUT_PRIMARY,
y_phy, cbcr_phy);
break;
case MSG_OUTPUT_T:
outch = &vfe2x_ctrl->thumb;
- y_phy = outch->ping.ch_paddr[0];
- cbcr_phy = outch->ping.ch_paddr[1];
- CDBG("MSG_OUTPUT_T: %x %x\n",
- (unsigned int)y_phy, (unsigned int)cbcr_phy);
+ if (outch->frame_cnt == 0) {
+ y_phy = outch->ping.ch_paddr[0];
+ cbcr_phy = outch->ping.ch_paddr[1];
+ } else if (outch->frame_cnt == 1) {
+ y_phy = outch->pong.ch_paddr[0];
+ cbcr_phy = outch->pong.ch_paddr[1];
+ } else if (outch->frame_cnt == 2) {
+ y_phy = outch->free_buf.ch_paddr[0];
+ cbcr_phy = outch->free_buf.ch_paddr[1];
+ } else {
+ y_phy = outch->free_buf_arr[outch->frame_cnt
+ - 3].ch_paddr[0];
+ cbcr_phy = outch->free_buf_arr[outch->frame_cnt
+ - 3].ch_paddr[1];
+ }
+ outch->frame_cnt++;
+ CDBG("MSG_OUTPUT_T: %x %x %d\n",
+ (unsigned int)y_phy, (unsigned int)cbcr_phy,
+ outch->frame_cnt);
vfe_send_outmsg(&vfe2x_ctrl->subdev,
MSG_ID_OUTPUT_SECONDARY,
y_phy, cbcr_phy);
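
The MSG_OUTPUT_S and MSG_OUTPUT_T handlers above walk the burst buffers in a fixed order: ping, pong, the single free buffer, then free_buf_arr[frame_cnt - 3]. A standalone sketch of that indexing, with hypothetical struct names standing in for the driver's struct buf_info and struct msm_free_buf:

	#include <stdint.h>
	#include <stdio.h>

	#define FREE_BUF_ARR_SIZE 5	/* 8 DSP buffers less ping, pong and free */

	struct fake_buf { uint32_t y_phy, cbcr_phy; };

	struct fake_outch {
		struct fake_buf ping, pong, free_buf;
		struct fake_buf free_buf_arr[FREE_BUF_ARR_SIZE];
		int frame_cnt;
	};

	/* Pick the buffer for the current burst frame and advance frame_cnt,
	 * mirroring the frame_cnt checks in the handlers above. */
	static struct fake_buf next_burst_buf(struct fake_outch *outch)
	{
		struct fake_buf b;

		if (outch->frame_cnt == 0)
			b = outch->ping;
		else if (outch->frame_cnt == 1)
			b = outch->pong;
		else if (outch->frame_cnt == 2)
			b = outch->free_buf;
		else
			b = outch->free_buf_arr[outch->frame_cnt - 3];
		outch->frame_cnt++;
		return b;
	}

	int main(void)
	{
		struct fake_outch ch = {
			.ping = {0x1000, 0x1800}, .pong = {0x2000, 0x2800},
			.free_buf = {0x3000, 0x3800},
			.free_buf_arr = { {0x4000, 0x4800}, {0x5000, 0x5800} },
		};
		int i;

		for (i = 0; i < 5; i++) {
			struct fake_buf b = next_burst_buf(&ch);
			printf("frame %d: y=0x%x cbcr=0x%x\n", i, b.y_phy, b.cbcr_phy);
		}
		return 0;
	}
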
@@ -885,8 +954,9 @@
vfe2x_ctrl->vfeFrameId++;
if (vfe2x_ctrl->vfeFrameId == 0)
vfe2x_ctrl->vfeFrameId = 1; /* wrapped back */
- if ((op_mode & SNAPSHOT_MASK_MODE) && !raw_mode) {
- pr_err("Ignore SOF for snapshot\n");
+ if ((op_mode & SNAPSHOT_MASK_MODE) && !raw_mode
+ && (vfe2x_ctrl->num_snap <= 1)) {
+ CDBG("Ignore SOF for snapshot\n");
kfree(data);
return;
}
@@ -990,7 +1060,76 @@
if (op_mode & SNAPSHOT_MASK_MODE)
o_mode = SNAPSHOT_MASK_MODE;
- if (mode == OUTPUT_SEC) {
+ if ((o_mode == SNAPSHOT_MASK_MODE) && (vfe2x_ctrl->num_snap > 1)) {
+ CDBG("%s: BURST mode freebuf cnt %d", __func__,
+ ad->free_buf_cnt);
+ /* Burst */
+ if (mode == OUTPUT_SEC) {
+ ao->output1buffer1_y_phy = ad->ping.ch_paddr[0];
+ ao->output1buffer1_cbcr_phy = ad->ping.ch_paddr[1];
+ ao->output1buffer2_y_phy = ad->pong.ch_paddr[0];
+ ao->output1buffer2_cbcr_phy = ad->pong.ch_paddr[1];
+ ao->output1buffer3_y_phy = ad->free_buf.ch_paddr[0];
+ ao->output1buffer3_cbcr_phy = ad->free_buf.ch_paddr[1];
+ bptr = &ao->output1buffer4_y_phy;
+ for (cnt = 0; cnt < 5; cnt++) {
+ *bptr = (cnt < ad->free_buf_cnt-3) ?
+ ad->free_buf_arr[cnt].ch_paddr[0] :
+ ad->pong.ch_paddr[0];
+ bptr++;
+ *bptr = (cnt < ad->free_buf_cnt-3) ?
+ ad->free_buf_arr[cnt].ch_paddr[1] :
+ ad->pong.ch_paddr[1];
+ bptr++;
+ }
+ CDBG("%x %x\n", (unsigned int)ao->output1buffer1_y_phy,
+ (unsigned int)ao->output1buffer1_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output1buffer2_y_phy,
+ (unsigned int)ao->output1buffer2_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output1buffer3_y_phy,
+ (unsigned int)ao->output1buffer3_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output1buffer4_y_phy,
+ (unsigned int)ao->output1buffer4_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output1buffer5_y_phy,
+ (unsigned int)ao->output1buffer5_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output1buffer6_y_phy,
+ (unsigned int)ao->output1buffer6_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output1buffer7_y_phy,
+ (unsigned int)ao->output1buffer7_cbcr_phy);
+ } else { /*Primary*/
+ ao->output2buffer1_y_phy = ad->ping.ch_paddr[0];
+ ao->output2buffer1_cbcr_phy = ad->ping.ch_paddr[1];
+ ao->output2buffer2_y_phy = ad->pong.ch_paddr[0];
+ ao->output2buffer2_cbcr_phy = ad->pong.ch_paddr[1];
+ ao->output2buffer3_y_phy = ad->free_buf.ch_paddr[0];
+ ao->output2buffer3_cbcr_phy = ad->free_buf.ch_paddr[1];
+ bptr = &ao->output2buffer4_y_phy;
+ for (cnt = 0; cnt < 5; cnt++) {
+ *bptr = (cnt < ad->free_buf_cnt-3) ?
+ ad->free_buf_arr[cnt].ch_paddr[0] :
+ ad->pong.ch_paddr[0];
+ bptr++;
+ *bptr = (cnt < ad->free_buf_cnt-3) ?
+ ad->free_buf_arr[cnt].ch_paddr[1] :
+ ad->pong.ch_paddr[1];
+ bptr++;
+ }
+ CDBG("%x %x\n", (unsigned int)ao->output2buffer1_y_phy,
+ (unsigned int)ao->output2buffer1_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output2buffer2_y_phy,
+ (unsigned int)ao->output2buffer2_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output2buffer3_y_phy,
+ (unsigned int)ao->output2buffer3_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output2buffer4_y_phy,
+ (unsigned int)ao->output2buffer4_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output2buffer5_y_phy,
+ (unsigned int)ao->output2buffer5_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output2buffer6_y_phy,
+ (unsigned int)ao->output2buffer6_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output2buffer7_y_phy,
+ (unsigned int)ao->output2buffer7_cbcr_phy);
+ }
+ } else if (mode == OUTPUT_SEC) {
/* Thumbnail */
if (vfe2x_ctrl->zsl_mode) {
ao->output1buffer1_y_phy = ad->ping.ch_paddr[0];
@@ -1228,6 +1367,7 @@
cmd->cmd_type != CMD_VFE_BUFFER_RELEASE &&
cmd->cmd_type != VFE_CMD_STATS_REQBUF &&
cmd->cmd_type != VFE_CMD_STATS_FLUSH_BUFQ &&
+ cmd->cmd_type != VFE_CMD_STATS_UNREGBUF &&
cmd->cmd_type != VFE_CMD_STATS_ENQUEUEBUF) {
if (copy_from_user(&vfecmd,
(void __user *)(cmd->value),
@@ -1239,6 +1379,7 @@
switch (cmd->cmd_type) {
case VFE_CMD_STATS_REQBUF:
case VFE_CMD_STATS_FLUSH_BUFQ:
+ case VFE_CMD_STATS_UNREGBUF:
/* for easy porting put in one envelope */
rc = vfe2x_stats_bufq_sub_ioctl(cmd, vfe_params->data);
return rc;
@@ -1311,7 +1452,20 @@
case CMD_CONFIG_FREE_BUF_ADDR: {
int path = *((int *)cmd->value);
struct buf_info *outch = vfe2x_get_ch(path);
- outch->free_buf = *((struct msm_free_buf *)data);
+ if ((op_mode & SNAPSHOT_MASK_MODE) &&
+ (vfe2x_ctrl->num_snap > 1)) {
+ CDBG("%s: CMD_CONFIG_FREE_BUF_ADDR Burst mode %d",
+ __func__, outch->free_buf_cnt);
+ if (outch->free_buf_cnt <= 0)
+ outch->free_buf =
+ *((struct msm_free_buf *)data);
+ else
+ outch->free_buf_arr[outch->free_buf_cnt-1] =
+ *((struct msm_free_buf *)data);
+ ++outch->free_buf_cnt;
+ } else {
+ outch->free_buf = *((struct msm_free_buf *)data);
+ }
}
return 0;
@@ -1489,6 +1643,12 @@
vfecmd.length))
rc = -EFAULT;
op_mode = vfe2x_ctrl->start_cmd.mode_of_operation;
+ vfe2x_ctrl->snap.free_buf_cnt = 0;
+ vfe2x_ctrl->thumb.free_buf_cnt = 0;
+ vfe2x_ctrl->snap.frame_cnt = 0;
+ vfe2x_ctrl->thumb.frame_cnt = 0;
+ vfe2x_ctrl->num_snap =
+ vfe2x_ctrl->start_cmd.snap_number;
return rc;
}
if (vfecmd.id == VFE_CMD_RECONFIG_VFE) {
@@ -1822,10 +1982,21 @@
goto config_done;
}
- if (!(op_mode & SNAPSHOT_MASK_MODE))
+ if (!(op_mode & SNAPSHOT_MASK_MODE)) {
free_buf = vfe2x_check_free_buffer(
VFE_MSG_OUTPUT_IRQ,
VFE_MSG_OUTPUT_SECONDARY);
+ } else if ((op_mode & SNAPSHOT_MASK_MODE) &&
+ (vfe2x_ctrl->num_snap > 1)) {
+ int i = 0;
+ CDBG("Burst mode AXI config SEC snap cnt %d\n",
+ vfe2x_ctrl->num_snap);
+ for (i = 0; i < vfe2x_ctrl->num_snap - 2; i++) {
+ free_buf = vfe2x_check_free_buffer(
+ VFE_MSG_OUTPUT_IRQ,
+ VFE_MSG_OUTPUT_SECONDARY);
+ }
+ }
header = cmds_map[vfecmd.id].vfe_id;
queue = cmds_map[vfecmd.id].queue;
if (header == -1 && queue == -1) {
@@ -1853,10 +2024,22 @@
goto config_done;
}
- if (!(op_mode & SNAPSHOT_MASK_MODE))
+ if (!(op_mode & SNAPSHOT_MASK_MODE)) {
free_buf = vfe2x_check_free_buffer(
VFE_MSG_OUTPUT_IRQ,
VFE_MSG_OUTPUT_PRIMARY);
+ } else if ((op_mode & SNAPSHOT_MASK_MODE) &&
+ (vfe2x_ctrl->num_snap > 1)) {
+ int i = 0;
+ CDBG("Burst mode AXI config PRIM snap cnt %d\n",
+ vfe2x_ctrl->num_snap);
+ for (i = 0; i < vfe2x_ctrl->num_snap - 2; i++) {
+ free_buf = vfe2x_check_free_buffer(
+ VFE_MSG_OUTPUT_IRQ,
+ VFE_MSG_OUTPUT_PRIMARY);
+ }
+ }
+
if (op_mode & SNAPSHOT_MASK_MODE)
vfe_7x_config_axi(OUTPUT_PRIM,
&vfe2x_ctrl->snap, axio);
diff --git a/drivers/media/video/msm/msm_vfe7x27a_v4l2.h b/drivers/media/video/msm/msm_vfe7x27a_v4l2.h
index b7d6806..39affc4 100644
--- a/drivers/media/video/msm/msm_vfe7x27a_v4l2.h
+++ b/drivers/media/video/msm/msm_vfe7x27a_v4l2.h
@@ -18,6 +18,9 @@
#include "msm.h"
#include "msm_vfe_stats_buf.h"
+/* 8 DSP buffers less the 3 fixed ones (ping, pong, free) */
+#define FREE_BUF_ARR_SIZE 5
+
struct cmd_id_map {
uint32_t isp_id;
uint32_t vfe_id;
@@ -50,6 +53,10 @@
struct msm_free_buf ping;
struct msm_free_buf pong;
struct msm_free_buf free_buf;
+ /*Array for holding the free buffer if more than one*/
+ struct msm_free_buf free_buf_arr[FREE_BUF_ARR_SIZE];
+ int free_buf_cnt;
+ int frame_cnt;
} __packed;
struct prev_free_buf_info {
@@ -117,6 +124,7 @@
struct msm_stats_ops stats_ops;
unsigned long stats_we_buf_ptr[3];
unsigned long stats_af_buf_ptr[3];
+ int num_snap;
} __packed;
struct vfe_frame_extra {
diff --git a/drivers/media/video/msm/msm_vfe_stats_buf.c b/drivers/media/video/msm/msm_vfe_stats_buf.c
index 9e8f285..5fbcdb1 100644
--- a/drivers/media/video/msm/msm_vfe_stats_buf.c
+++ b/drivers/media/video/msm/msm_vfe_stats_buf.c
@@ -475,6 +475,8 @@
struct msm_stats_buf_info *info, struct ion_client *client)
{
int rc = 0;
+ D("%s: stats type : %d, idx : %d\n", __func__,
+ info->type, info->buf_idx);
rc = msm_stats_buf_prepare(stats_ctrl, info, client);
if (rc < 0) {
pr_err("%s: buf_prepare failed, rc = %d", __func__, rc);
diff --git a/drivers/media/video/msm_vidc/msm_v4l2_vidc.c b/drivers/media/video/msm_vidc/msm_v4l2_vidc.c
index 817caf5..cf1ebbb 100644
--- a/drivers/media/video/msm_vidc/msm_v4l2_vidc.c
+++ b/drivers/media/video/msm_vidc/msm_v4l2_vidc.c
@@ -704,6 +704,7 @@
struct resource *res;
int i = 0;
int rc = 0;
+ struct on_chip_mem *ocmem;
if (!core)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -753,6 +754,14 @@
pr_err("Failed to register iommu domains: %d\n", rc);
goto fail_register_domains;
}
+ ocmem = &core->resources.ocmem;
+ ocmem->vidc_ocmem_nb.notifier_call = msm_vidc_ocmem_notify_handler;
+ ocmem->handle =
+ ocmem_notifier_register(OCMEM_VIDEO, &ocmem->vidc_ocmem_nb);
+ if (!ocmem->handle) {
+ pr_warn("Failed to register OCMEM notifier.");
+ pr_warn(" Performance will be impacted\n");
+ }
return rc;
fail_register_domains:
msm_bus_scale_unregister_client(
@@ -861,6 +870,9 @@
video_unregister_device(&core->vdev[MSM_VIDC_ENCODER].vdev);
video_unregister_device(&core->vdev[MSM_VIDC_DECODER].vdev);
v4l2_device_unregister(&core->v4l2_dev);
+ if (core->resources.ocmem.handle)
+ ocmem_notifier_unregister(core->resources.ocmem.handle,
+ &core->resources.ocmem.vidc_ocmem_nb);
kfree(core);
return rc;
}
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index fa9608d..6835467 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -42,6 +42,11 @@
__height * __width * __fps; \
})
+#define GET_NUM_MBS(__h, __w) ({\
+ u32 __mbs = (__h >> 4) * (__w >> 4);\
+ __mbs;\
+})
+
/*While adding entries to this array make sure
* they are in descending order.
* Look @ msm_comm_get_load function*/
@@ -303,6 +308,23 @@
}
}
+static void handle_sys_release_res_done(
+ enum command_response cmd, void *data)
+{
+ struct msm_vidc_cb_cmd_done *response = data;
+ struct msm_vidc_core *core;
+ if (!response) {
+ pr_err("Failed to get valid response for sys init\n");
+ return;
+ }
+ core = get_vidc_core(response->device_id);
+ if (!core) {
+ pr_err("Wrong device_id received\n");
+ return;
+ }
+ complete(&core->completions[SYS_MSG_INDEX(cmd)]);
+}
+
static inline void change_inst_state(struct msm_vidc_inst *inst,
enum instance_state state)
{
@@ -591,6 +613,9 @@
case SYS_INIT_DONE:
handle_sys_init_done(cmd, data);
break;
+ case RELEASE_RESOURCE_DONE:
+ handle_sys_release_res_done(cmd, data);
+ break;
case SESSION_INIT_DONE:
handle_session_init_done(cmd, data);
break;
@@ -753,6 +778,148 @@
}
}
+static inline unsigned long get_ocmem_requirement(u32 height, u32 width)
+{
+ int num_mbs = 0;
+ num_mbs = GET_NUM_MBS(height, width);
+ /* TODO: This should be changed once the numbers are
+ * available from firmware */
+ return 512 * 1024;
+}
+
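
As a worked example (a 1920x1080 session is assumed for the numbers), the GET_NUM_MBS macro and the fixed OCMEM requirement above evaluate as follows:

	#include <stdio.h>

	/* Same statement-expression macro as msm_vidc_common.c above (GCC extension). */
	#define GET_NUM_MBS(__h, __w) ({\
		unsigned int __mbs = (__h >> 4) * (__w >> 4);\
		__mbs;\
	})

	static unsigned long get_ocmem_requirement(unsigned int height, unsigned int width)
	{
		unsigned int num_mbs = GET_NUM_MBS(height, width);
		/* The macroblock count is not used yet; the driver returns a
		 * fixed 512 KB until per-resolution numbers come from firmware. */
		(void)num_mbs;
		return 512 * 1024;
	}

	int main(void)
	{
		/* 1080p: (1080 >> 4) * (1920 >> 4) = 67 * 120 = 8040 macroblocks */
		printf("MBs: %u, OCMEM: %lu bytes\n", GET_NUM_MBS(1080, 1920),
		       get_ocmem_requirement(1080, 1920));
		return 0;
	}
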
+static int msm_comm_set_ocmem(struct msm_vidc_core *core,
+ struct ocmem_buf *ocmem)
+{
+ struct vidc_resource_hdr rhdr;
+ int rc = 0;
+ if (!core || !ocmem) {
+ pr_err("Invalid params, core:%p, ocmem: %p\n",
+ core, ocmem);
+ return -EINVAL;
+ }
+ rhdr.resource_id = VIDC_RESOURCE_OCMEM;
+ rhdr.resource_handle = (u32) &core->resources.ocmem;
+ rhdr.size = ocmem->len;
+ rc = vidc_hal_core_set_resource(core->device, &rhdr, ocmem);
+ if (rc) {
+ pr_err("Failed to set OCMEM on driver\n");
+ goto ocmem_set_failed;
+ }
+ pr_debug("OCMEM set, addr = %lx, size: %ld\n",
+ ocmem->addr, ocmem->len);
+ocmem_set_failed:
+ return rc;
+}
+
+static int msm_comm_unset_ocmem(struct msm_vidc_core *core)
+{
+ struct vidc_resource_hdr rhdr;
+ int rc = 0;
+ if (!core || !core->resources.ocmem.buf) {
+ pr_err("Invalid params, core:%p\n", core);
+ return -EINVAL;
+ }
+ rhdr.resource_id = VIDC_RESOURCE_OCMEM;
+ rhdr.resource_handle = (u32) &core->resources.ocmem;
+ init_completion(
+ &core->completions[SYS_MSG_INDEX(RELEASE_RESOURCE_DONE)]);
+ rc = vidc_hal_core_release_resource(core->device, &rhdr);
+ if (rc) {
+ pr_err("Failed to release OCMEM on driver\n");
+ goto release_ocmem_failed;
+ }
+ rc = wait_for_completion_timeout(
+ &core->completions[SYS_MSG_INDEX(RELEASE_RESOURCE_DONE)],
+ msecs_to_jiffies(HW_RESPONSE_TIMEOUT));
+ if (!rc) {
+ pr_err("Wait interrupted or timeout: %d\n", rc);
+ rc = -EIO;
+ goto release_ocmem_failed;
+ }
+release_ocmem_failed:
+ return rc;
+}
+
+static int msm_comm_alloc_ocmem(struct msm_vidc_core *core,
+ unsigned long size)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct ocmem_buf *ocmem_buffer;
+ if (!core || !size) {
+ pr_err("Invalid param, core: %p, size: %lu\n", core, size);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&core->lock, flags);
+ ocmem_buffer = core->resources.ocmem.buf;
+ if (!ocmem_buffer ||
+ ocmem_buffer->len < size) {
+ ocmem_buffer = ocmem_allocate_nb(OCMEM_VIDEO, size);
+ if (IS_ERR_OR_NULL(ocmem_buffer)) {
+ pr_err("ocmem_allocate_nb failed: %d\n",
+ (u32) ocmem_buffer);
+ rc = -ENOMEM;
+ goto ocmem_set_failed;
+ }
+ core->resources.ocmem.buf = ocmem_buffer;
+ rc = msm_comm_set_ocmem(core, ocmem_buffer);
+ if (rc) {
+ pr_err("Failed to set ocmem: %d\n", rc);
+ goto ocmem_set_failed;
+ }
+ } else
+ pr_debug("OCMEM is enough. reqd: %lu, available: %lu\n",
+ size, ocmem_buffer->len);
+
+ocmem_set_failed:
+ spin_unlock_irqrestore(&core->lock, flags);
+ return rc;
+}
+
+static int msm_comm_free_ocmem(struct msm_vidc_core *core)
+{
+ int rc = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&core->lock, flags);
+ if (core->resources.ocmem.buf) {
+ rc = ocmem_free(OCMEM_VIDEO, core->resources.ocmem.buf);
+ if (rc)
+ pr_err("Failed to free ocmem\n");
+ }
+ core->resources.ocmem.buf = NULL;
+ spin_unlock_irqrestore(&core->lock, flags);
+ return rc;
+}
+
+int msm_vidc_ocmem_notify_handler(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ struct ocmem_buf *buff = data;
+ struct msm_vidc_core *core;
+ struct msm_vidc_resources *resources;
+ struct on_chip_mem *ocmem;
+ int rc = NOTIFY_DONE;
+ if (event == OCMEM_ALLOC_GROW) {
+ ocmem = container_of(this, struct on_chip_mem, vidc_ocmem_nb);
+ if (!ocmem) {
+ pr_err("Wrong handler passed\n");
+ rc = NOTIFY_BAD;
+ goto bad_notifier;
+ }
+ resources = container_of(ocmem,
+ struct msm_vidc_resources, ocmem);
+ core = container_of(resources,
+ struct msm_vidc_core, resources);
+ if (msm_comm_set_ocmem(core, buff)) {
+ pr_err("Failed to set ocmem: %d\n", rc);
+ goto ocmem_set_failed;
+ }
+ rc = NOTIFY_OK;
+ }
+ocmem_set_failed:
+bad_notifier:
+ return rc;
+}
+
static int msm_comm_init_core_done(struct msm_vidc_inst *inst)
{
struct msm_vidc_core *core = inst->core;
@@ -835,7 +1002,8 @@
goto core_already_uninited;
}
if (list_empty(&core->instances)) {
- pr_debug("Calling vidc_hal_core_release\n");
+ msm_comm_unset_ocmem(core);
+ msm_comm_free_ocmem(core);
rc = vidc_hal_core_release(core->device);
if (rc) {
pr_err("Failed to release core, id = %d\n", core->id);
@@ -953,10 +1121,16 @@
struct msm_vidc_inst *inst)
{
int rc = 0;
+ u32 ocmem_sz = 0;
if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_LOAD_RESOURCES)) {
pr_err("inst: %p is already in state: %d\n", inst, inst->state);
goto exit;
}
+ ocmem_sz = get_ocmem_requirement(inst->height, inst->width);
+ rc = msm_comm_alloc_ocmem(inst->core, ocmem_sz);
+ if (rc)
+ pr_warn("Failed to allocate OCMEM. Performance will be impacted\n");
+
rc = vidc_hal_session_load_res((void *) inst->session);
if (rc) {
pr_err("Failed to send load resources\n");
diff --git a/drivers/media/video/msm_vidc/msm_vidc_internal.h b/drivers/media/video/msm_vidc/msm_vidc_internal.h
index 58d7290..992f39c 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_internal.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_internal.h
@@ -21,6 +21,7 @@
#include <linux/clk.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
+#include <mach/ocmem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -153,11 +154,18 @@
u32 ocmem_handle;
};
+struct on_chip_mem {
+ struct ocmem_buf *buf;
+ struct notifier_block vidc_ocmem_nb;
+ void *handle;
+};
+
struct msm_vidc_resources {
struct msm_vidc_fw fw;
struct iommu_info io_map[MAX_MAP];
struct core_clock clock[VCODEC_MAX_CLKS];
struct vidc_bus_info bus_info;
+ struct on_chip_mem ocmem;
};
struct session_prop {
@@ -227,4 +235,7 @@
};
void handle_cmd_response(enum command_response cmd, void *data);
+int msm_vidc_ocmem_notify_handler(struct notifier_block *this,
+ unsigned long event, void *data);
+
#endif
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index 646a0b8..16a3ecd 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -16,6 +16,8 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <mach/ocmem.h>
+
#include <asm/memory.h>
#include "vidc_hal.h"
#include "vidc_hal_io.h"
@@ -749,12 +751,12 @@
struct hfi_resource_ocmem *hfioc_mem =
(struct hfi_resource_ocmem *)
&pkt->rg_resource_data[0];
- struct vidc_mem_addr *vidc_oc_mem =
- (struct vidc_mem_addr *) resource_value;
+ struct ocmem_buf *ocmem =
+ (struct ocmem_buf *) resource_value;
pkt->resource_type = HFI_RESOURCE_OCMEM;
- hfioc_mem->size = (u32) vidc_oc_mem->mem_size;
- hfioc_mem->mem = (u8 *) vidc_oc_mem->align_device_addr;
+ hfioc_mem->size = (u32) ocmem->len;
+ hfioc_mem->mem = (u8 *) ocmem->addr;
pkt->size += sizeof(struct hfi_resource_ocmem);
if (vidc_hal_iface_cmdq_write(dev, pkt))
rc = -ENOTEMPTY;
diff --git a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
index ded9f11..364faa9 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
+++ b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
@@ -252,6 +252,29 @@
device->callback(SYS_INIT_DONE, &cmd_done);
}
+static void hal_process_sys_rel_resource_done(struct hal_device *device,
+ struct hfi_msg_sys_release_resource_done_packet *pkt)
+{
+ struct msm_vidc_cb_cmd_done cmd_done;
+ enum vidc_status status = VIDC_ERR_NONE;
+ u32 pkt_size;
+ memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
+ HAL_MSG_ERROR("RECEIVED:SYS_RELEASE_RESOURCE_DONE");
+ pkt_size = sizeof(struct hfi_msg_sys_release_resource_done_packet);
+ if (pkt_size > pkt->size) {
+ HAL_MSG_ERROR("hal_process_sys_rel_resource_done:bad size:%d",
+ pkt->size);
+ return;
+ }
+ status = vidc_map_hal_err_status((u32)pkt->error_type);
+ cmd_done.device_id = device->device_id;
+ cmd_done.session_id = 0;
+ cmd_done.status = (u32) status;
+ cmd_done.size = 0;
+ cmd_done.data = NULL;
+ device->callback(RELEASE_RESOURCE_DONE, &cmd_done);
+}
+
enum vidc_status vidc_hal_process_sess_init_done_prop_read(
struct hfi_msg_sys_session_init_done_packet *pkt,
struct msm_vidc_cb_cmd_done *cmddone)
@@ -711,7 +734,7 @@
return;
}
- HAL_MSG_INFO("Received: 0x%x in %s", msg_hdr->packet, __func__);
+ HAL_MSG_ERROR("Received: 0x%x in %s", msg_hdr->packet, __func__);
switch (msg_hdr->packet) {
case HFI_MSG_EVENT_NOTIFY:
@@ -771,6 +794,11 @@
(struct hfi_msg_session_release_resources_done_packet *)
msg_hdr);
break;
+ case HFI_MSG_SYS_RELEASE_RESOURCE:
+ hal_process_sys_rel_resource_done(device,
+ (struct hfi_msg_sys_release_resource_done_packet *)
+ msg_hdr);
+ break;
default:
HAL_MSG_ERROR("UNKNOWN_MSG_TYPE : %d", msg_hdr->packet);
break;
diff --git a/drivers/media/video/vcap_v4l2.c b/drivers/media/video/vcap_v4l2.c
index 01e1201..894860b 100644
--- a/drivers/media/video/vcap_v4l2.c
+++ b/drivers/media/video/vcap_v4l2.c
@@ -852,9 +852,6 @@
c_data->vp_in_fmt.height = priv_fmt->u.pix.height;
c_data->vp_in_fmt.pixfmt = priv_fmt->u.pix.pixelformat;
- if (priv_fmt->u.pix.priv)
- c_data->vid_vp_action.nr_enabled = 1;
-
size = c_data->vp_in_fmt.width * c_data->vp_in_fmt.height;
if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
size = size * 2;
@@ -868,9 +865,6 @@
c_data->vp_out_fmt.height = priv_fmt->u.pix.height;
c_data->vp_out_fmt.pixfmt = priv_fmt->u.pix.pixelformat;
- if (priv_fmt->u.pix.priv)
- c_data->vid_vp_action.nr_enabled = 1;
-
size = c_data->vp_out_fmt.width * c_data->vp_out_fmt.height;
if (c_data->vp_out_fmt.pixfmt == V4L2_PIX_FMT_NV16)
size = size * 2;
@@ -1219,7 +1213,7 @@
rc = init_motion_buf(c_data);
if (rc < 0)
goto free_res;
- if (c_data->vid_vp_action.nr_enabled) {
+ if (c_data->vid_vp_action.nr_param.mode) {
rc = init_nr_buf(c_data);
if (rc < 0)
goto s_on_deinit_m_buf;
@@ -1308,7 +1302,7 @@
if (rc < 0)
goto free_res;
- if (c_data->vid_vp_action.nr_enabled) {
+ if (c_data->vid_vp_action.nr_param.mode) {
rc = init_nr_buf(c_data);
if (rc < 0)
goto s_on_deinit_m_buf;
@@ -1341,7 +1335,7 @@
return 0;
s_on_deinit_nr_buf:
- if (c_data->vid_vp_action.nr_enabled)
+ if (c_data->vid_vp_action.nr_param.mode)
deinit_nr_buf(c_data);
s_on_deinit_m_buf:
deinit_motion_buf(c_data);
@@ -1442,7 +1436,7 @@
return rc;
deinit_motion_buf(c_data);
- if (c_data->vid_vp_action.nr_enabled)
+ if (c_data->vid_vp_action.nr_param.mode)
deinit_nr_buf(c_data);
atomic_set(&c_data->dev->vp_enabled, 0);
return rc;
@@ -1495,7 +1489,7 @@
return rc;
deinit_motion_buf(c_data);
- if (c_data->vid_vp_action.nr_enabled)
+ if (c_data->vid_vp_action.nr_param.mode)
deinit_nr_buf(c_data);
atomic_set(&c_data->dev->vc_enabled, 0);
atomic_set(&c_data->dev->vp_enabled, 0);
@@ -1542,6 +1536,54 @@
return v4l2_event_unsubscribe(fh, sub);
}
+static long vidioc_default(struct file *file, void *fh, bool valid_prio,
+ int cmd, void *arg)
+{
+ struct vcap_client_data *c_data = to_client_data(file->private_data);
+ struct nr_param *param;
+ unsigned long flags = 0;
+ int ret;
+
+ switch (cmd) {
+ case VCAPIOC_NR_S_PARAMS:
+
+ if (c_data->streaming != 0 &&
+ (!(!((struct nr_param *) arg)->mode) !=
+ !(!(c_data->vid_vp_action.nr_param.mode)))) {
+ pr_err("ERR: Trying to toggle on/off while VP is already running");
+ return -EBUSY;
+ }
+
+
+ spin_lock_irqsave(&c_data->cap_slock, flags);
+ ret = nr_s_param(c_data, (struct nr_param *) arg);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&c_data->cap_slock, flags);
+ return ret;
+ }
+ param = (struct nr_param *) arg;
+ c_data->vid_vp_action.nr_param = *param;
+ if (param->mode == NR_AUTO)
+ s_default_nr_val(&c_data->vid_vp_action.nr_param);
+ c_data->vid_vp_action.nr_update = true;
+ spin_unlock_irqrestore(&c_data->cap_slock, flags);
+ break;
+ case VCAPIOC_NR_G_PARAMS:
+ *((struct nr_param *)arg) = c_data->vid_vp_action.nr_param;
+ if (c_data->vid_vp_action.nr_param.mode != NR_DISABLE) {
+ if (c_data->streaming)
+ nr_g_param(c_data, (struct nr_param *) arg);
+ else
+ (*(struct nr_param *) arg) =
+ c_data->vid_vp_action.nr_param;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/* VCAP fops */
static void *vcap_ops_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write)
@@ -1790,6 +1832,7 @@
.vidioc_subscribe_event = vidioc_subscribe_event,
.vidioc_unsubscribe_event = vidioc_unsubscribe_event,
+ .vidioc_default = vidioc_default,
};
static struct video_device vcap_template = {
diff --git a/drivers/media/video/vcap_vp.c b/drivers/media/video/vcap_vp.c
index b73185d..f1f1c69 100644
--- a/drivers/media/video/vcap_vp.c
+++ b/drivers/media/video/vcap_vp.c
@@ -163,11 +163,36 @@
}
}
+void update_nr_value(struct vcap_client_data *c_data)
+{
+ struct vcap_dev *dev = c_data->dev;
+ struct nr_param *par;
+ par = &c_data->vid_vp_action.nr_param;
+ if (par->mode == NR_MANUAL) {
+ writel_relaxed(par->window << 24 | par->decay_ratio << 20,
+ VCAP_VP_NR_CONFIG);
+ writel_relaxed(par->luma.max_blend_ratio << 24 |
+ par->luma.scale_diff_ratio << 12 |
+ par->luma.diff_limit_ratio << 8 |
+ par->luma.scale_motion_ratio << 4 |
+ par->luma.blend_limit_ratio << 0,
+ VCAP_VP_NR_LUMA_CONFIG);
+ writel_relaxed(par->chroma.max_blend_ratio << 24 |
+ par->chroma.scale_diff_ratio << 12 |
+ par->chroma.diff_limit_ratio << 8 |
+ par->chroma.scale_motion_ratio << 4 |
+ par->chroma.blend_limit_ratio << 0,
+ VCAP_VP_NR_CHROMA_CONFIG);
+ }
+ c_data->vid_vp_action.nr_update = false;
+}
+
static void vp_wq_fnc(struct work_struct *work)
{
struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
struct vcap_dev *dev;
struct vp_action *vp_act;
+ unsigned long flags = 0;
uint32_t irq;
int rc;
#ifndef TOP_FIELD_FIX
@@ -190,6 +215,11 @@
writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
+ spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
+ if (vp_act->nr_update == true)
+ update_nr_value(dev->vp_client);
+ spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+
/* Queue the done buffers */
if (vp_act->vp_state == VP_NORMAL &&
vp_act->bufNR.nr_pos != TM1_BUF) {
@@ -208,7 +238,7 @@
#endif
/* Cycle Buffers*/
- if (vp_work->cd->vid_vp_action.nr_enabled) {
+ if (vp_work->cd->vid_vp_action.nr_param.mode) {
if (vp_act->bufNR.nr_pos == TM1_BUF)
vp_act->bufNR.nr_pos = BUF_NOT_IN_USE;
@@ -453,6 +483,8 @@
if (!buf->vaddr)
return -ENOMEM;
+ update_nr_value(c_data);
+
buf->paddr = virt_to_phys(buf->vaddr);
rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
rc |= 0x02D00001;
@@ -486,6 +518,76 @@
return;
}
+int nr_s_param(struct vcap_client_data *c_data, struct nr_param *param)
+{
+ if (param->mode != NR_MANUAL)
+ return 0;
+
+ /* Verify values in range */
+ if (param->window < VP_NR_MAX_WINDOW)
+ if (param->window > VP_NR_MAX_WINDOW)
+ return -EINVAL;
+ if (param->luma.max_blend_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->luma.scale_diff_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->luma.diff_limit_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->luma.scale_motion_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->luma.blend_limit_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->chroma.max_blend_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->chroma.scale_diff_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->chroma.diff_limit_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->chroma.scale_motion_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->chroma.blend_limit_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+}
+
+void nr_g_param(struct vcap_client_data *c_data, struct nr_param *param)
+{
+ struct vcap_dev *dev = c_data->dev;
+ uint32_t rc;
+ rc = readl_relaxed(VCAP_VP_NR_CONFIG);
+ param->window = BITS_VALUE(rc, 24, 4);
+ param->decay_ratio = BITS_VALUE(rc, 20, 3);
+
+ rc = readl_relaxed(VCAP_VP_NR_LUMA_CONFIG);
+ param->luma.max_blend_ratio = BITS_VALUE(rc, 24, 4);
+ param->luma.scale_diff_ratio = BITS_VALUE(rc, 12, 4);
+ param->luma.diff_limit_ratio = BITS_VALUE(rc, 8, 4);
+ param->luma.scale_motion_ratio = BITS_VALUE(rc, 4, 4);
+ param->luma.blend_limit_ratio = BITS_VALUE(rc, 0, 4);
+
+ rc = readl_relaxed(VCAP_VP_NR_CHROMA_CONFIG);
+ param->chroma.max_blend_ratio = BITS_VALUE(rc, 24, 4);
+ param->chroma.scale_diff_ratio = BITS_VALUE(rc, 12, 4);
+ param->chroma.diff_limit_ratio = BITS_VALUE(rc, 8, 4);
+ param->chroma.scale_motion_ratio = BITS_VALUE(rc, 4, 4);
+ param->chroma.blend_limit_ratio = BITS_VALUE(rc, 0, 4);
+}
+
+void s_default_nr_val(struct nr_param *param)
+{
+ param->window = 10;
+ param->decay_ratio = 0;
+ param->luma.max_blend_ratio = 0;
+ param->luma.scale_diff_ratio = 4;
+ param->luma.diff_limit_ratio = 1;
+ param->luma.scale_motion_ratio = 4;
+ param->luma.blend_limit_ratio = 9;
+ param->chroma.max_blend_ratio = 0;
+ param->chroma.scale_diff_ratio = 4;
+ param->chroma.diff_limit_ratio = 1;
+ param->chroma.scale_motion_ratio = 4;
+ param->chroma.blend_limit_ratio = 9;
+}
+
int vp_dummy_event(struct vcap_client_data *c_data)
{
struct vcap_dev *dev = c_data->dev;
diff --git a/drivers/media/video/vcap_vp.h b/drivers/media/video/vcap_vp.h
index 5c32903..b2b00e9 100644
--- a/drivers/media/video/vcap_vp.h
+++ b/drivers/media/video/vcap_vp.h
@@ -91,6 +91,15 @@
#define VP_PIC_DONE (0x1 << 0)
#define VP_MODE_CHANGE (0x1 << 8)
+#define VP_NR_MAX_WINDOW 120
+#define VP_NR_MAX_RATIO 16
+
+#define BITS_MASK(start, num_of_bits) \
+ (((1 << (num_of_bits)) - 1) << (start))
+
+#define BITS_VALUE(x, start, num_of_bits) \
+ (((x) & BITS_MASK(start, num_of_bits)) >> (start))
+
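+/*
+ * Example (hypothetical register value): if VCAP_VP_NR_CONFIG reads back as
+ * 0x0A500000, then BITS_VALUE(0x0A500000, 24, 4) == 0xA extracts the 4-bit
+ * window field and BITS_VALUE(0x0A500000, 20, 3) == 0x5 extracts the 3-bit
+ * decay_ratio field, as used by nr_g_param().
+ */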
irqreturn_t vp_handler(struct vcap_dev *dev);
int config_vp_format(struct vcap_client_data *c_data);
void vp_stop_capture(struct vcap_client_data *c_data);
@@ -98,6 +107,9 @@
void deinit_motion_buf(struct vcap_client_data *c_data);
int init_nr_buf(struct vcap_client_data *c_data);
void deinit_nr_buf(struct vcap_client_data *c_data);
+int nr_s_param(struct vcap_client_data *c_data, struct nr_param *param);
+void nr_g_param(struct vcap_client_data *c_data, struct nr_param *param);
+void s_default_nr_val(struct nr_param *param);
int kickoff_vp(struct vcap_client_data *c_data);
int continue_vp(struct vcap_client_data *c_data);
int vp_dummy_event(struct vcap_client_data *c_data);
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 2256f67..90673fc 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -686,7 +686,7 @@
const struct i2c_device_id *id)
{
struct wcd9xxx *wcd9xxx;
- struct wcd9xxx_pdata *pdata = client->dev.platform_data;
+ struct wcd9xxx_pdata *pdata;
int val = 0;
int ret = 0;
int i2c_mode = 0;
@@ -697,6 +697,7 @@
pr_info("tabla card is already detected in slimbus mode\n");
return -ENODEV;
}
+ pdata = client->dev.platform_data;
if (device_id > 0) {
wcd9xxx_modules[device_id++].client = client;
pr_info("probe for other slaves devices of tabla\n");
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 2479fcf..f1d2947 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -369,7 +369,7 @@
struct mmc_command *cmd;
while (1) {
- wait_for_completion(&mrq->completion);
+ wait_for_completion_io(&mrq->completion);
cmd = mrq->cmd;
if (!cmd->error || !cmd->retries ||
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 08f5ab9..3a02d3a 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1136,11 +1136,15 @@
}
}
- /* Clear CDR_EN bit for write operations */
- if (host->tuning_needed && cmd->mrq->data &&
- (cmd->mrq->data->flags & MMC_DATA_WRITE))
- writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) &
- ~MCI_CDR_EN), host->base + MCI_DLL_CONFIG);
+ if (cmd->mrq->data && (cmd->mrq->data->flags & MMC_DATA_READ))
+ writel_relaxed((readl_relaxed(host->base +
+ MCI_DLL_CONFIG) | MCI_CDR_EN),
+ host->base + MCI_DLL_CONFIG);
+ else
+		/* Clear CDR_EN bit for non-read operations */
+ writel_relaxed((readl_relaxed(host->base +
+ MCI_DLL_CONFIG) & ~MCI_CDR_EN),
+ host->base + MCI_DLL_CONFIG);
if ((cmd->flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
*c |= MCI_CPSM_PROGENA;
@@ -3083,8 +3087,10 @@
* Select the controller timing mode according
* to current bus speed mode
*/
- if ((ios->timing == MMC_TIMING_UHS_SDR104) ||
- (ios->timing == MMC_TIMING_MMC_HS200)) {
+ if (host->clk_rate > (100 * 1000 * 1000) &&
+ (ios->timing == MMC_TIMING_UHS_SDR104 ||
+ ios->timing == MMC_TIMING_MMC_HS200)) {
+ /* Card clock frequency must be > 100MHz to enable tuning */
clk |= (4 << 14);
host->tuning_needed = 1;
} else if (ios->timing == MMC_TIMING_UHS_DDR50) {
@@ -3165,7 +3171,7 @@
if (host->plat->wpswitch) {
status = host->plat->wpswitch(mmc_dev(mmc));
- } else if (host->plat->wpswitch_gpio) {
+ } else if (gpio_is_valid(host->plat->wpswitch_gpio)) {
status = gpio_request(host->plat->wpswitch_gpio,
"SD_WP_Switch");
if (status) {
@@ -3971,7 +3977,7 @@
struct msmsdcc_host *host = (struct msmsdcc_host *)data;
unsigned int status;
- if (host->plat->status || host->plat->status_gpio) {
+ if (host->plat->status || gpio_is_valid(host->plat->status_gpio)) {
if (host->plat->status)
status = host->plat->status(mmc_dev(host->mmc));
else
@@ -5008,6 +5014,25 @@
return ret;
}
+static void msmsdcc_dt_get_cd_wp_gpio(struct device *dev,
+ struct mmc_platform_data *pdata)
+{
+ enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
+ struct device_node *np = dev->of_node;
+
+ pdata->status_gpio = of_get_named_gpio_flags(np,
+ "cd-gpios", 0, &flags);
+ if (gpio_is_valid(pdata->status_gpio)) {
+ pdata->status_irq = gpio_to_irq(pdata->status_gpio);
+ pdata->is_status_gpio_active_low = flags & OF_GPIO_ACTIVE_LOW;
+ }
+
+ pdata->wpswitch_gpio = of_get_named_gpio_flags(np,
+ "wp-gpios", 0, &flags);
+ if (gpio_is_valid(pdata->wpswitch_gpio))
+ pdata->is_wpswitch_active_low = flags & OF_GPIO_ACTIVE_LOW;
+}
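+/*
+ * A matching device tree fragment for the lookups above might look like
+ * the following (GPIO controller and pin numbers are illustrative only):
+ *	cd-gpios = <&msmgpio 26 0x1>;
+ *	wp-gpios = <&msmgpio 66 0x1>;
+ * where flag 0x1 (OF_GPIO_ACTIVE_LOW) marks the line as active low.
+ */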
+
static int msmsdcc_dt_parse_gpio_info(struct device *dev,
struct mmc_platform_data *pdata)
{
@@ -5015,6 +5040,8 @@
struct msm_mmc_pin_data *pin_data;
struct device_node *np = dev->of_node;
+ msmsdcc_dt_get_cd_wp_gpio(dev, pdata);
+
pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
if (!pin_data) {
dev_err(dev, "No memory for pin_data\n");
@@ -5656,7 +5683,12 @@
* Setup card detect change
*/
- if (plat->status || plat->status_gpio) {
+ if (!plat->status_gpio)
+ plat->status_gpio = -ENOENT;
+ if (!plat->wpswitch_gpio)
+ plat->wpswitch_gpio = -ENOENT;
+
+ if (plat->status || gpio_is_valid(plat->status_gpio)) {
if (plat->status)
host->oldstat = plat->status(mmc_dev(host->mmc));
else
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 21f146f..bc05764 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -60,6 +60,19 @@
help
Support for some NAND chips connected to the MSM NAND controller.
+config MTD_MSM_QPIC_NAND
+ tristate "MSM QPIC NAND Device Support"
+ depends on MTD && ARCH_MSM && !MTD_MSM_NAND
+ select CRC16
+ select BITREVERSE
+ select MTD_NAND_IDS
+ default n
+ help
+	  Support for the NAND controller in the Qualcomm Parallel Interface
+	  Controller (QPIC). This controller supports BAM mode and the BCH
+	  error correction mechanism. Based on the device capabilities,
+	  either 4-bit or 8-bit BCH ECC will be used.
+
config MTD_DATAFLASH
tristate "Support for AT45xxx DataFlash"
depends on SPI_MASTER && EXPERIMENTAL
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 8497c5f..9fdd004 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -13,6 +13,7 @@
obj-$(CONFIG_MTD_PMC551) += pmc551.o
obj-$(CONFIG_MTD_MS02NV) += ms02-nv.o
obj-$(CONFIG_MTD_MSM_NAND) += msm_nand.o
+obj-$(CONFIG_MTD_MSM_QPIC_NAND) += msm_qpic_nand.o
obj-$(CONFIG_MTD_MTDRAM) += mtdram.o
obj-$(CONFIG_MTD_LART) += lart.o
obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c
new file mode 100644
index 0000000..d709e17
--- /dev/null
+++ b/drivers/mtd/devices/msm_qpic_nand.c
@@ -0,0 +1,2500 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/crc16.h>
+#include <linux/bitrev.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <mach/sps.h>
+
+#define PAGE_SIZE_2K 2048
+#define PAGE_SIZE_4K 4096
+#define WRITE 1
+#define READ 0
+/*
+ * The maximum number of descriptors per transfer (page read/write) won't be
+ * than 64. For more details on what those commands are, please refer to the
+ * page read and page write functions in the driver.
+ */
+#define SPS_MAX_DESC_NUM 64
+#define SPS_DATA_CONS_PIPE_INDEX 0
+#define SPS_DATA_PROD_PIPE_INDEX 1
+#define SPS_CMD_CONS_PIPE_INDEX 2
+
+#define msm_virt_to_dma(chip, vaddr) \
+ ((chip)->dma_phys_addr + \
+ ((uint8_t *)(vaddr) - (chip)->dma_virt_addr))
+
+/*
+ * A single page read/write request typically needs about 1K of DMA memory,
+ * so for a single request this memory is more than enough.
+ *
+ * But to accommodate multiple clients we allocate 8K of memory. Though only
+ * one client request can be submitted to the NANDc at any time, other clients
+ * can still prepare their descriptors while waiting for the current client
+ * request to be done. Thus, with a total of 8K, the driver can currently
+ * support up to 7 or 8 clients at a time. A client for which there is no
+ * free DMA memory shall wait on the wait queue until other clients free up
+ * the required memory.
+ */
+#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
+/*
+ * This defines the granularity at which the buffer management is done. The
+ * total number of slots is based on the size of the atomic_t variable
+ * dma_buffer_busy(number of bits) within the structure msm_nand_chip.
+ */
+#define MSM_NAND_DMA_BUFFER_SLOT_SZ \
+ (MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))
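+/*
+ * With an 8K DMA pool and a 32-bit dma_buffer_busy bitmap (sizeof(int) * 8
+ * bits), each slot above works out to 8192 / 32 = 256 bytes, i.e. one busy
+ * bit tracks 256 bytes of the pool.
+ */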
+
+/* ONFI(Open NAND Flash Interface) parameters */
+#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
+#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
+#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
+#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d
+#define ONFI_PARAM_INFO_LENGTH 0x0200
+#define ONFI_PARAM_PAGE_LENGTH 0x0100
+#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F
+#define FLASH_READ_ONFI_SIGNATURE_ADDRESS 0x20
+#define FLASH_READ_ONFI_PARAMETERS_COMMAND 0xEC
+#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00
+#define FLASH_READ_DEVICE_ID_ADDRESS 0x00
+
+#define MSM_NAND_RESET_FLASH_STS 0x00000020
+#define MSM_NAND_RESET_READ_STS 0x000000C0
+
+/* QPIC NANDc (NAND Controller) Register Set */
+#define MSM_NAND_REG(info, off) (info->nand_phys + off)
+#define MSM_NAND_FLASH_CMD(info) MSM_NAND_REG(info, 0x30000)
+#define MSM_NAND_ADDR0(info) MSM_NAND_REG(info, 0x30004)
+#define MSM_NAND_ADDR1(info) MSM_NAND_REG(info, 0x30008)
+#define MSM_NAND_EXEC_CMD(info) MSM_NAND_REG(info, 0x30010)
+#define MSM_NAND_FLASH_STATUS(info) MSM_NAND_REG(info, 0x30014)
+#define FS_OP_ERR (1 << 4)
+#define FS_MPU_ERR (1 << 8)
+#define FS_DEVICE_STS_ERR (1 << 16)
+#define FS_DEVICE_WP (1 << 23)
+
+#define MSM_NAND_BUFFER_STATUS(info) MSM_NAND_REG(info, 0x30018)
+#define BS_UNCORRECTABLE_BIT (1 << 8)
+#define BS_CORRECTABLE_ERR_MSK 0x1F
+
+#define MSM_NAND_DEV0_CFG0(info) MSM_NAND_REG(info, 0x30020)
+#define DISABLE_STATUS_AFTER_WRITE 4
+#define CW_PER_PAGE 6
+#define UD_SIZE_BYTES 9
+#define SPARE_SIZE_BYTES 23
+#define NUM_ADDR_CYCLES 27
+
+#define MSM_NAND_DEV0_CFG1(info) MSM_NAND_REG(info, 0x30024)
+#define DEV0_CFG1_ECC_DISABLE 0
+#define WIDE_FLASH 1
+#define NAND_RECOVERY_CYCLES 2
+#define CS_ACTIVE_BSY 5
+#define BAD_BLOCK_BYTE_NUM 6
+#define BAD_BLOCK_IN_SPARE_AREA 16
+#define WR_RD_BSY_GAP 17
+#define ENABLE_BCH_ECC 27
+
+#define MSM_NAND_DEV0_ECC_CFG(info) MSM_NAND_REG(info, 0x30028)
+#define ECC_CFG_ECC_DISABLE 0
+#define ECC_SW_RESET 1
+#define ECC_MODE 4
+#define ECC_PARITY_SIZE_BYTES 8
+#define ECC_NUM_DATA_BYTES 16
+#define ECC_FORCE_CLK_OPEN 30
+
+#define MSM_NAND_READ_ID(info) MSM_NAND_REG(info, 0x30040)
+#define MSM_NAND_READ_STATUS(info) MSM_NAND_REG(info, 0x30044)
+#define MSM_NAND_DEV_CMD1(info) MSM_NAND_REG(info, 0x300A4)
+#define MSM_NAND_DEV_CMD_VLD(info) MSM_NAND_REG(info, 0x300AC)
+#define MSM_NAND_EBI2_ECC_BUF_CFG(info) MSM_NAND_REG(info, 0x300F0)
+#define MSM_NAND_ERASED_CW_DETECT_CFG(info) MSM_NAND_REG(info, 0x300E8)
+#define MSM_NAND_ERASED_CW_DETECT_STATUS(info) MSM_NAND_REG(info, 0x300EC)
+
+#define MSM_NAND_CTRL(info) MSM_NAND_REG(info, 0x30F00)
+#define BAM_MODE_EN 0
+
+#define MSM_NAND_READ_LOCATION_0(info) MSM_NAND_REG(info, 0x30F20)
+#define MSM_NAND_READ_LOCATION_1(info) MSM_NAND_REG(info, 0x30F24)
+
+/* device commands */
+#define MSM_NAND_CMD_PAGE_READ 0x32
+#define MSM_NAND_CMD_PAGE_READ_ECC 0x33
+#define MSM_NAND_CMD_PAGE_READ_ALL 0x34
+#define MSM_NAND_CMD_PRG_PAGE 0x36
+#define MSM_NAND_CMD_PRG_PAGE_ECC 0x37
+#define MSM_NAND_CMD_PRG_PAGE_ALL 0x39
+#define MSM_NAND_CMD_BLOCK_ERASE 0x3A
+#define MSM_NAND_CMD_FETCH_ID 0x0B
+
+/* Structure that defines a NAND SPS command element */
+struct msm_nand_sps_cmd {
+ struct sps_command_element ce;
+ uint32_t flags;
+};
+
+/*
+ * Structure that defines the NAND controller properties as per the
+ * NAND flash device/chip that is attached.
+ */
+struct msm_nand_chip {
+ struct device *dev;
+ /*
+ * DMA memory will be allocated only once during probe and this memory
+ * will be used by all NAND clients. This wait queue is needed to
+	 * make applications wait for DMA memory to be freed when all of it
+	 * is exhausted.
+ */
+ wait_queue_head_t dma_wait_queue;
+ atomic_t dma_buffer_busy;
+ uint8_t *dma_virt_addr;
+ dma_addr_t dma_phys_addr;
+ uint32_t ecc_parity_bytes;
+ uint32_t bch_caps; /* Controller BCH ECC capabilities */
+#define MSM_NAND_CAP_4_BIT_BCH (1 << 0)
+#define MSM_NAND_CAP_8_BIT_BCH (1 << 1)
+ uint32_t cw_size;
+ /* NANDc register configurations */
+ uint32_t cfg0, cfg1, cfg0_raw, cfg1_raw;
+ uint32_t ecc_buf_cfg;
+ uint32_t ecc_bch_cfg;
+};
+
+/* Structure that defines an SPS end point for a NANDc BAM pipe. */
+struct msm_nand_sps_endpt {
+ struct sps_pipe *handle;
+ struct sps_connect config;
+ struct sps_register_event event;
+ struct completion completion;
+};
+
+/*
+ * Structure that defines NANDc SPS data - BAM handle and an end point
+ * for each BAM pipe.
+ */
+struct msm_nand_sps_info {
+ uint32_t bam_handle;
+ struct msm_nand_sps_endpt data_prod;
+ struct msm_nand_sps_endpt data_cons;
+ struct msm_nand_sps_endpt cmd_pipe;
+};
+
+/*
+ * Structure that contains flash device information. This gets updated after
+ * the NAND flash device detection.
+ */
+struct flash_identification {
+ uint32_t flash_id;
+ uint32_t density;
+ uint32_t widebus;
+ uint32_t pagesize;
+ uint32_t blksize;
+ uint32_t oobsize;
+ uint32_t ecc_correctability;
+};
+
+/* Structure that defines NANDc private data. */
+struct msm_nand_info {
+ struct mtd_info mtd;
+ struct msm_nand_chip nand_chip;
+ struct msm_nand_sps_info sps;
+ unsigned long bam_phys;
+ unsigned long nand_phys;
+ void __iomem *bam_base;
+ int bam_irq;
+ /*
+ * This lock must be acquired before submitting any command or data
+ * descriptors to BAM pipes and must be held until all the submitted
+ * descriptors are processed.
+ *
+	 * This is required to ensure that both command and data descriptors
+	 * are submitted atomically without interruption from other clients,
+	 * when there are requests from more than one client at any time.
+	 * Otherwise, data and command descriptors can be submitted out of
+	 * order for a request, which can cause data corruption.
+ */
+ struct mutex bam_lock;
+ struct flash_identification flash_dev;
+};
+
+/* Structure that defines an ONFI parameter page (256B) */
+struct onfi_param_page {
+ uint32_t parameter_page_signature;
+ uint16_t revision_number;
+ uint16_t features_supported;
+ uint16_t optional_commands_supported;
+ uint8_t reserved0[22];
+ uint8_t device_manufacturer[12];
+ uint8_t device_model[20];
+ uint8_t jedec_manufacturer_id;
+ uint16_t date_code;
+ uint8_t reserved1[13];
+ uint32_t number_of_data_bytes_per_page;
+ uint16_t number_of_spare_bytes_per_page;
+ uint32_t number_of_data_bytes_per_partial_page;
+ uint16_t number_of_spare_bytes_per_partial_page;
+ uint32_t number_of_pages_per_block;
+ uint32_t number_of_blocks_per_logical_unit;
+ uint8_t number_of_logical_units;
+ uint8_t number_of_address_cycles;
+ uint8_t number_of_bits_per_cell;
+ uint16_t maximum_bad_blocks_per_logical_unit;
+ uint16_t block_endurance;
+ uint8_t guaranteed_valid_begin_blocks;
+ uint16_t guaranteed_valid_begin_blocks_endurance;
+ uint8_t number_of_programs_per_page;
+ uint8_t partial_program_attributes;
+ uint8_t number_of_bits_ecc_correctability;
+ uint8_t number_of_interleaved_address_bits;
+ uint8_t interleaved_operation_attributes;
+ uint8_t reserved2[13];
+ uint8_t io_pin_capacitance;
+ uint16_t timing_mode_support;
+ uint16_t program_cache_timing_mode_support;
+ uint16_t maximum_page_programming_time;
+ uint16_t maximum_block_erase_time;
+ uint16_t maximum_page_read_time;
+ uint16_t maximum_change_column_setup_time;
+ uint8_t reserved3[23];
+ uint16_t vendor_specific_revision_number;
+ uint8_t vendor_specific[88];
+ uint16_t integrity_crc;
+} __attribute__((__packed__));
+
+/*
+ * Get DMA memory of the requested size. Returns a pointer to free memory
+ * from the allocated pool, or NULL if there is no free memory.
+ */
+static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
+{
+ uint32_t bitmask, free_bitmask, old_bitmask;
+ uint32_t need_mask, current_need_mask;
+ int free_index;
+
+ need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
+ - 1;
+ bitmask = atomic_read(&chip->dma_buffer_busy);
+ free_bitmask = ~bitmask;
+ do {
+ free_index = __ffs(free_bitmask);
+ current_need_mask = need_mask << free_index;
+
+ if (size + free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ >=
+ MSM_NAND_DMA_BUFFER_SIZE)
+ return NULL;
+
+ if ((bitmask & current_need_mask) == 0) {
+ old_bitmask =
+ atomic_cmpxchg(&chip->dma_buffer_busy,
+ bitmask,
+ bitmask | current_need_mask);
+ if (old_bitmask == bitmask)
+ return chip->dma_virt_addr +
+ free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ;
+ free_bitmask = 0;/* force return */
+ }
+ /* current free range was too small, clear all free bits */
+ /* below the top busy bit within current_need_mask */
+ free_bitmask &=
+ ~(~0U >> (32 - fls(bitmask & current_need_mask)));
+ } while (free_bitmask);
+
+ return NULL;
+}
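+/*
+ * For example (request size is illustrative): a 1024-byte allocation needs
+ * DIV_ROUND_UP(1024, 256) = 4 slots, so need_mask above is 0xF and is
+ * shifted to the first run of 4 free bits found in the busy bitmap.
+ */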
+
+/*
+ * Releases the used DMA memory back to the free pool and wakes up any user
+ * thread waiting on the wait queue for free memory to become available.
+ */
+static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
+ void *buffer, size_t size)
+{
+ int index;
+ uint32_t used_mask;
+
+ used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
+ - 1;
+ index = ((uint8_t *)buffer - chip->dma_virt_addr) /
+ MSM_NAND_DMA_BUFFER_SLOT_SZ;
+ atomic_sub(used_mask << index, &chip->dma_buffer_busy);
+
+ wake_up(&chip->dma_wait_queue);
+}
+
+/*
+ * Calculates the page address of the buffer passed, the offset of the buffer
+ * within that page, and then maps it for DMA by calling dma_map_page().
+ */
+static dma_addr_t msm_nand_dma_map(struct device *dev, void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct page *page;
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+ if (virt_addr_valid(addr))
+ page = virt_to_page(addr);
+ else {
+ if (WARN_ON(size + offset > PAGE_SIZE))
+ return ~0;
+ page = vmalloc_to_page(addr);
+ }
+ return dma_map_page(dev, page, offset, size, dir);
+}
+
+/*
+ * Wrapper function to prepare an SPS command element with the data that is
+ * passed to this function.
+ *
+ * Since every command element must have the SPS_IOVEC_FLAG_CMD flag set,
+ * this function sets that flag by default, so the caller need not pass it
+ * explicitly. The other flags must be passed based on need. If a command
+ * element doesn't need any other flag, then 0 can be passed to flags.
+ */
+static inline void msm_nand_prep_ce(struct msm_nand_sps_cmd *sps_cmd,
+ uint32_t addr, uint32_t command,
+ uint32_t data, uint32_t flags)
+{
+ struct sps_command_element *cmd = &sps_cmd->ce;
+
+ cmd->addr = addr;
+ cmd->command = (command & WRITE) ? (uint32_t) SPS_WRITE_COMMAND :
+ (uint32_t) SPS_READ_COMMAND;
+ cmd->data = data;
+ cmd->mask = 0xFFFFFFFF;
+ sps_cmd->flags = SPS_IOVEC_FLAG_CMD | flags;
+}
+
+/*
+ * Read the single NANDc register specified by addr. The return value
+ * indicates whether the read is successful or not. The register value read
+ * is stored in val.
+ */
+static int msm_nand_flash_rd_reg(struct msm_nand_info *info, uint32_t addr,
+ uint32_t *val)
+{
+ int ret = 0;
+ struct msm_nand_sps_cmd *cmd;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct {
+ struct msm_nand_sps_cmd cmd;
+ uint32_t data;
+ } *dma_buffer;
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+ chip, sizeof(*dma_buffer))));
+ cmd = &dma_buffer->cmd;
+ msm_nand_prep_ce(cmd, addr, READ, msm_virt_to_dma(chip,
+ &dma_buffer->data), SPS_IOVEC_FLAG_INT);
+
+ ret = sps_transfer_one(info->sps.cmd_pipe.handle,
+ msm_virt_to_dma(chip, &cmd->ce),
+ sizeof(struct sps_command_element), NULL, cmd->flags);
+ if (ret) {
+ pr_err("failed to submit command %x ret %d\n", addr, ret);
+ goto out;
+ }
+ wait_for_completion_io(&info->sps.cmd_pipe.completion);
+ *val = dma_buffer->data;
+out:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ return ret;
+}
+
+/*
+ * Read the flash ID from the NAND flash device. A return value < 0
+ * indicates failure. When successful, the Flash ID is stored in parameter
+ * read_id.
+ */
+static int msm_nand_flash_read_id(struct msm_nand_info *info,
+ bool read_onfi_signature,
+ uint32_t *read_id)
+{
+ int err = 0, i;
+ struct msm_nand_sps_cmd *cmd;
+ struct sps_iovec *iovec;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ uint32_t total_cnt = 4;
+ /*
+ * The following 4 commands are required to read id -
+ * write commands - addr0, flash, exec
+ * read_commands - read_id
+ */
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[total_cnt];
+ struct msm_nand_sps_cmd cmd[total_cnt];
+ uint32_t data[total_cnt];
+ } *dma_buffer;
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+ (chip, sizeof(*dma_buffer))));
+ if (read_onfi_signature)
+ dma_buffer->data[0] = FLASH_READ_ONFI_SIGNATURE_ADDRESS;
+ else
+ dma_buffer->data[0] = FLASH_READ_DEVICE_ID_ADDRESS;
+
+ dma_buffer->data[1] = MSM_NAND_CMD_FETCH_ID;
+ dma_buffer->data[2] = 1;
+ dma_buffer->data[3] = 0xeeeeeeee;
+
+ cmd = dma_buffer->cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE,
+ dma_buffer->data[0], SPS_IOVEC_FLAG_LOCK);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE,
+ dma_buffer->data[1], 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+ dma_buffer->data[2], SPS_IOVEC_FLAG_NWD);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_ID(info), READ,
+ msm_virt_to_dma(chip, &dma_buffer->data[3]),
+ SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+ cmd++;
+
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+ iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[i].flags;
+ iovec++;
+ }
+
+ mutex_lock(&info->bam_lock);
+ err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ mutex_unlock(&info->bam_lock);
+ goto out;
+ }
+ wait_for_completion_io(&info->sps.cmd_pipe.completion);
+ mutex_unlock(&info->bam_lock);
+
+ pr_debug("Read ID register value 0x%x\n", dma_buffer->data[3]);
+ if (!read_onfi_signature)
+ pr_debug("nandid: %x maker %02x device %02x\n",
+ dma_buffer->data[3], dma_buffer->data[3] & 0xff,
+ (dma_buffer->data[3] >> 8) & 0xff);
+ *read_id = dma_buffer->data[3];
+out:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ return err;
+}
+
+/*
+ * Contains data for common configuration registers that must be programmed
+ * for every NANDc operation.
+ */
+struct msm_nand_common_cfgs {
+ uint32_t cmd;
+ uint32_t addr0;
+ uint32_t addr1;
+ uint32_t cfg0;
+ uint32_t cfg1;
+};
+
+/*
+ * Function to prepare SPS command elements to write into NANDc configuration
+ * registers as per the data defined in struct msm_nand_common_cfgs. This is
+ * required for the following NANDc operations - Erase, Bad Block checking
+ * and for reading ONFI parameter page.
+ */
+static void msm_nand_prep_cfg_cmd_desc(struct msm_nand_info *info,
+ struct msm_nand_common_cfgs data,
+ struct msm_nand_sps_cmd **curr_cmd)
+{
+ struct msm_nand_sps_cmd *cmd;
+
+ cmd = *curr_cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE, data.cmd,
+ SPS_IOVEC_FLAG_LOCK);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE, data.addr0, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_ADDR1(info), WRITE, data.addr1, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG0(info), WRITE, data.cfg0, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG1(info), WRITE, data.cfg1, 0);
+ cmd++;
+ *curr_cmd = cmd;
+}
+
+/*
+ * Function to perform the CRC integrity check on the ONFI parameter page.
+ * For ONFI parameter page reads, the controller ECC is disabled. Hence,
+ * it is mandatory to manually compute the CRC and check it against the
+ * value stored within the ONFI page.
+ */
+static uint16_t msm_nand_flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
+{
+ int i;
+ uint16_t result;
+
+ for (i = 0; i < count; i++)
+ buffer[i] = bitrev8(buffer[i]);
+
+ result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));
+
+ for (i = 0; i < count; i++)
+ buffer[i] = bitrev8(buffer[i]);
+
+ return result;
+}
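+/*
+ * Note on the bit reversals above: ONFI defines its CRC-16 with polynomial
+ * 0x8005 computed MSB-first and seeded with 0x4F4E, while the kernel's
+ * crc16() works LSB-first on the reflected polynomial. Reversing the seed,
+ * the buffer bytes and the result maps one form onto the other.
+ */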
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for reading the ONFI parameter page.
+ */
+struct msm_nand_flash_onfi_data {
+ struct msm_nand_common_cfgs cfg;
+ uint32_t exec;
+ uint32_t devcmd1_orig;
+ uint32_t devcmdvld_orig;
+ uint32_t devcmd1_mod;
+ uint32_t devcmdvld_mod;
+ uint32_t ecc_bch_cfg;
+};
+
+/*
+ * Function to identify whether the attached NAND flash device is
+ * compliant with the ONFI spec or not. If yes, it reads the ONFI parameter
+ * page to get the device parameters.
+ */
+static int msm_nand_flash_onfi_probe(struct msm_nand_info *info)
+{
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct flash_identification *flash = &info->flash_dev;
+ uint32_t crc_chk_count = 0, page_address = 0;
+ int ret = 0, i;
+
+ /* SPS parameters */
+ struct msm_nand_sps_cmd *cmd, *curr_cmd;
+ struct sps_iovec *iovec;
+ uint32_t rdata;
+
+ /* ONFI Identifier/Parameter Page parameters */
+ uint8_t *onfi_param_info_buf = NULL;
+ dma_addr_t dma_addr_param_info = 0;
+ struct onfi_param_page *onfi_param_page_ptr;
+ struct msm_nand_flash_onfi_data data;
+ uint32_t onfi_signature;
+
+ /* SPS command/data descriptors */
+ uint32_t total_cnt = 13;
+ /*
+ * The following 13 commands are required to get onfi parameters -
+ * flash, addr0, addr1, cfg0, cfg1, dev0_ecc_cfg, cmd_vld, dev_cmd1,
+ * read_loc_0, exec, flash_status (read cmd), dev_cmd1, cmd_vld.
+ */
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[total_cnt];
+ struct msm_nand_sps_cmd cmd[total_cnt];
+ uint32_t flash_status;
+ } *dma_buffer;
+
+ wait_event(chip->dma_wait_queue, (onfi_param_info_buf =
+ msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
+ dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+ (chip, sizeof(*dma_buffer))));
+
+ ret = msm_nand_flash_read_id(info, 1, &onfi_signature);
+ if (ret < 0) {
+ pr_err("Failed to read ONFI signature\n");
+ goto free_dma;
+ }
+ if (onfi_signature != ONFI_PARAMETER_PAGE_SIGNATURE) {
+ pr_info("Found a non ONFI device\n");
+ ret = -EIO;
+ goto free_dma;
+ }
+
+ memset(&data, 0, sizeof(struct msm_nand_flash_onfi_data));
+ ret = msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD1(info),
+ &data.devcmd1_orig);
+ if (ret < 0)
+ goto free_dma;
+ ret = msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD_VLD(info),
+ &data.devcmdvld_orig);
+ if (ret < 0)
+ goto free_dma;
+
+ data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
+ data.exec = 1;
+ data.cfg.addr0 = (page_address << 16) |
+ FLASH_READ_ONFI_PARAMETERS_ADDRESS;
+ data.cfg.addr1 = (page_address >> 16) & 0xFF;
+ data.cfg.cfg0 = MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO;
+ data.cfg.cfg1 = MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO;
+ data.devcmd1_mod = (data.devcmd1_orig & 0xFFFFFF00) |
+ FLASH_READ_ONFI_PARAMETERS_COMMAND;
+ data.devcmdvld_mod = data.devcmdvld_orig & 0xFFFFFFFE;
+ data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ dma_buffer->flash_status = 0xeeeeeeee;
+
+ curr_cmd = cmd = dma_buffer->cmd;
+ msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+ cmd = curr_cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+ data.ecc_bch_cfg, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD_VLD(info), WRITE,
+ data.devcmdvld_mod, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD1(info), WRITE,
+ data.devcmd1_mod, 0);
+ cmd++;
+
+ rdata = (0 << 0) | (ONFI_PARAM_INFO_LENGTH << 16) | (1 << 31);
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
+ rdata, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+ data.exec, SPS_IOVEC_FLAG_NWD);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+ msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD1(info), WRITE,
+ data.devcmd1_orig, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD_VLD(info), WRITE,
+ data.devcmdvld_orig,
+ SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+ cmd++;
+
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+ iovec->addr = msm_virt_to_dma(chip,
+ &dma_buffer->cmd[i].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[i].flags;
+ iovec++;
+ }
+ mutex_lock(&info->bam_lock);
+ /* Submit data descriptor */
+ ret = sps_transfer_one(info->sps.data_prod.handle, dma_addr_param_info,
+ ONFI_PARAM_INFO_LENGTH, NULL, SPS_IOVEC_FLAG_INT);
+ if (ret) {
+ pr_err("Failed to submit data descriptors %d\n", ret);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ /* Submit command descriptors */
+ ret = sps_transfer(info->sps.cmd_pipe.handle,
+ &dma_buffer->xfer);
+ if (ret) {
+ pr_err("Failed to submit commands %d\n", ret);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ wait_for_completion_io(&info->sps.cmd_pipe.completion);
+ wait_for_completion_io(&info->sps.data_prod.completion);
+ mutex_unlock(&info->bam_lock);
+
+ /* Check for flash status errors */
+ if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+ pr_err("MPU/OP err (0x%x) is set\n", dma_buffer->flash_status);
+ ret = -EIO;
+ goto free_dma;
+ }
+
+ for (crc_chk_count = 0; crc_chk_count < ONFI_PARAM_INFO_LENGTH
+ / ONFI_PARAM_PAGE_LENGTH; crc_chk_count++) {
+ onfi_param_page_ptr =
+ (struct onfi_param_page *)
+ (&(onfi_param_info_buf
+ [ONFI_PARAM_PAGE_LENGTH *
+ crc_chk_count]));
+ if (msm_nand_flash_onfi_crc_check(
+ (uint8_t *)onfi_param_page_ptr,
+ ONFI_PARAM_PAGE_LENGTH - 2) ==
+ onfi_param_page_ptr->integrity_crc) {
+ break;
+ }
+ }
+ if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
+ / ONFI_PARAM_PAGE_LENGTH) {
+ pr_err("CRC Check failed on param page\n");
+ ret = -EIO;
+ goto free_dma;
+ }
+ ret = msm_nand_flash_read_id(info, 0, &flash->flash_id);
+ if (ret < 0) {
+ pr_err("Failed to read flash ID\n");
+ goto free_dma;
+ }
+ flash->widebus = onfi_param_page_ptr->features_supported & 0x01;
+ flash->pagesize = onfi_param_page_ptr->number_of_data_bytes_per_page;
+ flash->blksize = onfi_param_page_ptr->number_of_pages_per_block *
+ flash->pagesize;
+ flash->oobsize = onfi_param_page_ptr->number_of_spare_bytes_per_page;
+ flash->density = onfi_param_page_ptr->number_of_blocks_per_logical_unit
+ * flash->blksize;
+ flash->ecc_correctability = onfi_param_page_ptr->
+ number_of_bits_ecc_correctability;
+
+ pr_info("Found an ONFI compliant device %s\n",
+ onfi_param_page_ptr->device_model);
+ /*
+ * Temporary hack for MT29F4G08ABC device.
+	 * Since the device does not properly adhere
+	 * to the ONFI specification, it reports itself
+	 * as a 16-bit device though it is an 8-bit device.
+ */
+ if (!strncmp(onfi_param_page_ptr->device_model, "MT29F4G08ABC", 12))
+ flash->widebus = 0;
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
+ ONFI_PARAM_INFO_LENGTH);
+ return ret;
+}
+
+/*
+ * Structure that contains read/write parameters required for reading/writing
+ * from/to a page.
+ */
+struct msm_nand_rw_params {
+ uint32_t page;
+ uint32_t page_count;
+ uint32_t sectordatasize;
+ uint32_t sectoroobsize;
+ uint32_t cwperpage;
+ uint32_t oob_len_cmd;
+ uint32_t oob_len_data;
+ uint32_t start_sector;
+ uint32_t oob_col;
+ dma_addr_t data_dma_addr;
+ dma_addr_t oob_dma_addr;
+ dma_addr_t data_dma_addr_curr;
+ dma_addr_t oob_dma_addr_curr;
+ bool read;
+};
+
+/*
+ * Structure that contains NANDc register data required for reading/writing
+ * from/to a page.
+ */
+struct msm_nand_rw_reg_data {
+ uint32_t cmd;
+ uint32_t addr0;
+ uint32_t addr1;
+ uint32_t cfg0;
+ uint32_t cfg1;
+ uint32_t ecc_bch_cfg;
+ uint32_t exec;
+ uint32_t ecc_cfg;
+ uint32_t clrfstatus;
+ uint32_t clrrstatus;
+};
+
+/*
+ * Function that validates page read/write MTD parameters received from upper
+ * layers such as MTD/YAFFS2 and returns an error for any operation not
+ * supported by the driver. On success, it also maps the data and OOB
+ * buffers received for DMA.
+ */
+static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read,
+ loff_t offset,
+ struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *args)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ int err = 0;
+
+ pr_debug("========================================================\n");
+ pr_debug("offset 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x\n",
+ offset, ops->mode, ops->datbuf, ops->len);
+ pr_debug("oobbuf 0x%p ooblen 0x%x\n", ops->oobbuf, ops->ooblen);
+
+ if (ops->mode == MTD_OPS_PLACE_OOB) {
+ pr_err("MTD_OPS_PLACE_OOB is not supported\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (mtd->writesize == PAGE_SIZE_2K)
+ args->page = offset >> 11;
+
+ if (mtd->writesize == PAGE_SIZE_4K)
+ args->page = offset >> 12;
+
+ args->oob_len_cmd = ops->ooblen;
+ args->oob_len_data = ops->ooblen;
+ args->cwperpage = (mtd->writesize >> 9);
+ args->read = (read ? true : false);
+
+ if (offset & (mtd->writesize - 1)) {
+ pr_err("unsupported offset 0x%llx\n", offset);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!read && !ops->datbuf) {
+ pr_err("No data buffer provided for write!!\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (ops->mode == MTD_OPS_RAW) {
+ if (!ops->datbuf) {
+ pr_err("No data buffer provided for RAW mode\n");
+ err = -EINVAL;
+ goto out;
+ } else if ((ops->len % (mtd->writesize +
+ mtd->oobsize)) != 0) {
+ pr_err("unsupported data len %d for RAW mode\n",
+ ops->len);
+ err = -EINVAL;
+ goto out;
+ }
+ args->page_count = ops->len / (mtd->writesize + mtd->oobsize);
+
+ } else if (ops->mode == MTD_OPS_AUTO_OOB) {
+ if (ops->datbuf && (ops->len % mtd->writesize) != 0) {
+ /* when ops->datbuf is NULL, ops->len can be ooblen */
+ pr_err("unsupported data len %d for AUTO mode\n",
+ ops->len);
+ err = -EINVAL;
+ goto out;
+ }
+ if (read && ops->oobbuf && !ops->datbuf) {
+ args->start_sector = args->cwperpage - 1;
+ args->page_count = ops->ooblen / mtd->oobavail;
+ if ((args->page_count == 0) && (ops->ooblen))
+ args->page_count = 1;
+ } else if (ops->datbuf) {
+ args->page_count = ops->len / mtd->writesize;
+ }
+ }
+
+ if (ops->datbuf) {
+ args->data_dma_addr_curr = args->data_dma_addr =
+ msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
+ (read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
+ if (dma_mapping_error(chip->dev, args->data_dma_addr)) {
+ pr_err("dma mapping failed for 0x%p\n", ops->datbuf);
+ err = -EIO;
+ goto out;
+ }
+ }
+ if (ops->oobbuf) {
+ if (read)
+ memset(ops->oobbuf, 0xFF, ops->ooblen);
+ args->oob_dma_addr_curr = args->oob_dma_addr =
+ msm_nand_dma_map(chip->dev, ops->oobbuf, ops->ooblen,
+ (read ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE));
+ if (dma_mapping_error(chip->dev, args->oob_dma_addr)) {
+ pr_err("dma mapping failed for 0x%p\n", ops->oobbuf);
+ err = -EIO;
+ goto dma_map_oobbuf_failed;
+ }
+ }
+ goto out;
+dma_map_oobbuf_failed:
+ if (ops->datbuf)
+ dma_unmap_page(chip->dev, args->data_dma_addr, ops->len,
+ (read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
+out:
+ return err;
+}
+
+/*
+ * Function that updates NANDc register data (struct msm_nand_rw_reg_data)
+ * required for page read/write.
+ */
+static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip,
+ struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *args,
+ struct msm_nand_rw_reg_data *data)
+{
+ if (args->read) {
+ if (ops->mode != MTD_OPS_RAW) {
+ data->cmd = MSM_NAND_CMD_PAGE_READ_ECC;
+ data->cfg0 =
+ (chip->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (((args->cwperpage-1) - args->start_sector)
+ << CW_PER_PAGE);
+ data->cfg1 = chip->cfg1;
+ data->ecc_bch_cfg = chip->ecc_bch_cfg;
+ } else {
+ data->cmd = MSM_NAND_CMD_PAGE_READ_ALL;
+ data->cfg0 = chip->cfg0_raw;
+ data->cfg1 = chip->cfg1_raw;
+ data->ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ }
+
+ } else {
+ if (ops->mode != MTD_OPS_RAW) {
+ data->cfg0 = chip->cfg0;
+ data->cfg1 = chip->cfg1;
+ data->ecc_bch_cfg = chip->ecc_bch_cfg;
+ } else {
+ data->cfg0 = chip->cfg0_raw;
+ data->cfg1 = chip->cfg1_raw;
+ data->ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ }
+ data->cmd = MSM_NAND_CMD_PRG_PAGE;
+ data->clrfstatus = MSM_NAND_RESET_FLASH_STS;
+ data->clrrstatus = MSM_NAND_RESET_READ_STS;
+ }
+ data->exec = 1;
+ data->ecc_cfg = chip->ecc_buf_cfg;
+}
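+/*
+ * For example, on a 2K-page device cwperpage is 2048 >> 9 = 4, so a
+ * full-page ECC read (start_sector == 0) programs the CW_PER_PAGE field of
+ * cfg0 above with (4 - 1) - 0 = 3, i.e. four codewords per page.
+ */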
+
+/*
+ * Function to prepare the series of SPS command descriptors required for a
+ * page read/write operation.
+ */
+static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *args,
+ struct msm_nand_rw_reg_data *data,
+ struct msm_nand_info *info,
+ uint32_t curr_cw,
+ struct msm_nand_sps_cmd **curr_cmd)
+{
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct msm_nand_sps_cmd *cmd;
+ uint32_t rdata;
+ /* read_location register parameters */
+ uint32_t offset, size, last_read;
+
+ cmd = *curr_cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE, data->cmd,
+ ((curr_cw == args->start_sector) ?
+ SPS_IOVEC_FLAG_LOCK : 0));
+ cmd++;
+
+ if (curr_cw == args->start_sector) {
+ msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE,
+ data->addr0, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_ADDR1(info), WRITE,
+ data->addr1, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG0(info), WRITE,
+ data->cfg0, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG1(info), WRITE,
+ data->cfg1, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+ data->ecc_bch_cfg, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_EBI2_ECC_BUF_CFG(info),
+ WRITE, data->ecc_cfg, 0);
+ cmd++;
+ }
+
+ if (!args->read)
+ goto sub_exec_cmd;
+
+ if (ops->mode == MTD_OPS_RAW) {
+ rdata = (0 << 0) | (chip->cw_size << 16) | (1 << 31);
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
+ rdata, 0);
+ cmd++;
+ }
+ if (ops->mode == MTD_OPS_AUTO_OOB && ops->datbuf) {
+ offset = 0;
+ size = (curr_cw < (args->cwperpage - 1)) ? 516 :
+ (512 - ((args->cwperpage - 1) << 2));
+ last_read = (curr_cw < (args->cwperpage - 1)) ? 1 :
+ (ops->oobbuf ? 0 : 1);
+ rdata = (offset << 0) | (size << 16) | (last_read << 31);
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
+ rdata, 0);
+ cmd++;
+ }
+ if (ops->mode == MTD_OPS_AUTO_OOB && ops->oobbuf
+ && (curr_cw == (args->cwperpage - 1))) {
+ offset = 512 - ((args->cwperpage - 1) << 2);
+ size = (args->cwperpage) << 2;
+ if (size > args->oob_len_cmd)
+ size = args->oob_len_cmd;
+ args->oob_len_cmd -= size;
+ last_read = 1;
+ rdata = (offset << 0) | (size << 16) | (last_read << 31);
+ if (ops->datbuf) {
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_1(info),
+ WRITE, rdata, 0);
+ } else {
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info),
+ WRITE, rdata, 0);
+ }
+ cmd++;
+ }
+sub_exec_cmd:
+ msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data->exec,
+ SPS_IOVEC_FLAG_NWD);
+ cmd++;
+ *curr_cmd = cmd;
+}
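+/*
+ * For example, for an MTD_OPS_AUTO_OOB read of a 2K page (cwperpage = 4)
+ * with both datbuf and oobbuf set, codewords 0-2 each read 516 bytes from
+ * offset 0, while the last codeword reads 512 - (3 << 2) = 500 data bytes
+ * via READ_LOCATION_0 and up to (4 << 2) = 16 spare bytes from offset 500
+ * via READ_LOCATION_1.
+ */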
+
+/*
+ * Function to prepare and submit SPS data descriptors required for a page
+ * read/write operation.
+ */
+static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *args,
+ struct msm_nand_info *info,
+ uint32_t curr_cw)
+{
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct sps_pipe *data_pipe_handle;
+ uint32_t sectordatasize, sectoroobsize;
+ uint32_t sps_flags = 0;
+ int err = 0;
+
+ if (args->read)
+ data_pipe_handle = info->sps.data_prod.handle;
+ else
+ data_pipe_handle = info->sps.data_cons.handle;
+
+ if (ops->mode == MTD_OPS_RAW) {
+ sectordatasize = chip->cw_size;
+ if (!args->read)
+ sps_flags = SPS_IOVEC_FLAG_EOT;
+ if (curr_cw == (args->cwperpage - 1))
+ sps_flags |= SPS_IOVEC_FLAG_INT;
+
+ err = sps_transfer_one(data_pipe_handle,
+ args->data_dma_addr_curr,
+ sectordatasize, NULL,
+ sps_flags);
+ if (err)
+ goto out;
+ args->data_dma_addr_curr += sectordatasize;
+
+ } else if (ops->mode == MTD_OPS_AUTO_OOB) {
+ if (ops->datbuf) {
+ sectordatasize = (curr_cw < (args->cwperpage - 1))
+ ? 516 : (512 - ((args->cwperpage - 1) << 2));
+
+ if (!args->read) {
+ sps_flags = SPS_IOVEC_FLAG_EOT;
+ if (curr_cw == (args->cwperpage - 1) &&
+ ops->oobbuf)
+ sps_flags = 0;
+ }
+ if ((curr_cw == (args->cwperpage - 1)) && !ops->oobbuf)
+ sps_flags |= SPS_IOVEC_FLAG_INT;
+
+ err = sps_transfer_one(data_pipe_handle,
+ args->data_dma_addr_curr,
+ sectordatasize, NULL,
+ sps_flags);
+ if (err)
+ goto out;
+ args->data_dma_addr_curr += sectordatasize;
+ }
+
+ if (ops->oobbuf && (curr_cw == (args->cwperpage - 1))) {
+ sectoroobsize = args->cwperpage << 2;
+ if (sectoroobsize > args->oob_len_data)
+ sectoroobsize = args->oob_len_data;
+
+ if (!args->read)
+ sps_flags |= SPS_IOVEC_FLAG_EOT;
+ sps_flags |= SPS_IOVEC_FLAG_INT;
+ err = sps_transfer_one(data_pipe_handle,
+ args->oob_dma_addr_curr,
+ sectoroobsize, NULL,
+ sps_flags);
+ if (err)
+ goto out;
+ args->oob_dma_addr_curr += sectoroobsize;
+ args->oob_len_data -= sectoroobsize;
+ }
+ }
+out:
+ return err;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to read a
+ * page with main or/and spare data.
+ */
+static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ uint32_t cwperpage = (mtd->writesize >> 9);
+ int err, pageerr = 0, rawerr = 0;
+ uint32_t n = 0, pages_read = 0;
+ uint32_t ecc_errors = 0, total_ecc_errors = 0;
+ struct msm_nand_rw_params rw_params;
+ struct msm_nand_rw_reg_data data;
+ struct msm_nand_sps_cmd *cmd, *curr_cmd;
+ struct sps_iovec *iovec;
+ /*
+ * The following 6 commands will be sent only once for the first
+ * codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
+ * dev0_ecc_cfg, ebi2_ecc_buf_cfg. The following 6 commands will
+ * be sent for every CW - flash, read_location_0, read_location_1,
+ * exec, flash_status and buffer_status.
+ */
+ uint32_t total_cnt = (6 * cwperpage) + 6;
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[total_cnt];
+ struct msm_nand_sps_cmd cmd[total_cnt];
+ struct {
+ uint32_t flash_status;
+ uint32_t buffer_status;
+ } result[cwperpage];
+ } *dma_buffer;
+
+ memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
+ err = msm_nand_validate_mtd_params(mtd, true, from, ops, &rw_params);
+ if (err)
+ goto validate_mtd_params_failed;
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+ chip, sizeof(*dma_buffer))));
+
+ rw_params.oob_col = rw_params.start_sector * chip->cw_size;
+ if (chip->cfg1 & (1 << WIDE_FLASH))
+ rw_params.oob_col >>= 1;
+
+ memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
+ msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
+
+ while (rw_params.page_count-- > 0) {
+ data.addr0 = (rw_params.page << 16) | rw_params.oob_col;
+ data.addr1 = (rw_params.page >> 16) & 0xff;
+ cmd = dma_buffer->cmd;
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ dma_buffer->result[n].flash_status = 0xeeeeeeee;
+ dma_buffer->result[n].buffer_status = 0xeeeeeeee;
+
+ curr_cmd = cmd;
+ msm_nand_prep_rw_cmd_desc(ops, &rw_params,
+ &data, info, n, &curr_cmd);
+
+ cmd = curr_cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
+ READ, msm_virt_to_dma(chip,
+ &dma_buffer->result[n].flash_status), 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_BUFFER_STATUS(info),
+ READ, msm_virt_to_dma(chip,
+ &dma_buffer->result[n].buffer_status),
+ ((n == (cwperpage - 1)) ?
+ (SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT) :
+ 0));
+ cmd++;
+ }
+
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (n = 0; n < dma_buffer->xfer.iovec_count; n++) {
+ iovec->addr = msm_virt_to_dma(chip,
+ &dma_buffer->cmd[n].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[n].flags;
+ iovec++;
+ }
+ mutex_lock(&info->bam_lock);
+ /* Submit data descriptors */
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ err = msm_nand_submit_rw_data_desc(ops,
+ &rw_params, info, n);
+ if (err) {
+ pr_err("Failed to submit data descs %d\n", err);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ }
+ /* Submit command descriptors */
+ err = sps_transfer(info->sps.cmd_pipe.handle,
+ &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ wait_for_completion_io(&info->sps.cmd_pipe.completion);
+ wait_for_completion_io(&info->sps.data_prod.completion);
+ mutex_unlock(&info->bam_lock);
+ /* Check for flash status errors */
+ pageerr = rawerr = 0;
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ if (dma_buffer->result[n].flash_status & (FS_OP_ERR |
+ FS_MPU_ERR)) {
+ rawerr = -EIO;
+ break;
+ }
+ }
+ /* Check for ECC correction on empty block */
+ if (rawerr && ops->datbuf && ops->mode != MTD_OPS_RAW) {
+ uint8_t *datbuf = ops->datbuf +
+ pages_read * mtd->writesize;
+
+ dma_sync_single_for_cpu(chip->dev,
+ rw_params.data_dma_addr_curr - mtd->writesize,
+ mtd->writesize, DMA_BIDIRECTIONAL);
+
+ for (n = 0; n < mtd->writesize; n++) {
+ /* TODO: check offset for 4bit BCHECC */
+ if ((n % 516 == 3 || n % 516 == 175)
+ && datbuf[n] == 0x54)
+ datbuf[n] = 0xff;
+ if (datbuf[n] != 0xff) {
+ pageerr = rawerr;
+ break;
+ }
+ }
+
+ dma_sync_single_for_device(chip->dev,
+ rw_params.data_dma_addr_curr - mtd->writesize,
+ mtd->writesize, DMA_BIDIRECTIONAL);
+ }
+ if (rawerr && ops->oobbuf) {
+ dma_sync_single_for_cpu(chip->dev,
+ rw_params.oob_dma_addr_curr - (ops->ooblen -
+ rw_params.oob_len_data),
+ ops->ooblen - rw_params.oob_len_data,
+ DMA_BIDIRECTIONAL);
+
+ for (n = 0; n < ops->ooblen; n++) {
+ if (ops->oobbuf[n] != 0xff) {
+ pageerr = rawerr;
+ break;
+ }
+ }
+
+ dma_sync_single_for_device(chip->dev,
+ rw_params.oob_dma_addr_curr - (ops->ooblen -
+ rw_params.oob_len_data),
+ ops->ooblen - rw_params.oob_len_data,
+ DMA_BIDIRECTIONAL);
+ }
+ /* check for uncorrectable errors */
+ if (pageerr) {
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ if (dma_buffer->result[n].buffer_status &
+ BS_UNCORRECTABLE_BIT) {
+ mtd->ecc_stats.failed++;
+ pageerr = -EBADMSG;
+ break;
+ }
+ }
+ }
+ /* check for correctable errors */
+ if (!rawerr) {
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ ecc_errors =
+ dma_buffer->result[n].buffer_status
+ & BS_CORRECTABLE_ERR_MSK;
+ if (ecc_errors) {
+ total_ecc_errors += ecc_errors;
+ mtd->ecc_stats.corrected += ecc_errors;
+ /*
+ * For Micron devices it is observed
+					 * that correctable errors up to 3 bits
+ * are very common.
+ */
+ if (ecc_errors > 3)
+ pageerr = -EUCLEAN;
+ }
+ }
+ }
+ if (pageerr && (pageerr != -EUCLEAN || err == 0))
+ err = pageerr;
+
+ if (rawerr && !pageerr) {
+ pr_debug("%llx %x %x empty page\n",
+ (loff_t)rw_params.page * mtd->writesize,
+ ops->len, ops->ooblen);
+ } else {
+ for (n = rw_params.start_sector; n < cwperpage; n++)
+ pr_debug("cw %d: flash_sts %x buffr_sts %x\n",
+ n, dma_buffer->result[n].flash_status,
+ dma_buffer->result[n].buffer_status);
+ }
+ if (err && err != -EUCLEAN && err != -EBADMSG)
+ goto free_dma;
+ pages_read++;
+ rw_params.page++;
+ }
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ if (ops->oobbuf)
+ dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
+ ops->ooblen, DMA_FROM_DEVICE);
+ if (ops->datbuf)
+ dma_unmap_page(chip->dev, rw_params.data_dma_addr,
+ ops->len, DMA_BIDIRECTIONAL);
+validate_mtd_params_failed:
+ if (ops->mode != MTD_OPS_RAW)
+ ops->retlen = mtd->writesize * pages_read;
+ else
+ ops->retlen = (mtd->writesize + mtd->oobsize) * pages_read;
+ ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
+ if (err)
+ pr_err("0x%llx datalen 0x%x ooblen %x err %d corrected %d\n",
+ from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
+ total_ecc_errors);
+ pr_debug("ret %d, retlen %d oobretlen %d\n",
+ err, ops->retlen, ops->oobretlen);
+
+ pr_debug("========================================================\n");
+ return err;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to read a
+ * page with only main data.
+ */
+static int msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ int ret;
+ struct mtd_oob_ops ops;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.len = len;
+ ops.retlen = 0;
+ ops.ooblen = 0;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ret = msm_nand_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to write a
+ * page with both main and spare data.
+ */
+static int msm_nand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ uint32_t cwperpage = (mtd->writesize >> 9);
+ uint32_t n, flash_sts, pages_written = 0;
+ int err = 0;
+ struct msm_nand_rw_params rw_params;
+ struct msm_nand_rw_reg_data data;
+ struct msm_nand_sps_cmd *cmd, *curr_cmd;
+ struct sps_iovec *iovec;
+ /*
+ * The following 7 commands will be sent only once :
+ * For first codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
+ * dev0_ecc_cfg, ebi2_ecc_buf_cfg.
+ * For last codeword (CW) - read_status(write)
+ *
+ * The following 4 commands will be sent for every CW :
+ * flash, exec, flash_status (read), flash_status (write).
+ */
+ uint32_t total_cnt = (4 * cwperpage) + 7;
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[total_cnt];
+ struct msm_nand_sps_cmd cmd[total_cnt];
+ struct {
+ uint32_t flash_status[cwperpage];
+ } data;
+ } *dma_buffer;
+
+ memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
+ err = msm_nand_validate_mtd_params(mtd, false, to, ops, &rw_params);
+ if (err)
+ goto validate_mtd_params_failed;
+
+ wait_event(chip->dma_wait_queue, (dma_buffer =
+ msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
+
+ memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
+ msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
+
+ while (rw_params.page_count-- > 0) {
+ data.addr0 = (rw_params.page << 16);
+ data.addr1 = (rw_params.page >> 16) & 0xff;
+ cmd = dma_buffer->cmd;
+
+ for (n = 0; n < cwperpage ; n++) {
+ dma_buffer->data.flash_status[n] = 0xeeeeeeee;
+
+ curr_cmd = cmd;
+ msm_nand_prep_rw_cmd_desc(ops, &rw_params,
+ &data, info, n, &curr_cmd);
+
+ cmd = curr_cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
+ READ, msm_virt_to_dma(chip,
+ &dma_buffer->data.flash_status[n]), 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
+ WRITE, data.clrfstatus, 0);
+ cmd++;
+
+ if (n == (cwperpage - 1)) {
+ msm_nand_prep_ce(cmd,
+ MSM_NAND_READ_STATUS(info), WRITE,
+ data.clrrstatus, SPS_IOVEC_FLAG_UNLOCK
+ | SPS_IOVEC_FLAG_INT);
+ cmd++;
+ }
+ }
+
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (n = 0; n < dma_buffer->xfer.iovec_count; n++) {
+ iovec->addr = msm_virt_to_dma(chip,
+ &dma_buffer->cmd[n].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[n].flags;
+ iovec++;
+ }
+ mutex_lock(&info->bam_lock);
+ /* Submit data descriptors */
+ for (n = 0; n < cwperpage; n++) {
+ err = msm_nand_submit_rw_data_desc(ops,
+ &rw_params, info, n);
+ if (err) {
+ pr_err("Failed to submit data descs %d\n", err);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ }
+ /* Submit command descriptors */
+ err = sps_transfer(info->sps.cmd_pipe.handle,
+ &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ wait_for_completion_io(&info->sps.cmd_pipe.completion);
+ wait_for_completion_io(&info->sps.data_cons.completion);
+ mutex_unlock(&info->bam_lock);
+
+ for (n = 0; n < cwperpage; n++)
+ pr_debug("write pg %d: flash_status[%d] = %x\n",
+ rw_params.page, n,
+ dma_buffer->data.flash_status[n]);
+
+ /* Check for flash status errors */
+ for (n = 0; n < cwperpage; n++) {
+ flash_sts = dma_buffer->data.flash_status[n];
+ if (flash_sts & (FS_OP_ERR | FS_MPU_ERR)) {
+ pr_err("MPU/OP err (0x%x) set\n", flash_sts);
+ err = -EIO;
+ goto free_dma;
+ }
+ if (n == (cwperpage - 1)) {
+ if (!(flash_sts & FS_DEVICE_WP) ||
+ (flash_sts & FS_DEVICE_STS_ERR)) {
+ pr_err("Dev sts err 0x%x\n", flash_sts);
+ err = -EIO;
+ goto free_dma;
+ }
+ }
+ }
+ pages_written++;
+ rw_params.page++;
+ }
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ if (ops->oobbuf)
+ dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
+ ops->ooblen, DMA_TO_DEVICE);
+ if (ops->datbuf)
+ dma_unmap_page(chip->dev, rw_params.data_dma_addr,
+ ops->len, DMA_TO_DEVICE);
+validate_mtd_params_failed:
+ if (ops->mode != MTD_OPS_RAW)
+ ops->retlen = mtd->writesize * pages_written;
+ else
+ ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
+
+ ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
+ if (err)
+ pr_err("to %llx datalen %x ooblen %x failed with err %d\n",
+ to, ops->len, ops->ooblen, err);
+ pr_debug("ret %d, retlen %d oobretlen %d\n",
+ err, ops->retlen, ops->oobretlen);
+
+ pr_debug("================================================\n");
+ return err;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to write a
+ * page with only main data.
+ */
+static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ int ret;
+ struct mtd_oob_ops ops;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.len = len;
+ ops.retlen = 0;
+ ops.ooblen = 0;
+ ops.datbuf = (uint8_t *)buf;
+ ops.oobbuf = NULL;
+ ret = msm_nand_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for Erase operation.
+ */
+struct msm_nand_erase_reg_data {
+ struct msm_nand_common_cfgs cfg;
+ uint32_t exec;
+ uint32_t flash_status;
+ uint32_t clrfstatus;
+ uint32_t clrrstatus;
+};
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to erase a
+ * block within NAND device.
+ */
+static int msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ int i, err = 0;
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ uint32_t page = 0;
+ struct msm_nand_sps_cmd *cmd, *curr_cmd;
+ struct msm_nand_erase_reg_data data;
+ struct sps_iovec *iovec;
+ uint32_t total_cnt = 9;
+ /*
+ * The following 9 commands are required to erase a page -
+ * flash, addr0, addr1, cfg0, cfg1, exec, flash_status(read),
+ * flash_status(write), read_status.
+ */
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[total_cnt];
+ struct msm_nand_sps_cmd cmd[total_cnt];
+ uint32_t flash_status;
+ } *dma_buffer;
+
+ if (mtd->writesize == PAGE_SIZE_2K)
+ page = instr->addr >> 11;
+
+ if (mtd->writesize == PAGE_SIZE_4K)
+ page = instr->addr >> 12;
+
+ if (instr->addr & (mtd->erasesize - 1)) {
+ pr_err("unsupported erase address, 0x%llx\n", instr->addr);
+ err = -EINVAL;
+ goto out;
+ }
+ if (instr->len != mtd->erasesize) {
+ pr_err("unsupported erase len, %lld\n", instr->len);
+ err = -EINVAL;
+ goto out;
+ }
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+ chip, sizeof(*dma_buffer))));
+ cmd = dma_buffer->cmd;
+
+ memset(&data, 0, sizeof(struct msm_nand_erase_reg_data));
+ data.cfg.cmd = MSM_NAND_CMD_BLOCK_ERASE;
+ data.cfg.addr0 = page;
+ data.cfg.addr1 = 0;
+ data.cfg.cfg0 = chip->cfg0 & (~(7 << CW_PER_PAGE));
+ data.cfg.cfg1 = chip->cfg1;
+ data.exec = 1;
+ dma_buffer->flash_status = 0xeeeeeeee;
+ data.clrfstatus = MSM_NAND_RESET_FLASH_STS;
+ data.clrrstatus = MSM_NAND_RESET_READ_STS;
+
+ curr_cmd = cmd;
+ msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+ cmd = curr_cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data.exec,
+ SPS_IOVEC_FLAG_NWD);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+ msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), WRITE,
+ data.clrfstatus, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_STATUS(info), WRITE,
+ data.clrrstatus,
+ SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+ cmd++;
+
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+ iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[i].flags;
+ iovec++;
+ }
+ mutex_lock(&info->bam_lock);
+ err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ wait_for_completion_io(&info->sps.cmd_pipe.completion);
+ mutex_unlock(&info->bam_lock);
+
+ /* Check for flash status errors */
+ if (dma_buffer->flash_status & (FS_OP_ERR |
+ FS_MPU_ERR | FS_DEVICE_STS_ERR)) {
+ pr_err("MPU/OP/DEV err (0x%x) set\n", dma_buffer->flash_status);
+ err = -EIO;
+ }
+ if (!(dma_buffer->flash_status & FS_DEVICE_WP)) {
+ pr_err("Device is write protected\n");
+ err = -EIO;
+ }
+ if (err) {
+ pr_err("Erase failed, 0x%llx\n", instr->addr);
+ instr->fail_addr = instr->addr;
+ instr->state = MTD_ERASE_FAILED;
+ } else {
+ instr->state = MTD_ERASE_DONE;
+ instr->fail_addr = 0xffffffff;
+ mtd_erase_callback(instr);
+ }
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+out:
+ return err;
+}
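+
+/*
+ * Illustrative sketch (not part of this driver): a caller erases exactly one
+ * block through the MTD core, which dispatches to msm_nand_erase() above.
+ * block_offset is hypothetical; mtd_erase() and struct erase_info are the
+ * standard MTD interfaces.
+ *
+ *	struct erase_info ei = {0};
+ *
+ *	ei.mtd = mtd;
+ *	ei.addr = block_offset;		(block aligned)
+ *	ei.len = mtd->erasesize;	(exactly one block)
+ *	if (mtd_erase(mtd, &ei) || ei.state != MTD_ERASE_DONE)
+ *		pr_err("erase failed at 0x%llx\n", ei.fail_addr);
+ */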
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for checking if a block is bad.
+ */
+struct msm_nand_blk_isbad_data {
+ struct msm_nand_common_cfgs cfg;
+ uint32_t ecc_bch_cfg;
+ uint32_t exec;
+ uint32_t read_offset;
+};
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to check if
+ * a block is bad. This is done by reading the first page within a block and
+ * checking whether the bad block byte location contains 0xFF or not. If it
+ * doesn't contain 0xFF, then the block is considered bad.
+ */
+static int msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ int i, ret = 0, bad_block = 0;
+ uint8_t *buf;
+ uint32_t page = 0, rdata, cwperpage;
+ struct msm_nand_sps_cmd *cmd, *curr_cmd;
+ struct msm_nand_blk_isbad_data data;
+ struct sps_iovec *iovec;
+ uint32_t total_cnt = 9;
+ /*
+ * The following 9 commands are required to check for a bad block -
+ * flash, addr0, addr1, cfg0, cfg1, ecc_cfg, read_loc_0,
+ * exec, flash_status(read).
+ */
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[total_cnt];
+ struct msm_nand_sps_cmd cmd[total_cnt];
+ uint32_t flash_status;
+ } *dma_buffer;
+
+ if (mtd->writesize == PAGE_SIZE_2K)
+ page = ofs >> 11;
+
+ if (mtd->writesize == PAGE_SIZE_4K)
+ page = ofs >> 12;
+
+ cwperpage = (mtd->writesize >> 9);
+
+ if (ofs > mtd->size) {
+ pr_err("Invalid offset 0x%llx\n", ofs);
+ bad_block = -EINVAL;
+ goto out;
+ }
+ if (ofs & (mtd->erasesize - 1)) {
+ pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
+ bad_block = -EINVAL;
+ goto out;
+ }
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+ chip, sizeof(*dma_buffer) + 4)));
+ buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
+
+ cmd = dma_buffer->cmd;
+ memset(&data, 0, sizeof(struct msm_nand_blk_isbad_data));
+ data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
+ data.cfg.cfg0 = chip->cfg0_raw & ~(7U << CW_PER_PAGE);
+ data.cfg.cfg1 = chip->cfg1_raw;
+
+ if (chip->cfg1 & (1 << WIDE_FLASH))
+ data.cfg.addr0 = (page << 16) |
+ ((chip->cw_size * (cwperpage-1)) >> 1);
+ else
+ data.cfg.addr0 = (page << 16) |
+ (chip->cw_size * (cwperpage-1));
+
+ data.cfg.addr1 = (page >> 16) & 0xff;
+ data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ data.exec = 1;
+ data.read_offset = (mtd->writesize - (chip->cw_size * (cwperpage-1)));
+ dma_buffer->flash_status = 0xeeeeeeee;
+
+ curr_cmd = cmd;
+ msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+ cmd = curr_cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+ data.ecc_bch_cfg, 0);
+ cmd++;
+
+ rdata = (data.read_offset << 0) | (4 << 16) | (1 << 31);
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE, rdata, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+ data.exec, SPS_IOVEC_FLAG_NWD);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+ msm_virt_to_dma(chip, &dma_buffer->flash_status),
+ SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_UNLOCK);
+ cmd++;
+
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+ iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[i].flags;
+ iovec++;
+ }
+ mutex_lock(&info->bam_lock);
+ /* Submit data descriptor */
+ ret = sps_transfer_one(info->sps.data_prod.handle,
+ msm_virt_to_dma(chip, buf),
+ 4, NULL, SPS_IOVEC_FLAG_INT);
+
+ if (ret) {
+ pr_err("Failed to submit data desc %d\n", ret);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ /* Submit command descriptor */
+ ret = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+ if (ret) {
+ pr_err("Failed to submit commands %d\n", ret);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ wait_for_completion_io(&info->sps.cmd_pipe.completion);
+ wait_for_completion_io(&info->sps.data_prod.completion);
+ mutex_unlock(&info->bam_lock);
+
+ /* Check for flash status errors */
+ if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+ pr_err("MPU/OP err set: %x\n", dma_buffer->flash_status);
+ bad_block = -EIO;
+ goto free_dma;
+ }
+
+ /* Check for bad block marker byte */
+ if (chip->cfg1 & (1 << WIDE_FLASH)) {
+ if (buf[0] != 0xFF || buf[1] != 0xFF)
+ bad_block = 1;
+ } else {
+ if (buf[0] != 0xFF)
+ bad_block = 1;
+ }
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
+out:
+ return ret ? ret : bad_block;
+}
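+
+/*
+ * Illustrative sketch (not part of this driver): callers normally use the
+ * MTD core helper rather than calling msm_nand_block_isbad() directly.
+ * block_offset is hypothetical; mtd_block_isbad() is the standard API.
+ *
+ *	int bad = mtd_block_isbad(mtd, block_offset);
+ *
+ *	if (bad < 0)
+ *		pr_err("bad-block check failed %d\n", bad);
+ *	else if (bad)
+ *		pr_info("block at 0x%llx is marked bad\n", block_offset);
+ */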
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to mark a
+ * block as bad. This is done by writing the first page within the block with
+ * 0s, which also sets the bad block byte location to 0.
+ */
+static int msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_oob_ops ops;
+ int ret;
+ uint8_t *buf;
+ size_t len;
+
+ if (ofs > mtd->size) {
+ pr_err("Invalid offset 0x%llx\n", ofs);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (ofs & (mtd->erasesize - 1)) {
+ pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
+ ret = -EINVAL;
+ goto out;
+ }
+ len = mtd->writesize + mtd->oobsize;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf) {
+ pr_err("unable to allocate memory for 0x%x size\n", len);
+ ret = -ENOMEM;
+ goto out;
+ }
+ ops.mode = MTD_OPS_RAW;
+ ops.len = len;
+ ops.retlen = 0;
+ ops.ooblen = 0;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ret = msm_nand_write_oob(mtd, ofs, &ops);
+ kfree(buf);
+out:
+ return ret;
+}
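+
+/*
+ * Illustrative sketch (not part of this driver): marking a block bad from an
+ * upper layer goes through the MTD core helper, which ends up in
+ * msm_nand_block_markbad() above with a raw all-zero page write.
+ * block_offset is hypothetical.
+ *
+ *	if (mtd_block_markbad(mtd, block_offset))
+ *		pr_err("failed to mark block at 0x%llx bad\n", block_offset);
+ */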
+
+/*
+ * Function that scans for the attached NAND device. This fills out all
+ * the uninitialized function pointers with the defaults. The flash ID is
+ * read and the mtd/chip structures are filled with the appropriate values.
+ */
+int msm_nand_scan(struct mtd_info *mtd)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct flash_identification *supported_flash = &info->flash_dev;
+ int flash_id = 0, err = 0;
+ uint32_t i, mtd_writesize;
+ uint8_t dev_found = 0, wide_bus;
+ uint32_t manid, devid, devcfg;
+ uint32_t bad_block_byte;
+ struct nand_flash_dev *flashdev = NULL;
+ struct nand_manufacturers *flashman = NULL;
+
+ /* Probe the Flash device for ONFI compliance */
+ if (!msm_nand_flash_onfi_probe(info)) {
+ dev_found = 1;
+ } else {
+ err = msm_nand_flash_read_id(info, 0, &flash_id);
+ if (err < 0) {
+ pr_err("Failed to read Flash ID\n");
+ err = -EINVAL;
+ goto out;
+ }
+ manid = flash_id & 0xFF;
+ devid = (flash_id >> 8) & 0xFF;
+ devcfg = (flash_id >> 24) & 0xFF;
+
+ for (i = 0; !flashman && nand_manuf_ids[i].id; ++i)
+ if (nand_manuf_ids[i].id == manid)
+ flashman = &nand_manuf_ids[i];
+ for (i = 0; !flashdev && nand_flash_ids[i].id; ++i)
+ if (nand_flash_ids[i].id == devid)
+ flashdev = &nand_flash_ids[i];
+ if (!flashdev || !flashman) {
+ pr_err("unknown nand flashid=%x manuf=%x devid=%x\n",
+ flash_id, manid, devid);
+ err = -ENOENT;
+ goto out;
+ }
+ dev_found = 1;
+ if (!flashdev->pagesize) {
+ supported_flash->widebus = devcfg & (1 << 6) ? 1 : 0;
+ supported_flash->pagesize = 1024 << (devcfg & 0x3);
+ supported_flash->blksize = (64 * 1024) <<
+ ((devcfg >> 4) & 0x3);
+ supported_flash->oobsize = (8 << ((devcfg >> 2) & 1)) *
+ (supported_flash->pagesize >> 9);
+ } else {
+ supported_flash->widebus = flashdev->options &
+ NAND_BUSWIDTH_16 ? 1 : 0;
+ supported_flash->pagesize = flashdev->pagesize;
+ supported_flash->blksize = flashdev->erasesize;
+ supported_flash->oobsize = flashdev->pagesize >> 5;
+ }
+ supported_flash->flash_id = flash_id;
+ supported_flash->density = flashdev->chipsize << 20;
+ }
+
+ if (dev_found) {
+ wide_bus = supported_flash->widebus;
+ mtd->size = supported_flash->density;
+ mtd->writesize = supported_flash->pagesize;
+ mtd->oobsize = supported_flash->oobsize;
+ mtd->erasesize = supported_flash->blksize;
+ mtd_writesize = mtd->writesize;
+
+ /* Check whether the NAND device supports 8-bit ECC */
+ if (supported_flash->ecc_correctability >= 8)
+ chip->bch_caps = MSM_NAND_CAP_8_BIT_BCH;
+ else
+ chip->bch_caps = MSM_NAND_CAP_4_BIT_BCH;
+
+ pr_info("NAND Id: 0x%x Buswidth: %dBits Density: %lld MByte\n",
+ supported_flash->flash_id, (wide_bus) ? 16 : 8,
+ (mtd->size >> 20));
+ pr_info("pagesize: %d Erasesize: %d oobsize: %d (in Bytes)\n",
+ mtd->writesize, mtd->erasesize, mtd->oobsize);
+ pr_info("BCH ECC: %d Bit\n",
+ (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH ? 8 : 4));
+ }
+
+ chip->cw_size = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ? 532 : 528;
+ chip->cfg0 = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
+ | (516 << UD_SIZE_BYTES)
+ | (0 << DISABLE_STATUS_AFTER_WRITE)
+ | (5 << NUM_ADDR_CYCLES);
+
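+
+ /*
+ * Worked example (assuming a 2K-page device with 4-bit BCH): cw_size
+ * is 528 and cwperpage is 2048 >> 9 = 4, so the last codeword starts
+ * at 528 * 3 = 1584 and the bad block byte below works out to
+ * 2048 - 1584 + 1 = 465 within the page.
+ */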
+ bad_block_byte = (mtd_writesize - (chip->cw_size * (
+ (mtd_writesize >> 9) - 1)) + 1);
+ chip->cfg1 = (7 << NAND_RECOVERY_CYCLES)
+ | (0 << CS_ACTIVE_BSY)
+ | (bad_block_byte << BAD_BLOCK_BYTE_NUM)
+ | (0 << BAD_BLOCK_IN_SPARE_AREA)
+ | (2 << WR_RD_BSY_GAP)
+ | ((wide_bus ? 1 : 0) << WIDE_FLASH)
+ | (1 << ENABLE_BCH_ECC);
+
+ chip->cfg0_raw = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
+ | (5 << NUM_ADDR_CYCLES)
+ | (0 << SPARE_SIZE_BYTES)
+ | (chip->cw_size << UD_SIZE_BYTES);
+
+ chip->cfg1_raw = (7 << NAND_RECOVERY_CYCLES)
+ | (0 << CS_ACTIVE_BSY)
+ | (17 << BAD_BLOCK_BYTE_NUM)
+ | (1 << BAD_BLOCK_IN_SPARE_AREA)
+ | (2 << WR_RD_BSY_GAP)
+ | ((wide_bus ? 1 : 0) << WIDE_FLASH)
+ | (1 << DEV0_CFG1_ECC_DISABLE);
+
+ chip->ecc_bch_cfg = (0 << ECC_CFG_ECC_DISABLE)
+ | (0 << ECC_SW_RESET)
+ | (516 << ECC_NUM_DATA_BYTES)
+ | (1 << ECC_FORCE_CLK_OPEN);
+
+ if (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) {
+ chip->cfg0 |= (wide_bus ? 0 << SPARE_SIZE_BYTES :
+ 2 << SPARE_SIZE_BYTES);
+ chip->ecc_bch_cfg |= (1 << ECC_MODE)
+ | ((wide_bus) ? (14 << ECC_PARITY_SIZE_BYTES) :
+ (13 << ECC_PARITY_SIZE_BYTES));
+ } else {
+ chip->cfg0 |= (wide_bus ? 2 << SPARE_SIZE_BYTES :
+ 4 << SPARE_SIZE_BYTES);
+ chip->ecc_bch_cfg |= (0 << ECC_MODE)
+ | ((wide_bus) ? (8 << ECC_PARITY_SIZE_BYTES) :
+ (7 << ECC_PARITY_SIZE_BYTES));
+ }
+
+ /*
+ * For 4bit BCH ECC (default ECC), parity bytes = 7(x8) or 8(x16 I/O)
+ * For 8bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O).
+ */
+ chip->ecc_parity_bytes = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ?
+ (wide_bus ? 14 : 13) : (wide_bus ? 8 : 7);
+ chip->ecc_buf_cfg = 0x203; /* No of bytes covered by ECC - 516 bytes */
+
+ pr_info("CFG0: 0x%08x, CFG1: 0x%08x\n"
+ " RAWCFG0: 0x%08x, RAWCFG1: 0x%08x\n"
+ " ECCBUFCFG: 0x%08x, ECCBCHCFG: 0x%08x\n"
+ " BAD BLOCK BYTE: 0x%08x\n", chip->cfg0, chip->cfg1,
+ chip->cfg0_raw, chip->cfg1_raw, chip->ecc_buf_cfg,
+ chip->ecc_bch_cfg, bad_block_byte);
+
+ if (mtd->oobsize == 64) {
+ mtd->oobavail = 16;
+ } else if ((mtd->oobsize == 128) || (mtd->oobsize == 224)) {
+ mtd->oobavail = 32;
+ } else {
+ pr_err("Unsupported NAND oobsize: 0x%x\n", mtd->oobsize);
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* Fill in remaining MTD driver data */
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->_erase = msm_nand_erase;
+ mtd->_block_isbad = msm_nand_block_isbad;
+ mtd->_block_markbad = msm_nand_block_markbad;
+ mtd->_read = msm_nand_read;
+ mtd->_write = msm_nand_write;
+ mtd->_read_oob = msm_nand_read_oob;
+ mtd->_write_oob = msm_nand_write_oob;
+ mtd->owner = THIS_MODULE;
+out:
+ return err;
+}
+
+#define BAM_APPS_PIPE_LOCK_GRP 0
+/*
+ * This function allocates, configures, and connects an end point, and also
+ * registers event notification for it. It also allocates DMA memory for the
+ * pipe's descriptor FIFO.
+ */
+static int msm_nand_init_endpoint(struct msm_nand_info *info,
+ struct msm_nand_sps_endpt *end_point,
+ uint32_t pipe_index)
+{
+ int rc = 0;
+ struct sps_pipe *pipe_handle;
+ struct sps_connect *sps_config = &end_point->config;
+ struct sps_register_event *sps_event = &end_point->event;
+
+ pipe_handle = sps_alloc_endpoint();
+ if (!pipe_handle) {
+ pr_err("sps_alloc_endpoint() failed\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = sps_get_config(pipe_handle, sps_config);
+ if (rc) {
+ pr_err("sps_get_config() failed %d\n", rc);
+ goto free_endpoint;
+ }
+
+ if (pipe_index == SPS_DATA_PROD_PIPE_INDEX) {
+ /* READ CASE: source - BAM; destination - system memory */
+ sps_config->source = info->sps.bam_handle;
+ sps_config->destination = SPS_DEV_HANDLE_MEM;
+ sps_config->mode = SPS_MODE_SRC;
+ sps_config->src_pipe_index = pipe_index;
+ } else if (pipe_index == SPS_DATA_CONS_PIPE_INDEX ||
+ pipe_index == SPS_CMD_CONS_PIPE_INDEX) {
+ /* WRITE CASE: source - system memory; destination - BAM */
+ sps_config->source = SPS_DEV_HANDLE_MEM;
+ sps_config->destination = info->sps.bam_handle;
+ sps_config->mode = SPS_MODE_DEST;
+ sps_config->dest_pipe_index = pipe_index;
+ }
+
+ sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
+ sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP;
+ /*
+ * Descriptor FIFO is a cyclic FIFO. If SPS_MAX_DESC_NUM descriptors
+ * are allowed to be submitted before we get any ack for any of them,
+ * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) *
+ * sizeof(struct sps_iovec).
+ */
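+ /*
+ * Worked example (values only illustrative): if SPS_MAX_DESC_NUM were
+ * 32 and struct sps_iovec is the three-word (addr, size, flags)
+ * descriptor used elsewhere in this driver, the FIFO would come to
+ * (32 + 1) * 12 = 396 bytes. The real values come from the SPS/BAM
+ * headers.
+ */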
+ sps_config->desc.size = (SPS_MAX_DESC_NUM + 1) *
+ sizeof(struct sps_iovec);
+ sps_config->desc.base = dmam_alloc_coherent(info->nand_chip.dev,
+ sps_config->desc.size,
+ &sps_config->desc.phys_base,
+ GFP_KERNEL);
+ if (!sps_config->desc.base) {
+ pr_err("dmam_alloc_coherent() failed for size %x\n",
+ sps_config->desc.size);
+ rc = -ENOMEM;
+ goto free_endpoint;
+ }
+ memset(sps_config->desc.base, 0x00, sps_config->desc.size);
+
+ rc = sps_connect(pipe_handle, sps_config);
+ if (rc) {
+ pr_err("sps_connect() failed %d\n", rc);
+ goto free_endpoint;
+ }
+
+ init_completion(&end_point->completion);
+ sps_event->mode = SPS_TRIGGER_WAIT;
+ sps_event->options = SPS_O_DESC_DONE;
+ sps_event->xfer_done = &end_point->completion;
+ sps_event->user = (void *)info;
+
+ rc = sps_register_event(pipe_handle, sps_event);
+ if (rc) {
+ pr_err("sps_register_event() failed %d\n", rc);
+ goto sps_disconnect;
+ }
+ end_point->handle = pipe_handle;
+ pr_debug("pipe handle 0x%x for pipe %d\n", (uint32_t)pipe_handle,
+ pipe_index);
+ goto out;
+sps_disconnect:
+ sps_disconnect(pipe_handle);
+free_endpoint:
+ sps_free_endpoint(pipe_handle);
+out:
+ return rc;
+}
+
+/* This function disconnects and frees an end point */
+static void msm_nand_deinit_endpoint(struct msm_nand_info *info,
+ struct msm_nand_sps_endpt *end_point)
+{
+ sps_disconnect(end_point->handle);
+ sps_free_endpoint(end_point->handle);
+}
+
+/*
+ * This function registers BAM device and initializes its end points for
+ * the following pipes -
+ * system consumer pipe for data (pipe#0),
+ * system producer pipe for data (pipe#1),
+ * system consumer pipe for commands (pipe#2).
+ */
+static int msm_nand_bam_init(struct msm_nand_info *nand_info)
+{
+ struct sps_bam_props bam = {0};
+ int rc = 0;
+
+ bam.phys_addr = nand_info->bam_phys;
+ bam.virt_addr = nand_info->bam_base;
+ bam.irq = nand_info->bam_irq;
+ /*
+ * NAND device is accessible from both Apps and Modem processor and
+ * thus, NANDc and BAM are shared between both the processors. But BAM
+ * must be enabled and instantiated only once during boot up by
+ * Trustzone before Modem/Apps is brought out of reset.
+ *
+ * This is indicated to SPS driver on Apps by marking flag
+ * SPS_BAM_MGR_DEVICE_REMOTE. The following are the global
+ * initializations that will be done by Trustzone - Execution
+ * Environment, Pipes assignment to Apps/Modem, Pipe Super groups and
+ * Descriptor summing threshold.
+ *
+ * NANDc BAM device supports 2 execution environments - Modem and Apps
+ * and thus the flag SPS_BAM_MGR_MULTI_EE is set.
+ */
+ bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;
+
+ rc = sps_register_bam_device(&bam, &nand_info->sps.bam_handle);
+ if (rc) {
+ pr_err("sps_register_bam_device() failed with %d\n", rc);
+ goto out;
+ }
+ pr_info("BAM device registered: bam_handle 0x%x\n",
+ nand_info->sps.bam_handle);
+
+ rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_prod,
+ SPS_DATA_PROD_PIPE_INDEX);
+ if (rc)
+ goto unregister_bam;
+
+ rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_cons,
+ SPS_DATA_CONS_PIPE_INDEX);
+ if (rc)
+ goto deinit_data_prod;
+
+ rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.cmd_pipe,
+ SPS_CMD_CONS_PIPE_INDEX);
+ if (rc)
+ goto deinit_data_cons;
+ goto out;
+deinit_data_cons:
+ msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
+deinit_data_prod:
+ msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
+unregister_bam:
+ sps_deregister_bam_device(nand_info->sps.bam_handle);
+out:
+ return rc;
+}
+
+/*
+ * This function de-registers BAM device, disconnects and frees its end points
+ * for all the pipes.
+ */
+static void msm_nand_bam_free(struct msm_nand_info *nand_info)
+{
+ msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
+ msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
+ msm_nand_deinit_endpoint(nand_info, &nand_info->sps.cmd_pipe);
+ sps_deregister_bam_device(nand_info->sps.bam_handle);
+}
+
+/* This function enables DMA support for the NANDc in BAM mode. */
+static int msm_nand_enable_dma(struct msm_nand_info *info)
+{
+ struct msm_nand_sps_cmd *sps_cmd;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ int ret;
+
+ wait_event(chip->dma_wait_queue,
+ (sps_cmd = msm_nand_get_dma_buffer(chip, sizeof(*sps_cmd))));
+
+ msm_nand_prep_ce(sps_cmd, MSM_NAND_CTRL(info), WRITE,
+ (1 << BAM_MODE_EN), SPS_IOVEC_FLAG_INT);
+
+ ret = sps_transfer_one(info->sps.cmd_pipe.handle,
+ msm_virt_to_dma(chip, &sps_cmd->ce),
+ sizeof(struct sps_command_element), NULL,
+ sps_cmd->flags);
+ if (ret) {
+ pr_err("Failed to submit command: %d\n", ret);
+ goto out;
+ }
+ wait_for_completion_io(&info->sps.cmd_pipe.completion);
+out:
+ msm_nand_release_dma_buffer(chip, sps_cmd, sizeof(*sps_cmd));
+ return ret;
+
+}
+
+/*
+ * This function gets called when the device named msm-nand is added to the
+ * device tree (.dts) file along with its resources, such as the physical
+ * addresses for NANDc and BAM and the BAM IRQ.
+ *
+ * It also expects the NAND flash partition information to be passed in the
+ * .dts file so that it can parse the partitions by calling the MTD function
+ * mtd_device_parse_register().
+ *
+ */
+static int __devinit msm_nand_probe(struct platform_device *pdev)
+{
+ struct msm_nand_info *info;
+ struct resource *res;
+ int err, n_parts;
+ struct device_node *pnode;
+ struct mtd_part_parser_data parser_data;
+
+ if (!pdev->dev.of_node) {
+ pr_err("No valid device tree info for NANDc\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * The partition information can also be passed from the kernel command
+ * line. The MTD core layer also supports adding the whole device as
+ * one MTD device when no partition information is available at all.
+ * Hence, do not bail out when partition information is not available
+ * in the device tree.
+ */
+ pnode = of_find_node_by_path("/qcom,mtd-partitions");
+ if (!pnode)
+ pr_info("No partition info available in device tree\n");
+ info = devm_kzalloc(&pdev->dev, sizeof(struct msm_nand_info),
+ GFP_KERNEL);
+ if (!info) {
+ pr_err("Unable to allocate memory for msm_nand_info\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "nand_phys");
+ if (!res || !res->start) {
+ pr_err("NAND phys address range is not provided\n");
+ err = -ENODEV;
+ goto out;
+ }
+ info->nand_phys = res->start;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "bam_phys");
+ if (!res || !res->start) {
+ pr_err("BAM phys address range is not provided\n");
+ err = -ENODEV;
+ goto out;
+ }
+ info->bam_phys = res->start;
+ info->bam_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!info->bam_base) {
+ pr_err("BAM ioremap() failed for addr 0x%x size 0x%x\n",
+ res->start, resource_size(res));
+ err = -ENOMEM;
+ goto out;
+ }
+
+ info->bam_irq = platform_get_irq_byname(pdev, "bam_irq");
+ if (info->bam_irq < 0) {
+ pr_err("BAM IRQ is not provided\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ info->mtd.name = dev_name(&pdev->dev);
+ info->mtd.priv = info;
+ info->mtd.owner = THIS_MODULE;
+ info->nand_chip.dev = &pdev->dev;
+ init_waitqueue_head(&info->nand_chip.dma_wait_queue);
+ mutex_init(&info->bam_lock);
+
+ info->nand_chip.dma_virt_addr =
+ dmam_alloc_coherent(&pdev->dev, MSM_NAND_DMA_BUFFER_SIZE,
+ &info->nand_chip.dma_phys_addr, GFP_KERNEL);
+ if (!info->nand_chip.dma_virt_addr) {
+ pr_err("No memory for DMA buffer size %x\n",
+ MSM_NAND_DMA_BUFFER_SIZE);
+ err = -ENOMEM;
+ goto out;
+ }
+ err = msm_nand_bam_init(info);
+ if (err) {
+ pr_err("msm_nand_bam_init() failed %d\n", err);
+ goto out;
+ }
+ err = msm_nand_enable_dma(info);
+ if (err) {
+ pr_err("Failed to enable DMA in NANDc\n");
+ goto free_bam;
+ }
+ if (msm_nand_scan(&info->mtd)) {
+ pr_err("No nand device found\n");
+ err = -ENXIO;
+ goto free_bam;
+ }
+ parser_data.of_node = pnode;
+ n_parts = mtd_device_parse_register(&info->mtd, NULL, &parser_data,
+ NULL, 0);
+ if (n_parts < 0) {
+ pr_err("Unable to register MTD partitions %d\n", n_parts);
+ err = n_parts;
+ goto free_bam;
+ }
+ dev_set_drvdata(&pdev->dev, info);
+
+ pr_info("NANDc phys addr 0x%lx, BAM phys addr 0x%lx, BAM IRQ %d\n",
+ info->nand_phys, info->bam_phys, info->bam_irq);
+ pr_info("Allocated DMA buffer at virt_addr 0x%p, phys_addr 0x%x\n",
+ info->nand_chip.dma_virt_addr, info->nand_chip.dma_phys_addr);
+ pr_info("Found %d MTD partitions\n", n_parts);
+ goto out;
+free_bam:
+ msm_nand_bam_free(info);
+out:
+ return err;
+}
+
+/*
+ * Remove callback that gets called when the driver/device msm-nand
+ * is removed.
+ */
+static int __devexit msm_nand_remove(struct platform_device *pdev)
+{
+ struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+ if (info) {
+ mtd_device_unregister(&info->mtd);
+ msm_nand_bam_free(info);
+ }
+ return 0;
+}
+
+#define DRIVER_NAME "msm_qpic_nand"
+static const struct of_device_id msm_nand_match_table[] = {
+ { .compatible = "qcom,msm-nand", },
+ {},
+};
+static struct platform_driver msm_nand_driver = {
+ .probe = msm_nand_probe,
+ .remove = __devexit_p(msm_nand_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = msm_nand_match_table,
+ },
+};
+
+module_platform_driver(msm_nand_driver);
+
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM QPIC NAND flash driver");
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 2b73d99..d26c845 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -87,6 +87,8 @@
static const char driver_name [] = "usbnet";
+static struct workqueue_struct *usbnet_wq;
+
/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param (msg_level, int, 0);
@@ -246,7 +248,7 @@
if (skb_defer_rx_timestamp(skb))
return;
- status = netif_rx (skb);
+ status = netif_rx_ni(skb);
if (status != NET_RX_SUCCESS)
netif_dbg(dev, rx_err, dev->net,
"netif_rx status %d\n", status);
@@ -316,7 +318,7 @@
spin_lock(&dev->done.lock);
__skb_queue_tail(&dev->done, skb);
if (dev->done.qlen == 1)
- tasklet_schedule(&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
spin_unlock_irqrestore(&dev->done.lock, flags);
return old_state;
}
@@ -390,7 +392,7 @@
default:
netif_dbg(dev, rx_err, dev->net,
"rx submit, %d\n", retval);
- tasklet_schedule (&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
break;
case 0:
usb_mark_last_busy(dev->udev);
@@ -583,7 +585,7 @@
num++;
}
- tasklet_schedule(&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
netif_dbg(dev, rx_status, dev->net,
"paused rx queue disabled, %d skbs requeued\n", num);
@@ -652,7 +654,7 @@
{
if (netif_running(dev->net)) {
(void) unlink_urbs (dev, &dev->rxq);
- tasklet_schedule(&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
}
}
EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
@@ -726,7 +728,7 @@
*/
dev->flags = 0;
del_timer_sync (&dev->delay);
- tasklet_kill (&dev->bh);
+ cancel_work_sync(&dev->bh_w);
if (info->manage_power)
info->manage_power(dev, 0);
else
@@ -799,7 +801,7 @@
"simple");
// delay posting reads until we're fully open
- tasklet_schedule (&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
if (info->manage_power) {
retval = info->manage_power(dev, 1);
if (retval < 0)
@@ -969,7 +971,7 @@
status);
} else {
clear_bit (EVENT_RX_HALT, &dev->flags);
- tasklet_schedule (&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
}
}
@@ -994,7 +996,7 @@
usb_autopm_put_interface(dev->intf);
fail_lowmem:
if (resched)
- tasklet_schedule (&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
}
}
@@ -1080,7 +1082,7 @@
struct usbnet *dev = netdev_priv(net);
unlink_urbs (dev, &dev->txq);
- tasklet_schedule (&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
// FIXME: device recovery -- reset?
}
@@ -1267,13 +1269,21 @@
"rxqlen %d --> %d\n",
temp, dev->rxq.qlen);
if (dev->rxq.qlen < qlen)
- tasklet_schedule (&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
}
if (dev->txq.qlen < TX_QLEN (dev))
netif_wake_queue (dev->net);
}
}
+static void usbnet_bh_w(struct work_struct *work)
+{
+ struct usbnet *dev =
+ container_of(work, struct usbnet, bh_w);
+ unsigned long param = (unsigned long)dev;
+
+ usbnet_bh(param);
+}
/*-------------------------------------------------------------------------
*
@@ -1392,8 +1402,7 @@
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
skb_queue_head_init(&dev->rxq_pause);
- dev->bh.func = usbnet_bh;
- dev->bh.data = (unsigned long) dev;
+ INIT_WORK(&dev->bh_w, usbnet_bh_w);
INIT_WORK (&dev->kevent, kevent);
init_usb_anchor(&dev->deferred);
dev->delay.function = usbnet_bh;
@@ -1577,7 +1586,7 @@
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_tx_wake_all_queues(dev->net);
- tasklet_schedule (&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
}
}
return 0;
@@ -1594,12 +1603,20 @@
FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
random_ether_addr(node_id);
+
+ usbnet_wq = create_singlethread_workqueue("usbnet");
+ if (!usbnet_wq) {
+ pr_err("%s: Unable to create workqueue:usbnet\n", __func__);
+ return -ENOMEM;
+ }
+
return 0;
}
module_init(usbnet_init);
static void __exit usbnet_exit(void)
{
+ destroy_workqueue(usbnet_wq);
}
module_exit(usbnet_exit);
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index ad9dc7d..7695778 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -184,6 +184,12 @@
return 0;
}
+void wcnss_flush_delayed_boot_votes(void)
+{
+ flush_delayed_work_sync(&penv->wcnss_work);
+}
+EXPORT_SYMBOL(wcnss_flush_delayed_boot_votes);
+
static int __devexit
wcnss_wlan_ctrl_remove(struct platform_device *pdev)
{
diff --git a/drivers/platform/msm/qpnp-pwm.c b/drivers/platform/msm/qpnp-pwm.c
index c9cd0e0..708d658 100644
--- a/drivers/platform/msm/qpnp-pwm.c
+++ b/drivers/platform/msm/qpnp-pwm.c
@@ -440,7 +440,7 @@
unsigned int pwm_value, max_pwm_value;
struct qpnp_lpg_chip *chip = pwm->chip;
struct qpnp_lut_config *lut = &chip->lpg_config.lut_config;
- int i, pwm_size, rc;
+ int i, pwm_size, rc = 0;
int burst_size = SPMI_MAX_BUF_LEN;
int list_len = lut->list_size << 1;
int offset = lut->lo_index << 2;
diff --git a/drivers/power/smb349.c b/drivers/power/smb349.c
index ffc92d5..f9ca81c 100644
--- a/drivers/power/smb349.c
+++ b/drivers/power/smb349.c
@@ -617,6 +617,8 @@
the_smb349_chg = smb349_chg;
+ spin_lock_init(&smb349_chg->lock);
+
create_debugfs_entries(smb349_chg);
INIT_WORK(&smb349_chg->chg_work, chg_worker);
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index a8d3720..0575d80 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -28,6 +28,33 @@
static uint32_t limited_max_freq = MSM_CPUFREQ_NO_LIMIT;
static struct delayed_work check_temp_work;
+static int limit_idx;
+static int limit_idx_low;
+static int limit_idx_high;
+static struct cpufreq_frequency_table *table;
+
+static int msm_thermal_get_freq_table(void)
+{
+ int ret = 0;
+ int i = 0;
+
+ table = cpufreq_frequency_get_table(0);
+ if (table == NULL) {
+ pr_debug("%s: error reading cpufreq table\n", __func__);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ while (table[i].frequency != CPUFREQ_TABLE_END)
+ i++;
+
+ limit_idx_low = 0;
+ limit_idx_high = limit_idx = i - 1;
+ BUG_ON(limit_idx_high <= 0 || limit_idx_high <= limit_idx_low);
+fail:
+ return ret;
+}
+
static int update_cpu_max_freq(int cpu, uint32_t max_freq)
{
int ret = 0;
@@ -36,10 +63,6 @@
if (ret)
return ret;
- ret = cpufreq_update_policy(cpu);
- if (ret)
- return ret;
-
limited_max_freq = max_freq;
if (max_freq != MSM_CPUFREQ_NO_LIMIT)
pr_info("msm_thermal: Limiting cpu%d max frequency to %d\n",
@@ -47,11 +70,14 @@
else
pr_info("msm_thermal: Max frequency reset for cpu%d\n", cpu);
+ ret = cpufreq_update_policy(cpu);
+
return ret;
}
static void check_temp(struct work_struct *work)
{
+ static int limit_init;
struct tsens_device tsens_dev;
unsigned long temp = 0;
uint32_t max_freq = limited_max_freq;
@@ -66,12 +92,34 @@
goto reschedule;
}
- if (temp >= msm_thermal_info.limit_temp)
- max_freq = msm_thermal_info.limit_freq;
- else if (temp <
- msm_thermal_info.limit_temp - msm_thermal_info.temp_hysteresis)
- max_freq = MSM_CPUFREQ_NO_LIMIT;
+ if (!limit_init) {
+ ret = msm_thermal_get_freq_table();
+ if (ret)
+ goto reschedule;
+ else
+ limit_init = 1;
+ }
+ if (temp >= msm_thermal_info.limit_temp_degC) {
+ if (limit_idx == limit_idx_low)
+ goto reschedule;
+
+ limit_idx -= msm_thermal_info.freq_step;
+ if (limit_idx < limit_idx_low)
+ limit_idx = limit_idx_low;
+ max_freq = table[limit_idx].frequency;
+ } else if (temp < msm_thermal_info.limit_temp_degC -
+ msm_thermal_info.temp_hysteresis_degC) {
+ if (limit_idx == limit_idx_high)
+ goto reschedule;
+
+ limit_idx += msm_thermal_info.freq_step;
+ if (limit_idx >= limit_idx_high) {
+ limit_idx = limit_idx_high;
+ max_freq = MSM_CPUFREQ_NO_LIMIT;
+ } else
+ max_freq = table[limit_idx].frequency;
+ }
if (max_freq == limited_max_freq)
goto reschedule;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index c483bb45..a5235ba 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -503,11 +503,10 @@
if (msm_port->uim) {
msm_write(port,
- UART_SIM_CFG_UIM_TX_MODE |
- UART_SIM_CFG_UIM_RX_MODE |
UART_SIM_CFG_STOP_BIT_LEN_N(1) |
UART_SIM_CFG_SIM_CLK_ON |
UART_SIM_CFG_SIM_CLK_STOP_HIGH |
+ UART_SIM_CFG_MASK_RX |
UART_SIM_CFG_SIM_SEL,
UART_SIM_CFG);
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index a769825..34228ec 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -108,6 +108,7 @@
#define UART_SIM_CFG_SIM_CLK_ON (1 << 7)
#define UART_SIM_CFG_SIM_CLK_TD8_SEL (1 << 6)
#define UART_SIM_CFG_SIM_CLK_STOP_HIGH (1 << 5)
+#define UART_SIM_CFG_MASK_RX (1 << 3)
#define UART_SIM_CFG_SIM_SEL (1 << 0)
#define UART_MISR_MODE 0x0040
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 9339800..5430e11 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -71,6 +71,9 @@
#include "u_ether.c"
#include "u_bam_data.c"
#include "f_mbim.c"
+#ifdef CONFIG_TARGET_CORE
+#include "f_tcm.c"
+#endif
MODULE_AUTHOR("Mike Lockwood");
MODULE_DESCRIPTION("Android Composite USB Driver");
@@ -1201,6 +1204,51 @@
.ctrlrequest = accessory_function_ctrlrequest,
};
+static int android_uasp_connect_cb(bool connect)
+{
+ /*
+ * TODO
+ * We may have to disable gadget till UASP configfs nodes
+ * are configured which includes mapping LUN with the
+ * backing file. It is a fundamental difference between
+ * f_mass_storage and f_tcm. That means UASP cannot be
+ * in default composition.
+ *
+ * For now, assume that UASP configfs nodes are configured
+ * before enabling android gadget. Or cable should be
+ * reconnected after mapping the LUN.
+ *
+ * Also consider making UASP respond to host requests when
+ * the LUN is not mapped.
+ */
+ pr_debug("UASP %s\n", connect ? "connect" : "disconnect");
+
+ return 0;
+}
+
+static int uasp_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ return f_tcm_init(&android_uasp_connect_cb);
+}
+
+static void uasp_function_cleanup(struct android_usb_function *f)
+{
+ f_tcm_exit();
+}
+
+static int uasp_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ return tcm_bind_config(c);
+}
+
+static struct android_usb_function uasp_function = {
+ .name = "uasp",
+ .init = uasp_function_init,
+ .cleanup = uasp_function_cleanup,
+ .bind_config = uasp_function_bind_config,
+};
static struct android_usb_function *supported_functions[] = {
&mbim_function,
@@ -1218,6 +1266,7 @@
&rndis_function,
&mass_storage_function,
&accessory_function,
+ &uasp_function,
NULL
};
diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c
index 72bff49..87597d5 100644
--- a/drivers/usb/gadget/f_diag.c
+++ b/drivers/usb/gadget/f_diag.c
@@ -20,7 +20,6 @@
#include <linux/platform_device.h>
#include <mach/usbdiag.h>
-#include <mach/rpc_hsusb.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
index 0394b0b..96790c5 100644
--- a/drivers/usb/gadget/f_mtp.c
+++ b/drivers/usb/gadget/f_mtp.c
@@ -788,7 +788,8 @@
/* wait for our last read to complete */
ret = wait_event_interruptible(dev->read_wq,
dev->rx_done || dev->state != STATE_BUSY);
- if (dev->state == STATE_CANCELED) {
+ if (dev->state == STATE_CANCELED
+ || dev->state == STATE_OFFLINE) {
r = -ECANCELED;
if (!dev->rx_done)
usb_ep_dequeue(dev->ep_out, read_req);
diff --git a/drivers/usb/gadget/f_tcm.c b/drivers/usb/gadget/f_tcm.c
index d944745..8777504 100644
--- a/drivers/usb/gadget/f_tcm.c
+++ b/drivers/usb/gadget/f_tcm.c
@@ -255,7 +255,6 @@
{
struct f_uas *fu = cmd->fu;
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct usb_gadget *gadget = fuas_to_gadget(fu);
int ret;
init_completion(&cmd->write_complete);
@@ -266,22 +265,6 @@
return -EINVAL;
}
- if (!gadget->sg_supported) {
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
- if (!cmd->data_buf)
- return -ENOMEM;
-
- fu->bot_req_out->buf = cmd->data_buf;
- } else {
- fu->bot_req_out->buf = NULL;
- fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
- fu->bot_req_out->sg = se_cmd->t_data_sg;
- }
-
- fu->bot_req_out->complete = usbg_data_write_cmpl;
- fu->bot_req_out->length = se_cmd->data_length;
- fu->bot_req_out->context = cmd;
-
ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
if (ret)
goto cleanup;
diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c
index 297c183..55fd59e 100644
--- a/drivers/usb/gadget/msm72k_udc.c
+++ b/drivers/usb/gadget/msm72k_udc.c
@@ -702,6 +702,14 @@
spin_lock_irqsave(&ui->lock, flags);
+ if (ept->num != 0 && ept->ep.desc == NULL) {
+ req->req.status = -EINVAL;
+ spin_unlock_irqrestore(&ui->lock, flags);
+ dev_err(&ui->pdev->dev,
+ "%s: called for disabled endpoint\n", __func__);
+ return -EINVAL;
+ }
+
if (req->busy) {
req->req.status = -EBUSY;
spin_unlock_irqrestore(&ui->lock, flags);
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
index d379c66..1fade88 100644
--- a/drivers/usb/gadget/u_bam.c
+++ b/drivers/usb/gadget/u_bam.c
@@ -24,6 +24,7 @@
#include <linux/termios.h>
#include <mach/usb_gadget_xport.h>
+#include <linux/usb/msm_hsusb.h>
#include <mach/usb_bam.h>
#include "u_rmnet.h"
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 487bc59..da96e73 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -340,15 +340,28 @@
{
int ret;
- if (IS_ERR(motg->clk))
- return 0;
-
if (assert) {
- ret = clk_reset(motg->clk, CLK_RESET_ASSERT);
+ if (!IS_ERR(motg->clk)) {
+ ret = clk_reset(motg->clk, CLK_RESET_ASSERT);
+ } else {
+ /* Using asynchronous block reset to the hardware */
+ dev_dbg(motg->phy.dev, "block_reset ASSERT\n");
+ clk_disable_unprepare(motg->pclk);
+ clk_disable_unprepare(motg->core_clk);
+ ret = clk_reset(motg->core_clk, CLK_RESET_ASSERT);
+ }
if (ret)
dev_err(motg->phy.dev, "usb hs_clk assert failed\n");
} else {
- ret = clk_reset(motg->clk, CLK_RESET_DEASSERT);
+ if (!IS_ERR(motg->clk)) {
+ ret = clk_reset(motg->clk, CLK_RESET_DEASSERT);
+ } else {
+ dev_dbg(motg->phy.dev, "block_reset DEASSERT\n");
+ ret = clk_reset(motg->core_clk, CLK_RESET_DEASSERT);
+ ndelay(200);
+ clk_prepare_enable(motg->core_clk);
+ clk_prepare_enable(motg->pclk);
+ }
if (ret)
dev_err(motg->phy.dev, "usb hs_clk deassert failed\n");
}
diff --git a/drivers/video/msm/Kconfig b/drivers/video/msm/Kconfig
index 7e078ab..54d7090 100644
--- a/drivers/video/msm/Kconfig
+++ b/drivers/video/msm/Kconfig
@@ -405,6 +405,15 @@
select FB_MSM_MIPI_DSI_SIMULATOR
default n
+config FB_MSM_NO_MDP_PIPE_CTRL
+ depends on FB_MSM_OVERLAY
+ bool "Do not use mdp_pipe_ctrl"
+ ---help---
+ Saying 'Y' here obsoletes the mdp_pipe_ctrl function,
+ which was used to control mdp-related clocks. MDP4 vsync-driven
+ screen updates will use a different clock control mechanism if
+ this is selected.
+
config FB_MSM_OVERLAY0_WRITEBACK
depends on FB_MSM_OVERLAY
bool "MDP overlay0 write back mode enable"
diff --git a/drivers/video/msm/Makefile b/drivers/video/msm/Makefile
index a0f9e02..e49e2ba 100644
--- a/drivers/video/msm/Makefile
+++ b/drivers/video/msm/Makefile
@@ -14,7 +14,6 @@
ifeq ($(CONFIG_FB_MSM_MDP40),y)
obj-y += mdp4_util.o
-obj-y += mdp4_hsic.o
else
obj-y += mdp_hw_init.o
obj-y += mdp_ppp.o
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
index 4a1427d..26e5687 100644
--- a/drivers/video/msm/hdmi_msm.c
+++ b/drivers/video/msm/hdmi_msm.c
@@ -828,16 +828,17 @@
DEV_INFO("HDMI HPD: QDSP OFF\n");
kobject_uevent_env(external_common_state->uevent_kobj,
KOBJ_CHANGE, envp);
- switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switch to %d: %s\n",
- external_common_state->sdev.state, __func__);
if (hpd_state) {
/* Build EDID table */
hdmi_msm_read_edid();
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
hdmi_msm_state->reauth = FALSE ;
#endif
- DEV_INFO("HDMI HPD: sense CONNECTED: send ONLINE\n");
+ switch_set_state(&external_common_state->sdev, 1);
+ DEV_INFO("Hdmi state switched to %d: %s\n",
+ external_common_state->sdev.state, __func__);
+
+ DEV_INFO("HDMI HPD: CONNECTED: send ONLINE\n");
kobject_uevent(external_common_state->uevent_kobj,
KOBJ_ONLINE);
switch_set_state(&external_common_state->sdev, 1);
@@ -850,18 +851,15 @@
DEV_INFO("HDMI HPD: sense : send HDCP_PASS\n");
kobject_uevent_env(external_common_state->uevent_kobj,
KOBJ_CHANGE, envp);
- switch_set_state(&external_common_state->sdev, 1);
- DEV_INFO("Hdmi state switch to %d: %s\n",
- external_common_state->sdev.state, __func__);
#endif
} else {
- DEV_INFO("HDMI HPD: sense DISCONNECTED: send OFFLINE\n"
- );
+ switch_set_state(&external_common_state->sdev, 0);
+ DEV_INFO("Hdmi state switched to %d: %s\n",
+ external_common_state->sdev.state, __func__);
+
+ DEV_INFO("HDMI HPD: DISCONNECTED: send OFFLINE\n");
kobject_uevent(external_common_state->uevent_kobj,
KOBJ_OFFLINE);
- switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switch to %d: %s\n",
- external_common_state->sdev.state, __func__);
}
}
@@ -1087,14 +1085,16 @@
DEV_INFO("HDCP: AUTH_FAIL_INT received, LINK0_STATUS=0x%08x\n",
HDMI_INP_ND(0x011C));
if (hdmi_msm_state->full_auth_done) {
+ switch_set_state(&external_common_state->sdev, 0);
+ DEV_INFO("Hdmi state switched to %d: %s\n",
+ external_common_state->sdev.state, __func__);
+
envp[0] = "HDCP_STATE=FAIL";
envp[1] = NULL;
DEV_INFO("HDMI HPD:QDSP OFF\n");
kobject_uevent_env(external_common_state->uevent_kobj,
KOBJ_CHANGE, envp);
- switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switch to %d: %s\n",
- external_common_state->sdev.state, __func__);
+
mutex_lock(&hdcp_auth_state_mutex);
hdmi_msm_state->full_auth_done = FALSE;
mutex_unlock(&hdcp_auth_state_mutex);
@@ -2964,9 +2964,7 @@
char *envp[2];
if (!hdmi_msm_has_hdcp()) {
- switch_set_state(&external_common_state->sdev, 1);
- DEV_INFO("Hdmi state switch to %d: %s\n",
- external_common_state->sdev.state, __func__);
+ DEV_INFO("%s: HDCP NOT ENABLED\n", __func__);
return;
}
@@ -3039,8 +3037,9 @@
kobject_uevent_env(external_common_state->uevent_kobj,
KOBJ_CHANGE, envp);
}
+
switch_set_state(&external_common_state->sdev, 1);
- DEV_INFO("Hdmi state switch to %d: %s\n",
+ DEV_INFO("Hdmi state switched to %d: %s\n",
external_common_state->sdev.state, __func__);
return;
@@ -3062,7 +3061,7 @@
&hdmi_msm_state->hdcp_reauth_work);
}
switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switch to %d: %s\n",
+ DEV_INFO("Hdmi state switched to %d: %s\n",
external_common_state->sdev.state, __func__);
}
#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
@@ -4381,7 +4380,7 @@
hdmi_msm_state->hpd_cable_chg_detected = FALSE;
/* QDSP OFF preceding the HPD event notification */
switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switch to %d: %s\n",
+ DEV_INFO("Hdmi state switched to %d: %s\n",
external_common_state->sdev.state, __func__);
if (on) {
hdmi_msm_read_edid();
@@ -4389,7 +4388,7 @@
hdmi_msm_state->reauth = FALSE ;
/* Build EDID table */
hdmi_msm_turn_on();
- DEV_INFO("HDMI HPD: sense CONNECTED: send ONLINE\n");
+ DEV_INFO("HDMI HPD: CONNECTED: send ONLINE\n");
kobject_uevent(external_common_state->uevent_kobj,
KOBJ_ONLINE);
hdmi_msm_hdcp_enable();
@@ -4402,16 +4401,15 @@
kobject_uevent_env(external_common_state->uevent_kobj,
KOBJ_CHANGE, envp);
switch_set_state(&external_common_state->sdev, 1);
- DEV_INFO("Hdmi state switch to %d: %s\n",
+ DEV_INFO("Hdmi state switched to %d: %s\n",
external_common_state->sdev.state, __func__);
}
} else {
- DEV_INFO("HDMI HPD: sense DISCONNECTED: send OFFLINE\n"
- );
+ DEV_INFO("HDMI HPD: DISCONNECTED: send OFFLINE\n");
kobject_uevent(external_common_state->uevent_kobj,
KOBJ_OFFLINE);
switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switch to %d: %s\n",
+ DEV_INFO("Hdmi state switched to %d: %s\n",
external_common_state->sdev.state, __func__);
}
}
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index c9be60a..bfaed8d 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -1450,7 +1450,6 @@
#endif
}
-static int mdp_clk_rate;
static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
static int pdev_list_cnt;
@@ -1458,6 +1457,68 @@
{
mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
+
+static int mdp_clk_rate;
+
+#ifdef CONFIG_FB_MSM_NO_MDP_PIPE_CTRL
+
+static void mdp_clk_off(void)
+{
+ mb();
+ vsync_clk_disable();
+
+ if (mdp_clk != NULL)
+ clk_disable_unprepare(mdp_clk);
+
+ if (mdp_pclk != NULL)
+ clk_disable_unprepare(mdp_pclk);
+
+ if (mdp_lut_clk != NULL)
+ clk_disable_unprepare(mdp_lut_clk);
+}
+
+
+static void mdp_clk_on(void)
+{
+ if (mdp_clk != NULL)
+ clk_prepare_enable(mdp_clk);
+
+ if (mdp_pclk != NULL)
+ clk_prepare_enable(mdp_pclk);
+
+ if (mdp_lut_clk != NULL)
+ clk_prepare_enable(mdp_lut_clk);
+
+ vsync_clk_enable();
+}
+
+void mdp_clk_ctrl(int on)
+{
+ static int mdp_clk_cnt;
+
+ mutex_lock(&mdp_suspend_mutex);
+ if (on) {
+ if (mdp_clk_cnt == 0)
+ mdp_clk_on();
+ mdp_clk_cnt++;
+ } else {
+ if (mdp_clk_cnt) {
+ mdp_clk_cnt--;
+ if (mdp_clk_cnt == 0)
+ mdp_clk_off();
+ }
+ }
+ mutex_unlock(&mdp_suspend_mutex);
+}
+
+
+
+void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
+ boolean isr)
+{
+ /* do nothing */
+}
+#else
void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
boolean isr)
{
@@ -1620,6 +1681,12 @@
}
}
+void mdp_clk_ctrl(int on)
+{
+ /* do nothing */
+}
+#endif
+
void mdp_histogram_handle_isr(struct mdp_hist_mgmt *mgmt)
{
uint32 isr, mask;
@@ -1933,6 +2000,15 @@
mdp_histogram_ctrl_all(FALSE);
+ if (mfd->panel.type == MIPI_CMD_PANEL)
+ mdp4_dsi_cmd_off(pdev);
+ else if (mfd->panel.type == MIPI_VIDEO_PANEL)
+ mdp4_dsi_video_off(pdev);
+ else if (mfd->panel.type == HDMI_PANEL ||
+ mfd->panel.type == LCDC_PANEL ||
+ mfd->panel.type == LVDS_PANEL)
+ mdp4_lcdc_off(pdev);
+
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
ret = panel_next_off(pdev);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
@@ -1945,18 +2021,25 @@
static int mdp_on(struct platform_device *pdev)
{
int ret = 0;
-
#ifdef CONFIG_FB_MSM_MDP40
struct msm_fb_data_type *mfd;
mfd = platform_get_drvdata(pdev);
-
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- if (is_mdp4_hw_reset()) {
+ mdp_clk_ctrl(1);
+ mdp4_hw_init();
+ outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
+ if (mfd->panel.type == MIPI_CMD_PANEL) {
mdp_vsync_cfg_regs(mfd, FALSE);
- mdp4_hw_init();
- outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
- }
+ mdp4_dsi_cmd_on(pdev);
+ } else if (mfd->panel.type == MIPI_VIDEO_PANEL)
+ mdp4_dsi_video_on(pdev);
+ else if (mfd->panel.type == HDMI_PANEL ||
+ mfd->panel.type == LCDC_PANEL ||
+ mfd->panel.type == LVDS_PANEL)
+ mdp4_lcdc_on(pdev);
+
+ mdp_clk_ctrl(0);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#endif
@@ -1964,13 +2047,6 @@
ret = panel_next_on(pdev);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-#ifdef CONFIG_FB_MSM_MDP40
- if (mfd->panel.type == MIPI_CMD_PANEL)
- mdp4_dsi_cmd_overlay_restore();
- else if (mfd->panel.type == MDDI_PANEL)
- mdp4_mddi_overlay_restore();
-#endif
-
mdp_histogram_ctrl_all(TRUE);
return ret;
@@ -2236,6 +2312,8 @@
if (rc)
return rc;
+ mdp_clk_ctrl(1);
+
mdp_hw_version();
/* initializing mdp hw */
@@ -2249,6 +2327,8 @@
#ifdef CONFIG_FB_MSM_OVERLAY
mdp_hw_cursor_init();
#endif
+ mdp_clk_ctrl(0);
+
mdp_resource_initialized = 1;
return 0;
}
@@ -2323,6 +2403,8 @@
pdata->off = mdp_off;
pdata->next = pdev;
+ mdp_clk_ctrl(1);
+
mdp_prim_panel_type = mfd->panel.type;
switch (mfd->panel.type) {
case EXT_MDDI_PANEL:
@@ -2400,8 +2482,7 @@
#ifndef CONFIG_FB_MSM_MDP303
mipi = &mfd->panel_info.mipi;
configure_mdp_core_clk_table((mipi->dsi_pclk_rate) * 23 / 20);
- pdata->on = mdp4_dsi_video_on;
- pdata->off = mdp4_dsi_video_off;
+ mdp4_dsi_vsync_init(0);
mfd->hw_refresh = TRUE;
mfd->dma_fnc = mdp4_dsi_video_overlay;
mfd->lut_update = mdp_lut_update_lcdc;
@@ -2444,6 +2525,7 @@
mfd->dma_fnc = mdp4_dsi_cmd_overlay;
mipi = &mfd->panel_info.mipi;
configure_mdp_core_clk_table((mipi->dsi_pclk_rate) * 3 / 2);
+ mdp4_dsi_rdptr_init(0);
if (mfd->panel_info.pdest == DISPLAY_1) {
if_no = PRIMARY_INTF_SEL;
mfd->dma = &dma2_data;
@@ -2464,6 +2546,7 @@
spin_unlock_irqrestore(&mdp_spin_lock, flag);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#else
+
mfd->dma_fnc = mdp_dma2_update;
mfd->do_histogram = mdp_do_histogram;
mfd->start_histogram = mdp_histogram_start;
@@ -2484,6 +2567,7 @@
#ifdef CONFIG_FB_MSM_DTV
case DTV_PANEL:
+ mdp4_dtv_vsync_init(0);
pdata->on = mdp4_dtv_on;
pdata->off = mdp4_dtv_off;
mfd->hw_refresh = TRUE;
@@ -2499,8 +2583,10 @@
case HDMI_PANEL:
case LCDC_PANEL:
case LVDS_PANEL:
+#ifdef CONFIG_FB_MSM_MDP303
pdata->on = mdp_lcdc_on;
pdata->off = mdp_lcdc_off;
+#endif
mfd->hw_refresh = TRUE;
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDP40)
mfd->cursor_update = mdp_hw_cursor_sync_update;
@@ -2520,6 +2606,7 @@
#endif
#ifdef CONFIG_FB_MSM_MDP40
+ mdp4_lcdc_vsync_init(0);
configure_mdp_core_clk_table((mfd->panel_info.clk_rate)
* 23 / 20);
if (mfd->panel.type == HDMI_PANEL) {
@@ -2580,6 +2667,7 @@
default:
printk(KERN_ERR "mdp_probe: unknown device type!\n");
rc = -ENODEV;
+ mdp_clk_ctrl(0);
goto mdp_probe_err;
}
#ifdef CONFIG_FB_MSM_MDP40
@@ -2588,6 +2676,8 @@
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#endif
+ mdp_clk_ctrl(0);
+
#ifdef CONFIG_MSM_BUS_SCALING
if (!mdp_bus_scale_handle && mdp_pdata &&
mdp_pdata->mdp_bus_scale_table) {
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
index 184c5ce..e91209b 100644
--- a/drivers/video/msm/mdp.h
+++ b/drivers/video/msm/mdp.h
@@ -277,6 +277,10 @@
#ifdef CONFIG_FB_MSM_MDP40
#define MDP_OVERLAY0_TERM 0x20
#define MDP_OVERLAY1_TERM 0x40
+#define MDP_DMAP_TERM MDP_DMA2_TERM /* dmap == dma2 */
+#define MDP_PRIM_VSYNC_TERM 0x100
+#define MDP_EXTER_VSYNC_TERM 0x200
+#define MDP_PRIM_RDPTR_TERM 0x400
#endif
#define MDP_OVERLAY2_TERM 0x80
#define MDP_HISTOGRAM_TERM_DMA_P 0x100
@@ -704,6 +708,7 @@
void mdp_hw_init(void);
int mdp_ppp_pipe_wait(void);
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd);
+void mdp_clk_ctrl(int on);
void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
boolean isr);
void mdp_set_dma_pan_info(struct fb_info *info, struct mdp_dirty_region *dirty,
@@ -751,6 +756,30 @@
int mdp_dsi_video_off(struct platform_device *pdev);
void mdp_dsi_video_update(struct msm_fb_data_type *mfd);
void mdp3_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd);
+static inline int mdp4_dsi_cmd_off(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline int mdp4_dsi_video_off(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline int mdp4_lcdc_off(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline int mdp4_dsi_cmd_on(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline int mdp4_dsi_video_on(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline int mdp4_lcdc_on(struct platform_device *pdev)
+{
+ return 0;
+}
#endif
void set_cont_splashScreen_status(int);
@@ -785,6 +814,8 @@
#endif
#ifdef MDP_HW_VSYNC
+void vsync_clk_enable(void);
+void vsync_clk_disable(void);
void mdp_hw_vsync_clk_enable(struct msm_fb_data_type *mfd);
void mdp_hw_vsync_clk_disable(struct msm_fb_data_type *mfd);
void mdp_vsync_clk_disable(void);
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index c9bdf27..72e7c8f 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -34,6 +34,7 @@
#define MDP4_VIDEO_BASE 0x20000
#define MDP4_VIDEO_OFF 0x10000
+#define MDP4_VIDEO_CSC_OFF 0x4000
#define MDP4_RGB_BASE 0x40000
#define MDP4_RGB_OFF 0x10000
@@ -120,7 +121,7 @@
#define INTR_PRIMARY_INTF_UDERRUN BIT(8)
#define INTR_EXTERNAL_VSYNC BIT(9)
#define INTR_EXTERNAL_INTF_UDERRUN BIT(10)
-#define INTR_PRIMARY_READ_PTR BIT(11)
+#define INTR_PRIMARY_RDPTR BIT(11) /* read pointer */
#define INTR_DMA_P_HISTOGRAM BIT(17)
#define INTR_DMA_S_HISTOGRAM BIT(26)
#define INTR_OVERLAY2_DONE BIT(30)
@@ -219,6 +220,7 @@
#define MDP4_OP_FLIP_UD BIT(14)
#define MDP4_OP_FLIP_LR BIT(13)
#define MDP4_OP_CSC_EN BIT(11)
+#define MDP4_OP_DST_DATA_YCBCR BIT(10)
#define MDP4_OP_SRC_DATA_YCBCR BIT(9)
#define MDP4_OP_SCALEY_FIR (0 << 4)
#define MDP4_OP_SCALEY_MN_PHASE (1 << 4)
@@ -258,6 +260,14 @@
u8 mark_unmap;
};
+#define IOMMU_FREE_LIST_MAX 32
+
+struct iommu_free_list {
+ int total;
+ int fndx;
+ struct ion_handle *ihdl[IOMMU_FREE_LIST_MAX];
+};
+
struct blend_cfg {
u32 op;
u32 bg_alpha;
@@ -337,20 +347,26 @@
uint32 element2; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
uint32 element1; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
uint32 element0; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
- struct completion comp;
ulong ov_blt_addr; /* blt mode addr */
ulong dma_blt_addr; /* blt mode addr */
ulong blt_base;
ulong blt_offset;
uint32 blt_cnt;
+ uint32 blt_changed;
uint32 ov_cnt;
uint32 dmap_cnt;
uint32 dmae_cnt;
uint32 blt_end;
+ uint32 blt_ov_koff;
+ uint32 blt_ov_done;
+ uint32 blt_dmap_koff;
+ uint32 blt_dmap_done;
uint32 luma_align_size;
- struct mdp4_hsic_regs hsic_regs;
- struct completion dmas_comp;
+ struct mdp_overlay_pp_params pp_cfg;
struct mdp_overlay req_data;
+ struct completion comp;
+ struct completion dmas_comp;
+ struct mdp4_iommu_pipe_info iommu;
};
struct mdp4_statistic {
@@ -366,7 +382,7 @@
ulong intr_vsync_e; /* external interface */
ulong intr_underrun_e; /* external interface */
ulong intr_histogram;
- ulong intr_rd_ptr;
+ ulong intr_rdptr;
ulong dsi_mdp_start;
ulong dsi_clk_on;
ulong dsi_clk_off;
@@ -387,7 +403,13 @@
ulong overlay_set[MDP4_MIXER_MAX];
ulong overlay_unset[MDP4_MIXER_MAX];
ulong overlay_play[MDP4_MIXER_MAX];
+ ulong overlay_commit[MDP4_MIXER_MAX];
ulong pipe[OVERLAY_PIPE_MAX];
+ ulong wait4vsync0;
+ ulong wait4vsync1;
+ ulong iommu_map;
+ ulong iommu_unmap;
+ ulong iommu_drop;
ulong dsi_clkoff;
ulong err_mixer;
ulong err_zorder;
@@ -399,6 +421,12 @@
ulong err_underflow;
};
+struct vsync_update {
+ int update_cnt; /* pipes to be updated */
+ struct completion vsync_comp;
+ struct mdp4_overlay_pipe plist[OVERLAY_PIPE_MAX];
+};
+
struct mdp4_overlay_pipe *mdp4_overlay_ndx2pipe(int ndx);
void mdp4_sw_reset(unsigned long bits);
void mdp4_display_intf_sel(int output, unsigned long intf);
@@ -434,8 +462,10 @@
uint32 mdp4_overlay_format(struct mdp4_overlay_pipe *pipe);
uint32 mdp4_overlay_unpack_pattern(struct mdp4_overlay_pipe *pipe);
uint32 mdp4_overlay_op_mode(struct mdp4_overlay_pipe *pipe);
-void mdp4_lcdc_base_swap(struct mdp4_overlay_pipe *pipe);
+void mdp4_lcdc_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd);
+
+
#ifdef CONFIG_FB_MSM_DTV
void mdp4_overlay_dtv_start(void);
void mdp4_overlay_dtv_ov_done_push(struct msm_fb_data_type *mfd,
@@ -446,9 +476,10 @@
struct mdp4_overlay_pipe *pipe);
int mdp4_overlay_dtv_unset(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe);
-void mdp4_dma_e_done_dtv(void);
-void mdp4_overlay_dtv_wait4vsync(void);
-void mdp4_dtv_base_swap(struct mdp4_overlay_pipe *pipe);
+void mdp4_dmae_done_dtv(void);
+void mdp4_dtv_wait4vsync(int cndx, long long *vtime);
+void mdp4_dtv_vsync_ctrl(int cndx, int enable);
+void mdp4_dtv_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
#else
static inline void mdp4_overlay_dtv_start(void)
{
@@ -475,11 +506,15 @@
return 0;
}
-static inline void mdp4_dma_e_done_dtv(void)
+static inline void mdp4_dmae_done_dtv(void)
{
/* empty */
}
-static inline void mdp4_overlay_dtv_wait4vsync(void)
+static inline void mdp4_dtv_wait4vsync(int cndx, long long *vtime)
+{
+ /* empty */
+}
+static inline void mdp4_dtv_vsync_ctrl(int cndx, long long *vtime)
{
/* empty */
}
@@ -495,19 +530,10 @@
{
/* empty */
}
-#endif
+#endif /* CONFIG_FB_MSM_DTV */
void mdp4_dtv_set_black_screen(void);
-static inline int mdp4_overlay_borderfill_supported(void)
-{
- unsigned int mdp_hw_version;
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- mdp_hw_version = inpdw(MDP_BASE + 0x0); /* MDP_HW_VERSION */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- return (mdp_hw_version >= 0x0402030b);
-}
-
int mdp4_overlay_dtv_set(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe);
int mdp4_overlay_dtv_unset(struct msm_fb_data_type *mfd,
@@ -520,11 +546,20 @@
int mdp4_atv_off(struct platform_device *pdev);
void mdp4_dsi_video_fxn_register(cmd_fxn_t fxn);
void mdp4_dsi_video_overlay(struct msm_fb_data_type *mfd);
-int mdp4_dsi_video_on(struct platform_device *pdev);
-int mdp4_dsi_video_off(struct platform_device *pdev);
-void mdp4_overlay0_done_dsi_video(struct mdp_dma_data *dma);
-void mdp4_overlay0_done_dsi_cmd(struct mdp_dma_data *dma);
+void mdp4_lcdc_vsync_ctrl(int cndx, int enable);
+void mdp4_overlay0_done_dsi_video(int cndx);
+void mdp4_overlay0_done_dsi_cmd(int cndx);
+void mdp4_primary_rdptr(void);
void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd);
+int mdp4_overlay_commit(struct fb_info *info, int mixer);
+int mdp4_dsi_video_pipe_commit(void);
+int mdp4_dsi_cmd_pipe_commit(void);
+int mdp4_lcdc_pipe_commit(void);
+int mdp4_dtv_pipe_commit(void);
+void mdp4_dsi_rdptr_init(int cndx);
+void mdp4_dsi_vsync_init(int cndx);
+void mdp4_lcdc_vsync_init(int cndx);
+void mdp4_dtv_vsync_init(int cndx);
void mdp4_overlay_dsi_state_set(int state);
int mdp4_overlay_dsi_state_get(void);
void mdp4_overlay_rgb_setup(struct mdp4_overlay_pipe *pipe);
@@ -541,12 +576,20 @@
int mdp4_overlay_format2pipe(struct mdp4_overlay_pipe *pipe);
int mdp4_overlay_get(struct fb_info *info, struct mdp_overlay *req);
int mdp4_overlay_set(struct fb_info *info, struct mdp_overlay *req);
+int mdp4_overlay_wait4vsync(struct fb_info *info, long long *vtime);
+int mdp4_overlay_vsync_ctrl(struct fb_info *info, int enable);
int mdp4_overlay_unset(struct fb_info *info, int ndx);
int mdp4_overlay_unset_mixer(int mixer);
int mdp4_overlay_play_wait(struct fb_info *info,
struct msmfb_overlay_data *req);
int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req);
struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(int ptype, int mixer);
+void mdp4_overlay_dma_commit(int mixer);
+void mdp4_overlay_vsync_commit(struct mdp4_overlay_pipe *pipe);
+void mdp4_mixer_stage_commit(int mixer);
+void mdp4_dsi_cmd_do_update(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_lcdc_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_dtv_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
void mdp4_overlay_pipe_free(struct mdp4_overlay_pipe *pipe);
void mdp4_overlay_dmap_cfg(struct msm_fb_data_type *mfd, int lcdc);
void mdp4_overlay_dmap_xy(struct mdp4_overlay_pipe *pipe);
@@ -554,21 +597,20 @@
void mdp4_overlay_dmae_xy(struct mdp4_overlay_pipe *pipe);
int mdp4_overlay_pipe_staged(int mixer);
void mdp4_lcdc_primary_vsyn(void);
-void mdp4_overlay0_done_lcdc(struct mdp_dma_data *dma);
+void mdp4_overlay0_done_lcdc(int cndx);
void mdp4_overlay0_done_mddi(struct mdp_dma_data *dma);
-void mdp4_dma_s_done_mddi(void);
void mdp4_dma_p_done_mddi(struct mdp_dma_data *dma);
-void mdp4_dma_p_done_dsi(struct mdp_dma_data *dma);
-void mdp4_dma_p_done_dsi_video(struct mdp_dma_data *dma);
-void mdp4_dma_p_done_lcdc(void);
+void mdp4_dmap_done_dsi_cmd(int cndx);
+void mdp4_dmap_done_dsi_video(int cndx);
+void mdp4_dmap_done_lcdc(int cndx);
void mdp4_overlay1_done_dtv(void);
void mdp4_overlay1_done_atv(void);
void mdp4_primary_vsync_lcdc(void);
void mdp4_external_vsync_dtv(void);
-void mdp4_overlay_lcdc_wait4vsync(struct msm_fb_data_type *mfd);
-void mdp4_overlay_lcdc_start(void);
+void mdp4_lcdc_wait4vsync(int cndx, long long *vtime);
void mdp4_overlay_lcdc_vsync_push(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe);
+void mdp4_overlay_dtv_set_perf(struct msm_fb_data_type *mfd);
void mdp4_update_perf_level(u32 perf_level);
void mdp4_set_perf_level(void);
void mdp4_mddi_overlay_dmas_restore(void);
@@ -577,6 +619,11 @@
void mdp4_mddi_dma_busy_wait(struct msm_fb_data_type *mfd);
void mdp4_mddi_overlay_restore(void);
#else
+static inline void mdp4_mddi_kickoff_video(struct msm_fb_data_type *mfd,
+ struct mdp4_overlay_pipe *pipe)
+{
+ /* empty */
+}
static inline void mdp4_mddi_dma_busy_wait(struct msm_fb_data_type *mfd)
{
/* empty */
@@ -636,7 +683,7 @@
struct msmfb_overlay_blt *req);
int mdp4_dsi_video_overlay_blt_offset(struct msm_fb_data_type *mfd,
struct msmfb_overlay_blt *req);
-void mdp4_dsi_video_base_swap(struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_video_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
#ifdef CONFIG_FB_MSM_MDP40
static inline void mdp3_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
@@ -644,7 +691,7 @@
/* empty */
}
#endif
-#else
+#else /* CONFIG_FB_MSM_MIPI_DSI */
int mdp4_mddi_overlay_blt_offset(struct msm_fb_data_type *mfd,
struct msmfb_overlay_blt *req);
void mdp4_mddi_overlay_blt(struct msm_fb_data_type *mfd,
@@ -684,11 +731,12 @@
{
return -ENODEV;
}
-static inline void mdp4_dsi_video_base_swap(struct mdp4_overlay_pipe *pipe)
+static inline void mdp4_dsi_video_base_swap(int cndx,
+ struct mdp4_overlay_pipe *pipe)
{
/* empty */
}
-#endif
+#endif /* CONFIG_FB_MSM_MIPI_DSI */
void mdp4_lcdc_overlay_blt(struct msm_fb_data_type *mfd,
struct msmfb_overlay_blt *req);
@@ -712,51 +760,87 @@
void mdp4_dsi_cmd_dma_busy_check(void);
+
+
#ifdef CONFIG_FB_MSM_MIPI_DSI
-void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd);
-void mdp4_dsi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd);
-void mdp4_overlay_dsi_video_start(void);
-void mdp4_overlay_dsi_video_vsync_push(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe);
-void mdp4_dsi_cmd_overlay_restore(void);
void mdp_dsi_cmd_overlay_suspend(struct msm_fb_data_type *mfd);
+int mdp4_dsi_cmd_on(struct platform_device *pdev);
+int mdp4_dsi_cmd_off(struct platform_device *pdev);
+int mdp4_dsi_video_off(struct platform_device *pdev);
+int mdp4_dsi_video_on(struct platform_device *pdev);
+void mdp4_primary_vsync_dsi_video(void);
+void mdp4_dsi_cmd_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_cmd_wait4vsync(int cndx, long long *vtime);
+void mdp4_dsi_video_wait4vsync(int cndx, long long *vtime);
+void mdp4_dsi_cmd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_video_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_cmd_vsync_ctrl(int cndx, int enable);
+void mdp4_dsi_video_vsync_ctrl(int cndx, int enable);
#ifdef CONFIG_FB_MSM_MDP303
static inline void mdp4_dsi_cmd_del_timer(void)
{
/* empty */
}
-#else
+#else /* CONFIG_FB_MSM_MIPI_DSI */
void mdp4_dsi_cmd_del_timer(void);
#endif
-#else
-static inline void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
+#else /* CONFIG_FB_MSM_MIPI_DSI */
+
+static inline int mdp4_dsi_cmd_on(struct platform_device *pdev)
{
- /* empty */
+ return 0;
}
-static inline void mdp4_dsi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd)
+static inline int mdp4_dsi_cmd_off(struct platform_device *pdev)
{
- /* empty */
+ return 0;
}
+static inline int mdp4_dsi_video_on(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline int mdp4_dsi_video_off(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline void mdp4_primary_vsync_dsi_video(void)
+{
+}
+static inline void mdp4_dsi_cmd_base_swap(int cndx,
+ struct mdp4_overlay_pipe *pipe)
+{
+}
+static inline void mdp4_dsi_cmd_wait4vsync(int cndx, long long *vtime)
+{
+}
+static inline void mdp4_dsi_video_wait4vsync(int cndx, long long *vtime)
+{
+}
+static inline void mdp4_dsi_cmd_pipe_queue(int cndx,
+ struct mdp4_overlay_pipe *pipe)
+{
+}
+static inline void mdp4_dsi_video_pipe_queue(int cndx,
+ struct mdp4_overlay_pipe *pipe)
+{
+}
+static inline void mdp4_dsi_cmd_vsync_ctrl(int cndx, int enable)
+{
+}
+static inline void mdp4_dsi_video_vsync_ctrl(int cndx, int enable)
+{
+}
+
static inline void mdp4_overlay_dsi_video_start(void)
{
/* empty */
}
-static inline void mdp4_overlay_dsi_video_vsync_push(
- struct msm_fb_data_type *mfd, struct mdp4_overlay_pipe *pipe)
-{
- /* empty */
-}
-static inline void mdp4_dsi_cmd_overlay_restore(void)
-{
- /* empty */
-}
#ifdef CONFIG_FB_MSM_MDP40
static inline void mdp_dsi_cmd_overlay_suspend(struct msm_fb_data_type *mfd)
{
/* empty */
}
#endif
-#endif /* MIPI_DSI */
+#endif /* CONFIG_FB_MSM_MIPI_DSI */
void mdp4_dsi_cmd_kickoff_ui(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe);
@@ -764,8 +848,6 @@
struct mdp4_overlay_pipe *pipe);
void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe);
-void mdp4_dsi_cmd_base_swap(struct mdp4_overlay_pipe *pipe);
-
void mdp4_overlay_panel_3d(int mixer_num, uint32 panel_3d);
int mdp4_overlay_3d_sbys(struct fb_info *info, struct msmfb_overlay_3d *req);
void mdp4_dsi_cmd_3d_sbys(struct msm_fb_data_type *mfd,
@@ -773,6 +855,9 @@
void mdp4_dsi_video_3d_sbys(struct msm_fb_data_type *mfd,
struct msmfb_overlay_3d *r3d);
+void mdp4_backlight_init(int cndx);
+void mdp4_backlight_put_level(int cndx, int level);
+
int mdp4_mixer_info(int mixer_num, struct mdp_mixer_info *info);
void mdp_dmap_vsync_set(int enable);
@@ -782,11 +867,18 @@
int mdp4_mddi_overlay_cursor(struct fb_info *info, struct fb_cursor *cursor);
int mdp_ppp_blit(struct fb_info *info, struct mdp_blit_req *req);
void mdp4_overlay_resource_release(void);
-void mdp4_overlay_dsi_video_wait4vsync(struct msm_fb_data_type *mfd);
-void mdp4_primary_vsync_dsi_video(void);
uint32_t mdp4_ss_table_value(int8_t param, int8_t index);
void mdp4_overlay_borderfill_stage_down(struct mdp4_overlay_pipe *pipe);
+#ifdef CONFIG_FB_MSM_MDP303
+static inline int mdp4_overlay_borderfill_supported(void)
+{
+ return 0;
+}
+#else
+int mdp4_overlay_borderfill_supported(void);
+#endif
+
int mdp4_overlay_writeback_on(struct platform_device *pdev);
int mdp4_overlay_writeback_off(struct platform_device *pdev);
void mdp4_writeback_overlay(struct msm_fb_data_type *mfd);
@@ -808,7 +900,6 @@
uint32_t mdp_block2base(uint32_t block);
int mdp_hist_lut_config(struct mdp_hist_lut_data *data);
-void mdp4_hsic_set(struct mdp4_overlay_pipe *pipe, struct dpp_ctrl *ctrl);
void mdp4_hsic_update(struct mdp4_overlay_pipe *pipe);
int mdp4_csc_config(struct mdp_csc_cfg_data *config);
void mdp4_csc_write(struct mdp_csc_cfg *data, uint32_t base);
@@ -816,11 +907,16 @@
int mdp4_pcc_cfg(struct mdp_pcc_cfg_data *cfg_ptr);
int mdp4_argc_cfg(struct mdp_pgc_lut_data *pgc_ptr);
int mdp4_qseed_cfg(struct mdp_qseed_cfg_data *cfg);
+int mdp4_qseed_access_cfg(struct mdp_qseed_cfg *cfg, uint32_t base);
u32 mdp4_allocate_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num);
void mdp4_init_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num);
void mdp4_free_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num);
int mdp4_igc_lut_config(struct mdp_igc_lut_data *cfg);
+void mdp4_overlay_iommu_pipe_free(int ndx, int all);
+void mdp4_overlay_iommu_free_list(int mixer, struct ion_handle *ihdl);
+void mdp4_overlay_iommu_unmap_freelist(int mixer);
+void mdp4_overlay_iommu_vsync_cnt(void);
void mdp4_iommu_unmap(struct mdp4_overlay_pipe *pipe);
void mdp4_iommu_attach(void);
int mdp4_v4l2_overlay_set(struct fb_info *info, struct mdp_overlay *req,
diff --git a/drivers/video/msm/mdp4_hsic.c b/drivers/video/msm/mdp4_hsic.c
deleted file mode 100644
index 5735f45..0000000
--- a/drivers/video/msm/mdp4_hsic.c
+++ /dev/null
@@ -1,534 +0,0 @@
-/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/msm_mdp.h>
-#include "mdp.h"
-#include "mdp4.h"
-
-/* Definitions */
-#define MDP4_CSC_MV_OFF 0x4400
-#define MDP4_CSC_PRE_BV_OFF 0x4500
-#define MDP4_CSC_POST_BV_OFF 0x4580
-#define MDP4_CSC_PRE_LV_OFF 0x4600
-#define MDP4_CSC_POST_LV_OFF 0x4680
-#define MDP_VG1_BASE (MDP_BASE + MDP4_VIDEO_BASE)
-
-#define MDP_VG1_CSC_MVn(n) (MDP_VG1_BASE + MDP4_CSC_MV_OFF + 4 * (n))
-#define MDP_VG1_CSC_PRE_LVn(n) (MDP_VG1_BASE + MDP4_CSC_PRE_LV_OFF + 4 * (n))
-#define MDP_VG1_CSC_POST_LVn(n) (MDP_VG1_BASE + MDP4_CSC_POST_LV_OFF + 4 * (n))
-#define MDP_VG1_CSC_PRE_BVn(n) (MDP_VG1_BASE + MDP4_CSC_PRE_BV_OFF + 4 * (n))
-#define MDP_VG1_CSC_POST_BVn(n) (MDP_VG1_BASE + MDP4_CSC_POST_BV_OFF + 4 * (n))
-
-#define Q16 (16)
-#define Q16_ONE (1 << Q16)
-
-#define Q16_VALUE(x) ((int32_t)((uint32_t)x << Q16))
-#define Q16_PERCENT_VALUE(x, n) ((int32_t)( \
- div_s64(((int64_t)x * (int64_t)Q16_ONE), n)))
-
-#define Q16_WHOLE(x) ((int32_t)(x >> 16))
-#define Q16_FRAC(x) ((int32_t)(x & 0xFFFF))
-#define Q16_S1Q16_MUL(x, y) (((x >> 1) * (y >> 1)) >> 14)
-
-#define Q16_MUL(x, y) ((int32_t)((((int64_t)x) * ((int64_t)y)) >> Q16))
-#define Q16_NEGATE(x) (0 - (x))
-
-/*
- * HSIC Control min/max values
- * These settings are based on the maximum/minimum allowed modifications to
- * HSIC controls for layer and display color. Allowing too much variation in
- * the CSC block will result in color clipping resulting in unwanted color
- * shifts.
- */
-#define TRIG_MAX Q16_VALUE(128)
-#define CON_SAT_MAX Q16_VALUE(128)
-#define INTENSITY_MAX (Q16_VALUE(2047) >> 12)
-
-#define HUE_MAX Q16_VALUE(100)
-#define HUE_MIN Q16_VALUE(-100)
-#define HUE_DEF Q16_VALUE(0)
-
-#define SAT_MAX Q16_VALUE(100)
-#define SAT_MIN Q16_VALUE(-100)
-#define SAT_DEF CON_SAT_MAX
-
-#define CON_MAX Q16_VALUE(100)
-#define CON_MIN Q16_VALUE(-100)
-#define CON_DEF CON_SAT_MAX
-
-#define INTEN_MAX Q16_VALUE(100)
-#define INTEN_MIN Q16_VALUE(-100)
-#define INTEN_DEF Q16_VALUE(0)
-
-enum {
- DIRTY,
- GENERATED,
- CLEAN
-};
-
-/* local vars*/
-static int32_t csc_matrix_tab[3][3] = {
- {0x00012a00, 0x00000000, 0x00019880},
- {0x00012a00, 0xffff9b80, 0xffff3000},
- {0x00012a00, 0x00020480, 0x00000000}
-};
-
-static int32_t csc_yuv2rgb_conv_tab[3][3] = {
- {0x00010000, 0x00000000, 0x000123cb},
- {0x00010000, 0xffff9af9, 0xffff6b5e},
- {0x00010000, 0x00020838, 0x00000000}
-};
-
-static int32_t csc_rgb2yuv_conv_tab[3][3] = {
- {0x00004c8b, 0x00009645, 0x00001d2f},
- {0xffffda56, 0xffffb60e, 0x00006f9d},
- {0x00009d70, 0xffff7c2a, 0xffffe666}
-};
-
-static uint32_t csc_pre_bv_tab[3] = {0xfffff800, 0xffffc000, 0xffffc000};
-static uint32_t csc_post_bv_tab[3] = {0x00000000, 0x00000000, 0x00000000};
-
-static uint32_t csc_pre_lv_tab[6] = {0x00000000, 0x00007f80, 0x00000000,
- 0x00007f80, 0x00000000, 0x00007f80};
-static uint32_t csc_post_lv_tab[6] = {0x00000000, 0x00007f80, 0x00000000,
- 0x00007f80, 0x00000000, 0x00007f80};
-
-/* Lookup table for Sin/Cos lookup - Q16*/
-static const int32_t trig_lut[65] = {
- 0x00000000, /* sin((2*M_PI/256) * 0x00);*/
- 0x00000648, /* sin((2*M_PI/256) * 0x01);*/
- 0x00000C90, /* sin((2*M_PI/256) * 0x02);*/
- 0x000012D5,
- 0x00001918,
- 0x00001F56,
- 0x00002590,
- 0x00002BC4,
- 0x000031F1,
- 0x00003817,
- 0x00003E34,
- 0x00004447,
- 0x00004A50,
- 0x0000504D,
- 0x0000563E,
- 0x00005C22,
- 0x000061F8,
- 0x000067BE,
- 0x00006D74,
- 0x0000731A,
- 0x000078AD,
- 0x00007E2F,
- 0x0000839C,
- 0x000088F6,
- 0x00008E3A,
- 0x00009368,
- 0x00009880,
- 0x00009D80,
- 0x0000A268,
- 0x0000A736,
- 0x0000ABEB,
- 0x0000B086,
- 0x0000B505,
- 0x0000B968,
- 0x0000BDAF,
- 0x0000C1D8,
- 0x0000C5E4,
- 0x0000C9D1,
- 0x0000CD9F,
- 0x0000D14D,
- 0x0000D4DB,
- 0x0000D848,
- 0x0000DB94,
- 0x0000DEBE,
- 0x0000E1C6,
- 0x0000E4AA,
- 0x0000E768,
- 0x0000EA0A,
- 0x0000EC83,
- 0x0000EED9,
- 0x0000F109,
- 0x0000F314,
- 0x0000F4FA,
- 0x0000F6BA,
- 0x0000F854,
- 0x0000F9C8,
- 0x0000FB15,
- 0x0000FC3B,
- 0x0000FD3B,
- 0x0000FE13,
- 0x0000FEC4,
- 0x0000FF4E,
- 0x0000FFB1,
- 0x0000FFEC,
- 0x00010000, /* sin((2*M_PI/256) * 0x40);*/
-};
-
-void trig_values_q16(int32_t deg, int32_t *cos, int32_t *sin)
-{
- int32_t angle;
- int32_t quad, anglei, anglef;
- int32_t v0 = 0, v1 = 0;
- int32_t t1, t2;
-
- /*
- * Scale the angle so that 256 is one complete revolution and mask it
- * to this domain
- * NOTE: 0xB60B == 256/360
- */
- angle = Q16_MUL(deg, 0xB60B) & 0x00FFFFFF;
-
- /* Obtain a quadrant number, integer, and fractional part */
- quad = angle >> 22;
- anglei = (angle >> 16) & 0x3F;
- anglef = angle & 0xFFFF;
-
- /*
- * Using the integer part, obtain the lookup table entry and its
- * complement. Using the quadrant, swap and negate these as
- * necessary.
- * (The values and all derivatives of sine and cosine functions
- * can be derived from these values)
- */
- switch (quad) {
- case 0x0:
- v0 += trig_lut[anglei];
- v1 += trig_lut[0x40-anglei];
- break;
-
- case 0x1:
- v0 += trig_lut[0x40-anglei];
- v1 -= trig_lut[anglei];
- break;
-
- case 0x2:
- v0 -= trig_lut[anglei];
- v1 -= trig_lut[0x40-anglei];
- break;
-
- case 0x3:
- v0 -= trig_lut[0x40-anglei];
- v1 += trig_lut[anglei];
- break;
- }
-
- /*
- * Multiply the fractional part by 2*PI/256 to move it from lookup
- * table units to radians, giving us the coefficient for first
- * derivatives.
- */
- t1 = Q16_S1Q16_MUL(anglef, 0x0648);
-
- /*
- * Square this and divide by 2 to get the coefficient for second
- * derivatives
- */
- t2 = Q16_S1Q16_MUL(t1, t1) >> 1;
-
- *sin = v0 + Q16_S1Q16_MUL(v1, t1) - Q16_S1Q16_MUL(v0, t2);
-
- *cos = v1 - Q16_S1Q16_MUL(v0, t1) - Q16_S1Q16_MUL(v1, t2);
-}
-
-/* Convert input Q16 value to s4.9 */
-int16_t convert_q16_s49(int32_t q16Value)
-{ /* Top half is the whole number, Bottom half is fractional portion*/
- int16_t whole = Q16_WHOLE(q16Value);
- int32_t fraction = Q16_FRAC(q16Value);
-
- /* Clamp whole to 3 bits */
- if (whole > 7)
- whole = 7;
- else if (whole < -7)
- whole = -7;
-
- /* Reduce fraction to 9 bits. */
- fraction = (fraction<<9)>>Q16;
-
- return (int16_t) ((int16_t)whole<<9) | ((int16_t)fraction);
-}
-
-/* Convert input Q16 value to uint16 */
-int16_t convert_q16_int16(int32_t val)
-{
- int32_t rounded;
-
- if (val >= 0) {
- /* Add 0.5 */
- rounded = val + (Q16_ONE>>1);
- } else {
- /* Subtract 0.5 */
- rounded = val - (Q16_ONE>>1);
- }
-
- /* Truncate rounded value */
- return (int16_t)(rounded>>Q16);
-}
-
-/*
- * norm_q16
- * Return a Q16 value represeting a normalized value
- *
- * value -100% 0% +100%
- * |-----------------|----------------|
- * ^ ^ ^
- * q16MinValue q16DefaultValue q16MaxValue
- *
- */
-int32_t norm_q16(int32_t value, int32_t min, int32_t default_val, int32_t max,
- int32_t range)
-{
- int32_t diff, perc, mul, result;
-
- if (0 == value) {
- result = default_val;
- } else if (value > 0) {
- /* value is between 0% and +100% represent 1.0 -> QRange Max */
- diff = range;
- perc = Q16_PERCENT_VALUE(value, max);
- mul = Q16_MUL(perc, diff);
- result = default_val + mul;
- } else {
- /* if (value <= 0) */
- diff = -range;
- perc = Q16_PERCENT_VALUE(-value, -min);
- mul = Q16_MUL(perc, diff);
- result = default_val + mul;
- }
- return result;
-}
-
-void matrix_mul_3x3(int32_t dest[][3], int32_t a[][3], int32_t b[][3])
-{
- int32_t i, j, k;
- int32_t tmp[3][3];
-
- for (i = 0; i < 3; i++) {
- for (j = 0; j < 3; j++) {
- tmp[i][j] = 0;
- for (k = 0; k < 3; k++)
- tmp[i][j] += Q16_MUL(a[i][k], b[k][j]);
- }
- }
-
- /* in case dest = a or b*/
- for (i = 0; i < 3; i++) {
- for (j = 0; j < 3; j++)
- dest[i][j] = tmp[i][j];
- }
-}
-
-#define CONVERT(x) (x)/*convert_q16_s49((x))*/
-void pr_params(struct mdp4_hsic_regs *regs)
-{
- int i;
- if (regs) {
- for (i = 0; i < NUM_HSIC_PARAM; i++) {
- pr_info("\t: hsic->params[%d] = 0x%08x [raw = 0x%08x]\n",
- i, CONVERT(regs->params[i]), regs->params[i]);
- }
- }
-}
-
-void pr_3x3_matrix(int32_t in[][3])
-{
- pr_info("\t[0x%08x\t0x%08x\t0x%08x]\n", CONVERT(in[0][0]),
- CONVERT(in[0][1]), CONVERT(in[0][2]));
- pr_info("\t[0x%08x\t0x%08x\t0x%08x]\n", CONVERT(in[1][0]),
- CONVERT(in[1][1]), CONVERT(in[1][2]));
- pr_info("\t[0x%08x\t0x%08x\t0x%08x]\n", CONVERT(in[2][0]),
- CONVERT(in[2][1]), CONVERT(in[2][2]));
-}
-
-void _hsic_get(struct mdp4_hsic_regs *regs, int32_t type, int8_t *val)
-{
- if (type < 0 || type >= NUM_HSIC_PARAM)
- BUG_ON(-EINVAL);
- *val = regs->params[type];
- pr_info("%s: getting params[%d] = %d\n", __func__, type, *val);
-}
-
-void _hsic_set(struct mdp4_hsic_regs *regs, int32_t type, int8_t val)
-{
- if (type < 0 || type >= NUM_HSIC_PARAM)
- BUG_ON(-EINVAL);
-
- if (regs->params[type] != Q16_VALUE(val)) {
- regs->params[type] = Q16_VALUE(val);
- regs->dirty = DIRTY;
- }
-}
-
-void _hsic_generate_csc_matrix(struct mdp4_overlay_pipe *pipe)
-{
- int i, j;
- int32_t sin, cos;
-
- int32_t hue_matrix[3][3];
- int32_t con_sat_matrix[3][3];
- struct mdp4_hsic_regs *regs = &(pipe->hsic_regs);
-
- memset(con_sat_matrix, 0x0, sizeof(con_sat_matrix));
- memset(hue_matrix, 0x0, sizeof(hue_matrix));
-
- /*
- * HSIC control require matrix multiplication of these two tables
- * [T 0 0][1 0 0] T = Contrast C=Cos(Hue)
- * [0 S 0][0 C -N] S = Saturation N=Sin(Hue)
- * [0 0 S][0 N C]
- */
-
- con_sat_matrix[0][0] = norm_q16(regs->params[HSIC_CON], CON_MIN,
- CON_DEF, CON_MAX, CON_SAT_MAX);
- con_sat_matrix[1][1] = norm_q16(regs->params[HSIC_SAT], SAT_MIN,
- SAT_DEF, SAT_MAX, CON_SAT_MAX);
- con_sat_matrix[2][2] = con_sat_matrix[1][1];
-
- hue_matrix[0][0] = TRIG_MAX;
-
- trig_values_q16(norm_q16(regs->params[HSIC_HUE], HUE_MIN, HUE_DEF,
- HUE_MAX, TRIG_MAX), &cos, &sin);
-
- cos = Q16_MUL(cos, TRIG_MAX);
- sin = Q16_MUL(sin, TRIG_MAX);
-
- hue_matrix[1][1] = cos;
- hue_matrix[2][2] = cos;
- hue_matrix[2][1] = sin;
- hue_matrix[1][2] = Q16_NEGATE(sin);
-
- /* Generate YUV CSC matrix */
- matrix_mul_3x3(regs->conv_matrix, con_sat_matrix, hue_matrix);
-
- if (!(pipe->op_mode & MDP4_OP_SRC_DATA_YCBCR)) {
- /* Convert input RGB to YUV then apply CSC matrix */
- pr_info("Pipe %d, has RGB input\n", pipe->pipe_num);
- matrix_mul_3x3(regs->conv_matrix, regs->conv_matrix,
- csc_rgb2yuv_conv_tab);
- }
-
- /* Normalize the matrix */
- for (i = 0; i < 3; i++) {
- for (j = 0; j < 3; j++)
- regs->conv_matrix[i][j] = (regs->conv_matrix[i][j]>>14);
- }
-
- /* Multiply above result by current csc table */
- matrix_mul_3x3(regs->conv_matrix, regs->conv_matrix, csc_matrix_tab);
-
- if (!(pipe->op_mode & MDP4_OP_SRC_DATA_YCBCR)) {
- /*HACK:only "works"for src side*/
- /* Convert back to RGB */
- pr_info("Pipe %d, has RGB output\n", pipe->pipe_num);
- matrix_mul_3x3(regs->conv_matrix, csc_yuv2rgb_conv_tab,
- regs->conv_matrix);
- }
-
- /* Update clamps pre and post. */
- /* TODO: different tables for different color formats? */
- for (i = 0; i < 6; i++) {
- regs->pre_limit[i] = csc_pre_lv_tab[i];
- regs->post_limit[i] = csc_post_lv_tab[i];
- }
-
- /* update bias values, pre and post */
- for (i = 0; i < 3; i++) {
- regs->pre_bias[i] = csc_pre_bv_tab[i];
- regs->post_bias[i] = csc_post_bv_tab[i] +
- norm_q16(regs->params[HSIC_INT],
- INTEN_MIN, INTEN_DEF, INTEN_MAX, INTENSITY_MAX);
- }
-
- regs->dirty = GENERATED;
-}
-
-void _hsic_update_mdp(struct mdp4_overlay_pipe *pipe)
-{
- struct mdp4_hsic_regs *regs = &(pipe->hsic_regs);
- int i, j, k;
-
- uint32_t *csc_mv;
- uint32_t *pre_lv;
- uint32_t *post_lv;
- uint32_t *pre_bv;
- uint32_t *post_bv;
-
- switch (pipe->pipe_num) {
- case OVERLAY_PIPE_VG2:
- csc_mv = (uint32_t *) (MDP_VG1_CSC_MVn(0) +
- MDP4_VIDEO_OFF);
- pre_lv = (uint32_t *) (MDP_VG1_CSC_PRE_LVn(0) +
- MDP4_VIDEO_OFF);
- post_lv = (uint32_t *) (MDP_VG1_CSC_POST_LVn(0) +
- MDP4_VIDEO_OFF);
- pre_bv = (uint32_t *) (MDP_VG1_CSC_PRE_BVn(0) +
- MDP4_VIDEO_OFF);
- post_bv = (uint32_t *) (MDP_VG1_CSC_POST_BVn(0) +
- MDP4_VIDEO_OFF);
- break;
- case OVERLAY_PIPE_VG1:
- default:
- csc_mv = (uint32_t *) MDP_VG1_CSC_MVn(0);
- pre_lv = (uint32_t *) MDP_VG1_CSC_PRE_LVn(0);
- post_lv = (uint32_t *) MDP_VG1_CSC_POST_LVn(0);
- pre_bv = (uint32_t *) MDP_VG1_CSC_PRE_BVn(0);
- post_bv = (uint32_t *) MDP_VG1_CSC_POST_BVn(0);
- break;
- }
-
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- for (i = 0; i < 3; i++) {
- for (j = 0; j < 3; j++) {
- k = (3*i) + j;
- MDP_OUTP(csc_mv + k, convert_q16_s49(
- regs->conv_matrix[i][j]));
- }
- }
-
- for (i = 0; i < 6; i++) {
- MDP_OUTP(pre_lv + i, convert_q16_s49(regs->pre_limit[i]));
- MDP_OUTP(post_lv + i, convert_q16_s49(regs->post_limit[i]));
- }
-
- for (i = 0; i < 3; i++) {
- MDP_OUTP(pre_bv + i, convert_q16_s49(regs->pre_bias[i]));
- MDP_OUTP(post_bv + i, convert_q16_s49(regs->post_bias[i]));
- }
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-
- regs->dirty = CLEAN;
-}
-
-void mdp4_hsic_get(struct mdp4_overlay_pipe *pipe, struct dpp_ctrl *ctrl)
-{
- int i;
- for (i = 0; i < NUM_HSIC_PARAM; i++)
- _hsic_get(&(pipe->hsic_regs), i, &(ctrl->hsic_params[i]));
-}
-
-void mdp4_hsic_set(struct mdp4_overlay_pipe *pipe, struct dpp_ctrl *ctrl)
-{
- int i;
- for (i = 0; i < NUM_HSIC_PARAM; i++)
- _hsic_set(&(pipe->hsic_regs), i, ctrl->hsic_params[i]);
-
- if (pipe->hsic_regs.dirty == DIRTY)
- _hsic_generate_csc_matrix(pipe);
-}
-
-void mdp4_hsic_update(struct mdp4_overlay_pipe *pipe)
-{
- if (pipe->hsic_regs.dirty == GENERATED)
- _hsic_update_mdp(pipe);
-}
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 703d65d..71315e6 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -50,6 +50,7 @@
struct blend_cfg blend[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
uint32 mixer_cfg[MDP4_MIXER_MAX];
uint32 flush[MDP4_MIXER_MAX];
+ struct iommu_free_list iommu_free[MDP4_MIXER_MAX];
uint32 cs_controller;
uint32 hw_version;
uint32 panel_3d;
@@ -101,17 +102,104 @@
},
};
+static DEFINE_MUTEX(iommu_mutex);
static struct mdp4_overlay_ctrl *ctrl = &mdp4_overlay_db;
static int new_perf_level;
static struct ion_client *display_iclient;
-static struct mdp4_iommu_pipe_info mdp_iommu[MDP4_MIXER_MAX][OVERLAY_PIPE_MAX];
+
+
+/*
+ * mdp4_overlay_iommu_unmap_freelist()
+ * mdp4_overlay_iommu_2freelist()
+ * mdp4_overlay_iommu_pipe_free()
+ * the above three functions need to be called from the same thread and
+ * in order so that no mutex is needed.
+ */
+void mdp4_overlay_iommu_unmap_freelist(int mixer)
+{
+ int i;
+ struct ion_handle *ihdl;
+ struct iommu_free_list *flist;
+
+ mutex_lock(&iommu_mutex);
+ flist = &ctrl->iommu_free[mixer];
+ if (flist->total == 0) {
+ mutex_unlock(&iommu_mutex);
+ return;
+ }
+ for (i = 0; i < IOMMU_FREE_LIST_MAX; i++) {
+ ihdl = flist->ihdl[i];
+ if (ihdl == NULL)
+ continue;
+ pr_debug("%s: mixer=%d i=%d ihdl=0x%p\n", __func__,
+ mixer, i, ihdl);
+ ion_unmap_iommu(display_iclient, ihdl, DISPLAY_READ_DOMAIN,
+ GEN_POOL);
+ mdp4_stat.iommu_unmap++;
+ ion_free(display_iclient, ihdl);
+ flist->ihdl[i] = NULL;
+ }
+
+ flist->fndx = 0;
+ flist->total = 0;
+ mutex_unlock(&iommu_mutex);
+}
+
+void mdp4_overlay_iommu_2freelist(int mixer, struct ion_handle *ihdl)
+{
+ struct iommu_free_list *flist;
+
+ flist = &ctrl->iommu_free[mixer];
+ if (flist->fndx >= IOMMU_FREE_LIST_MAX) {
+ pr_err("%s: Error, mixer=%d iommu fndx=%d\n",
+ __func__, mixer, flist->fndx);
+ mdp4_stat.iommu_drop++;
+ mutex_unlock(&iommu_mutex);
+ return;
+ }
+
+ pr_debug("%s: add mixer=%d fndx=%d ihdl=0x%p\n", __func__,
+ mixer, flist->fndx, ihdl);
+
+ flist->total++;
+ flist->ihdl[flist->fndx++] = ihdl;
+}
+
+void mdp4_overlay_iommu_pipe_free(int ndx, int all)
+{
+ struct mdp4_overlay_pipe *pipe;
+ struct mdp4_iommu_pipe_info *iom;
+ int plane, mixer;
+
+ pipe = mdp4_overlay_ndx2pipe(ndx);
+ if (pipe == NULL)
+ return;
+
+ mutex_lock(&iommu_mutex);
+ mixer = pipe->mixer_num;
+ iom = &pipe->iommu;
+ pr_debug("%s: mixer=%d ndx=%d all=%d\n", __func__,
+ mixer, pipe->pipe_ndx, all);
+ for (plane = 0; plane < MDP4_MAX_PLANE; plane++) {
+ if (iom->prev_ihdl[plane]) {
+ mdp4_overlay_iommu_2freelist(mixer,
+ iom->prev_ihdl[plane]);
+ iom->prev_ihdl[plane] = NULL;
+ }
+ if (all && iom->ihdl[plane]) {
+ mdp4_overlay_iommu_2freelist(mixer, iom->ihdl[plane]);
+ iom->ihdl[plane] = NULL;
+ }
+ }
+ mutex_unlock(&iommu_mutex);
+}
int mdp4_overlay_iommu_map_buf(int mem_id,
struct mdp4_overlay_pipe *pipe, unsigned int plane,
unsigned long *start, unsigned long *len,
struct ion_handle **srcp_ihdl)
{
- struct mdp4_iommu_pipe_info *iom_pipe_info;
+ struct mdp4_iommu_pipe_info *iom;
if (!display_iclient)
return -EINVAL;
@@ -133,30 +221,21 @@
return -EINVAL;
}
- iom_pipe_info = &mdp_iommu[pipe->mixer_num][pipe->pipe_ndx - 1];
- if (!iom_pipe_info->ihdl[plane]) {
- iom_pipe_info->ihdl[plane] = *srcp_ihdl;
- } else {
- if (iom_pipe_info->prev_ihdl[plane]) {
- ion_unmap_iommu(display_iclient,
- iom_pipe_info->prev_ihdl[plane],
- DISPLAY_READ_DOMAIN, GEN_POOL);
- ion_free(display_iclient,
- iom_pipe_info->prev_ihdl[plane]);
- pr_debug("Previous: mixer %u, pipe %u, plane %u, "
- "prev_ihdl %p\n", pipe->mixer_num,
- pipe->pipe_ndx, plane,
- iom_pipe_info->prev_ihdl[plane]);
- }
+ mutex_lock(&iommu_mutex);
+ mdp4_stat.iommu_map++;
+ iom = &pipe->iommu;
+ iom->prev_ihdl[plane] = iom->ihdl[plane];
+ iom->ihdl[plane] = *srcp_ihdl;
- iom_pipe_info->prev_ihdl[plane] = iom_pipe_info->ihdl[plane];
- iom_pipe_info->ihdl[plane] = *srcp_ihdl;
- }
- pr_debug("mem_id %d, start 0x%lx, len 0x%lx\n",
- mem_id, *start, *len);
+ pr_debug("%s: ndx=%d plane=%d prev=0x%p cur=0x%p start=0x%lx len=%lx\n",
+ __func__, pipe->pipe_ndx, plane, iom->prev_ihdl[plane],
+ iom->ihdl[plane], *start, *len);
+ mutex_unlock(&iommu_mutex);
return 0;
}
+static struct mdp4_iommu_pipe_info mdp_iommu[MDP4_MIXER_MAX][OVERLAY_PIPE_MAX];
+
void mdp4_iommu_unmap(struct mdp4_overlay_pipe *pipe)
{
struct mdp4_iommu_pipe_info *iom_pipe_info;
@@ -183,9 +262,7 @@
if (iom_pipe_info->mark_unmap) {
if (iom_pipe_info->ihdl[i]) {
- if (pipe->mixer_num == MDP4_MIXER1)
- mdp4_overlay_dtv_wait4vsync();
- pr_debug("%s(): mixer %u, pipe %u, plane %u, "
+ pr_debug("%s(): MARK, mixer %u, pipe %u, plane %u, "
"ihdl %p\n", __func__,
pipe->mixer_num, j + 1, i,
iom_pipe_info->ihdl[i]);
@@ -246,6 +323,11 @@
}
}
+int mdp4_overlay_borderfill_supported(void)
+{
+ return (ctrl->hw_version >= 0x0402030b);
+}
+
void mdp4_overlay_dmae_cfg(struct msm_fb_data_type *mfd, int atv)
{
uint32 dmae_cfg_reg;
@@ -421,7 +503,7 @@
{
uint32 off, bpp;
- if (mdp_is_in_isr == FALSE)
+ if (!in_interrupt())
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
if (pipe->dma_blt_addr) {
@@ -447,7 +529,7 @@
/* dma_p dest */
MDP_OUTP(MDP_BASE + 0x90010, (pipe->dst_y << 16 | pipe->dst_x));
- if (mdp_is_in_isr == FALSE)
+ if (!in_interrupt())
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
@@ -684,7 +766,7 @@
char *vg_base;
uint32 frame_size, src_size, src_xy, dst_size, dst_xy;
uint32 format, pattern, luma_offset, chroma_offset;
- uint32 mask, curr, addr;
+ uint32 mask;
int pnum, ptype;
pnum = pipe->pipe_num - OVERLAY_PIPE_VG1; /* start from 0 */
@@ -701,10 +783,29 @@
format = mdp4_overlay_format(pipe);
pattern = mdp4_overlay_unpack_pattern(pipe);
+ /* CSC Post Processing enabled? */
+ if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
+ if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_CSC_CFG) {
+ if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_ENABLE)
+ pipe->op_mode |= MDP4_OP_CSC_EN;
+ if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_YUV_IN)
+ pipe->op_mode |= MDP4_OP_SRC_DATA_YCBCR;
+ if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_YUV_OUT)
+ pipe->op_mode |= MDP4_OP_DST_DATA_YCBCR;
+
+ mdp4_csc_write(&pipe->pp_cfg.csc_cfg,
+ (uint32_t) (vg_base + MDP4_VIDEO_CSC_OFF));
+ }
+ if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_QSEED_CFG) {
+ mdp4_qseed_access_cfg(&pipe->pp_cfg.qseed_cfg[0],
+ (uint32_t) vg_base);
+ mdp4_qseed_access_cfg(&pipe->pp_cfg.qseed_cfg[1],
+ (uint32_t) vg_base);
+ }
+ }
/* not RGB use VG pipe, pure VG pipe */
- pipe->op_mode |= MDP4_OP_CSC_EN;
if (ptype != OVERLAY_TYPE_RGB)
- pipe->op_mode |= MDP4_OP_SRC_DATA_YCBCR;
+ pipe->op_mode |= (MDP4_OP_CSC_EN | MDP4_OP_SRC_DATA_YCBCR);
#ifdef MDP4_IGC_LUT_ENABLE
pipe->op_mode |= MDP4_OP_IGC_LUT_EN;
@@ -748,24 +849,6 @@
&chroma_offset);
}
- /* Ensure proper covert matrix loaded when color space swaps */
- curr = inpdw(vg_base + 0x0058);
- mask = 0x600;
-
- if ((curr & mask) != (pipe->op_mode & mask)) {
- addr = ((uint32_t)vg_base) + 0x4000;
- if (ptype != OVERLAY_TYPE_RGB)
- mdp4_csc_write(&(mdp_csc_convert[1]), addr);
- else
- mdp4_csc_write(&(mdp_csc_convert[0]), addr);
-
- mask = 0xFFFCFFFF;
- } else {
- /* Don't touch bits you don't want to configure*/
- mask = 0xFFFCF1FF;
- }
- pipe->op_mode = (pipe->op_mode & mask) | (curr & ~mask);
-
/* luma component plane */
outpdw(vg_base + 0x0010, pipe->srcp0_addr + luma_offset);
@@ -798,15 +881,6 @@
pipe->r_bit << 4 | pipe->b_bit << 2 | pipe->g_bit);
}
- if (pipe->flags & MDP_SHARPENING) {
- outpdw(vg_base + 0x8200,
- mdp4_ss_table_value(pipe->req_data.dpp.sharp_strength,
- 0));
- outpdw(vg_base + 0x8204,
- mdp4_ss_table_value(pipe->req_data.dpp.sharp_strength,
- 1));
- }
-
if (mdp_rev > MDP_REV_41) {
/* mdp chip select controller */
mask = 0;
@@ -1315,7 +1389,7 @@
} else
overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
- if (mdp_is_in_isr == FALSE)
+ if (!in_interrupt())
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/*
@@ -1410,7 +1484,7 @@
outpdw(overlay_base + 0x0014, curr | 0x4); /* GC_LUT_EN, 888 */
#endif
- if (mdp_is_in_isr == FALSE)
+ if (!in_interrupt())
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
@@ -1473,7 +1547,7 @@
return cnt;
}
-static void mdp4_mixer_stage_commit(int mixer)
+void mdp4_mixer_stage_commit(int mixer)
{
struct mdp4_overlay_pipe *pipe;
int i, num;
@@ -1537,20 +1611,13 @@
for (i = MDP4_MIXER_STAGE_BASE; i < MDP4_MIXER_STAGE_MAX; i++) {
pp = ctrl->stage[mixer][i];
- if (pp == pipe) {
+ if (pp && pp->pipe_ndx == pipe->pipe_ndx) {
ctrl->stage[mixer][i] = NULL;
break;
}
}
ctrl->stage[mixer][pipe->mixer_stage] = pipe; /* keep it */
-
- if (!(pipe->flags & MDP_OV_PLAY_NOWAIT)) {
- pr_debug("%s: mixer=%d ndx=%d stage=%d flags=%x\n",
- __func__, mixer, pipe->pipe_ndx,
- pipe->mixer_stage, pipe->flags);
- mdp4_mixer_stage_commit(mixer);
- }
}
void mdp4_mixer_stage_down(struct mdp4_overlay_pipe *pipe)
@@ -1562,16 +1629,11 @@
for (i = MDP4_MIXER_STAGE_BASE; i < MDP4_MIXER_STAGE_MAX; i++) {
pp = ctrl->stage[mixer][i];
- if (pp == pipe)
+ if (pp && pp->pipe_ndx == pipe->pipe_ndx)
ctrl->stage[mixer][i] = NULL; /* clear it */
}
- if (!(pipe->flags & MDP_OV_PLAY_NOWAIT)) {
- pr_debug("%s: mixer=%d ndx=%d stage=%d flags=%x\n",
- __func__, pipe->mixer_num, pipe->pipe_ndx,
- pipe->mixer_stage, pipe->flags);
- mdp4_mixer_stage_commit(pipe->mixer_num);
- }
+ mdp4_mixer_stage_commit(mixer);
}
/*
* mixer0: rgb3: border color at register 0x15004, 0x15008
@@ -1616,11 +1678,13 @@
bspipe->pipe_used = 0;
if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
- mdp4_dsi_video_base_swap(pipe);
+ mdp4_dsi_video_base_swap(0, pipe);
+ else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+ mdp4_dsi_cmd_base_swap(0, pipe);
else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
- mdp4_lcdc_base_swap(pipe);
+ mdp4_lcdc_base_swap(0, pipe);
else if (ctrl->panel_mode & MDP4_PANEL_DTV)
- mdp4_dtv_base_swap(pipe);
+ mdp4_dtv_base_swap(0, pipe);
mdp4_overlay_reg_flush(bspipe, 1);
/* borderfill pipe as base layer */
@@ -1667,7 +1731,14 @@
/* free borderfill pipe */
pipe->pipe_used = 0;
- mdp4_dsi_video_base_swap(bspipe);
+ if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
+ mdp4_dsi_video_base_swap(0, bspipe);
+ else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+ mdp4_dsi_cmd_base_swap(0, bspipe);
+ else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
+ mdp4_lcdc_base_swap(0, bspipe);
+ else if (ctrl->panel_mode & MDP4_PANEL_DTV)
+ mdp4_dtv_base_swap(0, bspipe);
/* free borderfill pipe */
mdp4_overlay_reg_flush(pipe, 1);
@@ -1993,6 +2064,8 @@
iom_pipe_info = &mdp_iommu[pipe->mixer_num][pipe->pipe_ndx - 1];
iom_pipe_info->mark_unmap = 1;
+ mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 1);
+
memset(pipe, 0, sizeof(*pipe));
pipe->pipe_type = ptype;
@@ -2105,6 +2178,62 @@
return -ERANGE;
}
+ if (req->src_rect.h > 0xFFF) {
+ pr_err("%s: src_h is out of range: 0X%x!\n",
+ __func__, req->src_rect.h);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
+ if (req->src_rect.w > 0xFFF) {
+ pr_err("%s: src_w is out of range: 0X%x!\n",
+ __func__, req->src_rect.w);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
+ if (req->src_rect.x > 0xFFF) {
+ pr_err("%s: src_x is out of range: 0X%x!\n",
+ __func__, req->src_rect.x);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
+ if (req->src_rect.y > 0xFFF) {
+ pr_err("%s: src_y is out of range: 0X%x!\n",
+ __func__, req->src_rect.y);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
+ if (req->dst_rect.h > 0xFFF) {
+ pr_err("%s: dst_h is out of range: 0X%x!\n",
+ __func__, req->dst_rect.h);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
+ if (req->dst_rect.w > 0xFFF) {
+ pr_err("%s: dst_w is out of range: 0X%x!\n",
+ __func__, req->dst_rect.w);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
+ if (req->dst_rect.x > 0xFFF) {
+ pr_err("%s: dst_x is out of range: 0X%x!\n",
+ __func__, req->dst_rect.x);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
+ if (req->dst_rect.y > 0xFFF) {
+ pr_err("%s: dst_y is out of range: 0X%x!\n",
+ __func__, req->dst_rect.y);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
if (req->src_rect.h == 0 || req->src_rect.w == 0) {
pr_err("%s: src img of zero size!\n", __func__);
mdp4_stat.err_size++;
@@ -2204,7 +2333,6 @@
}
iom_pipe_info = &mdp_iommu[pipe->mixer_num][pipe->pipe_ndx - 1];
- iom_pipe_info->mark_unmap = 0;
pipe->src_format = req->src.format;
ret = mdp4_overlay_format2pipe(pipe);
@@ -2372,29 +2500,6 @@
return 0;
}
-int mdp4_overlay_blt_offset(struct fb_info *info, struct msmfb_overlay_blt *req)
-{
- int ret = 0;
-
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-
- if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
- return -EINTR;
-
- if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
- ret = mdp4_dsi_overlay_blt_offset(mfd, req);
- else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
- ret = mdp4_dsi_video_overlay_blt_offset(mfd, req);
- else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
- ret = mdp4_lcdc_overlay_blt_offset(mfd, req);
- else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
- mdp4_mddi_overlay_blt_offset(mfd, req);
-
- mutex_unlock(&mfd->dma->ov_mutex);
-
- return ret;
-}
-
int mdp4_overlay_get(struct fb_info *info, struct mdp_overlay *req)
{
struct mdp4_overlay_pipe *pipe;
@@ -2544,7 +2649,6 @@
if (mfd->ov1_blt_state == mfd->use_ov1_blt)
return;
if (mfd->use_ov1_blt) {
- mdp4_allocate_writeback_buf(mfd, MDP4_MIXER1);
mdp4_dtv_overlay_blt_start(mfd);
pr_debug("%s overlay1 writeback is enabled\n", __func__);
} else {
@@ -2657,23 +2761,17 @@
}
}
- if (pipe->flags & MDP_SHARPENING) {
- bool test = ((pipe->req_data.dpp.sharp_strength > 0) &&
- ((req->src_rect.w > req->dst_rect.w) &&
- (req->src_rect.h > req->dst_rect.h)));
- if (test) {
- pr_debug("%s: No sharpening while downscaling.\n",
- __func__);
- pipe->flags &= ~MDP_SHARPENING;
- }
- }
-
- /* precompute HSIC matrices */
- if (req->flags & MDP_DPP_HSIC)
- mdp4_hsic_set(pipe, &(req->dpp));
-
mdp4_stat.overlay_set[pipe->mixer_num]++;
+ if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
+ if (pipe->pipe_num <= OVERLAY_PIPE_VG2)
+ memcpy(&pipe->pp_cfg, &req->overlay_pp_cfg,
+ sizeof(struct mdp_overlay_pp_params));
+ else
+ pr_debug("%s: RGB Pipes don't support CSC/QSEED\n",
+ __func__);
+ }
+
if (ctrl->panel_mode & MDP4_PANEL_DTV &&
pipe->mixer_num == MDP4_MIXER1) {
u32 use_blt = mdp4_overlay_blt_enable(req, mfd, perf_level);
@@ -2701,8 +2799,6 @@
if (old_level > perf_level)
mdp4_set_perf_level();
} else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
- mdp4_dsi_cmd_dma_busy_wait(mfd);
- mdp4_dsi_blt_dmap_busy_wait(mfd);
mdp4_set_perf_level();
} else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
if (old_level > perf_level)
@@ -2713,10 +2809,8 @@
mdp4_set_perf_level();
}
} else {
- if (ctrl->panel_mode & MDP4_PANEL_DTV) {
- mdp4_overlay_reg_flush(pipe, 0);
- mdp4_overlay_dtv_ov_done_push(mfd, pipe);
- }
+ if (ctrl->panel_mode & MDP4_PANEL_DTV)
+ mdp4_overlay_dtv_set_perf(mfd);
}
}
mutex_unlock(&mfd->dma->ov_mutex);
@@ -2734,6 +2828,7 @@
if (pipe == NULL)
continue;
pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
+ mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_down(pipe);
mdp4_overlay_pipe_free(pipe);
cnt++;
@@ -2746,8 +2841,6 @@
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct mdp4_overlay_pipe *pipe;
- struct dpp_ctrl dpp;
- int i;
if (mfd == NULL)
return -ENODEV;
@@ -2775,83 +2868,37 @@
else {
/* mixer 0 */
ctrl->mixer0_played = 0;
-#ifdef CONFIG_FB_MSM_MIPI_DSI
- if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
- if (mfd->panel_power_on) {
- mdp4_dsi_blt_dmap_busy_wait(mfd);
- }
- }
-#else
+
if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
if (mfd->panel_power_on)
mdp4_mddi_blt_dmap_busy_wait(mfd);
}
-#endif
}
- {
- mdp4_overlay_reg_flush(pipe, 1);
+ mdp4_overlay_reg_flush(pipe, 1);
+ mdp4_mixer_stage_down(pipe);
- if (mfd->use_ov0_blt || pipe->mixer_num == MDP4_MIXER1) {
- /* unstage pipe forcedly */
- pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
+ if (pipe->mixer_num == MDP4_MIXER0) {
+ if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
+ if (mfd->panel_power_on)
+ mdp4_mddi_overlay_restore();
}
- mdp4_mixer_stage_down(pipe);
-
- if (pipe->mixer_num == MDP4_MIXER0) {
- mfd->use_ov0_blt &= ~(1 << (pipe->pipe_ndx-1));
- mdp4_overlay_update_blt_mode(mfd);
-#ifdef CONFIG_FB_MSM_MIPI_DSI
- if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
- if (mfd->panel_power_on) {
- mdp4_dsi_cmd_overlay_restore();
- mdp4_dsi_cmd_dma_busy_wait(mfd);
- mdp4_dsi_blt_dmap_busy_wait(mfd);
- }
- } else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
- pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
- if (mfd->panel_power_on)
- mdp4_overlay_dsi_video_vsync_push(mfd,
- pipe);
- }
-#else
- if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
- if (mfd->panel_power_on) {
- mdp4_mddi_overlay_restore();
- mdp4_mddi_dma_busy_wait(mfd);
- mdp4_mddi_blt_dmap_busy_wait(mfd);
- }
- }
-#endif
- else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
- pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
- if (mfd->panel_power_on)
- mdp4_overlay_lcdc_vsync_push(mfd, pipe);
- }
- if (!mfd->use_ov0_blt)
- mdp4_free_writeback_buf(mfd, MDP4_MIXER0);
- } else { /* mixer1, DTV, ATV */
- if (ctrl->panel_mode & MDP4_PANEL_DTV) {
- mdp4_overlay_dtv_unset(mfd, pipe);
- mfd->use_ov1_blt &= ~(1 << (pipe->pipe_ndx-1));
- mdp4_overlay1_update_blt_mode(mfd);
- if (!mfd->use_ov1_blt)
- mdp4_free_writeback_buf(mfd,
+ mfd->use_ov0_blt &= ~(1 << (pipe->pipe_ndx-1));
+ mdp4_overlay_update_blt_mode(mfd);
+ if (!mfd->use_ov0_blt)
+ mdp4_free_writeback_buf(mfd, MDP4_MIXER0);
+ } else { /* mixer1, DTV, ATV */
+ if (ctrl->panel_mode & MDP4_PANEL_DTV) {
+ mdp4_overlay_dtv_unset(mfd, pipe);
+ mfd->use_ov1_blt &= ~(1 << (pipe->pipe_ndx-1));
+ mdp4_overlay1_update_blt_mode(mfd);
+ if (!mfd->use_ov1_blt)
+ mdp4_free_writeback_buf(mfd,
MDP4_MIXER1);
- }
}
}
- /* Reset any HSIC settings to default */
- if (pipe->flags & MDP_DPP_HSIC) {
- for (i = 0; i < NUM_HSIC_PARAM; i++)
- dpp.hsic_params[i] = 0;
-
- mdp4_hsic_set(pipe, &dpp);
- mdp4_hsic_update(pipe);
- }
-
mdp4_stat.overlay_unset[pipe->mixer_num]++;
mdp4_overlay_pipe_free(pipe);
@@ -2861,6 +2908,36 @@
return 0;
}
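+/*
+ * fb0 (node 0) routes to the primary interface (dsi video/cmd or lcdc);
+ * fb1 (node 1) routes to the external dtv interface.
+ */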
+int mdp4_overlay_wait4vsync(struct fb_info *info, long long *vtime)
+{
+ if (info->node == 0) {
+ if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
+ mdp4_dsi_video_wait4vsync(0, vtime);
+ else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+ mdp4_dsi_cmd_wait4vsync(0, vtime);
+ else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
+ mdp4_lcdc_wait4vsync(0, vtime);
+ } else if (info->node == 1)
+ mdp4_dtv_wait4vsync(0, vtime);
+
+ return 0;
+}
+
+int mdp4_overlay_vsync_ctrl(struct fb_info *info, int enable)
+{
+ if (info->node == 0) {
+ if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
+ mdp4_dsi_video_vsync_ctrl(0, enable);
+ else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+ mdp4_dsi_cmd_vsync_ctrl(0, enable);
+ else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
+ mdp4_lcdc_vsync_ctrl(0, enable);
+ } else if (info->node == 1)
+ mdp4_dtv_vsync_ctrl(0, enable);
+
+ return 0;
+}
+
struct tile_desc {
uint32 width; /* tile's width */
@@ -2900,37 +2977,39 @@
int mdp4_overlay_play_wait(struct fb_info *info, struct msmfb_overlay_data *req)
{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- struct mdp4_overlay_pipe *pipe;
-
- if (mfd == NULL)
- return -ENODEV;
-
- if (!mfd->panel_power_on) /* suspended */
- return -EPERM;
-
- pipe = mdp4_overlay_ndx2pipe(req->id);
-
- if (!pipe) {
- mdp4_stat.err_play++;
- return -ENODEV;
- }
-
- if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
- return -EINTR;
-
- mdp4_mixer_stage_commit(pipe->mixer_num);
-
- if (mfd->use_ov1_blt)
- mdp4_overlay1_update_blt_mode(mfd);
-
- mdp4_overlay_dtv_wait4vsync();
- mdp4_iommu_unmap(pipe);
-
- mutex_unlock(&mfd->dma->ov_mutex);
return 0;
}
+/*
+ * mdp4_overlay_dma_commit: called from dma_done isr
+ * No mutex/sleep allowed
+ */
+void mdp4_overlay_dma_commit(int mixer)
+{
+ /*
+ * non-double-buffered register updates are done here;
+ * perf level and new clock rate changes should also be applied here
+ */
+}
+
+/*
+ * mdp4_overlay_vsync_commit: called from tasklet context
+ * No mutex/sleep allowed
+ */
+void mdp4_overlay_vsync_commit(struct mdp4_overlay_pipe *pipe)
+{
+ if (pipe->pipe_type == OVERLAY_TYPE_VIDEO)
+ mdp4_overlay_vg_setup(pipe); /* video/graphic pipe */
+ else
+ mdp4_overlay_rgb_setup(pipe); /* rgb pipe */
+
+ pr_debug("%s: pipe=%x ndx=%d num=%d used=%d\n", __func__,
+ (int) pipe, pipe->pipe_ndx, pipe->pipe_num, pipe->pipe_used);
+
+ mdp4_overlay_reg_flush(pipe, 1);
+ mdp4_mixer_stage_up(pipe);
+}
+
int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
@@ -2958,20 +3037,18 @@
return -ENODEV;
}
- if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
- return -EINTR;
-
if (pipe->pipe_type == OVERLAY_TYPE_BF) {
mdp4_overlay_borderfill_stage_up(pipe);
- mutex_unlock(&mfd->dma->ov_mutex);
return 0;
}
+ if (ctrl->panel_mode & MDP4_PANEL_MDDI)
+ mutex_lock(&mfd->dma->ov_mutex);
+
img = &req->data;
get_img(img, info, pipe, 0, &start, &len, &srcp0_file,
&ps0_need, &srcp0_ihdl);
if (len == 0) {
- mutex_unlock(&mfd->dma->ov_mutex);
pr_err("%s: pmem Error\n", __func__);
ret = -1;
goto end;
@@ -2982,8 +3059,9 @@
pipe->srcp0_ystride = pipe->src_width * pipe->bpp;
- pr_debug("%s: mixer=%d ndx=%x addr=%x flags=%x\n", __func__,
- pipe->mixer_num, pipe->pipe_ndx, (int)addr, pipe->flags);
+ pr_debug("%s: mixer=%d ndx=%x addr=%x flags=%x pid=%d\n", __func__,
+ pipe->mixer_num, pipe->pipe_ndx, (int)addr, pipe->flags,
+ current->pid);
if ((req->version_key & VERSION_KEY_MASK) == 0xF9E8D700)
overlay_version = (req->version_key & ~VERSION_KEY_MASK);
@@ -2994,7 +3072,6 @@
get_img(img, info, pipe, 1, &start, &len, &srcp1_file,
&p_need, &srcp1_ihdl);
if (len == 0) {
- mutex_unlock(&mfd->dma->ov_mutex);
pr_err("%s: Error to get plane1\n", __func__);
ret = -EINVAL;
goto end;
@@ -3026,7 +3103,6 @@
get_img(img, info, pipe, 1, &start, &len, &srcp1_file,
&p_need, &srcp1_ihdl);
if (len == 0) {
- mutex_unlock(&mfd->dma->ov_mutex);
pr_err("%s: Error to get plane1\n", __func__);
ret = -EINVAL;
goto end;
@@ -3037,7 +3113,6 @@
get_img(img, info, pipe, 2, &start, &len, &srcp2_file,
&p_need, &srcp2_ihdl);
if (len == 0) {
- mutex_unlock(&mfd->dma->ov_mutex);
pr_err("%s: Error to get plane2\n", __func__);
ret = -EINVAL;
goto end;
@@ -3081,16 +3156,33 @@
if (mfd->use_ov1_blt)
mdp4_overlay1_update_blt_mode(mfd);
+ if (ctrl->panel_mode & MDP4_PANEL_MDDI)
+ goto mddi;
+
+ if (pipe->mixer_num == MDP4_MIXER0) {
+ if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
+ /* cndx = 0 */
+ mdp4_dsi_cmd_pipe_queue(0, pipe);
+ }
+ if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
+ /* cndx = 0 */
+ mdp4_dsi_video_pipe_queue(0, pipe);
+ } else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
+ /* cndx = 0 */
+ mdp4_lcdc_pipe_queue(0, pipe);
+ }
+ } else if (pipe->mixer_num == MDP4_MIXER1) {
+ if (ctrl->panel_mode & MDP4_PANEL_DTV)
+ mdp4_dtv_pipe_queue(0, pipe);/* cndx = 0 */
+ }
+
+ return ret;
+
+mddi:
if (pipe->pipe_type == OVERLAY_TYPE_VIDEO) {
- mdp4_overlay_vg_setup(pipe); /* video/graphic pipe */
+ mdp4_overlay_vg_setup(pipe); /* video/graphic pipe */
} else {
- if (pipe->flags & MDP_SHARPENING) {
- pr_debug(
- "%s: Sharpening/Smoothing not supported on RGB pipe\n",
- __func__);
- pipe->flags &= ~MDP_SHARPENING;
- }
mdp4_overlay_rgb_setup(pipe); /* rgb pipe */
}
@@ -3102,73 +3194,21 @@
}
mdp4_mixer_stage_up(pipe);
+ if (!(pipe->flags & MDP_OV_PLAY_NOWAIT))
+ mdp4_mixer_stage_commit(pipe->mixer_num);
- if (pipe->mixer_num == MDP4_MIXER2) {
- ctrl->mixer2_played++;
-#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
- if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK) {
- mdp4_writeback_dma_busy_wait(mfd);
- mdp4_writeback_kickoff_video(mfd, pipe);
- }
-#endif
- } else if (pipe->mixer_num == MDP4_MIXER1) {
- ctrl->mixer1_played++;
- /* enternal interface */
- if (ctrl->panel_mode & MDP4_PANEL_DTV) {
- if (pipe->flags & MDP_OV_PLAY_NOWAIT)
- mdp4_overlay_flush_piggyback(MDP4_MIXER0,
- MDP4_MIXER1);
- mdp4_overlay_dtv_start();
- mdp4_overlay_dtv_ov_done_push(mfd, pipe);
- if (!mfd->use_ov1_blt)
- mdp4_overlay1_update_blt_mode(mfd);
- }
- } else {
- /* primary interface */
- ctrl->mixer0_played++;
- if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
- mdp4_overlay_lcdc_start();
- mdp4_overlay_lcdc_vsync_push(mfd, pipe);
- if (!mfd->use_ov0_blt &&
- !(pipe->flags & MDP_OV_PLAY_NOWAIT))
- mdp4_overlay_update_blt_mode(mfd);
- }
-#ifdef CONFIG_FB_MSM_MIPI_DSI
- else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
- mdp4_overlay_dsi_video_start();
- mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
- if (!mfd->use_ov0_blt &&
- !(pipe->flags & MDP_OV_PLAY_NOWAIT))
- mdp4_overlay_update_blt_mode(mfd);
- }
-#endif
- else {
- mdp4_overlay_reg_flush_reset(pipe);
- /* mddi & mipi dsi cmd mode */
- if (pipe->flags & MDP_OV_PLAY_NOWAIT) {
- mdp4_stat.overlay_play[pipe->mixer_num]++;
- mutex_unlock(&mfd->dma->ov_mutex);
- goto end;
- }
-#ifdef CONFIG_FB_MSM_MIPI_DSI
- if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
- mdp4_iommu_attach();
- mdp4_dsi_cmd_dma_busy_wait(mfd);
- mdp4_dsi_cmd_kickoff_video(mfd, pipe);
- }
-#else
- if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
- mdp4_mddi_dma_busy_wait(mfd);
- mdp4_mddi_kickoff_video(mfd, pipe);
- }
-#endif
- }
+ if (pipe->flags & MDP_OV_PLAY_NOWAIT) {
+ mdp4_stat.overlay_play[pipe->mixer_num]++;
+ mutex_unlock(&mfd->dma->ov_mutex);
+ goto end;
}
- /* write out DPP HSIC registers */
- if (pipe->flags & MDP_DPP_HSIC)
- mdp4_hsic_update(pipe);
+ if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
+ mdp4_mddi_dma_busy_wait(mfd);
+ mdp4_mddi_kickoff_video(mfd, pipe);
+ }
+
if (!(pipe->flags & MDP_OV_PLAY_NOWAIT))
mdp4_iommu_unmap(pipe);
mdp4_stat.overlay_play[pipe->mixer_num]++;
@@ -3315,6 +3355,7 @@
void mdp4_v4l2_overlay_clear(struct mdp4_overlay_pipe *pipe)
{
+ mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_down(pipe);
mdp4_overlay_pipe_free(pipe);
}
@@ -3380,6 +3421,10 @@
mdp4_mixer_stage_up(pipe);
+#ifdef V4L2_VSYNC
+ /*
+ * TODO: incorporate v4l2 into the vsync-driven mechanism
+ */
if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
mdp4_overlay_lcdc_vsync_push(mfd, pipe);
} else {
@@ -3395,6 +3440,8 @@
}
#endif
}
+#endif
+
done:
mutex_unlock(&mfd->dma->ov_mutex);
return err;
diff --git a/drivers/video/msm/mdp4_overlay_dsi_cmd.c b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
index d0ac1a6..7998d8b 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_cmd.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
@@ -17,7 +17,6 @@
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/semaphore.h>
@@ -32,18 +31,525 @@
#include "mdp4.h"
#include "mipi_dsi.h"
-static struct mdp4_overlay_pipe *dsi_pipe;
-static struct msm_fb_data_type *dsi_mfd;
-static int busy_wait_cnt;
static int dsi_state;
-static unsigned long tout_expired;
#define TOUT_PERIOD HZ /* 1 second */
#define MS_100 (HZ/10) /* 100 ms */
static int vsync_start_y_adjust = 4;
-struct timer_list dsi_clock_timer;
+#define MAX_CONTROLLER 1
+#define VSYNC_EXPIRE_TICK 2
+#define BACKLIGHT_MAX 4
+
+struct backlight {
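+/* small ring buffer of pending backlight levels, drained at commit time */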
+ int put;
+ int get;
+ int tot;
+ int blist[BACKLIGHT_MAX];
+};
+
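+/*
+ * per-controller vsync state: double-buffered update list (vlist),
+ * pending backlight levels and dsi-cmd clock/irq bookkeeping
+ */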
+static struct vsycn_ctrl {
+ struct device *dev;
+ int inited;
+ int update_ndx;
+ int expire_tick;
+ uint32 dmap_intr_tot;
+ uint32 rdptr_intr_tot;
+ uint32 rdptr_sirq_tot;
+ atomic_t suspend;
+ int dmap_wait_cnt;
+ int wait_vsync_cnt;
+ int commit_cnt;
+ struct mutex update_lock;
+ struct completion dmap_comp;
+ struct completion vsync_comp;
+ spinlock_t dmap_spin_lock;
+ spinlock_t spin_lock;
+ struct mdp4_overlay_pipe *base_pipe;
+ struct vsync_update vlist[2];
+ struct backlight blight;
+ int vsync_irq_enabled;
+ ktime_t vsync_time;
+ struct work_struct vsync_work;
+} vsync_ctrl_db[MAX_CONTROLLER];
+
+static void vsync_irq_enable(int intr, int term)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+ /* no need to clear other interrupts for command mode */
+ outp32(MDP_INTR_CLEAR, INTR_PRIMARY_RDPTR);
+ mdp_intr_mask |= intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_enable_irq(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+static void vsync_irq_disable(int intr, int term)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+ /* no need to clear other interrupts for command mode */
+ mdp_intr_mask &= ~intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_disable_irq_nosync(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+static int mdp4_backlight_get_level(struct vsycn_ctrl *vctrl)
+{
+ int level = -1;
+
+ mutex_lock(&vctrl->update_lock);
+ if (vctrl->blight.tot) {
+ level = vctrl->blight.blist[vctrl->blight.get];
+ vctrl->blight.get++;
+ vctrl->blight.get %= BACKLIGHT_MAX;
+ vctrl->blight.tot--;
+ pr_debug("%s: tot=%d put=%d get=%d level=%d\n", __func__,
+ vctrl->blight.tot, vctrl->blight.put, vctrl->blight.get, level);
+ }
+ mutex_unlock(&vctrl->update_lock);
+ return level;
+}
+
+void mdp4_backlight_put_level(int cndx, int level)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ mutex_lock(&vctrl->update_lock);
+ vctrl->blight.blist[vctrl->blight.put] = level;
+ vctrl->blight.put++;
+ vctrl->blight.put %= BACKLIGHT_MAX;
+ if (vctrl->blight.tot == BACKLIGHT_MAX) {
+ /* drop the oldest one */
+ vctrl->blight.get++;
+ vctrl->blight.get %= BACKLIGHT_MAX;
+ } else {
+ vctrl->blight.tot++;
+ }
+ mutex_unlock(&vctrl->update_lock);
+ pr_debug("%s: tot=%d put=%d get=%d level=%d\n", __func__,
+ vctrl->blight.tot, vctrl->blight.put, vctrl->blight.get, level);
+
+ if (mdp4_overlay_dsi_state_get() <= ST_DSI_SUSPEND)
+ return;
+}
+
+static int mdp4_backlight_commit_level(struct vsycn_ctrl *vctrl)
+{
+ int level;
+ int cnt = 0;
+
+ if (vctrl->blight.tot) { /* has new backlight */
+ if (mipi_dsi_ctrl_lock(0)) {
+ level = mdp4_backlight_get_level(vctrl);
+ mipi_dsi_cmd_backlight_tx(level);
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+
+void mdp4_blt_dmap_cfg(struct mdp4_overlay_pipe *pipe)
+{
+ uint32 off, addr;
+ int bpp;
+
+ if (pipe->ov_blt_addr == 0)
+ return;
+
+#ifdef BLT_RGB565
+ bpp = 2; /* overlay output is RGB565 */
+#else
+ bpp = 3; /* overlay output is RGB888 */
+#endif
+ off = 0;
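+	/* ping-pong: an odd done count selects the second half of the blt buffer */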
+ if (pipe->blt_dmap_done & 0x01)
+ off = pipe->src_height * pipe->src_width * bpp;
+ addr = pipe->dma_blt_addr + off;
+
+ /* dmap */
+ MDP_OUTP(MDP_BASE + 0x90008, addr);
+}
+
+
+void mdp4_blt_overlay0_cfg(struct mdp4_overlay_pipe *pipe)
+{
+ uint32 off, addr;
+ int bpp;
+ char *overlay_base;
+
+ if (pipe->ov_blt_addr == 0)
+ return;
+
+#ifdef BLT_RGB565
+ bpp = 2; /* overlay output is RGB565 */
+#else
+ bpp = 3; /* overlay output is RGB888 */
+#endif
+ off = 0;
+ if (pipe->blt_ov_done & 0x01)
+ off = pipe->src_height * pipe->src_width * bpp;
+ addr = pipe->ov_blt_addr + off;
+ /* overlay 0 */
+ overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
+ outpdw(overlay_base + 0x000c, addr);
+ outpdw(overlay_base + 0x001c, addr);
+}
+
+static void vsync_commit_kickoff_dmap(struct mdp4_overlay_pipe *pipe)
+{
+ if (mipi_dsi_ctrl_lock(1)) {
+ mdp4_stat.kickoff_dmap++;
+ pipe->blt_dmap_koff++;
+ vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+ outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */
+ mb();
+ }
+}
+
+static void vsync_commit_kickoff_ov0(struct mdp4_overlay_pipe *pipe, int blt)
+{
+ int locked = 1;
+
+ if (blt)
+ vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+ else
+ locked = mipi_dsi_ctrl_lock(1);
+
+ if (locked) {
+ mdp4_stat.kickoff_ov0++;
+ pipe->blt_ov_koff++;
+ outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay engine */
+ mb();
+ }
+}
+
+/*
+ * mdp4_dsi_cmd_pipe_queue:
+ * called from thread context
+ */
+void mdp4_dsi_cmd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pp;
+ int undx;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+
+ pp = &vp->plist[pipe->pipe_ndx - 1]; /* ndx starts from 1 */
+
+ pr_debug("%s: vndx=%d pipe_ndx=%d expire=%x pid=%d\n", __func__,
+ undx, pipe->pipe_ndx, vctrl->expire_tick, current->pid);
+
+ *pp = *pipe; /* keep it */
+ vp->update_cnt++;
+
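+ /*
+ * expire_tick of zero means the clocks and the read-pointer irq
+ * were previously turned off; re-enable them before re-arming the
+ * idle countdown below.
+ */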
+ if (vctrl->expire_tick == 0) {
+ mipi_dsi_clk_cfg(1);
+ mdp_clk_ctrl(1);
+ vsync_irq_enable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
+ }
+ vctrl->expire_tick = VSYNC_EXPIRE_TICK;
+ mutex_unlock(&vctrl->update_lock);
+}
+
+int mdp4_dsi_cmd_pipe_commit(void)
+{
+
+ int i, undx, cnt;
+ int mixer = 0;
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+ int diff;
+
+ vctrl = &vsync_ctrl_db[0];
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+ pipe = vctrl->base_pipe;
+ mixer = pipe->mixer_num;
+
+ pr_debug("%s: vndx=%d cnt=%d expire=%x pid=%d\n", __func__,
+ undx, vp->update_cnt, vctrl->expire_tick, current->pid);
+
+ cnt = 0;
+ if (vp->update_cnt == 0) {
+ mutex_unlock(&vctrl->update_lock);
+ return cnt;
+ }
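+ /* toggle to the other update list so new queues land there */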
+ vctrl->update_ndx++;
+ vctrl->update_ndx &= 0x01;
+ vctrl->commit_cnt++;
+ vp->update_cnt = 0; /* reset */
+ mutex_unlock(&vctrl->update_lock);
+
+ mdp4_backlight_commit_level(vctrl);
+
+ /* free previous committed iommu back to pool */
+ mdp4_overlay_iommu_unmap_freelist(mixer);
+
+ pipe = vp->plist;
+ for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+ if (pipe->pipe_used) {
+ cnt++;
+ mdp4_overlay_vsync_commit(pipe);
+ /* free previous iommu to freelist
+ * which will be freed at next
+ * pipe_commit
+ */
+ mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+ pipe->pipe_used = 0; /* clear */
+ }
+ }
+ mdp4_mixer_stage_commit(mixer);
+
+
+ pr_debug("%s: intr=%d expire=%d cpu=%d\n", __func__,
+ vctrl->rdptr_intr_tot, vctrl->expire_tick, smp_processor_id());
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ pipe = vctrl->base_pipe;
+ if (pipe->blt_changed) {
+ /* blt configuration changed */
+ pipe->blt_changed = 0;
+ mdp4_overlayproc_cfg(pipe);
+ mdp4_overlay_dmap_xy(pipe);
+ }
+
+ if (pipe->ov_blt_addr) {
+ diff = pipe->blt_ov_koff - pipe->blt_ov_done;
+ if (diff < 1) {
+ mdp4_blt_overlay0_cfg(pipe);
+ vsync_commit_kickoff_ov0(pipe, 1);
+ }
+ } else {
+ vsync_commit_kickoff_ov0(pipe, 0);
+ }
+
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ return cnt;
+}
+
+void mdp4_dsi_cmd_vsync_ctrl(int cndx, int enable)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (vctrl->vsync_irq_enabled == enable)
+ return;
+
+ vctrl->vsync_irq_enabled = enable;
+
+ mutex_lock(&vctrl->update_lock);
+ if (enable) {
+ mipi_dsi_clk_cfg(1);
+ mdp_clk_ctrl(1);
+ vsync_irq_enable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
+ } else {
+ mipi_dsi_clk_cfg(0);
+ mdp_clk_ctrl(0);
+ vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
+ vctrl->expire_tick = 0;
+ }
+ mutex_unlock(&vctrl->update_lock);
+}
+
+void mdp4_dsi_cmd_wait4vsync(int cndx, long long *vtime)
+{
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (vctrl->wait_vsync_cnt == 0)
+ INIT_COMPLETION(vctrl->vsync_comp);
+ vctrl->wait_vsync_cnt++;
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ wait_for_completion(&vctrl->vsync_comp);
+
+ *vtime = ktime_to_ns(vctrl->vsync_time);
+}
+
+
+/*
+ * primary_rdptr_isr:
+ * called from interrupt context
+ */
+
+static void primary_rdptr_isr(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
+ vctrl->rdptr_intr_tot++;
+ vctrl->vsync_time = ktime_get();
+ schedule_work(&vctrl->vsync_work);
+}
+
+void mdp4_dmap_done_dsi_cmd(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ int diff;
+
+ vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+
+ vctrl = &vsync_ctrl_db[cndx];
+ vctrl->dmap_intr_tot++;
+ pipe = vctrl->base_pipe;
+
+ if (pipe->ov_blt_addr == 0) {
+ mdp4_overlay_dma_commit(cndx);
+ return;
+ }
+
+ /* blt enabled */
+ spin_lock(&vctrl->spin_lock);
+ pipe->blt_dmap_done++;
+ diff = pipe->blt_ov_done - pipe->blt_dmap_done;
+ spin_unlock(&vctrl->spin_lock);
+ pr_debug("%s: ov_done=%d dmap_done=%d ov_koff=%d dmap_koff=%d\n",
+ __func__, pipe->blt_ov_done, pipe->blt_dmap_done,
+ pipe->blt_ov_koff, pipe->blt_dmap_koff);
+ if (diff <= 0) {
+ if (pipe->blt_end) {
+ pipe->blt_end = 0;
+ pipe->ov_blt_addr = 0;
+ pipe->dma_blt_addr = 0;
+ pipe->blt_changed = 1;
+ pr_info("%s: BLT-END\n", __func__);
+ }
+ }
+ spin_unlock(&dsi_clk_lock);
+}
+
+/*
+ * mdp4_overlay0_done_dsi_cmd: called from isr
+ */
+void mdp4_overlay0_done_dsi_cmd(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ int diff;
+
+ vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ spin_lock(&vctrl->spin_lock);
+ pipe->blt_ov_done++;
+ diff = pipe->blt_ov_done - pipe->blt_dmap_done;
+ spin_unlock(&vctrl->spin_lock);
+
+ pr_debug("%s: ov_done=%d dmap_done=%d ov_koff=%d dmap_koff=%d diff=%d\n",
+ __func__, pipe->blt_ov_done, pipe->blt_dmap_done,
+ pipe->blt_ov_koff, pipe->blt_dmap_koff, diff);
+
+ if (pipe->ov_blt_addr == 0) {
+ /* blt disabled */
+ pr_debug("%s: NON-BLT\n", __func__);
+ return;
+ }
+
+ if (diff == 1) {
+ mdp4_blt_dmap_cfg(pipe);
+ vsync_commit_kickoff_dmap(pipe);
+ }
+}
+
+static void send_vsync_work(struct work_struct *work)
+{
+ struct vsycn_ctrl *vctrl =
+ container_of(work, typeof(*vctrl), vsync_work);
+ char buf[64];
+ char *envp[2];
+
+ snprintf(buf, sizeof(buf), "VSYNC=%llu",
+ ktime_to_ns(vctrl->vsync_time));
+ envp[0] = buf;
+ envp[1] = NULL;
+ kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+
+void mdp4_dsi_rdptr_init(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->inited)
+ return;
+
+ vctrl->inited = 1;
+ vctrl->update_ndx = 0;
+ vctrl->blight.put = 0;
+ vctrl->blight.get = 0;
+ vctrl->blight.tot = 0;
+ mutex_init(&vctrl->update_lock);
+ init_completion(&vctrl->vsync_comp);
+ init_completion(&vctrl->dmap_comp);
+ spin_lock_init(&vctrl->spin_lock);
+ spin_lock_init(&vctrl->dmap_spin_lock);
+ INIT_WORK(&vctrl->vsync_work, send_vsync_work);
+}
+
+void mdp4_primary_rdptr(void)
+{
+ primary_rdptr_isr(0);
+}
void mdp4_overlay_dsi_state_set(int state)
{
@@ -59,18 +565,6 @@
return dsi_state;
}
-static void dsi_clock_tout(unsigned long data)
-{
- spin_lock(&dsi_clk_lock);
- if (mipi_dsi_clk_on) {
- if (dsi_state == ST_DSI_PLAYING) {
- mipi_dsi_turn_off_clks();
- mdp4_overlay_dsi_state_set(ST_DSI_CLK_OFF);
- }
- }
- spin_unlock(&dsi_clk_lock);
-}
-
static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
{
/*
@@ -86,11 +580,6 @@
return xres * bpp;
}
-void mdp4_dsi_cmd_del_timer(void)
-{
- del_timer_sync(&dsi_clock_timer);
-}
-
void mdp4_mipi_vsync_enable(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe, int which)
{
@@ -121,67 +610,98 @@
}
}
-void mdp4_dsi_cmd_base_swap(struct mdp4_overlay_pipe *pipe)
+void mdp4_dsi_cmd_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
{
- dsi_pipe = pipe;
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ vctrl->base_pipe = pipe;
+}
+
+static void mdp4_overlay_setup_pipe_addr(struct msm_fb_data_type *mfd,
+ struct mdp4_overlay_pipe *pipe)
+{
+ MDPIBUF *iBuf = &mfd->ibuf;
+ struct fb_info *fbi;
+ int bpp;
+ uint8 *src;
+
+ /* whole screen for base layer */
+ src = (uint8 *) iBuf->buf;
+ fbi = mfd->fbi;
+
+ if (pipe->is_3d) {
+ bpp = fbi->var.bits_per_pixel / 8;
+ pipe->src_height = pipe->src_height_3d;
+ pipe->src_width = pipe->src_width_3d;
+ pipe->src_h = pipe->src_height_3d;
+ pipe->src_w = pipe->src_width_3d;
+ pipe->dst_h = pipe->src_height_3d;
+ pipe->dst_w = pipe->src_width_3d;
+ pipe->srcp0_ystride = msm_fb_line_length(0,
+ pipe->src_width, bpp);
+ } else {
+ /* 2D */
+ pipe->src_height = fbi->var.yres;
+ pipe->src_width = fbi->var.xres;
+ pipe->src_h = fbi->var.yres;
+ pipe->src_w = fbi->var.xres;
+ pipe->dst_h = fbi->var.yres;
+ pipe->dst_w = fbi->var.xres;
+ pipe->srcp0_ystride = fbi->fix.line_length;
+ }
+ pipe->src_y = 0;
+ pipe->src_x = 0;
+ pipe->dst_y = 0;
+ pipe->dst_x = 0;
+ pipe->srcp0_addr = (uint32)src;
}
void mdp4_overlay_update_dsi_cmd(struct msm_fb_data_type *mfd)
{
- MDPIBUF *iBuf = &mfd->ibuf;
- uint8 *src;
int ptype;
struct mdp4_overlay_pipe *pipe;
- int bpp;
int ret;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+
if (mfd->key != MFD_KEY)
return;
- dsi_mfd = mfd; /* keep it */
+ vctrl = &vsync_ctrl_db[cndx];
/* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ mdp_clk_ctrl(1);
- if (dsi_pipe == NULL) {
- ptype = mdp4_overlay_format2type(mfd->fb_imgType);
- if (ptype < 0)
- printk(KERN_INFO "%s: format2type failed\n", __func__);
- pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
- if (pipe == NULL)
- printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
- pipe->pipe_used++;
- pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
- pipe->mixer_num = MDP4_MIXER0;
- pipe->src_format = mfd->fb_imgType;
- mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_DSI_CMD);
- ret = mdp4_overlay_format2pipe(pipe);
- if (ret < 0)
- printk(KERN_INFO "%s: format2type failed\n", __func__);
-
- init_timer(&dsi_clock_timer);
- dsi_clock_timer.function = dsi_clock_tout;
- dsi_clock_timer.data = (unsigned long) mfd;;
- dsi_clock_timer.expires = 0xffffffff;
- add_timer(&dsi_clock_timer);
- tout_expired = jiffies;
-
- dsi_pipe = pipe; /* keep it */
-
- mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
- pipe->ov_blt_addr = 0;
- pipe->dma_blt_addr = 0;
-
- } else {
- pipe = dsi_pipe;
- }
-
- if (pipe->pipe_used == 0 ||
- pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) {
- pr_err("%s: NOT baselayer\n", __func__);
- mutex_unlock(&mfd->dma->ov_mutex);
+ ptype = mdp4_overlay_format2type(mfd->fb_imgType);
+ if (ptype < 0)
+ printk(KERN_INFO "%s: format2type failed\n", __func__);
+ pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
+ if (pipe == NULL) {
+ printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
return;
}
+ pipe->pipe_used++;
+ pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
+ pipe->mixer_num = MDP4_MIXER0;
+ pipe->src_format = mfd->fb_imgType;
+ mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_DSI_CMD);
+ ret = mdp4_overlay_format2pipe(pipe);
+ if (ret < 0)
+ printk(KERN_INFO "%s: format2type failed\n", __func__);
+
+ vctrl->base_pipe = pipe; /* keep it */
+ mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
+ pipe->ov_blt_addr = 0;
+ pipe->dma_blt_addr = 0;
+
+ MDP_OUTP(MDP_BASE + 0x021c, 0x10); /* read pointer */
/*
* configure dsi stream id
@@ -190,41 +710,8 @@
MDP_OUTP(MDP_BASE + 0x000a0, 0x10);
/* disable dsi trigger */
MDP_OUTP(MDP_BASE + 0x000a4, 0x00);
- /* whole screen for base layer */
- src = (uint8 *) iBuf->buf;
-
- {
- struct fb_info *fbi;
-
- fbi = mfd->fbi;
- if (pipe->is_3d) {
- bpp = fbi->var.bits_per_pixel / 8;
- pipe->src_height = pipe->src_height_3d;
- pipe->src_width = pipe->src_width_3d;
- pipe->src_h = pipe->src_height_3d;
- pipe->src_w = pipe->src_width_3d;
- pipe->dst_h = pipe->src_height_3d;
- pipe->dst_w = pipe->src_width_3d;
- pipe->srcp0_ystride = msm_fb_line_length(0,
- pipe->src_width, bpp);
- } else {
- /* 2D */
- pipe->src_height = fbi->var.yres;
- pipe->src_width = fbi->var.xres;
- pipe->src_h = fbi->var.yres;
- pipe->src_w = fbi->var.xres;
- pipe->dst_h = fbi->var.yres;
- pipe->dst_w = fbi->var.xres;
- pipe->srcp0_ystride = fbi->fix.line_length;
- }
- pipe->src_y = 0;
- pipe->src_x = 0;
- pipe->dst_y = 0;
- pipe->dst_x = 0;
- pipe->srcp0_addr = (uint32)src;
- }
-
+ mdp4_overlay_setup_pipe_addr(mfd, pipe);
mdp4_overlay_rgb_setup(pipe);
@@ -238,10 +725,8 @@
mdp4_overlay_dmap_cfg(mfd, 0);
- mdp4_mipi_vsync_enable(mfd, pipe, 0);
-
/* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ mdp_clk_ctrl(0);
wmb();
}
@@ -251,18 +736,18 @@
struct msmfb_overlay_3d *r3d)
{
struct fb_info *fbi;
- struct mdp4_overlay_pipe *pipe;
int bpp;
uint8 *src = NULL;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
- if (dsi_pipe == NULL)
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (pipe == NULL)
return;
- dsi_pipe->is_3d = r3d->is_3d;
- dsi_pipe->src_height_3d = r3d->height;
- dsi_pipe->src_width_3d = r3d->width;
-
- pipe = dsi_pipe;
if (pipe->pipe_used == 0 ||
pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) {
pr_err("%s: NOT baselayer\n", __func__);
@@ -270,16 +755,15 @@
return;
}
+ pipe->is_3d = r3d->is_3d;
+ pipe->src_height_3d = r3d->height;
+ pipe->src_width_3d = r3d->width;
+
if (pipe->is_3d)
mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_SIDE_BY_SIDE);
else
mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_NONE);
- if (mfd->panel_power_on) {
- mdp4_dsi_cmd_dma_busy_wait(mfd);
- mdp4_dsi_blt_dmap_busy_wait(mfd);
- }
-
fbi = mfd->fbi;
if (pipe->is_3d) {
bpp = fbi->var.bits_per_pixel / 8;
@@ -328,28 +812,38 @@
int mdp4_dsi_overlay_blt_start(struct msm_fb_data_type *mfd)
{
unsigned long flag;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
- pr_debug("%s: blt_end=%d ov_blt_addr=%x pid=%d\n",
- __func__, dsi_pipe->blt_end, (int)dsi_pipe->ov_blt_addr, current->pid);
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ pr_debug("%s: blt_end=%d blt_addr=%x pid=%d\n",
+ __func__, pipe->blt_end, (int)pipe->ov_blt_addr, current->pid);
mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
if (mfd->ov0_wb_buf->write_addr == 0) {
- pr_info("%s: no blt_base assigned\n", __func__);
+ pr_err("%s: no blt_base assigned\n", __func__);
return -EBUSY;
}
- if (dsi_pipe->ov_blt_addr == 0) {
- mdp4_dsi_cmd_dma_busy_wait(mfd);
- spin_lock_irqsave(&mdp_spin_lock, flag);
- dsi_pipe->blt_end = 0;
- dsi_pipe->blt_cnt = 0;
- dsi_pipe->ov_cnt = 0;
- dsi_pipe->dmap_cnt = 0;
- dsi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
- dsi_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+ if (pipe->ov_blt_addr == 0) {
+ spin_lock_irqsave(&vctrl->spin_lock, flag);
+ pipe->blt_end = 0;
+ pipe->blt_cnt = 0;
+ pipe->blt_changed = 1;
+ pipe->ov_cnt = 0;
+ pipe->dmap_cnt = 0;
+ pipe->blt_ov_koff = 0;
+ pipe->blt_dmap_koff = 0;
+ pipe->blt_ov_done = 0;
+ pipe->blt_dmap_done = 0;
+ pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+ pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
mdp4_stat.blt_dsi_cmd++;
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
return 0;
}
@@ -359,32 +853,26 @@
int mdp4_dsi_overlay_blt_stop(struct msm_fb_data_type *mfd)
{
unsigned long flag;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
- pr_debug("%s: blt_end=%d ov_blt_addr=%x\n",
- __func__, dsi_pipe->blt_end, (int)dsi_pipe->ov_blt_addr);
+ pr_info("%s: blt_end=%d blt_addr=%x pid=%d\n",
+ __func__, pipe->blt_end, (int)pipe->ov_blt_addr, current->pid);
- if ((dsi_pipe->blt_end == 0) && dsi_pipe->ov_blt_addr) {
- spin_lock_irqsave(&mdp_spin_lock, flag);
- dsi_pipe->blt_end = 1; /* mark as end */
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ if ((pipe->blt_end == 0) && pipe->ov_blt_addr) {
+ spin_lock_irqsave(&vctrl->spin_lock, flag);
+ pipe->blt_end = 1; /* mark as end */
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
return 0;
}
return -EBUSY;
}
-int mdp4_dsi_overlay_blt_offset(struct msm_fb_data_type *mfd,
- struct msmfb_overlay_blt *req)
-{
- req->offset = 0;
- req->width = dsi_pipe->src_width;
- req->height = dsi_pipe->src_height;
- req->bpp = dsi_pipe->bpp;
-
- return sizeof(*req);
-}
-
void mdp4_dsi_overlay_blt(struct msm_fb_data_type *mfd,
struct msmfb_overlay_blt *req)
{
@@ -395,332 +883,123 @@
}
-void mdp4_blt_xy_update(struct mdp4_overlay_pipe *pipe)
+int mdp4_dsi_cmd_on(struct platform_device *pdev)
{
- uint32 off, addr, addr2;
- int bpp;
- char *overlay_base;
+ int ret = 0;
+ int cndx = 0;
+ struct msm_fb_data_type *mfd;
+ struct vsycn_ctrl *vctrl;
+ pr_info("%s+:\n", __func__);
- if (pipe->ov_blt_addr == 0)
- return;
+ mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+ vctrl = &vsync_ctrl_db[cndx];
+ vctrl->dev = mfd->fbi->dev;
-#ifdef BLT_RGB565
- bpp = 2; /* overlay ouput is RGB565 */
-#else
- bpp = 3; /* overlay ouput is RGB888 */
-#endif
- off = 0;
- if (pipe->dmap_cnt & 0x01)
- off = pipe->src_height * pipe->src_width * bpp;
- addr = pipe->dma_blt_addr + off;
+ mdp_clk_ctrl(1);
- /* dmap */
- MDP_OUTP(MDP_BASE + 0x90008, addr);
-
- off = 0;
- if (pipe->ov_cnt & 0x01)
- off = pipe->src_height * pipe->src_width * bpp;
- addr2 = pipe->ov_blt_addr + off;
- /* overlay 0 */
- overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
- outpdw(overlay_base + 0x000c, addr2);
- outpdw(overlay_base + 0x001c, addr2);
-}
-
-
-/*
- * mdp4_dmap_done_dsi: called from isr
- * DAM_P_DONE only used when blt enabled
- */
-void mdp4_dma_p_done_dsi(struct mdp_dma_data *dma)
-{
- int diff;
-
- dsi_pipe->dmap_cnt++;
- diff = dsi_pipe->ov_cnt - dsi_pipe->dmap_cnt;
- pr_debug("%s: ov_cnt=%d dmap_cnt=%d\n",
- __func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
-
- if (diff <= 0) {
- spin_lock(&mdp_spin_lock);
- dma->dmap_busy = FALSE;
- complete(&dma->dmap_comp);
- spin_unlock(&mdp_spin_lock);
- if (dsi_pipe->blt_end) {
- dsi_pipe->blt_end = 0;
- dsi_pipe->dma_blt_addr = 0;
- dsi_pipe->ov_blt_addr = 0;
- pr_debug("%s: END, ov_cnt=%d dmap_cnt=%d\n",
- __func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
- mdp_intr_mask &= ~INTR_DMA_P_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- }
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
- mdp_disable_irq_nosync(MDP_DMA2_TERM); /* disable intr */
- return;
- }
-
- spin_lock(&mdp_spin_lock);
- dma->busy = FALSE;
- spin_unlock(&mdp_spin_lock);
- complete(&dma->comp);
- if (busy_wait_cnt)
- busy_wait_cnt--;
-
- pr_debug("%s: kickoff dmap\n", __func__);
-
- mdp4_blt_xy_update(dsi_pipe);
- /* kick off dmap */
- outpdw(MDP_BASE + 0x000c, 0x0);
- mdp4_stat.kickoff_dmap++;
- /* trigger dsi cmd engine */
- mipi_dsi_cmd_mdp_start();
-
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
-}
-
-
-/*
- * mdp4_overlay0_done_dsi_cmd: called from isr
- */
-void mdp4_overlay0_done_dsi_cmd(struct mdp_dma_data *dma)
-{
- int diff;
-
- if (dsi_pipe->ov_blt_addr == 0) {
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
- spin_lock(&mdp_spin_lock);
- dma->busy = FALSE;
- spin_unlock(&mdp_spin_lock);
- complete(&dma->comp);
- if (busy_wait_cnt)
- busy_wait_cnt--;
- mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
- return;
- }
-
- /* blt enabled */
- if (dsi_pipe->blt_end == 0)
- dsi_pipe->ov_cnt++;
-
- pr_debug("%s: ov_cnt=%d dmap_cnt=%d\n",
- __func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
-
- if (dsi_pipe->blt_cnt == 0) {
- /* first kickoff since blt enabled */
- mdp_intr_mask |= INTR_DMA_P_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- }
- dsi_pipe->blt_cnt++;
-
- diff = dsi_pipe->ov_cnt - dsi_pipe->dmap_cnt;
- if (diff >= 2) {
- mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
- return;
- }
-
- spin_lock(&mdp_spin_lock);
- dma->busy = FALSE;
- dma->dmap_busy = TRUE;
- spin_unlock(&mdp_spin_lock);
- complete(&dma->comp);
- if (busy_wait_cnt)
- busy_wait_cnt--;
-
- pr_debug("%s: kickoff dmap\n", __func__);
-
- mdp4_blt_xy_update(dsi_pipe);
- mdp_enable_irq(MDP_DMA2_TERM); /* enable intr */
- /* kick off dmap */
- outpdw(MDP_BASE + 0x000c, 0x0);
- mdp4_stat.kickoff_dmap++;
- /* trigger dsi cmd engine */
- mipi_dsi_cmd_mdp_start();
- mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
-}
-
-void mdp4_dsi_cmd_overlay_restore(void)
-{
- /* mutex holded by caller */
- if (dsi_mfd && dsi_pipe) {
- mdp4_dsi_cmd_dma_busy_wait(dsi_mfd);
- mipi_dsi_mdp_busy_wait(dsi_mfd);
- mdp4_overlay_update_dsi_cmd(dsi_mfd);
-
- if (dsi_pipe->ov_blt_addr)
- mdp4_dsi_blt_dmap_busy_wait(dsi_mfd);
- mdp4_dsi_cmd_overlay_kickoff(dsi_mfd, dsi_pipe);
- }
-}
-
-void mdp4_dsi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd)
-{
- unsigned long flag;
- int need_wait = 0;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (mfd->dma->dmap_busy == TRUE) {
- INIT_COMPLETION(mfd->dma->dmap_comp);
- need_wait++;
- }
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- if (need_wait) {
- /* wait until DMA finishes the current job */
- wait_for_completion(&mfd->dma->dmap_comp);
- }
-}
-
-/*
- * mdp4_dsi_cmd_dma_busy_wait: check dsi link activity
- * dsi link is a shared resource and it can only be used
- * while it is in idle state.
- * ov_mutex need to be acquired before call this function.
- */
-void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
-{
- unsigned long flag;
- int need_wait = 0;
-
-
-
- if (dsi_clock_timer.function) {
- if (time_after(jiffies, tout_expired)) {
- tout_expired = jiffies + TOUT_PERIOD;
- mod_timer(&dsi_clock_timer, tout_expired);
- tout_expired -= MS_100;
- }
- }
-
- pr_debug("%s: start pid=%d dsi_clk_on=%d\n",
- __func__, current->pid, mipi_dsi_clk_on);
-
- /* satrt dsi clock if necessary */
- spin_lock_bh(&dsi_clk_lock);
- if (mipi_dsi_clk_on == 0)
- mipi_dsi_turn_on_clks();
- spin_unlock_bh(&dsi_clk_lock);
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (mfd->dma->busy == TRUE) {
- if (busy_wait_cnt == 0)
- INIT_COMPLETION(mfd->dma->comp);
- busy_wait_cnt++;
- need_wait++;
- }
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- if (need_wait) {
- /* wait until DMA finishes the current job */
- pr_debug("%s: pending pid=%d dsi_clk_on=%d\n",
- __func__, current->pid, mipi_dsi_clk_on);
- wait_for_completion(&mfd->dma->comp);
- }
- pr_debug("%s: done pid=%d dsi_clk_on=%d\n",
- __func__, current->pid, mipi_dsi_clk_on);
-}
-
-void mdp4_dsi_cmd_kickoff_video(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
- /*
- * a video kickoff may happen before UI kickoff after
- * blt enabled. mdp4_overlay_update_dsi_cmd() need
- * to be called before kickoff.
- * vice versa for blt disabled.
- */
- if (dsi_pipe->ov_blt_addr && dsi_pipe->blt_cnt == 0)
- mdp4_overlay_update_dsi_cmd(mfd); /* first time */
- else if (dsi_pipe->ov_blt_addr == 0 && dsi_pipe->blt_cnt) {
- mdp4_overlay_update_dsi_cmd(mfd); /* last time */
- dsi_pipe->blt_cnt = 0;
- }
-
- pr_debug("%s: ov_blt_addr=%d blt_cnt=%d\n",
- __func__, (int)dsi_pipe->ov_blt_addr, dsi_pipe->blt_cnt);
-
- if (dsi_pipe->ov_blt_addr)
- mdp4_dsi_blt_dmap_busy_wait(dsi_mfd);
-
- mdp4_dsi_cmd_overlay_kickoff(mfd, pipe);
-}
-
-void mdp4_dsi_cmd_kickoff_ui(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
-
- pr_debug("%s: pid=%d\n", __func__, current->pid);
- mdp4_dsi_cmd_overlay_kickoff(mfd, pipe);
-}
-
-
-void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
- unsigned long flag;
+ if (vctrl->base_pipe == NULL)
+ mdp4_overlay_update_dsi_cmd(mfd);
mdp4_iommu_attach();
- /* change mdp clk */
- mdp4_set_perf_level();
- mipi_dsi_mdp_busy_wait(mfd);
+ atomic_set(&vctrl->suspend, 0);
+ pr_info("%s-:\n", __func__);
- if (dsi_pipe->ov_blt_addr == 0)
- mipi_dsi_cmd_mdp_start();
- mdp4_overlay_dsi_state_set(ST_DSI_PLAYING);
+ return ret;
+}
- spin_lock_irqsave(&mdp_spin_lock, flag);
- mdp_enable_irq(MDP_OVERLAY0_TERM);
- mfd->dma->busy = TRUE;
- if (dsi_pipe->ov_blt_addr)
- mfd->dma->dmap_busy = TRUE;
- /* start OVERLAY pipe */
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
- mdp4_stat.kickoff_ov0++;
+int mdp4_dsi_cmd_off(struct platform_device *pdev)
+{
+ int ret = 0;
+ int cndx = 0;
+ struct msm_fb_data_type *mfd;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ pr_info("%s+:\n", __func__);
+
+ mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+ if (pipe == NULL) {
+ pr_err("%s: NO base pipe\n", __func__);
+ return ret;
+ }
+
+ atomic_set(&vctrl->suspend, 1);
+
+ /* make sure dsi clocks are on so that
+ * the dsi panel can be shut off at panel_next_off()
+ */
+ mipi_dsi_ahb_ctrl(1);
+ mipi_dsi_clk_enable();
+
+ mdp4_mixer_stage_down(pipe);
+ mdp4_overlay_pipe_free(pipe);
+ vctrl->base_pipe = NULL;
+
+ pr_info("%s-:\n", __func__);
+
+ /*
+ * footswitch off
+ * this will cause all mdp registers
+ * to be reset to their defaults
+ * after the footswitch is turned on later
+ */
+
+ return ret;
}
void mdp_dsi_cmd_overlay_suspend(struct msm_fb_data_type *mfd)
{
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
/* dis-engage rgb0 from mixer0 */
- if (dsi_pipe) {
+ if (pipe) {
if (mfd->ref_cnt == 0) {
/* adb stop */
- if (dsi_pipe->pipe_type == OVERLAY_TYPE_BF)
- mdp4_overlay_borderfill_stage_down(dsi_pipe);
+ if (pipe->pipe_type == OVERLAY_TYPE_BF)
+ mdp4_overlay_borderfill_stage_down(pipe);
- /* dsi_pipe == rgb1 */
- mdp4_overlay_unset_mixer(dsi_pipe->mixer_num);
- dsi_pipe = NULL;
+ /* pipe == rgb1 */
+ mdp4_overlay_unset_mixer(pipe->mixer_num);
+ vctrl->base_pipe = NULL;
} else {
- mdp4_mixer_stage_down(dsi_pipe);
- mdp4_iommu_unmap(dsi_pipe);
+ mdp4_mixer_stage_down(pipe);
+ mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 1);
}
}
}
void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd)
{
- mutex_lock(&mfd->dma->ov_mutex);
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
- if (mfd && mfd->panel_power_on) {
- mdp4_dsi_cmd_dma_busy_wait(mfd);
+ vctrl = &vsync_ctrl_db[cndx];
- if (dsi_pipe && dsi_pipe->ov_blt_addr)
- mdp4_dsi_blt_dmap_busy_wait(mfd);
+ if (!mfd->panel_power_on)
+ return;
- mdp4_overlay_update_dsi_cmd(mfd);
-
- mdp4_dsi_cmd_kickoff_ui(mfd, dsi_pipe);
- mdp4_iommu_unmap(dsi_pipe);
- /* signal if pan function is waiting for the update completion */
- if (mfd->pan_waiting) {
- mfd->pan_waiting = FALSE;
- complete(&mfd->pan_comp);
- }
+ pipe = vctrl->base_pipe;
+ if (pipe == NULL) {
+ pr_err("%s: NO base pipe\n", __func__);
+ return;
}
- mutex_unlock(&mfd->dma->ov_mutex);
+
+ if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
+ mdp4_mipi_vsync_enable(mfd, pipe, 0);
+ mdp4_overlay_setup_pipe_addr(mfd, pipe);
+ mdp4_dsi_cmd_pipe_queue(0, pipe);
+ }
+ mdp4_dsi_cmd_pipe_commit();
}
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index bc4476e..28b5cd5 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -24,6 +24,9 @@
#include <linux/spinlock.h>
#include <linux/fb.h>
#include <linux/msm_mdp.h>
+#include <linux/ktime.h>
+#include <linux/wakelock.h>
+#include <linux/time.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
@@ -40,38 +43,328 @@
static int first_pixel_start_y;
static int dsi_video_enabled;
-static struct mdp4_overlay_pipe *dsi_pipe;
-static struct completion dsi_video_comp;
-static int blt_cfg_changed;
+#define MAX_CONTROLLER 1
-static cmd_fxn_t display_on;
+static struct vsycn_ctrl {
+ struct device *dev;
+ int inited;
+ int update_ndx;
+ uint32 dmap_intr_cnt;
+ atomic_t suspend;
+ int dmap_wait_cnt;
+ int wait_vsync_cnt;
+ int blt_change;
+ int fake_vsync;
+ struct mutex update_lock;
+ struct completion dmap_comp;
+ struct completion vsync_comp;
+ spinlock_t spin_lock;
+ struct mdp4_overlay_pipe *base_pipe;
+ struct vsync_update vlist[2];
+ int vsync_irq_enabled;
+ ktime_t vsync_time;
+ struct work_struct vsync_work;
+} vsync_ctrl_db[MAX_CONTROLLER];
-static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
+static void vsync_irq_enable(int intr, int term)
{
- /*
- * The adreno GPU hardware requires that the pitch be aligned to
- * 32 pixels for color buffers, so for the cases where the GPU
- * is writing directly to fb0, the framebuffer pitch
- * also needs to be 32 pixel aligned
- */
+ unsigned long flag;
- if (fb_index == 0)
- return ALIGN(xres, 32) * bpp;
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+ outp32(MDP_INTR_CLEAR,
+ INTR_DMA_P_DONE | INTR_OVERLAY0_DONE | INTR_PRIMARY_VSYNC);
+ mdp_intr_mask |= intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_enable_irq(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ pr_debug("%s: IRQ-en done, term=%x\n", __func__, term);
+}
+
+static void vsync_irq_disable(int intr, int term)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+ outp32(MDP_INTR_CLEAR,
+ INTR_DMA_P_DONE | INTR_OVERLAY0_DONE | INTR_PRIMARY_VSYNC);
+ mdp_intr_mask &= ~intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_disable_irq_nosync(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ pr_debug("%s: IRQ-dis done, term=%x\n", __func__, term);
+}
+
+static void mdp4_overlay_dsi_video_start(void)
+{
+ if (!dsi_video_enabled) {
+ /* enable DSI block */
+ mdp4_iommu_attach();
+ mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
+ dsi_video_enabled = 1;
+ }
+}
+
+/*
+ * mdp4_dsi_video_pipe_queue:
+ * called from thread context
+ */
+void mdp4_dsi_video_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pp;
+ int undx;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ /* start timing generator & mmu if they are not started yet */
+ mdp4_overlay_dsi_video_start();
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+
+ pp = &vp->plist[pipe->pipe_ndx - 1]; /* ndx starts from 1 */
+
+ pr_debug("%s: vndx=%d pipe=%x ndx=%d num=%d pid=%d\n",
+ __func__, undx, (int)pipe, pipe->pipe_ndx, pipe->pipe_num,
+ current->pid);
+
+ *pp = *pipe; /* keep it */
+ vp->update_cnt++;
+ mutex_unlock(&vctrl->update_lock);
+ mdp4_stat.overlay_play[pipe->mixer_num]++;
+}
+
+
+static void mdp4_dsi_video_blt_ov_update(struct mdp4_overlay_pipe *pipe);
+
+int mdp4_dsi_video_pipe_commit(void)
+{
+
+ int i, undx;
+ int mixer = 0;
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+ int cnt = 0;
+
+ vctrl = &vsync_ctrl_db[0];
+
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+ pipe = vctrl->base_pipe;
+ mixer = pipe->mixer_num;
+
+ if (vp->update_cnt == 0) {
+ mutex_unlock(&vctrl->update_lock);
+ return cnt;
+ }
+
+ vctrl->update_ndx++;
+ vctrl->update_ndx &= 0x01;
+ vp->update_cnt = 0; /* reset */
+ mutex_unlock(&vctrl->update_lock);
+
+ /* free previous committed iommu back to pool */
+ mdp4_overlay_iommu_unmap_freelist(mixer);
+
+ pipe = vp->plist;
+ for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+ if (pipe->pipe_used) {
+ cnt++;
+ mdp4_overlay_vsync_commit(pipe);
+ /* free previous iommu to freelist
+ * which will be freed at next
+ * pipe_commit
+ */
+ mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+ pipe->pipe_used = 0; /* clear */
+ }
+ }
+
+ mdp4_mixer_stage_commit(mixer);
+
+ pipe = vctrl->base_pipe;
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (pipe->ov_blt_addr) {
+ mdp4_dsi_video_blt_ov_update(pipe);
+ pipe->blt_ov_done++;
+ vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+ mb();
+ pipe->blt_ov_koff++;
+ /* kickoff overlay engine */
+ mdp4_stat.kickoff_ov0++;
+ outpdw(MDP_BASE + 0x0004, 0);
+ } else if (vctrl->dmap_intr_cnt == 0) {
+ /* schedule second phase update at dmap */
+ vctrl->dmap_intr_cnt++;
+ vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+ }
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ mdp4_stat.overlay_commit[pipe->mixer_num]++;
+
+ return cnt;
+}
+
+void mdp4_dsi_video_vsync_ctrl(int cndx, int enable)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (vctrl->fake_vsync) {
+ vctrl->fake_vsync = 0;
+ schedule_work(&vctrl->vsync_work);
+ }
+
+ if (vctrl->vsync_irq_enabled == enable)
+ return;
+
+ pr_debug("%s: vsync enable=%d\n", __func__, enable);
+
+ vctrl->vsync_irq_enabled = enable;
+
+ if (enable)
+ vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
else
- return xres * bpp;
+ vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
}
-void mdp4_dsi_video_fxn_register(cmd_fxn_t fxn)
+void mdp4_dsi_video_wait4vsync(int cndx, long long *vtime)
{
- display_on = fxn;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (atomic_read(&vctrl->suspend) > 0) {
+ *vtime = -1;
+ return;
+ }
+
+ /* start timing generator & mmu if they are not started yet */
+ mdp4_overlay_dsi_video_start();
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (vctrl->wait_vsync_cnt == 0)
+ INIT_COMPLETION(vctrl->vsync_comp);
+
+ vctrl->wait_vsync_cnt++;
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ wait_for_completion(&vctrl->vsync_comp);
+ mdp4_stat.wait4vsync0++;
+
+ *vtime = ktime_to_ns(vctrl->vsync_time);
}
-static void mdp4_overlay_dsi_video_wait4event(struct msm_fb_data_type *mfd,
- int intr_done);
-
-void mdp4_dsi_video_base_swap(struct mdp4_overlay_pipe *pipe)
+static void mdp4_dsi_video_wait4dmap(int cndx)
{
- dsi_pipe = pipe;
+ unsigned long flags;
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ /* start timing generator & mmu if they are not started yet */
+ mdp4_overlay_dsi_video_start();
+
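+ /* arm a DMA_P done interrupt if none is pending, then block until it fires */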
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (vctrl->dmap_wait_cnt == 0) {
+ INIT_COMPLETION(vctrl->dmap_comp);
+ if (vctrl->dmap_intr_cnt == 0) {
+ vctrl->dmap_intr_cnt++;
+ vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+ }
+ }
+ vctrl->dmap_wait_cnt++;
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ wait_for_completion(&vctrl->dmap_comp);
+}
+
+static void send_vsync_work(struct work_struct *work)
+{
+ struct vsycn_ctrl *vctrl =
+ container_of(work, typeof(*vctrl), vsync_work);
+ char buf[64];
+ char *envp[2];
+
+ snprintf(buf, sizeof(buf), "VSYNC=%llu",
+ ktime_to_ns(vctrl->vsync_time));
+ envp[0] = buf;
+ envp[1] = NULL;
+ kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+void mdp4_dsi_vsync_init(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ pr_info("%s: ndx=%d\n", __func__, cndx);
+
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->inited)
+ return;
+
+ vctrl->inited = 1;
+ vctrl->update_ndx = 0;
+ mutex_init(&vctrl->update_lock);
+ init_completion(&vctrl->vsync_comp);
+ init_completion(&vctrl->dmap_comp);
+ atomic_set(&vctrl->suspend, 0);
+ spin_lock_init(&vctrl->spin_lock);
+ INIT_WORK(&vctrl->vsync_work, send_vsync_work);
+}
+
+void mdp4_dsi_video_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ vctrl->base_pipe = pipe;
}
int mdp4_dsi_video_on(struct platform_device *pdev)
@@ -113,16 +406,24 @@
struct fb_var_screeninfo *var;
struct msm_fb_data_type *mfd;
struct mdp4_overlay_pipe *pipe;
- int ret;
+ int ret = 0;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ vctrl = &vsync_ctrl_db[cndx];
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+ vctrl->dev = mfd->fbi->dev;
+
if (!mfd)
return -ENODEV;
if (mfd->key != MFD_KEY)
return -EINVAL;
+ /* mdp clock on */
+ mdp_clk_ctrl(1);
+
fbi = mfd->fbi;
var = &fbi->var;
@@ -130,7 +431,7 @@
buf = (uint8 *) fbi->fix.smem_start;
buf_offset = calc_fb_offset(mfd, fbi, bpp);
- if (dsi_pipe == NULL) {
+ if (vctrl->base_pipe == NULL) {
ptype = mdp4_overlay_format2type(mfd->fb_imgType);
if (ptype < 0)
printk(KERN_INFO "%s: format2type failed\n", __func__);
@@ -148,17 +449,16 @@
if (ret < 0)
printk(KERN_INFO "%s: format2type failed\n", __func__);
- dsi_pipe = pipe; /* keep it */
- init_completion(&dsi_video_comp);
-
- mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
pipe->ov_blt_addr = 0;
pipe->dma_blt_addr = 0;
+ vctrl->base_pipe = pipe; /* keep it */
+ mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
} else {
- pipe = dsi_pipe;
+ pipe = vctrl->base_pipe;
}
+#ifdef CONTINUOUS_SPLASH
/* MDP cmd block enable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
@@ -171,12 +471,7 @@
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
mipi_dsi_controller_cfg(0);
}
-
- if (is_mdp4_hw_reset()) {
- mdp4_hw_init();
- outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
- }
-
+#endif
pipe->src_height = fbi->var.yres;
pipe->src_width = fbi->var.xres;
pipe->src_h = fbi->var.yres;
@@ -194,13 +489,16 @@
pipe->dst_h = fbi->var.yres;
pipe->dst_w = fbi->var.xres;
+ atomic_set(&vctrl->suspend, 0);
+
mdp4_overlay_dmap_xy(pipe); /* dma_p */
mdp4_overlay_dmap_cfg(mfd, 1);
mdp4_overlay_rgb_setup(pipe);
+ mdp4_overlayproc_cfg(pipe);
+
mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_up(pipe);
- mdp4_overlayproc_cfg(pipe);
/*
* DSI timing setting
@@ -261,6 +559,7 @@
ctrl_polarity =
(data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x4, hsync_ctrl);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x8, vsync_period * hsync_period);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0xc,
@@ -275,82 +574,97 @@
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x2c, dsi_underflow_clr);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x30, dsi_hsync_skew);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x38, ctrl_polarity);
- mdp4_overlay_reg_flush(pipe, 1);
- mdp4_mixer_stage_up(pipe);
-
- mdp_histogram_ctrl_all(TRUE);
-
- ret = panel_next_on(pdev);
- if (ret == 0) {
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- if (display_on != NULL) {
- msleep(50);
- display_on(pdev);
- }
- }
- /* MDP cmd block disable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ mdp_histogram_ctrl_all(TRUE);
return ret;
}
int mdp4_dsi_video_off(struct platform_device *pdev)
{
int ret = 0;
+ int cndx = 0;
struct msm_fb_data_type *mfd;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ atomic_set(&vctrl->suspend, 1);
+
+ while (vctrl->wait_vsync_cnt)
+ msleep(20); /* >= 17 ms */
+
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
+
dsi_video_enabled = 0;
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
mdp_histogram_ctrl_all(FALSE);
- ret = panel_next_off(pdev);
- /* delay to make sure the last frame finishes */
- msleep(20);
-
- /* dis-engage rgb0 from mixer0 */
- if (dsi_pipe) {
+ if (pipe) {
if (mfd->ref_cnt == 0) {
/* adb stop */
- if (dsi_pipe->pipe_type == OVERLAY_TYPE_BF)
- mdp4_overlay_borderfill_stage_down(dsi_pipe);
+ if (pipe->pipe_type == OVERLAY_TYPE_BF)
+ mdp4_overlay_borderfill_stage_down(pipe);
- /* dsi_pipe == rgb1 */
- mdp4_overlay_unset_mixer(dsi_pipe->mixer_num);
- dsi_pipe = NULL;
+ mdp4_overlay_unset_mixer(pipe->mixer_num);
+ vctrl->base_pipe = NULL;
} else {
- mdp4_mixer_stage_down(dsi_pipe);
- mdp4_iommu_unmap(dsi_pipe);
+ /* system suspending */
+ mdp4_mixer_stage_down(vctrl->base_pipe);
+ mdp4_overlay_iommu_pipe_free(
+ vctrl->base_pipe->pipe_ndx, 1);
}
}
+ vctrl->fake_vsync = 1;
+
+ /* mdp clock off */
+ mdp_clk_ctrl(0);
+ mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
return ret;
}
+static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
+{
+ /*
+ * The adreno GPU hardware requires that the pitch be aligned to
+ * 32 pixels for color buffers, so for the cases where the GPU
+ * is writing directly to fb0, the framebuffer pitch
+ * also needs to be 32 pixel aligned
+ */
+
+ if (fb_index == 0)
+ return ALIGN(xres, 32) * bpp;
+ else
+ return xres * bpp;
+}
+
/* 3D side by side */
void mdp4_dsi_video_3d_sbys(struct msm_fb_data_type *mfd,
struct msmfb_overlay_3d *r3d)
{
struct fb_info *fbi;
- struct mdp4_overlay_pipe *pipe;
unsigned int buf_offset;
int bpp;
uint8 *buf = NULL;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
- if (dsi_pipe == NULL)
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (vctrl->base_pipe == NULL)
return;
- dsi_pipe->is_3d = r3d->is_3d;
- dsi_pipe->src_height_3d = r3d->height;
- dsi_pipe->src_width_3d = r3d->width;
-
- pipe = dsi_pipe;
+ pipe = vctrl->base_pipe;
+ pipe->is_3d = r3d->is_3d;
+ pipe->src_height_3d = r3d->height;
+ pipe->src_width_3d = r3d->width;
if (pipe->is_3d)
mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_SIDE_BY_SIDE);
@@ -402,12 +716,10 @@
mdp4_overlay_dmap_cfg(mfd, 1);
mdp4_overlay_reg_flush(pipe, 1);
+
mdp4_mixer_stage_up(pipe);
mb();
-
- /* wait for vsycn */
- mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
}
static void mdp4_dsi_video_blt_ov_update(struct mdp4_overlay_pipe *pipe)
@@ -427,7 +739,7 @@
bpp = 3; /* overlay ouput is RGB888 */
#endif
off = 0;
- if (pipe->ov_cnt & 0x01)
+ if (pipe->blt_ov_done & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
addr = pipe->ov_blt_addr + off;
@@ -452,7 +764,7 @@
bpp = 3; /* overlay ouput is RGB888 */
#endif
off = 0;
- if (pipe->dmap_cnt & 0x01)
+ if (pipe->blt_dmap_done & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
addr = pipe->dma_blt_addr + off;
@@ -460,156 +772,101 @@
MDP_OUTP(MDP_BASE + 0x90008, addr);
}
-/*
- * mdp4_overlay_dsi_video_wait4event:
- * INTR_DMA_P_DONE and INTR_PRIMARY_VSYNC event only
- * no INTR_OVERLAY0_DONE event allowed.
- */
-static void mdp4_overlay_dsi_video_wait4event(struct msm_fb_data_type *mfd,
- int intr_done)
+void mdp4_overlay_dsi_video_set_perf(struct msm_fb_data_type *mfd)
{
- unsigned long flag;
- unsigned int data;
-
- data = inpdw(MDP_BASE + DSI_VIDEO_BASE);
- data &= 0x01;
- if (data == 0) /* timing generator disabled */
- return;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- INIT_COMPLETION(dsi_video_comp);
- mfd->dma->waiting = TRUE;
- outp32(MDP_INTR_CLEAR, intr_done);
- mdp_intr_mask |= intr_done;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- mdp_enable_irq(MDP_DMA2_TERM); /* enable intr */
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- wait_for_completion(&dsi_video_comp);
- mdp_disable_irq(MDP_DMA2_TERM);
-}
-
-static void mdp4_overlay_dsi_video_dma_busy_wait(struct msm_fb_data_type *mfd)
-{
- unsigned long flag;
- int need_wait = 0;
-
- pr_debug("%s: start pid=%d\n", __func__, current->pid);
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (mfd->dma->busy == TRUE) {
- INIT_COMPLETION(mfd->dma->comp);
- need_wait++;
- }
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- if (need_wait) {
- /* wait until DMA finishes the current job */
- pr_debug("%s: pending pid=%d\n", __func__, current->pid);
- wait_for_completion(&mfd->dma->comp);
- }
- pr_debug("%s: done pid=%d\n", __func__, current->pid);
-}
-
-void mdp4_overlay_dsi_video_start(void)
-{
- if (!dsi_video_enabled) {
- /* enable DSI block */
- mdp4_iommu_attach();
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- dsi_video_enabled = 1;
- }
-}
-
-void mdp4_overlay_dsi_video_vsync_push(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
- unsigned long flag;
-
- if (pipe->flags & MDP_OV_PLAY_NOWAIT)
- return;
-
- if (dsi_pipe->ov_blt_addr) {
- mdp4_overlay_dsi_video_dma_busy_wait(mfd);
-
- mdp4_dsi_video_blt_ov_update(dsi_pipe);
- dsi_pipe->ov_cnt++;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
- mdp_intr_mask |= INTR_OVERLAY0_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- mdp_enable_irq(MDP_OVERLAY0_TERM);
- mfd->dma->busy = TRUE;
- mb(); /* make sure all registers updated */
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay engine */
- mdp4_stat.kickoff_ov0++;
- mb();
- mdp4_overlay_dsi_video_wait4event(mfd, INTR_DMA_P_DONE);
- } else {
- mdp4_overlay_dsi_video_wait4event(mfd, INTR_PRIMARY_VSYNC);
- }
-
+ mdp4_dsi_video_wait4dmap(0);
+ /* change mdp clk while mdp is idle */
mdp4_set_perf_level();
}
+
/*
* mdp4_primary_vsync_dsi_video: called from isr
*/
void mdp4_primary_vsync_dsi_video(void)
{
- complete_all(&dsi_video_comp);
+ int cndx;
+ struct vsycn_ctrl *vctrl;
+
+ cndx = 0;
+ vctrl = &vsync_ctrl_db[cndx];
+ pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
+ vctrl->vsync_time = ktime_get();
+ schedule_work(&vctrl->vsync_work);
+
+ spin_lock(&vctrl->spin_lock);
+ if (vctrl->wait_vsync_cnt) {
+ complete_all(&vctrl->vsync_comp);
+ vctrl->wait_vsync_cnt = 0;
+ }
+ spin_unlock(&vctrl->spin_lock);
}
/*
- * mdp4_dma_p_done_dsi_video: called from isr
+ * mdp4_dmap_done_dsi_video: called from isr
*/
-void mdp4_dma_p_done_dsi_video(struct mdp_dma_data *dma)
+void mdp4_dmap_done_dsi_video(int cndx)
{
- if (blt_cfg_changed) {
- mdp_is_in_isr = TRUE;
- if (dsi_pipe->ov_blt_addr) {
- mdp4_overlay_dmap_xy(dsi_pipe);
- mdp4_overlayproc_cfg(dsi_pipe);
- } else {
- mdp4_overlayproc_cfg(dsi_pipe);
- mdp4_overlay_dmap_xy(dsi_pipe);
- }
- mdp_is_in_isr = FALSE;
- if (dsi_pipe->ov_blt_addr) {
- mdp4_dsi_video_blt_ov_update(dsi_pipe);
- dsi_pipe->ov_cnt++;
- outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
- mdp_intr_mask |= INTR_OVERLAY0_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->busy = TRUE;
- mdp_enable_irq(MDP_OVERLAY0_TERM);
- /* kickoff overlay engine */
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ spin_lock(&vctrl->spin_lock);
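+ /*
+ * blt mode was just enabled or disabled; reprogram the overlay proc
+ * and dmap pipelines before kicking off the next frame
+ */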
+ if (vctrl->blt_change) {
+ mdp4_overlayproc_cfg(pipe);
+ mdp4_overlay_dmap_xy(pipe);
+ if (pipe->ov_blt_addr) {
+ mdp4_dsi_video_blt_ov_update(pipe);
+ pipe->blt_ov_done++;
+
+ /* Prefill one frame */
+ vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+ /* kickoff overlay0 engine */
+ mdp4_stat.kickoff_ov0++;
outpdw(MDP_BASE + 0x0004, 0);
}
- blt_cfg_changed = 0;
+ vctrl->blt_change = 0;
}
- complete_all(&dsi_video_comp);
+
+ vctrl->dmap_intr_cnt--;
+ if (vctrl->dmap_wait_cnt) {
+ complete_all(&vctrl->dmap_comp);
+ vctrl->dmap_wait_cnt = 0; /* reset */
+ } else {
+ mdp4_overlay_dma_commit(cndx);
+ }
+ vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+ spin_unlock(&vctrl->spin_lock);
}
/*
- * mdp4_overlay1_done_dsi: called from isr
+ * mdp4_overlay0_done_dsi: called from isr
*/
-void mdp4_overlay0_done_dsi_video(struct mdp_dma_data *dma)
+void mdp4_overlay0_done_dsi_video(int cndx)
{
- spin_lock(&mdp_spin_lock);
- dma->busy = FALSE;
- if (dsi_pipe->ov_blt_addr == 0) {
- spin_unlock(&mdp_spin_lock);
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ spin_lock(&vctrl->spin_lock);
+ if (pipe->ov_blt_addr == 0) {
+ spin_unlock(&vctrl->spin_lock);
return;
}
- mdp4_dsi_video_blt_dmap_update(dsi_pipe);
- dsi_pipe->dmap_cnt++;
- mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
- spin_unlock(&mdp_spin_lock);
- complete(&dma->comp);
+
+ mdp4_dsi_video_blt_dmap_update(pipe);
+ pipe->blt_dmap_done++;
+ vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+ spin_unlock(&vctrl->spin_lock);
}
/*
@@ -620,7 +877,12 @@
{
unsigned long flag;
int data;
- int change = 0;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
@@ -629,55 +891,37 @@
return;
}
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (enable && dsi_pipe->ov_blt_addr == 0) {
- dsi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
- dsi_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
- dsi_pipe->blt_cnt = 0;
- dsi_pipe->ov_cnt = 0;
- dsi_pipe->dmap_cnt = 0;
+ spin_lock_irqsave(&vctrl->spin_lock, flag);
+ if (enable && pipe->ov_blt_addr == 0) {
+ pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+ pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+ pipe->blt_cnt = 0;
+ pipe->ov_cnt = 0;
+ pipe->blt_dmap_done = 0;
+ pipe->blt_ov_koff = 0;
+ pipe->blt_ov_done = 0;
mdp4_stat.blt_dsi_video++;
- change++;
- } else if (enable == 0 && dsi_pipe->ov_blt_addr) {
- dsi_pipe->ov_blt_addr = 0;
- dsi_pipe->dma_blt_addr = 0;
- change++;
+ vctrl->blt_change++;
+ } else if (enable == 0 && pipe->ov_blt_addr) {
+ pipe->ov_blt_addr = 0;
+ pipe->dma_blt_addr = 0;
+ vctrl->blt_change++;
}
- if (!change) {
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ pr_info("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__,
+ vctrl->blt_change, enable, (int)pipe->ov_blt_addr);
+
+ if (!vctrl->blt_change) {
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
return;
}
- pr_debug("%s: enable=%d ov_blt_addr=%x\n", __func__,
- enable, (int)dsi_pipe->ov_blt_addr);
- blt_cfg_changed = 1;
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- /*
- * may need mutex here to sync with whom dsiable
- * timing generator
- */
data = inpdw(MDP_BASE + DSI_VIDEO_BASE);
data &= 0x01;
- if (data) { /* timing generator enabled */
- mdp4_overlay_dsi_video_wait4event(mfd, INTR_DMA_P_DONE);
- msleep(20);
- }
-
-
-}
-
-int mdp4_dsi_video_overlay_blt_offset(struct msm_fb_data_type *mfd,
- struct msmfb_overlay_blt *req)
-{
- req->offset = 0;
- req->width = dsi_pipe->src_width;
- req->height = dsi_pipe->src_height;
- req->bpp = dsi_pipe->bpp;
-
- return sizeof(*req);
+ if (data) /* timing generator enabled */
+ mdp4_dsi_video_wait4dmap(0);
}
void mdp4_dsi_video_overlay_blt(struct msm_fb_data_type *mfd,
@@ -702,36 +946,32 @@
uint8 *buf;
unsigned int buf_offset;
int bpp;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
struct mdp4_overlay_pipe *pipe;
- if (!mfd->panel_power_on)
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (!pipe || !mfd->panel_power_on)
return;
- /* no need to power on cmd block since it's dsi video mode */
- bpp = fbi->var.bits_per_pixel / 8;
- buf = (uint8 *) fbi->fix.smem_start;
- buf_offset = calc_fb_offset(mfd, fbi, bpp);
+ pr_debug("%s: cpu=%d pid=%d\n", __func__,
+ smp_processor_id(), current->pid);
+ if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
+ bpp = fbi->var.bits_per_pixel / 8;
+ buf = (uint8 *) fbi->fix.smem_start;
+ buf_offset = calc_fb_offset(mfd, fbi, bpp);
- mutex_lock(&mfd->dma->ov_mutex);
+ if (mfd->display_iova)
+ pipe->srcp0_addr = mfd->display_iova + buf_offset;
+ else
+ pipe->srcp0_addr = (uint32)(buf + buf_offset);
- pipe = dsi_pipe;
- if (pipe->pipe_used == 0 ||
- pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) {
- pr_err("%s: NOT baselayer\n", __func__);
- mutex_unlock(&mfd->dma->ov_mutex);
- return;
+ mdp4_dsi_video_pipe_queue(0, pipe);
}
- if (mfd->display_iova)
- pipe->srcp0_addr = mfd->display_iova + buf_offset;
- else
- pipe->srcp0_addr = (uint32)(buf + buf_offset);
-
- mdp4_overlay_rgb_setup(pipe);
- mdp4_overlay_reg_flush(pipe, 1);
- mdp4_mixer_stage_up(pipe);
- mdp4_overlay_dsi_video_start();
- mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
- mdp4_iommu_unmap(pipe);
- mutex_unlock(&mfd->dma->ov_mutex);
+ mdp4_dsi_video_pipe_commit();
+ mdp4_dsi_video_wait4dmap(0);
}
+
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index b9d6037..57a07d0 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -33,6 +33,8 @@
#define DTV_BASE 0xD0000
+static int dtv_enabled;
+
/*#define DEBUG*/
#ifdef DEBUG
static void __mdp_outp(uint32 port, uint32 value)
@@ -51,15 +53,298 @@
static int first_pixel_start_x;
static int first_pixel_start_y;
-static int dtv_enabled;
-static struct mdp4_overlay_pipe *dtv_pipe;
-static DECLARE_COMPLETION(dtv_comp);
-
-void mdp4_dtv_base_swap(struct mdp4_overlay_pipe *pipe)
+void mdp4_dtv_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
{
+#ifdef BYPASS4
if (hdmi_prim_display)
dtv_pipe = pipe;
+#endif
+}
+
+#define MAX_CONTROLLER 1
+
+static struct vsycn_ctrl {
+ struct device *dev;
+ int inited;
+ int update_ndx;
+ int dmae_intr_cnt;
+ atomic_t suspend;
+ int dmae_wait_cnt;
+ int wait_vsync_cnt;
+ int blt_change;
+ struct mutex update_lock;
+ struct completion dmae_comp;
+ struct completion vsync_comp;
+ spinlock_t spin_lock;
+ struct mdp4_overlay_pipe *base_pipe;
+ struct vsync_update vlist[2];
+ int vsync_irq_enabled;
+ ktime_t vsync_time;
+ struct work_struct vsync_work;
+} vsync_ctrl_db[MAX_CONTROLLER];
+
+static void vsync_irq_enable(int intr, int term)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+ outp32(MDP_INTR_CLEAR,
+ INTR_DMA_E_DONE | INTR_OVERLAY1_DONE | INTR_EXTERNAL_VSYNC);
+ mdp_intr_mask |= intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_enable_irq(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ pr_debug("%s: IRQ-en done, term=%x\n", __func__, term);
+}
+
+static void vsync_irq_disable(int intr, int term)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+ outp32(MDP_INTR_CLEAR,
+ INTR_DMA_E_DONE | INTR_OVERLAY1_DONE | INTR_EXTERNAL_VSYNC);
+ mdp_intr_mask &= ~intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_disable_irq_nosync(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ pr_debug("%s: IRQ-dis done, term=%x\n", __func__, term);
+}
+
+void mdp4_overlay_dtv_start(void)
+{
+ if (!dtv_enabled) {
+ /* enable DTV block */
+ mdp4_iommu_attach();
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ MDP_OUTP(MDP_BASE + DTV_BASE, 1);
+ mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ dtv_enabled = 1;
+ }
+}
+
+/*
+ * mdp4_dtv_pipe_queue:
+ * called from thread context
+ */
+void mdp4_dtv_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pp;
+ int undx;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ /* start timing generator & mmu if they are not started yet */
+ mdp4_overlay_dtv_start();
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+
+ pp = &vp->plist[pipe->pipe_ndx - 1]; /* ndx starts from 1 */
+
+ pr_debug("%s: vndx=%d pipe_ndx=%d flags=%x pid=%d\n",
+ __func__, undx, pipe->pipe_ndx, pipe->flags, current->pid);
+
+ *pp = *pipe; /* keep it */
+ vp->update_cnt++;
+ mutex_unlock(&vctrl->update_lock);
+}
+
+static void mdp4_dtv_blt_ov_update(struct mdp4_overlay_pipe *pipe);
+
+int mdp4_dtv_pipe_commit(void)
+{
+
+ int i, undx;
+ int mixer = 0;
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+ int cnt = 0;
+
+ vctrl = &vsync_ctrl_db[0];
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+ pipe = vctrl->base_pipe;
+ mixer = pipe->mixer_num;
+ mdp4_overlay_iommu_unmap_freelist(mixer);
+
+ if (vp->update_cnt == 0) {
+ mutex_unlock(&vctrl->update_lock);
+ return 0;
+ }
+
+ vctrl->update_ndx++;
+ vctrl->update_ndx &= 0x01;
+ vp->update_cnt = 0; /* reset */
+ mutex_unlock(&vctrl->update_lock);
+
+ pipe = vp->plist;
+ for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+ if (pipe->pipe_used) {
+ cnt++;
+ mdp4_overlay_vsync_commit(pipe);
+ pipe->pipe_used = 0; /* clear */
+ }
+ }
+ mdp4_mixer_stage_commit(mixer);
+
+ pipe = vctrl->base_pipe;
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (pipe->ov_blt_addr) {
+ mdp4_dtv_blt_ov_update(pipe);
+ pipe->blt_ov_done++;
+ vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
+ mb();
+ pipe->blt_ov_koff++;
+ /* kickoff overlay1 engine */
+ mdp4_stat.kickoff_ov1++;
+ outpdw(MDP_BASE + 0x0008, 0);
+ } else if (vctrl->dmae_intr_cnt == 0) {
+ /* schedule second phase update at dmap */
+ vctrl->dmae_intr_cnt++;
+ vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM);
+ }
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ return cnt;
+}
+
+void mdp4_dtv_vsync_ctrl(int cndx, int enable)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (vctrl->vsync_irq_enabled == enable)
+ return;
+
+ pr_debug("%s: vsync enable=%d\n", __func__, enable);
+
+ vctrl->vsync_irq_enabled = enable;
+
+ if (enable)
+ vsync_irq_enable(INTR_EXTERNAL_VSYNC, MDP_EXTER_VSYNC_TERM);
+ else
+ vsync_irq_disable(INTR_EXTERNAL_VSYNC, MDP_EXTER_VSYNC_TERM);
+}
+
+void mdp4_dtv_wait4vsync(int cndx, long long *vtime)
+{
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+
+ if (vctrl->wait_vsync_cnt == 0)
+ INIT_COMPLETION(vctrl->vsync_comp);
+ vctrl->wait_vsync_cnt++;
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ wait_for_completion(&vctrl->vsync_comp);
+ mdp4_stat.wait4vsync1++;
+
+ *vtime = ktime_to_ns(vctrl->vsync_time);
+}
+
+static void mdp4_dtv_wait4dmae(int cndx)
+{
+ unsigned long flags;
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (vctrl->dmae_wait_cnt == 0) {
+ INIT_COMPLETION(vctrl->dmae_comp);
+ if (vctrl->dmae_intr_cnt == 0) {
+ vctrl->dmae_intr_cnt++;
+ vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM);
+ }
+ }
+ vctrl->dmae_wait_cnt++;
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ wait_for_completion(&vctrl->dmae_comp);
+ pr_info("%s: pid=%d after wait\n", __func__, current->pid);
+}
+
+static void send_vsync_work(struct work_struct *work)
+{
+ struct vsycn_ctrl *vctrl =
+ container_of(work, typeof(*vctrl), vsync_work);
+ char buf[64];
+ char *envp[2];
+
+ snprintf(buf, sizeof(buf), "VSYNC=%llu",
+ ktime_to_ns(vctrl->vsync_time));
+ envp[0] = buf;
+ envp[1] = NULL;
+ kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+void mdp4_dtv_vsync_init(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ pr_info("%s: ndx=%d\n", __func__, cndx);
+
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->inited)
+ return;
+
+ vctrl->inited = 1;
+ vctrl->update_ndx = 0;
+ mutex_init(&vctrl->update_lock);
+ init_completion(&vctrl->vsync_comp);
+ atomic_set(&vctrl->suspend, 0);
+ spin_lock_init(&vctrl->spin_lock);
+ INIT_WORK(&vctrl->vsync_work, send_vsync_work);
}
static int mdp4_dtv_start(struct msm_fb_data_type *mfd)
@@ -103,9 +388,6 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- if (dtv_pipe == NULL)
- return -EINVAL;
-
fbi = mfd->fbi;
var = &fbi->var;
@@ -201,26 +483,22 @@
/* Test pattern 8 x 8 pixel */
/* MDP_OUTP(MDP_BASE + DTV_BASE + 0x4C, 0x80000808); */
- mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ /* enable DTV block */
+ MDP_OUTP(MDP_BASE + DTV_BASE, 1);
return 0;
}
static int mdp4_dtv_stop(struct msm_fb_data_type *mfd)
{
- if (dtv_pipe == NULL)
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->base_pipe == NULL)
return -EINVAL;
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- msleep(20);
MDP_OUTP(MDP_BASE + DTV_BASE, 0);
- dtv_enabled = 0;
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
return 0;
}
@@ -229,6 +507,10 @@
{
struct msm_fb_data_type *mfd;
int ret = 0;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+
+ vctrl = &vsync_ctrl_db[cndx];
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
@@ -238,16 +520,30 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
+ vctrl->dev = mfd->fbi->dev;
+
mdp_footswitch_ctrl(TRUE);
+ /* Mdp clock enable */
+ mdp_clk_ctrl(1);
+
mdp4_overlay_panel_mode(MDP4_MIXER1, MDP4_PANEL_DTV);
- if (dtv_pipe != NULL)
- ret = mdp4_dtv_start(mfd);
+
+ /* Allocate dtv_pipe at dtv_on*/
+ if (vctrl->base_pipe == NULL) {
+ if (mdp4_overlay_dtv_set(mfd, NULL)) {
+ pr_warn("%s: dtv_pipe is NULL, dtv_set failed\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
ret = panel_next_on(pdev);
if (ret != 0)
- dev_warn(&pdev->dev, "mdp4_overlay_dtv: panel_next_on failed");
+ pr_warn("%s: panel_next_on failed", __func__);
- dev_info(&pdev->dev, "mdp4_overlay_dtv: on");
+ atomic_set(&vctrl->suspend, 0);
+
+ pr_info("%s:\n", __func__);
return ret;
}
@@ -256,47 +552,112 @@
{
struct msm_fb_data_type *mfd;
int ret = 0;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
- if (dtv_pipe != NULL) {
+ vctrl = &vsync_ctrl_db[cndx];
+
+ atomic_set(&vctrl->suspend, 1);
+
+ while (vctrl->wait_vsync_cnt)
+ msleep(20); /* >= 17 ms */
+
+ pipe = vctrl->base_pipe;
+ if (pipe != NULL) {
mdp4_dtv_stop(mfd);
if (hdmi_prim_display && mfd->ref_cnt == 0) {
/* adb stop */
- if (dtv_pipe->pipe_type == OVERLAY_TYPE_BF)
- mdp4_overlay_borderfill_stage_down(dtv_pipe);
+ if (pipe->pipe_type == OVERLAY_TYPE_BF)
+ mdp4_overlay_borderfill_stage_down(pipe);
- /* dtv_pipe == rgb1 */
- mdp4_overlay_unset_mixer(dtv_pipe->mixer_num);
- dtv_pipe = NULL;
+ /* pipe == rgb2 */
+ mdp4_overlay_unset_mixer(pipe->mixer_num);
+ vctrl->base_pipe = NULL;
} else {
- mdp4_mixer_stage_down(dtv_pipe);
- mdp4_overlay_pipe_free(dtv_pipe);
- mdp4_iommu_unmap(dtv_pipe);
- dtv_pipe = NULL;
+ mdp4_mixer_stage_down(pipe);
+ mdp4_overlay_pipe_free(pipe);
+ vctrl->base_pipe = NULL;
}
}
+
mdp4_overlay_panel_mode_unset(MDP4_MIXER1, MDP4_PANEL_DTV);
ret = panel_next_off(pdev);
mdp_footswitch_ctrl(FALSE);
- dev_info(&pdev->dev, "mdp4_overlay_dtv: off");
+ /* Mdp clock disable */
+ mdp_clk_ctrl(0);
+
+ pr_info("%s:\n", __func__);
return ret;
}
+static void mdp4_dtv_blt_ov_update(struct mdp4_overlay_pipe *pipe)
+{
+ uint32 off, addr;
+ int bpp;
+ char *overlay_base;
+
+ if (pipe->ov_blt_addr == 0)
+ return;
+
+#ifdef BLT_RGB565
+ bpp = 2; /* overlay output is RGB565 */
+#else
+ bpp = 3; /* overlay output is RGB888 */
+#endif
+ off = 0;
+ if (pipe->blt_ov_done & 0x01)
+ off = pipe->src_height * pipe->src_width * bpp;
+ addr = pipe->ov_blt_addr + off;
+
+ /* overlay 1 */
+ overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x10000 */
+ outpdw(overlay_base + 0x000c, addr);
+ outpdw(overlay_base + 0x001c, addr);
+}
+
+static void mdp4_dtv_blt_dmae_update(struct mdp4_overlay_pipe *pipe)
+{
+ uint32 off, addr;
+ int bpp;
+
+ if (pipe->ov_blt_addr == 0)
+ return;
+
+#ifdef BLT_RGB565
+ bpp = 2; /* overlay output is RGB565 */
+#else
+ bpp = 3; /* overlay output is RGB888 */
+#endif
+ off = 0;
+ if (pipe->blt_dmap_done & 0x01)
+ off = pipe->src_height * pipe->src_width * bpp;
+ addr = pipe->dma_blt_addr + off;
+
+ /* dmae */
+ MDP_OUTP(MDP_BASE + 0xb0008, addr);
+}
+
+void mdp4_overlay_dtv_set_perf(struct msm_fb_data_type *mfd)
+{
+ /* change mdp clk while mdp is idle */
+ mdp4_set_perf_level();
+}
+
static void mdp4_overlay_dtv_alloc_pipe(struct msm_fb_data_type *mfd,
- int32 ptype)
+ int32 ptype, struct vsycn_ctrl *vctrl)
{
int ret = 0;
struct fb_info *fbi = mfd->fbi;
struct mdp4_overlay_pipe *pipe;
- if (dtv_pipe != NULL)
+ if (vctrl->base_pipe != NULL)
return;
- pr_debug("%s: ptype=%d\n", __func__, ptype);
-
pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER1);
if (pipe == NULL) {
pr_err("%s: pipe_alloc failed\n", __func__);
@@ -307,7 +668,6 @@
pipe->mixer_num = MDP4_MIXER1;
if (ptype == OVERLAY_TYPE_BF) {
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/* LSP_BORDER_COLOR */
MDP_OUTP(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x5004,
((0x0 & 0xFFF) << 16) | /* 12-bit B */
@@ -315,7 +675,7 @@
/* MSP_BORDER_COLOR */
MDP_OUTP(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x5008,
(0x0 & 0xFFF)); /* 12-bit R */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ pipe->src_format = MDP_ARGB_8888;
} else {
switch (mfd->ibuf.bpp) {
case 2:
@@ -357,28 +717,34 @@
mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_up(pipe);
- dtv_pipe = pipe; /* keep it */
+ vctrl->base_pipe = pipe; /* keep it */
}
int mdp4_overlay_dtv_set(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe)
{
- if (dtv_pipe != NULL)
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->base_pipe != NULL)
return 0;
if (pipe != NULL && pipe->mixer_stage == MDP4_MIXER_STAGE_BASE &&
pipe->pipe_type == OVERLAY_TYPE_RGB)
- dtv_pipe = pipe; /* keep it */
+ vctrl->base_pipe = pipe; /* keep it */
else if (!hdmi_prim_display && mdp4_overlay_borderfill_supported())
- mdp4_overlay_dtv_alloc_pipe(mfd, OVERLAY_TYPE_BF);
+ mdp4_overlay_dtv_alloc_pipe(mfd, OVERLAY_TYPE_BF, vctrl);
else
- mdp4_overlay_dtv_alloc_pipe(mfd, OVERLAY_TYPE_RGB);
- if (dtv_pipe == NULL)
+ mdp4_overlay_dtv_alloc_pipe(mfd, OVERLAY_TYPE_RGB, vctrl);
+
+ if (vctrl->base_pipe == NULL)
return -ENODEV;
mdp4_init_writeback_buf(mfd, MDP4_MIXER1);
- dtv_pipe->ov_blt_addr = 0;
- dtv_pipe->dma_blt_addr = 0;
+ vctrl->base_pipe->ov_blt_addr = 0;
+ vctrl->base_pipe->dma_blt_addr = 0;
return mdp4_dtv_start(mfd);
}
@@ -387,206 +753,112 @@
struct mdp4_overlay_pipe *pipe)
{
int result = 0;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
- if (dtv_pipe == NULL)
- return result;
-
- pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
- mdp4_overlay_reg_flush(pipe, 0);
- mdp4_overlay_dtv_ov_done_push(mfd, pipe);
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->base_pipe != NULL)
+ return 0;
if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE &&
pipe->pipe_type == OVERLAY_TYPE_RGB) {
result = mdp4_dtv_stop(mfd);
- dtv_pipe = NULL;
+ vctrl->base_pipe = NULL;
}
return result;
}
-static void mdp4_dtv_blt_ov_update(struct mdp4_overlay_pipe *pipe)
+/* TODO: dtv writeback need to be added later */
+
+void mdp4_external_vsync_dtv(void)
{
- uint32 off, addr;
- int bpp;
- char *overlay_base;
+ int cndx;
+ struct vsycn_ctrl *vctrl;
- if (pipe->ov_blt_addr == 0)
- return;
-#ifdef BLT_RGB565
- bpp = 2; /* overlay ouput is RGB565 */
-#else
- bpp = 3; /* overlay ouput is RGB888 */
-#endif
- off = (pipe->ov_cnt & 0x01) ?
- pipe->src_height * pipe->src_width * bpp : 0;
+ cndx = 0;
+ vctrl = &vsync_ctrl_db[cndx];
+ pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
+ vctrl->vsync_time = ktime_get();
+ schedule_work(&vctrl->vsync_work);
- addr = pipe->ov_blt_addr + off;
- pr_debug("%s overlay addr 0x%x\n", __func__, addr);
- /* overlay 1 */
- overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
- outpdw(overlay_base + 0x000c, addr);
- outpdw(overlay_base + 0x001c, addr);
+ pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
+
+ spin_lock(&vctrl->spin_lock);
+ if (vctrl->wait_vsync_cnt) {
+ complete_all(&vctrl->vsync_comp);
+ vctrl->wait_vsync_cnt = 0;
+ }
+ spin_unlock(&vctrl->spin_lock);
}
-static inline void mdp4_dtv_blt_dmae_update(struct mdp4_overlay_pipe *pipe)
+/*
+ * mdp4_dmae_done_dtv: called from isr
+ */
+void mdp4_dmae_done_dtv(void)
{
- uint32 off, addr;
- int bpp;
+ int cndx;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
- if (pipe->ov_blt_addr == 0)
- return;
-
-#ifdef BLT_RGB565
- bpp = 2; /* overlay ouput is RGB565 */
-#else
- bpp = 3; /* overlay ouput is RGB888 */
-#endif
- off = (pipe->dmae_cnt & 0x01) ?
- pipe->src_height * pipe->src_width * bpp : 0;
- addr = pipe->dma_blt_addr + off;
- MDP_OUTP(MDP_BASE + 0xb0008, addr);
-}
-
-static inline void mdp4_overlay_dtv_ov_kick_start(void)
-{
- outpdw(MDP_BASE + 0x0008, 0);
-}
-
-static void mdp4_overlay_dtv_ov_start(struct msm_fb_data_type *mfd)
-{
- unsigned long flag;
-
- /* enable irq */
- if (mfd->ov_start)
- return;
-
- if (!dtv_pipe) {
- pr_debug("%s: no mixer1 base layer pipe allocated!\n",
- __func__);
+ cndx = 0;
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
return;
}
- if (dtv_pipe->ov_blt_addr) {
- mdp4_dtv_blt_ov_update(dtv_pipe);
- dtv_pipe->ov_cnt++;
- mdp4_overlay_dtv_ov_kick_start();
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ spin_lock(&vctrl->spin_lock);
+ if (vctrl->blt_change) {
+ if (pipe->ov_blt_addr) {
+ mdp4_overlayproc_cfg(pipe);
+ mdp4_overlay_dmae_xy(pipe);
+ mdp4_dtv_blt_ov_update(pipe);
+ pipe->blt_ov_done++;
+
+ /* Prefill one frame */
+ vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
+ /* kickoff overlay1 engine */
+ mdp4_stat.kickoff_ov1++;
+ outpdw(MDP_BASE + 0x0008, 0);
+ }
+ vctrl->blt_change = 0;
}
- spin_lock_irqsave(&mdp_spin_lock, flag);
- mdp_enable_irq(MDP_OVERLAY1_TERM);
- INIT_COMPLETION(dtv_pipe->comp);
- mfd->dma->waiting = TRUE;
- outp32(MDP_INTR_CLEAR, INTR_OVERLAY1_DONE);
- mdp_intr_mask |= INTR_OVERLAY1_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- mfd->ov_start = true;
-}
-
-static void mdp4_overlay_dtv_wait4dmae(struct msm_fb_data_type *mfd)
-{
- unsigned long flag;
-
- if (!dtv_pipe) {
- pr_debug("%s: no mixer1 base layer pipe allocated!\n",
- __func__);
- return;
+ vctrl->dmae_intr_cnt--;
+ if (vctrl->dmae_wait_cnt) {
+ complete_all(&vctrl->dmae_comp);
+ vctrl->dmae_wait_cnt = 0; /* reset */
+ } else {
+ mdp4_overlay_dma_commit(MDP4_MIXER1);
}
- /* enable irq */
- spin_lock_irqsave(&mdp_spin_lock, flag);
- mdp_enable_irq(MDP_DMA_E_TERM);
- INIT_COMPLETION(dtv_pipe->comp);
- mfd->dma->waiting = TRUE;
- outp32(MDP_INTR_CLEAR, INTR_DMA_E_DONE);
- mdp_intr_mask |= INTR_DMA_E_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- wait_for_completion_killable(&dtv_pipe->comp);
- mdp_disable_irq(MDP_DMA_E_TERM);
-}
-
-static void mdp4_overlay_dtv_wait4_ov_done(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
- u32 data = inpdw(MDP_BASE + DTV_BASE);
-
- if (mfd->ov_start)
- mfd->ov_start = false;
- else
- return;
- if (!(data & 0x1) || (pipe == NULL))
- return;
- if (!dtv_pipe) {
- pr_debug("%s: no mixer1 base layer pipe allocated!\n",
- __func__);
- return;
- }
-
- wait_for_completion_timeout(&dtv_pipe->comp,
- msecs_to_jiffies(VSYNC_PERIOD * 3));
- mdp_disable_irq(MDP_OVERLAY1_TERM);
-
- if (dtv_pipe->ov_blt_addr)
- mdp4_overlay_dtv_wait4dmae(mfd);
-}
-
-void mdp4_overlay_dtv_start(void)
-{
- if (!dtv_enabled) {
- mdp4_iommu_attach();
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- /* enable DTV block */
- MDP_OUTP(MDP_BASE + DTV_BASE, 1);
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- dtv_enabled = 1;
- }
-}
-
-void mdp4_overlay_dtv_ov_done_push(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
- mdp4_overlay_dtv_ov_start(mfd);
- if (pipe->flags & MDP_OV_PLAY_NOWAIT)
- return;
-
- mdp4_overlay_dtv_wait4_ov_done(mfd, pipe);
-
- /* change mdp clk while mdp is idle` */
- mdp4_set_perf_level();
-}
-
-void mdp4_overlay_dtv_wait_for_ov(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
- mdp4_overlay_dtv_wait4_ov_done(mfd, pipe);
- mdp4_set_perf_level();
-}
-
-void mdp4_dma_e_done_dtv()
-{
- if (!dtv_pipe)
- return;
-
- complete(&dtv_pipe->comp);
-}
-
-void mdp4_external_vsync_dtv()
-{
-
- complete_all(&dtv_comp);
+ vsync_irq_disable(INTR_DMA_E_DONE, MDP_DMA_E_TERM);
+ spin_unlock(&vctrl->spin_lock);
}
/*
* mdp4_overlay1_done_dtv: called from isr
*/
-void mdp4_overlay1_done_dtv()
+void mdp4_overlay1_done_dtv(void)
{
- if (!dtv_pipe)
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ int cndx = 0;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ spin_lock(&vctrl->spin_lock);
+ if (pipe->ov_blt_addr == 0) {
+ spin_unlock(&vctrl->spin_lock);
return;
- if (dtv_pipe->ov_blt_addr) {
- mdp4_dtv_blt_dmae_update(dtv_pipe);
- dtv_pipe->dmae_cnt++;
}
- complete_all(&dtv_pipe->comp);
+
+ mdp4_dtv_blt_dmae_update(pipe);
+ pipe->blt_dmap_done++;
+ vsync_irq_disable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
+ spin_unlock(&vctrl->spin_lock);
}
void mdp4_dtv_set_black_screen(void)
@@ -595,16 +867,17 @@
/*Black color*/
uint32 color = 0x00000000;
uint32 temp_src_format;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
- if (!dtv_pipe || !hdmi_prim_display) {
- pr_err("dtv_pipe/hdmi as primary are not"
- " configured yet\n");
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->base_pipe == NULL || !hdmi_prim_display) {
+ pr_err("dtv_pipe is not configured yet\n");
return;
}
rgb_base = MDP_BASE + MDP4_RGB_BASE;
- rgb_base += (MDP4_RGB_OFF * dtv_pipe->pipe_num);
+ rgb_base += (MDP4_RGB_OFF * vctrl->base_pipe->pipe_num);
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/*
* RGB Constant Color
*/
@@ -614,73 +887,71 @@
*/
temp_src_format = inpdw(rgb_base + 0x0050);
MDP_OUTP(rgb_base + 0x0050, temp_src_format | BIT(22));
- mdp4_overlay_reg_flush(dtv_pipe, 1);
- mdp4_mixer_stage_up(dtv_pipe);
+ mdp4_overlay_reg_flush(vctrl->base_pipe, 1);
+ mdp4_mixer_stage_up(vctrl->base_pipe);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
-void mdp4_overlay_dtv_wait4vsync(void)
-{
- unsigned long flag;
-
- if (!dtv_enabled)
- return;
-
- /* enable irq */
- spin_lock_irqsave(&mdp_spin_lock, flag);
- mdp_enable_irq(MDP_DMA_E_TERM);
- INIT_COMPLETION(dtv_comp);
- outp32(MDP_INTR_CLEAR, INTR_EXTERNAL_VSYNC);
- mdp_intr_mask |= INTR_EXTERNAL_VSYNC;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- wait_for_completion_killable(&dtv_comp);
- mdp_disable_irq(MDP_DMA_E_TERM);
-}
-
static void mdp4_dtv_do_blt(struct msm_fb_data_type *mfd, int enable)
{
unsigned long flag;
- int change = 0;
+ int data;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ mdp4_allocate_writeback_buf(mfd, MDP4_MIXER1);
if (!mfd->ov1_wb_buf->write_addr) {
- pr_debug("%s: no writeback buf assigned\n", __func__);
+ pr_info("%s: ctrl=%d blt_base NOT assigned\n", __func__, cndx);
return;
}
- if (!dtv_pipe) {
- pr_debug("%s: no mixer1 base layer pipe allocated!\n",
- __func__);
+ spin_lock_irqsave(&vctrl->spin_lock, flag);
+ if (enable && pipe->ov_blt_addr == 0) {
+ pipe->ov_blt_addr = mfd->ov1_wb_buf->write_addr;
+ pipe->dma_blt_addr = mfd->ov1_wb_buf->read_addr;
+ pipe->blt_cnt = 0;
+ pipe->ov_cnt = 0;
+ pipe->blt_dmap_done = 0;
+ pipe->blt_ov_koff = 0;
+ pipe->blt_ov_done = 0;
+ mdp4_stat.blt_dtv++;
+ vctrl->blt_change++;
+ } else if (enable == 0 && pipe->ov_blt_addr) {
+ pipe->ov_blt_addr = 0;
+ pipe->dma_blt_addr = 0;
+ vctrl->blt_change++;
+ }
+
+ pr_info("%s: enable=%d change=%d blt_addr=%x\n", __func__,
+ enable, vctrl->blt_change, (int)pipe->ov_blt_addr);
+
+ if (!vctrl->blt_change) {
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
return;
}
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (enable && dtv_pipe->ov_blt_addr == 0) {
- dtv_pipe->ov_blt_addr = mfd->ov1_wb_buf->write_addr;
- dtv_pipe->dma_blt_addr = mfd->ov1_wb_buf->read_addr;
- change++;
- dtv_pipe->ov_cnt = 0;
- dtv_pipe->dmae_cnt = 0;
- } else if (enable == 0 && dtv_pipe->ov_blt_addr) {
- dtv_pipe->ov_blt_addr = 0;
- dtv_pipe->dma_blt_addr = 0;
- change++;
- }
- pr_debug("%s: ov_blt_addr=%x\n", __func__, (int)dtv_pipe->ov_blt_addr);
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ atomic_set(&vctrl->suspend, 1);
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
- if (!change)
- return;
+ data = inpdw(MDP_BASE + DTV_BASE);
+ data &= 0x01;
+ if (data) /* timing generator enabled */
+ mdp4_dtv_wait4dmae(0);
- if (dtv_enabled) {
- mdp4_overlay_dtv_wait4dmae(mfd);
- MDP_OUTP(MDP_BASE + DTV_BASE, 0); /* stop dtv */
+ if (pipe->ov_blt_addr == 0) {
+ MDP_OUTP(MDP_BASE + DTV_BASE, 0); /* stop dtv */
msleep(20);
+ mdp4_overlayproc_cfg(pipe);
+ mdp4_overlay_dmae_xy(pipe);
+ MDP_OUTP(MDP_BASE + DTV_BASE, 1); /* start dtv */
}
- mdp4_overlay_dmae_xy(dtv_pipe);
- mdp4_overlayproc_cfg(dtv_pipe);
- MDP_OUTP(MDP_BASE + DTV_BASE, 1); /* start dtv */
+ atomic_set(&vctrl->suspend, 0);
}
void mdp4_dtv_overlay_blt_start(struct msm_fb_data_type *mfd)
@@ -695,20 +966,23 @@
void mdp4_dtv_overlay(struct msm_fb_data_type *mfd)
{
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
struct mdp4_overlay_pipe *pipe;
+
if (!mfd->panel_power_on)
return;
- mutex_lock(&mfd->dma->ov_mutex);
- if (dtv_pipe == NULL) {
- if (mdp4_overlay_dtv_set(mfd, NULL)) {
- pr_warn("%s: dtv_pipe == NULL\n", __func__);
- mutex_unlock(&mfd->dma->ov_mutex);
- return;
- }
- }
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->base_pipe == NULL)
+ mdp4_overlay_dtv_set(mfd, NULL);
- pipe = dtv_pipe;
+ pipe = vctrl->base_pipe;
+
+ if (pipe == NULL) {
+ pr_warn("%s: dtv_pipe == NULL\n", __func__);
+ return;
+ }
if (hdmi_prim_display && (pipe->pipe_used == 0 ||
pipe->mixer_stage != MDP4_MIXER_STAGE_BASE)) {
@@ -717,14 +991,9 @@
return;
}
- if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
- pipe->srcp0_addr = (uint32) mfd->ibuf.buf;
- mdp4_overlay_rgb_setup(pipe);
+ if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
+ pipe->srcp0_addr = (uint32)mfd->ibuf.buf;
+ mdp4_dtv_pipe_queue(0, pipe);
}
- mdp4_overlay_reg_flush(pipe, 1);
- mdp4_mixer_stage_up(pipe);
- mdp4_overlay_dtv_start();
- mdp4_overlay_dtv_ov_done_push(mfd, pipe);
- mdp4_iommu_unmap(pipe);
- mutex_unlock(&mfd->dma->ov_mutex);
+ mdp4_dtv_pipe_commit();
}
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index bcc4ea6..57793fc 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -41,17 +41,338 @@
int first_pixel_start_x;
int first_pixel_start_y;
+
static int lcdc_enabled;
-static struct mdp4_overlay_pipe *lcdc_pipe;
-static struct completion lcdc_comp;
+#define MAX_CONTROLLER 1
-void mdp4_lcdc_base_swap(struct mdp4_overlay_pipe *pipe)
+static struct vsycn_ctrl {
+ struct device *dev;
+ int inited;
+ int update_ndx;
+ uint32 dmap_intr_cnt;
+ atomic_t suspend;
+ int dmap_wait_cnt;
+ int wait_vsync_cnt;
+ int blt_change;
+ int fake_vsync;
+ struct mutex update_lock;
+ struct completion dmap_comp;
+ struct completion vsync_comp;
+ spinlock_t spin_lock;
+ struct mdp4_overlay_pipe *base_pipe;
+ struct vsync_update vlist[2];
+ int vsync_irq_enabled;
+ ktime_t vsync_time;
+ struct work_struct vsync_work;
+} vsync_ctrl_db[MAX_CONTROLLER];
+
+
+/*******************************************************
+to do:
+1) move vsync_irq_enable/vsync_irq_disable to mdp.c to be shared
+*******************************************************/
+static void vsync_irq_enable(int intr, int term)
{
- lcdc_pipe = pipe;
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+ outp32(MDP_INTR_CLEAR,
+ INTR_DMA_P_DONE | INTR_OVERLAY0_DONE | INTR_PRIMARY_VSYNC);
+ mdp_intr_mask |= intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_enable_irq(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ pr_debug("%s: IRQ-en done, term=%x\n", __func__, term);
}
-int mdp_lcdc_on(struct platform_device *pdev)
+static void vsync_irq_disable(int intr, int term)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+ outp32(MDP_INTR_CLEAR,
+ INTR_DMA_P_DONE | INTR_OVERLAY0_DONE | INTR_PRIMARY_VSYNC);
+ mdp_intr_mask &= ~intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_disable_irq_nosync(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ pr_debug("%s: IRQ-dis done, term=%x\n", __func__, term);
+}
+
+static void mdp4_overlay_lcdc_start(void)
+{
+ if (!lcdc_enabled) {
+ /* enable DSI block */
+ mdp4_iommu_attach();
+ mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ MDP_OUTP(MDP_BASE + LCDC_BASE, 1);
+ lcdc_enabled = 1;
+ }
+}
+
+/*
+ * mdp4_lcdc_pipe_queue:
+ * called from thread context
+ */
+void mdp4_lcdc_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pp;
+ int undx;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ /* start timing generator & mmu if they are not started yet */
+ mdp4_overlay_lcdc_start();
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+
+ pp = &vp->plist[pipe->pipe_ndx - 1]; /* ndx start form 1 */
+
+ pr_debug("%s: vndx=%d pipe_ndx=%d pid=%d\n", __func__,
+ undx, pipe->pipe_ndx, current->pid);
+
+ *pp = *pipe; /* keep it */
+ vp->update_cnt++;
+ mutex_unlock(&vctrl->update_lock);
+ mdp4_stat.overlay_play[pipe->mixer_num]++;
+}
+
+
+static void mdp4_lcdc_blt_ov_update(struct mdp4_overlay_pipe *pipe);
+
+int mdp4_lcdc_pipe_commit(void)
+{
+
+ int i, undx;
+ int mixer = 0;
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+ int cnt = 0;
+
+ vctrl = &vsync_ctrl_db[0];
+
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+ pipe = vctrl->base_pipe;
+ mixer = pipe->mixer_num;
+
+ if (vp->update_cnt == 0) {
+ mutex_unlock(&vctrl->update_lock);
+ return 0;
+ }
+
+ vctrl->update_ndx++;
+ vctrl->update_ndx &= 0x01;
+ vp->update_cnt = 0; /* reset */
+ mutex_unlock(&vctrl->update_lock);
+
+ /* free previous committed iommu back to pool */
+ mdp4_overlay_iommu_unmap_freelist(mixer);
+
+ pipe = vp->plist;
+ for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+ if (pipe->pipe_used) {
+ cnt++;
+ mdp4_overlay_vsync_commit(pipe);
+ /* free previous iommu to freelist
+ * which will be freed at next
+ * pipe_commit
+ */
+ mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+ pipe->pipe_used = 0; /* clear */
+ }
+ }
+
+ mdp4_mixer_stage_commit(mixer);
+
+ pipe = vctrl->base_pipe;
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (pipe->ov_blt_addr) {
+ mdp4_lcdc_blt_ov_update(pipe);
+ pipe->blt_ov_done++;
+ vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+ mb();
+ pipe->blt_ov_koff++;
+ /* kickoff overlay engine */
+ outpdw(MDP_BASE + 0x0004, 0);
+ } else if (vctrl->dmap_intr_cnt == 0) {
+ /* schedule second phase update at dmap */
+ vctrl->dmap_intr_cnt++;
+ vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+ }
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ mdp4_stat.overlay_commit[pipe->mixer_num]++;
+
+ return cnt;
+}
+
+void mdp4_lcdc_vsync_ctrl(int cndx, int enable)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (vctrl->fake_vsync) {
+ vctrl->fake_vsync = 0;
+ schedule_work(&vctrl->vsync_work);
+ }
+
+ if (vctrl->vsync_irq_enabled == enable)
+ return;
+
+ pr_debug("%s: vsync enable=%d\n", __func__, enable);
+
+ vctrl->vsync_irq_enabled = enable;
+
+ if (enable)
+ vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
+ else
+ vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
+}
+
+void mdp4_lcdc_wait4vsync(int cndx, long long *vtime)
+{
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (atomic_read(&vctrl->suspend) > 0) {
+ *vtime = -1;
+ return;
+ }
+
+ /* start timing generator & mmu if they are not started yet */
+ mdp4_overlay_lcdc_start();
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+
+ if (vctrl->wait_vsync_cnt == 0)
+ INIT_COMPLETION(vctrl->vsync_comp);
+ vctrl->wait_vsync_cnt++;
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ wait_for_completion(&vctrl->vsync_comp);
+ mdp4_stat.wait4vsync0++;
+
+ *vtime = vctrl->vsync_time.tv64;
+}
+
+static void mdp4_lcdc_wait4dmap(int cndx)
+{
+ unsigned long flags;
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ /* start timing generator & mmu if they are not started yet */
+ mdp4_overlay_lcdc_start();
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (vctrl->dmap_wait_cnt == 0) {
+ INIT_COMPLETION(vctrl->dmap_comp);
+ if (vctrl->dmap_intr_cnt == 0) {
+ vctrl->dmap_intr_cnt++;
+ vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+ }
+ }
+ vctrl->dmap_wait_cnt++;
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ wait_for_completion(&vctrl->dmap_comp);
+ pr_debug("%s: pid=%d after wait\n", __func__, current->pid);
+}
+
+static void send_vsync_work(struct work_struct *work)
+{
+ struct vsycn_ctrl *vctrl =
+ container_of(work, typeof(*vctrl), vsync_work);
+ char buf[64];
+ char *envp[2];
+
+ snprintf(buf, sizeof(buf), "VSYNC=%llu",
+ ktime_to_ns(vctrl->vsync_time));
+ envp[0] = buf;
+ envp[1] = NULL;
+ kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+void mdp4_lcdc_vsync_init(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ pr_info("%s: ndx=%d\n", __func__, cndx);
+
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->inited)
+ return;
+
+ vctrl->inited = 1;
+ vctrl->update_ndx = 0;
+ mutex_init(&vctrl->update_lock);
+ init_completion(&vctrl->vsync_comp);
+ init_completion(&vctrl->dmap_comp);
+ atomic_set(&vctrl->suspend, 0);
+ spin_lock_init(&vctrl->spin_lock);
+ INIT_WORK(&vctrl->vsync_work, send_vsync_work);
+}
+
+void mdp4_lcdc_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ vctrl->base_pipe = pipe;
+}
+
+int mdp4_lcdc_on(struct platform_device *pdev)
{
int lcdc_width;
int lcdc_height;
@@ -90,8 +411,11 @@
struct fb_var_screeninfo *var;
struct msm_fb_data_type *mfd;
struct mdp4_overlay_pipe *pipe;
- int ret;
+ int ret = 0;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ vctrl = &vsync_ctrl_db[cndx];
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
if (!mfd)
@@ -100,21 +424,19 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
+ vctrl->dev = mfd->fbi->dev;
+
+ /* mdp clock on */
+ mdp_clk_ctrl(1);
+
fbi = mfd->fbi;
var = &fbi->var;
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- if (is_mdp4_hw_reset()) {
- mdp4_hw_init();
- outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
- }
-
bpp = fbi->var.bits_per_pixel / 8;
buf = (uint8 *) fbi->fix.smem_start;
buf_offset = calc_fb_offset(mfd, fbi, bpp);
- if (lcdc_pipe == NULL) {
+ if (vctrl->base_pipe == NULL) {
ptype = mdp4_overlay_format2type(mfd->fb_imgType);
if (ptype < 0)
printk(KERN_INFO "%s: format2type failed\n", __func__);
@@ -129,16 +451,17 @@
ret = mdp4_overlay_format2pipe(pipe);
if (ret < 0)
printk(KERN_INFO "%s: format2pipe failed\n", __func__);
- lcdc_pipe = pipe; /* keep it */
- init_completion(&lcdc_comp);
mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
pipe->ov_blt_addr = 0;
pipe->dma_blt_addr = 0;
+
+ vctrl->base_pipe = pipe; /* keep it */
} else {
- pipe = lcdc_pipe;
+ pipe = vctrl->base_pipe;
}
+
pipe->src_height = fbi->var.yres;
pipe->src_width = fbi->var.xres;
pipe->src_h = fbi->var.yres;
@@ -154,14 +477,16 @@
pipe->srcp0_ystride = fbi->fix.line_length;
pipe->bpp = bpp;
+ atomic_set(&vctrl->suspend, 0);
+
mdp4_overlay_dmap_xy(pipe);
mdp4_overlay_dmap_cfg(mfd, 1);
-
mdp4_overlay_rgb_setup(pipe);
+ mdp4_overlayproc_cfg(pipe);
+
mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_up(pipe);
- mdp4_overlayproc_cfg(pipe);
/*
* LCDC timing setting
@@ -238,6 +563,7 @@
ctrl_polarity =
(data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x4, hsync_ctrl);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x8, vsync_period);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0xc, vsync_pulse_width * hsync_period);
@@ -251,69 +577,56 @@
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x1c, active_hctl);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x20, active_v_start);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x24, active_v_end);
-
- mdp4_overlay_reg_flush(pipe, 1);
- mdp4_mixer_stage_up(pipe);
-
-#ifdef CONFIG_MSM_BUS_SCALING
- mdp_bus_scale_update_request(2);
-#endif
- mdp_histogram_ctrl_all(TRUE);
-
- ret = panel_next_on(pdev);
- if (ret == 0)
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- /* MDP cmd block disable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ mdp_histogram_ctrl_all(TRUE);
return ret;
}
-int mdp_lcdc_off(struct platform_device *pdev)
+int mdp4_lcdc_off(struct platform_device *pdev)
{
int ret = 0;
+ int cndx = 0;
struct msm_fb_data_type *mfd;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
- mutex_lock(&mfd->dma->ov_mutex);
+ atomic_set(&vctrl->suspend, 1);
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ while (vctrl->wait_vsync_cnt)
+ msleep(20); /* >= 17 ms */
+
MDP_OUTP(MDP_BASE + LCDC_BASE, 0);
+
lcdc_enabled = 0;
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
mdp_histogram_ctrl_all(FALSE);
- ret = panel_next_off(pdev);
- mutex_unlock(&mfd->dma->ov_mutex);
-
- /* delay to make sure the last frame finishes */
- msleep(20);
-
- /* dis-engage rgb0 from mixer0 */
- if (lcdc_pipe) {
+ if (pipe) {
if (mfd->ref_cnt == 0) {
/* adb stop */
- if (lcdc_pipe->pipe_type == OVERLAY_TYPE_BF)
- mdp4_overlay_borderfill_stage_down(lcdc_pipe);
+ if (pipe->pipe_type == OVERLAY_TYPE_BF)
+ mdp4_overlay_borderfill_stage_down(pipe);
- /* lcdc_pipe == rgb1 */
- mdp4_overlay_unset_mixer(lcdc_pipe->mixer_num);
- lcdc_pipe = NULL;
+ mdp4_overlay_unset_mixer(pipe->mixer_num);
+ vctrl->base_pipe = NULL;
} else {
- mdp4_mixer_stage_down(lcdc_pipe);
- mdp4_iommu_unmap(lcdc_pipe);
+ /* system suspending */
+ mdp4_mixer_stage_down(vctrl->base_pipe);
+ mdp4_overlay_iommu_pipe_free(
+ vctrl->base_pipe->pipe_ndx, 1);
}
}
-#ifdef CONFIG_MSM_BUS_SCALING
- mdp_bus_scale_update_request(0);
-#endif
+ vctrl->fake_vsync = 1;
+
+ /* MDP clock disable */
+ mdp_clk_ctrl(0);
+ mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
return ret;
}
@@ -335,7 +648,7 @@
bpp = 3; /* overlay ouput is RGB888 */
#endif
off = 0;
- if (pipe->ov_cnt & 0x01)
+ if (pipe->blt_ov_done & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
addr = pipe->ov_blt_addr + off;
@@ -360,7 +673,7 @@
bpp = 3; /* overlay ouput is RGB888 */
#endif
off = 0;
- if (pipe->dmap_cnt & 0x01)
+ if (pipe->blt_dmap_done & 0x01)
off = pipe->src_height * pipe->src_width * bpp;
addr = pipe->dma_blt_addr + off;
@@ -368,97 +681,10 @@
MDP_OUTP(MDP_BASE + 0x90008, addr);
}
-/*
- * mdp4_overlay_lcdc_wait4event:
- * INTR_DMA_P_DONE and INTR_PRIMARY_VSYNC event only
- * no INTR_OVERLAY0_DONE event allowed.
- */
-static void mdp4_overlay_lcdc_wait4event(struct msm_fb_data_type *mfd,
- int intr_done)
+void mdp4_overlay_lcdc_set_perf(struct msm_fb_data_type *mfd)
{
- unsigned long flag;
- unsigned int data;
-
- data = inpdw(MDP_BASE + LCDC_BASE);
- data &= 0x01;
- if (data == 0) /* timing generator disabled */
- return;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- INIT_COMPLETION(lcdc_comp);
- mfd->dma->waiting = TRUE;
- outp32(MDP_INTR_CLEAR, intr_done);
- mdp_intr_mask |= intr_done;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- mdp_enable_irq(MDP_DMA2_TERM); /* enable intr */
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- wait_for_completion(&lcdc_comp);
- mdp_disable_irq(MDP_DMA2_TERM);
-}
-
-static void mdp4_overlay_lcdc_dma_busy_wait(struct msm_fb_data_type *mfd)
-{
- unsigned long flag;
- int need_wait = 0;
-
- pr_debug("%s: start pid=%d\n", __func__, current->pid);
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (mfd->dma->busy == TRUE) {
- INIT_COMPLETION(mfd->dma->comp);
- need_wait++;
- }
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- if (need_wait) {
- /* wait until DMA finishes the current job */
- pr_debug("%s: pending pid=%d\n", __func__, current->pid);
- wait_for_completion(&mfd->dma->comp);
- }
- pr_debug("%s: done pid=%d\n", __func__, current->pid);
-}
-
-void mdp4_overlay_lcdc_start(void)
-{
- if (!lcdc_enabled) {
- /* enable LCDC block */
- mdp4_iommu_attach();
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- MDP_OUTP(MDP_BASE + LCDC_BASE, 1);
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- lcdc_enabled = 1;
- }
-}
-
-void mdp4_overlay_lcdc_vsync_push(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
- unsigned long flag;
-
- if (pipe->flags & MDP_OV_PLAY_NOWAIT)
- return;
-
- if (lcdc_pipe->ov_blt_addr) {
- mdp4_overlay_lcdc_dma_busy_wait(mfd);
-
- mdp4_lcdc_blt_ov_update(lcdc_pipe);
- lcdc_pipe->ov_cnt++;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
- mdp_intr_mask |= INTR_OVERLAY0_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- mdp_enable_irq(MDP_OVERLAY0_TERM);
- mfd->dma->busy = TRUE;
- mb(); /* make sure all registers updated */
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay engine */
- mdp4_stat.kickoff_ov0++;
- mb();
- mdp4_overlay_lcdc_wait4event(mfd, INTR_DMA_P_DONE);
- } else {
- mdp4_overlay_lcdc_wait4event(mfd, INTR_PRIMARY_VSYNC);
- }
+ mdp4_lcdc_wait4dmap(0);
+ /* change mdp clk while mdp is idle */
mdp4_set_perf_level();
}
@@ -467,118 +693,138 @@
*/
void mdp4_primary_vsync_lcdc(void)
{
- complete_all(&lcdc_comp);
+ int cndx;
+ struct vsycn_ctrl *vctrl;
+
+ cndx = 0;
+ vctrl = &vsync_ctrl_db[cndx];
+ pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
+ vctrl->vsync_time = ktime_get();
+ schedule_work(&vctrl->vsync_work);
+
+ spin_lock(&vctrl->spin_lock);
+ if (vctrl->wait_vsync_cnt) {
+ complete_all(&vctrl->vsync_comp);
+ vctrl->wait_vsync_cnt = 0;
+ }
+
+ spin_unlock(&vctrl->spin_lock);
}
/*
* mdp4_dma_p_done_lcdc: called from isr
*/
-void mdp4_dma_p_done_lcdc(void)
+void mdp4_dmap_done_lcdc(int cndx)
{
- complete_all(&lcdc_comp);
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ spin_lock(&vctrl->spin_lock);
+ if (vctrl->blt_change) {
+ mdp4_overlayproc_cfg(pipe);
+ mdp4_overlay_dmap_xy(pipe);
+ if (pipe->ov_blt_addr) {
+ mdp4_lcdc_blt_ov_update(pipe);
+ pipe->blt_ov_done++;
+
+ /* Prefill one frame */
+ vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+ /* kickoff overlay0 engine */
+ outpdw(MDP_BASE + 0x0004, 0);
+ }
+ vctrl->blt_change = 0;
+ }
+
+ vctrl->dmap_intr_cnt--;
+ if (vctrl->dmap_wait_cnt) {
+ complete_all(&vctrl->dmap_comp);
+ vctrl->dmap_wait_cnt = 0; /* reset */
+ } else {
+ mdp4_overlay_dma_commit(cndx);
+ }
+ vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+ spin_unlock(&vctrl->spin_lock);
}
/*
* mdp4_overlay0_done_lcdc: called from isr
*/
-void mdp4_overlay0_done_lcdc(struct mdp_dma_data *dma)
+void mdp4_overlay0_done_lcdc(int cndx)
{
- spin_lock(&mdp_spin_lock);
- dma->busy = FALSE;
- if (lcdc_pipe->ov_blt_addr == 0) {
- spin_unlock(&mdp_spin_lock);
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ spin_lock(&vctrl->spin_lock);
+ if (pipe->ov_blt_addr == 0) {
+ spin_unlock(&vctrl->spin_lock);
return;
}
- mdp4_lcdc_blt_dmap_update(lcdc_pipe);
- lcdc_pipe->dmap_cnt++;
+
+ mdp4_lcdc_blt_dmap_update(pipe);
+ pipe->blt_dmap_done++;
mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
- spin_unlock(&mdp_spin_lock);
- complete(&dma->comp);
+ spin_unlock(&vctrl->spin_lock);
}
-static void mdp4_overlay_lcdc_prefill(struct msm_fb_data_type *mfd)
-{
- unsigned long flag;
-
- if (lcdc_pipe->ov_blt_addr) {
- mdp4_overlay_lcdc_dma_busy_wait(mfd);
-
- mdp4_lcdc_blt_ov_update(lcdc_pipe);
- lcdc_pipe->ov_cnt++;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
- mdp_intr_mask |= INTR_OVERLAY0_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- mdp_enable_irq(MDP_OVERLAY0_TERM);
- mfd->dma->busy = TRUE;
- mb(); /* make sure all registers updated */
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay engine */
- mdp4_stat.kickoff_ov0++;
- mb();
- }
-}
-/*
- * make sure the WRITEBACK_SIZE defined at boardfile
- * has enough space h * w * 3 * 2
- */
static void mdp4_lcdc_do_blt(struct msm_fb_data_type *mfd, int enable)
{
unsigned long flag;
- int change = 0;
+ int data;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
- if (!mfd->ov0_wb_buf->write_addr) {
- pr_debug("%s: no blt_base assigned\n", __func__);
+ if (mfd->ov0_wb_buf->write_addr == 0) {
+ pr_info("%s: no blt_base assigned\n", __func__);
return;
}
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (enable && lcdc_pipe->ov_blt_addr == 0) {
- lcdc_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
- lcdc_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
- change++;
- lcdc_pipe->blt_cnt = 0;
- lcdc_pipe->ov_cnt = 0;
- lcdc_pipe->dmap_cnt = 0;
+ spin_lock_irqsave(&vctrl->spin_lock, flag);
+ if (enable && pipe->ov_blt_addr == 0) {
+ pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+ pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+ pipe->blt_cnt = 0;
+ pipe->ov_cnt = 0;
+ pipe->blt_dmap_done = 0;
+ pipe->blt_ov_koff = 0;
+ pipe->blt_ov_done = 0;
mdp4_stat.blt_lcdc++;
- } else if (enable == 0 && lcdc_pipe->ov_blt_addr) {
- lcdc_pipe->ov_blt_addr = 0;
- lcdc_pipe->dma_blt_addr = 0;
- change++;
+ vctrl->blt_change++;
+ } else if (enable == 0 && pipe->ov_blt_addr) {
+ pipe->ov_blt_addr = 0;
+ pipe->dma_blt_addr = 0;
+ vctrl->blt_change++;
}
- pr_info("%s: ov_blt_addr=%x\n", __func__, (int)lcdc_pipe->ov_blt_addr);
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- if (!change)
+ pr_info("%s: enable=%d change=%d blt_addr=%x\n", __func__,
+ enable, vctrl->blt_change, (int)pipe->ov_blt_addr);
+
+ if (!vctrl->blt_change) {
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
return;
-
- if (lcdc_enabled) {
- mdp4_overlay_lcdc_wait4event(mfd, INTR_DMA_P_DONE);
- MDP_OUTP(MDP_BASE + LCDC_BASE, 0); /* stop lcdc */
- msleep(20);
}
- mdp4_overlay_dmap_xy(lcdc_pipe);
- mdp4_overlayproc_cfg(lcdc_pipe);
- if (lcdc_pipe->ov_blt_addr) {
- mdp4_overlay_lcdc_prefill(mfd);
- mdp4_overlay_lcdc_prefill(mfd);
- }
- MDP_OUTP(MDP_BASE + LCDC_BASE, 1); /* start lcdc */
-}
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
-int mdp4_lcdc_overlay_blt_offset(struct msm_fb_data_type *mfd,
- struct msmfb_overlay_blt *req)
-{
- req->offset = 0;
- req->width = lcdc_pipe->src_width;
- req->height = lcdc_pipe->src_height;
- req->bpp = lcdc_pipe->bpp;
-
- return sizeof(*req);
+ data = inpdw(MDP_BASE + LCDC_BASE);
+ data &= 0x01;
+ if (data) /* timing generator enabled */
+ mdp4_lcdc_wait4dmap(0);
}
void mdp4_lcdc_overlay_blt(struct msm_fb_data_type *mfd,
@@ -603,36 +849,32 @@
uint8 *buf;
unsigned int buf_offset;
int bpp;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
struct mdp4_overlay_pipe *pipe;
- if (!mfd->panel_power_on)
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (!pipe || !mfd->panel_power_on)
return;
- /* no need to power on cmd block since it's lcdc mode */
- bpp = fbi->var.bits_per_pixel / 8;
- buf = (uint8 *) fbi->fix.smem_start;
- buf_offset = calc_fb_offset(mfd, fbi, bpp);
+ pr_debug("%s: cpu=%d pid=%d\n", __func__,
+ smp_processor_id(), current->pid);
+ if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
+ bpp = fbi->var.bits_per_pixel / 8;
+ buf = (uint8 *) fbi->fix.smem_start;
+ buf_offset = calc_fb_offset(mfd, fbi, bpp);
- mutex_lock(&mfd->dma->ov_mutex);
+ if (mfd->display_iova)
+ pipe->srcp0_addr = mfd->display_iova + buf_offset;
+ else
+ pipe->srcp0_addr = (uint32)(buf + buf_offset);
- pipe = lcdc_pipe;
- if (pipe->pipe_used == 0 ||
- pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) {
- pr_err("%s: NOT baselayer\n", __func__);
- mutex_unlock(&mfd->dma->ov_mutex);
- return;
+ mdp4_lcdc_pipe_queue(0, pipe);
}
- if (mfd->display_iova)
- pipe->srcp0_addr = mfd->display_iova + buf_offset;
- else
- pipe->srcp0_addr = (uint32)(buf + buf_offset);
-
- mdp4_overlay_rgb_setup(pipe);
- mdp4_overlay_reg_flush(pipe, 1);
- mdp4_mixer_stage_up(pipe);
- mdp4_overlay_lcdc_start();
- mdp4_overlay_lcdc_vsync_push(mfd, pipe);
- mdp4_iommu_unmap(pipe);
- mutex_unlock(&mfd->dma->ov_mutex);
+ mdp4_lcdc_pipe_commit();
+ mdp4_lcdc_wait4dmap(0);
}
diff --git a/drivers/video/msm/mdp4_overlay_mddi.c b/drivers/video/msm/mdp4_overlay_mddi.c
index c4e6793..103419e 100644
--- a/drivers/video/msm/mdp4_overlay_mddi.c
+++ b/drivers/video/msm/mdp4_overlay_mddi.c
@@ -354,6 +354,10 @@
outpdw(overlay_base + 0x001c, addr2);
}
+void mdp4_primary_rdptr(void)
+{
+}
+
/*
* mdp4_dmap_done_mddi: called from isr
*/
diff --git a/drivers/video/msm/mdp4_util.c b/drivers/video/msm/mdp4_util.c
index 4c0e28f..573e317 100644
--- a/drivers/video/msm/mdp4_util.c
+++ b/drivers/video/msm/mdp4_util.c
@@ -409,35 +409,53 @@
goto out;
panel = mdp4_overlay_panel_list();
- if (isr & INTR_PRIMARY_VSYNC) {
- mdp4_stat.intr_vsync_p++;
+
+ if (isr & INTR_DMA_P_DONE) {
+ mdp4_stat.intr_dma_p++;
dma = &dma2_data;
- spin_lock(&mdp_spin_lock);
- mdp_intr_mask &= ~INTR_PRIMARY_VSYNC;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->waiting = FALSE;
if (panel & MDP4_PANEL_LCDC)
- mdp4_primary_vsync_lcdc();
+ mdp4_dmap_done_lcdc(0);
+#ifdef CONFIG_FB_MSM_OVERLAY
#ifdef CONFIG_FB_MSM_MIPI_DSI
else if (panel & MDP4_PANEL_DSI_VIDEO)
- mdp4_primary_vsync_dsi_video();
+ mdp4_dmap_done_dsi_video(0);
+ else if (panel & MDP4_PANEL_DSI_CMD)
+ mdp4_dmap_done_dsi_cmd(0);
+#else
+ else { /* MDDI */
+ mdp4_dma_p_done_mddi(dma);
+ mdp_pipe_ctrl(MDP_DMA2_BLOCK,
+ MDP_BLOCK_POWER_OFF, TRUE);
+ complete(&dma->comp);
+ }
#endif
- spin_unlock(&mdp_spin_lock);
+#else
+ else {
+ spin_lock(&mdp_spin_lock);
+ dma->busy = FALSE;
+ spin_unlock(&mdp_spin_lock);
+ complete(&dma->comp);
+ }
+#endif
}
-#ifdef CONFIG_FB_MSM_DTV
- if (isr & INTR_EXTERNAL_VSYNC) {
- mdp4_stat.intr_vsync_e++;
- dma = &dma_e_data;
- spin_lock(&mdp_spin_lock);
- mdp_intr_mask &= ~INTR_EXTERNAL_VSYNC;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->waiting = FALSE;
- if (panel & MDP4_PANEL_DTV)
- mdp4_external_vsync_dtv();
- spin_unlock(&mdp_spin_lock);
- }
+ if (isr & INTR_DMA_S_DONE) {
+ mdp4_stat.intr_dma_s++;
+#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
+ dma = &dma2_data;
+#else
+ dma = &dma_s_data;
#endif
+ dma->busy = FALSE;
+ mdp_pipe_ctrl(MDP_DMA_S_BLOCK,
+ MDP_BLOCK_POWER_OFF, TRUE);
+ complete(&dma->comp);
+ }
+ if (isr & INTR_DMA_E_DONE) {
+ mdp4_stat.intr_dma_e++;
+ if (panel & MDP4_PANEL_DTV)
+ mdp4_dmae_done_dtv();
+ }
#ifdef CONFIG_FB_MSM_OVERLAY
if (isr & INTR_OVERLAY0_DONE) {
mdp4_stat.intr_overlay0++;
@@ -450,15 +468,15 @@
dma->waiting = FALSE;
spin_unlock(&mdp_spin_lock);
if (panel & MDP4_PANEL_LCDC)
- mdp4_overlay0_done_lcdc(dma);
+ mdp4_overlay0_done_lcdc(0);
#ifdef CONFIG_FB_MSM_MIPI_DSI
else if (panel & MDP4_PANEL_DSI_VIDEO)
- mdp4_overlay0_done_dsi_video(dma);
+ mdp4_overlay0_done_dsi_video(0);
#endif
} else { /* MDDI, DSI_CMD */
#ifdef CONFIG_FB_MSM_MIPI_DSI
if (panel & MDP4_PANEL_DSI_CMD)
- mdp4_overlay0_done_dsi_cmd(dma);
+ mdp4_overlay0_done_dsi_cmd(0);
#else
if (panel & MDP4_PANEL_MDDI)
mdp4_overlay0_done_mddi(dma);
@@ -500,75 +518,20 @@
#endif
#endif /* OVERLAY */
- if (isr & INTR_DMA_P_DONE) {
- mdp4_stat.intr_dma_p++;
- dma = &dma2_data;
- if (panel & MDP4_PANEL_LCDC) {
- /* disable LCDC interrupt */
- spin_lock(&mdp_spin_lock);
- mdp_intr_mask &= ~INTR_DMA_P_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->waiting = FALSE;
- mdp4_dma_p_done_lcdc();
- spin_unlock(&mdp_spin_lock);
- }
-#ifdef CONFIG_FB_MSM_OVERLAY
-#ifdef CONFIG_FB_MSM_MIPI_DSI
- else if (panel & MDP4_PANEL_DSI_VIDEO) {
- /* disable LCDC interrupt */
- spin_lock(&mdp_spin_lock);
- mdp_intr_mask &= ~INTR_DMA_P_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->waiting = FALSE;
- mdp4_dma_p_done_dsi_video(dma);
- spin_unlock(&mdp_spin_lock);
- } else if (panel & MDP4_PANEL_DSI_CMD) {
- mdp4_dma_p_done_dsi(dma);
- }
-#else
- else { /* MDDI */
- mdp4_dma_p_done_mddi(dma);
- mdp_pipe_ctrl(MDP_DMA2_BLOCK,
- MDP_BLOCK_POWER_OFF, TRUE);
- complete(&dma->comp);
- }
-#endif
-#else
- else {
- spin_lock(&mdp_spin_lock);
- dma->busy = FALSE;
- spin_unlock(&mdp_spin_lock);
- complete(&dma->comp);
- }
-#endif
+ if (isr & INTR_PRIMARY_VSYNC) {
+ mdp4_stat.intr_vsync_p++;
+ if (panel & MDP4_PANEL_LCDC)
+ mdp4_primary_vsync_lcdc();
+ else if (panel & MDP4_PANEL_DSI_VIDEO)
+ mdp4_primary_vsync_dsi_video();
}
- if (isr & INTR_DMA_S_DONE) {
- mdp4_stat.intr_dma_s++;
-#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
- dma = &dma2_data;
-#else
- dma = &dma_s_data;
+#ifdef CONFIG_FB_MSM_DTV
+ if (isr & INTR_EXTERNAL_VSYNC) {
+ mdp4_stat.intr_vsync_e++;
+ if (panel & MDP4_PANEL_DTV)
+ mdp4_external_vsync_dtv();
+ }
#endif
-
- dma->busy = FALSE;
- mdp_pipe_ctrl(MDP_DMA_S_BLOCK,
- MDP_BLOCK_POWER_OFF, TRUE);
- complete(&dma->comp);
- }
- if (isr & INTR_DMA_E_DONE) {
- mdp4_stat.intr_dma_e++;
- dma = &dma_e_data;
- spin_lock(&mdp_spin_lock);
- mdp_intr_mask &= ~INTR_DMA_E_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->busy = FALSE;
- mdp4_dma_e_done_dtv();
- if (dma->waiting) {
- dma->waiting = FALSE;
- complete(&dma->comp);
- }
- spin_unlock(&mdp_spin_lock);
- }
if (isr & INTR_DMA_P_HISTOGRAM) {
mdp4_stat.intr_histogram++;
ret = mdp_histogram_block2mgmt(MDP_BLOCK_DMA_P, &mgmt);
@@ -593,6 +556,10 @@
if (!ret)
mdp_histogram_handle_isr(mgmt);
}
+ if (isr & INTR_PRIMARY_RDPTR) {
+ mdp4_stat.intr_rdptr++;
+ mdp4_primary_rdptr();
+ }
out:
mdp_is_in_isr = FALSE;
@@ -2672,9 +2639,9 @@
DISPLAY_READ_DOMAIN, GEN_POOL);
}
ion_free(mfd->iclient, buf->ihdl);
- pr_debug("%s:%d free writeback imem\n", __func__,
- __LINE__);
buf->ihdl = NULL;
+ pr_info("%s:%d free ION writeback imem",
+ __func__, __LINE__);
}
} else {
if (buf->write_addr) {
@@ -3276,71 +3243,92 @@
return valid;
}
-static int mdp4_qseed_write_cfg(struct mdp_qseed_cfg_data *cfg)
+int mdp4_qseed_access_cfg(struct mdp_qseed_cfg *config, uint32_t base)
{
int i, ret = 0;
- uint32_t base = (uint32_t) (MDP_BASE + mdp_block2base(cfg->block));
uint32_t *values;
- if ((cfg->table_num != 1) && (cfg->table_num != 2)) {
+ if ((config->table_num != 1) && (config->table_num != 2)) {
ret = -ENOTTY;
goto error;
}
- if (((cfg->table_num == 1) && (cfg->len != QSEED_TABLE_1_COUNT)) ||
- ((cfg->table_num == 2) && (cfg->len != QSEED_TABLE_2_COUNT))) {
+ if (((config->table_num == 1) && (config->len != QSEED_TABLE_1_COUNT))
+ || ((config->table_num == 2) &&
+ (config->len != QSEED_TABLE_2_COUNT))) {
ret = -EINVAL;
goto error;
}
- values = kmalloc(cfg->len * sizeof(uint32_t), GFP_KERNEL);
+ values = kmalloc(config->len * sizeof(uint32_t), GFP_KERNEL);
if (!values) {
ret = -ENOMEM;
goto error;
}
- ret = copy_from_user(values, cfg->data, sizeof(uint32_t) * cfg->len);
+ base += (config->table_num == 1) ? MDP4_QSEED_TABLE1_OFF :
+ MDP4_QSEED_TABLE2_OFF;
- base += (cfg->table_num == 1) ? MDP4_QSEED_TABLE1_OFF :
- MDP4_QSEED_TABLE2_OFF;
- for (i = 0; i < cfg->len; i++) {
- MDP_OUTP(base , values[i]);
- base += sizeof(uint32_t);
+ if (config->ops & MDP_PP_OPS_WRITE) {
+ ret = copy_from_user(values, config->data,
+ sizeof(uint32_t) * config->len);
+ if (ret) {
+ pr_warn("%s: Error copying from user, %d", __func__,
+ ret);
+ ret = -EINVAL;
+ goto err_mem;
+ }
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ for (i = 0; i < config->len; i++) {
+ if (!(base & 0x3FF))
+ wmb();
+ MDP_OUTP(base , values[i]);
+ base += sizeof(uint32_t);
+ }
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ } else if (config->ops & MDP_PP_OPS_READ) {
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ for (i = 0; i < config->len; i++) {
+ values[i] = inpdw(base);
+ if (!(base & 0x3FF))
+ rmb();
+ base += sizeof(uint32_t);
+ }
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ ret = copy_to_user(config->data, values,
+ sizeof(uint32_t) * config->len);
+ if (ret) {
+ pr_warn("%s: Error copying to user, %d", __func__, ret);
+ ret = -EINVAL;
+ goto err_mem;
+ }
}
+err_mem:
kfree(values);
error:
return ret;
}
-int mdp4_qseed_cfg(struct mdp_qseed_cfg_data *cfg)
+int mdp4_qseed_cfg(struct mdp_qseed_cfg_data *config)
{
int ret = 0;
+ struct mdp_qseed_cfg *cfg = &config->qseed_data;
+ uint32_t base;
- if (!mdp4_pp_block2qseed(cfg->block)) {
+ if (!mdp4_pp_block2qseed(config->block)) {
ret = -ENOTTY;
goto error;
}
- if (cfg->table_num != 1) {
- ret = -ENOTTY;
- pr_info("%s: Only QSEED table1 supported.\n", __func__);
+ if ((cfg->ops & MDP_PP_OPS_READ) && (cfg->ops & MDP_PP_OPS_WRITE)) {
+ ret = -EPERM;
+ pr_warn("%s: Cannot read and write on the same request\n",
+ __func__);
goto error;
}
-
- switch ((cfg->ops & 0x6) >> 1) {
- case 0x1:
- pr_info("%s: QSEED read not supported\n", __func__);
- ret = -ENOTTY;
- break;
- case 0x2:
- ret = mdp4_qseed_write_cfg(cfg);
- if (ret)
- goto error;
- break;
- default:
- break;
- }
+ base = (uint32_t) (MDP_BASE + mdp_block2base(config->block));
+ ret = mdp4_qseed_access_cfg(cfg, base);
error:
return ret;
diff --git a/drivers/video/msm/mdp_debugfs.c b/drivers/video/msm/mdp_debugfs.c
index 4a0ea4c..0fad0a7 100644
--- a/drivers/video/msm/mdp_debugfs.c
+++ b/drivers/video/msm/mdp_debugfs.c
@@ -337,7 +337,7 @@
bp += len;
dlen -= len;
len = snprintf(bp, dlen, "read_ptr: %08lu\n\n",
- mdp4_stat.intr_rd_ptr);
+ mdp4_stat.intr_rdptr);
bp += len;
dlen -= len;
len = snprintf(bp, dlen, "dsi:\n");
@@ -412,10 +412,14 @@
mdp4_stat.overlay_unset[0]);
bp += len;
dlen -= len;
- len = snprintf(bp, dlen, "play: %08lu\n",
+ len = snprintf(bp, dlen, "play: %08lu\t",
mdp4_stat.overlay_play[0]);
bp += len;
dlen -= len;
+ len = snprintf(bp, dlen, "commit: %08lu\n",
+ mdp4_stat.overlay_commit[0]);
+ bp += len;
+ dlen -= len;
len = snprintf(bp, dlen, "overlay1_play:\n");
bp += len;
@@ -428,29 +432,56 @@
mdp4_stat.overlay_unset[1]);
bp += len;
dlen -= len;
- len = snprintf(bp, dlen, "play: %08lu\n\n",
+ len = snprintf(bp, dlen, "play: %08lu\t",
mdp4_stat.overlay_play[1]);
bp += len;
dlen -= len;
+ len = snprintf(bp, dlen, "commit: %08lu\n\n",
+ mdp4_stat.overlay_commit[1]);
+ bp += len;
+ dlen -= len;
len = snprintf(bp, dlen, "frame_push:\n");
bp += len;
dlen -= len;
- len = snprintf(bp, dlen, "rgb1: %08lu\t",
- mdp4_stat.pipe[OVERLAY_PIPE_RGB1]);
+ len = snprintf(bp, dlen, "vg1 : %08lu\t", mdp4_stat.pipe[0]);
bp += len;
dlen -= len;
- len = snprintf(bp, dlen, "rgb2: %08lu\n",
- mdp4_stat.pipe[OVERLAY_PIPE_RGB2]);
+ len = snprintf(bp, dlen, "vg2 : %08lu\t", mdp4_stat.pipe[1]);
bp += len;
dlen -= len;
- len = snprintf(bp, dlen, "vg1 : %08lu\t",
- mdp4_stat.pipe[OVERLAY_PIPE_VG1]);
+ len = snprintf(bp, dlen, "vg3 : %08lu\n", mdp4_stat.pipe[5]);
bp += len;
dlen -= len;
- len = snprintf(bp, dlen, "vg2 : %08lu\n",
- mdp4_stat.pipe[OVERLAY_PIPE_VG2]);
+ len = snprintf(bp, dlen, "rgb1: %08lu\t", mdp4_stat.pipe[2]);
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "rgb2: %08lu\t", mdp4_stat.pipe[3]);
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "rgb3: %08lu\n\n", mdp4_stat.pipe[4]);
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "wait4vsync: ");
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "mixer0 : %08lu\t", mdp4_stat.wait4vsync0);
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "mixer1: %08lu\n\n", mdp4_stat.wait4vsync1);
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "iommu: ");
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "map : %08lu\t", mdp4_stat.iommu_map);
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "unmap: %08lu\t", mdp4_stat.iommu_unmap);
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "drop: %08lu\n\n", mdp4_stat.iommu_drop);
bp += len;
dlen -= len;
len = snprintf(bp, dlen, "err_mixer : %08lu\t", mdp4_stat.err_mixer);
diff --git a/drivers/video/msm/mdp_vsync.c b/drivers/video/msm/mdp_vsync.c
index 87e74d9..966b40d 100644
--- a/drivers/video/msm/mdp_vsync.c
+++ b/drivers/video/msm/mdp_vsync.c
@@ -73,6 +73,20 @@
static unsigned char timer_shutdown_flag;
static uint32 vsync_cnt_cfg;
+
+
+void vsync_clk_enable(void)
+{
+ if (mdp_vsync_clk)
+ clk_prepare_enable(mdp_vsync_clk);
+}
+
+void vsync_clk_disable(void)
+{
+ if (mdp_vsync_clk)
+ clk_disable_unprepare(mdp_vsync_clk);
+}
+
void mdp_hw_vsync_clk_enable(struct msm_fb_data_type *mfd)
{
if (vsync_clk_status == 1)
diff --git a/drivers/video/msm/mipi_NT35510.c b/drivers/video/msm/mipi_NT35510.c
index e605aed..04178fa 100644
--- a/drivers/video/msm/mipi_NT35510.c
+++ b/drivers/video/msm/mipi_NT35510.c
@@ -486,22 +486,22 @@
rotate = mipi_nt35510_pdata->rotate_panel();
if (mipi->mode == DSI_VIDEO_MODE) {
- mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
+ mipi_dsi_cmds_tx(&nt35510_tx_buf,
nt35510_video_display_on_cmds,
ARRAY_SIZE(nt35510_video_display_on_cmds));
if (rotate) {
- mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
+ mipi_dsi_cmds_tx(&nt35510_tx_buf,
nt35510_video_display_on_cmds_rotate,
ARRAY_SIZE(nt35510_video_display_on_cmds_rotate));
}
} else if (mipi->mode == DSI_CMD_MODE) {
- mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
+ mipi_dsi_cmds_tx(&nt35510_tx_buf,
nt35510_cmd_display_on_cmds,
ARRAY_SIZE(nt35510_cmd_display_on_cmds));
if (rotate) {
- mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
+ mipi_dsi_cmds_tx(&nt35510_tx_buf,
nt35510_cmd_display_on_cmds_rotate,
ARRAY_SIZE(nt35510_cmd_display_on_cmds_rotate));
}
@@ -523,7 +523,7 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf, nt35510_display_off_cmds,
+ mipi_dsi_cmds_tx(&nt35510_tx_buf, nt35510_display_off_cmds,
ARRAY_SIZE(nt35510_display_off_cmds));
pr_debug("mipi_nt35510_lcd_off X\n");
diff --git a/drivers/video/msm/mipi_dsi.c b/drivers/video/msm/mipi_dsi.c
index ff8fd17..b4fb930 100644
--- a/drivers/video/msm/mipi_dsi.c
+++ b/drivers/video/msm/mipi_dsi.c
@@ -80,25 +80,6 @@
mdp4_overlay_dsi_state_set(ST_DSI_SUSPEND);
/*
- * Description: dsi clock is need to perform shutdown.
- * mdp4_dsi_cmd_dma_busy_wait() will enable dsi clock if disabled.
- * also, wait until dma (overlay and dmap) finish.
- */
- if (mfd->panel_info.type == MIPI_CMD_PANEL) {
- if (mdp_rev >= MDP_REV_41) {
- mdp4_dsi_cmd_del_timer();
- mdp4_dsi_cmd_dma_busy_wait(mfd);
- mdp4_dsi_blt_dmap_busy_wait(mfd);
- mipi_dsi_mdp_busy_wait(mfd);
- } else {
- mdp3_dsi_cmd_dma_busy_wait(mfd);
- }
- } else {
- /* video mode, wait until fifo cleaned */
- mipi_dsi_controller_cfg(0);
- }
-
- /*
 * Description: change to DSI_CMD_MODE since it is needed to
 * tx DCS display off command to panel
*/
diff --git a/drivers/video/msm/mipi_dsi.h b/drivers/video/msm/mipi_dsi.h
index 2bc49c0..ebbf362 100644
--- a/drivers/video/msm/mipi_dsi.h
+++ b/drivers/video/msm/mipi_dsi.h
@@ -264,8 +264,7 @@
void mipi_dsi_bist_ctrl(void);
int mipi_dsi_buf_alloc(struct dsi_buf *, int size);
int mipi_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm);
-int mipi_dsi_cmds_tx(struct msm_fb_data_type *mfd,
- struct dsi_buf *dp, struct dsi_cmd_desc *cmds, int cnt);
+int mipi_dsi_cmds_tx(struct dsi_buf *dp, struct dsi_cmd_desc *cmds, int cnt);
int mipi_dsi_cmd_dma_tx(struct dsi_buf *dp);
int mipi_dsi_cmd_reg_tx(uint32 data);
@@ -278,10 +277,14 @@
void mipi_dsi_cmd_mode_ctrl(int enable);
void mdp4_dsi_cmd_trigger(void);
void mipi_dsi_cmd_mdp_start(void);
+int mipi_dsi_ctrl_lock(int mdp);
+int mipi_dsi_ctrl_lock_query(void);
void mipi_dsi_cmd_bta_sw_trigger(void);
void mipi_dsi_ack_err_status(void);
void mipi_dsi_set_tear_on(struct msm_fb_data_type *mfd);
void mipi_dsi_set_tear_off(struct msm_fb_data_type *mfd);
+void mipi_dsi_set_backlight(struct msm_fb_data_type *mfd, int level);
+void mipi_dsi_cmd_backlight_tx(int level);
void mipi_dsi_clk_enable(void);
void mipi_dsi_clk_disable(void);
void mipi_dsi_pre_kickoff_action(void);
@@ -310,6 +313,7 @@
void cont_splash_clk_ctrl(int enable);
void mipi_dsi_turn_on_clks(void);
void mipi_dsi_turn_off_clks(void);
+void mipi_dsi_clk_cfg(int on);
#ifdef CONFIG_FB_MSM_MDP303
void update_lane_config(struct msm_panel_info *pinfo);
diff --git a/drivers/video/msm/mipi_dsi_host.c b/drivers/video/msm/mipi_dsi_host.c
index 7f1a435..4afffb0 100644
--- a/drivers/video/msm/mipi_dsi_host.c
+++ b/drivers/video/msm/mipi_dsi_host.c
@@ -46,6 +46,7 @@
static spinlock_t dsi_irq_lock;
static spinlock_t dsi_mdp_lock;
spinlock_t dsi_clk_lock;
+static int dsi_ctrl_lock;
static int dsi_mdp_busy;
static struct list_head pre_kickoff_list;
@@ -146,6 +147,30 @@
spin_unlock(&dsi_irq_lock);
}
+void mipi_dsi_clk_cfg(int on)
+{
+ unsigned long flags;
+ static int dsi_clk_cnt;
+
+ spin_lock_irqsave(&mdp_spin_lock, flags);
+ if (on) {
+ if (dsi_clk_cnt == 0) {
+ mipi_dsi_ahb_ctrl(1);
+ mipi_dsi_clk_enable();
+ }
+ dsi_clk_cnt++;
+ } else {
+ if (dsi_clk_cnt) {
+ dsi_clk_cnt--;
+ if (dsi_clk_cnt == 0) {
+ mipi_dsi_clk_disable();
+ mipi_dsi_ahb_ctrl(0);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&mdp_spin_lock, flags);
+}
+
void mipi_dsi_turn_on_clks(void)
{
mipi_dsi_ahb_ctrl(1);
@@ -982,6 +1007,27 @@
wmb();
}
+int mipi_dsi_ctrl_lock(int mdp)
+{
+ unsigned long flag;
+ int lock = 0;
+
+ spin_lock_irqsave(&dsi_mdp_lock, flag);
+ if (dsi_ctrl_lock == FALSE) {
+ dsi_ctrl_lock = TRUE;
+ lock = 1;
+ if (lock && mdp) /* mdp pixel */
+ mipi_dsi_enable_irq();
+ }
+ spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+ return lock;
+}
+
+int mipi_dsi_ctrl_lock_query(void)
+{
+ return dsi_ctrl_lock;
+}
+
void mipi_dsi_mdp_busy_wait(struct msm_fb_data_type *mfd)
{
unsigned long flag;
@@ -1011,19 +1057,14 @@
{
unsigned long flag;
-
- if (!in_interrupt())
- mipi_dsi_pre_kickoff_action();
-
mipi_dsi_mdp_stat_inc(STAT_DSI_START);
spin_lock_irqsave(&dsi_mdp_lock, flag);
- mipi_dsi_enable_irq();
- dsi_mdp_busy = TRUE;
+ mipi_dsi_enable_irq();
+ dsi_mdp_busy = TRUE;
spin_unlock_irqrestore(&dsi_mdp_lock, flag);
}
-
void mipi_dsi_cmd_bta_sw_trigger(void)
{
uint32 data;
@@ -1055,13 +1096,13 @@
void mipi_dsi_set_tear_on(struct msm_fb_data_type *mfd)
{
mipi_dsi_buf_init(&dsi_tx_buf);
- mipi_dsi_cmds_tx(mfd, &dsi_tx_buf, &dsi_tear_on_cmd, 1);
+ mipi_dsi_cmds_tx(&dsi_tx_buf, &dsi_tear_on_cmd, 1);
}
void mipi_dsi_set_tear_off(struct msm_fb_data_type *mfd)
{
mipi_dsi_buf_init(&dsi_tx_buf);
- mipi_dsi_cmds_tx(mfd, &dsi_tx_buf, &dsi_tear_off_cmd, 1);
+ mipi_dsi_cmds_tx(&dsi_tx_buf, &dsi_tear_off_cmd, 1);
}
int mipi_dsi_cmd_reg_tx(uint32 data)
@@ -1093,12 +1134,64 @@
return 4;
}
+static char led_pwm1[2] = {0x51, 0x0}; /* DTYPE_DCS_WRITE1 */
+
+static struct dsi_cmd_desc backlight_cmd = {
+ DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(led_pwm1), led_pwm1};
+
+/*
+ * mipi_dsi_cmd_backlight_tx:
+ * thread context only
+ */
+void mipi_dsi_cmd_backlight_tx(int level)
+{
+ struct dsi_buf *tp;
+ struct dsi_cmd_desc *cmd;
+ unsigned long flag;
+
+ spin_lock_irqsave(&dsi_mdp_lock, flag);
+ dsi_mdp_busy = TRUE;
+ spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+
+ led_pwm1[1] = (unsigned char)(level);
+ tp = &dsi_tx_buf;
+ cmd = &backlight_cmd;
+ mipi_dsi_buf_init(&dsi_tx_buf);
+
+ if (tp->dmap) {
+ dma_unmap_single(&dsi_dev, tp->dmap, tp->len, DMA_TO_DEVICE);
+ tp->dmap = 0;
+ }
+
+ mipi_dsi_enable_irq();
+ mipi_dsi_cmd_dma_add(tp, cmd);
+
+ tp->len += 3;
+ tp->len &= ~0x03; /* round up to a multiple of 4 */
+
+ tp->dmap = dma_map_single(&dsi_dev, tp->data, tp->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dsi_dev, tp->dmap))
+ pr_err("%s: dmap mapp failed\n", __func__);
+
+ MIPI_OUTP(MIPI_DSI_BASE + 0x044, tp->dmap);
+ MIPI_OUTP(MIPI_DSI_BASE + 0x048, tp->len);
+ wmb();
+ MIPI_OUTP(MIPI_DSI_BASE + 0x08c, 0x01); /* trigger */
+ wmb();
+
+ spin_lock_irqsave(&dsi_mdp_lock, flag);
+ dsi_mdp_busy = FALSE;
+ complete(&dsi_mdp_comp);
+ spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+}
+
/*
* mipi_dsi_cmds_tx:
- * ov_mutex need to be acquired before call this function.
+ * thread context only
*/
-int mipi_dsi_cmds_tx(struct msm_fb_data_type *mfd,
- struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt)
+int mipi_dsi_cmds_tx(struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt)
{
struct dsi_cmd_desc *cm;
uint32 dsi_ctrl, ctrl;
@@ -1115,29 +1208,16 @@
if (video_mode) {
ctrl = dsi_ctrl | 0x04; /* CMD_MODE_EN */
MIPI_OUTP(MIPI_DSI_BASE + 0x0000, ctrl);
- } else { /* cmd mode */
- /*
- * during boot up, cmd mode is configured
- * even it is video mode panel.
- */
- /* make sure mdp dma is not txing pixel data */
- if (mfd->panel_info.type == MIPI_CMD_PANEL) {
-#ifndef CONFIG_FB_MSM_MDP303
- mdp4_dsi_cmd_dma_busy_wait(mfd);
-#else
- mdp3_dsi_cmd_dma_busy_wait(mfd);
-#endif
- }
}
spin_lock_irqsave(&dsi_mdp_lock, flag);
- mipi_dsi_enable_irq();
dsi_mdp_busy = TRUE;
spin_unlock_irqrestore(&dsi_mdp_lock, flag);
cm = cmds;
mipi_dsi_buf_init(tp);
for (i = 0; i < cnt; i++) {
+ mipi_dsi_enable_irq();
mipi_dsi_buf_init(tp);
mipi_dsi_cmd_dma_add(tp, cm);
mipi_dsi_cmd_dma_tx(tp);
@@ -1146,15 +1226,14 @@
cm++;
}
- spin_lock_irqsave(&dsi_mdp_lock, flag);
- dsi_mdp_busy = FALSE;
- mipi_dsi_disable_irq();
- complete(&dsi_mdp_comp);
- spin_unlock_irqrestore(&dsi_mdp_lock, flag);
-
if (video_mode)
MIPI_OUTP(MIPI_DSI_BASE + 0x0000, dsi_ctrl); /* restore */
+ spin_lock_irqsave(&dsi_mdp_lock, flag);
+ dsi_mdp_busy = FALSE;
+ complete(&dsi_mdp_comp);
+ spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+
return cnt;
}
@@ -1183,8 +1262,8 @@
struct dsi_cmd_desc *cmds, int rlen)
{
int cnt, len, diff, pkt_size;
- unsigned long flag;
char cmd;
+ unsigned long flag;
if (mfd->panel_info.mipi.no_max_pkt_size) {
/* Only support rlen = 4*n */
@@ -1216,15 +1295,12 @@
if (mfd->panel_info.type == MIPI_CMD_PANEL) {
/* make sure mdp dma is not txing pixel data */
-#ifndef CONFIG_FB_MSM_MDP303
- mdp4_dsi_cmd_dma_busy_wait(mfd);
-#else
+#ifdef CONFIG_FB_MSM_MDP303
mdp3_dsi_cmd_dma_busy_wait(mfd);
#endif
}
spin_lock_irqsave(&dsi_mdp_lock, flag);
- mipi_dsi_enable_irq();
dsi_mdp_busy = TRUE;
spin_unlock_irqrestore(&dsi_mdp_lock, flag);
@@ -1232,16 +1308,20 @@
/* packet size need to be set at every read */
pkt_size = len;
max_pktsize[0] = pkt_size;
+ mipi_dsi_enable_irq();
mipi_dsi_buf_init(tp);
mipi_dsi_cmd_dma_add(tp, pkt_size_cmd);
mipi_dsi_cmd_dma_tx(tp);
}
+ mipi_dsi_enable_irq();
mipi_dsi_buf_init(tp);
mipi_dsi_cmd_dma_add(tp, cmds);
/* transmit read command to client */
mipi_dsi_cmd_dma_tx(tp);
+
+ mipi_dsi_disable_irq();
/*
* once cmd_dma_done interrupt received,
* return data from client is ready and stored
@@ -1260,7 +1340,6 @@
spin_lock_irqsave(&dsi_mdp_lock, flag);
dsi_mdp_busy = FALSE;
- mipi_dsi_disable_irq();
complete(&dsi_mdp_comp);
spin_unlock_irqrestore(&dsi_mdp_lock, flag);
@@ -1303,7 +1382,6 @@
int mipi_dsi_cmd_dma_tx(struct dsi_buf *tp)
{
- int len;
#ifdef DSI_HOST_DEBUG
int i;
@@ -1318,25 +1396,24 @@
pr_debug("\n");
#endif
- len = tp->len;
- len += 3;
- len &= ~0x03; /* multipled by 4 */
+ tp->len += 3;
+ tp->len &= ~0x03; /* round up to a multiple of 4 */
- tp->dmap = dma_map_single(&dsi_dev, tp->data, len, DMA_TO_DEVICE);
+ tp->dmap = dma_map_single(&dsi_dev, tp->data, tp->len, DMA_TO_DEVICE);
if (dma_mapping_error(&dsi_dev, tp->dmap))
pr_err("%s: dmap mapp failed\n", __func__);
INIT_COMPLETION(dsi_dma_comp);
MIPI_OUTP(MIPI_DSI_BASE + 0x044, tp->dmap);
- MIPI_OUTP(MIPI_DSI_BASE + 0x048, len);
+ MIPI_OUTP(MIPI_DSI_BASE + 0x048, tp->len);
wmb();
MIPI_OUTP(MIPI_DSI_BASE + 0x08c, 0x01); /* trigger */
wmb();
wait_for_completion(&dsi_dma_comp);
- dma_unmap_single(&dsi_dev, tp->dmap, len, DMA_TO_DEVICE);
+ dma_unmap_single(&dsi_dev, tp->dmap, tp->len, DMA_TO_DEVICE);
tp->dmap = 0;
return tp->len;
}
@@ -1459,7 +1536,6 @@
#ifdef CONFIG_FB_MSM_MDP40
mdp4_stat.intr_dsi++;
#endif
-
if (isr & DSI_INTR_ERROR) {
mipi_dsi_mdp_stat_inc(STAT_DSI_ERROR);
mipi_dsi_error();
@@ -1474,16 +1550,20 @@
if (isr & DSI_INTR_CMD_DMA_DONE) {
mipi_dsi_mdp_stat_inc(STAT_DSI_CMD);
complete(&dsi_dma_comp);
+ spin_lock(&dsi_mdp_lock);
+ dsi_ctrl_lock = FALSE;
+ mipi_dsi_disable_irq_nosync();
+ spin_unlock(&dsi_mdp_lock);
}
if (isr & DSI_INTR_CMD_MDP_DONE) {
mipi_dsi_mdp_stat_inc(STAT_DSI_MDP);
spin_lock(&dsi_mdp_lock);
- dsi_mdp_busy = FALSE;
+ dsi_ctrl_lock = FALSE;
mipi_dsi_disable_irq_nosync();
- spin_unlock(&dsi_mdp_lock);
+ dsi_mdp_busy = FALSE;
complete(&dsi_mdp_comp);
- mipi_dsi_post_kickoff_action();
+ spin_unlock(&dsi_mdp_lock);
}
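
For reference, a minimal sketch of how a panel driver is expected to use the dsi_ctrl_lock handshake added above: take the controller with mipi_dsi_ctrl_lock() before queuing DCS commands, and rely on the CMD_DMA_DONE/CMD_MDP_DONE interrupt paths to release the lock. The "foo" panel names are hypothetical; the pattern mirrors the mipi_novatek.c change below, not a new requirement of this patch.

	#include <linux/platform_device.h>
	#include "msm_fb.h"
	#include "mipi_dsi.h"

	static struct dsi_buf foo_tx_buf;
	static struct dsi_cmd_desc foo_display_on_cmds[2];	/* filled in elsewhere */

	static int mipi_foo_lcd_on(struct platform_device *pdev)
	{
		struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);

		if (!mfd || mfd->key != MFD_KEY)
			return -EINVAL;

		/*
		 * 0 = command transfer, not an MDP pixel kickoff.
		 * mipi_dsi_ctrl_lock() returns 1 only if the controller was
		 * free; the lock is cleared by the DSI ISR once the command
		 * DMA completes, so no explicit unlock is needed here.
		 */
		if (mipi_dsi_ctrl_lock(0))
			mipi_dsi_cmds_tx(&foo_tx_buf, foo_display_on_cmds,
					 ARRAY_SIZE(foo_display_on_cmds));

		return 0;
	}
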
diff --git a/drivers/video/msm/mipi_novatek.c b/drivers/video/msm/mipi_novatek.c
index 0070757..7dd41d2 100644
--- a/drivers/video/msm/mipi_novatek.c
+++ b/drivers/video/msm/mipi_novatek.c
@@ -243,14 +243,9 @@
0x2B, 0x00, 0x00, 0x03, 0xBF}; /* 960 - 1 */
#endif
-static char led_pwm1[2] = {0x51, 0x0}; /* DTYPE_DCS_WRITE1 */
static char led_pwm2[2] = {0x53, 0x24}; /* DTYPE_DCS_WRITE1 */
static char led_pwm3[2] = {0x55, 0x00}; /* DTYPE_DCS_WRITE1 */
-static struct dsi_cmd_desc novatek_cmd_backlight_cmds[] = {
- {DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(led_pwm1), led_pwm1},
-};
-
static struct dsi_cmd_desc novatek_video_on_cmds[] = {
{DTYPE_DCS_WRITE, 1, 0, 0, 50,
sizeof(sw_reset), sw_reset},
@@ -318,7 +313,7 @@
cmd = &novatek_manufacture_id_cmd;
mipi_dsi_cmds_rx(mfd, tp, rp, cmd, 3);
lp = (uint32 *)rp->data;
- pr_info("%s: manufacture_id=%x", __func__, *lp);
+ pr_info("%s: manufacture_id=%x\n", __func__, *lp);
return *lp;
}
@@ -398,16 +393,18 @@
mipi = &mfd->panel_info.mipi;
- if (mipi->mode == DSI_VIDEO_MODE) {
- mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_video_on_cmds,
- ARRAY_SIZE(novatek_video_on_cmds));
- } else {
- mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_cmd_on_cmds,
- ARRAY_SIZE(novatek_cmd_on_cmds));
+ if (mipi_dsi_ctrl_lock(0)) {
+ if (mipi->mode == DSI_VIDEO_MODE) {
+ mipi_dsi_cmds_tx(&novatek_tx_buf, novatek_video_on_cmds,
+ ARRAY_SIZE(novatek_video_on_cmds));
+ } else {
+ mipi_dsi_cmds_tx(&novatek_tx_buf, novatek_cmd_on_cmds,
+ ARRAY_SIZE(novatek_cmd_on_cmds));
- mipi_dsi_cmd_bta_sw_trigger(); /* clean up ack_err_status */
-
- mipi_novatek_manufacture_id(mfd);
+ /* clean up ack_err_status */
+ mipi_dsi_cmd_bta_sw_trigger();
+ mipi_novatek_manufacture_id(mfd);
+ }
}
return 0;
@@ -424,14 +421,23 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_display_off_cmds,
+ if (mipi_dsi_ctrl_lock(0)) {
+ mipi_dsi_cmds_tx(&novatek_tx_buf, novatek_display_off_cmds,
ARRAY_SIZE(novatek_display_off_cmds));
+ }
return 0;
}
DEFINE_LED_TRIGGER(bkl_led_trigger);
+#ifdef CONFIG_FB_MSM_MDP303
+void mdp4_backlight_put_level(int cndx, int level)
+{
+ /* do nothing */
+}
+#endif
+
static void mipi_novatek_set_backlight(struct msm_fb_data_type *mfd)
{
struct mipi_panel_info *mipi;
@@ -443,21 +449,7 @@
}
mipi = &mfd->panel_info.mipi;
- mutex_lock(&mfd->dma->ov_mutex);
- if (mdp4_overlay_dsi_state_get() <= ST_DSI_SUSPEND) {
- mutex_unlock(&mfd->dma->ov_mutex);
- return;
- }
- /* mdp4_dsi_cmd_busy_wait: will turn on dsi clock also */
- mdp4_dsi_cmd_dma_busy_wait(mfd);
- mdp4_dsi_blt_dmap_busy_wait(mfd);
- mipi_dsi_mdp_busy_wait(mfd);
-
- led_pwm1[1] = (unsigned char)(mfd->bl_level);
- mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_cmd_backlight_cmds,
- ARRAY_SIZE(novatek_cmd_backlight_cmds));
- mutex_unlock(&mfd->dma->ov_mutex);
- return;
+ mdp4_backlight_put_level(0, mfd->bl_level);
}
static int mipi_dsi_3d_barrier_sysfs_register(struct device *dev);
diff --git a/drivers/video/msm/mipi_orise.c b/drivers/video/msm/mipi_orise.c
index 2afbb9b..d1d6956 100644
--- a/drivers/video/msm/mipi_orise.c
+++ b/drivers/video/msm/mipi_orise.c
@@ -64,10 +64,10 @@
mipi = &mfd->panel_info.mipi;
if (mipi->mode == DSI_VIDEO_MODE) {
- mipi_dsi_cmds_tx(mfd, &orise_tx_buf, orise_video_on_cmds,
+ mipi_dsi_cmds_tx(&orise_tx_buf, orise_video_on_cmds,
ARRAY_SIZE(orise_video_on_cmds));
} else {
- mipi_dsi_cmds_tx(mfd, &orise_tx_buf, orise_cmd_on_cmds,
+ mipi_dsi_cmds_tx(&orise_tx_buf, orise_cmd_on_cmds,
ARRAY_SIZE(orise_cmd_on_cmds));
mipi_dsi_cmd_bta_sw_trigger(); /* clean up ack_err_status */
@@ -87,7 +87,7 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &orise_tx_buf, orise_display_off_cmds,
+ mipi_dsi_cmds_tx(&orise_tx_buf, orise_display_off_cmds,
ARRAY_SIZE(orise_display_off_cmds));
return 0;
diff --git a/drivers/video/msm/mipi_renesas.c b/drivers/video/msm/mipi_renesas.c
index c9dc8255..c842672 100644
--- a/drivers/video/msm/mipi_renesas.c
+++ b/drivers/video/msm/mipi_renesas.c
@@ -1131,23 +1131,23 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_sleep_off_cmds,
+ mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_sleep_off_cmds,
ARRAY_SIZE(renesas_sleep_off_cmds));
mipi_set_tx_power_mode(1);
- mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_display_on_cmds,
+ mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_display_on_cmds,
ARRAY_SIZE(renesas_display_on_cmds));
if (cpu_is_msm7x25a() || cpu_is_msm7x25aa() || cpu_is_msm7x25ab()) {
- mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_hvga_on_cmds,
+ mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_hvga_on_cmds,
ARRAY_SIZE(renesas_hvga_on_cmds));
}
if (mipi->mode == DSI_VIDEO_MODE)
- mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_video_on_cmds,
+ mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_video_on_cmds,
ARRAY_SIZE(renesas_video_on_cmds));
else
- mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_cmd_on_cmds,
+ mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_cmd_on_cmds,
ARRAY_SIZE(renesas_cmd_on_cmds));
mipi_set_tx_power_mode(0);
@@ -1165,7 +1165,7 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_display_off_cmds,
+ mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_display_off_cmds,
ARRAY_SIZE(renesas_display_off_cmds));
return 0;
diff --git a/drivers/video/msm/mipi_simulator.c b/drivers/video/msm/mipi_simulator.c
index c6bf534..c751472 100644
--- a/drivers/video/msm/mipi_simulator.c
+++ b/drivers/video/msm/mipi_simulator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -49,7 +49,7 @@
mipi->mode);
if (mipi->mode == DSI_VIDEO_MODE) {
- mipi_dsi_cmds_tx(mfd, &simulator_tx_buf, display_on_cmds,
+ mipi_dsi_cmds_tx(&simulator_tx_buf, display_on_cmds,
ARRAY_SIZE(display_on_cmds));
} else {
pr_err("%s:%d, CMD MODE NOT SUPPORTED", __func__, __LINE__);
@@ -75,7 +75,7 @@
pr_debug("%s:%d, debug info", __func__, __LINE__);
if (mipi->mode == DSI_VIDEO_MODE) {
- mipi_dsi_cmds_tx(mfd, &simulator_tx_buf, display_off_cmds,
+ mipi_dsi_cmds_tx(&simulator_tx_buf, display_off_cmds,
ARRAY_SIZE(display_off_cmds));
} else {
pr_debug("%s:%d, DONT REACH HERE", __func__, __LINE__);
diff --git a/drivers/video/msm/mipi_tc358764_dsi2lvds.c b/drivers/video/msm/mipi_tc358764_dsi2lvds.c
index 2e65c34..1583168 100644
--- a/drivers/video/msm/mipi_tc358764_dsi2lvds.c
+++ b/drivers/video/msm/mipi_tc358764_dsi2lvds.c
@@ -281,8 +281,8 @@
payload.addr = reg;
payload.data = data;
- /* mutex had been acquired at mipi_dsi_on */
- mipi_dsi_cmds_tx(mfd, &d2l_tx_buf, &cmd_write_reg, 1);
+ /* mutex had been acquired at dsi_on */
+ mipi_dsi_cmds_tx(&d2l_tx_buf, &cmd_write_reg, 1);
pr_debug("%s: reg=0x%x. data=0x%x.\n", __func__, reg, data);
diff --git a/drivers/video/msm/mipi_toshiba.c b/drivers/video/msm/mipi_toshiba.c
index aeaa5aa..520c67b 100644
--- a/drivers/video/msm/mipi_toshiba.c
+++ b/drivers/video/msm/mipi_toshiba.c
@@ -193,12 +193,12 @@
return -EINVAL;
if (TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WVGA_PT)
- mipi_dsi_cmds_tx(mfd, &toshiba_tx_buf,
+ mipi_dsi_cmds_tx(&toshiba_tx_buf,
toshiba_wvga_display_on_cmds,
ARRAY_SIZE(toshiba_wvga_display_on_cmds));
else if (TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WSVGA_PT ||
TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WUXGA)
- mipi_dsi_cmds_tx(mfd, &toshiba_tx_buf,
+ mipi_dsi_cmds_tx(&toshiba_tx_buf,
toshiba_wsvga_display_on_cmds,
ARRAY_SIZE(toshiba_wsvga_display_on_cmds));
else
@@ -218,7 +218,7 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &toshiba_tx_buf, toshiba_display_off_cmds,
+ mipi_dsi_cmds_tx(&toshiba_tx_buf, toshiba_display_off_cmds,
ARRAY_SIZE(toshiba_display_off_cmds));
return 0;
diff --git a/drivers/video/msm/mipi_truly.c b/drivers/video/msm/mipi_truly.c
index a2060f0..fd2a3ea 100644
--- a/drivers/video/msm/mipi_truly.c
+++ b/drivers/video/msm/mipi_truly.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -116,7 +116,7 @@
return -EINVAL;
msleep(20);
- mipi_dsi_cmds_tx(mfd, &truly_tx_buf, truly_display_on_cmds,
+ mipi_dsi_cmds_tx(&truly_tx_buf, truly_display_on_cmds,
ARRAY_SIZE(truly_display_on_cmds));
return 0;
@@ -133,7 +133,7 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &truly_tx_buf, truly_display_off_cmds,
+ mipi_dsi_cmds_tx(&truly_tx_buf, truly_display_off_cmds,
ARRAY_SIZE(truly_display_off_cmds));
return 0;
diff --git a/drivers/video/msm/mipi_truly_tft540960_1_e.c b/drivers/video/msm/mipi_truly_tft540960_1_e.c
index 98b24b1..50db66e 100644
--- a/drivers/video/msm/mipi_truly_tft540960_1_e.c
+++ b/drivers/video/msm/mipi_truly_tft540960_1_e.c
@@ -693,11 +693,11 @@
msleep(120);
if (mipi->mode == DSI_VIDEO_MODE) {
- mipi_dsi_cmds_tx(mfd, &truly_tx_buf,
+ mipi_dsi_cmds_tx(&truly_tx_buf,
truly_video_display_on_cmds,
ARRAY_SIZE(truly_video_display_on_cmds));
} else if (mipi->mode == DSI_CMD_MODE) {
- mipi_dsi_cmds_tx(mfd, &truly_tx_buf,
+ mipi_dsi_cmds_tx(&truly_tx_buf,
truly_cmd_display_on_cmds,
ARRAY_SIZE(truly_cmd_display_on_cmds));
}
@@ -716,7 +716,7 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &truly_tx_buf, truly_display_off_cmds,
+ mipi_dsi_cmds_tx(&truly_tx_buf, truly_display_off_cmds,
ARRAY_SIZE(truly_display_off_cmds));
return 0;
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 176f56b..7bf516d 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -1684,10 +1684,9 @@
struct msm_fb_panel_data *pdata;
/*
- * If framebuffer is 1 or 2, io pen display is not allowed.
+ * If framebuffer is 2, pan display is not allowed.
*/
- if (bf_supported &&
- (info->node == 1 || info->node == 2)) {
+ if (bf_supported && info->node == 2) {
pr_err("%s: no pan display for fb%d!",
__func__, info->node);
return -EPERM;
@@ -2847,7 +2846,7 @@
static int msmfb_overlay_unset(struct fb_info *info, unsigned long *argp)
{
- int ret, ndx;
+ int ret, ndx;
ret = copy_from_user(&ndx, argp, sizeof(ndx));
if (ret) {
@@ -2859,6 +2858,41 @@
return mdp4_overlay_unset(info, ndx);
}
+static int msmfb_overlay_wait4vsync(struct fb_info *info, void __user *argp)
+{
+ int ret;
+ long long vtime;
+
+ ret = mdp4_overlay_wait4vsync(info, &vtime);
+ if (ret) {
+ pr_err("%s: ioctl failed\n", __func__);
+ return ret;
+ }
+
+ if (copy_to_user(argp, &vtime, sizeof(vtime))) {
+ pr_err("%s: copy2user failed\n", __func__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int msmfb_overlay_vsync_ctrl(struct fb_info *info, void __user *argp)
+{
+ int ret;
+ int enable;
+
+ ret = copy_from_user(&enable, argp, sizeof(enable));
+ if (ret) {
+ pr_err("%s:msmfb_overlay_vsync ioctl failed", __func__);
+ return ret;
+ }
+
+ ret = mdp4_overlay_vsync_ctrl(info, enable);
+
+ return ret;
+}
+
static int msmfb_overlay_play_wait(struct fb_info *info, unsigned long *argp)
{
int ret;
@@ -2965,27 +2999,6 @@
return ret;
}
-static int msmfb_overlay_blt_off(struct fb_info *info, unsigned long *argp)
-{
- int ret;
- struct msmfb_overlay_blt req;
-
- ret = copy_from_user(&req, argp, sizeof(req));
- if (ret) {
- pr_err("%s: failed\n", __func__);
- return ret;
- }
-
- ret = mdp4_overlay_blt_offset(info, &req);
-
- ret = copy_to_user(argp, &req, sizeof(req));
- if (ret)
- printk(KERN_ERR "%s:msmfb_overlay_blt_off ioctl failed\n",
- __func__);
-
- return ret;
-}
-
#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
static int msmfb_overlay_ioctl_writeback_init(struct fb_info *info)
{
@@ -3295,6 +3308,16 @@
switch (cmd) {
#ifdef CONFIG_FB_MSM_OVERLAY
+ case FBIO_WAITFORVSYNC:
+ down(&msm_fb_ioctl_ppp_sem);
+ ret = msmfb_overlay_wait4vsync(info, argp);
+ up(&msm_fb_ioctl_ppp_sem);
+ break;
+ case MSMFB_OVERLAY_VSYNC_CTRL:
+ down(&msm_fb_ioctl_ppp_sem);
+ ret = msmfb_overlay_vsync_ctrl(info, argp);
+ up(&msm_fb_ioctl_ppp_sem);
+ break;
case MSMFB_OVERLAY_GET:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_overlay_get(info, argp);
@@ -3330,11 +3353,6 @@
ret = msmfb_overlay_blt(info, argp);
up(&msm_fb_ioctl_ppp_sem);
break;
- case MSMFB_OVERLAY_BLT_OFFSET:
- down(&msm_fb_ioctl_ppp_sem);
- ret = msmfb_overlay_blt_off(info, argp);
- up(&msm_fb_ioctl_ppp_sem);
- break;
case MSMFB_OVERLAY_3D:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_overlay_3d_sbys(info, argp);
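
A minimal userspace sketch of the two ioctls wired up above; the fb node path is an assumption, and the returned value is simply whatever timestamp mdp4_overlay_wait4vsync() copies back.

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fb.h>
	#include <linux/msm_mdp.h>

	int main(void)
	{
		int fd = open("/dev/graphics/fb0", O_RDWR);	/* assumed node path */
		unsigned int enable = 1;
		long long vtime = 0;

		if (fd < 0)
			return 1;

		/* Keep the vsync interrupt enabled for this client. */
		ioctl(fd, MSMFB_OVERLAY_VSYNC_CTRL, &enable);

		/* Block until the next vsync; the driver copies back a timestamp. */
		if (ioctl(fd, FBIO_WAITFORVSYNC, &vtime) == 0)
			printf("vsync timestamp: %lld\n", vtime);

		close(fd);
		return 0;
	}
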
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 64c3b31..aa24919 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -124,7 +124,7 @@
seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
seq_putc(p, '\n');
- for_each_online_cpu(i) {
+ for_each_present_cpu(i) {
/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index b2b79b1..277fdf1 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -259,6 +259,7 @@
header-y += msdos_fs.h
header-y += msg.h
header-y += msm_adc.h
+header-y += msm_ion.h
header-y += epm_adc.h
header-y += mtio.h
header-y += n_r3964.h
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index f03a493..b6064a4 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -47,53 +47,107 @@
enum coresight_dev_type {
CORESIGHT_DEV_TYPE_SINK,
CORESIGHT_DEV_TYPE_LINK,
+ CORESIGHT_DEV_TYPE_LINKSINK,
CORESIGHT_DEV_TYPE_SOURCE,
- CORESIGHT_DEV_TYPE_MAX,
+};
+
+enum coresight_dev_subtype_sink {
+ CORESIGHT_DEV_SUBTYPE_SINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_SINK_PORT,
+ CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
+};
+
+enum coresight_dev_subtype_link {
+ CORESIGHT_DEV_SUBTYPE_LINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_LINK_MERG,
+ CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
+ CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
+};
+
+enum coresight_dev_subtype_source {
+ CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+};
+
+struct coresight_dev_subtype {
+ enum coresight_dev_subtype_sink sink_subtype;
+ enum coresight_dev_subtype_link link_subtype;
+ enum coresight_dev_subtype_source source_subtype;
+};
+
+struct coresight_platform_data {
+ int id;
+ const char *name;
+ int nr_inports;
+ const int *outports;
+ const int *child_ids;
+ const int *child_ports;
+ int nr_outports;
+ bool default_sink;
+};
+
+struct coresight_desc {
+ enum coresight_dev_type type;
+ struct coresight_dev_subtype subtype;
+ const struct coresight_ops *ops;
+ struct coresight_platform_data *pdata;
+ struct device *dev;
+ const struct attribute_group **groups;
+ struct module *owner;
};
struct coresight_connection {
+ int outport;
int child_id;
int child_port;
struct coresight_device *child_dev;
struct list_head link;
};
+struct coresight_refcnt {
+ int sink_refcnt;
+ int *link_refcnts;
+ int source_refcnt;
+};
+
struct coresight_device {
int id;
struct coresight_connection *conns;
int nr_conns;
+ enum coresight_dev_type type;
+ struct coresight_dev_subtype subtype;
const struct coresight_ops *ops;
struct device dev;
- struct mutex mutex;
- int *refcnt;
- struct list_head link;
+ struct coresight_refcnt refcnt;
+ struct list_head dev_link;
+ struct list_head path_link;
struct module *owner;
bool enable;
};
#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+struct coresight_ops_sink {
+ int (*enable)(struct coresight_device *csdev);
+ void (*disable)(struct coresight_device *csdev);
+};
+
+struct coresight_ops_link {
+ int (*enable)(struct coresight_device *csdev, int iport, int oport);
+ void (*disable)(struct coresight_device *csdev, int iport, int oport);
+};
+
+struct coresight_ops_source {
+ int (*enable)(struct coresight_device *csdev);
+ void (*disable)(struct coresight_device *csdev);
+};
+
struct coresight_ops {
- int (*enable)(struct coresight_device *csdev, int port);
- void (*disable)(struct coresight_device *csdev, int port);
-};
-
-struct coresight_platform_data {
- int id;
- const char *name;
- int nr_ports;
- int *child_ids;
- int *child_ports;
- int nr_children;
-};
-
-struct coresight_desc {
- enum coresight_dev_type type;
- const struct coresight_ops *ops;
- struct coresight_platform_data *pdata;
- struct device *dev;
- const struct attribute_group **groups;
- struct module *owner;
+ const struct coresight_ops_sink *sink_ops;
+ const struct coresight_ops_link *link_ops;
+ const struct coresight_ops_source *source_ops;
};
struct qdss_source {
@@ -109,24 +163,29 @@
};
-extern struct coresight_device *
-coresight_register(struct coresight_desc *desc);
-extern void coresight_unregister(struct coresight_device *csdev);
-extern int coresight_enable(struct coresight_device *csdev, int port);
-extern void coresight_disable(struct coresight_device *csdev, int port);
-
#ifdef CONFIG_MSM_QDSS
extern struct qdss_source *qdss_get(const char *name);
extern void qdss_put(struct qdss_source *src);
extern int qdss_enable(struct qdss_source *src);
extern void qdss_disable(struct qdss_source *src);
extern void qdss_disable_sink(void);
+extern struct coresight_device *
+coresight_register(struct coresight_desc *desc);
+extern void coresight_unregister(struct coresight_device *csdev);
+extern int coresight_enable(struct coresight_device *csdev);
+extern void coresight_disable(struct coresight_device *csdev);
#else
static inline struct qdss_source *qdss_get(const char *name) { return NULL; }
static inline void qdss_put(struct qdss_source *src) {}
static inline int qdss_enable(struct qdss_source *src) { return -ENOSYS; }
static inline void qdss_disable(struct qdss_source *src) {}
static inline void qdss_disable_sink(void) {}
+static inline struct coresight_device *
+coresight_register(struct coresight_desc *desc) { return NULL; }
+static inline void coresight_unregister(struct coresight_device *csdev) {}
+static inline int
+coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
+static inline void coresight_disable(struct coresight_device *csdev) {}
#endif
#endif
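
To illustrate the reworked registration interface, a sink driver now supplies per-role operations through coresight_desc instead of the old port-based enable/disable pair. The sketch below is illustrative only; all "foo" names and the probe body are invented, and the NULL check reflects the stub behaviour when CONFIG_MSM_QDSS is disabled.

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/coresight.h>

	static int foo_sink_enable(struct coresight_device *csdev)
	{
		/* program the trace sink hardware here */
		return 0;
	}

	static void foo_sink_disable(struct coresight_device *csdev)
	{
		/* stop the trace sink hardware here */
	}

	static const struct coresight_ops_sink foo_sink_ops = {
		.enable		= foo_sink_enable,
		.disable	= foo_sink_disable,
	};

	static const struct coresight_ops foo_cs_ops = {
		.sink_ops	= &foo_sink_ops,
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct coresight_desc desc = {
			.type			= CORESIGHT_DEV_TYPE_SINK,
			.subtype.sink_subtype	= CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
			.ops			= &foo_cs_ops,
			.pdata			= pdev->dev.platform_data,
			.dev			= &pdev->dev,
			.owner			= THIS_MODULE,
		};
		struct coresight_device *csdev = coresight_register(&desc);

		return csdev ? 0 : -ENODEV;
	}
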
diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h
new file mode 100644
index 0000000..0e28e54
--- /dev/null
+++ b/include/linux/msm_ion.h
@@ -0,0 +1,22 @@
+/*
+ * include/linux/msm_ion.h
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_MSM_ION_H
+#define _LINUX_MSM_ION_H
+
+#include <linux/ion.h>
+
+#endif
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index d8edbc8..4c42623 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -32,8 +32,11 @@
#define MSMFB_OVERLAY_SET _IOWR(MSMFB_IOCTL_MAGIC, 135, \
struct mdp_overlay)
#define MSMFB_OVERLAY_UNSET _IOW(MSMFB_IOCTL_MAGIC, 136, unsigned int)
+
#define MSMFB_OVERLAY_PLAY _IOW(MSMFB_IOCTL_MAGIC, 137, \
struct msmfb_overlay_data)
+#define MSMFB_OVERLAY_QUEUE MSMFB_OVERLAY_PLAY
+
#define MSMFB_GET_PAGE_PROTECTION _IOR(MSMFB_IOCTL_MAGIC, 138, \
struct mdp_page_protection)
#define MSMFB_SET_PAGE_PROTECTION _IOW(MSMFB_IOCTL_MAGIC, 139, \
@@ -66,6 +69,9 @@
#define MSMFB_WRITEBACK_TERMINATE _IO(MSMFB_IOCTL_MAGIC, 155)
#define MSMFB_MDP_PP _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp)
+#define MSMFB_OVERLAY_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 160, unsigned int)
+
+
#define FB_TYPE_3D_PANEL 0x10101010
#define MDP_IMGTYPE2_START 0x10000
#define MSMFB_DRIVER_VERSION 0xF9E8D701
@@ -148,7 +154,7 @@
#define MDP_DEINTERLACE_ODD 0x00400000
#define MDP_OV_PLAY_NOWAIT 0x00200000
#define MDP_SOURCE_ROTATED_90 0x00100000
-#define MDP_DPP_HSIC 0x00080000
+#define MDP_OVERLAY_PP_CFG_EN 0x00080000
#define MDP_BACKEND_COMPOSITION 0x00040000
#define MDP_BORDERFILL_SUPPORTED 0x00010000
#define MDP_SECURE_OVERLAY_SESSION 0x00008000
@@ -265,15 +271,47 @@
struct msmfb_img img;
};
-struct dpp_ctrl {
- /*
- *'sharp_strength' has inputs = -128 <-> 127
- * Increasingly positive values correlate with increasingly sharper
- * picture. Increasingly negative values correlate with increasingly
- * smoothed picture.
- */
- int8_t sharp_strength;
- int8_t hsic_params[NUM_HSIC_PARAM];
+#define MDP_PP_OPS_READ 0x2
+#define MDP_PP_OPS_WRITE 0x4
+
+struct mdp_qseed_cfg {
+ uint32_t table_num;
+ uint32_t ops;
+ uint32_t len;
+ uint32_t *data;
+};
+
+struct mdp_qseed_cfg_data {
+ uint32_t block;
+ struct mdp_qseed_cfg qseed_data;
+};
+
+#define MDP_OVERLAY_PP_CSC_CFG 0x1
+#define MDP_OVERLAY_PP_QSEED_CFG 0x2
+
+#define MDP_CSC_FLAG_ENABLE 0x1
+#define MDP_CSC_FLAG_YUV_IN 0x2
+#define MDP_CSC_FLAG_YUV_OUT 0x4
+
+struct mdp_csc_cfg {
+ /* flags for enable CSC, toggling RGB,YUV input/output */
+ uint32_t flags;
+ uint32_t csc_mv[9];
+ uint32_t csc_pre_bv[3];
+ uint32_t csc_post_bv[3];
+ uint32_t csc_pre_lv[6];
+ uint32_t csc_post_lv[6];
+};
+
+struct mdp_csc_cfg_data {
+ uint32_t block;
+ struct mdp_csc_cfg csc_data;
+};
+
+struct mdp_overlay_pp_params {
+ uint32_t config_ops;
+ struct mdp_csc_cfg csc_cfg;
+ struct mdp_qseed_cfg qseed_cfg[2];
};
struct mdp_overlay {
@@ -287,7 +325,7 @@
uint32_t flags;
uint32_t id;
uint32_t user_data[8];
- struct dpp_ctrl dpp;
+ struct mdp_overlay_pp_params overlay_pp_cfg;
};
struct msmfb_overlay_3d {
@@ -375,25 +413,6 @@
struct mdp_pcc_coeff r, g, b;
};
-#define MDP_CSC_FLAG_ENABLE 0x1
-#define MDP_CSC_FLAG_YUV_IN 0x2
-#define MDP_CSC_FLAG_YUV_OUT 0x4
-
-struct mdp_csc_cfg {
- /* flags for enable CSC, toggling RGB,YUV input/output */
- uint32_t flags;
- uint32_t csc_mv[9];
- uint32_t csc_pre_bv[3];
- uint32_t csc_post_bv[3];
- uint32_t csc_pre_lv[6];
- uint32_t csc_post_lv[6];
-};
-
-struct mdp_csc_cfg_data {
- uint32_t block;
- struct mdp_csc_cfg csc_data;
-};
-
enum {
mdp_lut_igc,
mdp_lut_pgc,
@@ -401,7 +420,6 @@
mdp_lut_max,
};
-
struct mdp_igc_lut_data {
uint32_t block;
uint32_t len, ops;
@@ -443,14 +461,6 @@
} data;
};
-struct mdp_qseed_cfg_data {
- uint32_t block;
- uint32_t table_num;
- uint32_t ops;
- uint32_t len;
- uint32_t *data;
-};
-
struct mdp_bl_scale_data {
uint32_t min_lvl;
uint32_t scale;
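
A short userspace sketch of filling the relocated QSEED structures for a write request; exactly one of MDP_PP_OPS_READ/MDP_PP_OPS_WRITE may be set, matching the check added to mdp4_qseed_cfg() at the start of this series. The length macro, coefficient table, and block id below are placeholders, not values from this patch.

	#include <stdint.h>
	#include <linux/msm_mdp.h>

	#define FOO_QSEED_LEN	1024	/* placeholder; the real length is table specific */

	static uint32_t foo_qseed_coeffs[FOO_QSEED_LEN];

	static void foo_build_qseed_write(struct mdp_qseed_cfg_data *req,
					  uint32_t block)
	{
		req->block = block;			/* target MDP block id */
		req->qseed_data.table_num = 1;
		req->qseed_data.ops = MDP_PP_OPS_WRITE;	/* write-only; never R|W */
		req->qseed_data.len = FOO_QSEED_LEN;
		req->qseed_data.data = foo_qseed_coeffs;
	}
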
diff --git a/include/linux/msm_thermal.h b/include/linux/msm_thermal.h
index fe9be89..47a8753 100644
--- a/include/linux/msm_thermal.h
+++ b/include/linux/msm_thermal.h
@@ -17,9 +17,9 @@
struct msm_thermal_data {
uint32_t sensor_id;
uint32_t poll_ms;
- uint32_t limit_temp;
- uint32_t temp_hysteresis;
- uint32_t limit_freq;
+ uint32_t limit_temp_degC;
+ uint32_t temp_hysteresis_degC;
+ uint32_t freq_step;
};
#ifdef CONFIG_THERMAL_MONITOR
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
new file mode 100644
index 0000000..33559dd
--- /dev/null
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -0,0 +1,689 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm PMIC QPNP ADC driver header file
+ *
+ */
+
+#ifndef __QPNP_ADC_H
+#define __QPNP_ADC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+/**
+ * enum qpnp_vadc_channels - QPNP AMUX arbiter channels
+ */
+enum qpnp_vadc_channels {
+ USBIN = 0,
+ DCIN,
+ VCHG_SNS,
+ SPARE1_03,
+ SPARE2_03,
+ VCOIN,
+ VBAT_SNS,
+ VSYS,
+ DIE_TEMP,
+ REF_625MV,
+ REF_125V,
+ CHG_TEMP,
+ SPARE1,
+ SPARE2,
+ GND_REF,
+ VDD_VADC,
+ P_MUX1_1_1,
+ P_MUX2_1_1,
+ P_MUX3_1_1,
+ P_MUX4_1_1,
+ P_MUX5_1_1,
+ P_MUX6_1_1,
+ P_MUX7_1_1,
+ P_MUX8_1_1,
+ P_MUX9_1_1,
+ P_MUX10_1_1,
+ P_MUX11_1_1,
+ P_MUX12_1_1,
+ P_MUX13_1_1,
+ P_MUX14_1_1,
+ P_MUX15_1_1,
+ P_MUX16_1_1,
+ P_MUX1_1_3,
+ P_MUX2_1_3,
+ P_MUX3_1_3,
+ P_MUX4_1_3,
+ P_MUX5_1_3,
+ P_MUX6_1_3,
+ P_MUX7_1_3,
+ P_MUX8_1_3,
+ P_MUX9_1_3,
+ P_MUX10_1_3,
+ P_MUX11_1_3,
+ P_MUX12_1_3,
+ P_MUX13_1_3,
+ P_MUX14_1_3,
+ P_MUX15_1_3,
+ P_MUX16_1_3,
+ LR_MUX1_BATT_THERM,
+ LR_MUX2_BAT_ID,
+ LR_MUX3_XO_THERM,
+ LR_MUX4_AMUX_THM1,
+ LR_MUX5_AMUX_THM2,
+ LR_MUX6_AMUX_THM3,
+ LR_MUX7_HW_ID,
+ LR_MUX8_AMUX_THM4,
+ LR_MUX9_AMUX_THM5,
+ LR_MUX10_USB_ID,
+ AMUX_PU1,
+ AMUX_PU2,
+ LR_MUX3_BUF_XO_THERM_BUF,
+ LR_MUX1_PU1_BAT_THERM,
+ LR_MUX2_PU1_BAT_ID,
+ LR_MUX3_PU1_XO_THERM,
+ LR_MUX4_PU1_AMUX_THM1,
+ LR_MUX5_PU1_AMUX_THM2,
+ LR_MUX6_PU1_AMUX_THM3,
+ LR_MUX7_PU1_AMUX_HW_ID,
+ LR_MUX8_PU1_AMUX_THM4,
+ LR_MUX9_PU1_AMUX_THM5,
+ LR_MUX10_PU1_AMUX_USB_ID,
+ LR_MUX3_BUF_PU1_XO_THERM_BUF,
+ LR_MUX1_PU2_BAT_THERM,
+ LR_MUX2_PU2_BAT_ID,
+ LR_MUX3_PU2_XO_THERM,
+ LR_MUX4_PU2_AMUX_THM1,
+ LR_MUX5_PU2_AMUX_THM2,
+ LR_MUX6_PU2_AMUX_THM3,
+ LR_MUX7_PU2_AMUX_HW_ID,
+ LR_MUX8_PU2_AMUX_THM4,
+ LR_MUX9_PU2_AMUX_THM5,
+ LR_MUX10_PU2_AMUX_USB_ID,
+ LR_MUX3_BUF_PU2_XO_THERM_BUF,
+ LR_MUX1_PU1_PU2_BAT_THERM,
+ LR_MUX2_PU1_PU2_BAT_ID,
+ LR_MUX3_PU1_PU2_XO_THERM,
+ LR_MUX4_PU1_PU2_AMUX_THM1,
+ LR_MUX5_PU1_PU2_AMUX_THM2,
+ LR_MUX6_PU1_PU2_AMUX_THM3,
+ LR_MUX7_PU1_PU2_AMUX_HW_ID,
+ LR_MUX8_PU1_PU2_AMUX_THM4,
+ LR_MUX9_PU1_PU2_AMUX_THM5,
+ LR_MUX10_PU1_PU2_AMUX_USB_ID,
+ LR_MUX3_BUF_PU1_PU2_XO_THERM_BUF,
+ ALL_OFF,
+ ADC_MAX_NUM,
+};
+
+#define QPNP_ADC_625_UV 625000
+
+/**
+ * enum qpnp_adc_decimation_type - Sampling rate supported.
+ * %DECIMATION_TYPE1: 512
+ * %DECIMATION_TYPE2: 1K
+ * %DECIMATION_TYPE3: 2K
+ * %DECIMATION_TYPE4: 4k
+ * %DECIMATION_NONE: Do not use this Sampling type.
+ *
+ * The Sampling rate is specific to each channel of the QPNP ADC arbiter.
+ */
+enum qpnp_adc_decimation_type {
+ DECIMATION_TYPE1 = 0,
+ DECIMATION_TYPE2,
+ DECIMATION_TYPE3,
+ DECIMATION_TYPE4,
+ DECIMATION_NONE,
+};
+
+/**
+ * enum qpnp_adc_calib_type - QPNP ADC Calibration type.
+ * %ADC_CALIB_ABSOLUTE: Use 625mV and 1.25V reference channels.
+ * %ADC_CALIB_RATIOMETRIC: Use reference Voltage/GND.
+ * %ADC_CALIB_CONFIG_NONE: Do not use this calibration type.
+ *
+ * Use the input reference voltage depending on the calibration type
+ * to calculate the offset and gain parameters. The calibration is
+ * specific to each channel of the QPNP ADC.
+ */
+enum qpnp_adc_calib_type {
+ CALIB_ABSOLUTE = 0,
+ CALIB_RATIOMETRIC,
+ CALIB_NONE,
+};
+
+/**
+ * enum qpnp_adc_channel_scaling_param - pre-scaling AMUX ratio.
+ * %CHAN_PATH_SCALING1: ratio of {1, 1}
+ * %CHAN_PATH_SCALING2: ratio of {1, 3}
+ * %CHAN_PATH_SCALING3: ratio of {1, 4}
+ * %CHAN_PATH_SCALING4: ratio of {1, 6}
+ * %CHAN_PATH_NONE: Do not use this pre-scaling ratio type.
+ *
+ * The pre-scaling is applied for signals to be within the voltage range
+ * of the ADC.
+ */
+enum qpnp_adc_channel_scaling_param {
+ PATH_SCALING1 = 0,
+ PATH_SCALING2,
+ PATH_SCALING3,
+ PATH_SCALING4,
+ PATH_SCALING_NONE,
+};
+
+/**
+ * enum qpnp_adc_scale_fn_type - Scaling function for pm8921 pre calibrated
+ * digital data relative to ADC reference.
+ * %ADC_SCALE_DEFAULT: Default scaling to convert raw adc code to voltage.
+ * %ADC_SCALE_BATT_THERM: Conversion to temperature based on btm parameters.
+ * %ADC_SCALE_PMIC_THERM: Returns result in millidegrees Centigrade.
+ * %ADC_SCALE_XTERN_CHGR_CUR: Returns current across 0.1 ohm resistor.
+ * %ADC_SCALE_XOTHERM: Returns XO thermistor voltage in degrees Centigrade.
+ * %ADC_SCALE_NONE: Do not use this scaling type.
+ */
+enum qpnp_adc_scale_fn_type {
+ SCALE_DEFAULT = 0,
+ SCALE_BATT_THERM,
+ SCALE_PA_THERM,
+ SCALE_PMIC_THERM,
+ SCALE_XOTHERM,
+ SCALE_NONE,
+};
+
+/**
+ * enum qpnp_adc_fast_avg_ctl - Provides ability to obtain single result
+ * from the ADC that is an average of multiple measurement
+ * samples. Select number of samples for use in fast
+ * average mode (i.e. 2 ^ value).
+ * %ADC_FAST_AVG_SAMPLE_1: 0x0 = 1
+ * %ADC_FAST_AVG_SAMPLE_2: 0x1 = 2
+ * %ADC_FAST_AVG_SAMPLE_4: 0x2 = 4
+ * %ADC_FAST_AVG_SAMPLE_8: 0x3 = 8
+ * %ADC_FAST_AVG_SAMPLE_16: 0x4 = 16
+ * %ADC_FAST_AVG_SAMPLE_32: 0x5 = 32
+ * %ADC_FAST_AVG_SAMPLE_64: 0x6 = 64
+ * %ADC_FAST_AVG_SAMPLE_128: 0x7 = 128
+ * %ADC_FAST_AVG_SAMPLE_256: 0x8 = 256
+ * %ADC_FAST_AVG_SAMPLE_512: 0x9 = 512
+ */
+enum qpnp_adc_fast_avg_ctl {
+ ADC_FAST_AVG_SAMPLE_1 = 0,
+ ADC_FAST_AVG_SAMPLE_2,
+ ADC_FAST_AVG_SAMPLE_4,
+ ADC_FAST_AVG_SAMPLE_8,
+ ADC_FAST_AVG_SAMPLE_16,
+ ADC_FAST_AVG_SAMPLE_32,
+ ADC_FAST_AVG_SAMPLE_64,
+ ADC_FAST_AVG_SAMPLE_128,
+ ADC_FAST_AVG_SAMPLE_256,
+ ADC_FAST_AVG_SAMPLE_512,
+ ADC_FAST_AVG_SAMPLE_NONE,
+};
+
+/**
+ * enum qpnp_adc_hw_settle_time - Time between AMUX getting configured and
+ * the ADC starting conversion. Delay = 100us * value for
+ * value < 11 and 2ms * (value - 10) otherwise.
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_0US: 0us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_100US: 100us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_200US: 200us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_300US: 300us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_400US: 400us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_500US: 500us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_600US: 600us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_700US: 700us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_800US: 800us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_900US: 900us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_1MS: 1ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_2MS: 2ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_4MS: 4ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_6MS: 6ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_8MS: 8ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_10MS: 10ms
+ * %ADC_CHANNEL_HW_SETTLE_NONE
+ */
+enum qpnp_adc_hw_settle_time {
+ ADC_CHANNEL_HW_SETTLE_DELAY_0US = 0,
+ ADC_CHANNEL_HW_SETTLE_DELAY_100US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_200US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_300US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_400US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_500US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_600US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_700US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_800US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_900US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_1MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_2MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_4MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_6MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_8MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_10MS,
+ ADC_CHANNEL_HW_SETTLE_NONE,
+};
+
+/**
+ * enum qpnp_vadc_mode_sel - Selects the basic mode of operation.
+ * - The normal mode is used for single measurement.
+ * - The Conversion sequencer is used to trigger an
+ * ADC read when a HW trigger is selected.
+ * - The measurement interval performs a single or
+ * continuous measurement at a specified interval/delay.
+ * %ADC_OP_NORMAL_MODE : Normal mode used for single measurement.
+ * %ADC_OP_CONVERSION_SEQUENCER : Conversion sequencer used to trigger
+ * an ADC read on a HW supported trigger.
+ * Refer to enum qpnp_vadc_trigger for
+ * supported HW triggers.
+ * %ADC_OP_MEASUREMENT_INTERVAL : The measurement interval performs a
+ * single or continuous measurement after a specified delay.
+ * For delay look at qpnp_adc_meas_timer.
+ */
+enum qpnp_vadc_mode_sel {
+ ADC_OP_NORMAL_MODE = 0,
+ ADC_OP_CONVERSION_SEQUENCER,
+ ADC_OP_MEASUREMENT_INTERVAL,
+ ADC_OP_MODE_NONE,
+};
+
+/**
+ * enum qpnp_vadc_trigger - Select the HW trigger to be used while
+ * measuring the ADC reading.
+ * %ADC_GSM_PA_ON : GSM power amplifier on.
+ * %ADC_TX_GTR_THRES : Transmit power greater than threshold.
+ * %ADC_CAMERA_FLASH_RAMP : Flash ramp up done.
+ * %ADC_DTEST : DTEST.
+ */
+enum qpnp_vadc_trigger {
+ ADC_GSM_PA_ON = 0,
+ ADC_TX_GTR_THRES,
+ ADC_CAMERA_FLASH_RAMP,
+ ADC_DTEST,
+ ADC_SEQ_NONE,
+};
+
+/**
+ * enum qpnp_vadc_conv_seq_timeout - Select delay (0 to 15ms) from
+ * conversion request to triggering conversion sequencer
+ * hold off time.
+ */
+enum qpnp_vadc_conv_seq_timeout {
+ ADC_CONV_SEQ_TIMEOUT_0MS = 0,
+ ADC_CONV_SEQ_TIMEOUT_1MS,
+ ADC_CONV_SEQ_TIMEOUT_2MS,
+ ADC_CONV_SEQ_TIMEOUT_3MS,
+ ADC_CONV_SEQ_TIMEOUT_4MS,
+ ADC_CONV_SEQ_TIMEOUT_5MS,
+ ADC_CONV_SEQ_TIMEOUT_6MS,
+ ADC_CONV_SEQ_TIMEOUT_7MS,
+ ADC_CONV_SEQ_TIMEOUT_8MS,
+ ADC_CONV_SEQ_TIMEOUT_9MS,
+ ADC_CONV_SEQ_TIMEOUT_10MS,
+ ADC_CONV_SEQ_TIMEOUT_11MS,
+ ADC_CONV_SEQ_TIMEOUT_12MS,
+ ADC_CONV_SEQ_TIMEOUT_13MS,
+ ADC_CONV_SEQ_TIMEOUT_14MS,
+ ADC_CONV_SEQ_TIMEOUT_15MS,
+ ADC_CONV_SEQ_TIMEOUT_NONE,
+};
+
+/**
+ * enum qpnp_adc_conv_seq_holdoff - Select delay from conversion
+ * trigger signal (i.e. adc_conv_seq_trig) transition
+ * to ADC enable. Delay = 25us * (value + 1).
+ */
+enum qpnp_adc_conv_seq_holdoff {
+ ADC_SEQ_HOLD_25US = 0,
+ ADC_SEQ_HOLD_50US,
+ ADC_SEQ_HOLD_75US,
+ ADC_SEQ_HOLD_100US,
+ ADC_SEQ_HOLD_125US,
+ ADC_SEQ_HOLD_150US,
+ ADC_SEQ_HOLD_175US,
+ ADC_SEQ_HOLD_200US,
+ ADC_SEQ_HOLD_225US,
+ ADC_SEQ_HOLD_250US,
+ ADC_SEQ_HOLD_275US,
+ ADC_SEQ_HOLD_300US,
+ ADC_SEQ_HOLD_325US,
+ ADC_SEQ_HOLD_350US,
+ ADC_SEQ_HOLD_375US,
+ ADC_SEQ_HOLD_400US,
+ ADC_SEQ_HOLD_NONE,
+};
+
+/**
+ * enum qpnp_adc_conv_seq_state - Conversion sequencer operating state
+ * %ADC_CONV_SEQ_IDLE : Sequencer is in idle.
+ * %ADC_CONV_TRIG_RISE : Waiting for rising edge trigger.
+ * %ADC_CONV_TRIG_HOLDOFF : Waiting for rising trigger hold off time.
+ * %ADC_CONV_MEAS_RISE : Measuring selected ADC signal.
+ * %ADC_CONV_TRIG_FALL : Waiting for falling trigger edge.
+ * %ADC_CONV_FALL_HOLDOFF : Waiting for falling trigger hold off time.
+ * %ADC_CONV_MEAS_FALL : Measuring selected ADC signal.
+ * %ADC_CONV_ERROR : Unexpected hardware error.
+ */
+enum qpnp_adc_conv_seq_state {
+ ADC_CONV_SEQ_IDLE = 0,
+ ADC_CONV_TRIG_RISE,
+ ADC_CONV_TRIG_HOLDOFF,
+ ADC_CONV_MEAS_RISE,
+ ADC_CONV_TRIG_FALL,
+ ADC_CONV_FALL_HOLDOFF,
+ ADC_CONV_MEAS_FALL,
+ ADC_CONV_ERROR,
+ ADC_CONV_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer - Selects the measurement interval time.
+ * If value = 0, use 0ms; else the interval is (2^(value + 4)) / 32768 seconds.
+ * %ADC_MEAS_INTERVAL_0MS : 0ms
+ * %ADC_MEAS_INTERVAL_1P0MS : 1ms
+ * %ADC_MEAS_INTERVAL_2P0MS : 2ms
+ * %ADC_MEAS_INTERVAL_3P9MS : 3.9ms
+ * %ADC_MEAS_INTERVAL_7P8MS : 7.8ms
+ * %ADC_MEAS_INTERVAL_15P6MS : 15.6ms
+ * %ADC_MEAS_INTERVAL_31P3MS : 31.3ms
+ * %ADC_MEAS_INTERVAL_62P5MS : 62.5ms
+ * %ADC_MEAS_INTERVAL_125MS : 125ms
+ * %ADC_MEAS_INTERVAL_250MS : 250ms
+ * %ADC_MEAS_INTERVAL_500MS : 500ms
+ * %ADC_MEAS_INTERVAL_1S : 1seconds
+ * %ADC_MEAS_INTERVAL_2S : 2seconds
+ * %ADC_MEAS_INTERVAL_4S : 4seconds
+ * %ADC_MEAS_INTERVAL_8S : 8seconds
+ * %ADC_MEAS_INTERVAL_16S: 16seconds
+ */
+enum qpnp_adc_meas_timer {
+ ADC_MEAS_INTERVAL_0MS = 0,
+ ADC_MEAS_INTERVAL_1P0MS,
+ ADC_MEAS_INTERVAL_2P0MS,
+ ADC_MEAS_INTERVAL_3P9MS,
+ ADC_MEAS_INTERVAL_7P8MS,
+ ADC_MEAS_INTERVAL_15P6MS,
+ ADC_MEAS_INTERVAL_31P3MS,
+ ADC_MEAS_INTERVAL_62P5MS,
+ ADC_MEAS_INTERVAL_125MS,
+ ADC_MEAS_INTERVAL_250MS,
+ ADC_MEAS_INTERVAL_500MS,
+ ADC_MEAS_INTERVAL_1S,
+ ADC_MEAS_INTERVAL_2S,
+ ADC_MEAS_INTERVAL_4S,
+ ADC_MEAS_INTERVAL_8S,
+ ADC_MEAS_INTERVAL_16S,
+ ADC_MEAS_INTERVAL_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_interval_op_ctl - Select operating mode.
+ * %ADC_MEAS_INTERVAL_OP_SINGLE : Conduct single measurement at specified time
+ * delay.
+ * %ADC_MEAS_INTERVAL_OP_CONTINUOUS : Make measurements at measurement interval
+ * times.
+ */
+enum qpnp_adc_meas_interval_op_ctl {
+ ADC_MEAS_INTERVAL_OP_SINGLE = 0,
+ ADC_MEAS_INTERVAL_OP_CONTINUOUS,
+ ADC_MEAS_INTERVAL_OP_NONE,
+};
+
+/**
+ * struct qpnp_vadc_linear_graph - Represent ADC characteristics.
+ * @dy: Numerator slope to calculate the gain.
+ * @dx: Denominator slope to calculate the gain.
+ * @adc_vref: A/D word of the voltage reference used for the channel.
+ * @adc_gnd: A/D word of the ground reference used for the channel.
+ *
+ * Each ADC device has different offset and gain parameters which are computed
+ * to calibrate the device.
+ */
+struct qpnp_vadc_linear_graph {
+ int64_t dy;
+ int64_t dx;
+ int64_t adc_vref;
+ int64_t adc_gnd;
+};
+
+/**
+ * struct qpnp_vadc_map_pt - Map the graph representation for ADC channel
+ * @x: Represent the ADC digitized code.
+ * @y: Represent the physical data which can be temperature, voltage,
+ * resistance.
+ */
+struct qpnp_vadc_map_pt {
+ int32_t x;
+ int32_t y;
+};
+
+/**
+ * struct qpnp_vadc_scaling_ratio - Represent scaling ratio for adc input.
+ * @num: Numerator scaling parameter.
+ * @den: Denominator scaling parameter.
+ */
+struct qpnp_vadc_scaling_ratio {
+ int32_t num;
+ int32_t den;
+};
+
+/**
+ * struct qpnp_adc_properties - Represent the ADC properties.
+ * @adc_vdd_reference: Reference voltage for QPNP ADC.
+ * @bitresolution: ADC bit resolution for QPNP ADC.
+ * @bipolar: Polarity for QPNP ADC.
+ */
+struct qpnp_adc_properties {
+ uint32_t adc_vdd_reference;
+ uint32_t bitresolution;
+ bool bipolar;
+};
+
+/**
+ * struct qpnp_vadc_chan_properties - Represent channel properties of the ADC.
+ * @offset_gain_numerator: The inverse numerator of the gain applied to the
+ * input channel.
+ * @offset_gain_denominator: The inverse denominator of the gain applied to the
+ * input channel.
+ * @adc_graph: ADC graph for the channel of struct type qpnp_adc_linear_graph.
+ */
+struct qpnp_vadc_chan_properties {
+ uint32_t offset_gain_numerator;
+ uint32_t offset_gain_denominator;
+ struct qpnp_vadc_linear_graph adc_graph[2];
+};
+
+/**
+ * struct qpnp_adc_result - Represent the result of the QPNP ADC.
+ * @chan: The channel number of the requested conversion.
+ * @adc_code: The pre-calibrated digital output of a given ADC relative to
+ * the ADC reference.
+ * @measurement: In units specific to a given ADC; most ADCs use a reference
+ * voltage but some use a reference current. This measurement
+ * here is a number relative to a reference of a given ADC.
+ * @physical: The data meaningful for each individual channel whether it is
+ * voltage, current, temperature, etc.
+ * All voltage units are represented in micro - volts.
+ * -Battery temperature units are represented as 0.1 DegC.
+ * -PA Therm temperature units are represented as DegC.
+ * -PMIC Die temperature units are represented as 0.001 DegC.
+ */
+struct qpnp_vadc_result {
+ uint32_t chan;
+ int32_t adc_code;
+ int64_t measurement;
+ int64_t physical;
+};
+
+/**
+ * struct qpnp_vadc_amux - AMUX properties for individual channel
+ * @name: Channel string name.
+ * @channel_num: Channel in integer used from qpnp_adc_channels.
+ * @chan_path_prescaling: Channel scaling performed on the input signal.
+ * @adc_decimation: Sampling rate desired for the channel.
+ * @adc_scale_fn: Scaling function to convert to the data meaningful for
+ * each individual channel whether it is voltage, current,
+ * temperature, etc and compensates the channel properties.
+ * @fast_avg_setup: Number of samples to average for the channel.
+ * @hw_settle_time: Settling period for the channel before the ADC read.
+ */
+struct qpnp_vadc_amux {
+ char *name;
+ enum qpnp_vadc_channels channel_num;
+ enum qpnp_adc_channel_scaling_param chan_path_prescaling;
+ enum qpnp_adc_decimation_type adc_decimation;
+ enum qpnp_adc_scale_fn_type adc_scale_fn;
+ enum qpnp_adc_fast_avg_ctl fast_avg_setup;
+ enum qpnp_adc_hw_settle_time hw_settle_time;
+};
+
+/**
+ * qpnp_vadc_amux_scaling_ratio - Pre-scaling ratios applied to the AMUX
+ * input signal before measurement.
+ */
+static const struct qpnp_vadc_scaling_ratio qpnp_vadc_amux_scaling_ratio[] = {
+ {1, 1},
+ {1, 3},
+ {1, 4},
+ {1, 6},
+ {1, 20}
+};
+
+/**
+ * struct qpnp_vadc_scale_fn - Scaling function prototype
+ * @chan: Function pointer to one of the scaling functions
+ * which takes the adc properties, channel properties,
+ * and returns the physical result
+ */
+struct qpnp_vadc_scale_fn {
+ int32_t (*chan) (int32_t,
+ const struct qpnp_adc_properties *,
+ const struct qpnp_vadc_chan_properties *,
+ struct qpnp_vadc_result *);
+};
+
+/**
+ * struct qpnp_adc_drv - QPNP ADC device structure.
+ * @spmi - spmi device for ADC peripheral.
+ * @offset - base offset for the ADC peripheral.
+ * @adc_prop - ADC properties specific to the ADC peripheral.
+ * @amux_prop - AMUX properties representing the ADC peripheral.
+ * @adc_channels - ADC channel properties for the ADC peripheral.
+ * @adc_irq - IRQ number that is mapped to the ADC peripheral.
+ * @adc_lock - ADC lock for access to the peripheral.
+ * @adc_rslt_completion - ADC result notification after interrupt
+ * is received.
+ */
+struct qpnp_adc_drv {
+ struct spmi_device *spmi;
+ uint8_t slave;
+ uint16_t offset;
+ struct qpnp_adc_properties *adc_prop;
+ struct qpnp_vadc_amux_properties *amux_prop;
+ struct qpnp_vadc_amux *adc_channels;
+ int adc_irq;
+ struct mutex adc_lock;
+ struct completion adc_rslt_completion;
+};
+
+/**
+ * struct qpnp_vadc_amux_properties - QPNP VADC amux channel property.
+ * @amux_channel - Refer to the qpnp_vadc_channel list.
+ * @decimation - Sampling rate supported for the channel.
+ * @mode_sel - The basic mode of operation.
+ * @hw_settle_time - The time between AMUX being configured and the
+ * start of conversion.
+ * @fast_avg_setup - Ability to provide single result from the ADC
+ * that is an average of multiple measurements.
+ * @trigger_channel - HW trigger channel for conversion sequencer.
+ * @chan_prop - Represent the channel properties of the ADC.
+ */
+struct qpnp_vadc_amux_properties {
+ uint32_t amux_channel;
+ uint32_t decimation;
+ uint32_t mode_sel;
+ uint32_t hw_settle_time;
+ uint32_t fast_avg_setup;
+ enum qpnp_vadc_trigger trigger_channel;
+ struct qpnp_vadc_chan_properties chan_prop[0];
+};
+
+/* Public API */
+#if defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE) \
+ || defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE_MODULE)
+/**
+ * qpnp_vadc_read() - Performs ADC read on the channel.
+ * @channel: Input channel to perform the ADC read.
+ * @result: Structure pointer of type adc_chan_result
+ * in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_read(enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result);
+
+/**
+ * qpnp_vadc_conv_seq_request() - Performs ADC read on the conversion
+ * sequencer channel.
+ * @channel: Input channel to perform the ADC read.
+ * @result: Structure pointer of type adc_chan_result
+ * in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_conv_seq_request(
+ enum qpnp_vadc_trigger trigger_channel,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result);
+
+/**
+ * qpnp_vadc_check_result() - Performs check on the ADC raw code.
+ * @data: Data used for verifying the range of the ADC code.
+ */
+int32_t qpnp_vadc_check_result(int32_t *data);
+
+/**
+ * qpnp_adc_get_devicetree_data() - Abstracts the ADC devicetree data.
+ * @spmi: spmi ADC device.
+ * @adc_qpnp: spmi device tree node structure
+ */
+int32_t qpnp_adc_get_devicetree_data(struct spmi_device *spmi,
+ struct qpnp_adc_drv *adc_qpnp);
+
+/**
+ * qpnp_vadc_configure() - Configure ADC device to start conversion.
+ * @chan_prop: Individual channel properties for the AMUX channel.
+ */
+int32_t qpnp_vadc_configure(
+ struct qpnp_vadc_amux_properties *chan_prop);
+
+/**
+ * qpnp_adc_scale_default() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset.
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the qpnp adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: Individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_default(int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+#else
+static inline int32_t qpnp_vadc_read(uint32_t channel,
+ struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_conv_seq_request(
+ enum qpnp_vadc_trigger trigger_channel,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_default(int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+#endif
+
+#endif
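
For reference, the encodings spelled out in the kernel-doc comments above, restated as small helpers. This is a sketch only; nothing here is exported by the driver, and the helper names are invented.

	#include <linux/types.h>
	#include <linux/qpnp/qpnp-adc.h>

	/* Fast-average setup selects 2^value samples (ADC_FAST_AVG_SAMPLE_*). */
	static inline u32 qpnp_adc_fast_avg_samples(enum qpnp_adc_fast_avg_ctl ctl)
	{
		return 1U << ctl;
	}

	/* HW settle delay in us: 100us steps up to 1ms, then 2ms steps. */
	static inline u32 qpnp_adc_hw_settle_us(enum qpnp_adc_hw_settle_time val)
	{
		return (val < 11) ? 100 * val : 2000 * (val - 10);
	}

	/* Measurement interval in us: 0 for value 0, else 2^(value + 4) / 32768 s. */
	static inline u64 qpnp_adc_meas_interval_us(enum qpnp_adc_meas_timer val)
	{
		if (val == ADC_MEAS_INTERVAL_0MS)
			return 0;
		return ((1ULL << (val + 4)) * 1000000ULL) >> 15;
	}
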
diff --git a/include/linux/usb/android.h b/include/linux/usb/android.h
index 7c2b33b..0b11fdaf 100644
--- a/include/linux/usb/android.h
+++ b/include/linux/usb/android.h
@@ -17,6 +17,8 @@
#ifndef __LINUX_USB_ANDROID_H
#define __LINUX_USB_ANDROID_H
+#include <linux/usb/composite.h>
+
struct android_usb_platform_data {
int (*update_pid_and_serial_num)(uint32_t, const char *);
u32 swfi_latency;
@@ -24,4 +26,22 @@
bool cdrom;
};
+#ifndef CONFIG_TARGET_CORE
+static inline int f_tcm_init(int (*connect_cb)(bool connect))
+{
+ /*
+ * Fail in bind(), not init(): if a function's init() returned an error,
+ * the android composite registration would fail.
+ */
+ return 0;
+}
+static inline void f_tcm_exit(void)
+{
+}
+static inline int tcm_bind_config(struct usb_configuration *c)
+{
+ return -ENODEV;
+}
+#endif
+
#endif /* __LINUX_USB_ANDROID_H */
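Illustrative only, not part of this patch: with CONFIG_TARGET_CORE disabled, the stubs above keep the composite registration path intact. A hypothetical caller (the names my_connect_cb and c are assumptions) would see f_tcm_init() succeed, so android composite registration proceeds, while tcm_bind_config() fails with -ENODEV at configuration bind time:

	static int example_android_tcm_setup(struct usb_configuration *c)
	{
		int err;

		err = f_tcm_init(my_connect_cb);	/* 0 when tcm is stubbed out */
		if (err)
			return err;	/* a failure here would abort composite registration */

		return tcm_bind_config(c);	/* -ENODEV when stubbed out: bind fails, init did not */
	}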
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 76f4396..72f5c96 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -56,7 +56,7 @@
struct sk_buff_head rxq_pause;
struct urb *interrupt;
struct usb_anchor deferred;
- struct tasklet_struct bh;
+ struct work_struct bh_w;
struct work_struct kevent;
unsigned long flags;
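The field change above moves the usbnet bottom half from a tasklet to a workqueue item. A sketch of the corresponding driver pattern follows; the handler name and body are assumptions, not the actual usbnet.c change:

	static void usbnet_bh_work(struct work_struct *work)
	{
		struct usbnet *dev = container_of(work, struct usbnet, bh_w);

		/* drain dev->rxq / dev->txq here, as the old tasklet handler did */
	}

	/* setup, e.g. in probe:  INIT_WORK(&dev->bh_w, usbnet_bh_work);          */
	/* where tasklet_schedule(&dev->bh) was used:  schedule_work(&dev->bh_w); */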
diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h
index d7e65b0..46a5b1b 100644
--- a/include/linux/wcnss_wlan.h
+++ b/include/linux/wcnss_wlan.h
@@ -47,6 +47,7 @@
int req_riva_power_on_lock(char *driver_name);
int free_riva_power_on_lock(char *driver_name);
unsigned int wcnss_get_serial_number(void);
+void wcnss_flush_delayed_boot_votes(void);
#define wcnss_wlan_get_drvdata(dev) dev_get_drvdata(dev)
#define wcnss_wlan_set_drvdata(dev, data) dev_set_drvdata((dev), (data))
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index ae81dcd..57ce7c0 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -213,6 +213,10 @@
#define MSM_CAM_IOCTL_GET_INST_HANDLE \
_IOR(MSM_CAM_IOCTL_MAGIC, 60, uint32_t *)
+#define MSM_CAM_IOCTL_STATS_UNREG_BUF \
+ _IOR(MSM_CAM_IOCTL_MAGIC, 61, struct msm_stats_flush_bufq *)
+
+
struct msm_stats_reqbuf {
int num_buf; /* how many buffers requested */
int stats_type; /* stats type */
@@ -468,6 +472,7 @@
#define CMD_AXI_CFG_ZSL 43
#define CMD_AXI_CFG_SNAP_VPE 44
#define CMD_AXI_CFG_SNAP_THUMB_VPE 45
+
#define CMD_CONFIG_PING_ADDR 46
#define CMD_CONFIG_PONG_ADDR 47
#define CMD_CONFIG_FREE_BUF_ADDR 48
@@ -475,6 +480,13 @@
#define CMD_AXI_CFG_VIDEO_ALL_CHNLS 50
#define CMD_VFE_BUFFER_RELEASE 51
#define CMD_VFE_PROCESS_IRQ 52
+#define CMD_STATS_BG_ENABLE 53
+#define CMD_STATS_BF_ENABLE 54
+#define CMD_STATS_BHIST_ENABLE 55
+#define CMD_STATS_BG_BUF_RELEASE 56
+#define CMD_STATS_BF_BUF_RELEASE 57
+#define CMD_STATS_BHIST_BUF_RELEASE 58
+
#define CMD_AXI_CFG_PRIM BIT(8)
#define CMD_AXI_CFG_PRIM_ALL_CHNLS BIT(9)
@@ -524,7 +536,10 @@
#define MSM_PMEM_C2D 17
#define MSM_PMEM_MAINIMG_VPE 18
#define MSM_PMEM_THUMBNAIL_VPE 19
-#define MSM_PMEM_MAX 20
+#define MSM_PMEM_BAYER_GRID 20
+#define MSM_PMEM_BAYER_FOCUS 21
+#define MSM_PMEM_BAYER_HIST 22
+#define MSM_PMEM_MAX 23
#define STAT_AEAW 0
#define STAT_AEC 1
@@ -534,7 +549,10 @@
#define STAT_CS 5
#define STAT_IHIST 6
#define STAT_SKIN 7
-#define STAT_MAX 8
+#define STAT_BG 8
+#define STAT_BF 9
+#define STAT_BHIST 10
+#define STAT_MAX 11
#define FRAME_PREVIEW_OUTPUT1 0
#define FRAME_PREVIEW_OUTPUT2 1
@@ -1858,6 +1876,12 @@
struct msm_cpp_frame_strip_info *strip_info;
};
+struct msm_ver_num_info {
+ uint32_t main;
+ uint32_t minor;
+ uint32_t rev;
+};
+
#define VIDIOC_MSM_CPP_CFG \
_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_camera_v4l2_ioctl_t)
diff --git a/include/media/msm_isp.h b/include/media/msm_isp.h
index 3df6ded..0ee7417 100644
--- a/include/media/msm_isp.h
+++ b/include/media/msm_isp.h
@@ -62,6 +62,10 @@
#define MSG_ID_OUTPUT_TERTIARY1 43
#define MSG_ID_STOP_LS_ACK 44
#define MSG_ID_OUTPUT_TERTIARY2 45
+#define MSG_ID_STATS_BG 46
+#define MSG_ID_STATS_BF 47
+#define MSG_ID_STATS_BHIST 48
+
/* ISP command IDs */
#define VFE_CMD_DUMMY_0 0
@@ -206,6 +210,13 @@
#define VFE_CMD_STATS_REQBUF 139
#define VFE_CMD_STATS_ENQUEUEBUF 140
#define VFE_CMD_STATS_FLUSH_BUFQ 141
+#define VFE_CMD_STATS_UNREGBUF 142
+#define VFE_CMD_STATS_BG_START 143
+#define VFE_CMD_STATS_BG_STOP 144
+#define VFE_CMD_STATS_BF_START 145
+#define VFE_CMD_STATS_BF_STOP 146
+#define VFE_CMD_STATS_BHIST_START 147
+#define VFE_CMD_STATS_BHIST_STOP 148
struct msm_isp_cmd {
int32_t id;
diff --git a/include/media/vcap_fmt.h b/include/media/vcap_fmt.h
index 92240bf1..00e0375 100644
--- a/include/media/vcap_fmt.h
+++ b/include/media/vcap_fmt.h
@@ -44,6 +44,43 @@
HAL_VCAP_RGB,
};
+enum nr_mode {
+ NR_DISABLE = 0,
+ NR_AUTO,
+ NR_MANUAL,
+};
+
+enum nr_decay_ratio {
+ NR_Decay_Ratio_26 = 0,
+ NR_Decay_Ratio_25,
+ NR_Decay_Ratio_24,
+ NR_Decay_Ratio_23,
+ NR_Decay_Ratio_22,
+ NR_Decay_Ratio_21,
+ NR_Decay_Ratio_20,
+ NR_Decay_Ratio_19,
+};
+
+struct nr_config {
+ uint8_t max_blend_ratio;
+ uint8_t scale_diff_ratio;
+ uint8_t diff_limit_ratio;
+ uint8_t scale_motion_ratio;
+ uint8_t blend_limit_ratio;
+};
+
+struct nr_param {
+ enum nr_mode mode;
+ enum nr_decay_ratio decay_ratio;
+ uint8_t window;
+ struct nr_config luma;
+ struct nr_config chroma;
+};
+
+#define VCAPIOC_NR_S_PARAMS _IOWR('V', (BASE_VIDIOC_PRIVATE+0), struct nr_param)
+
+#define VCAPIOC_NR_G_PARAMS _IOWR('V', (BASE_VIDIOC_PRIVATE+1), struct nr_param)
+
struct v4l2_format_vc_ext {
enum hal_vcap_mode mode;
enum hal_vcap_polar h_polar;
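For illustration, a user-space sequence exercising the two new noise-reduction ioctls; vcap_fd and the chosen values are placeholders, not recommendations:

	struct nr_param param;

	if (ioctl(vcap_fd, VCAPIOC_NR_G_PARAMS, &param) < 0)
		return -1;

	param.mode = NR_MANUAL;
	param.decay_ratio = NR_Decay_Ratio_24;	/* placeholder value */
	param.window = 2;			/* placeholder value */

	if (ioctl(vcap_fd, VCAPIOC_NR_S_PARAMS, &param) < 0)
		return -1;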
diff --git a/include/media/vcap_v4l2.h b/include/media/vcap_v4l2.h
index 9719aa6..390a843 100644
--- a/include/media/vcap_v4l2.h
+++ b/include/media/vcap_v4l2.h
@@ -120,7 +120,8 @@
void *bufMotion;
struct nr_buffer bufNR;
- bool nr_enabled;
+ struct nr_param nr_param;
+ bool nr_update;
};
struct vp_work_t {
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 8e8778a..ac4ec09 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -573,6 +573,7 @@
/* Payload of the #ADM_CMD_CONNECT_AFE_PORT_V5 command.*/
struct adm_cmd_connect_afe_port_v5 {
+ struct apr_hdr hdr;
u8 mode;
/* ID of the stream router (RX/TX). Use the
* ADM_STRTR_ID_RX or ADM_STRTR_IDX macros
diff --git a/include/sound/q6afe-v2.h b/include/sound/q6afe-v2.h
index 1587d38..b1ed9a4 100644
--- a/include/sound/q6afe-v2.h
+++ b/include/sound/q6afe-v2.h
@@ -77,6 +77,7 @@
int afe_sidetone(u16 tx_port_id, u16 rx_port_id, u16 enable, uint16_t gain);
int afe_loopback_gain(u16 port_id, u16 volume);
int afe_validate_port(u16 port_id);
+int afe_get_port_index(u16 port_id);
int afe_start_pseudo_port(u16 port_id);
int afe_stop_pseudo_port(u16 port_id);
int afe_cmd_memory_map(u32 dma_addr_p, u32 dma_buf_sz);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c34cbbd..831509c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5649,7 +5649,12 @@
__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
int **resultp)
{
- return alloc_page(GFP_HIGHUSER_MOVABLE);
+ gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+
+ if (PageHighMem(page))
+ gfp_mask |= __GFP_HIGHMEM;
+
+ return alloc_page(gfp_mask);
}
/* [start, end) must belong to a single zone. */
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 02ea082..0563af9 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -951,6 +951,9 @@
if (test_bit(HCI_RAW, &hdev->flags))
return;
+ if (conn->type == LE_LINK)
+ return;
+
if (conn->mode != HCI_CM_SNIFF)
goto timer;
@@ -1018,6 +1021,9 @@
if (test_bit(HCI_RAW, &hdev->flags))
return;
+ if (conn->type == LE_LINK)
+ return;
+
if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
return;
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index bbf1b89..c94c2e5 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -33,6 +33,7 @@
#include <linux/pm_runtime.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
+#include <linux/wait.h>
#include "wcd9304.h"
#define WCD9304_RATES (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|\
@@ -55,12 +56,15 @@
#define AIF1_PB 1
#define AIF1_CAP 2
#define NUM_CODEC_DAIS 2
+#define SLIM_CLOSE_TIMEOUT 1000
struct sitar_codec_dai_data {
u32 rate;
u32 *ch_num;
u32 ch_act;
u32 ch_tot;
+ u32 ch_mask;
+ wait_queue_head_t dai_wait;
};
#define SITAR_MCLK_RATE_12288KHZ 12288000
@@ -2833,13 +2837,48 @@
},
};
+static int sitar_codec_enable_chmask(struct sitar_priv *sitar,
+ int event, int index)
+{
+ int ret = 0;
+ u32 k = 0;
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ for (k = 0; k < sitar->dai[index].ch_tot; k++) {
+ ret = wcd9xxx_get_slave_port(
+ sitar->dai[index].ch_num[k]);
+ if (ret < 0) {
+ pr_err("%s: Invalid Slave port ID: %d\n",
+ __func__, ret);
+ ret = -EINVAL;
+ break;
+ }
+ sitar->dai[index].ch_mask |= 1 << ret;
+ }
+ ret = 0;
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ ret = wait_event_timeout(sitar->dai[index].dai_wait,
+ (sitar->dai[index].ch_mask == 0),
+ msecs_to_jiffies(SLIM_CLOSE_TIMEOUT));
+ if (!ret) {
+ pr_err("%s: slim close tx/rx timeout\n",
+ __func__);
+ ret = -EINVAL;
+ }
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
static int sitar_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct wcd9xxx *sitar;
struct snd_soc_codec *codec = w->codec;
struct sitar_priv *sitar_p = snd_soc_codec_get_drvdata(codec);
- u32 j = 0;
+ u32 j = 0, ret = 0;
codec->control_data = dev_get_drvdata(codec->dev->parent);
sitar = codec->control_data;
/* Execute the callback only if interface type is slimbus */
@@ -2856,11 +2895,13 @@
break;
}
}
- if (sitar_p->dai[j].ch_act == sitar_p->dai[j].ch_tot)
- wcd9xxx_cfg_slim_sch_rx(sitar,
+ if (sitar_p->dai[j].ch_act == sitar_p->dai[j].ch_tot) {
+ ret = sitar_codec_enable_chmask(sitar_p, event, j);
+ ret = wcd9xxx_cfg_slim_sch_rx(sitar,
sitar_p->dai[j].ch_num,
sitar_p->dai[j].ch_tot,
sitar_p->dai[j].rate);
+ }
break;
case SND_SOC_DAPM_POST_PMD:
for (j = 0; j < ARRAY_SIZE(sitar_dai); j++) {
@@ -2876,17 +2917,14 @@
wcd9xxx_close_slim_sch_rx(sitar,
sitar_p->dai[j].ch_num,
sitar_p->dai[j].ch_tot);
- /* Wait for remove channel to complete
- * before derouting Rx path
- */
- usleep_range(15000, 15000);
sitar_p->dai[j].rate = 0;
memset(sitar_p->dai[j].ch_num, 0, (sizeof(u32)*
sitar_p->dai[j].ch_tot));
sitar_p->dai[j].ch_tot = 0;
+ ret = sitar_codec_enable_chmask(sitar_p, event, j);
}
}
- return 0;
+ return ret;
}
static int sitar_codec_enable_slimtx(struct snd_soc_dapm_widget *w,
@@ -2896,7 +2934,7 @@
struct snd_soc_codec *codec = w->codec;
struct sitar_priv *sitar_p = snd_soc_codec_get_drvdata(codec);
/* index to the DAI ID, for now hardcoding */
- u32 j = 0;
+ u32 j = 0, ret = 0;
codec->control_data = dev_get_drvdata(codec->dev->parent);
sitar = codec->control_data;
@@ -2915,11 +2953,13 @@
break;
}
}
- if (sitar_p->dai[j].ch_act == sitar_p->dai[j].ch_tot)
- wcd9xxx_cfg_slim_sch_tx(sitar,
+ if (sitar_p->dai[j].ch_act == sitar_p->dai[j].ch_tot) {
+ ret = sitar_codec_enable_chmask(sitar_p, event, j);
+ ret = wcd9xxx_cfg_slim_sch_tx(sitar,
sitar_p->dai[j].ch_num,
sitar_p->dai[j].ch_tot,
sitar_p->dai[j].rate);
+ }
break;
case SND_SOC_DAPM_POST_PMD:
for (j = 0; j < ARRAY_SIZE(sitar_dai); j++) {
@@ -2939,9 +2979,10 @@
memset(sitar_p->dai[j].ch_num, 0, (sizeof(u32)*
sitar_p->dai[j].ch_tot));
sitar_p->dai[j].ch_tot = 0;
+ ret = sitar_codec_enable_chmask(sitar_p, event, j);
}
}
- return 0;
+ return ret;
}
@@ -4600,7 +4641,7 @@
{
struct sitar_priv *priv = data;
struct snd_soc_codec *codec = priv->codec;
- int i, j;
+ int i, j, k, port_id, ch_mask_temp;
u8 val;
@@ -4608,14 +4649,30 @@
slimbus_value = wcd9xxx_interface_reg_read(codec->control_data,
SITAR_SLIM_PGD_PORT_INT_STATUS0 + i);
for_each_set_bit(j, &slimbus_value, BITS_PER_BYTE) {
+ port_id = i*8 + j;
val = wcd9xxx_interface_reg_read(codec->control_data,
- SITAR_SLIM_PGD_PORT_INT_SOURCE0 + i*8 + j);
+ SITAR_SLIM_PGD_PORT_INT_SOURCE0 + port_id);
if (val & 0x1)
- pr_err_ratelimited("overflow error on port %x,"
- " value %x\n", i*8 + j, val);
+ pr_err_ratelimited("overflow error on port %x, value %x\n",
+ port_id, val);
if (val & 0x2)
- pr_err_ratelimited("underflow error on port %x,"
- " value %x\n", i*8 + j, val);
+				pr_err_ratelimited("underflow error on port %x, value %x\n",
+ port_id, val);
+ if (val & 0x4) {
+ pr_debug("%s: port %x disconnect value %x\n",
+ __func__, port_id, val);
+ for (k = 0; k < ARRAY_SIZE(sitar_dai); k++) {
+ ch_mask_temp = 1 << port_id;
+ if (ch_mask_temp &
+ priv->dai[k].ch_mask) {
+ priv->dai[k].ch_mask &=
+ ~ch_mask_temp;
+ if (!priv->dai[k].ch_mask)
+ wake_up(
+ &priv->dai[k].dai_wait);
+ }
+ }
+ }
}
wcd9xxx_interface_reg_write(codec->control_data,
SITAR_SLIM_PGD_PORT_INT_CLR0 + i, 0xFF);
@@ -4983,6 +5040,7 @@
}
sitar->dai[i].ch_num = kzalloc((sizeof(unsigned int)*
ch_cnt), GFP_KERNEL);
+ init_waitqueue_head(&sitar->dai[i].dai_wait);
}
codec->ignore_pmdown_time = 1;
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 9b60a56..894e114 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -153,13 +153,25 @@
help
To add support for SoC audio on MSM8960 and APQ8064 boards
+config AUDIO_OCMEM
+ bool "Enable OCMEM for audio/voice usecase"
+ depends on MSM_OCMEM
+ default n
+ help
+ To add support for on-chip memory use
+ for audio use cases on MSM8974.
+ OCMEM gets exercised for low-power
+ audio and voice use cases.
+
config SND_SOC_MSM8974
tristate "SoC Machine driver for MSM8974 boards"
depends on ARCH_MSM8974
select SND_SOC_QDSP6V2
select SND_SOC_MSM_STUB
select SND_SOC_MSM_HOSTLESS_PCM
+ select SND_SOC_WCD9320
select SND_DYNAMIC_MINORS
+ select AUDIO_OCMEM
help
To add support for SoC audio on MSM8974.
This will enable sound soc drivers which
diff --git a/sound/soc/msm/apq8064.c b/sound/soc/msm/apq8064.c
index d8a4624..2fcf29b 100644
--- a/sound/soc/msm/apq8064.c
+++ b/sound/soc/msm/apq8064.c
@@ -1152,8 +1152,9 @@
snd_soc_dapm_sync(dapm);
err = snd_soc_jack_new(codec, "Headset Jack",
- (SND_JACK_HEADSET | SND_JACK_OC_HPHL | SND_JACK_OC_HPHR),
- &hs_jack);
+ (SND_JACK_HEADSET | SND_JACK_OC_HPHL |
+ SND_JACK_OC_HPHR | SND_JACK_UNSUPPORTED),
+ &hs_jack);
if (err) {
pr_err("failed to create new jack\n");
return err;
diff --git a/sound/soc/msm/mpq8064.c b/sound/soc/msm/mpq8064.c
index 6685ce5..f5bbf56 100644
--- a/sound/soc/msm/mpq8064.c
+++ b/sound/soc/msm/mpq8064.c
@@ -1165,8 +1165,6 @@
.platform_name = "msm-pcm-afe",
.ignore_suspend = 1,
.ignore_pmdown_time = 1, /* this dainlink has playback support */
- .codec_dai_name = "snd-soc-dummy-dai",
- .codec_name = "snd-soc-dummy",
},
{
.name = "MSM AFE-PCM TX",
@@ -1176,8 +1174,6 @@
.codec_dai_name = "msm-stub-tx",
.platform_name = "msm-pcm-afe",
.ignore_suspend = 1,
- .codec_dai_name = "snd-soc-dummy-dai",
- .codec_name = "snd-soc-dummy",
},
{
.name = "MSM8960 Compr1",
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index ff2cc8d..acb073d 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -1,4 +1,6 @@
snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o msm-pcm-routing-v2.o msm-compr-q6-v2.o msm-multi-ch-pcm-q6-v2.o
snd-soc-qdsp6v2-objs += msm-pcm-lpa-v2.o msm-pcm-afe-v2.o msm-pcm-voip-v2.o msm-pcm-voice-v2.o
obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o
-obj-y += q6adm.o q6afe.o q6asm.o q6audio-v2.o q6voice.o
+obj-y += q6adm.o q6afe.o q6asm.o q6audio-v2.o q6voice.o q6core.o
+ocmem-audio-objs += audio_ocmem.o
+obj-$(CONFIG_AUDIO_OCMEM) += ocmem-audio.o
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
new file mode 100644
index 0000000..86a82e2
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -0,0 +1,672 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <asm/mach-types.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/ocmem.h>
+#include "q6core.h"
+#include "audio_ocmem.h"
+
+#define AUDIO_OCMEM_BUF_SIZE (512 * SZ_1K)
+
+enum {
+ OCMEM_STATE_ALLOC = 1,
+ OCMEM_STATE_MAP_TRANSITION,
+ OCMEM_STATE_MAP_COMPL,
+ OCMEM_STATE_UNMAP_TRANSITION,
+ OCMEM_STATE_UNMAP_COMPL,
+ OCMEM_STATE_SHRINK,
+ OCMEM_STATE_GROW,
+ OCMEM_STATE_FREE,
+ OCMEM_STATE_MAP_FAIL,
+ OCMEM_STATE_UNMAP_FAIL,
+ OCMEM_STATE_EXIT,
+};
+static void audio_ocmem_process_workdata(struct work_struct *work);
+
+struct audio_ocmem_workdata {
+ int id;
+ bool en;
+ struct work_struct work;
+};
+
+struct voice_ocmem_workdata {
+ int id;
+ bool en;
+ struct work_struct work;
+};
+
+struct audio_ocmem_prv {
+ atomic_t audio_state;
+ struct ocmem_notifier *audio_hdl;
+ struct ocmem_buf *buf;
+ uint32_t audio_ocmem_bus_client;
+ struct ocmem_map_list mlist;
+ struct avcs_cmd_rsp_get_low_power_segments_info_t *lp_memseg_ptr;
+ wait_queue_head_t audio_wait;
+ atomic_t audio_cond;
+ atomic_t audio_exit;
+ spinlock_t audio_lock;
+ struct workqueue_struct *audio_ocmem_workqueue;
+ struct workqueue_struct *voice_ocmem_workqueue;
+};
+
+static struct audio_ocmem_prv audio_ocmem_lcl;
+
+
+static int audio_ocmem_client_cb(struct notifier_block *this,
+ unsigned long event1, void *data)
+{
+ int rc = NOTIFY_DONE;
+ unsigned long flags;
+
+ pr_debug("%s: event[%ld] cur state[%x]\n", __func__,
+ event1, atomic_read(&audio_ocmem_lcl.audio_state));
+
+ spin_lock_irqsave(&audio_ocmem_lcl.audio_lock, flags);
+ switch (event1) {
+ case OCMEM_MAP_DONE:
+ pr_debug("%s: map done\n", __func__);
+ atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_MAP_COMPL);
+ break;
+ case OCMEM_MAP_FAIL:
+ pr_debug("%s: map fail\n", __func__);
+ atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_MAP_FAIL);
+ break;
+ case OCMEM_UNMAP_DONE:
+ pr_debug("%s: unmap done\n", __func__);
+ atomic_set(&audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_UNMAP_COMPL);
+ break;
+ case OCMEM_UNMAP_FAIL:
+ pr_debug("%s: unmap fail\n", __func__);
+ atomic_set(&audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_UNMAP_FAIL);
+ break;
+ case OCMEM_ALLOC_GROW:
+ audio_ocmem_lcl.buf = data;
+ atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_GROW);
+ break;
+ case OCMEM_ALLOC_SHRINK:
+ atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_SHRINK);
+ break;
+ default:
+ pr_err("%s: Invalid event[%ld]\n", __func__, event1);
+ break;
+ }
+ spin_unlock_irqrestore(&audio_ocmem_lcl.audio_lock, flags);
+ if (atomic_read(&audio_ocmem_lcl.audio_cond)) {
+ atomic_set(&audio_ocmem_lcl.audio_cond, 0);
+ wake_up(&audio_ocmem_lcl.audio_wait);
+ }
+ return rc;
+}
+
+/**
+ * audio_ocmem_enable() - Exercise OCMEM for audio
+ * @cid: client id - OCMEM_LP_AUDIO
+ *
+ * OCMEM gets allocated for the audio usecase and the low power
+ * segments obtained from the DSP are moved between main memory
+ * and OCMEM. Shrink and grow requests are received and processed
+ * based on the current audio state.
+ */
+int audio_ocmem_enable(int cid)
+{
+ int ret;
+ int i, j;
+ struct ocmem_buf *buf = NULL;
+ struct avcs_cmd_rsp_get_low_power_segments_info_t *lp_segptr;
+
+ pr_debug("%s\n", __func__);
+ /* Non-blocking ocmem allocate (asynchronous) */
+ buf = ocmem_allocate_nb(cid, AUDIO_OCMEM_BUF_SIZE);
+ if (IS_ERR_OR_NULL(buf)) {
+		pr_err("%s: ocmem allocation failed for client %d\n", __func__, cid);
+ return -ENOMEM;
+ }
+ atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_ALLOC);
+
+ audio_ocmem_lcl.buf = buf;
+ atomic_set(&audio_ocmem_lcl.audio_exit, 0);
+ if (!buf->len) {
+ wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+ (atomic_read(&audio_ocmem_lcl.audio_cond) == 0) ||
+ (atomic_read(&audio_ocmem_lcl.audio_exit) == 1));
+
+ if (atomic_read(&audio_ocmem_lcl.audio_exit)) {
+ pr_err("%s: audio playback ended while waiting for ocmem\n",
+ __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ }
+ if (audio_ocmem_lcl.lp_memseg_ptr == NULL) {
+ /* Retrieve low power segments */
+ ret = core_get_low_power_segments(
+ &audio_ocmem_lcl.lp_memseg_ptr);
+ if (ret != 0) {
+ pr_err("%s: get low power segments from DSP failed, rc=%d\n",
+ __func__, ret);
+ goto fail_cmd;
+ }
+ }
+ lp_segptr = audio_ocmem_lcl.lp_memseg_ptr;
+ audio_ocmem_lcl.mlist.num_chunks = lp_segptr->num_segments;
+ for (i = 0, j = 0; j < audio_ocmem_lcl.mlist.num_chunks; j++, i++) {
+ audio_ocmem_lcl.mlist.chunks[j].ro =
+ (lp_segptr->mem_segment[i].type == READ_ONLY_SEGMENT);
+ audio_ocmem_lcl.mlist.chunks[j].ddr_paddr =
+ lp_segptr->mem_segment[i].start_address_lsw;
+ audio_ocmem_lcl.mlist.chunks[j].size =
+ lp_segptr->mem_segment[i].size;
+ pr_debug("%s: ro:%d, ddr_paddr[%x], size[%x]\n", __func__,
+ audio_ocmem_lcl.mlist.chunks[j].ro,
+ (uint32_t)audio_ocmem_lcl.mlist.chunks[j].ddr_paddr,
+ (uint32_t)audio_ocmem_lcl.mlist.chunks[j].size);
+ }
+
+ /* vote for ocmem bus bandwidth */
+ ret = msm_bus_scale_client_update_request(
+ audio_ocmem_lcl.audio_ocmem_bus_client,
+ 0);
+ if (ret)
+ pr_err("%s: failed to vote for bus bandwidth\n", __func__);
+
+ atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_MAP_TRANSITION);
+
+ ret = ocmem_map(cid, audio_ocmem_lcl.buf, &audio_ocmem_lcl.mlist);
+ if (ret) {
+ pr_err("%s: ocmem_map failed\n", __func__);
+ goto fail_cmd;
+ }
+
+
+ while ((atomic_read(&audio_ocmem_lcl.audio_state) !=
+ OCMEM_STATE_EXIT)) {
+
+ wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+ atomic_read(&audio_ocmem_lcl.audio_cond) == 0);
+
+ switch (atomic_read(&audio_ocmem_lcl.audio_state)) {
+ case OCMEM_STATE_MAP_COMPL:
+ pr_debug("%s: audio_cond[0x%x], audio_state[0x%x]\n",
+ __func__, atomic_read(&audio_ocmem_lcl.audio_cond),
+ atomic_read(&audio_ocmem_lcl.audio_state));
+ atomic_set(&audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_MAP_COMPL);
+ atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+ break;
+ case OCMEM_STATE_SHRINK:
+ atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+ ret = ocmem_unmap(cid, audio_ocmem_lcl.buf,
+ &audio_ocmem_lcl.mlist);
+ if (ret) {
+ pr_err("%s: ocmem_unmap failed, state[%d]\n",
+ __func__,
+ atomic_read(&audio_ocmem_lcl.audio_state));
+ goto fail_cmd;
+ }
+
+ atomic_set(&audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_UNMAP_TRANSITION);
+ wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+ atomic_read(&audio_ocmem_lcl.audio_cond) == 0);
+ atomic_set(&audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_UNMAP_COMPL);
+ ret = ocmem_shrink(cid, audio_ocmem_lcl.buf, 0);
+ if (ret) {
+ pr_err("%s: ocmem_shrink failed, state[%d]\n",
+ __func__,
+ atomic_read(&audio_ocmem_lcl.audio_state));
+ goto fail_cmd;
+ }
+
+ break;
+ case OCMEM_STATE_GROW:
+ atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+ ret = ocmem_map(cid, audio_ocmem_lcl.buf,
+ &audio_ocmem_lcl.mlist);
+ if (ret) {
+ pr_err("%s: ocmem_map failed, state[%d]\n",
+ __func__,
+ atomic_read(&audio_ocmem_lcl.audio_state));
+ goto fail_cmd;
+ }
+ atomic_set(&audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_MAP_TRANSITION);
+ wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+ atomic_read(&audio_ocmem_lcl.audio_cond) == 0);
+ atomic_set(&audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_MAP_COMPL);
+ break;
+ }
+ }
+fail_cmd:
+ pr_debug("%s: exit\n", __func__);
+ return ret;
+}
+
+/**
+ * audio_ocmem_disable() - Disable OCMEM for audio
+ * @cid: client id - OCMEM_LP_AUDIO
+ *
+ * OCMEM gets deallocated for audio usecase. Depending on
+ * current audio state, OCMEM will be freed from using audio
+ * segments.
+ */
+int audio_ocmem_disable(int cid)
+{
+ int ret;
+
+ if (atomic_read(&audio_ocmem_lcl.audio_cond))
+ atomic_set(&audio_ocmem_lcl.audio_cond, 0);
+ pr_debug("%s: audio_cond[0x%x], audio_state[0x%x]\n", __func__,
+ atomic_read(&audio_ocmem_lcl.audio_cond),
+ atomic_read(&audio_ocmem_lcl.audio_state));
+ switch (atomic_read(&audio_ocmem_lcl.audio_state)) {
+ case OCMEM_STATE_MAP_COMPL:
+ atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+ ret = ocmem_unmap(cid, audio_ocmem_lcl.buf,
+ &audio_ocmem_lcl.mlist);
+ if (ret) {
+ pr_err("%s: ocmem_unmap failed, state[%d]\n",
+ __func__,
+ atomic_read(&audio_ocmem_lcl.audio_state));
+ goto fail_cmd;
+ }
+
+ atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_EXIT);
+
+ wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+ atomic_read(&audio_ocmem_lcl.audio_cond) == 0);
+ case OCMEM_STATE_UNMAP_COMPL:
+ ret = ocmem_free(OCMEM_LP_AUDIO, audio_ocmem_lcl.buf);
+ if (ret) {
+ pr_err("%s: ocmem_free failed, state[%d]\n",
+ __func__,
+ atomic_read(&audio_ocmem_lcl.audio_state));
+ goto fail_cmd;
+ }
+ pr_debug("%s: ocmem_free success\n", __func__);
+ default:
+		pr_debug("%s: state=%d\n", __func__,
+ atomic_read(&audio_ocmem_lcl.audio_state));
+ break;
+
+ }
+ return 0;
+fail_cmd:
+ return ret;
+}
+
+static void voice_ocmem_process_workdata(struct work_struct *work)
+{
+ int cid;
+ bool en;
+ int rc = 0;
+
+ struct voice_ocmem_workdata *voice_ocm_work =
+ container_of(work, struct voice_ocmem_workdata, work);
+
+ en = voice_ocm_work->en;
+ switch (voice_ocm_work->id) {
+ case VOICE:
+ cid = OCMEM_VOICE;
+ if (en)
+ disable_ocmem_for_voice(cid);
+ else
+ enable_ocmem_after_voice(cid);
+ break;
+ default:
+ pr_err("%s: Invalid client id[%d]\n", __func__,
+ voice_ocm_work->id);
+ rc = -EINVAL;
+ }
+
+}
+/**
+ * voice_ocmem_process_req() - disable/enable OCMEM during voice call
+ * @cid: client id - VOICE
+ * @enable: 1 - enable
+ * 0 - disable
+ *
+ * This configures OCMEM during start of voice call. If any
+ * audio clients are already using OCMEM, they will be evicted
+ * out of OCMEM during voice call and get restored after voice
+ * call.
+ */
+int voice_ocmem_process_req(int cid, bool enable)
+{
+
+ struct voice_ocmem_workdata *workdata = NULL;
+
+ if (audio_ocmem_lcl.voice_ocmem_workqueue == NULL) {
+ pr_err("%s: voice ocmem workqueue is NULL\n", __func__);
+ return -EINVAL;
+ }
+ workdata = kzalloc(sizeof(struct voice_ocmem_workdata),
+ GFP_ATOMIC);
+ if (workdata == NULL) {
+ pr_err("%s: mem failure\n", __func__);
+ return -ENOMEM;
+ }
+ workdata->id = cid;
+ workdata->en = enable;
+
+ INIT_WORK(&workdata->work, voice_ocmem_process_workdata);
+ queue_work(audio_ocmem_lcl.voice_ocmem_workqueue, &workdata->work);
+
+ return 0;
+}
+
+/**
+ * disable_ocmem_for_voice() - disable OCMEM during voice call
+ * @cid: client id - OCMEM_VOICE
+ *
+ * This configures OCMEM at the start of a voice call. If any
+ * audio clients are already using OCMEM, they will be evicted
+ * out of OCMEM for the duration of the call.
+ */
+int disable_ocmem_for_voice(int cid)
+{
+ int ret;
+
+ ret = ocmem_evict(cid);
+ if (ret)
+ pr_err("%s: ocmem_evict is not successful\n", __func__);
+ return ret;
+}
+
+/**
+ * enable_ocmem_after_voice() - enable OCMEM use after a voice call
+ * @cid: client id - OCMEM_VOICE
+ *
+ * OCMEM gets re-enabled after the voice call. If another client
+ * was evicted out of OCMEM, it gets restored and remapped in
+ * OCMEM after the voice call.
+ */
+int enable_ocmem_after_voice(int cid)
+{
+ int ret;
+
+ ret = ocmem_restore(cid);
+ if (ret)
+ pr_err("%s: ocmem_restore is not successful\n", __func__);
+ return ret;
+}
+
+
+static void audio_ocmem_process_workdata(struct work_struct *work)
+{
+ int cid;
+ bool en;
+ int rc = 0;
+
+ struct audio_ocmem_workdata *audio_ocm_work =
+ container_of(work, struct audio_ocmem_workdata, work);
+
+ en = audio_ocm_work->en;
+ switch (audio_ocm_work->id) {
+ case AUDIO:
+ cid = OCMEM_LP_AUDIO;
+ if (en)
+ audio_ocmem_enable(cid);
+ else
+ audio_ocmem_disable(cid);
+ break;
+ default:
+ pr_err("%s: Invalid client id[%d]\n", __func__,
+ audio_ocm_work->id);
+ rc = -EINVAL;
+ }
+
+}
+
+/**
+ * audio_ocmem_process_req() - process audio request to use OCMEM
+ * @id: client id - OCMEM_LP_AUDIO
+ * @enable: enable or disable OCMEM
+ *
+ * Work is queued on the audio OCMEM workqueue to enable or
+ * disable OCMEM for audio clients.
+ */
+int audio_ocmem_process_req(int id, bool enable)
+{
+ struct audio_ocmem_workdata *workdata = NULL;
+
+ if (audio_ocmem_lcl.audio_ocmem_workqueue == NULL) {
+ pr_err("%s: audio ocmem workqueue is NULL\n", __func__);
+ return -EINVAL;
+ }
+ workdata = kzalloc(sizeof(struct audio_ocmem_workdata),
+ GFP_ATOMIC);
+ if (workdata == NULL) {
+ pr_err("%s: mem failure\n", __func__);
+ return -ENOMEM;
+ }
+ workdata->id = id;
+ workdata->en = enable;
+
+ /* if previous work waiting for ocmem - signal it to exit */
+ atomic_set(&audio_ocmem_lcl.audio_exit, 1);
+
+ INIT_WORK(&workdata->work, audio_ocmem_process_workdata);
+ queue_work(audio_ocmem_lcl.audio_ocmem_workqueue, &workdata->work);
+
+ return 0;
+}
+
+
+static struct notifier_block audio_ocmem_client_nb = {
+ .notifier_call = audio_ocmem_client_cb,
+};
+
+static int audio_ocmem_platform_data_populate(struct platform_device *pdev)
+{
+ int ret;
+ struct msm_bus_scale_pdata *audio_ocmem_bus_scale_pdata = NULL;
+ struct msm_bus_vectors *audio_ocmem_bus_vectors = NULL;
+ struct msm_bus_paths *ocmem_audio_bus_paths = NULL;
+ u32 val;
+
+ if (!pdev->dev.of_node) {
+ pr_err("%s: device tree information missing\n", __func__);
+ return -ENODEV;
+ }
+
+ audio_ocmem_bus_vectors = kzalloc(sizeof(struct msm_bus_vectors),
+ GFP_KERNEL);
+ if (!audio_ocmem_bus_vectors) {
+ dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
+ return -ENOMEM;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-ocmem-audio-src-id", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-src-id missing in DT node\n",
+ __func__);
+ goto fail1;
+ }
+ audio_ocmem_bus_vectors->src = val;
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-ocmem-audio-dst-id", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-dst-id missing in DT node\n",
+ __func__);
+ goto fail1;
+ }
+ audio_ocmem_bus_vectors->dst = val;
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-ocmem-audio-ab", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-ab missing in DT node\n",
+ __func__);
+ goto fail1;
+ }
+ audio_ocmem_bus_vectors->ab = val;
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-ocmem-audio-ib", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-ib missing in DT node\n",
+ __func__);
+ goto fail1;
+ }
+ audio_ocmem_bus_vectors->ib = val;
+
+ ocmem_audio_bus_paths = kzalloc(sizeof(struct msm_bus_paths),
+ GFP_KERNEL);
+	if (!ocmem_audio_bus_paths) {
+		dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
+		ret = -ENOMEM;
+		goto fail1;
+	}
+ ocmem_audio_bus_paths->num_paths = 1;
+ ocmem_audio_bus_paths->vectors = audio_ocmem_bus_vectors;
+
+ audio_ocmem_bus_scale_pdata =
+ kzalloc(sizeof(struct msm_bus_scale_pdata), GFP_KERNEL);
+
+	if (!audio_ocmem_bus_scale_pdata) {
+		dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
+		ret = -ENOMEM;
+		goto fail2;
+	}
+
+ audio_ocmem_bus_scale_pdata->usecase = ocmem_audio_bus_paths;
+ audio_ocmem_bus_scale_pdata->num_usecases = 1;
+ audio_ocmem_bus_scale_pdata->name = "audio-ocmem";
+
+ dev_set_drvdata(&pdev->dev, audio_ocmem_bus_scale_pdata);
+ return ret;
+
+fail2:
+ kfree(ocmem_audio_bus_paths);
+fail1:
+ kfree(audio_ocmem_bus_vectors);
+ return ret;
+}
+static int ocmem_audio_client_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct msm_bus_scale_pdata *audio_ocmem_bus_scale_pdata = NULL;
+
+ pr_debug("%s\n", __func__);
+ audio_ocmem_lcl.audio_ocmem_workqueue =
+ alloc_workqueue("ocmem_audio_client_driver_audio",
+ WQ_NON_REENTRANT, 0);
+ if (!audio_ocmem_lcl.audio_ocmem_workqueue) {
+ pr_err("%s: Failed to create ocmem audio work queue\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ audio_ocmem_lcl.voice_ocmem_workqueue =
+ alloc_workqueue("ocmem_audio_client_driver_voice",
+ WQ_NON_REENTRANT, 0);
+ if (!audio_ocmem_lcl.voice_ocmem_workqueue) {
+ pr_err("%s: Failed to create ocmem voice work queue\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ init_waitqueue_head(&audio_ocmem_lcl.audio_wait);
+ atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+ atomic_set(&audio_ocmem_lcl.audio_exit, 0);
+ spin_lock_init(&audio_ocmem_lcl.audio_lock);
+
+ /* populate platform data */
+ ret = audio_ocmem_platform_data_populate(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: failed to populate platform data, rc = %d\n",
+ __func__, ret);
+ return -ENODEV;
+ }
+ audio_ocmem_bus_scale_pdata = dev_get_drvdata(&pdev->dev);
+
+ audio_ocmem_lcl.audio_ocmem_bus_client =
+ msm_bus_scale_register_client(audio_ocmem_bus_scale_pdata);
+
+ if (!audio_ocmem_lcl.audio_ocmem_bus_client) {
+ pr_err("%s: msm_bus_scale_register_client() failed\n",
+ __func__);
+ return -EFAULT;
+ }
+ audio_ocmem_lcl.audio_hdl = ocmem_notifier_register(OCMEM_LP_AUDIO,
+ &audio_ocmem_client_nb);
+ if (audio_ocmem_lcl.audio_hdl == NULL) {
+ pr_err("%s: Failed to get ocmem handle %d\n", __func__,
+ OCMEM_LP_AUDIO);
+ }
+ audio_ocmem_lcl.lp_memseg_ptr = NULL;
+ return 0;
+}
+
+static int ocmem_audio_client_remove(struct platform_device *pdev)
+{
+ struct msm_bus_scale_pdata *audio_ocmem_bus_scale_pdata = NULL;
+
+ audio_ocmem_bus_scale_pdata = (struct msm_bus_scale_pdata *)
+ dev_get_drvdata(&pdev->dev);
+
+ kfree(audio_ocmem_bus_scale_pdata->usecase->vectors);
+ kfree(audio_ocmem_bus_scale_pdata->usecase);
+ kfree(audio_ocmem_bus_scale_pdata);
+ ocmem_notifier_unregister(audio_ocmem_lcl.audio_hdl,
+ &audio_ocmem_client_nb);
+ return 0;
+}
+static const struct of_device_id msm_ocmem_audio_dt_match[] = {
+ {.compatible = "qcom,msm-ocmem-audio"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_ocmem_audio_dt_match);
+
+static struct platform_driver audio_ocmem_driver = {
+ .driver = {
+ .name = "audio-ocmem",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ocmem_audio_dt_match,
+ },
+ .probe = ocmem_audio_client_probe,
+ .remove = ocmem_audio_client_remove,
+};
+
+
+static int __init ocmem_audio_client_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&audio_ocmem_driver);
+
+ if (rc)
+ pr_err("%s: Failed to register audio ocmem driver\n", __func__);
+ return rc;
+}
+module_init(ocmem_audio_client_init);
+
+static void __exit ocmem_audio_client_exit(void)
+{
+ platform_driver_unregister(&audio_ocmem_driver);
+}
+
+module_exit(ocmem_audio_client_exit);
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.h b/sound/soc/msm/qdsp6v2/audio_ocmem.h
new file mode 100644
index 0000000..e915516
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _AUDIO_OCMEM_H_
+#define _AUDIO_OCMEM_H_
+
+#include <linux/module.h>
+#include <linux/notifier.h>
+
+#include <mach/ocmem.h>
+
+#define AUDIO 0
+#define VOICE 1
+
+#ifdef CONFIG_AUDIO_OCMEM
+int audio_ocmem_process_req(int id, bool enable);
+int voice_ocmem_process_req(int cid, bool enable);
+int enable_ocmem_after_voice(int cid);
+int disable_ocmem_for_voice(int cid);
+#else
+static inline int audio_ocmem_process_req(int id, bool enable)
+	{ return 0; }
+static inline int voice_ocmem_process_req(int cid, bool enable)
+	{ return 0; }
+static inline int enable_ocmem_after_voice(int cid) { return 0; }
+static inline int disable_ocmem_for_voice(int cid) { return 0; }
+#endif
+
+#endif
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 783a03d..99fd1d3 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -325,6 +325,494 @@
.remove = msm_dai_q6_dai_auxpcm_remove,
};
+static int msm_dai_q6_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ int rc = 0;
+
+ if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+ /* PORT START should be set if prepare called in active state */
+ rc = afe_q6_interface_prepare();
+ if (IS_ERR_VALUE(rc))
+ dev_err(dai->dev, "fail to open AFE APR\n");
+ }
+ return rc;
+}
+
+static int msm_dai_q6_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ int rc = 0;
+
+ /*
+	 * Start/stop the port without waiting for the Q6 AFE response. The
+	 * native Q6 AFE driver needs to propagate the AFE response in order
+	 * to handle port start/stop command errors properly if they arise.
+ */
+ pr_debug("%s:port:%d cmd:%d dai_data->status_mask = %ld",
+ __func__, dai->id, cmd, *dai_data->status_mask);
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+ switch (dai->id) {
+ case VOICE_PLAYBACK_TX:
+ case VOICE_RECORD_TX:
+ case VOICE_RECORD_RX:
+ afe_pseudo_port_start_nowait(dai->id);
+ break;
+ default:
+ afe_port_start_nowait(dai->id,
+ &dai_data->port_config, dai_data->rate);
+ break;
+ }
+ set_bit(STATUS_PORT_STARTED,
+ dai_data->status_mask);
+ }
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+ switch (dai->id) {
+ case VOICE_PLAYBACK_TX:
+ case VOICE_RECORD_TX:
+ case VOICE_RECORD_RX:
+ afe_pseudo_port_stop_nowait(dai->id);
+ break;
+ default:
+ afe_port_stop_nowait(dai->id);
+ break;
+ }
+ clear_bit(STATUS_PORT_STARTED,
+ dai_data->status_mask);
+ }
+ break;
+
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int msm_dai_q6_cdc_hw_params(struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+ dai_data->channels = params_channels(params);
+ switch (dai_data->channels) {
+ case 2:
+ dai_data->port_config.i2s.mono_stereo = MSM_AFE_STEREO;
+ break;
+ case 1:
+ dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ dai_data->rate = params_rate(params);
+ dai_data->port_config.i2s.sample_rate = dai_data->rate;
+ dai_data->port_config.i2s.i2s_cfg_minor_version =
+ AFE_API_VERSION_I2S_CONFIG;
+ dai_data->port_config.i2s.data_format = AFE_LINEAR_PCM_DATA;
+ dev_dbg(dai->dev, " channel %d sample rate %d entered\n",
+ dai_data->channels, dai_data->rate);
+
+	/* Q6 only supports 16 as of now */
+ dai_data->port_config.i2s.bit_width = 16;
+ dai_data->port_config.i2s.channel_mode = 1;
+ return 0;
+}
+
+static u8 num_of_bits_set(u8 sd_line_mask)
+{
+ u8 num_bits_set = 0;
+
+ while (sd_line_mask) {
+ num_bits_set++;
+ sd_line_mask = sd_line_mask & (sd_line_mask - 1);
+ }
+ return num_bits_set;
+}
+
+static int msm_dai_q6_i2s_hw_params(struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct msm_i2s_data *i2s_pdata =
+ (struct msm_i2s_data *) dai->dev->platform_data;
+
+ dai_data->channels = params_channels(params);
+ if (num_of_bits_set(i2s_pdata->sd_lines) == 1) {
+ switch (dai_data->channels) {
+ case 2:
+ dai_data->port_config.i2s.mono_stereo = MSM_AFE_STEREO;
+ break;
+ case 1:
+ dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
+ break;
+ default:
+			pr_warn("channel counts greater than stereo have not been validated\n");
+ break;
+ }
+ }
+ dai_data->rate = params_rate(params);
+ dai_data->port_config.i2s.sample_rate = dai_data->rate;
+ dai_data->port_config.i2s.i2s_cfg_minor_version =
+ AFE_API_VERSION_I2S_CONFIG;
+ dai_data->port_config.i2s.data_format = AFE_LINEAR_PCM_DATA;
+	/* Q6 only supports 16 as of now */
+ dai_data->port_config.i2s.bit_width = 16;
+ dai_data->port_config.i2s.channel_mode = 1;
+
+ return 0;
+}
+
+static int msm_dai_q6_slim_bus_hw_params(struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+ dai_data->channels = params_channels(params);
+ dai_data->rate = params_rate(params);
+
+	/* Q6 only supports 16 as of now */
+ dai_data->port_config.slim_sch.sb_cfg_minor_version =
+ AFE_API_VERSION_SLIMBUS_CONFIG;
+ dai_data->port_config.slim_sch.bit_width = 16;
+ dai_data->port_config.slim_sch.data_format = 0;
+ dai_data->port_config.slim_sch.num_channels = dai_data->channels;
+ dai_data->port_config.slim_sch.sample_rate = dai_data->rate;
+
+ dev_dbg(dai->dev, "%s:slimbus_dev_id[%hu] bit_wd[%hu] format[%hu]\n"
+ "num_channel %hu shared_ch_mapping[0] %hu\n"
+		"shared_ch_mapping[1] %hu shared_ch_mapping[2] %hu\n"
+ "sample_rate %d\n", __func__,
+ dai_data->port_config.slim_sch.slimbus_dev_id,
+ dai_data->port_config.slim_sch.bit_width,
+ dai_data->port_config.slim_sch.data_format,
+ dai_data->port_config.slim_sch.num_channels,
+ dai_data->port_config.slim_sch.shared_ch_mapping[0],
+ dai_data->port_config.slim_sch.shared_ch_mapping[1],
+ dai_data->port_config.slim_sch.shared_ch_mapping[2],
+ dai_data->rate);
+
+ return 0;
+}
+
+static int msm_dai_q6_bt_fm_hw_params(struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+ dai_data->channels = params_channels(params);
+ dai_data->rate = params_rate(params);
+
+ dev_dbg(dai->dev, "channels %d sample rate %d entered\n",
+ dai_data->channels, dai_data->rate);
+
+ memset(&dai_data->port_config, 0, sizeof(dai_data->port_config));
+
+ return 0;
+}
+
+static int msm_dai_q6_afe_rtproxy_hw_params(struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+ dai_data->rate = params_rate(params);
+ dai_data->port_config.rtproxy.num_channels = params_channels(params);
+ dai_data->port_config.rtproxy.sample_rate = params_rate(params);
+
+	pr_debug("channel %d entered, dai_id: %d, rate: %d\n",
+ dai_data->port_config.rtproxy.num_channels, dai->id, dai_data->rate);
+
+ dai_data->port_config.rtproxy.rt_proxy_cfg_minor_version =
+ AFE_API_VERSION_RT_PROXY_CONFIG;
+ dai_data->port_config.rtproxy.bit_width = 16; /* Q6 only supports 16 */
+ dai_data->port_config.rtproxy.interleaved = 1;
+ dai_data->port_config.rtproxy.frame_size = params_period_bytes(params);
+ dai_data->port_config.rtproxy.jitter_allowance =
+ dai_data->port_config.rtproxy.frame_size/2;
+ dai_data->port_config.rtproxy.low_water_mark = 0;
+ dai_data->port_config.rtproxy.high_water_mark = 0;
+
+ return 0;
+}
+
+/* The current implementation assumes hw_params is called once.
+ * That may not hold; it is unclear how to handle the case where the
+ * ADM and AFE ports are already opened and the parameters change.
+ */
+static int msm_dai_q6_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ int rc = 0;
+
+ switch (dai->id) {
+ case PRIMARY_I2S_TX:
+ case PRIMARY_I2S_RX:
+ case SECONDARY_I2S_RX:
+ rc = msm_dai_q6_cdc_hw_params(params, dai, substream->stream);
+ break;
+ case MI2S_RX:
+ rc = msm_dai_q6_i2s_hw_params(params, dai, substream->stream);
+ break;
+ case SLIMBUS_0_RX:
+ case SLIMBUS_1_RX:
+ case SLIMBUS_0_TX:
+ case SLIMBUS_1_TX:
+ rc = msm_dai_q6_slim_bus_hw_params(params, dai,
+ substream->stream);
+ break;
+ case INT_BT_SCO_RX:
+ case INT_BT_SCO_TX:
+ case INT_FM_RX:
+ case INT_FM_TX:
+ rc = msm_dai_q6_bt_fm_hw_params(params, dai, substream->stream);
+ break;
+ case RT_PROXY_DAI_001_TX:
+ case RT_PROXY_DAI_001_RX:
+ case RT_PROXY_DAI_002_TX:
+ case RT_PROXY_DAI_002_RX:
+ rc = msm_dai_q6_afe_rtproxy_hw_params(params, dai);
+ break;
+ case VOICE_PLAYBACK_TX:
+ case VOICE_RECORD_RX:
+ case VOICE_RECORD_TX:
+ rc = 0;
+ break;
+ default:
+ dev_err(dai->dev, "invalid AFE port ID\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static void msm_dai_q6_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ int rc = 0;
+
+ if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+ switch (dai->id) {
+ case VOICE_PLAYBACK_TX:
+ case VOICE_RECORD_TX:
+ case VOICE_RECORD_RX:
+ pr_debug("%s, stop pseudo port:%d\n",
+ __func__, dai->id);
+ rc = afe_stop_pseudo_port(dai->id);
+ break;
+ default:
+ rc = afe_close(dai->id); /* can block */
+ break;
+ }
+ if (IS_ERR_VALUE(rc))
+ dev_err(dai->dev, "fail to close AFE port\n");
+ pr_debug("%s: dai_data->status_mask = %ld\n", __func__,
+ *dai_data->status_mask);
+ clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+ }
+}
+
+static int msm_dai_q6_cdc_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ dai_data->port_config.i2s.ws_src = 1; /* CPU is master */
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ dai_data->port_config.i2s.ws_src = 0; /* CPU is slave */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int msm_dai_q6_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ int rc = 0;
+
+ dev_dbg(dai->dev, "enter %s, id = %d fmt[%d]\n", __func__,
+ dai->id, fmt);
+ switch (dai->id) {
+ case PRIMARY_I2S_TX:
+ case PRIMARY_I2S_RX:
+ case MI2S_RX:
+ case SECONDARY_I2S_RX:
+ rc = msm_dai_q6_cdc_set_fmt(dai, fmt);
+ break;
+ default:
+ dev_err(dai->dev, "invalid cpu_dai set_fmt\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int msm_dai_q6_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot)
+
+{
+ int rc = 0;
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ unsigned int i = 0;
+
+ dev_dbg(dai->dev, "enter %s, id = %d\n", __func__, dai->id);
+ switch (dai->id) {
+ case SLIMBUS_0_RX:
+ case SLIMBUS_1_RX:
+ /*
+ * channel number to be between 128 and 255.
+ * For RX port use channel numbers
+ * from 138 to 144 for pre-Taiko
+ * from 144 to 159 for Taiko
+ */
+ if (!rx_slot)
+ return -EINVAL;
+ for (i = 0; i < rx_num; i++) {
+ dai_data->port_config.slim_sch.shared_ch_mapping[i] =
+ rx_slot[i];
+			pr_debug("%s: find number of channels[%d] ch[%d]\n",
+ __func__, i, rx_slot[i]);
+ }
+ dai_data->port_config.slim_sch.num_channels = rx_num;
+ pr_debug("%s:SLIMBUS_0_RX cnt[%d] ch[%d %d]\n", __func__,
+ rx_num, dai_data->port_config.slim_sch.shared_ch_mapping[0],
+ dai_data->port_config.slim_sch.shared_ch_mapping[1]);
+
+ break;
+ case SLIMBUS_0_TX:
+ case SLIMBUS_1_TX:
+ /*
+ * channel number to be between 128 and 255.
+ * For TX port use channel numbers
+ * from 128 to 137 for pre-Taiko
+ * from 128 to 143 for Taiko
+ */
+ if (!tx_slot)
+ return -EINVAL;
+ for (i = 0; i < tx_num; i++) {
+ dai_data->port_config.slim_sch.shared_ch_mapping[i] =
+ tx_slot[i];
+ pr_debug("%s: find number of channels[%d] ch[%d]\n",
+ __func__, i, tx_slot[i]);
+ }
+ dai_data->port_config.slim_sch.num_channels = tx_num;
+ pr_debug("%s:SLIMBUS_0_TX cnt[%d] ch[%d %d]\n", __func__,
+ tx_num,
+ dai_data->port_config.slim_sch.shared_ch_mapping[0],
+ dai_data->port_config.slim_sch.shared_ch_mapping[1]);
+ break;
+ default:
+ dev_err(dai->dev, "invalid cpu_dai id %d\n", dai->id);
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
+static struct snd_soc_dai_ops msm_dai_q6_ops = {
+ .prepare = msm_dai_q6_prepare,
+ .trigger = msm_dai_q6_trigger,
+ .hw_params = msm_dai_q6_hw_params,
+ .shutdown = msm_dai_q6_shutdown,
+ .set_fmt = msm_dai_q6_set_fmt,
+ .set_channel_map = msm_dai_q6_set_channel_map,
+};
+
+static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
+{
+ struct msm_dai_q6_dai_data *dai_data;
+ int rc = 0;
+
+ dai_data = kzalloc(sizeof(struct msm_dai_q6_dai_data), GFP_KERNEL);
+
+ if (!dai_data) {
+ dev_err(dai->dev, "DAI-%d: fail to allocate dai data\n",
+ dai->id);
+ rc = -ENOMEM;
+ } else
+ dev_set_drvdata(dai->dev, dai_data);
+
+ return rc;
+}
+
+static int msm_dai_q6_dai_remove(struct snd_soc_dai *dai)
+{
+ struct msm_dai_q6_dai_data *dai_data;
+ int rc;
+
+ dai_data = dev_get_drvdata(dai->dev);
+
+ /* If AFE port is still up, close it */
+ if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+ switch (dai->id) {
+ case VOICE_PLAYBACK_TX:
+ case VOICE_RECORD_TX:
+ case VOICE_RECORD_RX:
+ pr_debug("%s, stop pseudo port:%d\n",
+ __func__, dai->id);
+ rc = afe_stop_pseudo_port(dai->id);
+ break;
+ default:
+ rc = afe_close(dai->id); /* can block */
+ }
+ if (IS_ERR_VALUE(rc))
+ dev_err(dai->dev, "fail to close AFE port\n");
+ clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+ }
+ kfree(dai_data);
+ snd_soc_unregister_dai(dai->dev);
+
+ return 0;
+}
+
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_1_rx_dai = {
+ .playback = {
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 1,
+ .rate_min = 8000,
+ .rate_max = 16000,
+ },
+ .ops = &msm_dai_q6_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_1_tx_dai = {
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 1,
+ .rate_min = 8000,
+ .rate_max = 16000,
+ },
+ .ops = &msm_dai_q6_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+};
+
static int msm_auxpcm_dev_probe(struct platform_device *pdev)
{
int id;
@@ -512,6 +1000,135 @@
},
};
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_rx_dai = {
+ .playback = {
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &msm_dai_q6_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_tx_dai = {
+ .capture = {
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &msm_dai_q6_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+};
+
+static int msm_dai_q6_dev_probe(struct platform_device *pdev)
+{
+ int rc, id;
+ const char *q6_dev_id = "qcom,msm-dai-q6-dev-id";
+
+ rc = of_property_read_u32(pdev->dev.of_node, q6_dev_id, &id);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s: missing %s in dt node\n", __func__, q6_dev_id);
+ return rc;
+ }
+
+ pdev->id = id;
+ dev_set_name(&pdev->dev, "%s.%d", "msm-dai-q6-dev", id);
+
+ pr_debug("%s: dev name %s, id:%d\n", __func__,
+ dev_name(&pdev->dev), pdev->id);
+
+ switch (id) {
+ case SLIMBUS_0_RX:
+ rc = snd_soc_register_dai(&pdev->dev,
+ &msm_dai_q6_slimbus_rx_dai);
+ break;
+ case SLIMBUS_0_TX:
+ rc = snd_soc_register_dai(&pdev->dev,
+ &msm_dai_q6_slimbus_tx_dai);
+ break;
+ case SLIMBUS_1_RX:
+ rc = snd_soc_register_dai(&pdev->dev,
+ &msm_dai_q6_slimbus_1_rx_dai);
+ break;
+ case SLIMBUS_1_TX:
+ rc = snd_soc_register_dai(&pdev->dev,
+ &msm_dai_q6_slimbus_1_tx_dai);
+ break;
+ default:
+ rc = -ENODEV;
+ break;
+ }
+
+ return rc;
+}
+
+static int msm_dai_q6_dev_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_dai(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id msm_dai_q6_dev_dt_match[] = {
+ { .compatible = "qcom,msm-dai-q6-dev", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm_dai_q6_dev_dt_match);
+
+static struct platform_driver msm_dai_q6_dev = {
+ .probe = msm_dai_q6_dev_probe,
+ .remove = msm_dai_q6_dev_remove,
+ .driver = {
+ .name = "msm-dai-q6-dev",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_dai_q6_dev_dt_match,
+ },
+};
+
+static int msm_dai_q6_probe(struct platform_device *pdev)
+{
+ int rc;
+ pr_debug("%s: dev name %s, id:%d\n", __func__,
+ dev_name(&pdev->dev), pdev->id);
+ rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: failed to add child nodes, rc=%d\n",
+ __func__, rc);
+ } else
+ dev_dbg(&pdev->dev, "%s: added child node\n", __func__);
+
+ return rc;
+}
+
+static int msm_dai_q6_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id msm_dai_q6_dt_match[] = {
+ { .compatible = "qcom,msm-dai-q6", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm_dai_q6_dt_match);
+static struct platform_driver msm_dai_q6 = {
+ .probe = msm_dai_q6_probe,
+ .remove = msm_dai_q6_remove,
+ .driver = {
+ .name = "msm-dai-q6",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_dai_q6_dt_match,
+ },
+};
static int __init msm_dai_q6_init(void)
{
@@ -522,10 +1139,27 @@
goto fail;
rc = platform_driver_register(&msm_auxpcm_resource);
-
if (rc) {
pr_err("%s: fail to register cpu dai driver\n", __func__);
platform_driver_unregister(&msm_auxpcm_dev);
+ goto fail;
+ }
+
+ rc = platform_driver_register(&msm_dai_q6);
+ if (rc) {
+		pr_err("%s: fail to register dai q6 driver\n", __func__);
+ platform_driver_unregister(&msm_auxpcm_dev);
+ platform_driver_unregister(&msm_auxpcm_resource);
+ goto fail;
+ }
+
+ rc = platform_driver_register(&msm_dai_q6_dev);
+ if (rc) {
+		pr_err("%s: fail to register dai q6 dev driver\n", __func__);
+ platform_driver_unregister(&msm_dai_q6);
+ platform_driver_unregister(&msm_auxpcm_dev);
+ platform_driver_unregister(&msm_auxpcm_resource);
+ goto fail;
}
fail:
return rc;
@@ -534,6 +1168,8 @@
static void __exit msm_dai_q6_exit(void)
{
+ platform_driver_unregister(&msm_dai_q6_dev);
+ platform_driver_unregister(&msm_dai_q6);
platform_driver_unregister(&msm_auxpcm_dev);
platform_driver_unregister(&msm_auxpcm_resource);
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
index 1ac872d..047e0f0 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
@@ -35,6 +35,7 @@
#include "msm-pcm-q6-v2.h"
#include "msm-pcm-routing-v2.h"
+#include "audio_ocmem.h"
static struct audio_locks the_locks;
@@ -223,6 +224,7 @@
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
prtd->pcm_irq_pos = 0;
+ audio_ocmem_process_req(AUDIO, true);
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
pr_debug("SNDRV_PCM_TRIGGER_START\n");
@@ -231,6 +233,7 @@
break;
case SNDRV_PCM_TRIGGER_STOP:
pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+ audio_ocmem_process_req(AUDIO, false);
atomic_set(&prtd->start, 0);
if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
break;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 67ee8e4..fbbb3a5 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -217,11 +217,12 @@
fe_dai_map[fedai_id][session_type] = dspst_id;
for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
if (!is_be_dai_extproc(i) &&
- (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
- (msm_bedais[i].active) &&
- (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+ (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+ (msm_bedais[i].active) &&
+ (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
mode = afe_get_port_type(msm_bedais[i].port_id);
- /*adm_connect_afe_port needs to be called*/
+ adm_connect_afe_port(mode, dspst_id,
+ msm_bedais[i].port_id);
break;
}
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
index 630405a..492569b 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
@@ -30,6 +30,7 @@
#include "msm-pcm-q6-v2.h"
#include "msm-pcm-routing-v2.h"
#include "q6voice.h"
+#include "audio_ocmem.h"
#define VOIP_MAX_Q_LEN 10
#define VOIP_MAX_VOC_PKT_SIZE 640
@@ -452,6 +453,8 @@
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
pr_debug("%s: Trigger start\n", __func__);
+ if ((!prtd->capture_start) && (!prtd->playback_start))
+ voice_ocmem_process_req(VOICE, true);
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
prtd->capture_start = 1;
else
@@ -459,6 +462,8 @@
break;
case SNDRV_PCM_TRIGGER_STOP:
pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+ if (prtd->capture_start && prtd->playback_start)
+ voice_ocmem_process_req(VOICE, false);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
prtd->playback_start = 0;
else
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index aed6273..e5837b2 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -16,7 +16,7 @@
#include <linux/jiffies.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
-
+#include <linux/wait.h>
#include <mach/qdsp6v2/audio_acdb.h>
#include <mach/qdsp6v2/rtac.h>
@@ -25,7 +25,7 @@
#include <mach/qdsp6v2/apr.h>
#include <sound/q6adm-v2.h>
#include <sound/q6audio-v2.h>
-
+#include <sound/q6afe-v2.h>
#define TIMEOUT_MS 1000
@@ -78,7 +78,7 @@
return 0;
}
if (data->opcode == APR_BASIC_RSP_RESULT) {
- pr_debug("APR_BASIC_RSP_RESULT\n");
+ pr_debug("APR_BASIC_RSP_RESULT id %x\n", payload[0]);
switch (payload[0]) {
case ADM_CMD_SET_PP_PARAMS_V5:
if (rtac_make_adm_callback(
@@ -142,6 +142,76 @@
pr_debug("%s\n", __func__);
}
+int adm_connect_afe_port(int mode, int session_id, int port_id)
+{
+ struct adm_cmd_connect_afe_port_v5 cmd;
+ int ret = 0;
+ int index;
+
+ pr_debug("%s: port %d session id:%d mode:%d\n", __func__,
+ port_id, session_id, mode);
+
+ port_id = afe_convert_virtual_to_portid(port_id);
+
+ if (afe_validate_port(port_id) < 0) {
+		pr_err("%s port id[%d] is invalid\n", __func__, port_id);
+ return -ENODEV;
+ }
+ if (this_adm.apr == NULL) {
+ this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
+ 0xFFFFFFFF, &this_adm);
+ if (this_adm.apr == NULL) {
+ pr_err("%s: Unable to register ADM\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ rtac_set_adm_handle(this_adm.apr);
+ }
+ index = afe_get_port_index(port_id);
+ pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);
+
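+	/*
+	 * Build the APR header for the connect command: the packet is
+	 * addressed to the ADM service on the ADSP and the token carries
+	 * the port id.
+	 */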
+ cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cmd.hdr.pkt_size = sizeof(cmd);
+ cmd.hdr.src_svc = APR_SVC_ADM;
+ cmd.hdr.src_domain = APR_DOMAIN_APPS;
+ cmd.hdr.src_port = port_id;
+ cmd.hdr.dest_svc = APR_SVC_ADM;
+ cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
+ cmd.hdr.dest_port = port_id;
+ cmd.hdr.token = port_id;
+ cmd.hdr.opcode = ADM_CMD_CONNECT_AFE_PORT_V5;
+
+ cmd.mode = mode;
+ cmd.session_id = session_id;
+ cmd.afe_port_id = port_id;
+
+ atomic_set(&this_adm.copp_stat[index], 0);
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
+ if (ret < 0) {
+		pr_err("%s: ADM connect AFE port failed for port %d\n",
+ __func__, port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ /* Wait for the callback with copp id */
+ ret = wait_event_timeout(this_adm.wait[index],
+ atomic_read(&this_adm.copp_stat[index]),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+		pr_err("%s: ADM connect AFE port timed out for port %d\n", __func__,
+ port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ atomic_inc(&this_adm.copp_cnt[index]);
+ return 0;
+
+fail_cmd:
+
+ return ret;
+}
+
int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
{
struct adm_cmd_device_open_v5 open;
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 4875a69..756cb18 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -144,6 +144,9 @@
case HDMI_RX:
case SLIMBUS_0_RX:
case SLIMBUS_1_RX:
+ case SLIMBUS_2_RX:
+ case SLIMBUS_3_RX:
+ case SLIMBUS_4_RX:
case INT_BT_SCO_RX:
case INT_BT_A2DP_RX:
case INT_FM_RX:
@@ -160,6 +163,9 @@
case VOICE_RECORD_TX:
case SLIMBUS_0_TX:
case SLIMBUS_1_TX:
+ case SLIMBUS_2_TX:
+ case SLIMBUS_3_TX:
+ case SLIMBUS_4_TX:
case INT_FM_TX:
case VOICE_RECORD_RX:
case INT_BT_SCO_TX:
@@ -168,6 +174,7 @@
break;
default:
+ WARN_ON(1);
pr_err("%s: invalid port id %d\n", __func__, port_id);
ret = -EINVAL;
}
@@ -255,7 +262,6 @@
ret = -EINVAL;
return ret;
}
- pr_err("%s: %d %d\n", __func__, port_id, rate);
index = q6audio_get_port_index(port_id);
if (q6audio_validate_port(port_id) < 0)
return -EINVAL;
@@ -279,11 +285,11 @@
config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
+ config.hdr.pkt_size = sizeof(config);
config.hdr.src_port = 0;
config.hdr.dest_port = 0;
-
config.hdr.token = index;
+
switch (port_id) {
case PRIMARY_I2S_RX:
case PRIMARY_I2S_TX:
@@ -320,15 +326,15 @@
goto fail_cmd;
}
config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = port_id;
- config.param.payload_size = (afe_sizeof_cfg_cmd(port_id) +
- sizeof(struct afe_port_param_data_v2));
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+ sizeof(config.param);
config.param.payload_address_lsw = 0x00;
config.param.payload_address_msw = 0x00;
config.param.mem_map_handle = 0x00;
config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
config.pdata.param_id = cfg_type;
- config.pdata.param_size = afe_sizeof_cfg_cmd(port_id);
+ config.pdata.param_size = sizeof(config.port);
config.port = *afe_config;
@@ -348,9 +354,11 @@
start.hdr.pkt_size = sizeof(start);
start.hdr.src_port = 0;
start.hdr.dest_port = 0;
- start.hdr.token = 0;
+ start.hdr.token = index;
start.hdr.opcode = AFE_PORT_CMD_DEVICE_START;
- start.port_id = port_id;
+ start.port_id = q6audio_get_port_id(port_id);
+ pr_debug("%s: cmd device start opcode[0x%x] port id[0x%x]\n",
+ __func__, start.hdr.opcode, start.port_id);
ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
@@ -367,6 +375,45 @@
return ret;
}
+int afe_get_port_index(u16 port_id)
+{
+ switch (port_id) {
+ case PRIMARY_I2S_RX: return IDX_PRIMARY_I2S_RX;
+ case PRIMARY_I2S_TX: return IDX_PRIMARY_I2S_TX;
+ case PCM_RX: return IDX_PCM_RX;
+ case PCM_TX: return IDX_PCM_TX;
+ case SECONDARY_I2S_RX: return IDX_SECONDARY_I2S_RX;
+ case SECONDARY_I2S_TX: return IDX_SECONDARY_I2S_TX;
+ case MI2S_RX: return IDX_MI2S_RX;
+ case MI2S_TX: return IDX_MI2S_TX;
+ case HDMI_RX: return IDX_HDMI_RX;
+ case RSVD_2: return IDX_RSVD_2;
+ case RSVD_3: return IDX_RSVD_3;
+ case DIGI_MIC_TX: return IDX_DIGI_MIC_TX;
+ case VOICE_RECORD_RX: return IDX_VOICE_RECORD_RX;
+ case VOICE_RECORD_TX: return IDX_VOICE_RECORD_TX;
+ case VOICE_PLAYBACK_TX: return IDX_VOICE_PLAYBACK_TX;
+ case SLIMBUS_0_RX: return IDX_SLIMBUS_0_RX;
+ case SLIMBUS_0_TX: return IDX_SLIMBUS_0_TX;
+ case SLIMBUS_1_RX: return IDX_SLIMBUS_1_RX;
+ case SLIMBUS_1_TX: return IDX_SLIMBUS_1_TX;
+ case SLIMBUS_2_RX: return IDX_SLIMBUS_2_RX;
+ case SLIMBUS_2_TX: return IDX_SLIMBUS_2_TX;
+ case SLIMBUS_3_RX: return IDX_SLIMBUS_3_RX;
+ case INT_BT_SCO_RX: return IDX_INT_BT_SCO_RX;
+ case INT_BT_SCO_TX: return IDX_INT_BT_SCO_TX;
+ case INT_BT_A2DP_RX: return IDX_INT_BT_A2DP_RX;
+ case INT_FM_RX: return IDX_INT_FM_RX;
+ case INT_FM_TX: return IDX_INT_FM_TX;
+ case RT_PROXY_PORT_001_RX: return IDX_RT_PROXY_PORT_001_RX;
+ case RT_PROXY_PORT_001_TX: return IDX_RT_PROXY_PORT_001_TX;
+ case SLIMBUS_4_RX: return IDX_SLIMBUS_4_RX;
+ case SLIMBUS_4_TX: return IDX_SLIMBUS_4_TX;
+
+ default: return -EINVAL;
+ }
+}
+
int afe_open(u16 port_id,
union afe_port_config *afe_config, int rate)
{
@@ -1469,6 +1516,75 @@
return ret;
}
+int afe_validate_port(u16 port_id)
+{
+ int ret;
+
+ switch (port_id) {
+ case PRIMARY_I2S_RX:
+ case PRIMARY_I2S_TX:
+ case PCM_RX:
+ case PCM_TX:
+ case SECONDARY_I2S_RX:
+ case SECONDARY_I2S_TX:
+ case MI2S_RX:
+ case MI2S_TX:
+ case HDMI_RX:
+ case RSVD_2:
+ case RSVD_3:
+ case DIGI_MIC_TX:
+ case VOICE_RECORD_RX:
+ case VOICE_RECORD_TX:
+ case VOICE_PLAYBACK_TX:
+ case SLIMBUS_0_RX:
+ case SLIMBUS_0_TX:
+ case SLIMBUS_1_RX:
+ case SLIMBUS_1_TX:
+ case SLIMBUS_2_RX:
+ case SLIMBUS_2_TX:
+ case SLIMBUS_3_RX:
+ case INT_BT_SCO_RX:
+ case INT_BT_SCO_TX:
+ case INT_BT_A2DP_RX:
+ case INT_FM_RX:
+ case INT_FM_TX:
+ case RT_PROXY_PORT_001_RX:
+ case RT_PROXY_PORT_001_TX:
+ case SLIMBUS_4_RX:
+ case SLIMBUS_4_TX:
+ {
+ ret = 0;
+ break;
+ }
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int afe_convert_virtual_to_portid(u16 port_id)
+{
+ int ret;
+
+ /*
+	 * If port_id is a virtual id, convert it to the physical port id;
+	 * if it is already physical, return it unchanged.
+ */
+ if (afe_validate_port(port_id) < 0) {
+ if (port_id == RT_PROXY_DAI_001_RX ||
+ port_id == RT_PROXY_DAI_001_TX ||
+ port_id == RT_PROXY_DAI_002_RX ||
+ port_id == RT_PROXY_DAI_002_TX)
+ ret = VIRTUAL_ID_TO_PORTID(port_id);
+ else
+ ret = -EINVAL;
+ } else
+ ret = port_id;
+
+ return ret;
+}
int afe_port_stop_nowait(int port_id)
{
struct afe_port_cmd_device_stop stop;
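Note: a small illustrative sketch, not part of the patch, of how the two new
AFE helpers are meant to be combined (q6adm.c above does exactly this before
sending the connect command); virtual RT proxy DAI ids are mapped to their
physical ports, anything already physical comes back unchanged:

	static int example_normalize(u16 port_id)
	{
		int port = afe_convert_virtual_to_portid(port_id);

		if (port < 0 || afe_validate_port(port) < 0)
			return -ENODEV;	/* not a usable AFE port */
		return port;		/* safe to hand to the DSP */
	}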
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
new file mode 100644
index 0000000..2c31d39
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -0,0 +1,211 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <mach/msm_smd.h>
+#include <mach/qdsp6v2/apr.h>
+#include "q6core.h"
+#include <mach/ocmem.h>
+
+#define TIMEOUT_MS 1000
+
+struct q6core_str {
+ struct apr_svc *core_handle_q;
+ wait_queue_head_t bus_bw_req_wait;
+ u32 bus_bw_resp_received;
+ struct avcs_cmd_rsp_get_low_power_segments_info_t *lp_ocm_payload;
+};
+
+static struct q6core_str q6core_lcl;
+
+static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
+{
+ uint32_t *payload1;
+ uint32_t nseg;
+ int i, j;
+
+ pr_info("core msg: payload len = %u, apr resp opcode = 0x%X\n",
+ data->payload_size, data->opcode);
+
+ switch (data->opcode) {
+
+ case APR_BASIC_RSP_RESULT:{
+
+ if (data->payload_size == 0) {
+			pr_err("%s: APR_BASIC_RSP_RESULT No Payload\n",
+ __func__);
+ return 0;
+ }
+
+ payload1 = data->payload;
+
+ switch (payload1[0]) {
+
+ case AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO:
+ pr_info("%s: Cmd = AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO status[0x%x]\n",
+ __func__, payload1[1]);
+ break;
+ default:
+ pr_err("Invalid cmd rsp[0x%x][0x%x]\n",
+ payload1[0], payload1[1]);
+ break;
+ }
+ break;
+ }
+
+ case AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO:
+ payload1 = data->payload;
+ pr_info("%s: cmd = AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO num_segments = 0x%x\n",
+ __func__, payload1[0]);
+ nseg = payload1[0];
+ q6core_lcl.lp_ocm_payload->num_segments = nseg;
+ q6core_lcl.lp_ocm_payload->bandwidth = payload1[1];
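+		/*
+		 * Each segment is reported as four words: type (low 16 bits)
+		 * and category (high 16 bits) packed together, then the size,
+		 * then the 64-bit start address as LSW/MSW.
+		 */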
+ for (i = 0, j = 2; i < nseg; i++) {
+ q6core_lcl.lp_ocm_payload->mem_segment[i].type =
+ (payload1[j] & 0xffff);
+ q6core_lcl.lp_ocm_payload->mem_segment[i].category =
+ ((payload1[j++] >> 16) & 0xffff);
+ q6core_lcl.lp_ocm_payload->mem_segment[i].size =
+ payload1[j++];
+ q6core_lcl.lp_ocm_payload->
+ mem_segment[i].start_address_lsw =
+ payload1[j++];
+ q6core_lcl.lp_ocm_payload->
+ mem_segment[i].start_address_msw =
+ payload1[j++];
+ }
+
+ q6core_lcl.bus_bw_resp_received = 1;
+ wake_up(&q6core_lcl.bus_bw_req_wait);
+ break;
+
+ case RESET_EVENTS:{
+		pr_debug("Reset event received in Core service\n");
+ apr_reset(q6core_lcl.core_handle_q);
+ q6core_lcl.core_handle_q = NULL;
+ break;
+ }
+
+ default:
+		pr_err("Unexpected message from adsp core svc: 0x%x\n", data->opcode);
+ break;
+ }
+
+ return 0;
+}
+
+
+void ocm_core_open(void)
+{
+ if (q6core_lcl.core_handle_q == NULL)
+ q6core_lcl.core_handle_q = apr_register("ADSP", "CORE",
+ aprv2_core_fn_q, 0xFFFFFFFF, NULL);
+ pr_debug("Open_q %p\n", q6core_lcl.core_handle_q);
+ if (q6core_lcl.core_handle_q == NULL)
+ pr_err("%s: Unable to register CORE\n", __func__);
+}
+
+int core_get_low_power_segments(
+ struct avcs_cmd_rsp_get_low_power_segments_info_t **lp_memseg)
+{
+ struct avcs_cmd_get_low_power_segments_info lp_ocm_cmd;
+ u8 *cptr = NULL;
+ int ret = 0;
+
+	pr_debug("%s\n", __func__);
+
+ ocm_core_open();
+ if (q6core_lcl.core_handle_q == NULL) {
+ pr_info("%s: apr registration for CORE failed\n", __func__);
+ return -ENODEV;
+ }
+
+ cptr = kzalloc(
+ sizeof(struct avcs_cmd_rsp_get_low_power_segments_info_t),
+ GFP_KERNEL);
+ if (!cptr) {
+ pr_err("%s: Failed to allocate memory for low power segment struct\n",
+ __func__);
+ return -ENOMEM;
+ }
+ q6core_lcl.lp_ocm_payload =
+ (struct avcs_cmd_rsp_get_low_power_segments_info_t *) cptr;
+
+ lp_ocm_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ lp_ocm_cmd.hdr.pkt_size =
+ sizeof(struct avcs_cmd_get_low_power_segments_info);
+
+ lp_ocm_cmd.hdr.src_port = 0;
+ lp_ocm_cmd.hdr.dest_port = 0;
+ lp_ocm_cmd.hdr.token = 0;
+ lp_ocm_cmd.hdr.opcode = AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO;
+
+
+ ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &lp_ocm_cmd);
+ if (ret < 0) {
+ pr_err("%s: CORE low power segment request failed\n", __func__);
+ goto fail_cmd;
+ }
+
+ ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
+ (q6core_lcl.bus_bw_resp_received == 1),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout for GET_LOW_POWER_SEGMENTS\n",
+ __func__);
+ ret = -ETIME;
+ goto fail_cmd;
+ }
+
+ *lp_memseg = q6core_lcl.lp_ocm_payload;
+ return 0;
+
+fail_cmd:
+ return ret;
+}
+
+
+static int __init core_init(void)
+{
+ init_waitqueue_head(&q6core_lcl.bus_bw_req_wait);
+ q6core_lcl.bus_bw_resp_received = 0;
+
+ q6core_lcl.core_handle_q = NULL;
+ q6core_lcl.lp_ocm_payload = kzalloc(
+ sizeof(struct avcs_cmd_rsp_get_low_power_segments_info_t), GFP_KERNEL);
+
+ if (!q6core_lcl.lp_ocm_payload) {
+ pr_err("%s: Failed to allocate memory for low power segment struct\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+module_init(core_init);
+
+static void __exit core_exit(void)
+{
+ kfree(q6core_lcl.lp_ocm_payload);
+}
+module_exit(core_exit);
+MODULE_DESCRIPTION("ADSP core driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/sound/soc/msm/qdsp6v2/q6core.h b/sound/soc/msm/qdsp6v2/q6core.h
new file mode 100644
index 0000000..5cb6098
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6core.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __Q6CORE_H__
+#define __Q6CORE_H__
+#include <mach/qdsp6v2/apr.h>
+#include <mach/ocmem.h>
+
+
+#define AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO 0x00012903
+
+struct avcs_cmd_get_low_power_segments_info {
+ struct apr_hdr hdr;
+} __packed;
+
+
+#define AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO 0x00012904
+
+/* @brief AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO payload
+ * structure. Payload for this event comprises one instance of
+ * avcs_cmd_rsp_get_low_power_segments_info_t, followed
+ * immediately by num_segments instances of the
+ * avcs_mem_segment_t structure.
+ */
+
+/* Types of Low Power Memory Segments. */
+#define READ_ONLY_SEGMENT 1
+/*< Read Only memory segment. */
+#define READ_WRITE_SEGMENT 2
+/*< Read Write memory segment. */
+/* Category indicates whether a segment belongs to audio, the OS, or sensors. */
+#define AUDIO_SEGMENT 1
+/*< Audio memory segment. */
+#define OS_SEGMENT 2
+/*< QDSP6's OS memory segment. */
+
+/* @brief Payload structure describing one AVS low power memory
+ * segment.
+ */
+struct avcs_mem_segment_t {
+ uint16_t type;
+/*< Indicates which type of memory this segment is.
+ *Allowed values: READ_ONLY_SEGMENT or READ_WRITE_SEGMENT only.
+ */
+ uint16_t category;
+/*< Indicates whether this is an audio or an OS segment.
+ *Allowed values: AUDIO_SEGMENT or OS_SEGMENT only.
+ */
+ uint32_t size;
+/*< Size (in bytes) of this segment.
+ * Will be a non-zero value.
+ */
+ uint32_t start_address_lsw;
+/*< Lower 32 bits of the 64-bit physical start address
+ * of this segment.
+ */
+ uint32_t start_address_msw;
+/*< Upper 32 bits of the 64-bit physical start address
+ * of this segment.
+ */
+};
+
+struct avcs_cmd_rsp_get_low_power_segments_info_t {
+ uint32_t num_segments;
+/*< Number of segments in this response.
+ * 0: there are no known sections that should be mapped
+ * from DDR to OCMEM.
+ * >0: the number of memory segments in the following list.
+ */
+
+ uint32_t bandwidth;
+/*< Required OCMEM read/write bandwidth (in bytes per second)
+ * if OCMEM is granted.
+ * 0 if num_segments = 0
+ * >0 if num_segments > 0.
+ */
+ struct avcs_mem_segment_t mem_segment[OCMEM_MAX_CHUNKS];
+};
+
+
+int core_get_low_power_segments(
+ struct avcs_cmd_rsp_get_low_power_segments_info_t **);
+
+#endif /* __Q6CORE_H__ */
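Note: a minimal usage sketch of the new core API, not part of the patch; the
caller name is hypothetical and error handling is trimmed.
core_get_low_power_segments() blocks until the ADSP responds or the 1 second
timeout expires and returns a pointer to the driver-owned response buffer:

	static int example_dump_lp_segments(void)
	{
		struct avcs_cmd_rsp_get_low_power_segments_info_t *seg = NULL;
		uint32_t i;
		int rc;

		rc = core_get_low_power_segments(&seg);
		if (rc)
			return rc;	/* e.g. -ENODEV, -ENOMEM or -ETIME */

		pr_debug("%u segments, %u bytes/sec OCMEM bandwidth\n",
			 seg->num_segments, seg->bandwidth);
		for (i = 0; i < seg->num_segments; i++)
			pr_debug("seg %u: type %u category %u size %u\n", i,
				 seg->mem_segment[i].type,
				 seg->mem_segment[i].category,
				 seg->mem_segment[i].size);
		return 0;
	}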