Merge "radio-tavarua: Validate whether the current station is good or bad" into msm-3.4
diff --git a/Documentation/devicetree/bindings/arm/msm/dcvs-core-info.txt b/Documentation/devicetree/bindings/arm/msm/dcvs-core-info.txt
new file mode 100644
index 0000000..a39356c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/dcvs-core-info.txt
@@ -0,0 +1,78 @@
+DCVS Core Info
+
+This data describes specific DCVS tuning data for a specific core (CPU, GPU,
+etc).
+
+Required properties:
+
+- qcom,core-max-time-us: Maximum time limit in microseconds for switching clock rate.
+ Limited to this value if switching time takes longer than this limit. Typical value is 100000.
+- qcom,algo-slack-time-us: Time in microseconds after which the QoS guarantee will kick in
+ and the clock rate will be increased as necessary. Typical value is about 30000.
+- qcom,algo-disable-pc-threshold: If core frequency (kHz) is higher than this value, power collapse is disallowed. Set to 0 for GPU.
+- qcom,algo-ss-window-size: Steady state window size in microseconds.
+- qcom,algo-ss-util-pct: When determining the steady state level, this percentage value is used to provide headroom
+ from the utilized cpu to the selected level.
+- qcom,algo-em-max-util-pct: When determining the level with the lowest energy, any level that exceeds this busy
+ percentage, for the measured work in the last window, is disqualified for performance reasons.
+- qcom,algo-ss-iobusy-conv: Used to convert correlation time into assumed IO Busy time, which is removed
+ from measured elapsed time when computing cpu utilization.
+
+
+A number of frequency levels are represented as sub-nodes:
+
+required properties:
+- reg: The index of the frequency entry
+- qcom,freq: The frequency of the DVS entry (in kHz)
+- qcom,idle-energy: The idle energy cost of the entry (in micro watts)
+- qcom,active-energy: The active energy cost of the entry (in micro watts)
+
+Sample:
+
+qcom,kgsl-3d0@fdb00000 {
+ ...
+ qcom,dcvs-core-info {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "qcom,dcvs-core-info";
+
+ qcom,core-max-time-us = <100000>;
+ qcom,algo-slack-time-us = <39000>;
+ qcom,algo-disable-pc-threshold = <86000>;
+ qcom,algo-ss-window-size = <1000000>;
+ qcom,algo-ss-util-pct = <95>;
+ qcom,algo-em-max-util-pct = <97>;
+ qcom,algo-ss-iobusy-conv = <100>;
+
+ qcom,dcvs-freq@0 {
+ reg = <0>;
+ qcom,freq = <0>;
+ qcom,idle-energy = <0>;
+ qcom,active-energy = <333932>;
+ };
+
+ qcom,dcvs-freq@1 {
+ reg = <1>;
+ qcom,freq = <0>;
+ qcom,idle-energy = <0>;
+ qcom,active-energy = <497532>;
+ };
+
+ qcom,dcvs-freq@2 {
+ reg = <2>;
+ qcom,freq = <0>;
+ qcom,idle-energy = <0>;
+ qcom,active-energy = <707610>;
+ };
+
+ qcom,dcvs-freq@3 {
+ reg = <3>;
+ qcom,freq = <0>;
+ qcom,idle-energy = <0>;
+ qcom,active-energy = <844545>;
+ };
+ };
+ ...
+};
+
diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
index 35385d3..4129f7f 100644
--- a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
+++ b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
@@ -18,7 +18,11 @@
- qcom,vdd-mem-upper-bound: The upper bound value of mem voltage in uV
- qcom,vdd-mem-lower-bound: The lower bound value of mem voltage in uV
- qcom,vdd-dig-upper-bound: The upper bound value of dig voltage in uV
+ or an RBCPR (Rapid Bridge Core Power Reduction)
+ corner voltage.
- qcom,vdd-dig-lower-bound: The lower bound value of dig voltage in uV
+ or an RBCPR (Rapid Bridge Core Power Reduction)
+ corner voltage.
- qcom,latency-us: The latency in handling the interrupt if this level was
chosen, in uSec
- qcom,ss-power: The steady state power expelled when the processor is in this
@@ -37,8 +41,8 @@
qcom,l2 = <3>; /* ACTIVE */
qcom,vdd-mem-upper-bound = <1150000>; /* MAX */
qcom,vdd-mem-lower-bound = <1050000>; /* ACTIVE */
- qcom,vdd-dig-upper-bound = <1150000>; /* MAX */
- qcom,vdd-dig-lower-bound = <950000>; /* ACTIVE */
+ qcom,vdd-dig-upper-bound = <5>; /* MAX */
+ qcom,vdd-dig-lower-bound = <3>; /* ACTIVE */
qcom,latency-us = <100>;
qcom,ss-power = <650>;
qcom,energy-overhead = <801>;
diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt b/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt
index 9ff43a1..b16d40f 100644
--- a/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt
+++ b/Documentation/devicetree/bindings/arm/msm/lpm-resources.txt
@@ -13,19 +13,20 @@
- compatible: "qcom,lpm-resources"
- reg: The numeric level id
-- qcom,name: The name of the low power resource.
-- qcom,type: The string represeting the type of resource used
- like smps or pxo.
+- qcom,name: The name of the low power resource represented
+ as a string.
+- qcom,type: The type of resource used like smps or pxo
+ represented as a hex value.
- qcom,id: The id representing a device within a resource type.
- qcom,key: The key is the specific attribute of the resource being
- monitored.
+ monitored represented as a hex value.
Example:
qcom,lpm-resources@0 {
reg = <0x0>;
qcom,name = "vdd-dig";
- qcom,type = "smpb\0";
+ qcom,type = <0x62706d73>; /* "smpb" */
qcom,id = <0x02>;
- qcom,key = "uv\0\0";
+ qcom,key = <0x6e726f63>; /* "corn" */
};
diff --git a/Documentation/devicetree/bindings/arm/msm/memory-reserve.txt b/Documentation/devicetree/bindings/arm/msm/memory-reserve.txt
new file mode 100644
index 0000000..068e256
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/memory-reserve.txt
@@ -0,0 +1,42 @@
+* Memory reservations for MSM targets
+
+Large contiguous allocations (generally sizes greater than 64KB) must be
+allocated from a carved out memory pool. The size of the carved out pools
+is based on the sizes drivers need. To properly size the pools, devices
+must specify the size and type of the memory needed. Any driver wanting to
+allocate contiguous memory should indicate this via device tree bindings:
+
+Required parameters:
+- qcom,memory-reservation-type: type of memory to be reserved. This is a
+string defined in arch/arm/mach-msm/memory.c
+- qcom,memory-reservation-size: size of memory to be reserved
+
+Example:
+
+ qcom,a-driver {
+ compatible = "qcom,a-driver";
+ qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+ qcom,memory-reservation-size = <0x400000>; /* size 4MB */
+ };
+
+Under some circumstances, it may be necessary to remove a chunk of memory
+from the kernel completely using memblock remove. Note this is different
+than adjusting the memory tags passed in via the bootloader as the virtual
+range is not affected. Any driver needing to remove a block of memory should
+add the appropriate binding:
+
+Required parameters:
+- qcom,memblock-remove: base and size of block to be removed
+
+ qcom,a-driver {
+ compatible = "qcom,a-driver";
+ /* Remove 4MB at 0x200000*/
+ qcom,memblock-remove = <0x200000 0x400000>;
+ };
+
+In order to ensure memory is only reserved when a driver is actually enabled,
+drivers are required to add EXPORT_COMPAT(<name of compatible string>) some
+where in the driver. For the examples above, the driver must add
+EXPORT_COMPAT("qcom,a-driver") to the driver, similar to EXPORT_SYMBOL.
+The EXPORT_COMPAT is to ensure that memory is only carved out if the
+driver is actually enabled, otherwise the memory will not be used.
diff --git a/Documentation/devicetree/bindings/arm/msm/mpm.txt b/Documentation/devicetree/bindings/arm/msm/mpm.txt
new file mode 100644
index 0000000..0b504c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/mpm.txt
@@ -0,0 +1,67 @@
+* MSM Sleep Power Manager (mpm-v2)
+
+The MPM acts as a sleep power manager to shut down the clock source and put the
+device into a retention mode to save power. The MPM is also responsible for
+waking up and bringing up the resources from sleep. The MPM driver configures
+interrupts monitored by the MPM hardware before entering sleep through a
+RPM interface.
+
+The required nodes for the MPM driver are:
+
+- compatible: "qcom,mpm-v2"
+- reg: Specifies the base physical address(s) and the size of the MPM
+ registers. The MPM driver accesses two memory regions to configure the
+ virtual MPM driver on the RPM. The first region is the memory space
+ shared with the virtual MPM driver. The second region is the address
+ to the register that triggers an interrupt to the RPM.
+- reg-names: "vmpm" - string to identify the shared memory space region
+ "ipc" - string to identify the register that triggers a interrupt
+- qcom,ipc-bit-offset: The bit to set in the ipc register that triggers an interrupt
+ to the RPM
+- qcom,gic-parent: phandle to the gic interrupt controller
+- qcom,gic-map: Provides a mapping of how a GIC interrupt is connected to an MPM. The
+ mapping is presented in tuples. Each tuple represents a MPM pin and
+ which GIC interrupt is routed to it. Since MPM monitors interrupts
+ only during system wide low power mode, system interrupts originating
+ from other processors can be ignored and assigned an MPM pin mapping
+ of 0xff.
+- qcom,gpio-parent: phandle to the GPIO interrupt controller
+- qcom,gpio-map: Provides a mapping of how a GPIO interrupt is connected to an MPM. The
+ mapping is presented in tuples. Each tuple represents a MPM pin and
+ which GPIO interrupt is routed to it. Since MPM monitors interrupts
+ only during system wide low power mode, system interrupts originating
+ from other processors can be ignored and assigned an MPM pin mapping
+ of 0xff.
+
+Example:
+ qcom,mpm@fc4281d0 {
+ compatible = "qcom,mpm-v2";
+ reg = <0xfc4281d0 0x1000>, /* MSM_RPM_MPM_BASE 4K*/
+ <0xfa006000 0x1000>; /* MSM_APCS_GCC_BASE 4K*/
+ reg-names = "vmpm", "ipc"
+ interrupts = <0 171 1>;
+
+ qcom,ipc-bit-offset = <0>;
+
+ qcom,gic-parent = <&intc>;
+ qcom,gic-map = <25 132>,
+ <27 111>,
+ <0xff 48>,
+ <0xff 51>,
+ <0xff 52>,
+ <0xff 53>,
+ <0xff 54>,
+ <0xff 55>;
+
+ qcom,gpio-parent = <&msmgpio>;
+ qcom,gpio-map = <1 46>,
+ <2 150>,
+ <4 103>,
+ <5 104>,
+ <6 105>,
+ <7 106>,
+ <8 107>,
+ <53 37>,
+ <54 24>,
+ <55 14>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt b/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt
new file mode 100644
index 0000000..7b8642b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt
@@ -0,0 +1,21 @@
+Register Trace Buffer (RTB)
+
+The RTB is used to log discrete events in the system in an uncached buffer that
+can be post processed from RAM dumps. The RTB must reserve memory using
+the msm specific memory reservation bindings (see
+Documentation/devicetree/bindings/arm/msm/memory-reserve.txt).
+
+Required properties
+
+- compatible: "qcom,msm-rtb"
+- qcom,memory-reservation-size: size of reserved memory for the RTB buffer
+- qcom,memory-reservation-type: type of memory to be reserved
+(see memory-reserve.txt for information about memory reservations)
+
+Example:
+
+ qcom,msm-rtb {
+ compatible = "qcom,msm-rtb";
+ qcom,memory-reservation-type = "EBI1";
+ qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt b/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
index 82935ed..93b5144 100644
--- a/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
+++ b/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
@@ -140,12 +140,25 @@
corner values supported on MSM8974 for PMIC
PM8841 SMPS 2 (VDD_Dig); nominal voltages for
these corners are also shown:
- 0 = Retention (0.5000 V)
- 1 = SVS Krait (0.7250 V)
- 2 = SVS SOC (0.8125 V)
- 3 = Normal (0.9000 V)
- 4 = Turbo (0.9875 V)
- 5 = Super Turbo (1.0500 V)
+ 0 = None (don't care)
+ 1 = Retention (0.5000 V)
+ 2 = SVS Krait (0.7250 V)
+ 3 = SVS SOC (0.8125 V)
+ 4 = Normal (0.9000 V)
+ 5 = Turbo (0.9875 V)
+ 6 = Super Turbo (1.0500 V)
+- qcom,init-disallow-bypass: Specify that bypass mode should not be used for a
+ given LDO regulator. When in bypass mode, an
+ LDO performs no regulation and acts as a simple
+ switch. The RPM can utilize this mode for an
+ LDO that is subregulated from an SMPS when it is
+ possible to reduce the SMPS voltage to the
+ desired LDO output level. Bypass mode may be
+ disallowed if lower LDO output noise is
+ required. Supported values are:
+ 0 = Allow RPM to utilize LDO bypass mode
+ if possible
+ 1 = Disallow LDO bypass mode
All properties specified within the core regulator framework can also be used in
second level nodes. These bindings can be found in:
diff --git a/Documentation/devicetree/bindings/cache/msm_cache_erp.txt b/Documentation/devicetree/bindings/cache/msm_cache_erp.txt
new file mode 100644
index 0000000..400b299
--- /dev/null
+++ b/Documentation/devicetree/bindings/cache/msm_cache_erp.txt
@@ -0,0 +1,16 @@
+* Qualcomm Krait L1 / L2 cache error reporting driver
+
+Required properties:
+- compatible: Should be "qcom,cache_erp"
+- interrupts: Should contain the L1/CPU error interrupt number and
+ the L2 cache interrupt number
+- interrupt-names: Should contain the interrupt names "l1_irq" and
+ "l2_irq"
+
+Example:
+ qcom,cache_erp {
+ compatible = "qcom,cache_erp";
+ interrupts = <1 9 0>, <0 2 0>;
+ interrupt-names = "l1_irq", "l2_irq";
+ };
+
diff --git a/Documentation/devicetree/bindings/gpu/adreno-pwrlevels.txt b/Documentation/devicetree/bindings/gpu/adreno-pwrlevels.txt
new file mode 100644
index 0000000..d50a21c
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/adreno-pwrlevels.txt
@@ -0,0 +1,40 @@
+Adreno Power Levels
+
+The Adreno GPU definition should include a variable number of power levels
+defining the GPU and bus frequencies for the levels that the GPU can operate at.
+
+Required properties:
+
+- compatible: The compatible name for the object (qcom,gpu-pwrlevels)
+
+Each powerlevel definition is as follows:
+
+- reg: Index of the power level (lower is considered higher
+ performance)
+- qcom,gpu-freq: The GPU frequency for the power level (in HZ)
+- qcom,bus-freq: An index representing the bus scaling usecase appropriate
+ for the power level
+- qcom,io-fraction: A number indicating the fraction of the CPU I/O busy that
+ this operating point should represent.
+
+Sample usage:
+
+qcom,kgsl-3d0@fdb00000 {
+ ...
+ qcom,gpu-pwrlevels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "qcom,gpu-pwrlevels";
+
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <5000000000>;
+ qcom,bus-freq = <3>;
+ qcom,io-fraction = <0>;
+ };
+ };
+
+ ...
+};
+
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
new file mode 100644
index 0000000..16925fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -0,0 +1,159 @@
+Qualcomm GPU
+
+Qualcomm Adreno GPU
+
+Required properties:
+- label: A string used as a descriptive name for the device.
+- compatible: Must be "qcom,kgsl-3d0" and "qcom,kgsl-3d"
+- reg: Specifies the base address and address size for this device.
+- interrupts: Interrupt mapping for GPU IRQ.
+- interrupt-names: String property to describe the name of the interrupt.
+- qcom,id: An integer used as an identification number for the device.
+
+- qcom,clk-map: A bit map value for clocks controlled by kgsl.
+ KGSL_CLK_SRC 0x00000001
+ KGSL_CLK_CORE 0x00000002
+ KGSL_CLK_IFACE 0x00000004
+ KGSL_CLK_MEM 0x00000008
+ KGSL_CLK_MEM_IFACE 0x00000010
+ KGSL_CLK_AXI 0x00000020
+
+Bus Scaling Data:
+- qcom,grp3d-vectors: A series of 4 cell properties, format of which is:
+ <src dst ab ib>, <src dst ab ib>, // For Bus Scaling Usecase 1
+ <src dst ab ib>, <src dst ab ib>, // For Bus Scaling Usecase 2
+ <.. .. .. ..>, <.. .. .. ..>; // For Bus Scaling Usecase n
+ This property is a series of all vectors for all Bus Scaling Usecases.
+ Each set of vectors for each usecase describes bandwidth votes for a combination
+ of src/dst ports. The driver will set the desired use case based on the selected
+ power level and the desired bandwidth vote will be registered for the port pairs.
+ Current values of src are:
+ 0 = MSM_BUS_MASTER_GRAPHICS_3D
+ 1 = MSM_BUS_MASTER_GRAPHICS_3D_PORT1
+ 2 = MSM_BUS_MASTER_V_OCMEM_GFX3D
+ Current values of dst are:
+ 0 = MSM_BUS_SLAVE_EBI_CH0
+ 1 = MSM_BUS_SLAVE_OCMEM
+ ab: Represents aggregated bandwidth. This value is 0 for Graphics.
+ ib: Represents instantaneous bandwidth. This value has a range <0 8000 MB/s>
+- qcom,grp3d-num-vectors-per-usecase: This represents the number of vectors in each Bus Scaling Usecase.
+- qcom,grp3d-num-bus-scale-usecases: This is the number of Bus Scaling use cases defined in the vectors property
+
+GDSC Oxili Regulators:
+- vddcx-supply: Phandle for vddcx regulator device node.
+- vdd-supply: Phandle for vdd regulator device node.
+
+IOMMU Data:
+- iommu: Phandle for the KGSL IOMMU device node
+
+GPU Power levels:
+- qcom,gpu-pwrlevels: Container for the GPU Power Levels (see
+ adreno-pwrlevels.txt)
+
+DCVS Core info
+- qcom,dcvs-core-info Container for the DCVS core info (see
+ dcvs-core-info.txt)
+
+Optional Properties:
+- qcom,initial-pwrlevel: This value indicates which qcom,gpu-pwrlevel should be used at start time
+ and when coming back out of resume
+- qcom,idle-timeout: This property represents the time in microseconds for idle timeout.
+- qcom,nap-allowed: Boolean. <0> or <1> to disable/enable nap.
+- qcom,chipid: If it exists this property is used to replace
+ the chip identification read from the GPU hardware.
+ This is used to override faulty hardware readings.
+
+Example of A330 GPU in MSM8974:
+
+/ {
+ qcom,kgsl-3d0@fdb00000 {
+ label = "kgsl-3d0";
+ compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
+ reg = <0xfdb00000 0x20000>;
+ reg-names = "kgsl_3d0_reg_memory";
+ interrupts = <0 33 0>;
+ interrupt-names = "kgsl_3d0_irq";
+ qcom,id = <0>;
+
+ qcom,chipid = <0x03030000>;
+
+ /* Power Settings */
+
+ qcom,initial-pwrlevel = <1>;
+ qcom,idle-timeout = <83>; //<HZ/12>
+ qcom,nap-allowed = <1>;
+ qcom,clk-map = <0x00000016>; //KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE
+
+ /* Bus Scale Settings */
+ qcom,grp3d-vectors = <0 0 0 0>, <2 1 0 0>,
+ <0 0 0 2000>, <2 1 0 3000>,
+ <0 0 0 4000>, <2 1 0 5000>,
+ <0 0 0 6400>, <2 1 0 7600>;
+ qcom,grp3d-num-vectors-per-usecase = <2>;
+ qcom,grp3d-num-bus-scale-usecases = <4>;
+
+ /* GDSC oxili regulators */
+ vddcx-supply = <&gdsc_oxili_cx>;
+ vdd-supply = <&gdsc_oxili_gx>;
+
+ /* IOMMU Data */
+ iommu = <&kgsl>;
+
+ qcom,gpu-pwrlevels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "qcom,gpu-pwrlevels";
+
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <5000000000>;
+ qcom,bus-freq = <3>;
+ qcom,io-fraction = <0>;
+ };
+ };
+
+ qcom,dcvs-core-info {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "qcom,dcvs-core-info";
+
+ qcom,core-max-time-us = <100000>;
+ qcom,algo-slack-time-us = <39000>;
+ qcom,algo-disable-pc-threshold = <86000>;
+ qcom,algo-ss-window-size = <1000000>;
+ qcom,algo-ss-util-pct = <95>;
+ qcom,algo-em-max-util-pct = <97>;
+ qcom,algo-ss-iobusy-conv = <100>;
+
+ qcom,dcvs-freq@0 {
+ reg = <0>;
+ qcom,freq = <0>;
+ qcom,idle-energy = <0>;
+ qcom,active-energy = <333932>;
+ };
+
+ qcom,dcvs-freq@1 {
+ reg = <1>;
+ qcom,freq = <0>;
+ qcom,idle-energy = <0>;
+ qcom,active-energy = <497532>;
+ };
+
+ qcom,dcvs-freq@2 {
+ reg = <2>;
+ qcom,freq = <0>;
+ qcom,idle-energy = <0>;
+ qcom,active-energy = <707610>;
+ };
+
+ qcom,dcvs-freq@3 {
+ reg = <3>;
+ qcom,freq = <0>;
+ qcom,idle-energy = <0>;
+ qcom,active-energy = <844545>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
new file mode 100644
index 0000000..2ba7341
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
@@ -0,0 +1,98 @@
+Qualcomm's QPNP PMIC Voltage ADC Arbiter
+
+QPNP PMIC Voltage ADC (VADC) provides interface to clients to read
+Voltage. A 15 bit ADC is used for Voltage measurements. There are multiple
+peripherals to the VADC and the scope of the driver is to provide interface
+for the USR peripheral of the VADC.
+
+VADC node
+
+Required properties:
+- compatible : should be "qcom,qpnp-vadc" for Voltage ADC driver.
+- reg : offset and length of the PMIC Arbiter register map.
+- interrupts : The USR bank peripheral VADC interrupt.
+- qcom,adc-bit-resolution : Bit resolution of the ADC.
+- qcom,adc-vdd-reference : Voltage reference used by the ADC.
+
+Channel nodes
+NOTE: At least one Channel node is required.
+
+Required properties:
+- label : Channel name used for sysfs entry.
+- qcom,channel-num : Channel number associated to the AMUX input.
+- qcom,decimation : Sampling rate to use for the individual channel measurement.
+ Select from following unsigned int.
+ 0 : 512
+ 1 : 1K
+ 2 : 2K
+ 3 : 4K
+- qcom,pre-div-channel-scaling : Pre-div used for the channel before the signal
+ is being measured.
+- qcom,calibration-type : Reference voltage to use for channel calibration.
+ Channel calibration is dependent on the channel.
+ Certain channels like XO_THERM, BATT_THERM use ratiometric
+ calibration. Most other channels fall under absolute calibration.
+ Select from the following strings.
+ "absolute" : Uses the 625mv and 1.25V reference channels.
+ "ratiometric" : Uses the reference Voltage/GND for calibration.
+- qcom,scale-function : Scaling function used to convert raw ADC code to units specific to
+ a given channel.
+ Select from the following unsigned int.
+ 0 : Default scaling to convert raw adc code to voltage.
+ 1 : Conversion to temperature based on btm parameters.
+ 2 : Returns result in milli degrees Centigrade.
+ 3 : Returns current across 0.1 ohm resistor.
+ 4 : Returns XO thermistor voltage in degrees Centigrade.
+- qcom,hw-settle-time : Settling period for the channel before ADC read.
+ Select from the following unsigned int.
+ 0 : 0us
+ 1 : 100us
+ 2 : 200us
+ 3 : 300us
+ 4 : 400us
+ 5 : 500us
+ 6 : 600us
+ 7 : 700us
+ 8 : 800us
+ 9 : 900us
+ 0xa : 1ms
+ 0xb : 2ms
+ 0xc : 4ms
+ 0xd : 6ms
+ 0xe : 8ms
+ 0xf : 10ms
+- qcom,fast-avg-setup : Average number of samples to be used for measurement. Fast averaging
+ provides the option to obtain a single measurement from the ADC that
+ is an average of multiple samples. The value selected is 2^(value)
+ Select from the following unsigned int.
+ 0 : 1
+ 1 : 2
+ 2 : 4
+ 3 : 8
+ 4 : 16
+ 5 : 32
+ 6 : 64
+ 7 : 128
+ 8 : 256
+
+Example:
+ /* Main Node */
+ qcom,vadc@3100 {
+ compatible = "qcom,qpnp-vadc";
+ reg = <0x3100 0x100>;
+ interrupts = <0x0 0x31 0x0>;
+ qcom,adc-bit-resolution = <15>;
+ qcom,adc-vdd-reference = <1800>;
+
+ /* Channel Node */
+ chan@0 {
+ label = "usb_in";
+ qcom,channel-num = <0>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <20>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iommu/msm_iommu.txt b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
index c198fe9..f5a2590 100644
--- a/Documentation/devicetree/bindings/iommu/msm_iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
@@ -12,10 +12,14 @@
- interrupts : should contain the context bank interrupt.
- qcom,iommu-ctx-sids : List of stream identifiers associated with this
translation context.
- - qcom,iommu-ctx-name : Name of the context bank
+ - label : Name of the context bank
- qcom,iommu-smt-size : Number of SMR entries in the SMT of this HW block
- vdd-supply : vdd-supply: phandle to GDSC regulator controlling this IOMMU.
+Optional properties:
+- qcom,needs-alt-core-clk : boolean to enable the secondary core clock for
+ access to the IOMMU configuration registers
+
Example:
qcom,iommu@fda64000 {
@@ -27,12 +31,12 @@
reg = <0xfda6c000 0x1000>;
interrupts = <0 70 0>;
qcom,iommu-ctx-sids = <0 2>;
- qcom,iommu-ctx-name = "ctx_0";
+ label = "ctx_0";
};
qcom,iommu-ctx@fda6d000 {
reg = <0xfda6d000 0x1000>;
interrupts = <0 71 0>;
qcom,iommu-ctx-sids = <1>;
- qcom,iommu-ctx-name = "ctx_1";
+ label = "ctx_1";
};
};
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index 11af7a9..79a3ab5 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -5,11 +5,28 @@
- "qcom,msm-vidc"
- reg : offset and length of the register set for the device.
- interrupts : should contain the vidc interrupt.
+- vidc-cp-map : start and size of device virtual address range for secure buffers.
+ Video hardware uses this address range to identify if the buffers are secure
+ or non-secure.
+- vidc-ns-map : start and size of device virtual address range for non-secure buffers.
+ Video hardware uses this address range to identify if the buffers are secure
+ or non-secure.
+- load-freq-tbl : load (in macroblocks/sec) and corresponding vcodec clock
+ required for optimal performance in descending order.
Example:
+
qcom,vidc@fdc00000 {
compatible = "qcom,msm-vidc";
reg = <0xfdc00000 0xff000>;
interrupts = <0 44 0>;
+ vidc-cp-map = <0x1000000 0x40000000>;
+ vidc-ns-map = <0x40000000 0x40000000>;
+ load-freq-tbl = <979200 410000000>,
+ <560145 266670000>,
+ <421161 200000000>,
+ <243000 133330000>,
+ <108000 100000000>,
+ <36000 50000000>;
};
diff --git a/Documentation/devicetree/bindings/mmc/msm_sdcc.txt b/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
index 35ac0ec..c674a13 100644
--- a/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
+++ b/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
@@ -16,10 +16,8 @@
Optional Properties:
- cell-index - defines slot ID.
- qcom,sdcc-bus-width - defines the bus I/O width that controller supports.
- - qcom,sdcc-wp-gpio - defines write protect switch gpio.
- - qcom,sdcc-wp-polarity - specifies the polarity of wp switch.
- - qcom,sdcc-cd-gpio - defines card detect gpio number.
- - qcom,sdcc-cd-polarity - specifies the polarity of cd gpio.
+ - wp-gpios - specify GPIO for write protect switch detection.
+ - cd-gpios - specify GPIO for card detection.
- qcom,sdcc-nonremovable - specifies whether the card in slot is
hot pluggable or hard wired.
- qcom,sdcc-disable_cmd23 - disable sending CMD23 to card when controller can't support it.
@@ -35,6 +33,15 @@
- qcom,sdcc-<supply>-current_level - specifies load levels for supply in lpm or
high power mode (hpm). Should be specified in pairs (lpm, hpm), units uA.
+ - gpios - specifies gpios assigned for sdcc slot.
+ - qcom,sdcc-gpio-names - a list of strings that map in order to the list of gpios
+ A slot has either gpios or dedicated tlmm pins as represented below.
+ - qcom,sdcc-pad-pull-on - Active pull configuration for sdc tlmm pins
+ - qcom,sdcc-pad-pull-off - Suspend pull configuration for sdc tlmm pins.
+ - qcom,sdcc-pad-drv-on - Active drive strength configuration for sdc tlmm pins.
+ - qcom,sdcc-pad-drv-off - Suspend drive strength configuration for sdc tlmm pins.
+ Tlmm pins are specified as <clk cmd data>
+
Example:
qcom,sdcc@f9600000 {
diff --git a/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
new file mode 100644
index 0000000..bddbbae
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
@@ -0,0 +1,51 @@
+Qualcomm Parallel Interface controller (QPIC) for NAND devices
+
+Required properties:
+- compatible : "qcom,msm-nand".
+- reg : should specify QPIC NANDc and BAM physical address range.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should specify QPIC/BAM interrupt numbers.
+- interrupt-names : should specify relevant names to each interrupts property
+ defined.
+
+MTD flash partition layout for NAND devices -
+
+Each partition is represented as a sub-node of the qcom,mtd-partitions device.
+Each node's name represents the name of the corresponding partition.
+
+Required properties:
+- reg : The partition offset and size
+- label : The label / name for this partition.
+
+Optional properties:
+- read-only: This parameter, if present, indicates that this partition
+ should only be mounted read-only.
+
+Examples:
+
+ qcom,nand@f9af0000 {
+ compatible = "qcom,msm-nand";
+ reg = <0xf9af0000 0x1000>,
+ <0xf9ac4000 0x8000>;
+ reg-names = "nand_phys",
+ "bam_phys";
+ interrupts = <0 279 0>;
+ interrupt-names = "bam_irq";
+ };
+
+ qcom,mtd-partitions {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "boot";
+ reg = <0x00000000 0x1000>;
+ };
+ partition@00020000 {
+ label = "userdata";
+ reg = <0x00020000 0x1000>;
+ };
+ partition@00040000 {
+ label = "system";
+ reg = <0x00040000 0x1000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/ocmem/msm-ocmem.txt b/Documentation/devicetree/bindings/ocmem/msm-ocmem.txt
new file mode 100644
index 0000000..1549f10
--- /dev/null
+++ b/Documentation/devicetree/bindings/ocmem/msm-ocmem.txt
@@ -0,0 +1,70 @@
+Qualcomm MSM On-Chip Memory Driver
+
+msm-ocmem is a driver for managing On-Chip Memory (OCMEM) in MSM SoCs.
+It is responsible for allowing various clients to allocate memory from
+OCMEM based on performance, latency and power requirements.
+
+Required Properties:
+- compatible: Must be "qcom,msm-ocmem"
+- reg: Four pairs of physical base addresses and region sizes
+ of memory mapped registers.
+- reg-names : Register region name(s) referenced in reg above
+ "ocmem_ctrl_physical" corresponds to OCMEM control registers.
+ "dm_ctrl_physical" corresponds to DM control registers.
+ "br_ctrl_physical" corresponds to BR control registers.
+ "ocmem_physical" corresponds to address range of OCMEM memory.
+- interrupts: OCMEM core interrupt(s).
+- interrupt-names: OCMEM core interrupt name(s) referenced in interrupts above
+ "ocmem_irq" corresponds to OCMEM Error Interrupt.
+ "dm_irq" corresponds to DM Interrupt.
+- qcom,ocmem-num-regions: The number of OCMEM hardware memory regions.
+
+In addition to the information on the OCMEM core, the
+device tree contains additional information describing partitions
+of the OCMEM address space. This is used to establish regions
+of OCMEM that are used for each potential client. The partitions
+can overlap and the OCMEM driver ensures that there is no possibility
+of concurrent access from more than one client to the same address range.
+This allows the OCMEM driver to maximize the usage of OCMEM at all times.
+
+Each partition is represented as a sub-node of the OCMEM device.
+
+OCMEM partitions
+
+Required Properties:
+ - reg : The partition's offset and size within OCMEM.
+ - qcom,ocmem-part-name : The name for this partition.
+ - qcom,ocmem-part-min: The minimum amount of memory reserved exclusively for
+ this client.
+Optional Properties:
+ - qcom,ocmem-part-tail : This parameter, if present, indicates that successive
+ allocations from this partition must be allocated at
+ lower offsets.
+Example:
+
+ qcom,ocmem@fdd00000 {
+ reg = <0xfdd00000 0x2000>,
+ <0xfdd02000 0x2000>,
+ <0xfe039000 0x400>,
+ <0xfec00000 0x180000>;
+ reg-names = "ocmem_ctrl_physical", "dm_ctrl_physical", "br_ctrl_physical", "ocmem_physical";
+ interrupts = <0 76 0 0 77 0>;
+ interrupt-names = "ocmem_irq", "dm_irq";
+ qcom,ocmem-num-regions = <0x3>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xfec00000 0x180000>;
+
+ partition@0 {
+ reg = <0x0 0x100000>;
+ qcom,ocmem-part-name = "graphics";
+ qcom,ocmem-part-min = <0x80000>;
+ };
+
+ partition@100000 {
+ reg = <0x100000 0x80000>;
+ qcom,ocmem-part-name = "video";
+ qcom,ocmem-part-min = <0x55000>;
+ };
+
+ };
diff --git a/Documentation/devicetree/bindings/platform/msm/qpnp-power-on.txt b/Documentation/devicetree/bindings/platform/msm/qpnp-power-on.txt
new file mode 100644
index 0000000..2e7f9c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/qpnp-power-on.txt
@@ -0,0 +1,39 @@
+Qualcomm QPNP power-on
+
+The qpnp-power-on is a driver which supports the power-on(PON)
+peripheral on Qualcomm PMICs. The supported functionality includes
+power on/off reason, power-key press/release detection and other PON
+features. This peripheral is connected to the host processor via the SPMI
+interface.
+
+Required properties:
+- compatible: Must be "qcom,qpnp-power-on"
+- reg: Specifies the SPMI address and size for this PON (power-on) peripheral
+- interrupts: Specifies the interrupt associated with the power-key.
+
+Optional properties:
+- qcom,pon-key-enable: Enable power-key detection. It enables monitoring
+ of the KPDPWR_N line (connected to the power-key).
+- qcom,pon-key-dbc-delay: The debounce delay for the power-key interrupt
+                          specified in us. The value ranges from 2 seconds
+ to 1/64 of a second. Possible values are -
+ - 2, 1, 1/2, 1/4, 1/8, 1/16, 1/32, 1/64
+ - Intermediate value is rounded down to the
+ nearest valid value.
+- qcom,pon-key-pull-up: The initial state of the KPDPWR_N pin
+ (connected to the power-key)
+ 0 = No pull-up
+ 1 = pull-up enabled
+
+If any of the above optional properties is not defined, the driver will continue
+with the default hardware state.
+
+Example:
+ qcom,power-on@800 {
+ compatible = "qcom,qpnp-power-on";
+ reg = <0x800 0x100>;
+ interrupts = <0x0 0x8 0x1>;
+		qcom,pon-key-enable = <true>;
+ qcom,pon-key-pull-up = <true>;
+ qcom,pon-key-dbc-delay = <15625>;
+ }
diff --git a/Documentation/devicetree/bindings/rtc/qpnp-rtc.txt b/Documentation/devicetree/bindings/rtc/qpnp-rtc.txt
new file mode 100644
index 0000000..156141f
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/qpnp-rtc.txt
@@ -0,0 +1,64 @@
+* msm-qpnp-rtc
+
+msm-qpnp-rtc is a RTC driver that supports 32 bit RTC housed inside PMIC.
+Driver utilizes MSM SPMI interface to communicate with the RTC module.
+RTC device is divided into two sub-peripherals: one which controls the basic
+RTC and the other which controls the alarm.
+
+[PMIC RTC Device Declarations]
+
+-Root Node-
+
+Required properties :
+ - compatible: Must be "qcom,qpnp-rtc"
+ - #address-cells: The number of cells dedicated to represent an address
+ This must be set to '1'.
+ - #size-cells: The number of cells dedicated to represent address
+ space range of a peripheral. This must be set to '1'.
+ - spmi-dev-container: This specifies that all the device nodes specified
+ within this node should have their resources
+ coalesced into a single spmi_device.
+
+Optional properties:
+ - qcom,qpnp-rtc-write: This property enables/disables rtc write
+ operation. If not mentioned rtc driver keeps
+ rtc writes disabled.
+ 0 = Disable rtc writes.
+ 1 = Enable rtc writes.
+ - qcom,qpnp-rtc-alarm-pwrup: This property enables/disables feature of
+ powering up phone (from power down state)
+ through alarm interrupt.
+ If not mentioned rtc driver will disable
+				feature of powering-up phone through alarm.
+ 0 = Disable powering up of phone through
+ alarm interrupt.
+ 1 = Enable powering up of phone through
+ alarm interrupt.
+
+-Child Nodes-
+
+Required properties :
+ - reg : Specify the spmi offset and size for device.
+ - interrupts: Specifies alarm interrupt, only for rtc_alarm
+ sub-peripheral.
+
+Example:
+ qcom,pm8941_rtc {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-rtc";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ qcom,qpnp-rtc-write = <0>;
+ qcom,qpnp-rtc-alarm-pwrup = <0>;
+
+ qcom,pm8941_rtc_rw@6000 {
+ reg = <0x6000 0x100>;
+ };
+
+ qcom,pm8941_rtc_alarm@6100 {
+ reg = <0x6100 0x100>;
+ interrupts = <0x0 0x61 0x1>;
+ };
+ };
+
+
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index b6086e7..84f0c24 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -1,4 +1,4 @@
-Qualcomm audio devices for ALSA sound soc
+Qualcomm audio devices for ALSA sound SoC
* msm-pcm
@@ -18,6 +18,12 @@
- compatible : "qcom,msm-pcm-lpa"
+* msm-compr-dsp
+
+Required properties:
+
+ - compatible : "qcom,msm-compr-dsp"
+
* msm-voip-dsp
Required properties:
@@ -36,6 +42,22 @@
- compatible : "qcom,msm-dai-fe"
+* msm-dai-q6
+
+[First Level Nodes]
+
+Required properties:
+
+ - compatible : "msm-dai-q6"
+
+[Second Level Nodes]
+
+Required properties:
+
+ - compatible : "qcom,msm-dai-q6-dev"
+ - qcom,msm-dai-q6-dev-id : The slimbus multi channel port ID
+ Value is from 16384 to 16393
+
* msm-auxpcm
[First Level Nodes]
@@ -81,6 +103,22 @@
- compatible : "qcom,msm-pcm-hostless"
+* msm-ocmem-audio
+
+Required properties:
+
+ - compatible : "qcom,msm-ocmem-audio"
+
+ - qcom,msm-ocmem-audio-src-id: Master port id
+
+ - qcom,msm-ocmem-audio-dst-id: Slave port id
+
+ - qcom,msm-ocmem-audio-ab: arbitrated bandwidth
+ in Bytes/s
+
+ - qcom,msm-ocmem-audio-ib: instantaneous bandwidth
+ in Bytes/s
+
Example:
qcom,msm-pcm {
@@ -95,6 +133,10 @@
compatible = "qcom,msm-pcm-lpa";
};
+ qcom,msm-compr-dsp {
+ compatible = "qcom,msm-compr-dsp";
+ };
+
qcom,msm-voip-dsp {
compatible = "qcom,msm-voip-dsp";
};
@@ -107,6 +149,19 @@
compatible = "qcom,msm-dai-fe";
};
+ qcom,msm-dai-q6 {
+ compatible = "qcom,msm-dai-q6";
+ qcom,msm-dai-q6-sb-0-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <16384>;
+ };
+
+ qcom,msm-dai-q6-sb-0-tx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <16385>;
+ };
+ };
+
qcom,msm-auxpcm {
compatible = "qcom,msm-auxpcm-resource";
qcom,msm-cpudai-auxpcm-clk = "pcm_clk";
@@ -119,12 +174,12 @@
qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>;
qcom,msm-auxpcm-rx {
- qcom,msm-auxpcm-dev-id = <4106>;
+ qcom,msm-auxpcm-dev-id = <4106>;
compatible = "qcom,msm-auxpcm-dev";
};
qcom,msm-auxpcm-tx {
- qcom,msm-auxpcm-dev-id = <4107>;
+ qcom,msm-auxpcm-dev-id = <4107>;
compatible = "qcom,msm-auxpcm-dev";
};
};
@@ -133,3 +188,11 @@
compatible = "qcom,msm-pcm-hostless";
};
+ qcom,msm-ocmem-audio {
+ compatible = "qcom,msm-ocmem-audio";
+ qcom,msm-ocmem-audio-src-id = <11>;
+ qcom,msm-ocmem-audio-dst-id = <604>;
+ qcom,msm-ocmem-audio-ab = <209715200>;
+ qcom,msm-ocmem-audio-ib = <471859200>;
+ };
+
diff --git a/Documentation/mtd/devices/msm_qpic_nand.txt b/Documentation/mtd/devices/msm_qpic_nand.txt
new file mode 100644
index 0000000..301e823
--- /dev/null
+++ b/Documentation/mtd/devices/msm_qpic_nand.txt
@@ -0,0 +1,296 @@
+Introduction
+============
+
+In MDM9x25, a new NAND controller (NANDc) has been added and it has the
+following major changes as compared to its previous version -
+
+1. It includes Secured BAM-Lite and the support for ADM (Application Data Mover)
+has been removed.
+
+2. It includes 4 bit BCH ECC and the support for 4 bit Reed Solomon ECC has
+been removed.
+
+3. The support for Dual NAND controllers has been removed and thus the
+software features like ping-pong mode and interleave mode are deprecated.
+
+4. It includes support for dual buffers in case of read and one dedicated
+write buffer to each processor (Modem and Apps).
+
+This new NAND driver takes care of all the above new hardware changes. In
+addition to the above hardware changes, it also takes care of software device
+tree changes.
+
+Hardware description
+====================
+
+The NANDc Core:
+---------------
+Qualcomm Parallel Interface Controller (QPIC), formerly named EBI2, is a
+wrapper module which integrates a NAND controller core and a LCD controller
+core and multiplexes their access to shared parallel interfaces pins. Both
+controller cores are accessible to processors (Modem and Apps), and share
+master access to the Peripheral NoC (Network on Chip) via a BAM module.
+
+In MDM9x25, QPIC is located on the peripheral NoC, connected via a 32-bit AHB
+Master port and a 32-bit AHB Slave Port. The NANDc register interface goes
+through AHB Slave Port and data transfers using BAM goes through AHB Master
+Port. The NAND Controller (NANDc) is a hardware core which manages the access
+to an off-chip NAND device.
+
+BAM-Lite:
+---------
+BAM (Bus Access Manager) can transfer data between a peripheral and memory,
+or between two peripherals in a BAM to BAM mode. Each BAM contains multiple
+DMA channels, called pipes. A pipe provides a unidirectional data transfer
+engine, capable of either receiving data in consumer mode, or transmitting
+data in producer mode. The consumer fetches the data from the source system
+memory, and the producer writes data to the destination system memory.
+
+BAM-Lite's interface is similar to the BAM interface with slight changes to
+the sideband interface. BAM-Lite is an area-optimized version of BAM. BAM-Lite
+supports new features such as Notify-When-Done(NWD), pipe lock/unlock and
+command descriptors.
+
+NANDc has a secured BAM-Lite which provides DMA support for the NANDc and
+command support for accessing the NANDc registers. It is called secured
+because it has an integrated APU (Address Protection Unit) that validates
+every access to BAM and its peripheral registers.
+
+The NANDc has in total 6 BAM pipes - 3 pipes are dedicated for each processor
+(Modem and Apps) at the hardware level.
+
+Software description
+====================
+
+The NAND device is shared between two independent file systems, each running
+on a different processor - the application processor (Apps) and the Modem.
+The NAND driver uses BAM driver to transfer NAND operation requests and
+data to/from the NAND Controller (NANDc) core through the BAM pipes. Every
+NANDc register read/write access must go through BAM as it facilitates security
+mechanisms to enable simultaneous access to NAND device from both processors
+(Modem and Apps).
+
+The Apps NAND driver registers NANDc BAM peripheral with BAM driver, allocates
+endpoints and descriptor FIFO memory and registers for complete event
+notification for the following pipes:
+
+ - system consumer pipe for data (pipe#0) : This BAM pipe will be used
+ for transferring data from system memory to NANDc i.e., during write.
+
+ - system producer pipe for data (pipe#1) : This BAM pipe will be used
+ for transferring data from NANDc to system memory i.e., during read.
+
+ - system consumer pipe for commands (pipe#2) : This BAM pipe will be
+ used for both reading and writing to NANDc registers. It can be
+ configured either as consumer pipe or producer pipe but as per HW
+ team's recommendation it is configured as consumer pipe.
+
+Control path:
+-------------
+Each NAND operation can be described as a set of BAM command or/and data
+descriptors.
+
+A command descriptor (CD) points to the starting address of a command
+block. Each command block may contain a set of command elements where
+each command element is a single NANDc register read/write. The NAND
+driver submits all command descriptors to its system consumer pipe#2.
+
+Data path:
+----------
+A Data Descriptor(DD) points to the start of a data block which is a sequential
+chunk of data.
+
+For page write operations, the NAND driver submits data descriptors to system
+consumer pipe#0 and as per the descriptors submitted, the BAM reads data from
+the data block into the NANDc buffer.
+
+For page read operations, the NAND driver submits data descriptors to system
+producer pipe#1 and as per the descriptors submitted, the BAM reads data from
+the NANDc buffer into the data block.
+
+The driver submits a CD/DD using BAM driver APIs sps_transfer_one()/
+sps_transfer(). To this API, flags is passed as one of the arguments and if
+SPS_IOVEC_FLAG_CMD is passed, then it is identified as a CD. Otherwise, it is
+identified as a DD. The other valid SPS flags for a CD/DD are -
+
+ - SPS_IOVEC_FLAG_INT : This flag indicates BAM driver to raise BAM
+ interrupt after the current descriptor with this flag has been
+ processed by BAM HW. This flag is applicable for both CD and DD.
+
+ - SPS_IOVEC_FLAG_NWD : This flag indicates BAM HW to not process
+ next descriptors until it receives an acknowledgement by NANDc
+ that the current descriptor with this flag is completely
+ executed. This flag is applicable only for a CD.
+
+ - SPS_IOVEC_FLAG_LOCK: This flag marks the beginning of a series of
+ commands and it indicates that all the CDs submitted on this pipe
+ must be executed atomically without any interruption by commands
+ from other pipes. This is applicable only for a CD.
+
+ - SPS_IOVEC_FLAG_UNLOCK: This flag marks the end of a series of
+ commands and it indicates that the other pipe that was locked due to
+ SPS_IOVEC_FLAG_LOCK flag can be unblocked after the current CD
+ with this flag is executed. This is applicable only for a CD.
+
+ - SPS_IOVEC_FLAG_EOT - This flag indicates to BAM driver that the
+ current descriptor with this flag is the last descriptor submitted
+ during write operation. This is applicable only for a DD.
+
+Error handling:
+---------------
+After a page read/write complete notification from BAM, NAND driver validates
+the values read from NANDc registers to confirm the success/failure of page
+read/write operation. For example, after a page read/write is complete, the
+driver reads the NANDc status registers to check for any operational errors,
+protection violation errors and device status errors, number of correctable/
+uncorrectable errors reported by the controller. Based on the error conditions
+that are met, the driver reports appropriate error codes to upper layers. The
+upper layers respond to these errors and take appropriate action.
+
+Design
+======
+
+The existing NAND driver (ADM based) can not be reused due to many major HW
+changes (see Introduction section) in the new NANDc core. Some of the complex
+features (Dual NAND controllers support) too are deprecated in the new NANDc.
+Hence, a new NAND driver is written to take care of both SPS/BAM changes and
+other controller specific changes. The rest of the interaction with MTD and
+YAFFS2 remains same as its previous version of NAND driver msm_nand.c.
+
+Power Management
+================
+
+Two clocks are supplied by the system's clock controller to NANDc - AHB clock
+and interface clock. The interface clock is the clock that drives some of the
+HW blocks within NANDc. As of now, both these clocks are always on. But NANDc
+provides clock gating if some of the QPIC clock control registers are
+configured. The clock gating is yet to be enabled by driver.
+
+SMP/Multi-Core
+==============
+
+The locking mechanism for page read/write operations is taken care of by the
+higher layers such as MTD/YAFFS2 and only one single page operation can happen
+at any time on a given partition. For a single page operation, there is always
+only one context associated within the driver and thus no additional handling
+is required within the driver. But it is possible for file system to issue
+one request on partition and at the same time to issue another request on
+another partition as each partition corresponds to different MTD block device.
+This situation is handled within the driver by properly acquiring a mutex lock
+before submitting any command/data descriptors to any of the BAM pipes.
+
+
+Security
+========
+
+The same NAND device is accessible from both processors (Modem and Apps) and
+thus to avoid any configuration overwrite issues during a page operation,
+driver on each processor (Modem and Apps) must explicitly use BAM pipe
+lock/unlock mechanism. This is taken care of by the NAND driver. The partition
+violation issues are prevented by an MPU (Memory Protection Unit) that is
+attached to NANDc.
+
+Performance
+===========
+
+None.
+
+Interface
+=========
+
+The NAND driver registers each partition on NAND device as a MTD block device
+using mtd_device_register(). As part of this registration, the following ops
+(struct mtd_info *mtd) are registered with MTD layer for each partition:
+
+mtd->_block_isbad = msm_nand_block_isbad;
+mtd->_block_markbad = msm_nand_block_markbad;
+mtd->_read = msm_nand_read;
+mtd->_write = msm_nand_write;
+mtd->_read_oob = msm_nand_read_oob;
+mtd->_write_oob = msm_nand_write_oob;
+mtd->_erase = msm_nand_erase;
+
+msm_nand_block_isbad() - This checks if a block is bad or not by reading bad
+block byte in the first page of a block. A block is considered as bad if bad
+block byte location contains any value other than 0xFF.
+
+msm_nand_block_markbad() - This marks a block as bad by writing 0 to the
+entire first page of the block and thus writing 0 to bad block byte location.
+
+msm_nand_read/write() - This is used to read/write only main data from/to
+single/multiple pages within NAND device. The YAFFS2 file system can send
+read/write request for two types of data -
+
+ - Main data : This is the actual data to be read/written from/to a
+ page during a read/write operation on this device. The size of this
+ data request is typically based on the page size of the device
+ (2K/4K).
+
+ - OOB(Out Of Band) data : This is the spare data that will be used by
+ file system to keep track of its meta data/tags associated with the
+ actual data. As of now, the file system needs only 16 bytes to
+ accommodate this data. The NAND driver always writes this data
+ towards the end of main data.
+
+It is up to the file system whether or not to send a read/write request for OOB
+data along with main data.
+
+msm_nand_read_oob()/write_oob() - This is used to read/write both main data
+and spare data from/to single/multiple pages within NAND device.
+
+msm_nand_erase() - This erases the complete block by sending erase command to
+the device.
+
+The YAFFS2 file system registers as the user of MTD device and uses the ops
+exposed by the NAND driver to perform read/write/erase operations on NAND
+device. As of now, the driver can work with only YAFFS2 file system. An
+attempt to use it with any other file system might demand additional changes
+in the driver.
+
+Driver parameters
+=================
+
+None.
+
+Config options
+==============
+
+The config option MTD_MSM_QPIC_NAND enables this driver.
+
+Dependencies
+============
+
+It depends on the following kernel components:
+
+- SPS/BAM driver
+- MTD core layer
+- To add necessary NANDc and BAM resources to .dts file
+
+It depends on the following non-kernel components:
+
+The partition information of the NAND device must be passed by Modem subsystem
+to Apps boot loader and Apps boot loader must update the .dts file
+with the partition information as per the defined MTD bindings.
+
+The detailed information on MTD bindings can be found at -
+Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
+
+User space utilities
+====================
+
+None.
+
+Other
+=====
+
+No changes other than device tree changes are anticipated.
+
+Known issues
+============
+
+None.
+
+To do
+=====
+
+The NANDc core supports clock gating and is not yet supported by the driver.
diff --git a/arch/arm/boot/dts/msm-iommu.dtsi b/arch/arm/boot/dts/msm-iommu.dtsi
new file mode 100755
index 0000000..0e2ddce9
--- /dev/null
+++ b/arch/arm/boot/dts/msm-iommu.dtsi
@@ -0,0 +1,161 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+ jpeg_iommu: qcom,iommu@fda64000 {
+ compatible = "qcom,msm-smmu-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0xfda64000 0x10000>;
+ vdd-supply = <&gdsc_jpeg>;
+ qcom,iommu-smt-size = <16>;
+ status = "disabled";
+
+ qcom,iommu-ctx@fda6c000 {
+ reg = <0xfda6c000 0x1000>;
+ interrupts = <0 69 0>;
+ qcom,iommu-ctx-sids = <0>;
+ label = "jpeg_enc0";
+ };
+
+ qcom,iommu-ctx@fda6d000 {
+ reg = <0xfda6d000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <1>;
+ label = "jpeg_enc1";
+ };
+
+ qcom,iommu-ctx@fda6e000 {
+ reg = <0xfda6e000 0x1000>;
+ interrupts = <0 71 0>;
+ qcom,iommu-ctx-sids = <2>;
+ label = "jpeg_dec";
+ };
+ };
+
+ mdp_iommu: qcom,iommu@fd928000 {
+ compatible = "qcom,msm-smmu-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0xfd928000 0x10000>;
+ vdd-supply = <&gdsc_mdss>;
+ qcom,iommu-smt-size = <16>;
+ status = "disabled";
+
+ qcom,iommu-ctx@fd930000 {
+ reg = <0xfd930000 0x1000>;
+ interrupts = <0 46 0>;
+ qcom,iommu-ctx-sids = <0>;
+ label = "mdp_0";
+ };
+
+ qcom,iommu-ctx@fd931000 {
+ reg = <0xfd931000 0x1000>;
+ interrupts = <0 47 0>;
+ qcom,iommu-ctx-sids = <1>;
+ label = "mdp_1";
+ };
+ };
+
+ venus_iommu: qcom,iommu@fdc84000 {
+ compatible = "qcom,msm-smmu-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0xfdc84000 0x10000>;
+ vdd-supply = <&gdsc_venus>;
+ qcom,iommu-smt-size = <16>;
+ qcom,needs-alt-core-clk;
+ status = "disabled";
+
+ qcom,iommu-ctx@fdc8c000 {
+ reg = <0xfdc8c000 0x1000>;
+ interrupts = <0 43 0>;
+ qcom,iommu-ctx-sids = <0 1 2 3 4 5>;
+ label = "venus_ns";
+ };
+
+ qcom,iommu-ctx@fdc8d000 {
+ reg = <0xfdc8d000 0x1000>;
+ interrupts = <0 42 0>;
+ qcom,iommu-ctx-sids = <0x80 0x81 0x82 0x83 0x84 0x85>;
+ label = "venus_cp";
+ };
+
+ qcom,iommu-ctx@fdc8e000 {
+ reg = <0xfdc8e000 0x1000>;
+ interrupts = <0 41 0>;
+ qcom,iommu-ctx-sids = <0xc0 0xc6>;
+ label = "venus_fw";
+ };
+ };
+
+ kgsl_iommu: qcom,iommu@fdb10000 {
+ compatible = "qcom,msm-smmu-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0xfdb10000 0x10000>;
+ vdd-supply = <&gdsc_oxili_cx>;
+ qcom,iommu-smt-size = <32>;
+ qcom,needs-alt-core-clk;
+ status = "disabled";
+
+ qcom,iommu-ctx@fdb18000 {
+ reg = <0xfdb18000 0x1000>;
+ interrupts = <0 240 0>;
+ qcom,iommu-ctx-sids = <0>;
+ label = "gfx3d_user";
+ };
+
+ qcom,iommu-ctx@fdb19000 {
+ reg = <0xfdb19000 0x1000>;
+ interrupts = <0 241 0>;
+ qcom,iommu-ctx-sids = <1>;
+ label = "gfx3d_priv";
+ };
+ };
+
+ vfe_iommu: qcom,iommu@fda44000 {
+ compatible = "qcom,msm-smmu-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0xfda44000 0x10000>;
+ vdd-supply = <&gdsc_vfe>;
+ qcom,iommu-smt-size = <32>;
+ status = "disabled";
+
+ qcom,iommu-ctx@fda4c000 {
+ reg = <0xfda4c000 0x1000>;
+ interrupts = <0 64 0>;
+ qcom,iommu-ctx-sids = <0>;
+ label = "vfe0";
+ };
+
+ qcom,iommu-ctx@fda4d000 {
+ reg = <0xfda4d000 0x1000>;
+ interrupts = <0 65 0>;
+ qcom,iommu-ctx-sids = <1>;
+ label = "vfe1";
+ };
+
+ qcom,iommu-ctx@fda4e000 {
+ reg = <0xfda4e000 0x1000>;
+ interrupts = <0 66 0>;
+ qcom,iommu-ctx-sids = <2>;
+ label = "cpp";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index 87864fd..51ec10c 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -22,6 +22,16 @@
#address-cells = <1>;
#size-cells = <1>;
+ qcom,power-on@800 {
+ compatible = "qcom,qpnp-power-on";
+ reg = <0x800 0x100>;
+ interrupts = <0x0 0x8 0x0>;
+ interrupt-names = "power-key";
+ qcom,pon-key-enable = <1>;
+ qcom,pon-key-dbc-delay = <15625>;
+ qcom,pon-key-pull-up = <1>;
+ };
+
pm8941_gpios {
spmi-dev-container;
compatible = "qcom,qpnp-pin";
@@ -305,6 +315,42 @@
status = "disabled";
};
};
+
+ qcom,pm8941_rtc {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-rtc";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ qcom,qpnp-rtc-write = <0>;
+ qcom,qpnp-rtc-alarm-pwrup = <0>;
+
+ qcom,pm8941_rtc_rw@6000 {
+ reg = <0x6000 0x100>;
+ };
+ qcom,pm8941_rtc_alarm@6100 {
+ reg = <0x6100 0x100>;
+ interrupts = <0x0 0x61 0x1>;
+ };
+ };
+
+ vadc@3100 {
+ compatible = "qcom,qpnp-vadc";
+ reg = <0x3100 0x100>;
+ interrupts = <0x0 0x31 0x0>;
+ qcom,adc-bit-resolution = <15>;
+ qcom,adc-vdd-reference = <1800>;
+
+ chan@0 {
+ label = "usb_in";
+ qcom,channel-num = <0>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <20>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+ };
};
qcom,pm8941@1 {
diff --git a/arch/arm/boot/dts/msm8974-gpu.dtsi b/arch/arm/boot/dts/msm8974-gpu.dtsi
new file mode 100644
index 0000000..a972d7f
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-gpu.dtsi
@@ -0,0 +1,126 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/ {
+ qcom,kgsl-3d0@fdb00000 {
+ label = "kgsl-3d0";
+ compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
+ reg = <0xfdb00000 0x20000>;
+ reg-names = "kgsl_3d0_reg_memory";
+ interrupts = <0 33 0>;
+ interrupt-names = "kgsl_3d0_irq";
+ qcom,id = <0>;
+
+ qcom,chipid = <0x03030000>;
+
+ qcom,initial-pwrlevel = <1>;
+
+ qcom,idle-timeout = <83>; //<HZ/12>
+ qcom,nap-allowed = <1>;
+ qcom,clk-map = <0x00000016>; //KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE
+
+ /* Bus Scale Settings */
+ qcom,grp3d-vectors = <0 0 0 0>, <2 1 0 0>,
+ <0 0 0 2000>, <2 1 0 3000>,
+ <0 0 0 4000>, <2 1 0 5000>,
+ <0 0 0 6400>, <2 1 0 7600>;
+ qcom,grp3d-num-vectors-per-usecase = <2>;
+ qcom,grp3d-num-bus-scale-usecases = <4>;
+
+ /* GDSC oxili regulators */
+ vddcx-supply = <&gdsc_oxili_cx>;
+ vdd-supply = <&gdsc_oxili_gx>;
+
+ /* Power levels */
+
+ /* IOMMU Data */
+ iommu = <&kgsl_iommu>;
+
+ qcom,gpu-pwrlevels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "qcom,gpu-pwrlevels";
+
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <500000000>;
+ qcom,bus-freq = <3>;
+ qcom,io-fraction = <0>;
+ };
+
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <333000000>;
+ qcom,bus-freq = <2>;
+ qcom,io-fraction = <33>;
+ };
+
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <200000000>;
+ qcom,bus-freq = <1>;
+ qcom,io-fraction = <100>;
+ };
+
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <27000000>;
+ qcom,bus-freq = <0>;
+ qcom,io-fraction = <0>;
+ };
+ };
+
+ qcom,dcvs-core-info {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "qcom,dcvs-core-info";
+
+ qcom,core-max-time-us = <100000>;
+ qcom,algo-slack-time-us = <39000>;
+ qcom,algo-disable-pc-threshold = <86000>;
+ qcom,algo-ss-window-size = <1000000>;
+ qcom,algo-ss-util-pct = <95>;
+ qcom,algo-em-max-util-pct = <97>;
+ qcom,algo-ss-iobusy-conv = <100>;
+
+ qcom,dcvs-freq@0 {
+ reg = <0>;
+ qcom,freq = <0>;
+ qcom,idle-energy = <0>;
+ qcom,active-energy = <333932>;
+ };
+
+ qcom,dcvs-freq@1 {
+ reg = <1>;
+ qcom,freq = <0>;
+ qcom,idle-energy = <0>;
+ qcom,active-energy = <497532>;
+ };
+
+ qcom,dcvs-freq@2 {
+ reg = <2>;
+ qcom,freq = <0>;
+ qcom,idle-energy = <0>;
+ qcom,active-energy = <707610>;
+ };
+
+ qcom,dcvs-freq@3 {
+ reg = <3>;
+ qcom,freq = <0>;
+ qcom,idle-energy = <0>;
+ qcom,active-energy = <844545>;
+ };
+ };
+
+ };
+};
diff --git a/arch/arm/boot/dts/msm8974-iommu.dtsi b/arch/arm/boot/dts/msm8974-iommu.dtsi
index a115fd8..184826e 100755
--- a/arch/arm/boot/dts/msm8974-iommu.dtsi
+++ b/arch/arm/boot/dts/msm8974-iommu.dtsi
@@ -10,108 +10,24 @@
* GNU General Public License for more details.
*/
-/ {
- jpeg: qcom,iommu@fda64000 {
- compatible = "qcom,msm-smmu-v2";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- reg = <0xfda64000 0x10000>;
- vdd-supply = <&gdsc_jpeg>;
- qcom,iommu-smt-size = <16>;
+/include/ "msm-iommu.dtsi"
- qcom,iommu-ctx@fda6c000 {
- reg = <0xfda6c000 0x1000>;
- interrupts = <0 69 0>;
- qcom,iommu-ctx-sids = <0>;
- qcom,iommu-ctx-name = "jpeg_enc0";
- };
- qcom,iommu-ctx@fda6d000 {
- reg = <0xfda6d000 0x1000>;
- interrupts = <0 70 0>;
- qcom,iommu-ctx-sids = <1>;
- qcom,iommu-ctx-name = "jpeg_enc1";
- };
- qcom,iommu-ctx@fda6e000 {
- reg = <0xfda6e000 0x1000>;
- interrupts = <0 71 0>;
- qcom,iommu-ctx-sids = <2>;
- qcom,iommu-ctx-name = "jpeg_dec";
- };
- };
+&jpeg_iommu {
+ status = "ok";
+};
- mdp: qcom,iommu@fd928000 {
- compatible = "qcom,msm-smmu-v2";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- reg = <0xfd928000 0x10000>;
- vdd-supply = <&gdsc_mdss>;
- qcom,iommu-smt-size = <16>;
+&mdp_iommu {
+ status = "ok";
+};
- qcom,iommu-ctx@fd930000 {
- reg = <0xfd930000 0x1000>;
- interrupts = <0 74 0>;
- qcom,iommu-ctx-sids = <0>;
- qcom,iommu-ctx-name = "mdp_0";
- };
- qcom,iommu-ctx@fd931000 {
- reg = <0xfd931000 0x1000>;
- interrupts = <0 75 0>;
- qcom,iommu-ctx-sids = <1>;
- qcom,iommu-ctx-name = "mdp_1";
- };
- };
+&venus_iommu {
+ status = "ok";
+};
- venus: qcom,iommu@fdc84000 {
- compatible = "qcom,msm-smmu-v2";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- reg = <0xfdc84000 0x10000>;
- vdd-supply = <&gdsc_venus>;
- qcom,iommu-smt-size = <16>;
+&kgsl_iommu {
+ status = "ok";
+};
- qcom,iommu-ctx@fdc8c000 {
- reg = <0xfdc8c000 0x1000>;
- interrupts = <0 43 0>;
- qcom,iommu-ctx-sids = <0 1 2 3 4 5>;
- qcom,iommu-ctx-name = "venus_ns";
- };
- qcom,iommu-ctx@fdc8d000 {
- reg = <0xfdc8d000 0x1000>;
- interrupts = <0 42 0>;
- qcom,iommu-ctx-sids = <0x80 0x81 0x82 0x83 0x84 0x85>;
- qcom,iommu-ctx-name = "venus_cp";
- };
- qcom,iommu-ctx@fdc8e000 {
- reg = <0xfdc8e000 0x1000>;
- interrupts = <0 41 0>;
- qcom,iommu-ctx-sids = <0xc0 0xc6>;
- qcom,iommu-ctx-name = "venus_fw";
- };
- };
-
- kgsl: qcom,iommu@fdb10000 {
- compatible = "qcom,msm-smmu-v2";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- reg = <0xfdb10000 0x10000>;
- vdd-supply = <&gdsc_oxili_cx>;
- qcom,iommu-smt-size = <32>;
-
- qcom,iommu-ctx@fdb18000 {
- reg = <0xfdb18000 0x1000>;
- interrupts = <0 240 0>;
- qcom,iommu-ctx-sids = <0>;
- qcom,iommu-ctx-name = "gfx3d_user";
- };
- qcom,iommu-ctx@fdb19000 {
- reg = <0xfdb19000 0x1000>;
- interrupts = <0 241 0>;
- qcom,iommu-ctx-sids = <1>;
- qcom,iommu-ctx-name = "gfx3d_priv";
- };
- };
+&vfe_iommu {
+ status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index f0c635e..b376544 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -109,7 +109,7 @@
regulator-name = "8841_s2_corner";
qcom,set = <3>;
regulator-min-microvolt = <1>;
- regulator-max-microvolt = <6>;
+ regulator-max-microvolt = <7>;
qcom,use-voltage-corner;
compatible = "qcom,rpm-regulator-smd";
qcom,consumer-supplies = "vdd_dig", "";
@@ -118,7 +118,7 @@
regulator-name = "8841_s2_corner_ao";
qcom,set = <1>;
regulator-min-microvolt = <1>;
- regulator-max-microvolt = <6>;
+ regulator-max-microvolt = <7>;
qcom,use-voltage-corner;
compatible = "qcom,rpm-regulator-smd";
};
@@ -218,9 +218,9 @@
status = "okay";
pm8941_l3: regulator-l3 {
parent-supply = <&pm8941_s1>;
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- qcom,init-voltage = <1200000>;
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ qcom,init-voltage = <1225000>;
status = "okay";
};
};
@@ -229,9 +229,9 @@
status = "okay";
pm8941_l4: regulator-l4 {
parent-supply = <&pm8941_s1>;
- regulator-min-microvolt = <1150000>;
- regulator-max-microvolt = <1150000>;
- qcom,init-voltage = <1150000>;
+ regulator-min-microvolt = <12250000>;
+ regulator-max-microvolt = <12250000>;
+ qcom,init-voltage = <12250000>;
status = "okay";
};
};
@@ -303,9 +303,9 @@
status = "okay";
pm8941_l11: regulator-l11 {
parent-supply = <&pm8941_s1>;
- regulator-min-microvolt = <1250000>;
- regulator-max-microvolt = <1250000>;
- qcom,init-voltage = <1250000>;
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ qcom,init-voltage = <1300000>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/msm8974-rumi.dts b/arch/arm/boot/dts/msm8974-rumi.dts
index b179d94..2cf68b8 100644
--- a/arch/arm/boot/dts/msm8974-rumi.dts
+++ b/arch/arm/boot/dts/msm8974-rumi.dts
@@ -103,4 +103,8 @@
qcom,pronto@fb21b000 {
status = "disable";
};
+
+ qcom,mss@fc880000 {
+ status = "disable";
+ };
};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 1531a95..12f46a3 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -14,6 +14,7 @@
/include/ "msm8974_pm.dtsi"
/include/ "msm8974-iommu.dtsi"
/include/ "msm-gdsc.dtsi"
+/include/ "msm8974-gpu.dtsi"
/ {
model = "Qualcomm MSM 8974";
@@ -46,6 +47,14 @@
compatible = "qcom,msm-vidc";
reg = <0xfdc00000 0xff000>;
interrupts = <0 44 0>;
+ vidc-cp-map = <0x1000000 0x40000000>;
+ vidc-ns-map = <0x40000000 0x40000000>;
+ load-freq-tbl = <979200 410000000>,
+ <560145 266670000>,
+ <421161 200000000>,
+ <243000 133330000>,
+ <108000 100000000>,
+ <36000 50000000>;
};
serial@f991f000 {
@@ -74,10 +83,12 @@
};
qcom,sdcc@f9824000 {
- cell-index = <1>;
+ cell-index = <1>; /* SDC1 eMMC slot */
compatible = "qcom,msm-sdcc";
reg = <0xf9824000 0x1000>;
+ reg-names = "core_mem";
interrupts = <0 123 0>;
+ interrupt-names = "core_irq";
vdd-supply = <&pm8941_l20>;
vdd-io-supply = <&pm8941_s3>;
@@ -88,6 +99,11 @@
qcom,sdcc-vdd-io-voltage_level = <1800000 1800000>;
qcom,sdcc-vdd-io-current_level = <250 154000>;
+ qcom,sdcc-pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,sdcc-pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,sdcc-pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,sdcc-pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000 200000000>;
qcom,sdcc-sup-voltages = <2950 2950>;
qcom,sdcc-bus-width = <8>;
@@ -96,10 +112,12 @@
};
qcom,sdcc@f98a4000 {
- cell-index = <2>;
+ cell-index = <2>; /* SDC2 SD card slot */
compatible = "qcom,msm-sdcc";
reg = <0xf98a4000 0x1000>;
+ reg-names = "core_mem";
interrupts = <0 125 0>;
+ interrupt-names = "core_irq";
vdd-supply = <&pm8941_l21>;
vdd-io-supply = <&pm8941_l13>;
@@ -111,6 +129,11 @@
qcom,sdcc-vdd-io-voltage_level = <1800000 2950000>;
qcom,sdcc-vdd-io-current_level = <6 22000>;
+ qcom,sdcc-pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,sdcc-pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,sdcc-pad-drv-on = <0x7 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,sdcc-pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000 200000000>;
qcom,sdcc-sup-voltages = <2950 2950>;
qcom,sdcc-bus-width = <4>;
@@ -120,10 +143,20 @@
};
qcom,sdcc@f9864000 {
- cell-index = <3>;
+ cell-index = <3>; /* SDC3 SDIO slot */
compatible = "qcom,msm-sdcc";
reg = <0xf9864000 0x1000>;
+ reg-names = "core_mem";
interrupts = <0 127 0>;
+ interrupt-names = "core_irq";
+
+ gpios = <&msmgpio 40 0>, /* CLK */
+ <&msmgpio 39 0>, /* CMD */
+ <&msmgpio 38 0>, /* DATA0 */
+ <&msmgpio 37 0>, /* DATA1 */
+ <&msmgpio 36 0>, /* DATA2 */
+ <&msmgpio 35 0>; /* DATA3 */
+ qcom,sdcc-gpio-names = "CLK", "CMD", "DAT0", "DAT1", "DAT2", "DAT3";
qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000>;
qcom,sdcc-sup-voltages = <1800 1800>;
@@ -133,10 +166,20 @@
};
qcom,sdcc@f98e4000 {
- cell-index = <4>;
+ cell-index = <4>; /* SDC4 SDIO slot */
compatible = "qcom,msm-sdcc";
reg = <0xf98e4000 0x1000>;
+ reg-names = "core_mem";
interrupts = <0 129 0>;
+ interrupt-names = "core_irq";
+
+ gpios = <&msmgpio 93 0>, /* CLK */
+ <&msmgpio 91 0>, /* CMD */
+ <&msmgpio 96 0>, /* DATA0 */
+ <&msmgpio 95 0>, /* DATA1 */
+ <&msmgpio 94 0>, /* DATA2 */
+ <&msmgpio 92 0>; /* DATA3 */
+ qcom,sdcc-gpio-names = "CLK", "CMD", "DAT0", "DAT1", "DAT2", "DAT3";
qcom,sdcc-clk-rates = <400000 25000000 50000000 100000000>;
qcom,sdcc-sup-voltages = <1800 1800>;
@@ -350,6 +393,10 @@
compatible = "qcom,msm-pcm-lpa";
};
+ qcom,msm-compr-dsp {
+ compatible = "qcom,msm-compr-dsp";
+ };
+
qcom,msm-voip-dsp {
compatible = "qcom,msm-voip-dsp";
};
@@ -362,6 +409,19 @@
compatible = "qcom,msm-dai-fe";
};
+ qcom,msm-dai-q6 {
+ compatible = "qcom,msm-dai-q6";
+ qcom,msm-dai-q6-sb-0-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <16384>;
+ };
+
+ qcom,msm-dai-q6-sb-0-tx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <16385>;
+ };
+ };
+
qcom,msm-auxpcm {
compatible = "qcom,msm-auxpcm-resource";
qcom,msm-cpudai-auxpcm-clk = "pcm_clk";
@@ -388,6 +448,14 @@
compatible = "qcom,msm-pcm-hostless";
};
+ qcom,msm-ocmem-audio {
+ compatible = "qcom,msm-ocmem-audio";
+ qcom,msm-ocmem-audio-src-id = <11>;
+ qcom,msm-ocmem-audio-dst-id = <604>;
+ qcom,msm-ocmem-audio-ab = <32505856>;
+ qcom,msm-ocmem-audio-ib = <32505856>;
+ };
+
qcom,mss@fc880000 {
compatible = "qcom,pil-q6v5-mss";
reg = <0xfc880000 0x100>,
@@ -420,7 +488,48 @@
};
qcom,ocmem@fdd00000 {
- compatible = "qcom,msm_ocmem";
+ compatible = "qcom,msm-ocmem";
+ reg = <0xfdd00000 0x2000>,
+ <0xfdd02000 0x2000>,
+ <0xfe039000 0x400>,
+ <0xfec00000 0x180000>;
+ reg-names = "ocmem_ctrl_physical", "dm_ctrl_physical", "br_ctrl_physical", "ocmem_physical";
+ interrupts = <0 76 0 0 77 0>;
+ interrupt-names = "ocmem_irq", "dm_irq";
+ qcom,ocmem-num-regions = <0x3>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xfec00000 0x180000>;
+
+ partition@0 {
+ reg = <0x0 0x100000>;
+ qcom,ocmem-part-name = "graphics";
+ qcom,ocmem-part-min = <0x80000>;
+ };
+
+ partition@80000 {
+ reg = <0x80000 0xA0000>;
+ qcom,ocmem-part-name = "lp_audio";
+ qcom,ocmem-part-min = <0xA0000>;
+ };
+
+ partition@120000 {
+ reg = <0x120000 0x20000>;
+ qcom,ocmem-part-name = "other_os";
+ qcom,ocmem-part-min = <0x20000>;
+ };
+
+ partition@100000 {
+ reg = <0x100000 0x80000>;
+ qcom,ocmem-part-name = "video";
+ qcom,ocmem-part-min = <0x55000>;
+ };
+
+ partition@140000 {
+ reg = <0x140000 0x40000>;
+ qcom,ocmem-part-name = "sensors";
+ qcom,ocmem-part-min = <0x40000>;
+ };
};
rpm_bus: qcom,rpm-smd {
@@ -478,6 +587,12 @@
qcom,firmware-max-paddr = <0xFA00000>;
};
+ qcom,cache_erp {
+ compatible = "qcom,cache_erp";
+ interrupts = <1 9 0>, <0 2 0>;
+ interrupt-names = "l1_irq", "l2_irq";
+ };
+
tsens@fc4a8000 {
compatible = "qcom,msm-tsens";
reg = <0xfc4a8000 0x2000>,
@@ -485,8 +600,30 @@
reg-names = "tsens_physical", "tsens_eeprom_physical";
interrupts = <0 184 0>;
qcom,sensors = <11>;
- qcom,slope = <1134 1122 1142 1123 1176 1176 1176 1186 1176
- 1176 1176>;
+ qcom,slope = <3200 3200 3200 3200 3200 3200 3200 3200 3200
+ 3200 3200>;
+ };
+
+ qcom,msm-rtb {
+ compatible = "qcom,msm-rtb";
+ qcom,memory-reservation-type = "EBI1";
+ qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
+ };
+
+ qcom,qcedev@fd440000 {
+ compatible = "qcom,qcedev";
+ reg = <0xfd440000 0x20000>,
+ <0xfd444000 0x8000>;
+ interrupts = <0 235 0>;
+ qcom,bam-pipes = <0>;
+ };
+
+ qcom,qcrypto@fd444000 {
+ compatible = "qcom,qcrypto";
+ reg = <0xfd440000 0x20000>,
+ <0xfd444000 0x8000>;
+ interrupts = <0 235 0>;
+ qcom,bam-pipes = <1>;
};
};
diff --git a/arch/arm/boot/dts/msm8974_pm.dtsi b/arch/arm/boot/dts/msm8974_pm.dtsi
index 6f12e31c..476f2b5 100644
--- a/arch/arm/boot/dts/msm8974_pm.dtsi
+++ b/arch/arm/boot/dts/msm8974_pm.dtsi
@@ -140,25 +140,25 @@
qcom,lpm-resources@0 {
reg = <0x0>;
qcom,name = "vdd-dig";
- qcom,type = "smpb\0";
+ qcom,type = <0x62706d73>; /* "smpb" */
qcom,id = <0x02>;
- qcom,key = "uv\0\0";
+ qcom,key = <0x6e726f63>; /* "corn" */
};
qcom,lpm-resources@1 {
reg = <0x1>;
qcom,name = "vdd-mem";
- qcom,type = "smpb\0";
+ qcom,type = <0x62706d73>; /* "smpb" */
qcom,id = <0x01>;
- qcom,key = "uv\0\0";
+ qcom,key = <0x7675>; /* "uv" */
};
qcom,lpm-resources@2 {
reg = <0x2>;
qcom,name = "pxo";
- qcom,type = "clk0\0";
+ qcom,type = <0x306b6c63>; /* "clk0" */
qcom,id = <0x00>;
- qcom,key = "Enab";
+ qcom,key = <0x62616e45>; /* "Enab" */
};
};
@@ -174,8 +174,8 @@
qcom,l2 = <3>; /* ACTIVE */
qcom,vdd-mem-upper-bound = <1150000>; /* MAX */
qcom,vdd-mem-lower-bound = <1050000>; /* ACTIVE */
- qcom,vdd-dig-upper-bound = <1150000>; /* MAX */
- qcom,vdd-dig-lower-bound = <950000>; /* ACTIVE */
+ qcom,vdd-dig-upper-bound = <5>; /* MAX */
+ qcom,vdd-dig-lower-bound = <3>; /* ACTIVE */
qcom,latency-us = <100>;
qcom,ss-power = <650>;
qcom,energy-overhead = <801>;
@@ -189,8 +189,8 @@
qcom,l2 = <3>; /* ACTIVE */
qcom,vdd-mem-upper-bound = <1150000>; /* MAX */
qcom,vdd-mem-lower-bound = <1050000>; /* ACTIVE */
- qcom,vdd-dig-upper-bound = <1150000>; /* MAX */
- qcom,vdd-dig-lower-bound = <950000>; /* ACTIVE */
+ qcom,vdd-dig-upper-bound = <5>; /* MAX */
+ qcom,vdd-dig-lower-bound = <3>; /* ACTIVE */
qcom,latency-us = <2000>;
qcom,ss-power = <200>;
qcom,energy-overhead = <576000>;
@@ -204,8 +204,8 @@
qcom,l2 = <1>; /* GDHS */
qcom,vdd-mem-upper-bound = <1150000>; /* MAX */
qcom,vdd-mem-lower-bound = <1050000>; /* ACTIVE */
- qcom,vdd-dig-upper-bound = <1150000>; /* MAX */
- qcom,vdd-dig-lower-bound = <950000>; /* ACTIVE */
+ qcom,vdd-dig-upper-bound = <5>; /* MAX */
+ qcom,vdd-dig-lower-bound = <3>; /* ACTIVE */
qcom,latency-us = <8500>;
qcom,ss-power = <51>;
qcom,energy-overhead = <1122000>;
@@ -219,8 +219,8 @@
qcom,l2 = <0>; /* OFF */
qcom,vdd-mem-upper-bound = <1150000>; /* MAX */
qcom,vdd-mem-lower-bound = <1050000>; /* ACTIVE */
- qcom,vdd-dig-upper-bound = <1150000>; /* MAX */
- qcom,vdd-dig-lower-bound = <950000>; /* ACTIVE */
+ qcom,vdd-dig-upper-bound = <5>; /* MAX */
+ qcom,vdd-dig-lower-bound = <3>; /* ACTIVE */
qcom,latency-us = <9000>;
qcom,ss-power = <51>;
qcom,energy-overhead = <1130300>;
@@ -234,8 +234,8 @@
qcom,l2 = <0>; /* OFF */
qcom,vdd-mem-upper-bound = <1050000>; /* ACTIVE */
qcom,vdd-mem-lower-bound = <750000>; /* RETENTION HIGH */
- qcom,vdd-dig-upper-bound = <950000>; /* ACTIVE */
- qcom,vdd-dig-lower-bound = <750000>; /* RETENTION HIGH */
+ qcom,vdd-dig-upper-bound = <3>; /* ACTIVE */
+ qcom,vdd-dig-lower-bound = <2>; /* RETENTION HIGH */
qcom,latency-us = <10000>;
qcom,ss-power = <51>;
qcom,energy-overhead = <1130300>;
@@ -249,8 +249,8 @@
qcom,l2 = <1>; /* GDHS */
qcom,vdd-mem-upper-bound = <1150000>; /* MAX */
qcom,vdd-mem-lower-bound = <1050000>; /* ACTIVE */
- qcom,vdd-dig-upper-bound = <1150000>; /* MAX */
- qcom,vdd-dig-lower-bound = <950000>; /* ACTIVE */
+ qcom,vdd-dig-upper-bound = <5>; /* MAX */
+ qcom,vdd-dig-lower-bound = <3>; /* ACTIVE */
qcom,latency-us = <12000>;
qcom,ss-power = <14>;
qcom,energy-overhead = <2205900>;
@@ -264,8 +264,8 @@
qcom,l2 = <0>; /* OFF */
qcom,vdd-mem-upper-bound = <1150000>; /* MAX */
qcom,vdd-mem-lower-bound = <1050000>; /* ACTIVE */
- qcom,vdd-dig-upper-bound = <1150000>; /* MAX */
- qcom,vdd-dig-lower-bound = <950000>; /* ACTIVE */
+ qcom,vdd-dig-upper-bound = <5>; /* MAX */
+ qcom,vdd-dig-lower-bound = <3>; /* ACTIVE */
qcom,latency-us = <18000>;
qcom,ss-power = <12>;
qcom,energy-overhead = <2364250>;
@@ -279,8 +279,8 @@
qcom,l2 = <0>; /* OFF */
qcom,vdd-mem-upper-bound = <1050000>; /* ACTIVE */
qcom,vdd-mem-lower-bound = <750000>; /* RETENTION HIGH */
- qcom,vdd-dig-upper-bound = <950000>; /* ACTIVE */
- qcom,vdd-dig-lower-bound = <750000>; /* RETIONTION HIGH */
+ qcom,vdd-dig-upper-bound = <3>; /* ACTIVE */
+ qcom,vdd-dig-lower-bound = <2>; /* RETENTION HIGH */
qcom,latency-us = <23500>;
qcom,ss-power = <10>;
qcom,energy-overhead = <2667000>;
@@ -294,8 +294,8 @@
qcom,l2 = <0>; /* OFF */
qcom,vdd-mem-upper-bound = <750000>; /* RETENTION HIGH */
qcom,vdd-mem-lower-bound = <750000>; /* RETENTION LOW */
- qcom,vdd-dig-upper-bound = <750000>; /* RETENTION HIGH */
- qcom,vdd-dig-lower-bound = <500000>; /* RETENTION LOW */
+ qcom,vdd-dig-upper-bound = <2>; /* RETENTION HIGH */
+ qcom,vdd-dig-lower-bound = <0>; /* RETENTION LOW */
qcom,latency-us = <29700>;
qcom,ss-power = <5>;
qcom,energy-overhead = <2867000>;
@@ -307,4 +307,89 @@
compatible = "qcom,pm-boot";
qcom,mode = <0>; /* MSM_PM_BOOT_CONFIG_TZ */
};
+
+ qcom,mpm@fc4281d0 {
+ compatible = "qcom,mpm-v2";
+ reg = <0xfc4281d0 0x1000>, /* MSM_RPM_MPM_BASE 4K */
+ <0xfa006000 0x1000>; /* MSM_APCS_GCC_BASE 4K */
+ reg-names = "vmpm", "ipc";
+ interrupts = <0 171 1>;
+
+ qcom,ipc-bit-offset = <0>;
+
+ qcom,gic-parent = <&intc>;
+ qcom,gic-map = <41 180>, /* usb2_hsic_async_wakeup_irq */
+ <53 104>, /* mdss_irq */
+ <0xff 57>, /* mss_to_apps_irq(0) */
+ <0xff 58>, /* mss_to_apps_irq(1) */
+ <0xff 59>, /* mss_to_apps_irq(2) */
+ <0xff 60>, /* mss_to_apps_irq(3) */
+ <0xff 173>, /* o_wcss_apss_smd_hi */
+ <0xff 174>, /* o_wcss_apss_smd_med */
+ <0xff 175>, /* o_wcss_apss_smd_low */
+ <0xff 176>, /* o_wcss_apss_smsm_irq */
+ <0xff 177>, /* o_wcss_apss_wlan_data_xfer_done */
+ <0xff 178>, /* o_wcss_apss_wlan_rx_data_avail */
+ <0xff 179>, /* o_wcss_apss_asic_intr */
+
+ <0xff 188>, /* lpass_irq_out_apcs(0) */
+ <0xff 189>, /* lpass_irq_out_apcs(1) */
+ <0xff 190>, /* lpass_irq_out_apcs(2) */
+ <0xff 191>, /* lpass_irq_out_apcs(3) */
+ <0xff 192>, /* lpass_irq_out_apcs(4) */
+ <0xff 193>, /* lpass_irq_out_apcs(5) */
+ <0xff 194>, /* lpass_irq_out_apcs(6) */
+ <0xff 195>, /* lpass_irq_out_apcs(7) */
+ <0xff 196>, /* lpass_irq_out_apcs(8) */
+ <0xff 197>, /* lpass_irq_out_apcs(9) */
+ <0xff 200>, /* rpm_ipc(4) */
+ <0xff 201>, /* rpm_ipc(5) */
+ <0xff 202>, /* rpm_ipc(6) */
+ <0xff 203>, /* rpm_ipc(7) */
+ <0xff 204>, /* rpm_ipc(24) */
+ <0xff 205>, /* rpm_ipc(25) */
+ <0xff 206>, /* rpm_ipc(26) */
+ <0xff 207>, /* rpm_ipc(27) */
+ <0xff 240>; /* summary_irq_kpss */
+
+ qcom,gpio-parent = <&msmgpio>;
+ qcom,gpio-map = <3 102>,
+ <4 1 >,
+ <5 5 >,
+ <6 9 >,
+ <7 18>,
+ <8 20>,
+ <9 24>,
+ <10 27>,
+ <11 28>,
+ <12 34>,
+ <13 35>,
+ <14 37>,
+ <15 42>,
+ <16 44>,
+ <17 46>,
+ <18 50>,
+ <19 54>,
+ <20 59>,
+ <21 61>,
+ <22 62>,
+ <23 64>,
+ <24 65>,
+ <25 66>,
+ <26 67>,
+ <27 68>,
+ <28 71>,
+ <29 72>,
+ <30 73>,
+ <31 74>,
+ <32 75>,
+ <33 77>,
+ <34 79>,
+ <35 80>,
+ <36 82>,
+ <37 86>,
+ <38 92>,
+ <39 93>,
+ <40 95>;
+ };
};
diff --git a/arch/arm/boot/dts/msm9625.dts b/arch/arm/boot/dts/msm9625.dts
index 6c007fb..6b44be9 100644
--- a/arch/arm/boot/dts/msm9625.dts
+++ b/arch/arm/boot/dts/msm9625.dts
@@ -55,4 +55,14 @@
reg = <0xf991f000 0x1000>;
interrupts = <0 109 0>;
};
+
+ qcom,nand@f9ac0000 {
+ compatible = "qcom,msm-nand";
+ reg = <0xf9ac0000 0x1000>,
+ <0xf9ac4000 0x8000>;
+ reg-names = "nand_phys",
+ "bam_phys";
+ interrupts = <0 247 0>;
+ interrupt-names = "bam_irq";
+ };
};
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 88c4862..bbd6c63 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -47,6 +47,8 @@
#include <asm/hardware/gic.h>
#include <asm/system.h>
+#include <mach/socinfo.h>
+
union gic_base {
void __iomem *common_base;
void __percpu __iomem **percpu_base;
@@ -56,6 +58,7 @@
unsigned int irq_offset;
union gic_base dist_base;
union gic_base cpu_base;
+ bool need_access_lock;
#ifdef CONFIG_CPU_PM
u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
@@ -207,14 +210,8 @@
{
unsigned int i;
void __iomem *base = gic_data_dist_base(gic);
-#ifdef CONFIG_ARCH_MSM8625
- unsigned long flags;
-#endif
for (i = 0; i * 32 < gic->max_irq; i++) {
-#ifdef CONFIG_ARCH_MSM8625
- raw_spin_lock_irqsave(&irq_controller_lock, flags);
-#endif
gic->enabled_irqs[i]
= readl_relaxed(base + GIC_DIST_ENABLE_SET + i * 4);
/* disable all of them */
@@ -222,9 +219,6 @@
/* enable the wakeup set */
writel_relaxed(gic->wakeup_irqs[i],
base + GIC_DIST_ENABLE_SET + i * 4);
-#ifdef CONFIG_ARCH_MSM8625
- raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
-#endif
}
mb();
return 0;
@@ -305,18 +299,19 @@
static void gic_eoi_irq(struct irq_data *d)
{
+ struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
+
if (gic_arch_extn.irq_eoi) {
raw_spin_lock(&irq_controller_lock);
gic_arch_extn.irq_eoi(d);
raw_spin_unlock(&irq_controller_lock);
}
-#ifdef CONFIG_ARCH_MSM8625
- raw_spin_lock(&irq_controller_lock);
-#endif
+
+ if (gic->need_access_lock)
+ raw_spin_lock(&irq_controller_lock);
writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
-#ifdef CONFIG_ARCH_MSM8625
- raw_spin_unlock(&irq_controller_lock);
-#endif
+ if (gic->need_access_lock)
+ raw_spin_unlock(&irq_controller_lock);
}
static int gic_set_type(struct irq_data *d, unsigned int type)
@@ -435,13 +430,11 @@
void __iomem *cpu_base = gic_data_cpu_base(gic);
do {
-#ifdef CONFIG_ARCH_MSM8625
- raw_spin_lock(&irq_controller_lock);
-#endif
+ if (gic->need_access_lock)
+ raw_spin_lock(&irq_controller_lock);
irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
-#ifdef CONFIG_ARCH_MSM8625
- raw_spin_unlock(&irq_controller_lock);
-#endif
+ if (gic->need_access_lock)
+ raw_spin_unlock(&irq_controller_lock);
irqnr = irqstat & ~0x1c00;
if (likely(irqnr > 15 && irqnr < 1021)) {
@@ -450,13 +443,11 @@
continue;
}
if (irqnr < 16) {
-#ifdef CONFIG_ARCH_MSM8625
- raw_spin_lock(&irq_controller_lock);
-#endif
+ if (gic->need_access_lock)
+ raw_spin_lock(&irq_controller_lock);
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
-#ifdef CONFIG_ARCH_MSM8625
- raw_spin_unlock(&irq_controller_lock);
-#endif
+ if (gic->need_access_lock)
+ raw_spin_unlock(&irq_controller_lock);
#ifdef CONFIG_SMP
handle_IPI(irqnr, regs);
#endif
@@ -583,9 +574,8 @@
* Deal with the banked PPI and SGI interrupts - disable all
* PPI interrupts, ensure all SGI interrupts are enabled.
*/
-#ifdef CONFIG_ARCH_MSM8625
- raw_spin_lock(&irq_controller_lock);
-#endif
+ if (gic->need_access_lock)
+ raw_spin_lock(&irq_controller_lock);
writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
@@ -605,9 +595,8 @@
writel_relaxed(0xF, base + GIC_CPU_CTRL);
else
writel_relaxed(1, base + GIC_CPU_CTRL);
-#ifdef CONFIG_ARCH_MSM8625
- raw_spin_unlock(&irq_controller_lock);
-#endif
+ if (gic->need_access_lock)
+ raw_spin_unlock(&irq_controller_lock);
mb();
}
@@ -883,6 +872,10 @@
BUG_ON(gic_nr >= MAX_GIC_NR);
gic = &gic_data[gic_nr];
+ if (cpu_is_msm8625() &&
+ (SOCINFO_VERSION_MAJOR(socinfo_get_version()) <= 1))
+ gic->need_access_lock = true;
+
#ifdef CONFIG_GIC_NON_BANKED
if (percpu_offset) { /* Frankein-GIC without banked registers... */
unsigned int cpu;
@@ -967,9 +960,8 @@
int cpu;
unsigned long sgir;
unsigned long map = 0;
-#ifdef CONFIG_ARCH_MSM8625
- unsigned long flags;
-#endif
+ unsigned long flags = 0;
+ struct gic_chip_data *gic = &gic_data[0];
/* Convert our logical CPU mask into a physical one. */
for_each_cpu(cpu, mask)
@@ -985,16 +977,12 @@
*/
dsb();
-#ifdef CONFIG_ARCH_MSM8625
- raw_spin_lock_irqsave(&irq_controller_lock, flags);
-#endif
+ if (gic->need_access_lock)
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
/* this always happens on GIC0 */
-
- writel_relaxed(sgir,
- gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
-#ifdef CONFIG_ARCH_MSM8625
- raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
-#endif
+ writel_relaxed(sgir, gic_data_dist_base(gic) + GIC_DIST_SOFTINT);
+ if (gic->need_access_lock)
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
mb();
}
#endif
@@ -1151,7 +1139,7 @@
}
#endif
-void msm_gic_save(bool modem_wake, int from_idle)
+void msm_gic_save(void)
{
unsigned int i;
struct gic_chip_data *gic = &gic_data[0];
diff --git a/arch/arm/configs/msm7627a-perf_defconfig b/arch/arm/configs/msm7627a-perf_defconfig
index c40d4d0..a8abb30 100644
--- a/arch/arm/configs/msm7627a-perf_defconfig
+++ b/arch/arm/configs/msm7627a-perf_defconfig
@@ -312,6 +312,7 @@
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_MSM_PDM=y
CONFIG_LEDS_PMIC_MPP=y
+CONFIG_LEDS_MSM_TRICOLOR=y
CONFIG_SWITCH=y
CONFIG_SWITCH_GPIO=y
CONFIG_RTC_CLASS=y
diff --git a/arch/arm/configs/msm7627a_defconfig b/arch/arm/configs/msm7627a_defconfig
index 838933c..314f91b 100644
--- a/arch/arm/configs/msm7627a_defconfig
+++ b/arch/arm/configs/msm7627a_defconfig
@@ -312,6 +312,7 @@
CONFIG_MMC_MSM_SDC3_8_BIT_SUPPORT=y
CONFIG_LEDS_MSM_PDM=y
CONFIG_LEDS_PMIC_MPP=y
+CONFIG_LEDS_MSM_TRICOLOR=y
CONFIG_SWITCH=y
CONFIG_SWITCH_GPIO=y
CONFIG_RTC_CLASS=y
diff --git a/arch/arm/configs/msm8660-perf_defconfig b/arch/arm/configs/msm8660-perf_defconfig
index 173dcca..a51b76d 100644
--- a/arch/arm/configs/msm8660-perf_defconfig
+++ b/arch/arm/configs/msm8660-perf_defconfig
@@ -340,6 +340,7 @@
CONFIG_FB_MSM_TRIPLE_BUFFER=y
CONFIG_FB_MSM_MDP40=y
CONFIG_FB_MSM_OVERLAY=y
+CONFIG_FB_MSM_NO_MDP_PIPE_CTRL=y
CONFIG_FB_MSM_OVERLAY0_WRITEBACK=y
CONFIG_FB_MSM_OVERLAY1_WRITEBACK=y
CONFIG_FB_MSM_WRITEBACK_MSM_PANEL=y
@@ -416,8 +417,6 @@
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_MSM_SSBI=y
-CONFIG_MSM_IOMMU=y
-# CONFIG_IOMMU_PGTABLES_L2 is not set
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
diff --git a/arch/arm/configs/msm8660_defconfig b/arch/arm/configs/msm8660_defconfig
index f329c81..5d19237 100644
--- a/arch/arm/configs/msm8660_defconfig
+++ b/arch/arm/configs/msm8660_defconfig
@@ -288,8 +288,6 @@
# CONFIG_I2C_MSM is not set
CONFIG_I2C_QUP=y
CONFIG_I2C_SSBI=y
-CONFIG_MSM_IOMMU=y
-# CONFIG_IOMMU_PGTABLES_L2 is not set
CONFIG_SPI=y
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=m
@@ -344,6 +342,7 @@
CONFIG_FB_MSM_TRIPLE_BUFFER=y
CONFIG_FB_MSM_MDP40=y
CONFIG_FB_MSM_OVERLAY=y
+CONFIG_FB_MSM_NO_MDP_PIPE_CTRL=y
CONFIG_FB_MSM_OVERLAY0_WRITEBACK=y
CONFIG_FB_MSM_OVERLAY1_WRITEBACK=y
CONFIG_FB_MSM_WRITEBACK_MSM_PANEL=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index 6c213c3..795a5a7 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -83,12 +83,12 @@
CONFIG_MSM_GSS_SSR_8064=y
CONFIG_MSM_TZ_LOG=y
CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_RBCPR_STATS_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
CONFIG_MSM_BUS_SCALING=y
CONFIG_MSM_BUS_RPM_MULTI_TIER_ENABLED=y
CONFIG_MSM_WATCHDOG=y
CONFIG_MSM_DLOAD_MODE=y
-CONFIG_MSM_QDSS=y
CONFIG_MSM_SLEEP_STATS=y
CONFIG_MSM_EBI_ERP=y
CONFIG_MSM_CACHE_ERP=y
@@ -285,6 +285,8 @@
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PMIC8XXX_PWRKEY=y
CONFIG_INPUT_UINPUT=y
+CONFIG_STM_LIS3DH=y
+CONFIG_INPUT_MPU3050=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_N_SMUX=y
CONFIG_N_SMUX_LOOPBACK=y
@@ -351,6 +353,7 @@
CONFIG_IMX074_EEPROM=y
CONFIG_IMX091_EEPROM=y
CONFIG_MSM_GEMINI=y
+CONFIG_MSM_CSI20_HEADER=y
CONFIG_S5K3L1YX=y
CONFIG_IMX091=y
CONFIG_RADIO_IRIS=y
@@ -367,6 +370,7 @@
CONFIG_FB_MSM_TRIPLE_BUFFER=y
CONFIG_FB_MSM_MDP40=y
CONFIG_FB_MSM_OVERLAY=y
+CONFIG_FB_MSM_NO_MDP_PIPE_CTRL=y
CONFIG_FB_MSM_OVERLAY0_WRITEBACK=y
CONFIG_FB_MSM_OVERLAY1_WRITEBACK=y
CONFIG_FB_MSM_WRITEBACK_MSM_PANEL=y
@@ -451,6 +455,7 @@
CONFIG_MSM_IOMMU=y
CONFIG_MOBICORE_SUPPORT=m
CONFIG_MOBICORE_API=m
+CONFIG_MSM_QDSS=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 3da628f..4674584 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -83,12 +83,11 @@
CONFIG_MSM_TZ_LOG=y
CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_MSM_RPM_RBCPR_STATS_LOG=y
CONFIG_MSM_BUS_SCALING=y
CONFIG_MSM_BUS_RPM_MULTI_TIER_ENABLED=y
CONFIG_MSM_WATCHDOG=y
CONFIG_MSM_DLOAD_MODE=y
-CONFIG_MSM_QDSS=y
-CONFIG_MSM_QDSS_ETM_DEFAULT_ENABLE=y
CONFIG_MSM_RTB=y
CONFIG_MSM_RTB_SEPARATE_CPUS=y
CONFIG_MSM_EBI_ERP=y
@@ -290,6 +289,8 @@
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PMIC8XXX_PWRKEY=y
CONFIG_INPUT_UINPUT=y
+CONFIG_STM_LIS3DH=y
+CONFIG_INPUT_MPU3050=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_N_SMUX=y
CONFIG_N_SMUX_LOOPBACK=y
@@ -355,6 +356,7 @@
CONFIG_IMX074_EEPROM=y
CONFIG_IMX091_EEPROM=y
CONFIG_MSM_GEMINI=y
+CONFIG_MSM_CSI20_HEADER=y
CONFIG_S5K3L1YX=y
CONFIG_IMX091=y
CONFIG_RADIO_IRIS=y
@@ -371,6 +373,7 @@
CONFIG_FB_MSM_TRIPLE_BUFFER=y
CONFIG_FB_MSM_MDP40=y
CONFIG_FB_MSM_OVERLAY=y
+CONFIG_FB_MSM_NO_MDP_PIPE_CTRL=y
CONFIG_FB_MSM_OVERLAY0_WRITEBACK=y
CONFIG_FB_MSM_OVERLAY1_WRITEBACK=y
CONFIG_FB_MSM_WRITEBACK_MSM_PANEL=y
@@ -454,6 +457,8 @@
CONFIG_MSM_IOMMU=y
CONFIG_MOBICORE_SUPPORT=m
CONFIG_MOBICORE_API=m
+CONFIG_MSM_QDSS=y
+CONFIG_MSM_QDSS_ETM_DEFAULT_ENABLE=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index 4087f24..f3a62a5 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -46,18 +46,17 @@
CONFIG_MSM_PIL_LPASS_QDSP6V5=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_MSM_PIL_MBA=y
-CONFIG_MSM_PIL_PRONTO=y
CONFIG_MSM_PIL_VENUS=y
+CONFIG_MSM_PIL_PRONTO=y
CONFIG_MSM_TZ_LOG=y
CONFIG_MSM_DIRECT_SCLK_ACCESS=y
CONFIG_MSM_OCMEM=y
-CONFIG_MSM_WATCHDOG_V2=y
+CONFIG_MSM_MEMORY_DUMP=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
# CONFIG_SMP_ON_UP is not set
CONFIG_ARM_ARCH_TIMER=y
-CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
@@ -101,6 +100,7 @@
CONFIG_IPV6_SUBTREES=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
+CONFIG_QSEECOM=y
CONFIG_SCSI=y
CONFIG_SCSI_TGT=y
CONFIG_BLK_DEV_SD=y
@@ -114,9 +114,9 @@
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
CONFIG_KS8851=m
# CONFIG_MSM_RMNET is not set
-CONFIG_DUMMY=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_EVBUG=m
CONFIG_INPUT_JOYSTICK=y
@@ -137,7 +137,6 @@
CONFIG_SPI_SPIDEV=m
CONFIG_SPMI=y
CONFIG_SPMI_MSM_PMIC_ARB=y
-CONFIG_MSM_QPNP=y
CONFIG_MSM_QPNP_INT=y
CONFIG_SLIMBUS=y
CONFIG_SLIMBUS_MSM_CTRL=y
@@ -148,6 +147,8 @@
CONFIG_POWER_SUPPLY=y
# CONFIG_BATTERY_MSM is not set
# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_TSENS8974=y
CONFIG_REGULATOR_STUB=y
CONFIG_REGULATOR_QPNP=y
CONFIG_MEDIA_SUPPORT=y
@@ -168,7 +169,11 @@
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_BACKLIGHT_GENERIC is not set
-# CONFIG_HID_SUPPORT is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MSM8974=y
+CONFIG_WCD9320_CODEC=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_STORAGE=y
@@ -197,6 +202,9 @@
CONFIG_MMC_TEST=m
CONFIG_MMC_MSM=y
CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_DRV_MSM is not set
+CONFIG_RTC_DRV_QPNP=y
CONFIG_STAGING=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
@@ -208,6 +216,7 @@
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_BAMDMA=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_POWER_ON=y
CONFIG_MSM_IOMMU=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
@@ -233,25 +242,14 @@
CONFIG_DEBUG_LL=y
CONFIG_EARLY_PRINTK=y
CONFIG_KEYS=y
-CONFIG_CRYPTO_AUTHENC=y
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD4=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_ARC4=y
-CONFIG_CRYPTO_DES=y
CONFIG_CRYPTO_TWOFISH=y
-CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRC_CCITT=y
-CONFIG_LIBCRC32C=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_SOC=y
-CONFIG_SND_SOC_MSM8974=y
CONFIG_CRYPTO_DEV_QCRYPTO=m
CONFIG_CRYPTO_DEV_QCE=m
CONFIG_CRYPTO_DEV_QCEDEV=m
-
+CONFIG_CRC_CCITT=y
+CONFIG_LIBCRC32C=y
+CONFIG_MSM_BUS_SCALING=y
diff --git a/arch/arm/configs/msm9615_defconfig b/arch/arm/configs/msm9615_defconfig
index c9bc610..db94f13 100644
--- a/arch/arm/configs/msm9615_defconfig
+++ b/arch/arm/configs/msm9615_defconfig
@@ -78,8 +78,11 @@
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V2=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index bb3554b..9094db7 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -50,6 +50,12 @@
CONFIG_NEON=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_SUSPEND is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_MTD_MSM_NAND is not set
+CONFIG_MTD_MSM_QPIC_NAND=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
# CONFIG_ANDROID_PMEM is not set
@@ -69,9 +75,13 @@
# CONFIG_HWMON is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_BAMDMA=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_YAFFS_FS=y
+CONFIG_YAFFS_DISABLE_TAGS_ECC=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
@@ -85,9 +95,6 @@
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_USER=y
-CONFIG_SPS=y
-CONFIG_SPS_SUPPORT_BAMDMA=y
-CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_KEYS=y
CONFIG_CRYPTO_AUTHENC=y
CONFIG_CRYPTO_CBC=y
@@ -103,5 +110,4 @@
CONFIG_CRYPTO_DEFLATE=y
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
CONFIG_LIBCRC32C=y
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 99ee2de..e32194f 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -2,6 +2,7 @@
#define __ASMARM_ARCH_TIMER_H
#include <linux/ioport.h>
+#include <linux/clocksource.h>
struct arch_timer {
struct resource res[3];
@@ -10,6 +11,7 @@
#ifdef CONFIG_ARM_ARCH_TIMER
int arch_timer_register(struct arch_timer *);
int arch_timer_of_register(void);
+cycle_t arch_counter_get_cntpct(void);
#else
static inline int arch_timer_register(struct arch_timer *at)
{
@@ -20,6 +22,11 @@
{
return -ENXIO;
}
+
+static inline cycle_t arch_counter_get_cntpct(void)
+{
+ return 0;
+}
#endif
#endif
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 5078148..ad12bcd 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -60,7 +60,7 @@
}
void gic_set_irq_secure(unsigned int irq);
-void msm_gic_save(bool modem_wake, int from_idle);
+void msm_gic_save(void);
void msm_gic_restore(void);
void core1_gic_configure_and_raise(void);
#endif
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
index 562f13c..d341ea9 100644
--- a/arch/arm/include/asm/mach/mmc.h
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -146,7 +146,7 @@
unsigned int uhs_caps2;
void (*sdio_lpm_gpio_setup)(struct device *, unsigned int);
unsigned int status_irq;
- unsigned int status_gpio;
+ int status_gpio;
/* Indicates the polarity of the GPIO line when card is inserted */
bool is_status_gpio_active_low;
unsigned int sdiowakeup_irq;
@@ -158,7 +158,7 @@
unsigned int msmsdcc_fmax;
bool nonremovable;
unsigned int mpm_sdiowakeup_int;
- unsigned int wpswitch_gpio;
+ int wpswitch_gpio;
bool is_wpswitch_active_low;
struct msm_mmc_slot_reg_data *vreg_data;
int is_sdio_al_client;
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 582c9b3..94aa75e 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -7,6 +7,8 @@
#include <asm/processor.h>
+extern int msm_krait_need_wfe_fixup;
+
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
@@ -21,25 +23,42 @@
#ifdef CONFIG_THUMB2_KERNEL
#define SEV ALT_SMP("sev.w", "nop.w")
/*
- * For Thumb-2, special care is needed to ensure that the conditional WFE
- * instruction really does assemble to exactly 4 bytes (as required by
- * the SMP_ON_UP fixup code). By itself "wfene" might cause the
- * assembler to insert a extra (16-bit) IT instruction, depending on the
- * presence or absence of neighbouring conditional instructions.
- *
- * To avoid this unpredictableness, an approprite IT is inserted explicitly:
- * the assembler won't change IT instructions which are explicitly present
- * in the input.
+ * Both instructions given to the ALT_SMP macro need to be the same size, to
+ * allow the SMP_ON_UP fixups to function correctly. Hence the explicit encoding
+ * specifications.
*/
-#define WFE(cond) ALT_SMP( \
- "it " cond "\n\t" \
- "wfe" cond ".n", \
- \
+#define WFE() ALT_SMP( \
+ "wfe.w", \
"nop.w" \
)
#else
#define SEV ALT_SMP("sev", "nop")
-#define WFE(cond) ALT_SMP("wfe" cond, "nop")
+#define WFE() ALT_SMP("wfe", "nop")
+#endif
+
+/*
+ * The fixup involves disabling interrupts during execution of the WFE
+ * instruction. This could potentially lead to deadlock if a thread is trying
+ * to acquire a spinlock which is being released from an interrupt context.
+ */
+#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
+#define WFE_SAFE(fixup, tmp) \
+" mrs " tmp ", cpsr\n" \
+" cmp " fixup ", #0\n" \
+" wfeeq\n" \
+" beq 10f\n" \
+" cpsid if\n" \
+" mrc p15, 7, " fixup ", c15, c0, 5\n" \
+" bic " fixup ", " fixup ", #0x10000\n" \
+" mcr p15, 7, " fixup ", c15, c0, 5\n" \
+" isb\n" \
+" wfe\n" \
+" orr " fixup ", " fixup ", #0x10000\n" \
+" mcr p15, 7, " fixup ", c15, c0, 5\n" \
+" isb\n" \
+"10: msr cpsr_cf, " tmp "\n"
+#else
+#define WFE_SAFE(fixup, tmp) " wfe\n"
#endif
static inline void dsb_sev(void)
@@ -79,17 +98,19 @@
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
- unsigned long tmp;
+ unsigned long tmp, fixup = msm_krait_need_wfe_fixup;
__asm__ __volatile__(
-"1: ldrex %0, [%1]\n"
-" teq %0, #0\n"
- WFE("ne")
-" strexeq %0, %2, [%1]\n"
-" teqeq %0, #0\n"
+"1: ldrex %[tmp], [%[lock]]\n"
+" teq %[tmp], #0\n"
+" beq 2f\n"
+ WFE_SAFE("%[fixup]", "%[tmp]")
+"2:\n"
+" strexeq %[tmp], %[bit0], [%[lock]]\n"
+" teqeq %[tmp], #0\n"
" bne 1b"
- : "=&r" (tmp)
- : "r" (&lock->lock), "r" (1)
+ : [tmp] "=&r" (tmp), [fixup] "+r" (fixup)
+ : [lock] "r" (&lock->lock), [bit0] "r" (1)
: "cc");
smp_mb();
@@ -155,6 +176,7 @@
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp, ticket, next_ticket;
+ unsigned long fixup = msm_krait_need_wfe_fixup;
/* Grab the next ticket and wait for it to be "served" */
__asm__ __volatile__(
@@ -166,12 +188,15 @@
" uxth %[ticket], %[ticket]\n"
"2:\n"
#ifdef CONFIG_CPU_32v6K
-" wfene\n"
+" beq 3f\n"
+ WFE_SAFE("%[fixup]", "%[tmp]")
+"3:\n"
#endif
" ldr %[tmp], [%[lockaddr]]\n"
" cmp %[ticket], %[tmp], lsr #16\n"
" bne 2b"
- : [ticket]"=&r" (ticket), [tmp]"=&r" (tmp), [next_ticket]"=&r" (next_ticket)
+ : [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
+ [next_ticket]"=&r" (next_ticket), [fixup]"+r" (fixup)
: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
: "cc");
smp_mb();
@@ -220,13 +245,16 @@
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- unsigned long ticket;
+ unsigned long ticket, tmp, fixup = msm_krait_need_wfe_fixup;
/* Wait for now_serving == next_ticket */
__asm__ __volatile__(
#ifdef CONFIG_CPU_32v6K
" cmpne %[lockaddr], %[lockaddr]\n"
-"1: wfene\n"
+"1:\n"
+" beq 2f\n"
+ WFE_SAFE("%[fixup]", "%[tmp]")
+"2:\n"
#else
"1:\n"
#endif
@@ -235,7 +263,8 @@
" uxth %[ticket], %[ticket]\n"
" cmp %[ticket], #0\n"
" bne 1b"
- : [ticket]"=&r" (ticket)
+ : [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
+ [fixup]"+r" (fixup)
: [lockaddr]"r" (&lock->lock)
: "cc");
}
@@ -263,17 +292,19 @@
static inline void arch_write_lock(arch_rwlock_t *rw)
{
- unsigned long tmp;
+ unsigned long tmp, fixup = msm_krait_need_wfe_fixup;
__asm__ __volatile__(
-"1: ldrex %0, [%1]\n"
-" teq %0, #0\n"
- WFE("ne")
-" strexeq %0, %2, [%1]\n"
-" teq %0, #0\n"
+"1: ldrex %[tmp], [%[lock]]\n"
+" teq %[tmp], #0\n"
+" beq 2f\n"
+ WFE_SAFE("%[fixup]", "%[tmp]")
+"2:\n"
+" strexeq %[tmp], %[bit31], [%[lock]]\n"
+" teq %[tmp], #0\n"
" bne 1b"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
+ : [tmp] "=&r" (tmp), [fixup] "+r" (fixup)
+ : [lock] "r" (&rw->lock), [bit31] "r" (0x80000000)
: "cc");
smp_mb();
@@ -329,17 +360,19 @@
*/
static inline void arch_read_lock(arch_rwlock_t *rw)
{
- unsigned long tmp, tmp2;
+ unsigned long tmp, tmp2, fixup = msm_krait_need_wfe_fixup;
__asm__ __volatile__(
-"1: ldrex %0, [%2]\n"
-" adds %0, %0, #1\n"
-" strexpl %1, %0, [%2]\n"
- WFE("mi")
-" rsbpls %0, %1, #0\n"
+"1: ldrex %[tmp], [%[lock]]\n"
+" adds %[tmp], %[tmp], #1\n"
+" strexpl %[tmp2], %[tmp], [%[lock]]\n"
+" bpl 2f\n"
+ WFE_SAFE("%[fixup]", "%[tmp]")
+"2:\n"
+" rsbpls %[tmp], %[tmp2], #0\n"
" bmi 1b"
- : "=&r" (tmp), "=&r" (tmp2)
- : "r" (&rw->lock)
+ : [tmp] "=&r" (tmp), [tmp2] "=&r" (tmp2), [fixup] "+r" (fixup)
+ : [lock] "r" (&rw->lock)
: "cc");
smp_mb();
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 87bb7d3..43c627d 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -22,6 +22,7 @@
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/export.h>
#include <asm/cputype.h>
#include <asm/localtimer.h>
@@ -315,10 +316,16 @@
return ((cycle_t) cvalh << 32) | cvall;
}
-static cycle_t arch_counter_read(struct clocksource *cs)
+cycle_t arch_counter_get_cntpct(void)
{
return arch_specific_timer->get_cntpct();
}
+EXPORT_SYMBOL(arch_counter_get_cntpct);
+
+static cycle_t arch_counter_read(struct clocksource *cs)
+{
+ return arch_counter_get_cntpct();
+}
#ifdef ARCH_HAS_READ_CURRENT_TIMER
int read_current_timer(unsigned long *timer_val)
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index a8f2858..7d767c3 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -185,6 +185,7 @@
INIT_SETUP(16)
INIT_CALLS
CON_INITCALL
+ COMPAT_EXPORTS
SECURITY_INITCALL
INIT_RAM_FS
}
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index c23bc10..54f3292 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -39,6 +39,7 @@
select MSM_RUN_QUEUE_STATS if MSM_SOC_REV_A
select DONT_MAP_HOLE_AFTER_MEMBANK0
select MIGHT_HAVE_CACHE_L2X0
+ select MSM_MODEM_RESTART
config ARCH_MSM7X30
bool "MSM7x30"
@@ -65,6 +66,7 @@
select MULTI_IRQ_HANDLER
select MSM_PM2 if PM
select HOLES_IN_ZONE if SPARSEMEM
+ select MSM_MODEM_RESTART
config ARCH_QSD8X50
bool "QSD8X50"
@@ -78,6 +80,7 @@
select MSM_GPIOMUX
select MSM_DALRPC
select MSM_PM2 if PM
+ select MSM_MODEM_RESTART
config ARCH_MSM8X60
bool "MSM8X60"
@@ -172,6 +175,7 @@
select HOLES_IN_ZONE if SPARSEMEM
select MSM_RUN_QUEUE_STATS
select ARM_HAS_SG_CHAIN
+ select MSM_KRAIT_WFE_FIXUP
config ARCH_MSM8930
bool "MSM8930"
@@ -203,6 +207,7 @@
select MSM_PM8X60 if PM
select HOLES_IN_ZONE if SPARSEMEM
select ARM_HAS_SG_CHAIN
+ select MSM_KRAIT_WFE_FIXUP
config ARCH_APQ8064
bool "APQ8064"
@@ -229,6 +234,7 @@
select MIGHT_HAVE_PCI
select ARCH_SUPPORTS_MSI
select ARM_HAS_SG_CHAIN
+ select MSM_KRAIT_WFE_FIXUP
config ARCH_MSM8974
bool "MSM8974"
@@ -370,6 +376,9 @@
select MSM_SMP
bool
+config MSM_KRAIT_WFE_FIXUP
+ bool
+
config ARCH_MSM_CORTEX_A5
bool
select HAVE_HW_BRKPT_RESERVED_RW_ACCESS
@@ -392,10 +401,33 @@
config MSM_RPM_SMD
depends on MSM_SMD
- bool "Support for using SMD as the transport layer for communicatons with RPM"
+ select MSM_MPM_OF
+ bool "RPM driver using SMD protocol"
+ help
+ RPM is the dedicated hardware engine for managing shared SoC
+ resources. This config adds driver support for using SMD as a
+ transport layer communication with RPM hardware. It also selects
+ the MSM_MPM config that programs the MPM module to monitor interrupts
+ during sleep modes.
config MSM_MPM
bool "Modem Power Manager"
+ help
+ MPM is a dedicated hardware resource responsible for entering and
+ waking up from a system wide low power mode. The MPM driver tracks
+ the wakeup interrupts and configures the MPM to monitor the wakeup
+ interrupts when going to a system wide sleep mode.
+
+config MSM_MPM_OF
+ bool "Modem Power Manager (Device Tree)"
+ depends on OF
+ help
+ MPM is a dedicated hardware resource responsible for entering and
+ waking up from a system wide low power mode. The MPM driver tracks
+ the wakeup interrupts and configures the MPM to monitor the wakeup
+ interrupts when going to a system wide sleep mode. This config option
+ enables the MPM driver that supports initialization from a device
+ tree
config MSM_XO
bool
@@ -666,20 +698,6 @@
help
Support for the Qualcomm MSM8x55 SVLTE SURF eval board.
-config MACH_MSM8X60_RUMI3
- depends on ARCH_MSM8X60
- default n
- bool "MSM8x60 RUMI3"
- help
- Support for the Qualcomm MSM8x60 RUMI3 emulator.
-
-config MACH_MSM8X60_SIM
- depends on ARCH_MSM8X60
- default n
- bool "MSM8x60 Simulator"
- help
- Support for the Qualcomm MSM8x60 simulator.
-
config MACH_MSM8X60_SURF
depends on ARCH_MSM8X60
default n
@@ -725,18 +743,6 @@
help
Support for the Qualcomm MSM8x60 Dragon board.
-config MACH_MSM8960_SIM
- depends on ARCH_MSM8960
- bool "MSM8960 Simulator"
- help
- Support for the Qualcomm MSM8960 simulator.
-
-config MACH_MSM8960_RUMI3
- depends on ARCH_MSM8960
- bool "MSM8960 RUMI3"
- help
- Support for the Qualcomm MSM8960 RUMI3 emulator.
-
config MACH_MSM8960_CDP
depends on ARCH_MSM8960
bool "MSM8960 CDP"
@@ -811,18 +817,6 @@
The two TSIF cores share the same DM configuration
so they cannot be used simultaneously.
-config MACH_APQ8064_SIM
- depends on ARCH_APQ8064
- bool "APQ8064 Simulator"
- help
- Support for the Qualcomm APQ8064 simulator.
-
-config MACH_APQ8064_RUMI3
- depends on ARCH_APQ8064
- bool "APQ8064 RUMI3"
- help
- Support for the Qualcomm APQ8064 RUMI3 emulator.
-
config MACH_APQ8064_CDP
depends on ARCH_APQ8064
bool "APQ8064 CDP"
@@ -2035,6 +2029,16 @@
the low power modes that RPM enters. The drivers outputs the message
via a debugfs node.
+config MSM_RPM_RBCPR_STATS_LOG
+ tristate "MSM Resource Power Manager RBCPR Stat Driver"
+ depends on DEBUG_FS
+ depends on MSM_RPM
+ help
+ This option enables a driver which reads RPM messages from a shared
+ memory location. These messages provide statistical information about
+ RBCPR (Rapid Bridge Core Power Reduction) information. The driver
+ outputs the message via a debugfs node.
+
config MSM_DIRECT_SCLK_ACCESS
bool "Direct access to the SCLK timer"
default n
@@ -2051,6 +2055,9 @@
config MSM_NATIVE_RESTART
bool
+config MSM_MODEM_RESTART
+ bool
+
config MSM_PM2
depends on PM
bool
@@ -2088,6 +2095,14 @@
deadlocks. It does not run during the bootup process, so it will
not catch any early lockups.
+config MSM_MEMORY_DUMP
+ bool "MSM Memory Dump Support"
+ help
+ This enables memory dump feature. It allows various client
+ subsystems to register respective dump regions. At the time
+ of deadlocks or cpu hangs these dump regions are captured to
+ give a snapshot of the system at the time of the crash.
+
config MSM_DLOAD_MODE
bool "Enable download mode on crashes"
depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_MSM9615
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 7ccdd0f..33b153f 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -23,6 +23,7 @@
endif
obj-y += acpuclock.o
+obj-$(CONFIG_ARCH_MSM_KRAIT) += acpuclock-krait.o
obj-$(CONFIG_ARCH_MSM7X27) += acpuclock-7627.o clock-pll.o
obj-$(CONFIG_ARCH_MSM_SCORPION) += pmu.o
obj-$(CONFIG_ARCH_MSM_SCORPIONMP) += perf_event_msm_l2.o
@@ -228,8 +229,7 @@
obj-$(CONFIG_MSM_WATCHDOG) += msm_watchdog.o
obj-$(CONFIG_MSM_WATCHDOG) += msm_watchdog_asm.o
obj-$(CONFIG_MSM_WATCHDOG_V2) += msm_watchdog_v2.o
-obj-$(CONFIG_MACH_MSM8X60_RUMI3) += board-msm8x60.o
-obj-$(CONFIG_MACH_MSM8X60_SIM) += board-msm8x60.o
+obj-$(CONFIG_MSM_MEMORY_DUMP) += msm_memory_dump.o
obj-$(CONFIG_MACH_MSM8X60_SURF) += board-msm8x60.o
obj-$(CONFIG_MACH_MSM8X60_FFA) += board-msm8x60.o
obj-$(CONFIG_MACH_MSM8X60_FLUID) += board-msm8x60.o
@@ -269,11 +269,10 @@
obj-$(CONFIG_ARCH_MSM8960) += saw-regulator.o
obj-$(CONFIG_ARCH_MSM8960) += devices-8960.o
obj-$(CONFIG_ARCH_APQ8064) += devices-8960.o devices-8064.o
+obj-$(CONFIG_ARCH_APQ8064) += acpuclock-8064.o
board-8960-all-objs += board-8960.o board-8960-camera.o board-8960-display.o board-8960-pmic.o board-8960-storage.o board-8960-gpiomux.o
board-8930-all-objs += board-8930.o board-8930-camera.o board-8930-display.o board-8930-pmic.o board-8930-storage.o board-8930-gpiomux.o devices-8930.o board-8930-gpu.o
board-8064-all-objs += board-8064.o board-8064-pmic.o board-8064-storage.o board-8064-gpiomux.o board-8064-camera.o board-8064-display.o board-8064-gpu.o
-obj-$(CONFIG_MACH_MSM8960_SIM) += board-8960-all.o board-8960-regulator.o
-obj-$(CONFIG_MACH_MSM8960_RUMI3) += board-8960-all.o board-8960-regulator.o
obj-$(CONFIG_MACH_MSM8960_CDP) += board-8960-all.o board-8960-regulator.o
obj-$(CONFIG_MACH_MSM8960_MTP) += board-8960-all.o board-8960-regulator.o
obj-$(CONFIG_MACH_MSM8960_FLUID) += board-8960-all.o board-8960-regulator.o
@@ -281,8 +280,6 @@
obj-$(CONFIG_MACH_MSM8930_MTP) += board-8930-all.o board-8930-regulator.o
obj-$(CONFIG_MACH_MSM8930_FLUID) += board-8930-all.o board-8930-regulator.o
obj-$(CONFIG_PM8921_BMS) += bms-batterydata.o bms-batterydata-desay.o
-obj-$(CONFIG_MACH_APQ8064_SIM) += board-8064-all.o board-8064-regulator.o
-obj-$(CONFIG_MACH_APQ8064_RUMI3) += board-8064-all.o board-8064-regulator.o
obj-$(CONFIG_MACH_APQ8064_CDP) += board-8064-all.o board-8064-regulator.o
obj-$(CONFIG_MACH_APQ8064_MTP) += board-8064-all.o board-8064-regulator.o
obj-$(CONFIG_MACH_APQ8064_LIQUID) += board-8064-all.o board-8064-regulator.o
@@ -291,10 +288,11 @@
obj-$(CONFIG_ARCH_MSM9615) += board-9615.o devices-9615.o board-9615-regulator.o board-9615-gpiomux.o board-9615-storage.o board-9615-display.o
obj-$(CONFIG_ARCH_MSM9615) += clock-local.o clock-9615.o acpuclock-9615.o clock-rpm.o clock-pll.o
obj-$(CONFIG_ARCH_MSM8974) += board-8974.o board-dt.o board-8974-regulator.o board-8974-gpiomux.o
-obj-$(CONFIG_ARCH_MSM8974) += acpuclock-krait.o acpuclock-8974.o
+obj-$(CONFIG_ARCH_MSM8974) += acpuclock-8974.o
obj-$(CONFIG_ARCH_MSM8974) += clock-local2.o clock-pll.o clock-8974.o clock-rpm.o clock-voter.o
obj-$(CONFIG_ARCH_MSM8974) += gdsc.o
obj-$(CONFIG_ARCH_MSM9625) += board-9625.o board-9625-gpiomux.o
+obj-$(CONFIG_ARCH_MSM8930) += acpuclock-8930.o acpuclock-8627.o acpuclock-8930aa.o
obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire.o board-sapphire-gpio.o
obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire-keypad.o board-sapphire-panel.o
@@ -322,8 +320,10 @@
ifdef CONFIG_MSM_RPM_SMD
obj-$(CONFIG_ARCH_MSM8974) += lpm_levels.o lpm_resources.o
endif
+obj-$(CONFIG_MSM_MPM_OF) += mpm-of.o
obj-$(CONFIG_MSM_MPM) += mpm.o
obj-$(CONFIG_MSM_RPM_STATS_LOG) += rpm_stats.o
+obj-$(CONFIG_MSM_RPM_RBCPR_STATS_LOG) += rpm_rbcpr_stats.o
obj-$(CONFIG_MSM_RPM_LOG) += rpm_log.o
obj-$(CONFIG_MSM_TZ_LOG) += tz_log.o
obj-$(CONFIG_MSM_XO) += msm_xo.o
@@ -336,7 +336,7 @@
obj-$(CONFIG_ARCH_MSM8X60) += board-msm8x60-vcm.o
endif
obj-$(CONFIG_MSM_OCMEM) += ocmem.o ocmem_allocator.o ocmem_notifier.o
-obj-$(CONFIG_MSM_OCMEM) += ocmem_sched.o ocmem_api.o
+obj-$(CONFIG_MSM_OCMEM) += ocmem_sched.o ocmem_api.o ocmem_rdm.o
obj-$(CONFIG_ARCH_MSM7X27) += gpiomux-7x27.o gpiomux-v1.o gpiomux.o
obj-$(CONFIG_ARCH_MSM7X30) += gpiomux-7x30.o gpiomux-v1.o gpiomux.o
@@ -358,6 +358,7 @@
obj-$(CONFIG_MSM_FAKE_BATTERY) += fish_battery.o
obj-$(CONFIG_MSM_RPC_VIBRATOR) += msm_vibrator.o
obj-$(CONFIG_MSM_NATIVE_RESTART) += restart.o
+obj-$(CONFIG_MSM_MODEM_RESTART) += restart_7k.o
obj-$(CONFIG_MSM_PROC_COMM_REGULATOR) += proccomm-regulator.o
ifdef CONFIG_MSM_PROC_COMM_REGULATOR
diff --git a/arch/arm/mach-msm/acpuclock-7627.c b/arch/arm/mach-msm/acpuclock-7627.c
index f9ff226..639cc94 100644
--- a/arch/arm/mach-msm/acpuclock-7627.c
+++ b/arch/arm/mach-msm/acpuclock-7627.c
@@ -38,11 +38,15 @@
#include "smd_private.h"
#include "acpuclock.h"
+#include "clock.h"
#define A11S_CLK_CNTL_ADDR (MSM_CSR_BASE + 0x100)
#define A11S_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104)
#define A11S_VDD_SVS_PLEVEL_ADDR (MSM_CSR_BASE + 0x124)
+#define PLL4_L_VAL_ADDR (MSM_CLK_CTL_BASE + 0x378)
+#define PLL4_M_VAL_ADDR (MSM_CLK_CTL_BASE + 0x37C)
+#define PLL4_N_VAL_ADDR (MSM_CLK_CTL_BASE + 0x380)
#define POWER_COLLAPSE_KHZ 19200
@@ -67,6 +71,12 @@
const char *name;
};
+struct pll_config {
+ unsigned int l;
+ unsigned int m;
+ unsigned int n;
+};
+
static struct acpu_clk_src pll_clk[ACPU_PLL_END] = {
[ACPU_PLL_0] = { .name = "pll0_clk" },
[ACPU_PLL_1] = { .name = "pll1_clk" },
@@ -74,6 +84,13 @@
[ACPU_PLL_4] = { .name = "pll4_clk" },
};
+static struct pll_config pll4_cfg_tbl[] = {
+ { 36, 1, 2 }, /* 700.8 MHz */
+ { 52, 1, 2 }, /* 1008 MHz */
+ { 63, 0, 1 }, /* 1209.6 MHz */
+ { 73, 0, 1 }, /* 1401.6 MHz */
+};
+
struct clock_state {
struct clkctl_acpu_speed *current_speed;
struct mutex lock;
@@ -91,15 +108,20 @@
unsigned int ahbclk_div;
int vdd;
unsigned int axiclk_khz;
+ struct pll_config *pll_rate;
unsigned long lpj; /* loops_per_jiffy */
/* Pointers in acpu_freq_tbl[] for max up/down steppings. */
struct clkctl_acpu_speed *down[ACPU_PLL_END];
struct clkctl_acpu_speed *up[ACPU_PLL_END];
};
+static bool dynamic_reprogram;
static struct clock_state drv_state = { 0 };
static struct clkctl_acpu_speed *acpu_freq_tbl;
+/* Switch to this when reprogramming PLL4 */
+static struct clkctl_acpu_speed *backup_s;
+
/*
* ACPU freq tables used for different PLLs frequency combinations. The
* correct table is selected during init.
@@ -119,7 +141,7 @@
{ 0, 400000, ACPU_PLL_2, 2, 2, 133333, 2, 5, 160000 },
{ 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 200000, 2, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627 with CDMA capable modem */
@@ -133,7 +155,7 @@
{ 0, 400000, ACPU_PLL_2, 2, 2, 133333, 2, 5, 160000 },
{ 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 200000, 2, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627 with GSM capable modem - PLL2 @ 800 */
@@ -147,7 +169,7 @@
{ 0, 400000, ACPU_PLL_2, 2, 1, 133333, 2, 5, 160000 },
{ 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 6, 160000 },
{ 1, 800000, ACPU_PLL_2, 2, 0, 200000, 3, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627 with CDMA capable modem - PLL2 @ 800 */
@@ -161,7 +183,7 @@
{ 0, 400000, ACPU_PLL_2, 2, 1, 133333, 2, 5, 160000 },
{ 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 6, 160000 },
{ 1, 800000, ACPU_PLL_2, 2, 0, 200000, 3, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627a PLL2 @ 1200MHz with GSM capable modem */
@@ -176,7 +198,7 @@
{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 800000, ACPU_PLL_4, 6, 0, 100000, 3, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627a PLL2 @ 1200MHz with CDMA capable modem */
@@ -191,7 +213,7 @@
{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 120000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 800000, ACPU_PLL_4, 6, 0, 100000, 3, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627aa PLL4 @ 1008MHz with GSM capable modem */
@@ -206,7 +228,7 @@
{ 0, 504000, ACPU_PLL_4, 6, 1, 63000, 3, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627aa PLL4 @ 1008MHz with CDMA capable modem */
@@ -221,7 +243,7 @@
{ 0, 504000, ACPU_PLL_4, 6, 1, 63000, 3, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 8625 PLL4 @ 1209MHz with GSM capable modem */
@@ -235,7 +257,7 @@
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 0, 604800, ACPU_PLL_4, 6, 1, 75600, 3, 6, 160000 },
{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 8625 PLL4 @ 1209MHz with CDMA capable modem */
@@ -249,7 +271,40 @@
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 0, 604800, ACPU_PLL_4, 6, 1, 75600, 3, 6, 160000 },
{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
+};
+
+/* 8625 PLL4 @ 1401.6MHz with GSM capable modem */
+static struct clkctl_acpu_speed pll0_960_pll1_245_pll2_1200_pll4_1401[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 30720 },
+ { 0, 61440, ACPU_PLL_1, 1, 3, 7680, 3, 0, 61440 },
+ { 0, 122880, ACPU_PLL_1, 1, 1, 15360, 3, 1, 61440 },
+ { 1, 245760, ACPU_PLL_1, 1, 0, 30720, 3, 1, 61440 },
+ { 0, 300000, ACPU_PLL_2, 2, 3, 37500, 3, 2, 122880 },
+ { 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 2, 122880 },
+ { 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 3, 122880 },
+ { 0, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 4, 160000 },
+ { 1, 700800, ACPU_PLL_4, 6, 0, 87500, 3, 4, 160000, &pll4_cfg_tbl[0]},
+ { 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 5, 200000, &pll4_cfg_tbl[1]},
+ { 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 6, 200000, &pll4_cfg_tbl[2]},
+ { 1, 1401600, ACPU_PLL_4, 6, 0, 175000, 3, 7, 200000, &pll4_cfg_tbl[3]},
+ { 0 }
+};
+
+/* 8625 PLL4 @ 1401.6MHz with CDMA capable modem */
+static struct clkctl_acpu_speed pll0_960_pll1_196_pll2_1200_pll4_1401[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 24576 },
+ { 0, 65536, ACPU_PLL_1, 1, 3, 8192, 3, 1, 49152 },
+ { 0, 98304, ACPU_PLL_1, 1, 1, 12288, 3, 2, 49152 },
+ { 1, 196608, ACPU_PLL_1, 1, 0, 24576, 3, 3, 98304 },
+ { 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 2, 122880 },
+ { 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 3, 122880 },
+ { 0, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 4, 160000 },
+ { 1, 700800, ACPU_PLL_4, 6, 0, 87500, 3, 4, 160000, &pll4_cfg_tbl[0]},
+ { 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 5, 200000, &pll4_cfg_tbl[1]},
+ { 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 6, 200000, &pll4_cfg_tbl[2]},
+ { 1, 1401600, ACPU_PLL_4, 6, 0, 175000, 3, 7, 200000, &pll4_cfg_tbl[3]},
+ { 0 }
};
/* 8625 PLL4 @ 1152MHz with GSM capable modem */
@@ -263,7 +318,7 @@
{ 0, 576000, ACPU_PLL_4, 6, 1, 72000, 3, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 1152000, ACPU_PLL_4, 6, 0, 144000, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 8625 PLL4 @ 1115MHz with CDMA capable modem */
@@ -277,7 +332,7 @@
{ 0, 576000, ACPU_PLL_4, 6, 1, 72000, 3, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 1152000, ACPU_PLL_4, 6, 0, 144000, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
@@ -292,7 +347,7 @@
{ 0, 400000, ACPU_PLL_2, 2, 2, 50000, 3, 4, 122880 },
{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627a PLL2 @ 1200MHz with GSM capable modem */
@@ -307,7 +362,7 @@
{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 800000, ACPU_PLL_4, 6, 0, 100000, 3, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627a PLL2 @ 1200MHz with CDMA capable modem */
@@ -322,7 +377,7 @@
{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 120000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 800000, ACPU_PLL_4, 6, 0, 100000, 3, 7, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627aa PLL4 @ 1008MHz with GSM capable modem */
@@ -337,7 +392,7 @@
{ 0, 504000, ACPU_PLL_4, 6, 1, 63000, 3, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7627aa PLL4 @ 1008MHz with CDMA capable modem */
@@ -352,7 +407,7 @@
{ 0, 504000, ACPU_PLL_4, 6, 1, 63000, 3, 6, 160000 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 7, 200000},
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
/* 7625a PLL2 @ 1200MHz with GSM capable modem */
@@ -366,7 +421,7 @@
{ 0, 400000, ACPU_PLL_2, 2, 2, 50000, 3, 4, 122880 },
{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 200000 },
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+ { 0 }
};
#define PLL_CONFIG(m0, m1, m2, m4) { \
@@ -399,6 +454,8 @@
PLL_CONFIG(960, 196, 1200, 1209),
PLL_CONFIG(960, 245, 1200, 1152),
PLL_CONFIG(960, 196, 1200, 1152),
+ PLL_CONFIG(960, 245, 1200, 1401),
+ PLL_CONFIG(960, 196, 1200, 1401),
{ 0, 0, 0, 0, 0 }
};
@@ -439,6 +496,31 @@
}
#endif
+static void update_jiffies(int cpu, unsigned long loops)
+{
+#ifdef CONFIG_SMP
+ for_each_possible_cpu(cpu) {
+ per_cpu(cpu_data, cpu).loops_per_jiffy =
+ loops;
+ }
+#endif
+ /* Adjust the global one */
+ loops_per_jiffy = loops;
+}
+
+/* Assumes PLL4 is off and the acpuclock isn't sourced from PLL4 */
+static void acpuclk_config_pll4(struct pll_config *pll)
+{
+ /* Make sure write to disable PLL_4 has completed
+ * before reconfiguring that PLL. */
+ mb();
+ writel_relaxed(pll->l, PLL4_L_VAL_ADDR);
+ writel_relaxed(pll->m, PLL4_M_VAL_ADDR);
+ writel_relaxed(pll->n, PLL4_N_VAL_ADDR);
+ /* Make sure PLL is programmed before returning. */
+ mb();
+}
+
static int acpuclk_set_vdd_level(int vdd)
{
uint32_t current_vdd;
@@ -524,6 +606,7 @@
struct clkctl_acpu_speed *cur_s, *tgt_s, *strt_s;
int res, rc = 0;
unsigned int plls_enabled = 0, pll;
+ int delta;
if (reason == SETRATE_CPUFREQ)
mutex_lock(&drv_state.lock);
@@ -592,6 +675,61 @@
pr_debug("Switching from ACPU rate %u KHz -> %u KHz\n",
strt_s->a11clk_khz, tgt_s->a11clk_khz);
+ delta = abs((int)(strt_s->a11clk_khz - tgt_s->a11clk_khz));
+
+ if (dynamic_reprogram) {
+ if (tgt_s->pll == ACPU_PLL_4) {
+ if (strt_s->pll == ACPU_PLL_4 ||
+ delta > drv_state.max_speed_delta_khz) {
+ /*
+ * Enable the backup PLL if required
+ * and switch to it.
+ */
+ clk_enable(pll_clk[backup_s->pll].clk);
+ acpuclk_set_div(backup_s);
+ }
+ /* Make sure PLL4 is off before reprogramming */
+ if ((plls_enabled & (1 << tgt_s->pll))) {
+ clk_disable(pll_clk[tgt_s->pll].clk);
+ plls_enabled &= ~(1 << tgt_s->pll);
+ }
+ acpuclk_config_pll4(tgt_s->pll_rate);
+ pll_clk[tgt_s->pll].clk->rate = tgt_s->a11clk_khz*1000;
+
+ } else if (strt_s->pll == ACPU_PLL_4) {
+ if (delta > drv_state.max_speed_delta_khz) {
+ /*
+ * Enable the backup PLL if required
+ * and switch to it.
+ */
+ clk_enable(pll_clk[backup_s->pll].clk);
+ acpuclk_set_div(backup_s);
+ }
+ }
+
+ if (!(plls_enabled & (1 << tgt_s->pll))) {
+ rc = clk_enable(pll_clk[tgt_s->pll].clk);
+ if (rc < 0) {
+ pr_err("PLL%d enable failed (%d)\n",
+ tgt_s->pll, rc);
+ goto out;
+ }
+ plls_enabled |= 1 << tgt_s->pll;
+ }
+ acpuclk_set_div(tgt_s);
+ drv_state.current_speed = tgt_s;
+ /* Re-adjust lpj for the new clock speed. */
+ update_jiffies(cpu, tgt_s->lpj);
+
+ /* Disable the backup PLL */
+ if ((delta > drv_state.max_speed_delta_khz)
+ || (strt_s->pll == ACPU_PLL_4 &&
+ tgt_s->pll == ACPU_PLL_4))
+ clk_disable(pll_clk[backup_s->pll].clk);
+
+ goto done;
+ }
+
while (cur_s != tgt_s) {
/*
* Always jump to target freq if within max_speed_delta_khz,
@@ -648,17 +786,10 @@
acpuclk_set_div(cur_s);
drv_state.current_speed = cur_s;
/* Re-adjust lpj for the new clock speed. */
-#ifdef CONFIG_SMP
- for_each_possible_cpu(cpu) {
- per_cpu(cpu_data, cpu).loops_per_jiffy =
- cur_s->lpj;
- }
-#endif
- /* Adjust the global one */
- loops_per_jiffy = cur_s->lpj;
+ update_jiffies(cpu, cur_s->lpj);
}
-
+done:
/* Nothing else to do for SWFI. */
if (reason == SETRATE_SWFI)
goto out;
@@ -781,7 +912,7 @@
static void __devinit select_freq_plan(void)
{
unsigned long pll_mhz[ACPU_PLL_END];
- struct pll_freq_tbl_map *t;
+ struct pll_freq_tbl_map *t = acpu_freq_tbl_list;
int i;
/* Get PLL clocks */
@@ -817,7 +948,7 @@
}
} else {
/* Select the right table to use. */
- for (t = acpu_freq_tbl_list; t->tbl != 0; t++) {
+ for (; t->tbl != 0; t++) {
if (t->pll0_rate == pll_mhz[ACPU_PLL_0]
&& t->pll1_rate == pll_mhz[ACPU_PLL_1]
&& t->pll2_rate == pll_mhz[ACPU_PLL_2]
@@ -828,6 +959,25 @@
}
}
+ /*
+ * When PLL4 can run max @ 1401.6MHz, we have to support
+ * dynamic reprograming of PLL4.
+ *
+ * Also find the backup pll used during PLL4 reprogramming.
+ * We are using PLL2@600MHz as backup PLL, since 800MHz jump
+ * is fine.
+ */
+ if (t->pll4_rate == 1401) {
+ dynamic_reprogram = 1;
+ for ( ; t->tbl->a11clk_khz; t->tbl++) {
+ if (t->tbl->pll == ACPU_PLL_2 &&
+ t->tbl->a11clk_src_div == 1) {
+ backup_s = t->tbl;
+ break;
+ }
+ }
+ }
+
if (acpu_freq_tbl == NULL) {
pr_crit("Unknown PLL configuration!\n");
BUG();
@@ -988,3 +1138,4 @@
return platform_driver_register(&acpuclk_7627_driver);
}
postcore_initcall(acpuclk_7627_init);
+
diff --git a/arch/arm/mach-msm/acpuclock-8064.c b/arch/arm/mach-msm/acpuclock-8064.c
new file mode 100644
index 0000000..6f9960d
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-8064.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <mach/rpm-regulator.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+
+#include "acpuclock.h"
+#include "acpuclock-krait.h"
+
+static struct hfpll_data hfpll_data __initdata = {
+ .mode_offset = 0x00,
+ .l_offset = 0x08,
+ .m_offset = 0x0C,
+ .n_offset = 0x10,
+ .config_offset = 0x04,
+ .config_val = 0x7845C665,
+ .has_droop_ctl = true,
+ .droop_offset = 0x14,
+ .droop_val = 0x0108C000,
+ .low_vdd_l_max = 22,
+ .nom_vdd_l_max = 42,
+ .vdd[HFPLL_VDD_NONE] = 0,
+ .vdd[HFPLL_VDD_LOW] = 945000,
+ .vdd[HFPLL_VDD_NOM] = 1050000,
+ .vdd[HFPLL_VDD_HIGH] = 1150000,
+};
+
+static struct scalable scalable[] __initdata = {
+ [CPU0] = {
+ .hfpll_phys_base = 0x00903200,
+ .aux_clk_sel_phys = 0x02088014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x4501,
+ .vreg[VREG_CORE] = { "krait0", 1300000 },
+ .vreg[VREG_MEM] = { "krait0_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait0_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
+ },
+ [CPU1] = {
+ .hfpll_phys_base = 0x00903240,
+ .aux_clk_sel_phys = 0x02098014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x5501,
+ .vreg[VREG_CORE] = { "krait1", 1300000 },
+ .vreg[VREG_MEM] = { "krait1_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait1_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
+ },
+ [CPU2] = {
+ .hfpll_phys_base = 0x00903280,
+ .aux_clk_sel_phys = 0x020A8014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x6501,
+ .vreg[VREG_CORE] = { "krait2", 1300000 },
+ .vreg[VREG_MEM] = { "krait2_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait2_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait2_hfpll", 1800000 },
+ },
+ [CPU3] = {
+ .hfpll_phys_base = 0x009032C0,
+ .aux_clk_sel_phys = 0x020B8014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x7501,
+ .vreg[VREG_CORE] = { "krait3", 1300000 },
+ .vreg[VREG_MEM] = { "krait3_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait3_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait3_hfpll", 1800000 },
+ },
+ [L2] = {
+ .hfpll_phys_base = 0x00903300,
+ .aux_clk_sel_phys = 0x02011028,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x0500,
+ .vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
+ },
+};
+
+static struct msm_bus_paths bw_level_tbl[] __initdata = {
+ [0] = BW_MBPS(640), /* At least 80 MHz on bus. */
+ [1] = BW_MBPS(1064), /* At least 133 MHz on bus. */
+ [2] = BW_MBPS(1600), /* At least 200 MHz on bus. */
+ [3] = BW_MBPS(2128), /* At least 266 MHz on bus. */
+ [4] = BW_MBPS(3200), /* At least 400 MHz on bus. */
+ [5] = BW_MBPS(4264), /* At least 533 MHz on bus. */
+};
+
+static struct msm_bus_scale_pdata bus_scale_data __initdata = {
+ .usecase = bw_level_tbl,
+ .num_usecases = ARRAY_SIZE(bw_level_tbl),
+ .active_only = 1,
+ .name = "acpuclk-8064",
+};
+
+static struct l2_level l2_freq_tbl[] __initdata = {
+ [0] = { { 384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
+ [1] = { { 432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 2 },
+ [2] = { { 486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 2 },
+ [3] = { { 540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 2 },
+ [4] = { { 594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
+ [5] = { { 648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 4 },
+ [6] = { { 702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 4 },
+ [7] = { { 756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 4 },
+ [8] = { { 810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 4 },
+ [9] = { { 864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 4 },
+ [10] = { { 918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 5 },
+ [11] = { { 972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 5 },
+ [12] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 5 },
+ [13] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 5 },
+ [14] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 5 },
+};
+
+static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 950000 },
+ { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 975000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 975000 },
+ { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 1000000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 1000000 },
+ { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 1025000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 1025000 },
+ { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(6), 1075000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(6), 1075000 },
+ { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(6), 1100000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(6), 1100000 },
+ { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(6), 1125000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(6), 1125000 },
+ { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(14), 1175000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(14), 1175000 },
+ { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(14), 1200000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(14), 1200000 },
+ { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(14), 1225000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(14), 1225000 },
+ { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(14), 1237500 },
+ { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(14), 1237500 },
+ { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(14), 1250000 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 900000 },
+ { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 925000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 925000 },
+ { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 950000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 950000 },
+ { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 975000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 975000 },
+ { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(6), 1025000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(6), 1025000 },
+ { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(6), 1050000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(6), 1050000 },
+ { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(6), 1075000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(6), 1075000 },
+ { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(14), 1125000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(14), 1125000 },
+ { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(14), 1150000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(14), 1150000 },
+ { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(14), 1175000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(14), 1175000 },
+ { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(14), 1187500 },
+ { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(14), 1187500 },
+ { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(14), 1200000 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 850000 },
+ { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 875000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 875000 },
+ { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 900000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 900000 },
+ { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 925000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 925000 },
+ { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(6), 975000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(6), 975000 },
+ { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(6), 1000000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(6), 1000000 },
+ { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(6), 1025000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(6), 1025000 },
+ { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(14), 1075000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(14), 1075000 },
+ { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(14), 1100000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(14), 1100000 },
+ { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(14), 1125000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(14), 1125000 },
+ { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(14), 1137500 },
+ { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(14), 1137500 },
+ { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(14), 1150000 },
+ { 0, { 0 } }
+};
+
+static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
+[PVS_SLOW] = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow), 0 },
+[PVS_NOMINAL] = { acpu_freq_tbl_nom, sizeof(acpu_freq_tbl_nom), 25000 },
+[PVS_FAST] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
+/* TODO: update the faster table when data is available */
+[PVS_FASTER] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
+};
+
+static struct acpuclk_krait_params acpuclk_8064_params __initdata = {
+ .scalable = scalable,
+ .scalable_size = sizeof(scalable),
+ .hfpll_data = &hfpll_data,
+ .pvs_tables = pvs_tables,
+ .l2_freq_tbl = l2_freq_tbl,
+ .l2_freq_tbl_size = sizeof(l2_freq_tbl),
+ .bus_scale = &bus_scale_data,
+ .qfprom_phys_base = 0x00700000,
+ .stby_khz = 384000,
+};
+
+static int __init acpuclk_8064_probe(struct platform_device *pdev)
+{
+ return acpuclk_krait_init(&pdev->dev, &acpuclk_8064_params);
+}
+
+static struct platform_driver acpuclk_8064_driver = {
+ .driver = {
+ .name = "acpuclk-8064",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init acpuclk_8064_init(void)
+{
+ return platform_driver_probe(&acpuclk_8064_driver,
+ acpuclk_8064_probe);
+}
+device_initcall(acpuclk_8064_init);
diff --git a/arch/arm/mach-msm/acpuclock-8627.c b/arch/arm/mach-msm/acpuclock-8627.c
new file mode 100644
index 0000000..1642dae
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-8627.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <mach/rpm-regulator.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+
+#include "acpuclock.h"
+#include "acpuclock-krait.h"
+
+/* Corner type vreg VDD values */
+#define LVL_NONE RPM_VREG_CORNER_NONE
+#define LVL_LOW RPM_VREG_CORNER_LOW
+#define LVL_NOM RPM_VREG_CORNER_NOMINAL
+#define LVL_HIGH RPM_VREG_CORNER_HIGH
+
+static struct hfpll_data hfpll_data __initdata = {
+ .mode_offset = 0x00,
+ .l_offset = 0x08,
+ .m_offset = 0x0C,
+ .n_offset = 0x10,
+ .config_offset = 0x04,
+ .config_val = 0x7845C665,
+ .has_droop_ctl = true,
+ .droop_offset = 0x14,
+ .droop_val = 0x0108C000,
+ .low_vdd_l_max = 22,
+ .nom_vdd_l_max = 42,
+ .vdd[HFPLL_VDD_NONE] = LVL_NONE,
+ .vdd[HFPLL_VDD_LOW] = LVL_LOW,
+ .vdd[HFPLL_VDD_NOM] = LVL_NOM,
+ .vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
+};
+
+static struct scalable scalable[] __initdata = {
+ [CPU0] = {
+ .hfpll_phys_base = 0x00903200,
+ .aux_clk_sel_phys = 0x02088014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x4501,
+ .vreg[VREG_CORE] = { "krait0", 1300000 },
+ .vreg[VREG_MEM] = { "krait0_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait0_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
+ },
+ [CPU1] = {
+ .hfpll_phys_base = 0x00903300,
+ .aux_clk_sel_phys = 0x02098014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x5501,
+ .vreg[VREG_CORE] = { "krait1", 1300000 },
+ .vreg[VREG_MEM] = { "krait1_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait1_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
+ },
+ [L2] = {
+ .hfpll_phys_base = 0x00903400,
+ .aux_clk_sel_phys = 0x02011028,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x0500,
+ .vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
+ },
+};
+
+static struct msm_bus_paths bw_level_tbl[] __initdata = {
+ [0] = BW_MBPS(640), /* At least 80 MHz on bus. */
+ [1] = BW_MBPS(1064), /* At least 133 MHz on bus. */
+ [2] = BW_MBPS(1600), /* At least 200 MHz on bus. */
+ [3] = BW_MBPS(2128), /* At least 266 MHz on bus. */
+ [4] = BW_MBPS(3200), /* At least 400 MHz on bus. */
+};
+
+static struct msm_bus_scale_pdata bus_scale_data __initdata = {
+ .usecase = bw_level_tbl,
+ .num_usecases = ARRAY_SIZE(bw_level_tbl),
+ .active_only = 1,
+ .name = "acpuclk-8627",
+};
+
+/* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
+static struct l2_level l2_freq_tbl[] __initdata = {
+ [0] = { { 384000, PLL_8, 0, 2, 0x00 }, LVL_NOM, 1050000, 1 },
+ [1] = { { 432000, HFPLL, 2, 0, 0x20 }, LVL_NOM, 1050000, 1 },
+ [2] = { { 486000, HFPLL, 2, 0, 0x24 }, LVL_NOM, 1050000, 1 },
+ [3] = { { 540000, HFPLL, 2, 0, 0x28 }, LVL_NOM, 1050000, 2 },
+ [4] = { { 594000, HFPLL, 1, 0, 0x16 }, LVL_NOM, 1050000, 2 },
+ [5] = { { 648000, HFPLL, 1, 0, 0x18 }, LVL_NOM, 1050000, 2 },
+ [6] = { { 702000, HFPLL, 1, 0, 0x1A }, LVL_NOM, 1050000, 3 },
+ [7] = { { 756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 3 },
+ [8] = { { 810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 3 },
+ [9] = { { 864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
+ [10] = { { 918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 4 },
+ [11] = { { 972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 4 },
+};
+
+/* TODO: Update core voltages when data is available. */
+static struct acpu_level acpu_freq_tbl[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 900000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(4), 925000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(4), 925000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(4), 937500 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(4), 962500 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(8), 987500 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(8), 1000000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(8), 1025000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(8), 1062500 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1062500 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1087500 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(11), 1100000 },
+ { 0, { 0 } }
+};
+
+static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
+ [PVS_SLOW] = { acpu_freq_tbl, sizeof(acpu_freq_tbl), 0 },
+ [PVS_NOMINAL] = { acpu_freq_tbl, sizeof(acpu_freq_tbl), 25000 },
+ [PVS_FAST] = { acpu_freq_tbl, sizeof(acpu_freq_tbl), 25000 },
+};
+
+static struct acpuclk_krait_params acpuclk_8627_params __initdata = {
+ .scalable = scalable,
+ .scalable_size = sizeof(scalable),
+ .hfpll_data = &hfpll_data,
+ .pvs_tables = pvs_tables,
+ .l2_freq_tbl = l2_freq_tbl,
+ .l2_freq_tbl_size = sizeof(l2_freq_tbl),
+ .bus_scale = &bus_scale_data,
+ .qfprom_phys_base = 0x00700000,
+ .stby_khz = 384000,
+};
+
+static int __init acpuclk_8627_probe(struct platform_device *pdev)
+{
+ return acpuclk_krait_init(&pdev->dev, &acpuclk_8627_params);
+}
+
+static struct platform_driver acpuclk_8627_driver = {
+ .driver = {
+ .name = "acpuclk-8627",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init acpuclk_8627_init(void)
+{
+ return platform_driver_probe(&acpuclk_8627_driver,
+ acpuclk_8627_probe);
+}
+device_initcall(acpuclk_8627_init);
diff --git a/arch/arm/mach-msm/acpuclock-8930.c b/arch/arm/mach-msm/acpuclock-8930.c
new file mode 100644
index 0000000..5647d14
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-8930.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <mach/rpm-regulator.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+
+#include "acpuclock.h"
+#include "acpuclock-krait.h"
+
+/* Corner type vreg VDD values */
+#define LVL_NONE RPM_VREG_CORNER_NONE
+#define LVL_LOW RPM_VREG_CORNER_LOW
+#define LVL_NOM RPM_VREG_CORNER_NOMINAL
+#define LVL_HIGH RPM_VREG_CORNER_HIGH
+
+static struct hfpll_data hfpll_data __initdata = {
+ .mode_offset = 0x00,
+ .l_offset = 0x08,
+ .m_offset = 0x0C,
+ .n_offset = 0x10,
+ .config_offset = 0x04,
+ .config_val = 0x7845C665,
+ .has_droop_ctl = true,
+ .droop_offset = 0x14,
+ .droop_val = 0x0108C000,
+ .low_vdd_l_max = 22,
+ .nom_vdd_l_max = 42,
+ .vdd[HFPLL_VDD_NONE] = LVL_NONE,
+ .vdd[HFPLL_VDD_LOW] = LVL_LOW,
+ .vdd[HFPLL_VDD_NOM] = LVL_NOM,
+ .vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
+};
+
+static struct scalable scalable[] __initdata = {
+ [CPU0] = {
+ .hfpll_phys_base = 0x00903200,
+ .aux_clk_sel_phys = 0x02088014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x4501,
+ .vreg[VREG_CORE] = { "krait0", 1300000 },
+ .vreg[VREG_MEM] = { "krait0_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait0_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
+ },
+ [CPU1] = {
+ .hfpll_phys_base = 0x00903300,
+ .aux_clk_sel_phys = 0x02098014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x5501,
+ .vreg[VREG_CORE] = { "krait1", 1300000 },
+ .vreg[VREG_MEM] = { "krait1_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait1_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
+ },
+ [L2] = {
+ .hfpll_phys_base = 0x00903400,
+ .aux_clk_sel_phys = 0x02011028,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x0500,
+ .vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
+ },
+};
+
+static struct msm_bus_paths bw_level_tbl[] __initdata = {
+ [0] = BW_MBPS(640), /* At least 80 MHz on bus. */
+ [1] = BW_MBPS(1064), /* At least 133 MHz on bus. */
+ [2] = BW_MBPS(1600), /* At least 200 MHz on bus. */
+ [3] = BW_MBPS(2128), /* At least 266 MHz on bus. */
+ [4] = BW_MBPS(3200), /* At least 400 MHz on bus. */
+ [5] = BW_MBPS(3600), /* At least 450 MHz on bus. */
+ [6] = BW_MBPS(3936), /* At least 492 MHz on bus. */
+ [7] = BW_MBPS(4264), /* At least 533 MHz on bus. */
+};
+
+static struct msm_bus_scale_pdata bus_scale_data __initdata = {
+ .usecase = bw_level_tbl,
+ .num_usecases = ARRAY_SIZE(bw_level_tbl),
+ .active_only = 1,
+ .name = "acpuclk-8930",
+};
+
+/* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
+static struct l2_level l2_freq_tbl[] __initdata = {
+ [0] = { { 384000, PLL_8, 0, 2, 0x00 }, LVL_NOM, 1050000, 1 },
+ [1] = { { 432000, HFPLL, 2, 0, 0x20 }, LVL_NOM, 1050000, 2 },
+ [2] = { { 486000, HFPLL, 2, 0, 0x24 }, LVL_NOM, 1050000, 2 },
+ [3] = { { 540000, HFPLL, 2, 0, 0x28 }, LVL_NOM, 1050000, 2 },
+ [4] = { { 594000, HFPLL, 1, 0, 0x16 }, LVL_NOM, 1050000, 2 },
+ [5] = { { 648000, HFPLL, 1, 0, 0x18 }, LVL_NOM, 1050000, 4 },
+ [6] = { { 702000, HFPLL, 1, 0, 0x1A }, LVL_NOM, 1050000, 4 },
+ [7] = { { 756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 4 },
+ [8] = { { 810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 4 },
+ [9] = { { 864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
+ [10] = { { 918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 7 },
+ [11] = { { 972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 7 },
+ [12] = { { 1026000, HFPLL, 1, 0, 0x26 }, LVL_HIGH, 1150000, 7 },
+ [13] = { { 1080000, HFPLL, 1, 0, 0x28 }, LVL_HIGH, 1150000, 7 },
+ [14] = { { 1134000, HFPLL, 1, 0, 0x2A }, LVL_HIGH, 1150000, 7 },
+ [15] = { { 1188000, HFPLL, 1, 0, 0x2C }, LVL_HIGH, 1150000, 7 },
+};
+
+static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 950000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 975000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 975000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 1000000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 1000000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(5), 1025000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(5), 1025000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(10), 1075000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(10), 1075000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(10), 1100000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(10), 1100000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(10), 1125000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1125000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1175000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1175000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1200000 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 925000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 950000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 950000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 975000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 975000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(5), 1000000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(5), 1000000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(10), 1050000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(10), 1050000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(10), 1075000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(10), 1075000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(10), 1100000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1100000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1150000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1150000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1175000 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 900000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 900000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 900000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 925000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 925000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(5), 950000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(5), 950000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(10), 1000000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(10), 1000000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(10), 1025000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(10), 1025000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(10), 1050000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1050000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1100000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1100000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1125000 },
+ { 0, { 0 } }
+};
+
+static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
+[PVS_SLOW] = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow), 0 },
+[PVS_NOMINAL] = { acpu_freq_tbl_nom, sizeof(acpu_freq_tbl_nom), 25000 },
+[PVS_FAST] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
+};
+
+static struct acpuclk_krait_params acpuclk_8930_params __initdata = {
+ .scalable = scalable,
+ .scalable_size = sizeof(scalable),
+ .hfpll_data = &hfpll_data,
+ .pvs_tables = pvs_tables,
+ .l2_freq_tbl = l2_freq_tbl,
+ .l2_freq_tbl_size = sizeof(l2_freq_tbl),
+ .bus_scale = &bus_scale_data,
+ .qfprom_phys_base = 0x00700000,
+ .stby_khz = 384000,
+};
+
+static int __init acpuclk_8930_probe(struct platform_device *pdev)
+{
+ return acpuclk_krait_init(&pdev->dev, &acpuclk_8930_params);
+}
+
+static struct platform_driver acpuclk_8930_driver = {
+ .driver = {
+ .name = "acpuclk-8930",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init acpuclk_8930_init(void)
+{
+ return platform_driver_probe(&acpuclk_8930_driver,
+ acpuclk_8930_probe);
+}
+device_initcall(acpuclk_8930_init);
diff --git a/arch/arm/mach-msm/acpuclock-8930aa.c b/arch/arm/mach-msm/acpuclock-8930aa.c
new file mode 100644
index 0000000..34ba1da
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-8930aa.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <mach/rpm-regulator.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+
+#include "acpuclock.h"
+#include "acpuclock-krait.h"
+
+/* Corner type vreg VDD values */
+#define LVL_NONE RPM_VREG_CORNER_NONE
+#define LVL_LOW RPM_VREG_CORNER_LOW
+#define LVL_NOM RPM_VREG_CORNER_NOMINAL
+#define LVL_HIGH RPM_VREG_CORNER_HIGH
+
+static struct hfpll_data hfpll_data __initdata = {
+ .mode_offset = 0x00,
+ .l_offset = 0x08,
+ .m_offset = 0x0C,
+ .n_offset = 0x10,
+ .config_offset = 0x04,
+ .config_val = 0x7845C665,
+ .has_droop_ctl = true,
+ .droop_offset = 0x14,
+ .droop_val = 0x0108C000,
+ .low_vdd_l_max = 22,
+ .nom_vdd_l_max = 42,
+ .vdd[HFPLL_VDD_NONE] = LVL_NONE,
+ .vdd[HFPLL_VDD_LOW] = LVL_LOW,
+ .vdd[HFPLL_VDD_NOM] = LVL_NOM,
+ .vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
+};
+
+static struct scalable scalable[] __initdata = {
+ [CPU0] = {
+ .hfpll_phys_base = 0x00903200,
+ .aux_clk_sel_phys = 0x02088014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x4501,
+ .vreg[VREG_CORE] = { "krait0", 1300000 },
+ .vreg[VREG_MEM] = { "krait0_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait0_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
+ },
+ [CPU1] = {
+ .hfpll_phys_base = 0x00903300,
+ .aux_clk_sel_phys = 0x02098014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x5501,
+ .vreg[VREG_CORE] = { "krait1", 1300000 },
+ .vreg[VREG_MEM] = { "krait1_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait1_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
+ },
+ [L2] = {
+ .hfpll_phys_base = 0x00903400,
+ .aux_clk_sel_phys = 0x02011028,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x0500,
+ .vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
+ },
+};
+
+static struct msm_bus_paths bw_level_tbl[] __initdata = {
+ [0] = BW_MBPS(640), /* At least 80 MHz on bus. */
+ [1] = BW_MBPS(1064), /* At least 133 MHz on bus. */
+ [2] = BW_MBPS(1600), /* At least 200 MHz on bus. */
+ [3] = BW_MBPS(2128), /* At least 266 MHz on bus. */
+ [4] = BW_MBPS(3200), /* At least 400 MHz on bus. */
+ [5] = BW_MBPS(3600), /* At least 450 MHz on bus. */
+ [6] = BW_MBPS(3936), /* At least 492 MHz on bus. */
+ [7] = BW_MBPS(4264), /* At least 533 MHz on bus. */
+};
+
+static struct msm_bus_scale_pdata bus_scale_data __initdata = {
+ .usecase = bw_level_tbl,
+ .num_usecases = ARRAY_SIZE(bw_level_tbl),
+ .active_only = 1,
+ .name = "acpuclk-8930aa",
+};
+
+/* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
+static struct l2_level l2_freq_tbl[] __initdata = {
+ [0] = { { 384000, PLL_8, 0, 2, 0x00 }, LVL_NOM, 1050000, 1 },
+ [1] = { { 432000, HFPLL, 2, 0, 0x20 }, LVL_NOM, 1050000, 2 },
+ [2] = { { 486000, HFPLL, 2, 0, 0x24 }, LVL_NOM, 1050000, 2 },
+ [3] = { { 540000, HFPLL, 2, 0, 0x28 }, LVL_NOM, 1050000, 2 },
+ [4] = { { 594000, HFPLL, 1, 0, 0x16 }, LVL_NOM, 1050000, 2 },
+ [5] = { { 648000, HFPLL, 1, 0, 0x18 }, LVL_NOM, 1050000, 4 },
+ [6] = { { 702000, HFPLL, 1, 0, 0x1A }, LVL_NOM, 1050000, 4 },
+ [7] = { { 756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 4 },
+ [8] = { { 810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 4 },
+ [9] = { { 864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
+ [10] = { { 918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 7 },
+ [11] = { { 972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 7 },
+ [12] = { { 1026000, HFPLL, 1, 0, 0x26 }, LVL_HIGH, 1150000, 7 },
+ [13] = { { 1080000, HFPLL, 1, 0, 0x28 }, LVL_HIGH, 1150000, 7 },
+ [14] = { { 1134000, HFPLL, 1, 0, 0x2A }, LVL_HIGH, 1150000, 7 },
+ [15] = { { 1188000, HFPLL, 1, 0, 0x2C }, LVL_HIGH, 1150000, 7 },
+};
+
+static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 950000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 975000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 975000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 1000000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 1000000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(5), 1025000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(5), 1025000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(10), 1075000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(10), 1075000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(10), 1100000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(10), 1100000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(10), 1125000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1125000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1175000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1175000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1200000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1200000 },
+ { 1, { 1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1225000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1225000 },
+ { 1, { 1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1237500 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 925000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 950000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 950000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 975000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 975000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(5), 1000000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(5), 1000000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(10), 1050000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(10), 1050000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(10), 1075000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(10), 1075000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(10), 1100000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1100000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1150000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1150000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1175000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1175000 },
+ { 1, { 1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1200000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1200000 },
+ { 1, { 1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1212500 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 900000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 900000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 900000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 925000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 925000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(5), 950000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(5), 950000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(10), 1000000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(10), 1000000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(10), 1025000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(10), 1025000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(10), 1050000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1050000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1100000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1100000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1125000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1125000 },
+ { 1, { 1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1150000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1150000 },
+ { 1, { 1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1162500 },
+ { 0, { 0 } }
+};
+
+static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
+[PVS_SLOW] = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow), 0 },
+[PVS_NOMINAL] = { acpu_freq_tbl_nom, sizeof(acpu_freq_tbl_nom), 25000 },
+[PVS_FAST] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
+};
+
+static struct acpuclk_krait_params acpuclk_8930aa_params __initdata = {
+ .scalable = scalable,
+ .scalable_size = sizeof(scalable),
+ .hfpll_data = &hfpll_data,
+ .pvs_tables = pvs_tables,
+ .l2_freq_tbl = l2_freq_tbl,
+ .l2_freq_tbl_size = sizeof(l2_freq_tbl),
+ .bus_scale = &bus_scale_data,
+ .qfprom_phys_base = 0x00700000,
+ .stby_khz = 384000,
+};
+
+static int __init acpuclk_8930aa_probe(struct platform_device *pdev)
+{
+ return acpuclk_krait_init(&pdev->dev, &acpuclk_8930aa_params);
+}
+
+static struct platform_driver acpuclk_8930aa_driver = {
+ .driver = {
+ .name = "acpuclk-8930aa",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init acpuclk_8930aa_init(void)
+{
+ return platform_driver_probe(&acpuclk_8930aa_driver,
+ acpuclk_8930aa_probe);
+}
+device_initcall(acpuclk_8930aa_init);
diff --git a/arch/arm/mach-msm/acpuclock-8960.c b/arch/arm/mach-msm/acpuclock-8960.c
index 7f198d2..8cc4b13 100644
--- a/arch/arm/mach-msm/acpuclock-8960.c
+++ b/arch/arm/mach-msm/acpuclock-8960.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+/*
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,413 +11,68 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/cpufreq.h>
-#include <linux/cpu.h>
-#include <linux/regulator/consumer.h>
#include <linux/platform_device.h>
-
-#include <asm/mach-types.h>
-#include <asm/cpu.h>
-
-#include <mach/board.h>
-#include <mach/msm_iomap.h>
#include <mach/rpm-regulator.h>
-#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
-#include <mach/socinfo.h>
-#include <mach/msm-krait-l2-accessors.h>
-#include <mach/rpm-regulator.h>
+#include <mach/msm_bus.h>
#include "acpuclock.h"
-#include "pm.h"
+#include "acpuclock-krait.h"
-/*
- * Source IDs.
- * These must be negative to not overlap with the source IDs
- * used by the 8x60 local clock driver.
- */
-#define PLL_8 0
-#define HFPLL -1
-#define QSB -2
-
-/* Mux source selects. */
-#define PRI_SRC_SEL_SEC_SRC 0
-#define PRI_SRC_SEL_HFPLL 1
-#define PRI_SRC_SEL_HFPLL_DIV2 2
-#define SEC_SRC_SEL_QSB 0
-#define SEC_SRC_SEL_AUX 2
-
-/* HFPLL registers offsets. */
-#define HFPLL_MODE 0x00
-#define HFPLL_CONFIG_CTL 0x04
-#define HFPLL_L_VAL 0x08
-#define HFPLL_M_VAL 0x0C
-#define HFPLL_N_VAL 0x10
-#define HFPLL_DROOP_CTL 0x14
-
-/* CP15 L2 indirect addresses. */
-#define L2CPMR_IADDR 0x500
-#define L2CPUCPMR_IADDR 0x501
-
-#define STBY_KHZ 1
-
-#define HFPLL_LOW_VDD_PLL_L_MAX 0x28
-
-#define SECCLKAGD BIT(4)
-
-/* PTE EFUSE register. */
-#define QFPROM_PTE_EFUSE_ADDR (MSM_QFPROM_BASE + 0x00C0)
-
-/* Corner type vreg VDD values */
-#define LVL_NONE RPM_VREG_CORNER_NONE
-#define LVL_LOW RPM_VREG_CORNER_LOW
-#define LVL_NOM RPM_VREG_CORNER_NOMINAL
-#define LVL_HIGH RPM_VREG_CORNER_HIGH
-
-enum scalables {
- CPU0 = 0,
- CPU1,
- CPU2,
- CPU3,
- L2,
- NUM_SCALABLES
+static struct hfpll_data hfpll_data __initdata = {
+ .mode_offset = 0x00,
+ .l_offset = 0x08,
+ .m_offset = 0x0C,
+ .n_offset = 0x10,
+ .config_offset = 0x04,
+ .config_val = 0x7845C665,
+ .has_droop_ctl = true,
+ .droop_offset = 0x14,
+ .droop_val = 0x0108C000,
+ .low_vdd_l_max = 22,
+ .nom_vdd_l_max = 42,
+ .vdd[HFPLL_VDD_NONE] = 0,
+ .vdd[HFPLL_VDD_LOW] = 945000,
+ .vdd[HFPLL_VDD_NOM] = 1050000,
+ .vdd[HFPLL_VDD_HIGH] = 1150000,
};
-enum vregs {
- VREG_CORE,
- VREG_MEM,
- VREG_DIG,
- VREG_HFPLL_A,
- VREG_HFPLL_B,
- NUM_VREG
-};
-
-enum hfpll_vdd_levels {
- HFPLL_VDD_NONE,
- HFPLL_VDD_LOW,
- HFPLL_VDD_NOM
-};
-
-enum pvs {
- PVS_SLOW,
- PVS_NOM,
- PVS_FAST,
- PVS_FASTER,
- NUM_PVS
-};
-
-struct vreg {
- const char name[15];
- const unsigned int max_vdd;
- const int rpm_vreg_voter;
- const int rpm_vreg_id;
- struct regulator *reg;
- unsigned int cur_vdd;
-};
-
-struct core_speed {
- unsigned int khz;
- int src;
- unsigned int pri_src_sel;
- unsigned int sec_src_sel;
- unsigned int pll_l_val;
-};
-
-struct l2_level {
- struct core_speed speed;
- unsigned int vdd_dig;
- unsigned int vdd_mem;
- unsigned int bw_level;
-};
-
-struct acpu_level {
- unsigned int use_for_scaling;
- struct core_speed speed;
- struct l2_level *l2_level;
- unsigned int vdd_core;
-};
-
-struct scalable {
- void * __iomem const hfpll_base;
- void * __iomem const aux_clk_sel;
- const uint32_t l2cpmr_iaddr;
- struct core_speed *current_speed;
- struct l2_level *l2_vote;
- struct vreg vreg[NUM_VREG];
- unsigned int *hfpll_vdd_tbl;
- bool regulators_initialized;
- bool clocks_initialized;
-};
-
-static unsigned int hfpll_vdd_tbl_8960[] = {
- [HFPLL_VDD_NONE] = 0,
- [HFPLL_VDD_LOW] = 850000,
- [HFPLL_VDD_NOM] = 1050000
-};
-
-static unsigned int hfpll_vdd_tbl_8064[] = {
- [HFPLL_VDD_NONE] = 0,
- [HFPLL_VDD_LOW] = 945000,
- [HFPLL_VDD_NOM] = 1050000
-};
-
-static unsigned int hfpll_vdd_dig_tbl_8930[] = {
- [HFPLL_VDD_NONE] = LVL_NONE,
- [HFPLL_VDD_LOW] = LVL_LOW,
- [HFPLL_VDD_NOM] = LVL_NOM
-};
-
-static struct scalable scalable_8960[] = {
+static struct scalable scalable[] __initdata = {
[CPU0] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x200,
- .aux_clk_sel = MSM_ACC0_BASE + 0x014,
- .l2cpmr_iaddr = L2CPUCPMR_IADDR,
- .vreg[VREG_CORE] = { "krait0", 1300000 },
- .vreg[VREG_MEM] = { "krait0_mem", 1150000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8921_L24 },
- .vreg[VREG_DIG] = { "krait0_dig", 1150000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8921_S3 },
- .vreg[VREG_HFPLL_A] = { "hfpll0_s8", 2100000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8921_S8 },
- .vreg[VREG_HFPLL_B] = { "hfpll0_l23", 1800000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8921_L23 },
- },
+ .hfpll_phys_base = 0x00903200,
+ .aux_clk_sel_phys = 0x02088014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x4501,
+ .vreg[VREG_CORE] = { "krait0", 1300000 },
+ .vreg[VREG_MEM] = { "krait0_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait0_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait0_s8", 2050000 },
+ .vreg[VREG_HFPLL_B] = { "krait0_l23", 1800000 },
+ },
[CPU1] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x300,
- .aux_clk_sel = MSM_ACC1_BASE + 0x014,
- .l2cpmr_iaddr = L2CPUCPMR_IADDR,
- .vreg[VREG_CORE] = { "krait1", 1300000 },
- .vreg[VREG_MEM] = { "krait1_mem", 1150000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8921_L24 },
- .vreg[VREG_DIG] = { "krait1_dig", 1150000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8921_S3 },
- .vreg[VREG_HFPLL_A] = { "hfpll1_s8", 2100000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8921_S8 },
- .vreg[VREG_HFPLL_B] = { "hfpll1_l23", 1800000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8921_L23 },
- },
+ .hfpll_phys_base = 0x00903300,
+ .aux_clk_sel_phys = 0x02098014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x5501,
+ .vreg[VREG_CORE] = { "krait1", 1300000 },
+ .vreg[VREG_MEM] = { "krait1_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait1_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait1_s8", 2050000 },
+ .vreg[VREG_HFPLL_B] = { "krait1_l23", 1800000 },
+ },
[L2] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x400,
- .hfpll_vdd_tbl = hfpll_vdd_tbl_8960,
- .aux_clk_sel = MSM_APCS_GCC_BASE + 0x028,
- .l2cpmr_iaddr = L2CPMR_IADDR,
- .vreg[VREG_HFPLL_A] = { "hfpll_l2_s8", 2100000,
- RPM_VREG_VOTER6,
- RPM_VREG_ID_PM8921_S8 },
- .vreg[VREG_HFPLL_B] = { "hfpll_l2_l23", 1800000,
- RPM_VREG_VOTER6,
- RPM_VREG_ID_PM8921_L23 },
- },
+ .hfpll_phys_base = 0x00903400,
+ .aux_clk_sel_phys = 0x02011028,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x0500,
+ .vreg[VREG_HFPLL_A] = { "l2_s8", 2050000 },
+ .vreg[VREG_HFPLL_B] = { "l2_l23", 1800000 },
+ },
};
-static DEFINE_MUTEX(driver_lock);
-static DEFINE_SPINLOCK(l2_lock);
-
-static struct scalable scalable_8064[] = {
- [CPU0] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x200,
- .aux_clk_sel = MSM_ACC0_BASE + 0x014,
- .l2cpmr_iaddr = L2CPUCPMR_IADDR,
- .vreg[VREG_CORE] = { "krait0", 1300000 },
- .vreg[VREG_MEM] = { "krait0_mem", 1150000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8921_L24 },
- .vreg[VREG_DIG] = { "krait0_dig", 1150000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8921_S3 },
- .vreg[VREG_HFPLL_B] = { "hfpll0", 1800000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8921_LVS7 },
- },
- [CPU1] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x240,
- .aux_clk_sel = MSM_ACC1_BASE + 0x014,
- .l2cpmr_iaddr = L2CPUCPMR_IADDR,
- .vreg[VREG_CORE] = { "krait1", 1300000 },
- .vreg[VREG_MEM] = { "krait1_mem", 1150000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8921_L24 },
- .vreg[VREG_DIG] = { "krait1_dig", 1150000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8921_S3 },
- .vreg[VREG_HFPLL_B] = { "hfpll1", 1800000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8921_LVS7 },
- },
- [CPU2] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x280,
- .aux_clk_sel = MSM_ACC2_BASE + 0x014,
- .l2cpmr_iaddr = L2CPUCPMR_IADDR,
- .vreg[VREG_CORE] = { "krait2", 1300000 },
- .vreg[VREG_MEM] = { "krait2_mem", 1150000,
- RPM_VREG_VOTER4,
- RPM_VREG_ID_PM8921_L24 },
- .vreg[VREG_DIG] = { "krait2_dig", 1150000,
- RPM_VREG_VOTER4,
- RPM_VREG_ID_PM8921_S3 },
- .vreg[VREG_HFPLL_B] = { "hfpll2", 1800000,
- RPM_VREG_VOTER4,
- RPM_VREG_ID_PM8921_LVS7 },
- },
- [CPU3] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x2C0,
- .aux_clk_sel = MSM_ACC3_BASE + 0x014,
- .l2cpmr_iaddr = L2CPUCPMR_IADDR,
- .vreg[VREG_CORE] = { "krait3", 1300000 },
- .vreg[VREG_MEM] = { "krait3_mem", 1150000,
- RPM_VREG_VOTER5,
- RPM_VREG_ID_PM8921_L24 },
- .vreg[VREG_DIG] = { "krait3_dig", 1150000,
- RPM_VREG_VOTER5,
- RPM_VREG_ID_PM8921_S3 },
- .vreg[VREG_HFPLL_B] = { "hfpll3", 1800000,
- RPM_VREG_VOTER5,
- RPM_VREG_ID_PM8921_LVS7 },
- },
- [L2] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x300,
- .hfpll_vdd_tbl = hfpll_vdd_tbl_8064,
- .aux_clk_sel = MSM_APCS_GCC_BASE + 0x028,
- .l2cpmr_iaddr = L2CPMR_IADDR,
- .vreg[VREG_HFPLL_B] = { "hfpll_l2", 1800000,
- RPM_VREG_VOTER6,
- RPM_VREG_ID_PM8921_LVS7 },
- },
-};
-
-static struct scalable scalable_8930[] = {
- [CPU0] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x200,
- .aux_clk_sel = MSM_ACC0_BASE + 0x014,
- .l2cpmr_iaddr = L2CPUCPMR_IADDR,
- .vreg[VREG_CORE] = { "krait0", 1300000 },
- .vreg[VREG_MEM] = { "krait0_mem", 1150000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8038_L24 },
- .vreg[VREG_DIG] = { "krait0_dig", LVL_HIGH,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8038_VDD_DIG_CORNER
- },
- .vreg[VREG_HFPLL_B] = { "hfpll0", 1800000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8038_L23 },
- },
- [CPU1] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x300,
- .aux_clk_sel = MSM_ACC1_BASE + 0x014,
- .l2cpmr_iaddr = L2CPUCPMR_IADDR,
- .vreg[VREG_CORE] = { "krait1", 1300000 },
- .vreg[VREG_MEM] = { "krait1_mem", 1150000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8038_L24 },
- .vreg[VREG_DIG] = { "krait1_dig", LVL_HIGH,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8038_VDD_DIG_CORNER
- },
- .vreg[VREG_HFPLL_B] = { "hfpll1", 1800000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8038_L23 },
- },
- [L2] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x400,
- .hfpll_vdd_tbl = hfpll_vdd_dig_tbl_8930,
- .aux_clk_sel = MSM_APCS_GCC_BASE + 0x028,
- .l2cpmr_iaddr = L2CPMR_IADDR,
- .vreg[VREG_HFPLL_B] = { "hfpll_l2", 1800000,
- RPM_VREG_VOTER6,
- RPM_VREG_ID_PM8038_L23 },
- },
-};
-
-/*TODO: Update the rpm vreg id when the rpm driver is ready */
-static struct scalable scalable_8627[] = {
- [CPU0] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x200,
- .aux_clk_sel = MSM_ACC0_BASE + 0x014,
- .l2cpmr_iaddr = L2CPUCPMR_IADDR,
- .vreg[VREG_CORE] = { "krait0", 1300000 },
- .vreg[VREG_MEM] = { "krait0_mem", 1150000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8038_L24 },
- .vreg[VREG_DIG] = { "krait0_dig", LVL_HIGH,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8038_VDD_DIG_CORNER
- },
- .vreg[VREG_HFPLL_B] = { "hfpll0", 1800000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8038_L23 },
- },
- [CPU1] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x300,
- .aux_clk_sel = MSM_ACC1_BASE + 0x014,
- .l2cpmr_iaddr = L2CPUCPMR_IADDR,
- .vreg[VREG_CORE] = { "krait1", 1300000 },
- .vreg[VREG_MEM] = { "krait1_mem", 1150000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8038_L24 },
- .vreg[VREG_DIG] = { "krait1_dig", LVL_HIGH,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8038_VDD_DIG_CORNER
- },
- .vreg[VREG_HFPLL_B] = { "hfpll1", 1800000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8038_L23 },
- },
- [L2] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x400,
- .hfpll_vdd_tbl = hfpll_vdd_dig_tbl_8930,
- .aux_clk_sel = MSM_APCS_GCC_BASE + 0x028,
- .l2cpmr_iaddr = L2CPMR_IADDR,
- .vreg[VREG_HFPLL_B] = { "hfpll_l2", 1800000,
- RPM_VREG_VOTER6,
- RPM_VREG_ID_PM8038_L23 },
- },
-};
-
-static struct l2_level *l2_freq_tbl;
-static struct acpu_level *acpu_freq_tbl;
-static int l2_freq_tbl_size;
-static struct scalable *scalable;
-#define SCALABLE_TO_CPU(sc) ((sc) - scalable)
-
-/* Instantaneous bandwidth requests in MB/s. */
-#define BW_MBPS(_bw) \
- { \
- .vectors = (struct msm_bus_vectors[]){ \
- {\
- .src = MSM_BUS_MASTER_AMPSS_M0, \
- .dst = MSM_BUS_SLAVE_EBI_CH0, \
- .ib = (_bw) * 1000000UL, \
- .ab = (_bw) * 100000UL, \
- }, \
- { \
- .src = MSM_BUS_MASTER_AMPSS_M1, \
- .dst = MSM_BUS_SLAVE_EBI_CH0, \
- .ib = (_bw) * 1000000UL, \
- .ab = (_bw) * 100000UL, \
- }, \
- }, \
- .num_paths = 2, \
- }
-static struct msm_bus_paths bw_level_tbl[] = {
+static struct msm_bus_paths bw_level_tbl[] __initdata = {
[0] = BW_MBPS(640), /* At least 80 MHz on bus. */
[1] = BW_MBPS(1064), /* At least 133 MHz on bus. */
[2] = BW_MBPS(1600), /* At least 200 MHz on bus. */
@@ -424,1243 +80,136 @@
[4] = BW_MBPS(3200), /* At least 400 MHz on bus. */
[5] = BW_MBPS(3600), /* At least 450 MHz on bus. */
[6] = BW_MBPS(3936), /* At least 492 MHz on bus. */
- [7] = BW_MBPS(4264), /* At least 533 MHz on bus. */
};
-static struct msm_bus_scale_pdata bus_client_pdata = {
+static struct msm_bus_scale_pdata bus_scale_data __initdata = {
.usecase = bw_level_tbl,
.num_usecases = ARRAY_SIZE(bw_level_tbl),
.active_only = 1,
- .name = "acpuclock",
+ .name = "acpuclk-8960",
};
-static uint32_t bus_perf_client;
-
-/* TODO: Update vdd_dig and vdd_mem when voltage data is available. */
-#define L2(x) (&l2_freq_tbl_8960_kraitv1[(x)])
-static struct l2_level l2_freq_tbl_8960_kraitv1[] = {
- [0] = { {STBY_KHZ, QSB, 0, 0, 0x00 }, 1050000, 1050000, 0 },
- [1] = { { 384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
- [2] = { { 432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 1 },
- [3] = { { 486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 1 },
- [4] = { { 540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 1 },
- [5] = { { 594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
- [6] = { { 648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 2 },
- [7] = { { 702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 2 },
- [8] = { { 756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 2 },
- [9] = { { 810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 3 },
- [10] = { { 864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 3 },
- [11] = { { 918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 3 },
+static struct l2_level l2_freq_tbl[] __initdata = {
+ [0] = { { 384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
+ [1] = { { 432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 2 },
+ [2] = { { 486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 2 },
+ [3] = { { 540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 2 },
+ [4] = { { 594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
+ [5] = { { 648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 4 },
+ [6] = { { 702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 4 },
+ [7] = { { 756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 4 },
+ [8] = { { 810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 4 },
+ [9] = { { 864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 4 },
+ [10] = { { 918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 6 },
+ [11] = { { 972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 6 },
+ [12] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 6 },
+ [13] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 6 },
+ [14] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 6 },
+ [15] = { { 1188000, HFPLL, 1, 0, 0x2C }, 1150000, 1150000, 6 },
+ [16] = { { 1242000, HFPLL, 1, 0, 0x2E }, 1150000, 1150000, 6 },
+ [17] = { { 1296000, HFPLL, 1, 0, 0x30 }, 1150000, 1150000, 6 },
+ [18] = { { 1350000, HFPLL, 1, 0, 0x32 }, 1150000, 1150000, 6 },
};
-static struct acpu_level acpu_freq_tbl_8960_kraitv1_slow[] = {
- { 0, {STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 900000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 900000 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 925000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 925000 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 937500 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 962500 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 987500 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 1000000 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1025000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1062500 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1062500 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1087500 },
- { 0, { 0 } }
-};
-
-static struct acpu_level acpu_freq_tbl_8960_kraitv1_nom_fast[] = {
- { 0, {STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 862500 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 862500 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 862500 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 887500 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 900000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 925000 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 925000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 937500 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 962500 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1012500 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1025000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1025000 },
- { 0, { 0 } }
-};
-
-#undef L2
-#define L2(x) (&l2_freq_tbl_8960_kraitv2[(x)])
-static struct l2_level l2_freq_tbl_8960_kraitv2[] = {
- [0] = { {STBY_KHZ, QSB, 0, 0, 0x00 }, 1050000, 1050000, 0 },
- [1] = { { 384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
- [2] = { { 432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 2 },
- [3] = { { 486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 2 },
- [4] = { { 540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 2 },
- [5] = { { 594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
- [6] = { { 648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 4 },
- [7] = { { 702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 4 },
- [8] = { { 756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 4 },
- [9] = { { 810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 4 },
- [10] = { { 864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 4 },
- [11] = { { 918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 6 },
- [12] = { { 972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 6 },
- [13] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 6 },
- [14] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 6 },
- [15] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 6 },
- [16] = { { 1188000, HFPLL, 1, 0, 0x2C }, 1150000, 1150000, 6 },
- [17] = { { 1242000, HFPLL, 1, 0, 0x2E }, 1150000, 1150000, 6 },
- [18] = { { 1296000, HFPLL, 1, 0, 0x30 }, 1150000, 1150000, 6 },
- [19] = { { 1350000, HFPLL, 1, 0, 0x32 }, 1150000, 1150000, 6 },
-};
-
-static struct acpu_level acpu_freq_tbl_8960_kraitv2_slow[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 950000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 950000 },
- { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(7), 975000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(7), 975000 },
- { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(7), 1000000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(7), 1000000 },
- { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(7), 1025000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(7), 1025000 },
- { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(7), 1075000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(7), 1075000 },
- { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(7), 1100000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(7), 1100000 },
- { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(7), 1125000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(7), 1125000 },
- { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(19), 1175000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(19), 1175000 },
- { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(19), 1200000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(19), 1200000 },
- { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(19), 1225000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(19), 1225000 },
- { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(19), 1237500 },
- { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(19), 1237500 },
- { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(19), 1250000 },
- { 0, { 0 } }
-};
-
-static struct acpu_level acpu_freq_tbl_8960_kraitv2_nom[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 900000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 900000 },
- { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(7), 925000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(7), 925000 },
- { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(7), 950000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(7), 950000 },
- { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(7), 975000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(7), 975000 },
- { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(7), 1025000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(7), 1025000 },
- { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(7), 1050000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(7), 1050000 },
- { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(7), 1075000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(7), 1075000 },
- { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(19), 1125000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(19), 1125000 },
- { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(19), 1150000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(19), 1150000 },
- { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(19), 1175000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(19), 1175000 },
- { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(19), 1187500 },
- { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(19), 1187500 },
- { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(19), 1200000 },
- { 0, { 0 } }
-};
-
-static struct acpu_level acpu_freq_tbl_8960_kraitv2_fast[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 850000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 850000 },
- { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(7), 875000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(7), 875000 },
- { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(7), 900000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(7), 900000 },
- { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(7), 925000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(7), 925000 },
- { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(7), 975000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(7), 975000 },
- { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(7), 1000000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(7), 1000000 },
- { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(7), 1025000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(7), 1025000 },
- { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(19), 1075000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(19), 1075000 },
- { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(19), 1100000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(19), 1100000 },
- { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(19), 1125000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(19), 1125000 },
- { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(19), 1137500 },
- { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(19), 1137500 },
- { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(19), 1150000 },
- { 0, { 0 } }
-};
-
-/* TODO: Update vdd_dig and vdd_mem when voltage data is available. */
-#undef L2
-#define L2(x) (&l2_freq_tbl_8064[(x)])
-static struct l2_level l2_freq_tbl_8064[] = {
- [0] = { {STBY_KHZ, QSB, 0, 0, 0x00 }, 1050000, 1050000, 0 },
- [1] = { { 384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
- [2] = { { 432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 2 },
- [3] = { { 486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 2 },
- [4] = { { 540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 2 },
- [5] = { { 594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
- [6] = { { 648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 4 },
- [7] = { { 702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 4 },
- [8] = { { 756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 4 },
- [9] = { { 810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 4 },
- [10] = { { 864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 4 },
- [11] = { { 918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 7 },
- [12] = { { 972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 7 },
- [13] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 7 },
- [14] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 7 },
- [15] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 7 },
-};
-
-/* TODO: Update core voltages when data is available. */
-static struct acpu_level acpu_freq_tbl_8064_slow[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 950000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 950000 },
- { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(7), 975000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(7), 975000 },
- { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(7), 1000000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(7), 1000000 },
- { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(7), 1025000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(7), 1025000 },
- { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(7), 1075000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(7), 1075000 },
- { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(7), 1100000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(7), 1100000 },
- { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(7), 1125000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(7), 1125000 },
- { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1175000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1175000 },
- { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1200000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1200000 },
- { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1225000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1225000 },
- { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1237500 },
- { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(15), 1237500 },
- { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(15), 1250000 },
- { 0, { 0 } }
-};
-
-static struct acpu_level acpu_freq_tbl_8064_nom[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 900000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 900000 },
- { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(7), 925000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(7), 925000 },
- { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(7), 950000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(7), 950000 },
- { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(7), 975000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(7), 975000 },
- { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(7), 1025000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(7), 1025000 },
- { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(7), 1050000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(7), 1050000 },
- { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(7), 1075000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(7), 1075000 },
- { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1125000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1125000 },
- { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1150000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1150000 },
- { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1175000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1175000 },
- { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1187500 },
- { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(15), 1187500 },
- { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(15), 1200000 },
- { 0, { 0 } }
-};
-
-static struct acpu_level acpu_freq_tbl_8064_fast[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 850000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 850000 },
- { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(7), 875000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(7), 875000 },
- { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(7), 900000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(7), 900000 },
- { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(7), 925000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(7), 925000 },
- { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(7), 975000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(7), 975000 },
- { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(7), 1000000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(7), 1000000 },
- { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(7), 1025000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(7), 1025000 },
- { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1075000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1075000 },
- { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1100000 },
- { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1100000 },
- { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1125000 },
- { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1125000 },
- { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1137500 },
- { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(15), 1137500 },
- { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(15), 1150000 },
- { 0, { 0 } }
-};
-
-/* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
-#undef L2
-#define L2(x) (&l2_freq_tbl_8930[(x)])
-static struct l2_level l2_freq_tbl_8930[] = {
- [0] = { {STBY_KHZ, QSB, 0, 0, 0x00 }, LVL_NOM, 1050000, 0 },
- [1] = { { 384000, PLL_8, 0, 2, 0x00 }, LVL_NOM, 1050000, 1 },
- [2] = { { 432000, HFPLL, 2, 0, 0x20 }, LVL_NOM, 1050000, 2 },
- [3] = { { 486000, HFPLL, 2, 0, 0x24 }, LVL_NOM, 1050000, 2 },
- [4] = { { 540000, HFPLL, 2, 0, 0x28 }, LVL_NOM, 1050000, 2 },
- [5] = { { 594000, HFPLL, 1, 0, 0x16 }, LVL_NOM, 1050000, 2 },
- [6] = { { 648000, HFPLL, 1, 0, 0x18 }, LVL_NOM, 1050000, 4 },
- [7] = { { 702000, HFPLL, 1, 0, 0x1A }, LVL_NOM, 1050000, 4 },
- [8] = { { 756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 4 },
- [9] = { { 810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 4 },
- [10] = { { 864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
- [11] = { { 918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 7 },
- [12] = { { 972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 7 },
- [13] = { { 1026000, HFPLL, 1, 0, 0x26 }, LVL_HIGH, 1150000, 7 },
- [14] = { { 1080000, HFPLL, 1, 0, 0x28 }, LVL_HIGH, 1150000, 7 },
- [15] = { { 1134000, HFPLL, 1, 0, 0x2A }, LVL_HIGH, 1150000, 7 },
- [16] = { { 1188000, HFPLL, 1, 0, 0x2C }, LVL_HIGH, 1150000, 7 },
-};
-
-/* TODO: Update core voltages when data is available. */
-static struct acpu_level acpu_freq_tbl_8930_slow[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 950000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 950000 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 975000 },
+static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 950000 },
+ { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 975000 },
{ 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 975000 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 1000000 },
+ { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 1000000 },
{ 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 1000000 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 1025000 },
+ { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 1025000 },
{ 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 1025000 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1075000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1075000 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1100000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1100000 },
- { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(11), 1125000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1125000 },
- { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1175000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1175000 },
- { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1200000 },
+ { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(6), 1075000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(6), 1075000 },
+ { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(6), 1100000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(6), 1100000 },
+ { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(6), 1125000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(6), 1125000 },
+ { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(18), 1175000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(18), 1175000 },
+ { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(18), 1200000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(18), 1200000 },
+ { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(18), 1225000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(18), 1225000 },
+ { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(18), 1237500 },
+ { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(18), 1237500 },
+ { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(18), 1250000 },
{ 0, { 0 } }
};
-static struct acpu_level acpu_freq_tbl_8930_nom[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 925000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 925000 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 950000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 950000 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 975000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 975000 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 1000000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 1000000 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1050000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1050000 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1075000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1075000 },
- { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(11), 1100000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1100000 },
- { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1150000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1150000 },
- { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1175000 },
+static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 900000 },
+ { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 925000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 925000 },
+ { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 950000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 950000 },
+ { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 975000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 975000 },
+ { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(6), 1025000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(6), 1025000 },
+ { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(6), 1050000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(6), 1050000 },
+ { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(6), 1075000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(6), 1075000 },
+ { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(18), 1125000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(18), 1125000 },
+ { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(18), 1150000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(18), 1150000 },
+ { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(18), 1175000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(18), 1175000 },
+ { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(18), 1187500 },
+ { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(18), 1187500 },
+ { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(18), 1200000 },
{ 0, { 0 } }
};
-static struct acpu_level acpu_freq_tbl_8930_fast[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 900000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 900000 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 900000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 900000 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 925000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 925000 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 950000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 950000 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1000000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1000000 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1025000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1025000 },
- { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(11), 1050000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1050000 },
- { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1100000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1100000 },
- { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1125000 },
- { 0, { 0 } }
-};
-/* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
-#undef L2
-#define L2(x) (&l2_freq_tbl_8627[(x)])
-static struct l2_level l2_freq_tbl_8627[] = {
- [0] = { {STBY_KHZ, QSB, 0, 0, 0x00 }, LVL_NOM, 1050000, 0 },
- [1] = { { 384000, PLL_8, 0, 2, 0x00 }, LVL_NOM, 1050000, 1 },
- [2] = { { 432000, HFPLL, 2, 0, 0x20 }, LVL_NOM, 1050000, 1 },
- [3] = { { 486000, HFPLL, 2, 0, 0x24 }, LVL_NOM, 1050000, 1 },
- [4] = { { 540000, HFPLL, 2, 0, 0x28 }, LVL_NOM, 1050000, 2 },
- [5] = { { 594000, HFPLL, 1, 0, 0x16 }, LVL_NOM, 1050000, 2 },
- [6] = { { 648000, HFPLL, 1, 0, 0x18 }, LVL_NOM, 1050000, 2 },
- [7] = { { 702000, HFPLL, 1, 0, 0x1A }, LVL_NOM, 1050000, 3 },
- [8] = { { 756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 3 },
- [9] = { { 810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 3 },
- [10] = { { 864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
- [11] = { { 918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 4 },
- [12] = { { 972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 4 },
-};
-
-/* TODO: Update core voltages when data is available. */
-static struct acpu_level acpu_freq_tbl_8627[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 900000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 900000 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 925000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 925000 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 937500 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 962500 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(9), 987500 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(9), 1000000 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(9), 1025000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(9), 1062500 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(12), 1062500 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(12), 1087500 },
- { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(12), 1100000 },
+static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(0), 850000 },
+ { 0, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 875000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 875000 },
+ { 0, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 900000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 900000 },
+ { 0, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 925000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 925000 },
+ { 0, { 756000, HFPLL, 1, 0, 0x1C }, L2(6), 975000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(6), 975000 },
+ { 0, { 864000, HFPLL, 1, 0, 0x20 }, L2(6), 1000000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(6), 1000000 },
+ { 0, { 972000, HFPLL, 1, 0, 0x24 }, L2(6), 1025000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(6), 1025000 },
+ { 0, { 1080000, HFPLL, 1, 0, 0x28 }, L2(18), 1075000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(18), 1075000 },
+ { 0, { 1188000, HFPLL, 1, 0, 0x2C }, L2(18), 1100000 },
+ { 1, { 1242000, HFPLL, 1, 0, 0x2E }, L2(18), 1100000 },
+ { 0, { 1296000, HFPLL, 1, 0, 0x30 }, L2(18), 1125000 },
+ { 1, { 1350000, HFPLL, 1, 0, 0x32 }, L2(18), 1125000 },
+ { 0, { 1404000, HFPLL, 1, 0, 0x34 }, L2(18), 1137500 },
+ { 1, { 1458000, HFPLL, 1, 0, 0x36 }, L2(18), 1137500 },
+ { 1, { 1512000, HFPLL, 1, 0, 0x38 }, L2(18), 1150000 },
{ 0, { 0 } }
};
-static struct acpu_level *acpu_freq_tbl_8960_v1[NUM_PVS] __initdata = {
- [PVS_SLOW] = acpu_freq_tbl_8960_kraitv1_slow,
- [PVS_NOM] = acpu_freq_tbl_8960_kraitv1_nom_fast,
- [PVS_FAST] = acpu_freq_tbl_8960_kraitv1_nom_fast,
+static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
+[PVS_SLOW] = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow), 0 },
+[PVS_NOMINAL] = { acpu_freq_tbl_nom, sizeof(acpu_freq_tbl_nom), 25000 },
+[PVS_FAST] = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
};
-static struct acpu_level *acpu_freq_tbl_8960_v2[NUM_PVS] __initdata = {
- [PVS_SLOW] = acpu_freq_tbl_8960_kraitv2_slow,
- [PVS_NOM] = acpu_freq_tbl_8960_kraitv2_nom,
- [PVS_FAST] = acpu_freq_tbl_8960_kraitv2_fast,
-};
-
-/* TODO: update the faster table when data is available */
-static struct acpu_level *acpu_freq_tbl_8064[NUM_PVS] __initdata = {
- [PVS_SLOW] = acpu_freq_tbl_8064_slow,
- [PVS_NOM] = acpu_freq_tbl_8064_nom,
- [PVS_FAST] = acpu_freq_tbl_8064_fast,
- [PVS_FASTER] = acpu_freq_tbl_8064_fast,
-};
-
-static struct acpu_level *acpu_freq_tbl_8930_pvs[NUM_PVS] __initdata = {
- [PVS_SLOW] = acpu_freq_tbl_8930_slow,
- [PVS_NOM] = acpu_freq_tbl_8930_nom,
- [PVS_FAST] = acpu_freq_tbl_8930_fast,
-};
-
-static struct acpu_level *max_acpu_level;
-
-static unsigned long acpuclk_8960_get_rate(int cpu)
-{
- return scalable[cpu].current_speed->khz;
-}
-
-/* Get the selected source on primary MUX. */
-static int get_pri_clk_src(struct scalable *sc)
-{
- uint32_t regval;
-
- regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
- return regval & 0x3;
-}
-
-/* Set the selected source on primary MUX. */
-static void set_pri_clk_src(struct scalable *sc, uint32_t pri_src_sel)
-{
- uint32_t regval;
-
- regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
- regval &= ~0x3;
- regval |= (pri_src_sel & 0x3);
- set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
- /* Wait for switch to complete. */
- mb();
- udelay(1);
-}
-
-/* Get the selected source on secondary MUX. */
-static int get_sec_clk_src(struct scalable *sc)
-{
- uint32_t regval;
-
- regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
- return (regval >> 2) & 0x3;
-}
-
-/* Set the selected source on secondary MUX. */
-static void set_sec_clk_src(struct scalable *sc, uint32_t sec_src_sel)
-{
- uint32_t regval;
-
- /* Disable secondary source clock gating during switch. */
- regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
- regval |= SECCLKAGD;
- set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
-
- /* Program the MUX. */
- regval &= ~(0x3 << 2);
- regval |= ((sec_src_sel & 0x3) << 2);
- set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
-
- /* Wait for switch to complete. */
- mb();
- udelay(1);
-
- /* Re-enable secondary source clock gating. */
- regval &= ~SECCLKAGD;
- set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
-}
-
-/* Enable an already-configured HFPLL. */
-static void hfpll_enable(struct scalable *sc, bool skip_regulators)
-{
- int rc;
-
- if (!skip_regulators) {
- if (cpu_is_msm8960()) {
- rc = rpm_vreg_set_voltage(
- sc->vreg[VREG_HFPLL_A].rpm_vreg_id,
- sc->vreg[VREG_HFPLL_A].rpm_vreg_voter,
- 2050000,
- sc->vreg[VREG_HFPLL_A].max_vdd, 0);
- if (rc)
- pr_err("%s regulator enable failed (%d)\n",
- sc->vreg[VREG_HFPLL_A].name, rc);
- }
- rc = rpm_vreg_set_voltage(sc->vreg[VREG_HFPLL_B].rpm_vreg_id,
- sc->vreg[VREG_HFPLL_B].rpm_vreg_voter, 1800000,
- sc->vreg[VREG_HFPLL_B].max_vdd, 0);
- if (rc)
- pr_err("%s regulator enable failed (%d)\n",
- sc->vreg[VREG_HFPLL_B].name, rc);
- }
- /* Disable PLL bypass mode. */
- writel_relaxed(0x2, sc->hfpll_base + HFPLL_MODE);
-
- /*
- * H/W requires a 5us delay between disabling the bypass and
- * de-asserting the reset. Delay 10us just to be safe.
- */
- mb();
- udelay(10);
-
- /* De-assert active-low PLL reset. */
- writel_relaxed(0x6, sc->hfpll_base + HFPLL_MODE);
-
- /* Wait for PLL to lock. */
- mb();
- udelay(60);
-
- /* Enable PLL output. */
- writel_relaxed(0x7, sc->hfpll_base + HFPLL_MODE);
-}
-
-/* Disable a HFPLL for power-savings or while its being reprogrammed. */
-static void hfpll_disable(struct scalable *sc, bool skip_regulators)
-{
- int rc;
-
- /*
- * Disable the PLL output, disable test mode, enable
- * the bypass mode, and assert the reset.
- */
- writel_relaxed(0, sc->hfpll_base + HFPLL_MODE);
-
- if (!skip_regulators) {
- rc = rpm_vreg_set_voltage(sc->vreg[VREG_HFPLL_B].rpm_vreg_id,
- sc->vreg[VREG_HFPLL_B].rpm_vreg_voter, 0,
- 0, 0);
- if (rc)
- pr_err("%s regulator enable failed (%d)\n",
- sc->vreg[VREG_HFPLL_B].name, rc);
-
- if (cpu_is_msm8960()) {
- rc = rpm_vreg_set_voltage(
- sc->vreg[VREG_HFPLL_A].rpm_vreg_id,
- sc->vreg[VREG_HFPLL_A].rpm_vreg_voter,
- 0, 0, 0);
- if (rc)
- pr_err("%s regulator enable failed (%d)\n",
- sc->vreg[VREG_HFPLL_A].name, rc);
- }
- }
-}
-
-/* Program the HFPLL rate. Assumes HFPLL is already disabled. */
-static void hfpll_set_rate(struct scalable *sc, struct core_speed *tgt_s)
-{
- writel_relaxed(tgt_s->pll_l_val, sc->hfpll_base + HFPLL_L_VAL);
-}
-
-/* Return the L2 speed that should be applied. */
-static struct l2_level *compute_l2_level(struct scalable *sc,
- struct l2_level *vote_l)
-{
- struct l2_level *new_l;
- int cpu;
-
- /* Bounds check. */
- BUG_ON(vote_l >= (l2_freq_tbl + l2_freq_tbl_size));
-
- /* Find max L2 speed vote. */
- sc->l2_vote = vote_l;
- new_l = l2_freq_tbl;
- for_each_present_cpu(cpu)
- new_l = max(new_l, scalable[cpu].l2_vote);
-
- return new_l;
-}
-
-/* Update the bus bandwidth request. */
-static void set_bus_bw(unsigned int bw)
-{
- int ret;
-
- /* Bounds check. */
- if (bw >= ARRAY_SIZE(bw_level_tbl)) {
- pr_err("invalid bandwidth request (%d)\n", bw);
- return;
- }
-
- /* Update bandwidth if request has changed. This may sleep. */
- ret = msm_bus_scale_client_update_request(bus_perf_client, bw);
- if (ret)
- pr_err("bandwidth request failed (%d)\n", ret);
-}
-
-/* Set the CPU or L2 clock speed. */
-static void set_speed(struct scalable *sc, struct core_speed *tgt_s,
- enum setrate_reason reason)
-{
- struct core_speed *strt_s = sc->current_speed;
-
- if (tgt_s == strt_s)
- return;
-
- if (strt_s->src == HFPLL && tgt_s->src == HFPLL) {
- /*
- * Move to an always-on source running at a frequency that does
- * not require an elevated CPU voltage. PLL8 is used here.
- */
- set_sec_clk_src(sc, SEC_SRC_SEL_AUX);
- set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
-
- /* Program CPU HFPLL. */
- hfpll_disable(sc, 1);
- hfpll_set_rate(sc, tgt_s);
- hfpll_enable(sc, 1);
-
- /* Move CPU to HFPLL source. */
- set_pri_clk_src(sc, tgt_s->pri_src_sel);
- } else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
- /*
- * If responding to CPU_DEAD we must be running on another CPU.
- * Therefore, we can't access the downed CPU's clock MUX CP15
- * registers from here and can't change clock sources. If the
- * CPU is collapsed, however, it is still safe to turn off the
- * PLL without switching the MUX away from it.
- */
- if (reason != SETRATE_HOTPLUG || sc == &scalable[L2]) {
- set_sec_clk_src(sc, tgt_s->sec_src_sel);
- set_pri_clk_src(sc, tgt_s->pri_src_sel);
- hfpll_disable(sc, 0);
- } else if (reason == SETRATE_HOTPLUG
- && msm_pm_verify_cpu_pc(SCALABLE_TO_CPU(sc))) {
- hfpll_disable(sc, 0);
- }
- } else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
- /*
- * If responding to CPU_UP_PREPARE, we can't change CP15
- * registers for the CPU that's coming up since we're not
- * running on that CPU. That's okay though, since the MUX
- * source was not changed on the way down, either.
- */
- if (reason != SETRATE_HOTPLUG || sc == &scalable[L2]) {
- hfpll_set_rate(sc, tgt_s);
- hfpll_enable(sc, 0);
- set_pri_clk_src(sc, tgt_s->pri_src_sel);
- } else if (reason == SETRATE_HOTPLUG
- && msm_pm_verify_cpu_pc(SCALABLE_TO_CPU(sc))) {
- /* PLL was disabled during hot-unplug. Re-enable it. */
- hfpll_set_rate(sc, tgt_s);
- hfpll_enable(sc, 0);
- }
- } else {
- if (reason != SETRATE_HOTPLUG || sc == &scalable[L2])
- set_sec_clk_src(sc, tgt_s->sec_src_sel);
- }
-
- sc->current_speed = tgt_s;
-}
-
-/* Apply any per-cpu voltage increases. */
-static int increase_vdd(int cpu, unsigned int vdd_core, unsigned int vdd_mem,
- unsigned int vdd_dig, enum setrate_reason reason)
-{
- struct scalable *sc = &scalable[cpu];
- int rc = 0;
-
- /*
- * Increase vdd_mem active-set before vdd_dig.
- * vdd_mem should be >= vdd_dig.
- */
- if (vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
- rc = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
- sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
- sc->vreg[VREG_MEM].max_vdd, 0);
- if (rc) {
- pr_err("%s increase failed (%d)\n",
- sc->vreg[VREG_MEM].name, rc);
- return rc;
- }
- sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
- }
-
- /* Increase vdd_dig active-set vote. */
- if (vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
- rc = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
- sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
- sc->vreg[VREG_DIG].max_vdd, 0);
- if (rc) {
- pr_err("%s increase failed (%d)\n",
- sc->vreg[VREG_DIG].name, rc);
- return rc;
- }
- sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
- }
-
- /*
- * Update per-CPU core voltage. Don't do this for the hotplug path for
- * which it should already be correct. Attempting to set it is bad
- * because we don't know what CPU we are running on at this point, but
- * the CPU regulator API requires we call it from the affected CPU.
- */
- if (vdd_core > sc->vreg[VREG_CORE].cur_vdd
- && reason != SETRATE_HOTPLUG) {
- rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
- sc->vreg[VREG_CORE].max_vdd);
- if (rc) {
- pr_err("%s increase failed (%d)\n",
- sc->vreg[VREG_CORE].name, rc);
- return rc;
- }
- sc->vreg[VREG_CORE].cur_vdd = vdd_core;
- }
-
- return rc;
-}
-
-/* Apply any per-cpu voltage decreases. */
-static void decrease_vdd(int cpu, unsigned int vdd_core, unsigned int vdd_mem,
- unsigned int vdd_dig, enum setrate_reason reason)
-{
- struct scalable *sc = &scalable[cpu];
- int ret;
-
- /*
- * Update per-CPU core voltage. This must be called on the CPU
- * that's being affected. Don't do this in the hotplug remove path,
- * where the rail is off and we're executing on the other CPU.
- */
- if (vdd_core < sc->vreg[VREG_CORE].cur_vdd
- && reason != SETRATE_HOTPLUG) {
- ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
- sc->vreg[VREG_CORE].max_vdd);
- if (ret) {
- pr_err("%s decrease failed (%d)\n",
- sc->vreg[VREG_CORE].name, ret);
- return;
- }
- sc->vreg[VREG_CORE].cur_vdd = vdd_core;
- }
-
- /* Decrease vdd_dig active-set vote. */
- if (vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
- ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
- sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
- sc->vreg[VREG_DIG].max_vdd, 0);
- if (ret) {
- pr_err("%s decrease failed (%d)\n",
- sc->vreg[VREG_DIG].name, ret);
- return;
- }
- sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
- }
-
- /*
- * Decrease vdd_mem active-set after vdd_dig.
- * vdd_mem should be >= vdd_dig.
- */
- if (vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
- ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
- sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
- sc->vreg[VREG_MEM].max_vdd, 0);
- if (ret) {
- pr_err("%s decrease failed (%d)\n",
- sc->vreg[VREG_MEM].name, ret);
- return;
- }
- sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
- }
-}
-
-static unsigned int calculate_vdd_mem(struct acpu_level *tgt)
-{
- return tgt->l2_level->vdd_mem;
-}
-
-static unsigned int calculate_vdd_dig(struct acpu_level *tgt)
-{
- unsigned int pll_vdd_dig;
-
- if (tgt->l2_level->speed.src != HFPLL)
- pll_vdd_dig = scalable[L2].hfpll_vdd_tbl[HFPLL_VDD_NONE];
- else if (tgt->l2_level->speed.pll_l_val > HFPLL_LOW_VDD_PLL_L_MAX)
- pll_vdd_dig = scalable[L2].hfpll_vdd_tbl[HFPLL_VDD_NOM];
- else
- pll_vdd_dig = scalable[L2].hfpll_vdd_tbl[HFPLL_VDD_LOW];
-
- return max(tgt->l2_level->vdd_dig, pll_vdd_dig);
-}
-
-static unsigned int calculate_vdd_core(struct acpu_level *tgt)
-{
- return tgt->vdd_core;
-}
-
-/* Set the CPU's clock rate and adjust the L2 rate, if appropriate. */
-static int acpuclk_8960_set_rate(int cpu, unsigned long rate,
- enum setrate_reason reason)
-{
- struct core_speed *strt_acpu_s, *tgt_acpu_s;
- struct l2_level *tgt_l2_l;
- struct acpu_level *tgt;
- unsigned int vdd_mem, vdd_dig, vdd_core;
- unsigned long flags;
- int rc = 0;
-
- if (cpu > num_possible_cpus())
- return -EINVAL;
-
- if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
- mutex_lock(&driver_lock);
-
- strt_acpu_s = scalable[cpu].current_speed;
-
- /* Return early if rate didn't change. */
- if (rate == strt_acpu_s->khz)
- goto out;
-
- /* Find target frequency. */
- for (tgt = acpu_freq_tbl; tgt->speed.khz != 0; tgt++) {
- if (tgt->speed.khz == rate) {
- tgt_acpu_s = &tgt->speed;
- break;
- }
- }
- if (tgt->speed.khz == 0) {
- rc = -EINVAL;
- goto out;
- }
-
- /* Calculate voltage requirements for the current CPU. */
- vdd_mem = calculate_vdd_mem(tgt);
- vdd_dig = calculate_vdd_dig(tgt);
- vdd_core = calculate_vdd_core(tgt);
-
- /* Increase VDD levels if needed. */
- if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
- rc = increase_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);
- if (rc)
- goto out;
- }
-
- pr_debug("Switching from ACPU%d rate %u KHz -> %u KHz\n",
- cpu, strt_acpu_s->khz, tgt_acpu_s->khz);
-
- /* Set the CPU speed. */
- set_speed(&scalable[cpu], tgt_acpu_s, reason);
-
- /*
- * Update the L2 vote and apply the rate change. A spinlock is
- * necessary to ensure L2 rate is calulated and set atomically,
- * even if acpuclk_8960_set_rate() is called from an atomic context
- * and the driver_lock mutex is not acquired.
- */
- spin_lock_irqsave(&l2_lock, flags);
- tgt_l2_l = compute_l2_level(&scalable[cpu], tgt->l2_level);
- set_speed(&scalable[L2], &tgt_l2_l->speed, reason);
- spin_unlock_irqrestore(&l2_lock, flags);
-
- /* Nothing else to do for power collapse or SWFI. */
- if (reason == SETRATE_PC || reason == SETRATE_SWFI)
- goto out;
-
- /* Update bus bandwith request. */
- set_bus_bw(tgt_l2_l->bw_level);
-
- /* Drop VDD levels if we can. */
- decrease_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);
-
- pr_debug("ACPU%d speed change complete\n", cpu);
-
-out:
- if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
- mutex_unlock(&driver_lock);
- return rc;
-}
-
-/* Initialize a HFPLL at a given rate and enable it. */
-static void __cpuinit hfpll_init(struct scalable *sc, struct core_speed *tgt_s)
-{
- pr_debug("Initializing HFPLL%d\n", sc - scalable);
-
- /* Disable the PLL for re-programming. */
- hfpll_disable(sc, 1);
-
- /* Configure PLL parameters for integer mode. */
- writel_relaxed(0x7845C665, sc->hfpll_base + HFPLL_CONFIG_CTL);
- writel_relaxed(0, sc->hfpll_base + HFPLL_M_VAL);
- writel_relaxed(1, sc->hfpll_base + HFPLL_N_VAL);
-
- /* Program droop controller. */
- writel_relaxed(0x0108C000, sc->hfpll_base + HFPLL_DROOP_CTL);
-
- /* Set an initial rate and enable the PLL. */
- hfpll_set_rate(sc, tgt_s);
- hfpll_enable(sc, 0);
-}
-
-/* Voltage regulator initialization. */
-static void __cpuinit regulator_init(int cpu, struct acpu_level *lvl)
-{
- int ret;
- struct scalable *sc = &scalable[cpu];
- unsigned int vdd_mem, vdd_dig, vdd_core;
-
- vdd_mem = calculate_vdd_mem(lvl);
- vdd_dig = calculate_vdd_dig(lvl);
-
- /* Set initial vdd_mem vote. */
- ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
- sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
- sc->vreg[VREG_MEM].max_vdd, 0);
- if (ret) {
- pr_err("%s initialization failed (%d)\n",
- sc->vreg[VREG_MEM].name, ret);
- BUG();
- }
- sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
-
- /* Set initial vdd_dig vote. */
- ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
- sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
- sc->vreg[VREG_DIG].max_vdd, 0);
- if (ret) {
- pr_err("%s initialization failed (%d)\n",
- sc->vreg[VREG_DIG].name, ret);
- BUG();
- }
- sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
-
- /* Setup Krait CPU regulators and initial core voltage. */
- sc->vreg[VREG_CORE].reg = regulator_get(NULL,
- sc->vreg[VREG_CORE].name);
- if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
- pr_err("regulator_get(%s) failed (%ld)\n",
- sc->vreg[VREG_CORE].name,
- PTR_ERR(sc->vreg[VREG_CORE].reg));
- BUG();
- }
- vdd_core = calculate_vdd_core(lvl);
- ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
- sc->vreg[VREG_CORE].max_vdd);
- if (ret) {
- pr_err("%s initialization failed (%d)\n",
- sc->vreg[VREG_CORE].name, ret);
- BUG();
- }
- sc->vreg[VREG_CORE].cur_vdd = vdd_core;
- ret = regulator_enable(sc->vreg[VREG_CORE].reg);
- if (ret) {
- pr_err("regulator_enable(%s) failed (%d)\n",
- sc->vreg[VREG_CORE].name, ret);
- BUG();
- }
- sc->regulators_initialized = true;
-}
-
-/* Set initial rate for a given core. */
-static void __cpuinit init_clock_sources(struct scalable *sc,
- struct core_speed *tgt_s)
-{
- uint32_t regval;
-
- /* Select PLL8 as AUX source input to the secondary MUX. */
- writel_relaxed(0x3, sc->aux_clk_sel);
-
- /* Switch away from the HFPLL while it's re-initialized. */
- set_sec_clk_src(sc, SEC_SRC_SEL_AUX);
- set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
- hfpll_init(sc, tgt_s);
-
- /* Set PRI_SRC_SEL_HFPLL_DIV2 divider to div-2. */
- regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
- regval &= ~(0x3 << 6);
- set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
-
- /* Switch to the target clock source. */
- set_sec_clk_src(sc, tgt_s->sec_src_sel);
- set_pri_clk_src(sc, tgt_s->pri_src_sel);
- sc->current_speed = tgt_s;
-}
-
-static void __cpuinit per_cpu_init(void *data)
-{
- int cpu = smp_processor_id();
-
- init_clock_sources(&scalable[cpu], &max_acpu_level->speed);
- scalable[cpu].l2_vote = max_acpu_level->l2_level;
- scalable[cpu].clocks_initialized = true;
-}
-
-/* Register with bus driver. */
-static void __init bus_init(unsigned int init_bw)
-{
- int ret;
-
- bus_perf_client = msm_bus_scale_register_client(&bus_client_pdata);
- if (!bus_perf_client) {
- pr_err("unable to register bus client\n");
- BUG();
- }
-
- ret = msm_bus_scale_client_update_request(bus_perf_client, init_bw);
- if (ret)
- pr_err("initial bandwidth request failed (%d)\n", ret);
-}
-
-#ifdef CONFIG_CPU_FREQ_MSM
-static struct cpufreq_frequency_table freq_table[NR_CPUS][30];
-
-static void __init cpufreq_table_init(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- int i, freq_cnt = 0;
- /* Construct the freq_table tables from acpu_freq_tbl. */
- for (i = 0; acpu_freq_tbl[i].speed.khz != 0
- && freq_cnt < ARRAY_SIZE(*freq_table); i++) {
- if (acpu_freq_tbl[i].use_for_scaling) {
- freq_table[cpu][freq_cnt].index = freq_cnt;
- freq_table[cpu][freq_cnt].frequency
- = acpu_freq_tbl[i].speed.khz;
- freq_cnt++;
- }
- }
- /* freq_table not big enough to store all usable freqs. */
- BUG_ON(acpu_freq_tbl[i].speed.khz != 0);
-
- freq_table[cpu][freq_cnt].index = freq_cnt;
- freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;
-
- pr_info("CPU%d: %d scaling frequencies supported.\n",
- cpu, freq_cnt);
-
- /* Register table with CPUFreq. */
- cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
- }
-}
-#else
-static void __init cpufreq_table_init(void) {}
-#endif
-
-#define HOT_UNPLUG_KHZ STBY_KHZ
-static int __cpuinit acpuclock_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- static int prev_khz[NR_CPUS];
- static int prev_pri_src[NR_CPUS];
- static int prev_sec_src[NR_CPUS];
- int cpu = (int)hcpu;
-
- switch (action) {
- case CPU_DYING:
- case CPU_DYING_FROZEN:
- /*
- * On Krait v1 and 8064v1, the primary and secondary muxes must
- * be set to QSB before L2 power collapse and restored after.
- */
- if (cpu_is_krait_v1() || cpu_is_apq8064()) {
- prev_sec_src[cpu] = get_sec_clk_src(&scalable[cpu]);
- prev_pri_src[cpu] = get_pri_clk_src(&scalable[cpu]);
- set_sec_clk_src(&scalable[cpu], SEC_SRC_SEL_QSB);
- set_pri_clk_src(&scalable[cpu], PRI_SRC_SEL_SEC_SRC);
- }
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- prev_khz[cpu] = acpuclk_8960_get_rate(cpu);
- /* Fall through. */
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- if (scalable[cpu].clocks_initialized)
- acpuclk_8960_set_rate(cpu, HOT_UNPLUG_KHZ,
- SETRATE_HOTPLUG);
- break;
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- if (scalable[cpu].clocks_initialized)
- acpuclk_8960_set_rate(cpu, prev_khz[cpu],
- SETRATE_HOTPLUG);
- if (!scalable[cpu].regulators_initialized)
- regulator_init(cpu, max_acpu_level);
- break;
- case CPU_STARTING:
- case CPU_STARTING_FROZEN:
- if (!scalable[cpu].clocks_initialized) {
- per_cpu_init(NULL);
- } else if (cpu_is_krait_v1() || cpu_is_apq8064()) {
- set_sec_clk_src(&scalable[cpu], prev_sec_src[cpu]);
- set_pri_clk_src(&scalable[cpu], prev_pri_src[cpu]);
- }
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata acpuclock_cpu_notifier = {
- .notifier_call = acpuclock_cpu_callback,
-};
-
-static const int krait_needs_vmin(void)
-{
- switch (read_cpuid_id()) {
- case 0x511F04D0:
- case 0x511F04D1:
- case 0x510F06F0:
- return 1;
- default:
- return 0;
- };
-}
-
-static void kraitv2_apply_vmin(struct acpu_level *tbl)
-{
- for (; tbl->speed.khz != 0; tbl++)
- if (tbl->vdd_core < 1150000)
- tbl->vdd_core = 1150000;
-}
-
-static enum pvs __init get_pvs(void)
-{
- uint32_t pte_efuse, pvs;
-
- pte_efuse = readl_relaxed(QFPROM_PTE_EFUSE_ADDR);
- pvs = (pte_efuse >> 10) & 0x7;
- if (pvs == 0x7)
- pvs = (pte_efuse >> 13) & 0x7;
-
- switch (pvs) {
- case 0x0:
- case 0x7:
- pr_info("ACPU PVS: Slow\n");
- return PVS_SLOW;
- case 0x1:
- pr_info("ACPU PVS: Nominal\n");
- return PVS_NOM;
- case 0x3:
- pr_info("ACPU PVS: Fast\n");
- return PVS_FAST;
- case 0x4:
- if (cpu_is_apq8064()) {
- pr_info("ACPU PVS: Faster\n");
- return PVS_FASTER;
- }
- default:
- pr_warn("ACPU PVS: Unknown. Defaulting to slow\n");
- return PVS_SLOW;
- }
-}
-
-static void __init select_freq_plan(void)
-{
- struct acpu_level *l;
-
- /* Select frequency tables. */
- if (cpu_is_msm8960()) {
- enum pvs pvs_id = get_pvs();
-
- scalable = scalable_8960;
- if (cpu_is_krait_v1()) {
- acpu_freq_tbl = acpu_freq_tbl_8960_v1[pvs_id];
- l2_freq_tbl = l2_freq_tbl_8960_kraitv1;
- l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_8960_kraitv1);
- } else {
- acpu_freq_tbl = acpu_freq_tbl_8960_v2[pvs_id];
- l2_freq_tbl = l2_freq_tbl_8960_kraitv2;
- l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_8960_kraitv2);
- }
- } else if (cpu_is_apq8064()) {
- enum pvs pvs_id = get_pvs();
-
- scalable = scalable_8064;
- acpu_freq_tbl = acpu_freq_tbl_8064[pvs_id];
- l2_freq_tbl = l2_freq_tbl_8064;
- l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_8064);
- } else if (cpu_is_msm8627()) {
- scalable = scalable_8627;
- acpu_freq_tbl = acpu_freq_tbl_8627;
- l2_freq_tbl = l2_freq_tbl_8627;
- l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_8627);
- } else if (cpu_is_msm8930() || cpu_is_msm8930aa()) {
- enum pvs pvs_id = get_pvs();
-
- scalable = scalable_8930;
- acpu_freq_tbl = acpu_freq_tbl_8930_pvs[pvs_id];
- l2_freq_tbl = l2_freq_tbl_8930;
- l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_8930);
- } else {
- BUG();
- }
- BUG_ON(!acpu_freq_tbl);
- if (krait_needs_vmin())
- kraitv2_apply_vmin(acpu_freq_tbl);
-
- /* Find the max supported scaling frequency. */
- for (l = acpu_freq_tbl; l->speed.khz != 0; l++)
- if (l->use_for_scaling)
- max_acpu_level = l;
- BUG_ON(!max_acpu_level);
- pr_info("Max ACPU freq: %u KHz\n", max_acpu_level->speed.khz);
-}
-
-static struct acpuclk_data acpuclk_8960_data = {
- .set_rate = acpuclk_8960_set_rate,
- .get_rate = acpuclk_8960_get_rate,
- .power_collapse_khz = STBY_KHZ,
- .wait_for_irq_khz = STBY_KHZ,
+static struct acpuclk_krait_params acpuclk_8960_params __initdata = {
+ .scalable = scalable,
+ .scalable_size = sizeof(scalable),
+ .hfpll_data = &hfpll_data,
+ .pvs_tables = pvs_tables,
+ .l2_freq_tbl = l2_freq_tbl,
+ .l2_freq_tbl_size = sizeof(l2_freq_tbl),
+ .bus_scale = &bus_scale_data,
+ .qfprom_phys_base = 0x00700000,
+ .stby_khz = 384000,
};
static int __init acpuclk_8960_probe(struct platform_device *pdev)
{
- int cpu;
-
- select_freq_plan();
-
- for_each_online_cpu(cpu)
- regulator_init(cpu, max_acpu_level);
- bus_init(max_acpu_level->l2_level->bw_level);
-
- init_clock_sources(&scalable[L2], &max_acpu_level->l2_level->speed);
- on_each_cpu(per_cpu_init, NULL, true);
-
- cpufreq_table_init();
-
- acpuclk_register(&acpuclk_8960_data);
- register_hotcpu_notifier(&acpuclock_cpu_notifier);
-
- return 0;
+ return acpuclk_krait_init(&pdev->dev, &acpuclk_8960_params);
}
static struct platform_driver acpuclk_8960_driver = {
diff --git a/arch/arm/mach-msm/acpuclock-8974.c b/arch/arm/mach-msm/acpuclock-8974.c
index 8cf9c2b..8c89014 100644
--- a/arch/arm/mach-msm/acpuclock-8974.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -28,40 +28,31 @@
#define LVL_NOM RPM_REGULATOR_CORNER_NORMAL
#define LVL_HIGH RPM_REGULATOR_CORNER_SUPER_TURBO
-static struct hfpll_data hfpll_data_cpu = {
+static struct hfpll_data hfpll_data __initdata = {
.mode_offset = 0x00,
.l_offset = 0x04,
.m_offset = 0x08,
.n_offset = 0x0C,
+ .has_user_reg = true,
+ .user_offset = 0x10,
.config_offset = 0x14,
- /* TODO: Verify magic number for 8974 when available. */
- .config_val = 0x7845C665,
+ /* TODO: Verify magic numbers when final values are available. */
+ .user_val = 0x8,
+ .config_val = 0x04D0405D,
+ .low_vco_l_max = 65,
.low_vdd_l_max = 52,
- .vdd[HFPLL_VDD_NONE] = 0,
- .vdd[HFPLL_VDD_LOW] = 810000,
- .vdd[HFPLL_VDD_NOM] = 900000,
-};
-
-static struct hfpll_data hfpll_data_l2 = {
- .mode_offset = 0x00,
- .l_offset = 0x04,
- .m_offset = 0x08,
- .n_offset = 0x0C,
- .config_offset = 0x14,
- /* TODO: Verify magic number for 8974 when available. */
- .config_val = 0x7845C665,
- .low_vdd_l_max = 52,
+ .nom_vdd_l_max = 104,
.vdd[HFPLL_VDD_NONE] = LVL_NONE,
.vdd[HFPLL_VDD_LOW] = LVL_LOW,
.vdd[HFPLL_VDD_NOM] = LVL_NOM,
+ .vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
};
-static struct scalable scalable[] = {
+static struct scalable scalable[] __initdata = {
[CPU0] = {
.hfpll_phys_base = 0xF908A000,
- .hfpll_data = &hfpll_data_cpu,
.l2cpmr_iaddr = 0x4501,
- .vreg[VREG_CORE] = { "krait0", 1050000, 3200000 },
+ .vreg[VREG_CORE] = { "krait0", 1050000 },
.vreg[VREG_MEM] = { "krait0_mem", 1050000 },
.vreg[VREG_DIG] = { "krait0_dig", LVL_HIGH },
.vreg[VREG_HFPLL_A] = { "krait0_hfpll_a", 2150000 },
@@ -69,9 +60,8 @@
},
[CPU1] = {
.hfpll_phys_base = 0xF909A000,
- .hfpll_data = &hfpll_data_cpu,
.l2cpmr_iaddr = 0x5501,
- .vreg[VREG_CORE] = { "krait1", 1050000, 3200000 },
+ .vreg[VREG_CORE] = { "krait1", 1050000 },
.vreg[VREG_MEM] = { "krait1_mem", 1050000 },
.vreg[VREG_DIG] = { "krait1_dig", LVL_HIGH },
.vreg[VREG_HFPLL_A] = { "krait1_hfpll_a", 2150000 },
@@ -79,9 +69,8 @@
},
[CPU2] = {
.hfpll_phys_base = 0xF90AA000,
- .hfpll_data = &hfpll_data_cpu,
.l2cpmr_iaddr = 0x6501,
- .vreg[VREG_CORE] = { "krait2", 1050000, 3200000 },
+ .vreg[VREG_CORE] = { "krait2", 1050000 },
.vreg[VREG_MEM] = { "krait2_mem", 1050000 },
.vreg[VREG_DIG] = { "krait2_dig", LVL_HIGH },
.vreg[VREG_HFPLL_A] = { "krait2_hfpll_a", 2150000 },
@@ -89,9 +78,8 @@
},
[CPU3] = {
.hfpll_phys_base = 0xF90BA000,
- .hfpll_data = &hfpll_data_cpu,
.l2cpmr_iaddr = 0x7501,
- .vreg[VREG_CORE] = { "krait3", 1050000, 3200000 },
+ .vreg[VREG_CORE] = { "krait3", 1050000 },
.vreg[VREG_MEM] = { "krait3_mem", 1050000 },
.vreg[VREG_DIG] = { "krait3_dig", LVL_HIGH },
.vreg[VREG_HFPLL_A] = { "krait3_hfpll_a", 2150000 },
@@ -99,14 +87,13 @@
},
[L2] = {
.hfpll_phys_base = 0xF9016000,
- .hfpll_data = &hfpll_data_l2,
.l2cpmr_iaddr = 0x0500,
.vreg[VREG_HFPLL_A] = { "l2_hfpll_a", 2150000 },
.vreg[VREG_HFPLL_B] = { "l2_hfpll_b", 1800000 },
},
};
-static struct msm_bus_paths bw_level_tbl[] = {
+static struct msm_bus_paths bw_level_tbl[] __initdata = {
[0] = BW_MBPS(400), /* At least 50 MHz on bus. */
[1] = BW_MBPS(800), /* At least 100 MHz on bus. */
[2] = BW_MBPS(1334), /* At least 167 MHz on bus. */
@@ -114,54 +101,58 @@
[4] = BW_MBPS(3200), /* At least 333 MHz on bus. */
};
-static struct msm_bus_scale_pdata bus_scale_data = {
+static struct msm_bus_scale_pdata bus_scale_data __initdata = {
.usecase = bw_level_tbl,
.num_usecases = ARRAY_SIZE(bw_level_tbl),
.active_only = 1,
.name = "acpuclk-8974",
};
-#define L2(x) (&l2_freq_tbl[(x)])
-static struct l2_level l2_freq_tbl[] = {
- [0] = { {STBY_KHZ, QSB, 0, 0, 0 }, LVL_LOW, 1050000, 0 },
- [1] = { { 300000, PLL_0, 0, 2, 0 }, LVL_LOW, 1050000, 2 },
- [2] = { { 384000, HFPLL, 2, 0, 40 }, LVL_NOM, 1050000, 2 },
- [3] = { { 460800, HFPLL, 2, 0, 48 }, LVL_NOM, 1050000, 2 },
- [4] = { { 537600, HFPLL, 1, 0, 28 }, LVL_NOM, 1050000, 2 },
- [5] = { { 576000, HFPLL, 1, 0, 30 }, LVL_NOM, 1050000, 3 },
- [6] = { { 652800, HFPLL, 1, 0, 34 }, LVL_NOM, 1050000, 3 },
- [7] = { { 729600, HFPLL, 1, 0, 38 }, LVL_NOM, 1050000, 3 },
- [8] = { { 806400, HFPLL, 1, 0, 42 }, LVL_NOM, 1050000, 3 },
- [9] = { { 883200, HFPLL, 1, 0, 46 }, LVL_NOM, 1050000, 4 },
- [10] = { { 960000, HFPLL, 1, 0, 50 }, LVL_NOM, 1050000, 4 },
- [11] = { { 1036800, HFPLL, 1, 0, 54 }, LVL_NOM, 1050000, 4 },
+static struct l2_level l2_freq_tbl[] __initdata = {
+ [0] = { { 300000, PLL_0, 0, 2, 0 }, LVL_LOW, 1050000, 2 },
+ [1] = { { 384000, HFPLL, 2, 0, 40 }, LVL_NOM, 1050000, 2 },
+ [2] = { { 460800, HFPLL, 2, 0, 48 }, LVL_NOM, 1050000, 2 },
+ [3] = { { 537600, HFPLL, 1, 0, 28 }, LVL_NOM, 1050000, 2 },
+ [4] = { { 576000, HFPLL, 1, 0, 30 }, LVL_NOM, 1050000, 3 },
+ [5] = { { 652800, HFPLL, 1, 0, 34 }, LVL_NOM, 1050000, 3 },
+ [6] = { { 729600, HFPLL, 1, 0, 38 }, LVL_NOM, 1050000, 3 },
+ [7] = { { 806400, HFPLL, 1, 0, 42 }, LVL_NOM, 1050000, 3 },
+ [8] = { { 883200, HFPLL, 1, 0, 46 }, LVL_NOM, 1050000, 4 },
+ [9] = { { 960000, HFPLL, 1, 0, 50 }, LVL_NOM, 1050000, 4 },
+ [10] = { { 1036800, HFPLL, 1, 0, 54 }, LVL_NOM, 1050000, 4 },
};
-static struct acpu_level acpu_freq_tbl[] = {
- { 0, {STBY_KHZ, QSB, 0, 0, 0 }, L2(0), 1050000 },
- { 1, { 300000, PLL_0, 0, 2, 0 }, L2(1), 1050000 },
- { 1, { 384000, HFPLL, 2, 0, 40 }, L2(2), 1050000 },
- { 1, { 460800, HFPLL, 2, 0, 48 }, L2(3), 1050000 },
- { 1, { 537600, HFPLL, 1, 0, 28 }, L2(4), 1050000 },
- { 1, { 576000, HFPLL, 1, 0, 30 }, L2(5), 1050000 },
- { 1, { 652800, HFPLL, 1, 0, 34 }, L2(6), 1050000 },
- { 1, { 729600, HFPLL, 1, 0, 38 }, L2(7), 1050000 },
- { 1, { 806400, HFPLL, 1, 0, 42 }, L2(8), 1050000 },
- { 1, { 883200, HFPLL, 1, 0, 46 }, L2(9), 1050000 },
- { 1, { 960000, HFPLL, 1, 0, 50 }, L2(10), 1050000 },
- { 1, { 1036800, HFPLL, 1, 0, 54 }, L2(11), 1050000 },
+static struct acpu_level acpu_freq_tbl[] __initdata = {
+ { 1, { 300000, PLL_0, 0, 2, 0 }, L2(0), 1050000, 3200000 },
+ { 1, { 384000, HFPLL, 2, 0, 40 }, L2(1), 1050000, 3200000 },
+ { 1, { 460800, HFPLL, 2, 0, 48 }, L2(2), 1050000, 3200000 },
+ { 1, { 537600, HFPLL, 1, 0, 28 }, L2(3), 1050000, 3200000 },
+ { 1, { 576000, HFPLL, 1, 0, 30 }, L2(4), 1050000, 3200000 },
+ { 1, { 652800, HFPLL, 1, 0, 34 }, L2(5), 1050000, 3200000 },
+ { 1, { 729600, HFPLL, 1, 0, 38 }, L2(6), 1050000, 3200000 },
+ { 1, { 806400, HFPLL, 1, 0, 42 }, L2(7), 1050000, 3200000 },
+ { 1, { 883200, HFPLL, 1, 0, 46 }, L2(8), 1050000, 3200000 },
+ { 1, { 960000, HFPLL, 1, 0, 50 }, L2(9), 1050000, 3200000 },
+ { 1, { 1036800, HFPLL, 1, 0, 54 }, L2(10), 1050000, 3200000 },
{ 0, { 0 } }
};
-static struct acpuclk_krait_params acpuclk_8974_params = {
+static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
+ [PVS_SLOW] = { acpu_freq_tbl, sizeof(acpu_freq_tbl) },
+ [PVS_NOMINAL] = { acpu_freq_tbl, sizeof(acpu_freq_tbl) },
+ [PVS_FAST] = { acpu_freq_tbl, sizeof(acpu_freq_tbl) },
+};
+
+static struct acpuclk_krait_params acpuclk_8974_params __initdata = {
.scalable = scalable,
- .pvs_acpu_freq_tbl[PVS_SLOW] = acpu_freq_tbl,
- .pvs_acpu_freq_tbl[PVS_NOMINAL] = acpu_freq_tbl,
- .pvs_acpu_freq_tbl[PVS_FAST] = acpu_freq_tbl,
+ .scalable_size = sizeof(scalable),
+ .hfpll_data = &hfpll_data,
+ .pvs_tables = pvs_tables,
.l2_freq_tbl = l2_freq_tbl,
- .l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl),
- .bus_scale_data = &bus_scale_data,
+ .l2_freq_tbl_size = sizeof(l2_freq_tbl),
+ .bus_scale = &bus_scale_data,
.qfprom_phys_base = 0xFC4A8000,
+ .stby_khz = 300000,
};
static int __init acpuclk_8974_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index 8bd54e3..33396e5 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -11,9 +11,8 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
@@ -42,7 +41,6 @@
#define PRI_SRC_SEL_SEC_SRC 0
#define PRI_SRC_SEL_HFPLL 1
#define PRI_SRC_SEL_HFPLL_DIV2 2
-#define SEC_SRC_SEL_QSB 0
#define SEC_SRC_SEL_L2PLL 1
#define SEC_SRC_SEL_AUX 2
@@ -54,10 +52,12 @@
static struct drv_data {
struct acpu_level *acpu_freq_tbl;
- const struct acpu_level *max_acpu_lvl;
const struct l2_level *l2_freq_tbl;
struct scalable *scalable;
+ struct hfpll_data *hfpll_data;
u32 bus_perf_client;
+ struct msm_bus_scale_pdata *bus_scale;
+ int boost_uv;
struct device *dev;
} drv;
@@ -130,7 +130,7 @@
}
/* Disable PLL bypass mode. */
- writel_relaxed(0x2, sc->hfpll_base + sc->hfpll_data->mode_offset);
+ writel_relaxed(0x2, sc->hfpll_base + drv.hfpll_data->mode_offset);
/*
* H/W requires a 5us delay between disabling the bypass and
@@ -140,14 +140,14 @@
udelay(10);
/* De-assert active-low PLL reset. */
- writel_relaxed(0x6, sc->hfpll_base + sc->hfpll_data->mode_offset);
+ writel_relaxed(0x6, sc->hfpll_base + drv.hfpll_data->mode_offset);
/* Wait for PLL to lock. */
mb();
udelay(60);
/* Enable PLL output. */
- writel_relaxed(0x7, sc->hfpll_base + sc->hfpll_data->mode_offset);
+ writel_relaxed(0x7, sc->hfpll_base + drv.hfpll_data->mode_offset);
}
/* Disable a HFPLL for power-savings or while it's being reprogrammed. */
@@ -157,7 +157,7 @@
* Disable the PLL output, disable test mode, enable the bypass mode,
* and assert the reset.
*/
- writel_relaxed(0, sc->hfpll_base + sc->hfpll_data->mode_offset);
+ writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->mode_offset);
if (!skip_regulators) {
/* Remove voltage votes required by the HFPLL. */
@@ -169,20 +169,29 @@
/* Program the HFPLL rate. Assumes HFPLL is already disabled. */
static void hfpll_set_rate(struct scalable *sc, const struct core_speed *tgt_s)
{
- writel_relaxed(tgt_s->pll_l_val,
- sc->hfpll_base + sc->hfpll_data->l_offset);
+ void __iomem *base = sc->hfpll_base;
+ u32 regval;
+
+ writel_relaxed(tgt_s->pll_l_val, base + drv.hfpll_data->l_offset);
+
+ if (drv.hfpll_data->has_user_reg) {
+ regval = readl_relaxed(base + drv.hfpll_data->user_offset);
+ if (tgt_s->pll_l_val <= drv.hfpll_data->low_vco_l_max)
+ regval &= ~drv.hfpll_data->user_vco_mask;
+ else
+ regval |= drv.hfpll_data->user_vco_mask;
+ writel_relaxed(regval, base + drv.hfpll_data->user_offset);
+ }
}
/* Return the L2 speed that should be applied. */
-static const struct l2_level *compute_l2_level(struct scalable *sc,
- const struct l2_level *vote_l)
+static unsigned int compute_l2_level(struct scalable *sc, unsigned int vote_l)
{
- const struct l2_level *new_l;
+ unsigned int new_l = 0;
int cpu;
/* Find max L2 speed vote. */
sc->l2_vote = vote_l;
- new_l = drv.l2_freq_tbl;
for_each_present_cpu(cpu)
new_l = max(new_l, drv.scalable[cpu].l2_vote);
@@ -235,40 +244,59 @@
sc->cur_speed = tgt_s;
}
+struct vdd_data {
+ int vdd_mem;
+ int vdd_dig;
+ int vdd_core;
+ int ua_core;
+};
+
/* Apply any per-cpu voltage increases. */
-static int increase_vdd(int cpu, int vdd_core, int vdd_mem, int vdd_dig,
+static int increase_vdd(int cpu, struct vdd_data *data,
enum setrate_reason reason)
{
struct scalable *sc = &drv.scalable[cpu];
- int rc = 0;
+ int rc;
/*
* Increase vdd_mem active-set before vdd_dig.
* vdd_mem should be >= vdd_dig.
*/
- if (vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
+ if (data->vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
- vdd_mem, sc->vreg[VREG_MEM].max_vdd);
+ data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
if (rc) {
dev_err(drv.dev,
"vdd_mem (cpu%d) increase failed (%d)\n",
cpu, rc);
return rc;
}
- sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
+ sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
}
/* Increase vdd_dig active-set vote. */
- if (vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
+ if (data->vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
- vdd_dig, sc->vreg[VREG_DIG].max_vdd);
+ data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
if (rc) {
dev_err(drv.dev,
"vdd_dig (cpu%d) increase failed (%d)\n",
cpu, rc);
return rc;
}
- sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
+ sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
+ }
+
+ /* Increase current request. */
+ if (data->ua_core > sc->vreg[VREG_CORE].cur_ua) {
+ rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
+ data->ua_core);
+ if (rc < 0) {
+ dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
+ sc->vreg[VREG_CORE].name, rc);
+ return rc;
+ }
+ sc->vreg[VREG_CORE].cur_ua = data->ua_core;
}
/*
@@ -277,25 +305,25 @@
* because we don't know what CPU we are running on at this point, but
* the CPU regulator API requires we call it from the affected CPU.
*/
- if (vdd_core > sc->vreg[VREG_CORE].cur_vdd
+ if (data->vdd_core > sc->vreg[VREG_CORE].cur_vdd
&& reason != SETRATE_HOTPLUG) {
- rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
- sc->vreg[VREG_CORE].max_vdd);
+ rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
+ data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
if (rc) {
dev_err(drv.dev,
"vdd_core (cpu%d) increase failed (%d)\n",
cpu, rc);
return rc;
}
- sc->vreg[VREG_CORE].cur_vdd = vdd_core;
+ sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
}
- return rc;
+ return 0;
}
/* Apply any per-cpu voltage decreases. */
-static void decrease_vdd(int cpu, int vdd_core, int vdd_mem, int vdd_dig,
- enum setrate_reason reason)
+static void decrease_vdd(int cpu, struct vdd_data *data,
+ enum setrate_reason reason)
{
struct scalable *sc = &drv.scalable[cpu];
int ret;
@@ -305,73 +333,99 @@
* that's being affected. Don't do this in the hotplug remove path,
* where the rail is off and we're executing on the other CPU.
*/
- if (vdd_core < sc->vreg[VREG_CORE].cur_vdd
+ if (data->vdd_core < sc->vreg[VREG_CORE].cur_vdd
&& reason != SETRATE_HOTPLUG) {
- ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
- sc->vreg[VREG_CORE].max_vdd);
+ ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
+ data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
if (ret) {
dev_err(drv.dev,
"vdd_core (cpu%d) decrease failed (%d)\n",
cpu, ret);
return;
}
- sc->vreg[VREG_CORE].cur_vdd = vdd_core;
+ sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
+ }
+
+ /* Decrease current request. */
+ if (data->ua_core < sc->vreg[VREG_CORE].cur_ua) {
+ ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
+ data->ua_core);
+ if (ret < 0) {
+ dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
+ sc->vreg[VREG_CORE].name, ret);
+ return;
+ }
+ sc->vreg[VREG_CORE].cur_ua = data->ua_core;
}
/* Decrease vdd_dig active-set vote. */
- if (vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
+ if (data->vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
- vdd_dig, sc->vreg[VREG_DIG].max_vdd);
+ data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
if (ret) {
dev_err(drv.dev,
"vdd_dig (cpu%d) decrease failed (%d)\n",
cpu, ret);
return;
}
- sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
+ sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
}
/*
* Decrease vdd_mem active-set after vdd_dig.
* vdd_mem should be >= vdd_dig.
*/
- if (vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
+ if (data->vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
- vdd_mem, sc->vreg[VREG_MEM].max_vdd);
+ data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
if (ret) {
dev_err(drv.dev,
"vdd_mem (cpu%d) decrease failed (%d)\n",
cpu, ret);
return;
}
- sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
+ sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
}
}
static int calculate_vdd_mem(const struct acpu_level *tgt)
{
- return tgt->l2_level->vdd_mem;
+ return drv.l2_freq_tbl[tgt->l2_level].vdd_mem;
+}
+
+static int get_src_dig(const struct core_speed *s)
+{
+ const int *hfpll_vdd = drv.hfpll_data->vdd;
+ const u32 low_vdd_l_max = drv.hfpll_data->low_vdd_l_max;
+ const u32 nom_vdd_l_max = drv.hfpll_data->nom_vdd_l_max;
+
+ if (s->src != HFPLL)
+ return hfpll_vdd[HFPLL_VDD_NONE];
+ else if (s->pll_l_val > nom_vdd_l_max)
+ return hfpll_vdd[HFPLL_VDD_HIGH];
+ else if (s->pll_l_val > low_vdd_l_max)
+ return hfpll_vdd[HFPLL_VDD_NOM];
+ else
+ return hfpll_vdd[HFPLL_VDD_LOW];
}
static int calculate_vdd_dig(const struct acpu_level *tgt)
{
- int pll_vdd_dig;
- const int *hfpll_vdd = drv.scalable[L2].hfpll_data->vdd;
- const u32 low_vdd_l_max = drv.scalable[L2].hfpll_data->low_vdd_l_max;
+ int l2_pll_vdd_dig, cpu_pll_vdd_dig;
- if (tgt->l2_level->speed.src != HFPLL)
- pll_vdd_dig = hfpll_vdd[HFPLL_VDD_NONE];
- else if (tgt->l2_level->speed.pll_l_val > low_vdd_l_max)
- pll_vdd_dig = hfpll_vdd[HFPLL_VDD_NOM];
- else
- pll_vdd_dig = hfpll_vdd[HFPLL_VDD_LOW];
+ l2_pll_vdd_dig = get_src_dig(&drv.l2_freq_tbl[tgt->l2_level].speed);
+ cpu_pll_vdd_dig = get_src_dig(&tgt->speed);
- return max(tgt->l2_level->vdd_dig, pll_vdd_dig);
+ return max(drv.l2_freq_tbl[tgt->l2_level].vdd_dig,
+ max(l2_pll_vdd_dig, cpu_pll_vdd_dig));
}
+static bool enable_boost = true;
+module_param_named(boost, enable_boost, bool, S_IRUGO | S_IWUSR);
+
static int calculate_vdd_core(const struct acpu_level *tgt)
{
- return tgt->vdd_core;
+ return tgt->vdd_core + (enable_boost ? drv.boost_uv : 0);
}
/* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */
@@ -379,9 +433,9 @@
enum setrate_reason reason)
{
const struct core_speed *strt_acpu_s, *tgt_acpu_s;
- const struct l2_level *tgt_l2_l;
const struct acpu_level *tgt;
- int vdd_mem, vdd_dig, vdd_core;
+ int tgt_l2_l;
+ struct vdd_data vdd_data;
unsigned long flags;
int rc = 0;
@@ -410,19 +464,20 @@
}
/* Calculate voltage requirements for the current CPU. */
- vdd_mem = calculate_vdd_mem(tgt);
- vdd_dig = calculate_vdd_dig(tgt);
- vdd_core = calculate_vdd_core(tgt);
+ vdd_data.vdd_mem = calculate_vdd_mem(tgt);
+ vdd_data.vdd_dig = calculate_vdd_dig(tgt);
+ vdd_data.vdd_core = calculate_vdd_core(tgt);
+ vdd_data.ua_core = tgt->ua_core;
/* Increase VDD levels if needed. */
if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
- rc = increase_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);
+ rc = increase_vdd(cpu, &vdd_data, reason);
if (rc)
goto out;
}
- pr_debug("Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
- cpu, strt_acpu_s->khz, tgt_acpu_s->khz);
+ dev_dbg(drv.dev, "Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
+ cpu, strt_acpu_s->khz, tgt_acpu_s->khz);
/* Set the new CPU speed. */
set_speed(&drv.scalable[cpu], tgt_acpu_s);
@@ -436,7 +491,7 @@
*/
spin_lock_irqsave(&l2_lock, flags);
tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level);
- set_speed(&drv.scalable[L2], &tgt_l2_l->speed);
+ set_speed(&drv.scalable[L2], &drv.l2_freq_tbl[tgt_l2_l].speed);
spin_unlock_irqrestore(&l2_lock, flags);
/* Nothing else to do for power collapse or SWFI. */
@@ -444,12 +499,12 @@
goto out;
/* Update bus bandwith request. */
- set_bus_bw(tgt_l2_l->bw_level);
+ set_bus_bw(drv.l2_freq_tbl[tgt_l2_l].bw_level);
/* Drop VDD levels if we can. */
- decrease_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);
+ decrease_vdd(cpu, &vdd_data, reason);
- pr_debug("ACPU%d speed change complete\n", cpu);
+ dev_dbg(drv.dev, "ACPU%d speed change complete\n", cpu);
out:
if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
@@ -457,25 +512,33 @@
return rc;
}
+static struct acpuclk_data acpuclk_krait_data = {
+ .set_rate = acpuclk_krait_set_rate,
+ .get_rate = acpuclk_krait_get_rate,
+};
+
/* Initialize a HFPLL at a given rate and enable it. */
static void __init hfpll_init(struct scalable *sc,
const struct core_speed *tgt_s)
{
- pr_debug("Initializing HFPLL%d\n", sc - drv.scalable);
+ dev_dbg(drv.dev, "Initializing HFPLL%d\n", sc - drv.scalable);
/* Disable the PLL for re-programming. */
hfpll_disable(sc, true);
/* Configure PLL parameters for integer mode. */
- writel_relaxed(sc->hfpll_data->config_val,
- sc->hfpll_base + sc->hfpll_data->config_offset);
- writel_relaxed(0, sc->hfpll_base + sc->hfpll_data->m_offset);
- writel_relaxed(1, sc->hfpll_base + sc->hfpll_data->n_offset);
+ writel_relaxed(drv.hfpll_data->config_val,
+ sc->hfpll_base + drv.hfpll_data->config_offset);
+ writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->m_offset);
+ writel_relaxed(1, sc->hfpll_base + drv.hfpll_data->n_offset);
+ if (drv.hfpll_data->has_user_reg)
+ writel_relaxed(drv.hfpll_data->user_val,
+ sc->hfpll_base + drv.hfpll_data->user_offset);
/* Program droop controller, if supported */
- if (sc->hfpll_data->has_droop_ctl)
- writel_relaxed(sc->hfpll_data->droop_val,
- sc->hfpll_base + sc->hfpll_data->droop_offset);
+ if (drv.hfpll_data->has_droop_ctl)
+ writel_relaxed(drv.hfpll_data->droop_val,
+ sc->hfpll_base + drv.hfpll_data->droop_offset);
/* Set an initial rate and enable the PLL. */
hfpll_set_rate(sc, tgt_s);
@@ -533,19 +596,21 @@
}
/* Voltage regulator initialization. */
-static int __cpuinit regulator_init(struct scalable *sc)
+static int __cpuinit regulator_init(struct scalable *sc,
+ const struct acpu_level *acpu_level)
{
int ret, vdd_mem, vdd_dig, vdd_core;
- vdd_mem = calculate_vdd_mem(drv.max_acpu_lvl);
- vdd_dig = calculate_vdd_dig(drv.max_acpu_lvl);
-
+ vdd_mem = calculate_vdd_mem(acpu_level);
ret = rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
if (ret)
goto err_mem;
+
+ vdd_dig = calculate_vdd_dig(acpu_level);
ret = rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
if (ret)
goto err_dig;
+
ret = rpm_regulator_init(sc, VREG_HFPLL_A,
sc->vreg[VREG_HFPLL_A].max_vdd, false);
if (ret)
@@ -564,7 +629,15 @@
sc->vreg[VREG_CORE].name, ret);
goto err_core_get;
}
- vdd_core = calculate_vdd_core(drv.max_acpu_lvl);
+ ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
+ acpu_level->ua_core);
+ if (ret < 0) {
+ dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
+ sc->vreg[VREG_CORE].name, ret);
+ goto err_core_conf;
+ }
+ sc->vreg[VREG_CORE].cur_ua = acpu_level->ua_core;
+ vdd_core = calculate_vdd_core(acpu_level);
ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
sc->vreg[VREG_CORE].max_vdd);
if (ret) {
@@ -573,13 +646,6 @@
goto err_core_conf;
}
sc->vreg[VREG_CORE].cur_vdd = vdd_core;
- ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
- sc->vreg[VREG_CORE].peak_ua);
- if (ret < 0) {
- dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
- sc->vreg[VREG_CORE].name, ret);
- goto err_core_conf;
- }
ret = regulator_enable(sc->vreg[VREG_CORE].reg);
if (ret) {
dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
@@ -647,9 +713,63 @@
return 0;
}
+static void __cpuinit fill_cur_core_speed(struct core_speed *s,
+ struct scalable *sc)
+{
+ s->pri_src_sel = get_l2_indirect_reg(sc->l2cpmr_iaddr) & 0x3;
+ s->sec_src_sel = (get_l2_indirect_reg(sc->l2cpmr_iaddr) >> 2) & 0x3;
+ s->pll_l_val = readl_relaxed(sc->hfpll_base + drv.hfpll_data->l_offset);
+}
+
+static bool __cpuinit speed_equal(const struct core_speed *s1,
+ const struct core_speed *s2)
+{
+ return (s1->pri_src_sel == s2->pri_src_sel &&
+ s1->sec_src_sel == s2->sec_src_sel &&
+ s1->pll_l_val == s2->pll_l_val);
+}
+
+static const struct acpu_level __cpuinit *find_cur_acpu_level(int cpu)
+{
+ struct scalable *sc = &drv.scalable[cpu];
+ const struct acpu_level *l;
+ struct core_speed cur_speed;
+
+ fill_cur_core_speed(&cur_speed, sc);
+ for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
+ if (speed_equal(&l->speed, &cur_speed))
+ return l;
+ return NULL;
+}
+
+static const struct l2_level __init *find_cur_l2_level(void)
+{
+ struct scalable *sc = &drv.scalable[L2];
+ const struct l2_level *l;
+ struct core_speed cur_speed;
+
+ fill_cur_core_speed(&cur_speed, sc);
+ for (l = drv.l2_freq_tbl; l->speed.khz != 0; l++)
+ if (speed_equal(&l->speed, &cur_speed))
+ return l;
+ return NULL;
+}
+
+static const struct acpu_level __cpuinit *find_min_acpu_level(void)
+{
+ struct acpu_level *l;
+
+ for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
+ if (l->use_for_scaling)
+ return l;
+
+ return NULL;
+}
+
static int __cpuinit per_cpu_init(int cpu)
{
struct scalable *sc = &drv.scalable[cpu];
+ const struct acpu_level *acpu_level;
int ret;
sc->hfpll_base = ioremap(sc->hfpll_phys_base, SZ_32);
@@ -658,14 +778,29 @@
goto err_ioremap;
}
- ret = regulator_init(sc);
+ acpu_level = find_cur_acpu_level(cpu);
+ if (!acpu_level) {
+ acpu_level = find_min_acpu_level();
+ if (!acpu_level) {
+ ret = -ENODEV;
+ goto err_table;
+ }
+ dev_dbg(drv.dev, "CPU%d is running at an unknown rate. Defaulting to %lu KHz.\n",
+ cpu, acpu_level->speed.khz);
+ } else {
+ dev_dbg(drv.dev, "CPU%d is running at %lu KHz\n", cpu,
+ acpu_level->speed.khz);
+ }
+
+ ret = regulator_init(sc, acpu_level);
if (ret)
goto err_regulators;
- ret = init_clock_sources(sc, &drv.max_acpu_lvl->speed);
+ ret = init_clock_sources(sc, &acpu_level->speed);
if (ret)
goto err_clocks;
- sc->l2_vote = drv.max_acpu_lvl->l2_level;
+
+ sc->l2_vote = acpu_level->l2_level;
sc->initialized = true;
return 0;
@@ -673,24 +808,25 @@
err_clocks:
regulator_cleanup(sc);
err_regulators:
+err_table:
iounmap(sc->hfpll_base);
err_ioremap:
return ret;
}
/* Register with bus driver. */
-static void __init bus_init(struct msm_bus_scale_pdata *bus_scale_data)
+static void __init bus_init(const struct l2_level *l2_level)
{
int ret;
- drv.bus_perf_client = msm_bus_scale_register_client(bus_scale_data);
+ drv.bus_perf_client = msm_bus_scale_register_client(drv.bus_scale);
if (!drv.bus_perf_client) {
dev_err(drv.dev, "unable to register bus client\n");
BUG();
}
ret = msm_bus_scale_client_update_request(drv.bus_perf_client,
- drv.max_acpu_lvl->l2_level->bw_level);
+ l2_level->bw_level);
if (ret)
dev_err(drv.dev, "initial bandwidth req failed (%d)\n", ret);
}
@@ -731,20 +867,20 @@
static void __init cpufreq_table_init(void) {}
#endif
-#define HOT_UNPLUG_KHZ STBY_KHZ
static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
static int prev_khz[NR_CPUS];
int rc, cpu = (int)hcpu;
struct scalable *sc = &drv.scalable[cpu];
+ unsigned long hot_unplug_khz = acpuclk_krait_data.power_collapse_khz;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DEAD:
prev_khz[cpu] = acpuclk_krait_get_rate(cpu);
/* Fall through. */
case CPU_UP_CANCELED:
- acpuclk_krait_set_rate(cpu, HOT_UNPLUG_KHZ, SETRATE_HOTPLUG);
+ acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);
regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
break;
case CPU_UP_PREPARE:
@@ -757,7 +893,7 @@
if (WARN_ON(!prev_khz[cpu]))
return NOTIFY_BAD;
rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
- sc->vreg[VREG_CORE].peak_ua);
+ sc->vreg[VREG_CORE].cur_ua);
if (rc < 0)
return NOTIFY_BAD;
acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
@@ -792,13 +928,11 @@
tbl->vdd_core = 1150000;
}
-static void __init select_freq_plan(struct acpu_level *const *pvs_tbl,
- u32 qfprom_phys)
+static int __init select_freq_plan(u32 qfprom_phys)
{
- const struct acpu_level *l;
void __iomem *qfprom_base;
u32 pte_efuse, pvs, tbl_idx;
- char *pvs_names[] = { "Slow", "Nominal", "Fast", "Unknown" };
+ char *pvs_names[] = { "Slow", "Nominal", "Fast", "Faster", "Unknown" };
qfprom_base = ioremap(qfprom_phys, SZ_256);
/* Select frequency tables. */
@@ -820,6 +954,9 @@
case 0x3:
tbl_idx = PVS_FAST;
break;
+ case 0x4:
+ tbl_idx = PVS_FASTER;
+ break;
default:
tbl_idx = PVS_UNKNOWN;
break;
@@ -828,47 +965,63 @@
tbl_idx = PVS_UNKNOWN;
dev_err(drv.dev, "Unable to map QFPROM base\n");
}
- dev_info(drv.dev, "ACPU PVS: %s\n", pvs_names[tbl_idx]);
if (tbl_idx == PVS_UNKNOWN) {
tbl_idx = PVS_SLOW;
dev_warn(drv.dev, "ACPU PVS: Defaulting to %s\n",
pvs_names[tbl_idx]);
+ } else {
+ dev_info(drv.dev, "ACPU PVS: %s\n", pvs_names[tbl_idx]);
}
- drv.acpu_freq_tbl = pvs_tbl[tbl_idx];
+
+ return tbl_idx;
+}
+
+static void __init drv_data_init(struct device *dev,
+ const struct acpuclk_krait_params *params)
+{
+ int tbl_idx;
+
+ drv.dev = dev;
+ drv.scalable = kmemdup(params->scalable, params->scalable_size,
+ GFP_KERNEL);
+ BUG_ON(!drv.scalable);
+
+ drv.hfpll_data = kmemdup(params->hfpll_data, sizeof(*drv.hfpll_data),
+ GFP_KERNEL);
+ BUG_ON(!drv.hfpll_data);
+
+ drv.l2_freq_tbl = kmemdup(params->l2_freq_tbl, params->l2_freq_tbl_size,
+ GFP_KERNEL);
+ BUG_ON(!drv.l2_freq_tbl);
+
+ drv.bus_scale = kmemdup(params->bus_scale, sizeof(*drv.bus_scale),
+ GFP_KERNEL);
+ BUG_ON(!drv.bus_scale);
+ drv.bus_scale->usecase = kmemdup(drv.bus_scale->usecase,
+ drv.bus_scale->num_usecases * sizeof(*drv.bus_scale->usecase),
+ GFP_KERNEL);
+ BUG_ON(!drv.bus_scale->usecase);
+
+ tbl_idx = select_freq_plan(params->qfprom_phys_base);
+ drv.acpu_freq_tbl = kmemdup(params->pvs_tables[tbl_idx].table,
+ params->pvs_tables[tbl_idx].size,
+ GFP_KERNEL);
+ BUG_ON(!drv.acpu_freq_tbl);
+ drv.boost_uv = params->pvs_tables[tbl_idx].boost_uv;
+
+ acpuclk_krait_data.power_collapse_khz = params->stby_khz;
+ acpuclk_krait_data.wait_for_irq_khz = params->stby_khz;
+}
+
+static void __init hw_init(void)
+{
+ struct scalable *l2 = &drv.scalable[L2];
+ const struct l2_level *l2_level;
+ int cpu, rc;
if (krait_needs_vmin())
krait_apply_vmin(drv.acpu_freq_tbl);
- /* Find the max supported scaling frequency. */
- for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
- if (l->use_for_scaling)
- drv.max_acpu_lvl = l;
- BUG_ON(!drv.max_acpu_lvl);
- dev_info(drv.dev, "Max ACPU freq: %lu KHz\n",
- drv.max_acpu_lvl->speed.khz);
-}
-
-static struct acpuclk_data acpuclk_krait_data = {
- .set_rate = acpuclk_krait_set_rate,
- .get_rate = acpuclk_krait_get_rate,
- .power_collapse_khz = STBY_KHZ,
- .wait_for_irq_khz = STBY_KHZ,
-};
-
-int __init acpuclk_krait_init(struct device *dev,
- const struct acpuclk_krait_params *params)
-{
- struct scalable *l2;
- int cpu, rc;
-
- drv.scalable = params->scalable;
- drv.l2_freq_tbl = params->l2_freq_tbl;
- drv.dev = dev;
-
- select_freq_plan(params->pvs_acpu_freq_tbl, params->qfprom_phys_base);
- bus_init(params->bus_scale_data);
-
- l2 = &drv.scalable[L2];
l2->hfpll_base = ioremap(l2->hfpll_phys_base, SZ_32);
BUG_ON(!l2->hfpll_base);
@@ -879,7 +1032,17 @@
l2->vreg[VREG_HFPLL_B].max_vdd, false);
BUG_ON(rc);
- rc = init_clock_sources(l2, &drv.max_acpu_lvl->l2_level->speed);
+ l2_level = find_cur_l2_level();
+ if (!l2_level) {
+ l2_level = drv.l2_freq_tbl;
+ dev_dbg(drv.dev, "L2 is running at an unknown rate. Defaulting to %lu KHz.\n",
+ l2_level->speed.khz);
+ } else {
+ dev_dbg(drv.dev, "L2 is running at %lu KHz\n",
+ l2_level->speed.khz);
+ }
+
+ rc = init_clock_sources(l2, &l2_level->speed);
BUG_ON(rc);
for_each_online_cpu(cpu) {
@@ -887,8 +1050,16 @@
BUG_ON(rc);
}
- cpufreq_table_init();
+ bus_init(l2_level);
+}
+int __init acpuclk_krait_init(struct device *dev,
+ const struct acpuclk_krait_params *params)
+{
+ drv_data_init(dev, params);
+ hw_init();
+
+ cpufreq_table_init();
acpuclk_register(&acpuclk_krait_data);
register_hotcpu_notifier(&acpuclk_cpu_notifier);
diff --git a/arch/arm/mach-msm/acpuclock-krait.h b/arch/arm/mach-msm/acpuclock-krait.h
index f121548..5a95e76 100644
--- a/arch/arm/mach-msm/acpuclock-krait.h
+++ b/arch/arm/mach-msm/acpuclock-krait.h
@@ -14,8 +14,7 @@
#ifndef __ARCH_ARM_MACH_MSM_ACPUCLOCK_KRAIT_H
#define __ARCH_ARM_MACH_MSM_ACPUCLOCK_KRAIT_H
-#define STBY_KHZ 1
-
+#define L2(x) (x)
#define BW_MBPS(_bw) \
{ \
.vectors = (struct msm_bus_vectors[]){ \
@@ -39,7 +38,6 @@
enum src_id {
PLL_0 = 0,
HFPLL,
- QSB,
PLL_8,
};
@@ -50,6 +48,7 @@
PVS_SLOW = 0,
PVS_NOMINAL,
PVS_FAST,
+ PVS_FASTER,
PVS_UNKNOWN,
NUM_PVS
};
@@ -73,6 +72,7 @@
HFPLL_VDD_NONE,
HFPLL_VDD_LOW,
HFPLL_VDD_NOM,
+ HFPLL_VDD_HIGH,
NUM_HFPLL_VDD
};
@@ -95,15 +95,15 @@
* @reg: Regulator handle.
* @rpm_reg: RPM Regulator handle.
* @cur_vdd: Last-set voltage in uV.
- * @peak_ua: Maximum current draw expected in uA.
+ * @cur_ua: Last-set current in uA.
*/
struct vreg {
const char *name;
const int max_vdd;
- const int peak_ua;
struct regulator *reg;
struct rpm_regulator *rpm_reg;
int cur_vdd;
+ int cur_ua;
};
/**
@@ -115,11 +115,11 @@
* @pll_l_val: HFPLL "L" value to be applied when an HFPLL source is selected.
*/
struct core_speed {
- const unsigned long khz;
- const int src;
- const u32 pri_src_sel;
- const u32 sec_src_sel;
- const u32 pll_l_val;
+ unsigned long khz;
+ int src;
+ u32 pri_src_sel;
+ u32 sec_src_sel;
+ u32 pll_l_val;
};
/**
@@ -142,12 +142,14 @@
* @speed: CPU clock configuration.
* @l2_level: L2 configuration to use.
* @vdd_core: CPU core voltage in uV.
+ * @ua_core: CPU core current consumption in uA.
*/
struct acpu_level {
const int use_for_scaling;
const struct core_speed speed;
- const struct l2_level *l2_level;
+ const unsigned int l2_level;
int vdd_core;
+ int ua_core;
};
/**
@@ -158,11 +160,17 @@
* @n_offset: "N" value register offset from base address.
* @config_offset: Configuration register offset from base address.
* @config_val: Value to initialize the @config_offset register to.
+ * @has_user_reg: Indicates the presence of an additional config register.
+ * @user_offset: User register offset from base address, if applicable.
+ * @user_val: Value to initialize the @user_offset register to.
+ * @user_vco_mask: Bit in the @user_offset to enable high-frequency VCO mode.
* @has_droop_ctl: Indicates the presence of a voltage droop controller.
* @droop_offset: Droop controller register offset from base address.
* @droop_val: Value to initialize the @config_offset register to.
* @low_vdd_l_max: Maximum "L" value supported at HFPLL_VDD_LOW.
- * @vdd: voltage requirements for each VDD level.
+ * @nom_vdd_l_max: Maximum "L" value supported at HFPLL_VDD_NOM.
+ * @low_vco_l_max: Maximum "L" value supported in low-frequency VCO mode.
+ * @vdd: voltage requirements for each VDD level for the L2 PLL.
*/
struct hfpll_data {
const u32 mode_offset;
@@ -171,10 +179,16 @@
const u32 n_offset;
const u32 config_offset;
const u32 config_val;
+ const bool has_user_reg;
+ const u32 user_offset;
+ const u32 user_val;
+ const u32 user_vco_mask;
const bool has_droop_ctl;
const u32 droop_offset;
const u32 droop_val;
const u32 low_vdd_l_max;
+ const u32 nom_vdd_l_max;
+ const u32 low_vco_l_max;
const int vdd[NUM_HFPLL_VDD];
};
@@ -185,7 +199,6 @@
* @aux_clk_sel_phys: Physical address of auxiliary MUX.
* @aux_clk_sel: Auxiliary mux input to select at boot.
* @l2cpmr_iaddr: Indirect address of the CPMR MUX/divider CP15 register.
- * @hfpll_data: Descriptive data of HFPLL hardware.
* @cur_speed: Pointer to currently-set speed.
* @l2_vote: L2 performance level vote associate with the current CPU speed.
* @vreg: Array of voltage regulators needed by the scalable.
@@ -197,29 +210,46 @@
const phys_addr_t aux_clk_sel_phys;
const u32 aux_clk_sel;
const u32 l2cpmr_iaddr;
- const struct hfpll_data *hfpll_data;
const struct core_speed *cur_speed;
- const struct l2_level *l2_vote;
+ unsigned int l2_vote;
struct vreg vreg[NUM_VREG];
bool initialized;
};
/**
+ * struct pvs_table - CPU performance level table and size.
+ * @table: CPU performance level table
+ * @size: sizeof(@table)
+ * @boost_uv: Voltage boost amount
+ */
+struct pvs_table {
+ struct acpu_level *table;
+ size_t size;
+ int boost_uv;
+};
+
+/**
* struct acpuclk_krait_params - SoC specific driver parameters.
* @scalable: Array of scalables.
- * @pvs_acpu_freq_tbl: Array of CPU frequency tables.
+ * @scalable_size: Size of @scalable.
+ * @hfpll_data: HFPLL configuration data.
+ * @pvs_tables: CPU frequency tables.
* @l2_freq_tbl: L2 frequency table.
- * @l2_freq_tbl_size: Number of rows in @l2_freq_tbl.
+ * @l2_freq_tbl_size: Size of @l2_freq_tbl.
* @qfprom_phys_base: Physical base address of QFPROM.
- * @bus_scale_data: MSM bus driver parameters.
+ * @bus_scale: MSM bus driver parameters.
+ * @stby_khz: KHz value corresponding to an always-on clock source.
*/
struct acpuclk_krait_params {
struct scalable *scalable;
- struct acpu_level *pvs_acpu_freq_tbl[NUM_PVS];
- const struct l2_level *l2_freq_tbl;
- const size_t l2_freq_tbl_size;
- const phys_addr_t qfprom_phys_base;
- struct msm_bus_scale_pdata *bus_scale_data;
+ size_t scalable_size;
+ struct hfpll_data *hfpll_data;
+ struct pvs_table *pvs_tables;
+ struct l2_level *l2_freq_tbl;
+ size_t l2_freq_tbl_size;
+ phys_addr_t qfprom_phys_base;
+ struct msm_bus_scale_pdata *bus_scale;
+ unsigned long stby_khz;
};
/**
diff --git a/arch/arm/mach-msm/acpuclock.h b/arch/arm/mach-msm/acpuclock.h
index e73a2af..841f717 100644
--- a/arch/arm/mach-msm/acpuclock.h
+++ b/arch/arm/mach-msm/acpuclock.h
@@ -90,4 +90,4 @@
*/
void acpuclk_register(struct acpuclk_data *data);
-#endif
+#endif /*__ARCH_ARM_MACH_MSM_ACPUCLOCK_H*/
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index 813824e..b35e949 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -283,6 +283,9 @@
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;
+static void bam_dmux_log(const char *fmt, ...)
+ __printf(1, 2);
+
#define DMUX_LOG_KERR(fmt...) \
do { \
@@ -569,7 +572,8 @@
rx_hdr->ch_id);
handle_bam_mux_cmd_open(rx_hdr);
if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
- bam_dmux_log("%s: deactivating disconnect ack\n");
+ bam_dmux_log("%s: deactivating disconnect ack\n",
+ __func__);
disconnect_ack = 0;
}
dev_kfree_skb_any(rx_skb);
@@ -1912,7 +1916,7 @@
ul_powerdown_finish();
a2_pc_disabled = 0;
a2_pc_disabled_wakelock_skipped = 0;
- disconnect_ack = 0;
+ disconnect_ack = 1;
/* Cleanup Channel States */
mutex_lock(&bam_pdev_mutexlock);
diff --git a/arch/arm/mach-msm/board-8064-camera.c b/arch/arm/mach-msm/board-8064-camera.c
index c37491d..40995bb 100644
--- a/arch/arm/mach-msm/board-8064-camera.c
+++ b/arch/arm/mach-msm/board-8064-camera.c
@@ -243,7 +243,7 @@
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
- .ab = 140451840,
+ .ab = 274406400,
.ib = 561807360,
},
{
@@ -302,6 +302,27 @@
},
};
+static struct msm_bus_vectors cam_video_ls_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 348192000,
+ .ib = 617103360,
+ },
+ {
+ .src = MSM_BUS_MASTER_VPE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 206807040,
+ .ib = 488816640,
+ },
+ {
+ .src = MSM_BUS_MASTER_JPEG_ENC,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 540000000,
+ .ib = 1350000000,
+ },
+};
+
static struct msm_bus_paths cam_bus_client_config[] = {
{
ARRAY_SIZE(cam_init_vectors),
@@ -323,6 +344,10 @@
ARRAY_SIZE(cam_zsl_vectors),
cam_zsl_vectors,
},
+ {
+ ARRAY_SIZE(cam_video_ls_vectors),
+ cam_video_ls_vectors,
+ },
};
static struct msm_bus_scale_pdata cam_bus_client_pdata = {
diff --git a/arch/arm/mach-msm/board-8064-display.c b/arch/arm/mach-msm/board-8064-display.c
index 177e1fd..330d7a8 100644
--- a/arch/arm/mach-msm/board-8064-display.c
+++ b/arch/arm/mach-msm/board-8064-display.c
@@ -1045,7 +1045,8 @@
}
}
-void __init apq8064_set_display_params(char *prim_panel, char *ext_panel)
+void __init apq8064_set_display_params(char *prim_panel, char *ext_panel,
+ unsigned char resolution)
{
/*
* For certain MPQ boards, HDMI should be set as primary display
@@ -1080,4 +1081,6 @@
pr_debug("msm_fb_pdata.ext_panel_name %s\n",
msm_fb_pdata.ext_panel_name);
}
+
+ msm_fb_pdata.ext_resolution = resolution;
}
diff --git a/arch/arm/mach-msm/board-8064-gpu.c b/arch/arm/mach-msm/board-8064-gpu.c
index eb36a81e..122505e 100644
--- a/arch/arm/mach-msm/board-8064-gpu.c
+++ b/arch/arm/mach-msm/board-8064-gpu.c
@@ -225,6 +225,7 @@
.set_grp_async = NULL,
.idle_timeout = HZ/10,
.nap_allowed = true,
+ .strtstp_sleepwake = true,
.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &grp3d_bus_scale_pdata,
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index 43a79b5..879434d 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -127,6 +127,7 @@
/* TABLA CODEC RESET */
PM8921_GPIO_OUTPUT(34, 1, MED),
PM8921_GPIO_OUTPUT(13, 0, HIGH), /* PCIE_CLK_PWR_EN */
+ PM8921_GPIO_INPUT(12, PM_GPIO_PULL_UP_30), /* PCIE_WAKE_N */
};
static struct pm8xxx_gpio_init pm8921_mtp_kp_gpios[] __initdata = {
@@ -439,10 +440,7 @@
apq8064_pm8921_platform_data.num_regulators =
msm8064_pm8921_regulator_pdata_len;
- if (machine_is_apq8064_rumi3()) {
- apq8064_pm8921_irq_pdata.devirq = 0;
- apq8064_pm8821_irq_pdata.devirq = 0;
- } else if (machine_is_apq8064_mtp()) {
+ if (machine_is_apq8064_mtp()) {
apq8064_pm8921_bms_pdata.battery_type = BATT_PALLADIUM;
} else if (machine_is_apq8064_liquid()) {
apq8064_pm8921_bms_pdata.battery_type = BATT_DESAY;
diff --git a/arch/arm/mach-msm/board-8064-regulator.c b/arch/arm/mach-msm/board-8064-regulator.c
index f727852..a84cb39 100644
--- a/arch/arm/mach-msm/board-8064-regulator.c
+++ b/arch/arm/mach-msm/board-8064-regulator.c
@@ -179,11 +179,11 @@
};
VREG_CONSUMERS(S5) = {
REGULATOR_SUPPLY("8921_s5", NULL),
- REGULATOR_SUPPLY("krait0", NULL),
+ REGULATOR_SUPPLY("krait0", "acpuclk-8064"),
};
VREG_CONSUMERS(S6) = {
REGULATOR_SUPPLY("8921_s6", NULL),
- REGULATOR_SUPPLY("krait1", NULL),
+ REGULATOR_SUPPLY("krait1", "acpuclk-8064"),
};
VREG_CONSUMERS(S7) = {
REGULATOR_SUPPLY("8921_s7", NULL),
@@ -221,7 +221,6 @@
REGULATOR_SUPPLY("8921_lvs7", NULL),
REGULATOR_SUPPLY("pll_vdd", "pil_riva"),
REGULATOR_SUPPLY("lvds_vdda", "lvds.0"),
- REGULATOR_SUPPLY("hdmi_pll_fs", "mdp.0"),
REGULATOR_SUPPLY("dsi1_vddio", "mipi_dsi.1"),
REGULATOR_SUPPLY("hdmi_vdda", "hdmi_msm.0"),
};
@@ -238,11 +237,11 @@
};
VREG_CONSUMERS(8821_S0) = {
REGULATOR_SUPPLY("8821_s0", NULL),
- REGULATOR_SUPPLY("krait2", NULL),
+ REGULATOR_SUPPLY("krait2", "acpuclk-8064"),
};
VREG_CONSUMERS(8821_S1) = {
REGULATOR_SUPPLY("8821_s1", NULL),
- REGULATOR_SUPPLY("krait3", NULL),
+ REGULATOR_SUPPLY("krait3", "acpuclk-8064"),
};
VREG_CONSUMERS(EXT_5V) = {
REGULATOR_SUPPLY("ext_5v", NULL),
@@ -607,10 +606,37 @@
int msm8064_pm8921_regulator_pdata_len __devinitdata =
ARRAY_SIZE(msm8064_pm8921_regulator_pdata);
+#define RPM_REG_MAP(_id, _sleep_also, _voter, _supply, _dev_name) \
+ { \
+ .vreg_id = RPM_VREG_ID_PM8921_##_id, \
+ .sleep_also = _sleep_also, \
+ .voter = _voter, \
+ .supply = _supply, \
+ .dev_name = _dev_name, \
+ }
+static struct rpm_regulator_consumer_mapping
+ msm_rpm_regulator_consumer_mapping[] __devinitdata = {
+ RPM_REG_MAP(LVS7, 0, 1, "krait0_hfpll", "acpuclk-8064"),
+ RPM_REG_MAP(LVS7, 0, 2, "krait1_hfpll", "acpuclk-8064"),
+ RPM_REG_MAP(LVS7, 0, 4, "krait2_hfpll", "acpuclk-8064"),
+ RPM_REG_MAP(LVS7, 0, 5, "krait3_hfpll", "acpuclk-8064"),
+ RPM_REG_MAP(LVS7, 0, 6, "l2_hfpll", "acpuclk-8064"),
+ RPM_REG_MAP(L24, 0, 1, "krait0_mem", "acpuclk-8064"),
+ RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8064"),
+ RPM_REG_MAP(L24, 0, 4, "krait2_mem", "acpuclk-8064"),
+ RPM_REG_MAP(L24, 0, 5, "krait3_mem", "acpuclk-8064"),
+ RPM_REG_MAP(S3, 0, 1, "krait0_dig", "acpuclk-8064"),
+ RPM_REG_MAP(S3, 0, 2, "krait1_dig", "acpuclk-8064"),
+ RPM_REG_MAP(S3, 0, 4, "krait2_dig", "acpuclk-8064"),
+ RPM_REG_MAP(S3, 0, 5, "krait3_dig", "acpuclk-8064"),
+};
+
struct rpm_regulator_platform_data apq8064_rpm_regulator_pdata __devinitdata = {
.init_data = apq8064_rpm_regulator_init_data,
.num_regulators = ARRAY_SIZE(apq8064_rpm_regulator_init_data),
.version = RPM_VREG_VERSION_8960,
.vreg_id_vdd_mem = RPM_VREG_ID_PM8921_L24,
.vreg_id_vdd_dig = RPM_VREG_ID_PM8921_S3,
+ .consumer_map = msm_rpm_regulator_consumer_mapping,
+ .consumer_map_len = ARRAY_SIZE(msm_rpm_regulator_consumer_mapping),
};
diff --git a/arch/arm/mach-msm/board-8064-storage.c b/arch/arm/mach-msm/board-8064-storage.c
index 13d8b3b..c81a637 100644
--- a/arch/arm/mach-msm/board-8064-storage.c
+++ b/arch/arm/mach-msm/board-8064-storage.c
@@ -331,21 +331,6 @@
void __init apq8064_init_mmc(void)
{
- if ((machine_is_apq8064_rumi3()) || machine_is_apq8064_sim()) {
- if (apq8064_sdc1_pdata) {
- if (machine_is_apq8064_sim())
- apq8064_sdc1_pdata->disable_bam = true;
- apq8064_sdc1_pdata->disable_runtime_pm = true;
- apq8064_sdc1_pdata->disable_cmd23 = true;
- }
- if (apq8064_sdc3_pdata) {
- if (machine_is_apq8064_sim())
- apq8064_sdc3_pdata->disable_bam = true;
- apq8064_sdc3_pdata->disable_runtime_pm = true;
- apq8064_sdc3_pdata->disable_cmd23 = true;
- }
- }
-
if (apq8064_sdc1_pdata)
apq8064_add_sdcc(1, apq8064_sdc1_pdata);
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 08c3408..90563ad 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -128,7 +128,8 @@
#define PCIE_AXI_BAR_PHYS 0x08000000
#define PCIE_AXI_BAR_SIZE SZ_128M
-/* PCIe power enable pmic gpio */
+/* PCIe pmic gpios */
+#define PCIE_WAKE_N_PMIC_GPIO 12
#define PCIE_PWR_EN_PMIC_GPIO 13
#define PCIE_RST_N_PMIC_MPP 1
@@ -657,6 +658,9 @@
static char prim_panel_name[PANEL_NAME_MAX_LEN];
static char ext_panel_name[PANEL_NAME_MAX_LEN];
+
+static int ext_resolution;
+
static int __init prim_display_setup(char *param)
{
if (strnlen(param, PANEL_NAME_MAX_LEN))
@@ -673,9 +677,18 @@
}
early_param("ext_display", ext_display_setup);
+static int __init hdmi_resolution_setup(char *param)
+{
+ int ret;
+ ret = kstrtoint(param, 10, &ext_resolution);
+ return ret;
+}
+early_param("ext_resolution", hdmi_resolution_setup);
+
static void __init apq8064_reserve(void)
{
- apq8064_set_display_params(prim_panel_name, ext_panel_name);
+ apq8064_set_display_params(prim_panel_name, ext_panel_name,
+ ext_resolution);
msm_reserve();
if (apq8064_fmem_pdata.size) {
#if defined(CONFIG_ION_MSM) && defined(CONFIG_MSM_MULTIMEDIA_USE_ION)
@@ -1739,10 +1752,10 @@
static struct msm_thermal_data msm_thermal_pdata = {
.sensor_id = 7,
- .poll_ms = 1000,
- .limit_temp = 60,
- .temp_hysteresis = 10,
- .limit_freq = 918000,
+ .poll_ms = 250,
+ .limit_temp_degC = 60,
+ .temp_hysteresis_degC = 10,
+ .freq_step = 2,
};
#define MSM_SHARED_RAM_PHYS 0x80000000
@@ -2037,12 +2050,6 @@
},
};
-static struct msm_pm_sleep_status_data msm_pm_slp_sts_data = {
- .base_addr = MSM_ACC0_BASE + 0x08,
- .cpu_offset = MSM_ACC1_BASE - MSM_ACC0_BASE,
- .mask = 1UL << 13,
-};
-
static void __init apq8064_init_buses(void)
{
msm_bus_rpm_set_mt_mask();
@@ -2069,6 +2076,7 @@
.gpio = msm_pcie_gpio_info,
.axi_addr = PCIE_AXI_BAR_PHYS,
.axi_size = PCIE_AXI_BAR_SIZE,
+ .wake_n = PM8921_GPIO_IRQ(PM8921_IRQ_BASE, PCIE_WAKE_N_PMIC_GPIO),
};
static int __init mpq8064_pcie_enabled(void)
@@ -2148,7 +2156,7 @@
};
static struct platform_device *common_devices[] __initdata = {
- &msm8960_device_acpuclk,
+ &apq8064_device_acpuclk,
&apq8064_device_dmov,
&apq8064_device_qup_spi_gsbi5,
&apq8064_device_ext_5v_vreg,
@@ -2184,6 +2192,9 @@
&qseecom_device,
#endif
+ &msm_8064_device_tsif[0],
+ &msm_8064_device_tsif[1],
+
#if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \
defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE)
&qcrypto_device,
@@ -2247,7 +2258,6 @@
&msm_gss,
&apq8064_rtb_device,
&apq8064_cpu_idle_device,
- &apq8064_msm_gov_device,
&apq8064_device_cache_erp,
&msm8960_device_ebi1_ch0_erp,
&msm8960_device_ebi1_ch1_erp,
@@ -2267,19 +2277,6 @@
&apq8064_cache_dump_device,
};
-static struct platform_device *sim_devices[] __initdata = {
- &apq8064_device_uart_gsbi3,
- &msm_device_sps_apq8064,
-};
-
-static struct platform_device *rumi3_devices[] __initdata = {
- &apq8064_device_uart_gsbi1,
- &msm_device_sps_apq8064,
-#ifdef CONFIG_MSM_ROTATOR
- &msm_rotator_device,
-#endif
-};
-
static struct platform_device *cdp_devices[] __initdata = {
&apq8064_device_uart_gsbi1,
&apq8064_device_uart_gsbi7,
@@ -2841,10 +2838,6 @@
mach_mask = I2C_FFA;
else if (machine_is_apq8064_liquid())
mach_mask = I2C_LIQUID;
- else if (machine_is_apq8064_rumi3())
- mach_mask = I2C_RUMI;
- else if (machine_is_apq8064_sim())
- mach_mask = I2C_SIM;
else if (PLATFORM_IS_MPQ8064())
mach_mask = I2C_MPQ_CDP;
else
@@ -2953,7 +2946,6 @@
msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
msm_spm_l2_init(msm_spm_l2_data);
BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
- msm_pm_init_sleep_status_data(&msm_pm_slp_sts_data);
apq8064_epm_adc_init();
}
@@ -2962,25 +2954,6 @@
apq8064_allocate_fb_region();
}
-static void __init apq8064_sim_init(void)
-{
- struct msm_watchdog_pdata *wdog_pdata = (struct msm_watchdog_pdata *)
- &msm8064_device_watchdog.dev.platform_data;
-
- wdog_pdata->bark_time = 15000;
- apq8064_common_init();
- platform_add_devices(sim_devices, ARRAY_SIZE(sim_devices));
-}
-
-static void __init apq8064_rumi3_init(void)
-{
- apq8064_common_init();
- ethernet_init();
- msm_rotator_set_split_iommu_domain();
- platform_add_devices(rumi3_devices, ARRAY_SIZE(rumi3_devices));
- spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
-}
-
static void __init apq8064_cdp_init(void)
{
if (meminfo_init(SYS_MEMORY, SZ_256M) < 0)
@@ -3020,27 +2993,6 @@
}
}
-MACHINE_START(APQ8064_SIM, "QCT APQ8064 SIMULATOR")
- .map_io = apq8064_map_io,
- .reserve = apq8064_reserve,
- .init_irq = apq8064_init_irq,
- .handle_irq = gic_handle_irq,
- .timer = &msm_timer,
- .init_machine = apq8064_sim_init,
- .restart = msm_restart,
-MACHINE_END
-
-MACHINE_START(APQ8064_RUMI3, "QCT APQ8064 RUMI3")
- .map_io = apq8064_map_io,
- .reserve = apq8064_reserve,
- .init_irq = apq8064_init_irq,
- .handle_irq = gic_handle_irq,
- .timer = &msm_timer,
- .init_machine = apq8064_rumi3_init,
- .init_early = apq8064_allocate_memory_regions,
- .restart = msm_restart,
-MACHINE_END
-
MACHINE_START(APQ8064_CDP, "QCT APQ8064 CDP")
.map_io = apq8064_map_io,
.reserve = apq8064_reserve,
diff --git a/arch/arm/mach-msm/board-8064.h b/arch/arm/mach-msm/board-8064.h
index a241ab3..2258b8d 100644
--- a/arch/arm/mach-msm/board-8064.h
+++ b/arch/arm/mach-msm/board-8064.h
@@ -87,7 +87,8 @@
void apq8064_init_fb(void);
void apq8064_allocate_fb_region(void);
void apq8064_mdp_writeback(struct memtype_reserve *reserve_table);
-void __init apq8064_set_display_params(char *prim_panel, char *ext_panel);
+void __init apq8064_set_display_params(char *prim_panel, char *ext_panel,
+ unsigned char resolution);
void apq8064_init_gpu(void);
void apq8064_pm8xxx_gpio_mpp_init(void);
diff --git a/arch/arm/mach-msm/board-8930-camera.c b/arch/arm/mach-msm/board-8930-camera.c
index 6ee315c..d3e37cd 100644
--- a/arch/arm/mach-msm/board-8930-camera.c
+++ b/arch/arm/mach-msm/board-8930-camera.c
@@ -251,7 +251,7 @@
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
- .ab = 140451840,
+ .ab = 274406400,
.ib = 561807360,
},
{
@@ -310,6 +310,27 @@
},
};
+static struct msm_bus_vectors cam_video_ls_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 348192000,
+ .ib = 617103360,
+ },
+ {
+ .src = MSM_BUS_MASTER_VPE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 206807040,
+ .ib = 488816640,
+ },
+ {
+ .src = MSM_BUS_MASTER_JPEG_ENC,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 540000000,
+ .ib = 1350000000,
+ },
+};
+
static struct msm_bus_paths cam_bus_client_config[] = {
{
ARRAY_SIZE(cam_init_vectors),
@@ -331,6 +352,10 @@
ARRAY_SIZE(cam_zsl_vectors),
cam_zsl_vectors,
},
+ {
+ ARRAY_SIZE(cam_video_ls_vectors),
+ cam_video_ls_vectors,
+ },
};
static struct msm_bus_scale_pdata cam_bus_client_pdata = {
diff --git a/arch/arm/mach-msm/board-8930-display.c b/arch/arm/mach-msm/board-8930-display.c
index 739dc85..d975997 100644
--- a/arch/arm/mach-msm/board-8930-display.c
+++ b/arch/arm/mach-msm/board-8930-display.c
@@ -131,6 +131,7 @@
* appropriate function.
*/
#define DISP_RST_GPIO 58
+#define DISP_3D_2D_MODE 1
static int mipi_dsi_cdp_panel_power(int on)
{
static struct regulator *reg_l8, *reg_l23, *reg_l2;
@@ -183,6 +184,19 @@
gpio_free(DISP_RST_GPIO);
return -ENODEV;
}
+ rc = gpio_request(DISP_3D_2D_MODE, "disp_3d_2d");
+ if (rc) {
+ pr_err("request gpio DISP_3D_2D_MODE failed, rc=%d\n",
+ rc);
+ gpio_free(DISP_3D_2D_MODE);
+ return -ENODEV;
+ }
+ rc = gpio_direction_output(DISP_3D_2D_MODE, 0);
+ if (rc) {
+ pr_err("gpio_direction_output failed for %d gpio rc=%d\n",
+ DISP_3D_2D_MODE, rc);
+ return -ENODEV;
+ }
dsi_power_on = true;
}
if (on) {
@@ -222,6 +236,8 @@
gpio_set_value(DISP_RST_GPIO, 0);
usleep(20);
gpio_set_value(DISP_RST_GPIO, 1);
+ gpio_set_value(DISP_3D_2D_MODE, 1);
+ usleep(20);
} else {
gpio_set_value(DISP_RST_GPIO, 0);
@@ -256,6 +272,8 @@
pr_err("set_optimum_mode l2 failed, rc=%d\n", rc);
return -EINVAL;
}
+ gpio_set_value(DISP_3D_2D_MODE, 0);
+ usleep(20);
}
return 0;
}
@@ -464,16 +482,16 @@
static struct mipi_dsi_phy_ctrl dsi_novatek_cmd_mode_phy_db = {
/* DSI_BIT_CLK at 500MHz, 2 lane, RGB888 */
- {0x0F, 0x0a, 0x04, 0x00, 0x20}, /* regulator */
+ {0x09, 0x08, 0x05, 0x00, 0x20}, /* regulator */
/* timing */
{0xab, 0x8a, 0x18, 0x00, 0x92, 0x97, 0x1b, 0x8c,
0x0c, 0x03, 0x04, 0xa0},
{0x5f, 0x00, 0x00, 0x10}, /* phy ctrl */
{0xff, 0x00, 0x06, 0x00}, /* strength */
/* pll control */
- {0x40, 0xf9, 0x30, 0xda, 0x00, 0x40, 0x03, 0x62,
+ {0x0, 0xe, 0x30, 0xda, 0x00, 0x10, 0x0f, 0x61,
0x40, 0x07, 0x03,
- 0x00, 0x1a, 0x00, 0x00, 0x02, 0x00, 0x20, 0x00, 0x01},
+ 0x00, 0x1a, 0x00, 0x00, 0x02, 0x00, 0x20, 0x00, 0x02},
};
static struct mipi_dsi_panel_platform_data novatek_pdata = {
diff --git a/arch/arm/mach-msm/board-8930-gpu.c b/arch/arm/mach-msm/board-8930-gpu.c
index 105a5aa..99a5a34 100644
--- a/arch/arm/mach-msm/board-8930-gpu.c
+++ b/arch/arm/mach-msm/board-8930-gpu.c
@@ -140,6 +140,7 @@
.set_grp_async = NULL,
.idle_timeout = HZ/12,
.nap_allowed = true,
+ .strtstp_sleepwake = true,
.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &grp3d_bus_scale_pdata,
@@ -162,8 +163,8 @@
{
unsigned int version = socinfo_get_version();
- if (cpu_is_msm8627())
- kgsl_3d0_pdata.pwrlevel[0].gpu_freq = 400000000;
+ if (cpu_is_msm8930aa())
+ kgsl_3d0_pdata.pwrlevel[0].gpu_freq = 450000000;
if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
(SOCINFO_VERSION_MINOR(version) == 2))
diff --git a/arch/arm/mach-msm/board-8930-regulator.c b/arch/arm/mach-msm/board-8930-regulator.c
index f06a1b7..d3a4960 100644
--- a/arch/arm/mach-msm/board-8930-regulator.c
+++ b/arch/arm/mach-msm/board-8930-regulator.c
@@ -172,11 +172,15 @@
};
VREG_CONSUMERS(S5) = {
REGULATOR_SUPPLY("8038_s5", NULL),
- REGULATOR_SUPPLY("krait0", NULL),
+ REGULATOR_SUPPLY("krait0", "acpuclk-8627"),
+ REGULATOR_SUPPLY("krait0", "acpuclk-8930"),
+ REGULATOR_SUPPLY("krait0", "acpuclk-8930aa"),
};
VREG_CONSUMERS(S6) = {
REGULATOR_SUPPLY("8038_s6", NULL),
- REGULATOR_SUPPLY("krait1", NULL),
+ REGULATOR_SUPPLY("krait1", "acpuclk-8627"),
+ REGULATOR_SUPPLY("krait1", "acpuclk-8930"),
+ REGULATOR_SUPPLY("krait1", "acpuclk-8930aa"),
};
VREG_CONSUMERS(LVS1) = {
REGULATOR_SUPPLY("8038_lvs1", NULL),
@@ -511,10 +515,47 @@
int msm8930_pm8038_regulator_pdata_len __devinitdata =
ARRAY_SIZE(msm8930_pm8038_regulator_pdata);
+#define RPM_REG_MAP(_id, _sleep_also, _voter, _supply, _dev_name) \
+ { \
+ .vreg_id = RPM_VREG_ID_PM8038_##_id, \
+ .sleep_also = _sleep_also, \
+ .voter = _voter, \
+ .supply = _supply, \
+ .dev_name = _dev_name, \
+ }
+static struct rpm_regulator_consumer_mapping
+ msm_rpm_regulator_consumer_mapping[] __devinitdata = {
+ RPM_REG_MAP(L23, 0, 1, "krait0_hfpll", "acpuclk-8930"),
+ RPM_REG_MAP(L23, 0, 2, "krait1_hfpll", "acpuclk-8930"),
+ RPM_REG_MAP(L23, 0, 6, "l2_hfpll", "acpuclk-8930"),
+ RPM_REG_MAP(L24, 0, 1, "krait0_mem", "acpuclk-8930"),
+ RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8930"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig", "acpuclk-8930"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig", "acpuclk-8930"),
+
+ RPM_REG_MAP(L23, 0, 1, "krait0_hfpll", "acpuclk-8627"),
+ RPM_REG_MAP(L23, 0, 2, "krait1_hfpll", "acpuclk-8627"),
+ RPM_REG_MAP(L23, 0, 6, "l2_hfpll", "acpuclk-8627"),
+ RPM_REG_MAP(L24, 0, 1, "krait0_mem", "acpuclk-8627"),
+ RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8627"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig", "acpuclk-8627"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig", "acpuclk-8627"),
+
+ RPM_REG_MAP(L23, 0, 1, "krait0_hfpll", "acpuclk-8930aa"),
+ RPM_REG_MAP(L23, 0, 2, "krait1_hfpll", "acpuclk-8930aa"),
+ RPM_REG_MAP(L23, 0, 6, "l2_hfpll", "acpuclk-8930aa"),
+ RPM_REG_MAP(L24, 0, 1, "krait0_mem", "acpuclk-8930aa"),
+ RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8930aa"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig", "acpuclk-8930aa"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig", "acpuclk-8930aa"),
+};
+
struct rpm_regulator_platform_data msm8930_rpm_regulator_pdata __devinitdata = {
.init_data = msm8930_rpm_regulator_init_data,
.num_regulators = ARRAY_SIZE(msm8930_rpm_regulator_init_data),
.version = RPM_VREG_VERSION_8930,
.vreg_id_vdd_mem = RPM_VREG_ID_PM8038_L24,
.vreg_id_vdd_dig = RPM_VREG_ID_PM8038_VDD_DIG_CORNER,
+ .consumer_map = msm_rpm_regulator_consumer_mapping,
+ .consumer_map_len = ARRAY_SIZE(msm_rpm_regulator_consumer_mapping),
};
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index ec218ac..fb20307 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -47,6 +47,10 @@
#include <linux/mfd/wcd9xxx/core.h>
#include <linux/mfd/wcd9xxx/pdata.h>
+#ifdef CONFIG_STM_LIS3DH
+#include <linux/input/lis3dh.h>
+#endif
+
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/setup.h>
@@ -1460,6 +1464,9 @@
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &usb_bus_scale_pdata,
#endif
+#ifdef CONFIG_FB_MSM_HDMI_MHL_8334
+ .mhl_dev_name = "sii8334",
+#endif
};
#endif
@@ -2092,10 +2099,10 @@
static struct msm_thermal_data msm_thermal_pdata = {
.sensor_id = 9,
- .poll_ms = 1000,
- .limit_temp = 60,
- .temp_hysteresis = 10,
- .limit_freq = 918000,
+ .poll_ms = 250,
+ .limit_temp_degC = 60,
+ .temp_hysteresis_degC = 10,
+ .freq_step = 2,
};
#ifdef CONFIG_MSM_FAKE_BATTERY
@@ -2159,7 +2166,6 @@
};
static struct platform_device *common_devices[] __initdata = {
- &msm8960_device_acpuclk,
&msm8960_device_dmov,
&msm_device_smd,
&msm8960_device_uart_gsbi5,
@@ -2224,6 +2230,7 @@
#endif
&msm8930_rpm_device,
&msm8930_rpm_log_device,
+ &msm8930_rpm_rbcpr_device,
&msm8930_rpm_stat_device,
#ifdef CONFIG_ION_MSM
&msm8930_ion_dev,
@@ -2399,12 +2406,6 @@
.mode = MSM_PM_BOOT_CONFIG_TZ,
};
-static struct msm_pm_sleep_status_data msm_pm_slp_sts_data = {
- .base_addr = MSM_ACC0_BASE + 0x08,
- .cpu_offset = MSM_ACC1_BASE - MSM_ACC0_BASE,
- .mask = 1UL << 13,
-};
-
#ifdef CONFIG_I2C
#define I2C_SURF 1
#define I2C_FFA (1 << 1)
@@ -2455,6 +2456,31 @@
};
#endif /* CONFIG_ISL9519_CHARGER */
+#ifdef CONFIG_STM_LIS3DH
+static struct lis3dh_acc_platform_data lis3dh_accel = {
+ .poll_interval = 200,
+ .min_interval = 10,
+ .g_range = LIS3DH_ACC_G_2G,
+ .axis_map_x = 1,
+ .axis_map_y = 0,
+ .axis_map_z = 2,
+ .negate_x = 0,
+ .negate_y = 0,
+ .negate_z = 1,
+ .init = NULL,
+ .exit = NULL,
+ .gpio_int1 = -EINVAL,
+ .gpio_int2 = -EINVAL,
+};
+
+static struct i2c_board_info __initdata lis3dh_i2c_boardinfo[] = {
+ {
+ I2C_BOARD_INFO(LIS3DH_ACC_DEV_NAME, 0x18),
+ .platform_data = &lis3dh_accel,
+ },
+};
+#endif /* CONFIG_STM_LIS3DH */
+
static struct i2c_registry msm8960_i2c_devices[] __initdata = {
#ifdef CONFIG_ISL9519_CHARGER
{
@@ -2490,6 +2516,14 @@
sii_device_info,
ARRAY_SIZE(sii_device_info),
},
+#ifdef CONFIG_STM_LIS3DH
+ {
+ I2C_FFA | I2C_FLUID,
+ MSM_8930_GSBI12_QUP_I2C_BUS_ID,
+ lis3dh_i2c_boardinfo,
+ ARRAY_SIZE(lis3dh_i2c_boardinfo),
+ },
+#endif
};
#endif /* CONFIG_I2C */
@@ -2573,6 +2607,12 @@
msm_spm_l2_init(msm_spm_l2_data);
msm8930_init_buses();
platform_add_devices(msm8930_footswitch, msm8930_num_footswitch);
+ if (cpu_is_msm8627())
+ platform_device_register(&msm8627_device_acpuclk);
+ else if (cpu_is_msm8930())
+ platform_device_register(&msm8930_device_acpuclk);
+ else if (cpu_is_msm8930aa())
+ platform_device_register(&msm8930aa_device_acpuclk);
platform_add_devices(common_devices, ARRAY_SIZE(common_devices));
msm8930_add_vidc_device();
/*
@@ -2597,7 +2637,6 @@
ARRAY_SIZE(msm_slim_devices));
change_memory_power = &msm8930_change_memory_power;
BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
- msm_pm_init_sleep_status_data(&msm_pm_slp_sts_data);
if (PLATFORM_IS_CHARM25())
platform_add_devices(mdm_devices, ARRAY_SIZE(mdm_devices));
diff --git a/arch/arm/mach-msm/board-8960-camera.c b/arch/arm/mach-msm/board-8960-camera.c
index b6c03a4..a21c4c3 100644
--- a/arch/arm/mach-msm/board-8960-camera.c
+++ b/arch/arm/mach-msm/board-8960-camera.c
@@ -272,7 +272,7 @@
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
- .ab = 154275840,
+ .ab = 274406400,
.ib = 617103360,
},
{
@@ -367,6 +367,40 @@
},
};
+static struct msm_bus_vectors cam_video_ls_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 348192000,
+ .ib = 617103360,
+ },
+ {
+ .src = MSM_BUS_MASTER_VPE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 206807040,
+ .ib = 488816640,
+ },
+ {
+ .src = MSM_BUS_MASTER_JPEG_ENC,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 540000000,
+ .ib = 1350000000,
+ },
+ {
+ .src = MSM_BUS_MASTER_JPEG_ENC,
+ .dst = MSM_BUS_SLAVE_MM_IMEM,
+ .ab = 0,
+ .ib = 0,
+ },
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_MM_IMEM,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+
static struct msm_bus_paths cam_bus_client_config[] = {
{
ARRAY_SIZE(cam_init_vectors),
@@ -388,6 +422,10 @@
ARRAY_SIZE(cam_zsl_vectors),
cam_zsl_vectors,
},
+ {
+ ARRAY_SIZE(cam_video_ls_vectors),
+ cam_video_ls_vectors,
+ },
};
static struct msm_bus_scale_pdata cam_bus_client_pdata = {
diff --git a/arch/arm/mach-msm/board-8960-display.c b/arch/arm/mach-msm/board-8960-display.c
index 88827ab..ddeba32 100644
--- a/arch/arm/mach-msm/board-8960-display.c
+++ b/arch/arm/mach-msm/board-8960-display.c
@@ -615,16 +615,6 @@
return mdp_pdata.cont_splash_enabled;
}
-static struct platform_device mipi_dsi_renesas_panel_device = {
- .name = "mipi_renesas",
- .id = 0,
-};
-
-static struct platform_device mipi_dsi_simulator_panel_device = {
- .name = "mipi_simulator",
- .id = 0,
-};
-
#define LPM_CHANNEL0 0
static int toshiba_gpio[] = {LPM_CHANNEL0};
@@ -1015,31 +1005,19 @@
platform_device_register(&wfd_device);
#endif
- if (machine_is_msm8960_sim())
- platform_device_register(&mipi_dsi_simulator_panel_device);
-
- if (machine_is_msm8960_rumi3())
- platform_device_register(&mipi_dsi_renesas_panel_device);
-
- if (!machine_is_msm8960_sim() && !machine_is_msm8960_rumi3()) {
- platform_device_register(&mipi_dsi_novatek_panel_device);
- platform_device_register(&mipi_dsi_orise_panel_device);
+ platform_device_register(&mipi_dsi_novatek_panel_device);
+ platform_device_register(&mipi_dsi_orise_panel_device);
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL
- platform_device_register(&hdmi_msm_device);
+ platform_device_register(&hdmi_msm_device);
#endif
- }
if (machine_is_msm8960_liquid())
platform_device_register(&mipi_dsi2lvds_bridge_device);
else
platform_device_register(&mipi_dsi_toshiba_panel_device);
- if (machine_is_msm8x60_rumi3()) {
- msm_fb_register_device("mdp", NULL);
- mipi_dsi_pdata.target_type = 1;
- } else
- msm_fb_register_device("mdp", &mdp_pdata);
+ msm_fb_register_device("mdp", &mdp_pdata);
msm_fb_register_device("mipi_dsi", &mipi_dsi_pdata);
#ifdef CONFIG_MSM_BUS_SCALING
msm_fb_register_device("dtv", &dtv_pdata);
diff --git a/arch/arm/mach-msm/board-8960-gpiomux.c b/arch/arm/mach-msm/board-8960-gpiomux.c
index fd326f1..67be99a 100644
--- a/arch/arm/mach-msm/board-8960-gpiomux.c
+++ b/arch/arm/mach-msm/board-8960-gpiomux.c
@@ -76,6 +76,18 @@
.pull = GPIOMUX_PULL_NONE,
};
+static struct gpiomux_setting gsbi8_uartdm_active_cfg = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting gsbi8_uartdm_suspended_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
static struct gpiomux_setting gsbi9_active_cfg = {
.func = GPIOMUX_FUNC_2,
.drv = GPIOMUX_DRV_8MA,
@@ -240,6 +252,12 @@
.pull = GPIOMUX_PULL_DOWN,
};
+static struct gpiomux_setting usbsw_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
static struct gpiomux_setting mdp_vsync_suspend_cfg = {
.func = GPIOMUX_FUNC_GPIO,
.drv = GPIOMUX_DRV_2MA,
@@ -304,6 +322,37 @@
},
};
#endif
+/* GSBI8 UART GPIOs for Atheros Bluetooth */
+static struct msm_gpiomux_config msm8960_gsbi8_uartdm_configs[] = {
+ {
+ .gpio = 34,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &gsbi8_uartdm_suspended_cfg,
+ [GPIOMUX_ACTIVE] = &gsbi8_uartdm_active_cfg,
+ }
+ },
+ {
+ .gpio = 35,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &gsbi8_uartdm_suspended_cfg,
+ [GPIOMUX_ACTIVE] = &gsbi8_uartdm_active_cfg,
+ }
+ },
+ {
+ .gpio = 36,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &gsbi8_uartdm_suspended_cfg,
+ [GPIOMUX_ACTIVE] = &gsbi8_uartdm_active_cfg,
+ }
+ },
+ {
+ .gpio = 37,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &gsbi8_uartdm_suspended_cfg,
+ [GPIOMUX_ACTIVE] = &gsbi8_uartdm_active_cfg,
+ }
+ },
+};
static struct msm_gpiomux_config msm8960_fusion_gsbi_configs[] = {
{
@@ -710,6 +759,16 @@
},
};
+static struct msm_gpiomux_config hap_lvl_shft_config_sglte[] __initdata = {
+ {
+ .gpio = 89,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &hap_lvl_shft_suspended_config,
+ [GPIOMUX_ACTIVE] = &hap_lvl_shft_active_config,
+ },
+ },
+};
+
static struct msm_gpiomux_config sglte_configs[] __initdata = {
/* AP2MDM_STATUS */
{
@@ -760,6 +819,13 @@
[GPIOMUX_SUSPENDED] = &ap2mdm_cfg,
}
},
+ /* USB_SW */
+ {
+ .gpio = 25,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &usbsw_cfg,
+ }
+ }
};
static struct msm_gpiomux_config msm8960_mdp_vsync_configs[] __initdata = {
@@ -923,8 +989,9 @@
}
#if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE)
- msm_gpiomux_install(msm8960_ethernet_configs,
- ARRAY_SIZE(msm8960_ethernet_configs));
+ if (socinfo_get_platform_subtype() != PLATFORM_SUBTYPE_SGLTE)
+ msm_gpiomux_install(msm8960_ethernet_configs,
+ ARRAY_SIZE(msm8960_ethernet_configs));
#endif
msm_gpiomux_install(msm8960_gsbi_configs,
@@ -951,9 +1018,15 @@
#endif
if (machine_is_msm8960_mtp() || machine_is_msm8960_fluid() ||
- machine_is_msm8960_liquid() || machine_is_msm8960_cdp())
- msm_gpiomux_install(hap_lvl_shft_config,
- ARRAY_SIZE(hap_lvl_shft_config));
+ machine_is_msm8960_liquid() || machine_is_msm8960_cdp()) {
+ if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE)
+ msm_gpiomux_install(hap_lvl_shft_config_sglte,
+ ARRAY_SIZE(hap_lvl_shft_config_sglte));
+
+ else
+ msm_gpiomux_install(hap_lvl_shft_config,
+ ARRAY_SIZE(hap_lvl_shft_config));
+ }
#ifdef CONFIG_USB_EHCI_MSM_HSIC
if ((SOCINFO_VERSION_MAJOR(socinfo_get_version()) != 1) &&
@@ -975,6 +1048,10 @@
msm_gpiomux_install(msm8960_mdp_vsync_configs,
ARRAY_SIZE(msm8960_mdp_vsync_configs));
+ if (socinfo_get_platform_subtype() != PLATFORM_SUBTYPE_SGLTE)
+ msm_gpiomux_install(msm8960_gsbi8_uartdm_configs,
+ ARRAY_SIZE(msm8960_gsbi8_uartdm_configs));
+
if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE)
msm_gpiomux_install(msm8960_gsbi8_uart_configs,
ARRAY_SIZE(msm8960_gsbi8_uart_configs));
diff --git a/arch/arm/mach-msm/board-8960-pmic.c b/arch/arm/mach-msm/board-8960-pmic.c
index 17b0b6f..5950026 100644
--- a/arch/arm/mach-msm/board-8960-pmic.c
+++ b/arch/arm/mach-msm/board-8960-pmic.c
@@ -386,25 +386,6 @@
KEY(0, 3, KEY_CAMERA_FOCUS),
};
-static struct matrix_keymap_data keymap_data_sim = {
- .keymap_size = ARRAY_SIZE(keymap_sim),
- .keymap = keymap_sim,
-};
-
-static struct pm8xxx_keypad_platform_data keypad_data_sim = {
- .input_name = "keypad_8960",
- .input_phys_device = "keypad_8960/input0",
- .num_rows = 12,
- .num_cols = 8,
- .rows_gpio_start = PM8921_GPIO_PM_TO_SYS(9),
- .cols_gpio_start = PM8921_GPIO_PM_TO_SYS(1),
- .debounce_ms = 15,
- .scan_delay_ms = 32,
- .row_hold_ns = 91500,
- .wakeup = 1,
- .keymap_data = &keymap_data_sim,
-};
-
static int pm8921_therm_mitigation[] = {
1100,
700,
@@ -613,10 +594,6 @@
&msm8960_ssbi_pm8921_pdata;
pm8921_platform_data.num_regulators = msm_pm8921_regulator_pdata_len;
- /* Simulator supports a QWERTY keypad */
- if (machine_is_msm8960_sim())
- pm8921_platform_data.keypad_pdata = &keypad_data_sim;
-
if (machine_is_msm8960_liquid()) {
pm8921_platform_data.keypad_pdata = &keypad_data_liquid;
pm8921_platform_data.leds_pdata = &pm8xxx_leds_pdata_liquid;
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index 2664d6b..6ad44d8 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -116,7 +116,6 @@
REGULATOR_SUPPLY("8921_l23", NULL),
REGULATOR_SUPPLY("dsi_vddio", "mipi_dsi.1"),
REGULATOR_SUPPLY("hdmi_avdd", "hdmi_msm.0"),
- REGULATOR_SUPPLY("hdmi_pll_fs", "mdp.0"),
REGULATOR_SUPPLY("pll_vdd", "pil_riva"),
REGULATOR_SUPPLY("pll_vdd", "pil_qdsp6v4.1"),
REGULATOR_SUPPLY("pll_vdd", "pil_qdsp6v4.2"),
@@ -182,11 +181,11 @@
};
VREG_CONSUMERS(S5) = {
REGULATOR_SUPPLY("8921_s5", NULL),
- REGULATOR_SUPPLY("krait0", NULL),
+ REGULATOR_SUPPLY("krait0", "acpuclk-8960"),
};
VREG_CONSUMERS(S6) = {
REGULATOR_SUPPLY("8921_s6", NULL),
- REGULATOR_SUPPLY("krait1", NULL),
+ REGULATOR_SUPPLY("krait1", "acpuclk-8960"),
};
VREG_CONSUMERS(S7) = {
REGULATOR_SUPPLY("8921_s7", NULL),
@@ -562,10 +561,34 @@
int msm_pm8921_regulator_pdata_len __devinitdata =
ARRAY_SIZE(msm_pm8921_regulator_pdata);
+#define RPM_REG_MAP(_id, _sleep_also, _voter, _supply, _dev_name) \
+ { \
+ .vreg_id = RPM_VREG_ID_PM8921_##_id, \
+ .sleep_also = _sleep_also, \
+ .voter = _voter, \
+ .supply = _supply, \
+ .dev_name = _dev_name, \
+ }
+static struct rpm_regulator_consumer_mapping
+ msm_rpm_regulator_consumer_mapping[] __devinitdata = {
+ RPM_REG_MAP(L23, 0, 1, "krait0_l23", "acpuclk-8960"),
+ RPM_REG_MAP(L23, 0, 2, "krait1_l23", "acpuclk-8960"),
+ RPM_REG_MAP(L23, 0, 6, "l2_l23", "acpuclk-8960"),
+ RPM_REG_MAP(L24, 0, 1, "krait0_mem", "acpuclk-8960"),
+ RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8960"),
+ RPM_REG_MAP(S3, 0, 1, "krait0_dig", "acpuclk-8960"),
+ RPM_REG_MAP(S3, 0, 2, "krait1_dig", "acpuclk-8960"),
+ RPM_REG_MAP(S8, 0, 1, "krait0_s8", "acpuclk-8960"),
+ RPM_REG_MAP(S8, 0, 2, "krait1_s8", "acpuclk-8960"),
+ RPM_REG_MAP(S8, 0, 6, "l2_s8", "acpuclk-8960"),
+};
+
struct rpm_regulator_platform_data msm_rpm_regulator_pdata __devinitdata = {
.init_data = msm_rpm_regulator_init_data,
.num_regulators = ARRAY_SIZE(msm_rpm_regulator_init_data),
.version = RPM_VREG_VERSION_8960,
.vreg_id_vdd_mem = RPM_VREG_ID_PM8921_L24,
.vreg_id_vdd_dig = RPM_VREG_ID_PM8921_S3,
+ .consumer_map = msm_rpm_regulator_consumer_mapping,
+ .consumer_map_len = ARRAY_SIZE(msm_rpm_regulator_consumer_mapping),
};
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index bb20d04..dc28b83 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -109,7 +109,6 @@
#define KS8851_RST_GPIO 89
#define KS8851_IRQ_GPIO 90
-#define HAP_SHIFT_LVL_OE_GPIO 47
#define MHL_GPIO_INT 4
#define MHL_GPIO_RESET 15
@@ -1293,6 +1292,7 @@
.peripheral_platform_device = NULL,
.ramdump_timeout_ms = 600000,
.no_powerdown_after_ramdumps = 1,
+ .image_upgrade_supported = 1,
};
#define MSM_TSIF0_PHYS (0x18200000)
@@ -1406,11 +1406,18 @@
msm_bus_rpm_set_mt_mask();
msm_bus_8960_apps_fabric_pdata.rpm_enabled = 1;
msm_bus_8960_sys_fabric_pdata.rpm_enabled = 1;
- msm_bus_8960_mm_fabric_pdata.rpm_enabled = 1;
msm_bus_apps_fabric.dev.platform_data =
&msm_bus_8960_apps_fabric_pdata;
msm_bus_sys_fabric.dev.platform_data = &msm_bus_8960_sys_fabric_pdata;
- msm_bus_mm_fabric.dev.platform_data = &msm_bus_8960_mm_fabric_pdata;
+ if (cpu_is_msm8960ab()) {
+ msm_bus_8960_sg_mm_fabric_pdata.rpm_enabled = 1;
+ msm_bus_mm_fabric.dev.platform_data =
+ &msm_bus_8960_sg_mm_fabric_pdata;
+ } else {
+ msm_bus_8960_mm_fabric_pdata.rpm_enabled = 1;
+ msm_bus_mm_fabric.dev.platform_data =
+ &msm_bus_8960_mm_fabric_pdata;
+ }
msm_bus_sys_fpb.dev.platform_data = &msm_bus_8960_sys_fpb_pdata;
msm_bus_cpss_fpb.dev.platform_data = &msm_bus_8960_cpss_fpb_pdata;
#endif
@@ -1488,6 +1495,9 @@
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &usb_bus_scale_pdata,
#endif
+#ifdef CONFIG_FB_MSM_HDMI_MHL_8334
+ .mhl_dev_name = "sii8334",
+#endif
};
#endif
@@ -1695,6 +1705,8 @@
},
};
+#define HAP_SHIFT_LVL_OE_GPIO 47
+#define HAP_SHIFT_LVL_OE_GPIO_SGLTE 89
#define PM_HAP_EN_GPIO PM8921_GPIO_PM_TO_SYS(33)
#define PM_HAP_LEN_GPIO PM8921_GPIO_PM_TO_SYS(20)
@@ -1703,8 +1715,13 @@
static int isa1200_power(int on)
{
int rc = 0;
+ int hap_oe_gpio = HAP_SHIFT_LVL_OE_GPIO;
- gpio_set_value(HAP_SHIFT_LVL_OE_GPIO, !!on);
+ if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE)
+ hap_oe_gpio = HAP_SHIFT_LVL_OE_GPIO_SGLTE;
+
+
+ gpio_set_value(hap_oe_gpio, !!on);
rc = on ? msm_xo_mode_vote(xo_handle_d1, MSM_XO_MODE_ON) :
msm_xo_mode_vote(xo_handle_d1, MSM_XO_MODE_OFF);
@@ -1717,13 +1734,14 @@
return 0;
err_xo_vote:
- gpio_set_value(HAP_SHIFT_LVL_OE_GPIO, !on);
+ gpio_set_value(hap_oe_gpio, !on);
return rc;
}
static int isa1200_dev_setup(bool enable)
{
int rc = 0;
+ int hap_oe_gpio = HAP_SHIFT_LVL_OE_GPIO;
struct pm_gpio hap_gpio_config = {
.direction = PM_GPIO_DIR_OUT,
@@ -1736,6 +1754,9 @@
.output_value = 0,
};
+ if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE)
+ hap_oe_gpio = HAP_SHIFT_LVL_OE_GPIO_SGLTE;
+
if (enable == true) {
rc = pm8xxx_gpio_config(PM_HAP_EN_GPIO, &hap_gpio_config);
if (rc) {
@@ -1751,14 +1772,14 @@
return rc;
}
- rc = gpio_request(HAP_SHIFT_LVL_OE_GPIO, "hap_shft_lvl_oe");
+ rc = gpio_request(hap_oe_gpio, "hap_shft_lvl_oe");
if (rc) {
pr_err("%s: unable to request gpio %d (%d)\n",
- __func__, HAP_SHIFT_LVL_OE_GPIO, rc);
+ __func__, hap_oe_gpio, rc);
return rc;
}
- rc = gpio_direction_output(HAP_SHIFT_LVL_OE_GPIO, 0);
+ rc = gpio_direction_output(hap_oe_gpio, 0);
if (rc) {
pr_err("%s: Unable to set direction\n", __func__);
goto free_gpio;
@@ -1772,7 +1793,7 @@
goto gpio_set_dir;
}
} else {
- gpio_free(HAP_SHIFT_LVL_OE_GPIO);
+ gpio_free(hap_oe_gpio);
msm_xo_put(xo_handle_d1);
}
@@ -1780,9 +1801,9 @@
return 0;
gpio_set_dir:
- gpio_set_value(HAP_SHIFT_LVL_OE_GPIO, 0);
+ gpio_set_value(hap_oe_gpio, 0);
free_gpio:
- gpio_free(HAP_SHIFT_LVL_OE_GPIO);
+ gpio_free(hap_oe_gpio);
return rc;
}
@@ -2317,18 +2338,12 @@
.src_clk_rate = 24000000,
};
-static struct msm_pm_sleep_status_data msm_pm_slp_sts_data = {
- .base_addr = MSM_ACC0_BASE + 0x08,
- .cpu_offset = MSM_ACC1_BASE - MSM_ACC0_BASE,
- .mask = 1UL << 13,
-};
-
static struct ks8851_pdata spi_eth_pdata = {
.irq_gpio = KS8851_IRQ_GPIO,
.rst_gpio = KS8851_RST_GPIO,
};
-static struct spi_board_info spi_board_info[] __initdata = {
+static struct spi_board_info spi_eth_info[] __initdata = {
{
.modalias = "ks8851",
.irq = MSM_GPIO_TO_INT(KS8851_IRQ_GPIO),
@@ -2338,6 +2353,8 @@
.mode = SPI_MODE_0,
.platform_data = &spi_eth_pdata
},
+};
+static struct spi_board_info spi_board_info[] __initdata = {
{
.modalias = "dsi_novatek_3d_panel_spi",
.max_speed_hz = 10800000,
@@ -2377,10 +2394,10 @@
static struct msm_thermal_data msm_thermal_pdata = {
.sensor_id = 0,
- .poll_ms = 1000,
- .limit_temp = 60,
- .temp_hysteresis = 10,
- .limit_freq = 918000,
+ .poll_ms = 250,
+ .limit_temp_degC = 60,
+ .temp_hysteresis_degC = 10,
+ .freq_step = 2,
};
#ifdef CONFIG_MSM_FAKE_BATTERY
@@ -2458,7 +2475,36 @@
static struct msm_serial_hs_platform_data msm_uart_dm9_pdata = {
.gpio_config = configure_uart_gpios,
};
+
+static int configure_gsbi8_uart_gpios(int on)
+{
+ int ret = 0, i;
+ int uart_gpios[] = {34, 35, 36, 37};
+
+ for (i = 0; i < ARRAY_SIZE(uart_gpios); i++) {
+ if (on) {
+ ret = gpio_request(uart_gpios[i], NULL);
+ if (ret) {
+ pr_err("%s: unable to request uart gpio[%d]\n",
+ __func__, uart_gpios[i]);
+ break;
+ }
+ } else {
+ gpio_free(uart_gpios[i]);
+ }
+ }
+
+ if (ret && on && i)
+ for (; i >= 0; i--)
+ gpio_free(uart_gpios[i]);
+ return ret;
+}
+
+static struct msm_serial_hs_platform_data msm_uart_dm8_pdata = {
+ .gpio_config = configure_gsbi8_uart_gpios,
+};
#else
+static struct msm_serial_hs_platform_data msm_uart_dm8_pdata;
static struct msm_serial_hs_platform_data msm_uart_dm9_pdata;
#endif
@@ -2549,67 +2595,6 @@
&msm_tsens_device,
};
-static struct platform_device *sim_devices[] __initdata = {
- &msm8960_device_uart_gsbi5,
- &msm8960_device_otg,
- &msm8960_device_gadget_peripheral,
- &msm_device_hsusb_host,
- &msm_device_hsic_host,
- &android_usb_device,
- &msm_device_vidc,
- &msm_bus_apps_fabric,
- &msm_bus_sys_fabric,
- &msm_bus_mm_fabric,
- &msm_bus_sys_fpb,
- &msm_bus_cpss_fpb,
- &msm_pcm,
- &msm_multi_ch_pcm,
- &msm_pcm_routing,
- &msm_cpudai0,
- &msm_cpudai1,
- &msm8960_cpudai_slimbus_2_rx,
- &msm8960_cpudai_slimbus_2_tx,
- &msm_cpudai_hdmi_rx,
- &msm_cpudai_bt_rx,
- &msm_cpudai_bt_tx,
- &msm_cpudai_fm_rx,
- &msm_cpudai_fm_tx,
- &msm_cpudai_auxpcm_rx,
- &msm_cpudai_auxpcm_tx,
- &msm_cpu_fe,
- &msm_stub_codec,
- &msm_voice,
- &msm_voip,
- &msm_lpa_pcm,
- &msm_compr_dsp,
- &msm_cpudai_incall_music_rx,
- &msm_cpudai_incall_record_rx,
- &msm_cpudai_incall_record_tx,
-
-#if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \
- defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE)
- &qcrypto_device,
-#endif
-
-#if defined(CONFIG_CRYPTO_DEV_QCEDEV) || \
- defined(CONFIG_CRYPTO_DEV_QCEDEV_MODULE)
- &qcedev_device,
-#endif
-};
-
-static struct platform_device *rumi3_devices[] __initdata = {
- &msm8960_device_uart_gsbi5,
- &msm_kgsl_3d0,
- &msm_kgsl_2d0,
- &msm_kgsl_2d1,
-#ifdef CONFIG_MSM_GEMINI
- &msm8960_gemini_device,
-#endif
-#ifdef CONFIG_MSM_MERCURY
- &msm8960_mercury_device,
-#endif
-};
-
static struct platform_device *cdp_devices[] __initdata = {
&msm_8960_q6_lpass,
&msm_8960_q6_mss_fw,
@@ -2936,7 +2921,7 @@
ARRAY_SIZE(sii_device_info),
},
{
- I2C_LIQUID,
+ I2C_LIQUID | I2C_FFA,
MSM_8960_GSBI10_QUP_I2C_BUS_ID,
msm_isa1200_board_info,
ARRAY_SIZE(msm_isa1200_board_info),
@@ -2967,10 +2952,6 @@
/* Build the matching 'supported_machs' bitmask */
if (machine_is_msm8960_cdp())
mach_mask = I2C_SURF;
- else if (machine_is_msm8960_rumi3())
- mach_mask = I2C_RUMI;
- else if (machine_is_msm8960_sim())
- mach_mask = I2C_SIM;
else if (machine_is_msm8960_fluid())
mach_mask = I2C_FLUID;
else if (machine_is_msm8960_liquid())
@@ -3009,73 +2990,6 @@
#endif
}
-static void __init msm8960_sim_init(void)
-{
- struct msm_watchdog_pdata *wdog_pdata = (struct msm_watchdog_pdata *)
- &msm8960_device_watchdog.dev.platform_data;
-
- wdog_pdata->bark_time = 15000;
- msm_tsens_early_init(&msm_tsens_pdata);
- msm_thermal_init(&msm_thermal_pdata);
- BUG_ON(msm_rpm_init(&msm8960_rpm_data));
- BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
- regulator_suppress_info_printing();
- platform_device_register(&msm8960_device_rpm_regulator);
- msm_clock_init(&msm8960_clock_init_data);
- msm8960_init_pmic();
-
- msm8960_device_otg.dev.platform_data = &msm_otg_pdata;
- msm8960_init_gpiomux();
- msm8960_i2c_init();
- msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
- msm_spm_l2_init(msm_spm_l2_data);
- msm8960_init_buses();
- platform_add_devices(common_devices, ARRAY_SIZE(common_devices));
- msm8960_pm8921_gpio_mpp_init();
- platform_add_devices(sim_devices, ARRAY_SIZE(sim_devices));
-
- msm8960_device_qup_spi_gsbi1.dev.platform_data =
- &msm8960_qup_spi_gsbi1_pdata;
- spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
-
- msm8960_init_mmc();
- msm8960_init_fb();
- slim_register_board_info(msm_slim_devices,
- ARRAY_SIZE(msm_slim_devices));
- BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
- msm_pm_init_sleep_status_data(&msm_pm_slp_sts_data);
-}
-
-static void __init msm8960_rumi3_init(void)
-{
- msm_tsens_early_init(&msm_tsens_pdata);
- msm_thermal_init(&msm_thermal_pdata);
- BUG_ON(msm_rpm_init(&msm8960_rpm_data));
- BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
- regulator_suppress_info_printing();
- platform_device_register(&msm8960_device_rpm_regulator);
- msm8960_init_gpiomux();
- msm8960_init_pmic();
- msm8960_device_qup_spi_gsbi1.dev.platform_data =
- &msm8960_qup_spi_gsbi1_pdata;
- spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
- msm8960_i2c_init();
- msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
- msm_spm_l2_init(msm_spm_l2_data);
- platform_add_devices(common_devices, ARRAY_SIZE(common_devices));
- msm8960_pm8921_gpio_mpp_init();
- platform_add_devices(rumi3_devices, ARRAY_SIZE(rumi3_devices));
- msm8960_init_mmc();
- register_i2c_devices();
-
-
- msm8960_init_fb();
- slim_register_board_info(msm_slim_devices,
- ARRAY_SIZE(msm_slim_devices));
- BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
- msm_pm_init_sleep_status_data(&msm_pm_slp_sts_data);
-}
-
static void __init msm8960_cdp_init(void)
{
if (meminfo_init(SYS_MEMORY, SZ_256M) < 0)
@@ -3111,10 +3025,12 @@
msm8960_device_qup_spi_gsbi1.dev.platform_data =
&msm8960_qup_spi_gsbi1_pdata;
spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
+ if (socinfo_get_platform_subtype() != PLATFORM_SUBTYPE_SGLTE)
+ spi_register_board_info(spi_eth_info, ARRAY_SIZE(spi_eth_info));
msm8960_init_pmic();
- if ((SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 2 &&
- (machine_is_msm8960_mtp())) || machine_is_msm8960_liquid())
+ if (machine_is_msm8960_liquid() || (machine_is_msm8960_mtp() &&
+ (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE)))
msm_isa1200_board_info[0].platform_data = &isa1200_1_pdata;
msm8960_i2c_init();
msm8960_gfx_init();
@@ -3139,6 +3055,11 @@
platform_device_register(&msm_device_uart_dm9);
}
+ /* For 8960 Standalone External Bluetooth Interface */
+ if (socinfo_get_platform_subtype() != PLATFORM_SUBTYPE_SGLTE) {
+ msm_device_uart_dm8.dev.platform_data = &msm_uart_dm8_pdata;
+ platform_device_register(&msm_device_uart_dm8);
+ }
platform_add_devices(common_devices, ARRAY_SIZE(common_devices));
msm8960_pm8921_gpio_mpp_init();
platform_add_devices(cdp_devices, ARRAY_SIZE(cdp_devices));
@@ -3157,37 +3078,12 @@
msm8960_init_dsps();
change_memory_power = &msm8960_change_memory_power;
BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
- msm_pm_init_sleep_status_data(&msm_pm_slp_sts_data);
if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) {
mdm_sglte_device.dev.platform_data = &sglte_platform_data;
platform_device_register(&mdm_sglte_device);
}
}
-MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
- .map_io = msm8960_map_io,
- .reserve = msm8960_reserve,
- .init_irq = msm8960_init_irq,
- .handle_irq = gic_handle_irq,
- .timer = &msm_timer,
- .init_machine = msm8960_sim_init,
- .init_early = msm8960_allocate_memory_regions,
- .init_very_early = msm8960_early_memory,
- .restart = msm_restart,
-MACHINE_END
-
-MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
- .map_io = msm8960_map_io,
- .reserve = msm8960_reserve,
- .init_irq = msm8960_init_irq,
- .handle_irq = gic_handle_irq,
- .timer = &msm_timer,
- .init_machine = msm8960_rumi3_init,
- .init_early = msm8960_allocate_memory_regions,
- .init_very_early = msm8960_early_memory,
- .restart = msm_restart,
-MACHINE_END
-
MACHINE_START(MSM8960_CDP, "QCT MSM8960 CDP")
.map_io = msm8960_map_io,
.reserve = msm8960_reserve,
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index 8cbde5f..b939dc2 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -44,6 +44,7 @@
#include <mach/qpnp-int.h>
#include <mach/socinfo.h>
#include <mach/msm_bus_board.h>
+#include <mach/mpm.h>
#include "clock.h"
#include "devices.h"
#include "spm.h"
@@ -58,7 +59,7 @@
#endif
#define MSM_ION_MM_FW_SIZE 0xa00000 /* (10MB) */
#define MSM_ION_MM_SIZE 0x7800000 /* (120MB) */
-#define MSM_ION_QSECOM_SIZE 0x100000 /* (1MB) */
+#define MSM_ION_QSECOM_SIZE 0x600000 /* (6MB) */
#define MSM_ION_MFC_SIZE SZ_8K
#define MSM_ION_AUDIO_SIZE 0x2B4000
#define MSM_ION_HEAP_NUM 8
@@ -208,12 +209,12 @@
static struct resource smd_resource[] = {
{
.name = "modem_smd_in",
- .start = 32 + 17, /* mss_sw_to_kpss_ipc_irq0 */
+ .start = 32 + 25, /* mss_sw_to_kpss_ipc_irq0 */
.flags = IORESOURCE_IRQ,
},
{
.name = "modem_smsm_in",
- .start = 32 + 18, /* mss_sw_to_kpss_ipc_irq1 */
+ .start = 32 + 26, /* mss_sw_to_kpss_ipc_irq1 */
.flags = IORESOURCE_IRQ,
},
{
@@ -383,6 +384,7 @@
static void __init msm_8974_early_memory(void)
{
reserve_info = &msm_8974_reserve_info;
+ of_scan_flat_dt(dt_scan_for_memory_reserve, msm_8974_reserve_table);
}
void __init msm_8974_reserve(void)
@@ -542,40 +544,6 @@
msm_8974_stub_regulator_devices_len);
}
-static struct clk_lookup msm_clocks_dummy[] = {
- CLK_DUMMY("xo", XO_CLK, NULL, OFF),
- CLK_DUMMY("xo", XO_CLK, "pil_pronto", OFF),
- CLK_DUMMY("core_clk", BLSP2_UART_CLK, "msm_serial_hsl.0", OFF),
- CLK_DUMMY("iface_clk", BLSP2_UART_CLK, "msm_serial_hsl.0", OFF),
- CLK_DUMMY("core_clk", SDC1_CLK, NULL, OFF),
- CLK_DUMMY("iface_clk", SDC1_P_CLK, NULL, OFF),
- CLK_DUMMY("core_clk", SDC3_CLK, NULL, OFF),
- CLK_DUMMY("iface_clk", SDC3_P_CLK, NULL, OFF),
- CLK_DUMMY("phy_clk", NULL, "msm_otg", OFF),
- CLK_DUMMY("core_clk", NULL, "msm_otg", OFF),
- CLK_DUMMY("iface_clk", NULL, "msm_otg", OFF),
- CLK_DUMMY("xo", NULL, "msm_otg", OFF),
- CLK_DUMMY("dfab_clk", DFAB_CLK, NULL, 0),
- CLK_DUMMY("dma_bam_pclk", DMA_BAM_P_CLK, NULL, 0),
- CLK_DUMMY("mem_clk", NULL, NULL, 0),
- CLK_DUMMY("core_clk", SPI_CLK, "spi_qsd.1", OFF),
- CLK_DUMMY("iface_clk", SPI_P_CLK, "spi_qsd.1", OFF),
- CLK_DUMMY("core_clk", NULL, "f9966000.i2c", 0),
- CLK_DUMMY("iface_clk", NULL, "f9966000.i2c", 0),
- CLK_DUMMY("core_clk", NULL, "fe12f000.slim", OFF),
- CLK_DUMMY("core_clk", "mdp.0", NULL, 0),
- CLK_DUMMY("core_clk_src", "mdp.0", NULL, 0),
- CLK_DUMMY("lut_clk", "mdp.0", NULL, 0),
- CLK_DUMMY("vsync_clk", "mdp.0", NULL, 0),
- CLK_DUMMY("iface_clk", "mdp.0", NULL, 0),
- CLK_DUMMY("bus_clk", "mdp.0", NULL, 0),
-};
-
-struct clock_init_data msm_dummy_clock_init_data __initdata = {
- .table = msm_clocks_dummy,
- .size = ARRAY_SIZE(msm_clocks_dummy),
-};
-
/*
* Used to satisfy dependencies for devices that need to be
* run early or in a particular order. Most likely your device doesn't fall
@@ -592,7 +560,7 @@
msm_spm_device_init();
regulator_stub_init();
if (machine_is_msm8974_rumi())
- msm_clock_init(&msm_dummy_clock_init_data);
+ msm_clock_init(&msm8974_rumi_clock_init_data);
else
msm_clock_init(&msm8974_clock_init_data);
msm8974_init_buses();
@@ -604,10 +572,22 @@
{ .compatible = "qcom,spmi-pmic-arb", .data = qpnpint_of_init, },
{}
};
+static struct of_device_id mpm_match[] __initdata = {
+ {.compatible = "qcom,mpm-v2", },
+ {},
+};
void __init msm_8974_init_irq(void)
{
+ struct device_node *node;
+
of_irq_init(irq_match);
+ node = of_find_matching_node(NULL, mpm_match);
+
+ WARN_ON(!node);
+
+ if (node)
+ of_mpm_init(node);
}
static struct of_dev_auxdata msm_8974_auxdata_lookup[] __initdata = {
@@ -642,6 +622,10 @@
OF_DEV_AUXDATA("qcom,mdss_mdp", 0xFD900000, "mdp.0", NULL),
OF_DEV_AUXDATA("qcom,msm-tsens", 0xFC4A8000, \
"msm-tsens", NULL),
+ OF_DEV_AUXDATA("qcom,qcedev", 0xFD440000, \
+ "qcedev.0", NULL),
+ OF_DEV_AUXDATA("qcom,qcrypto", 0xFD440000, \
+ "qcrypto.0", NULL),
{}
};
diff --git a/arch/arm/mach-msm/board-msm7627a-camera.c b/arch/arm/mach-msm/board-msm7627a-camera.c
index 57684f9..1e198a7 100644
--- a/arch/arm/mach-msm/board-msm7627a-camera.c
+++ b/arch/arm/mach-msm/board-msm7627a-camera.c
@@ -281,8 +281,15 @@
.gpio_no_mux = 1,
};
+static struct msm_camera_sensor_flash_src msm_flash_src_ov8825 = {
+ .flash_sr_type = MSM_CAMERA_FLASH_SRC_LED1,
+ ._fsrc.ext_driver_src.led_en = 13,
+ ._fsrc.ext_driver_src.led_flash_en = 32,
+};
+
static struct msm_camera_sensor_flash_data flash_ov8825 = {
- .flash_type = MSM_CAMERA_FLASH_NONE,
+ .flash_type = MSM_CAMERA_FLASH_LED,
+ .flash_src = &msm_flash_src_ov8825,
};
static struct msm_camera_sensor_platform_info sensor_board_info_ov8825 = {
diff --git a/arch/arm/mach-msm/board-msm7627a-display.c b/arch/arm/mach-msm/board-msm7627a-display.c
index 9259161..e305fe6 100644
--- a/arch/arm/mach-msm/board-msm7627a-display.c
+++ b/arch/arm/mach-msm/board-msm7627a-display.c
@@ -81,6 +81,7 @@
"gpio_disp_reset",
};
+static char lcdc_splash_is_enabled(void);
static int lcdc_truly_gpio_init(void)
{
int i;
@@ -103,7 +104,12 @@
lcdc_truly_gpio_table[i]);
goto truly_gpio_fail;
}
- rc = gpio_direction_output(lcdc_truly_gpio_table[i], 0);
+ if (lcdc_splash_is_enabled())
+ rc = gpio_direction_output(
+ lcdc_truly_gpio_table[i], 1);
+ else
+ rc = gpio_direction_output(
+ lcdc_truly_gpio_table[i], 0);
if (rc < 0) {
pr_err("Error direct lcdc gpio:%d\n",
lcdc_truly_gpio_table[i]);
@@ -247,6 +253,7 @@
static int sku3_lcdc_power_save(int on)
{
int rc = 0;
+ static int cont_splash_done;
if (on) {
sku3_lcdc_lcd_camera_power_onoff(1);
@@ -257,6 +264,11 @@
return rc;
}
+ if (lcdc_splash_is_enabled() && !cont_splash_done) {
+ cont_splash_done = 1;
+ return rc;
+ }
+
if (lcdc_truly_gpio_initialized) {
/*LCD reset*/
gpio_set_value(SKU3_LCDC_GPIO_DISPLAY_RESET, 1);
@@ -778,8 +790,14 @@
static struct msm_panel_common_pdata mdp_pdata = {
.gpio = 97,
.mdp_rev = MDP_REV_303,
+ .cont_splash_enabled = 0x1,
};
+static char lcdc_splash_is_enabled()
+{
+ return mdp_pdata.cont_splash_enabled;
+}
+
#define GPIO_LCDC_BRDG_PD 128
#define GPIO_LCDC_BRDG_RESET_N 129
#define GPIO_LCD_DSI_SEL 125
@@ -1142,11 +1160,11 @@
}
static int qrd3_dsi_gpio_initialized;
+static struct regulator *gpio_reg_2p85v, *gpio_reg_1p8v;
static int mipi_dsi_panel_qrd3_power(int on)
{
int rc = 0;
- static struct regulator *gpio_reg_2p85v, *gpio_reg_1p8v;
if (!qrd3_dsi_gpio_initialized) {
pmapp_disp_backlight_init();
@@ -1155,21 +1173,42 @@
if (rc < 0)
return rc;
- gpio_reg_2p85v = regulator_get(&mipi_dsi_device.dev,
- "lcd_vdd");
- if (IS_ERR(gpio_reg_2p85v)) {
- pr_err("%s:ext_2p85v regulator get failed", __func__);
- return -EINVAL;
- }
-
- gpio_reg_1p8v = regulator_get(&mipi_dsi_device.dev,
- "lcd_vddi");
- if (IS_ERR(gpio_reg_1p8v)) {
- pr_err("%s:ext_1p8v regulator get failed", __func__);
- return -EINVAL;
- }
-
qrd3_dsi_gpio_initialized = 1;
+
+ if (mdp_pdata.cont_splash_enabled) {
+ rc = gpio_tlmm_config(GPIO_CFG(
+ GPIO_QRD3_LCD_BACKLIGHT_EN, 0, GPIO_CFG_OUTPUT,
+ GPIO_CFG_PULL_UP, GPIO_CFG_2MA), GPIO_CFG_ENABLE);
+ if (rc < 0) {
+ pr_err("failed QRD3 GPIO_BACKLIGHT_EN tlmm config\n");
+ return rc;
+ }
+ rc = gpio_direction_output(GPIO_QRD3_LCD_BACKLIGHT_EN,
+ 1);
+ if (rc < 0) {
+ pr_err("failed to enable backlight\n");
+ gpio_free(GPIO_QRD3_LCD_BACKLIGHT_EN);
+ return rc;
+ }
+
+ /*Configure LCD Bridge reset*/
+ rc = gpio_tlmm_config(qrd3_mipi_dsi_gpio[0],
+ GPIO_CFG_ENABLE);
+ if (rc < 0) {
+ pr_err("Failed to enable LCD Bridge reset enable\n");
+ return rc;
+ }
+
+ rc = gpio_direction_output(GPIO_QRD3_LCD_BRDG_RESET_N,
+ 1);
+
+ if (rc < 0) {
+ pr_err("Failed GPIO bridge Reset\n");
+ gpio_free(GPIO_QRD3_LCD_BRDG_RESET_N);
+ return rc;
+ }
+ return 0;
+ }
}
if (on) {
@@ -1188,9 +1227,9 @@
}
/*Toggle Backlight GPIO*/
gpio_set_value_cansleep(GPIO_QRD3_LCD_BACKLIGHT_EN, 1);
- udelay(190);
+ udelay(100);
gpio_set_value_cansleep(GPIO_QRD3_LCD_BACKLIGHT_EN, 0);
- udelay(286);
+ udelay(430);
gpio_set_value_cansleep(GPIO_QRD3_LCD_BACKLIGHT_EN, 1);
/* 1 wire mode starts from this low to high transition */
udelay(50);
@@ -1246,6 +1285,7 @@
return rc;
}
+static char mipi_dsi_splash_is_enabled(void);
static int mipi_dsi_panel_power(int on)
{
int rc = 0;
@@ -1268,9 +1308,15 @@
.dsi_power_save = mipi_dsi_panel_power,
.dsi_client_reset = msm_fb_dsi_client_reset,
.get_lane_config = msm_fb_get_lane_config,
+ .splash_is_enabled = mipi_dsi_splash_is_enabled,
};
#endif
+static char mipi_dsi_splash_is_enabled(void)
+{
+ return mdp_pdata.cont_splash_enabled;
+}
+
static char prim_panel_name[PANEL_NAME_MAX_LEN];
static int __init prim_display_setup(char *param)
{
@@ -1280,6 +1326,8 @@
}
early_param("prim_display", prim_display_setup);
+static int disable_splash;
+
void msm7x27a_set_display_params(char *prim_panel)
{
if (strnlen(prim_panel, PANEL_NAME_MAX_LEN)) {
@@ -1288,10 +1336,22 @@
pr_debug("msm_fb_pdata.prim_panel_name %s\n",
msm_fb_pdata.prim_panel_name);
}
+ if (strnlen(msm_fb_pdata.prim_panel_name, PANEL_NAME_MAX_LEN)) {
+ if (strncmp((char *)msm_fb_pdata.prim_panel_name,
+ "mipi_cmd_nt35510_wvga",
+ strnlen("mipi_cmd_nt35510_wvga",
+ PANEL_NAME_MAX_LEN)) &&
+ strncmp((char *)msm_fb_pdata.prim_panel_name,
+ "mipi_video_nt35510_wvga",
+ strnlen("mipi_video_nt35510_wvga",
+ PANEL_NAME_MAX_LEN)))
+ disable_splash = 1;
+ }
}
void __init msm_fb_add_devices(void)
{
+ int rc = 0;
msm7x27a_set_display_params(prim_panel_name);
if (machine_is_msm7627a_qrd1())
platform_add_devices(qrd_fb_devices,
@@ -1300,15 +1360,22 @@
|| machine_is_msm8625_evt()) {
mipi_NT35510_pdata.bl_lock = 1;
mipi_NT35516_pdata.bl_lock = 1;
+ if (disable_splash)
+ mdp_pdata.cont_splash_enabled = 0x0;
+
+
platform_add_devices(evb_fb_devices,
ARRAY_SIZE(evb_fb_devices));
} else if (machine_is_msm7627a_qrd3() || machine_is_msm8625_qrd7()) {
sku3_lcdc_lcd_camera_power_init();
+ mdp_pdata.cont_splash_enabled = 0x1;
platform_add_devices(qrd3_fb_devices,
ARRAY_SIZE(qrd3_fb_devices));
- } else
+ } else {
+ mdp_pdata.cont_splash_enabled = 0x0;
platform_add_devices(msm_fb_devices,
ARRAY_SIZE(msm_fb_devices));
+ }
msm_fb_register_device("mdp", &mdp_pdata);
if (machine_is_msm7625a_surf() || machine_is_msm7x27a_surf() ||
@@ -1318,4 +1385,26 @@
#ifdef CONFIG_FB_MSM_MIPI_DSI
msm_fb_register_device("mipi_dsi", &mipi_dsi_pdata);
#endif
+ if (machine_is_msm7627a_evb() || machine_is_msm8625_evb()
+ || machine_is_msm8625_evt()) {
+ gpio_reg_2p85v = regulator_get(&mipi_dsi_device.dev,
+ "lcd_vdd");
+ if (IS_ERR(gpio_reg_2p85v))
+ pr_err("%s:ext_2p85v regulator get failed", __func__);
+
+ gpio_reg_1p8v = regulator_get(&mipi_dsi_device.dev,
+ "lcd_vddi");
+ if (IS_ERR(gpio_reg_1p8v))
+ pr_err("%s:ext_1p8v regulator get failed", __func__);
+
+ if (mdp_pdata.cont_splash_enabled) {
+ /*Enable EXT_2.85 and 1.8 regulators*/
+ rc = regulator_enable(gpio_reg_2p85v);
+ if (rc < 0)
+ pr_err("%s: reg enable failed\n", __func__);
+ rc = regulator_enable(gpio_reg_1p8v);
+ if (rc < 0)
+ pr_err("%s: reg enable failed\n", __func__);
+ }
+ }
}
diff --git a/arch/arm/mach-msm/board-msm7627a-io.c b/arch/arm/mach-msm/board-msm7627a-io.c
index 22095cd..47e8381 100644
--- a/arch/arm/mach-msm/board-msm7627a-io.c
+++ b/arch/arm/mach-msm/board-msm7627a-io.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/atmel_maxtouch.h>
#include <linux/input/ft5x06_ts.h>
+#include <linux/leds-msm-tricolor.h>
#include <asm/gpio.h>
#include <asm/mach-types.h>
#include <mach/rpc_server_handset.h>
@@ -162,32 +163,6 @@
};
#define LED_GPIO_PDM 96
-#define LED_RED_GPIO_8625 49
-#define LED_GREEN_GPIO_8625 34
-
-static struct gpio_led gpio_leds_config_8625[] = {
- {
- .name = "green",
- .gpio = LED_GREEN_GPIO_8625,
- },
- {
- .name = "red",
- .gpio = LED_RED_GPIO_8625,
- },
-};
-
-static struct gpio_led_platform_data gpio_leds_pdata_8625 = {
- .num_leds = ARRAY_SIZE(gpio_leds_config_8625),
- .leds = gpio_leds_config_8625,
-};
-
-static struct platform_device gpio_leds_8625 = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &gpio_leds_pdata_8625,
- },
-};
#define MXT_TS_IRQ_GPIO 48
#define MXT_TS_RESET_GPIO 26
@@ -772,6 +747,30 @@
},
};
+static struct led_info tricolor_led_info[] = {
+ [0] = {
+ .name = "red",
+ .flags = LED_COLOR_RED,
+ },
+ [1] = {
+ .name = "green",
+ .flags = LED_COLOR_GREEN,
+ },
+};
+
+static struct led_platform_data tricolor_led_pdata = {
+ .leds = tricolor_led_info,
+ .num_leds = ARRAY_SIZE(tricolor_led_info),
+};
+
+static struct platform_device tricolor_leds_pdev = {
+ .name = "msm-tricolor-leds",
+ .id = -1,
+ .dev = {
+ .platform_data = &tricolor_led_pdata,
+ },
+};
+
void __init msm7627a_add_io_devices(void)
{
/* touchscreen */
@@ -868,24 +867,9 @@
platform_device_register(&kp_pdev_sku3);
/* leds */
- if (machine_is_msm7627a_evb() || machine_is_msm8625_evb()) {
- rc = gpio_tlmm_config(GPIO_CFG(LED_RED_GPIO_8625, 0,
- GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP,
- GPIO_CFG_16MA), GPIO_CFG_ENABLE);
- if (rc) {
- pr_err("%s: gpio_tlmm_config for %d failed\n",
- __func__, LED_RED_GPIO_8625);
- }
-
- rc = gpio_tlmm_config(GPIO_CFG(LED_GREEN_GPIO_8625, 0,
- GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP,
- GPIO_CFG_16MA), GPIO_CFG_ENABLE);
- if (rc) {
- pr_err("%s: gpio_tlmm_config for %d failed\n",
- __func__, LED_GREEN_GPIO_8625);
- }
-
- platform_device_register(&gpio_leds_8625);
+ if (machine_is_msm7627a_evb() || machine_is_msm8625_evb() ||
+ machine_is_msm8625_evt()) {
platform_device_register(&pmic_mpp_leds_pdev);
+ platform_device_register(&tricolor_leds_pdev);
}
}
diff --git a/arch/arm/mach-msm/board-msm7x27a.c b/arch/arm/mach-msm/board-msm7x27a.c
index 7db4bda..3bd7eeb 100644
--- a/arch/arm/mach-msm/board-msm7x27a.c
+++ b/arch/arm/mach-msm/board-msm7x27a.c
@@ -48,7 +48,6 @@
#include <mach/msm_battery.h>
#include <linux/smsc911x.h>
#include <linux/atmel_maxtouch.h>
-#include <linux/fmem.h>
#include <linux/msm_adc.h>
#include <linux/ion.h>
#include "devices.h"
@@ -448,9 +447,6 @@
.allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
.cached = 1,
.memory_type = MEMTYPE_EBI1,
- .request_region = request_fmem_c_region,
- .release_region = release_fmem_c_region,
- .reusable = 1,
};
static struct platform_device android_pmem_adsp_device = {
@@ -778,14 +774,6 @@
static void msm7x27a_cfg_uart2dm_serial(void) { }
#endif
-static struct fmem_platform_data fmem_pdata;
-
-static struct platform_device fmem_device = {
- .name = "fmem",
- .id = 1,
- .dev = { .platform_data = &fmem_pdata },
-};
-
static struct platform_device *rumi_sim_devices[] __initdata = {
&msm_device_dmov,
&msm_device_smd,
@@ -823,7 +811,6 @@
&android_pmem_device,
&android_pmem_adsp_device,
&android_pmem_audio_device,
- &fmem_device,
&msm_device_nand,
&msm_device_snd,
&msm_device_adspdec,
@@ -896,6 +883,7 @@
*/
static struct ion_platform_data ion_pdata = {
.nr = MSM_ION_HEAP_NUM,
+ .has_outer_cache = 1,
.heaps = {
{
.id = ION_SYSTEM_HEAP_ID,
@@ -909,7 +897,6 @@
.type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_CAMERA_HEAP_NAME,
.memory_type = ION_EBI_TYPE,
- .has_outer_cache = 1,
.extra_data = (void *)&co_ion_pdata,
},
/* PMEM_AUDIO */
@@ -918,7 +905,6 @@
.type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_AUDIO_HEAP_NAME,
.memory_type = ION_EBI_TYPE,
- .has_outer_cache = 1,
.extra_data = (void *)&co_ion_pdata,
},
/* PMEM_MDP = SF */
@@ -927,7 +913,6 @@
.type = ION_HEAP_TYPE_CARVEOUT,
.name = ION_SF_HEAP_NAME,
.memory_type = ION_EBI_TYPE,
- .has_outer_cache = 1,
.extra_data = (void *)&co_ion_pdata,
},
#endif
@@ -966,32 +951,10 @@
{
#ifdef CONFIG_ANDROID_PMEM
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
- unsigned int i;
- unsigned int reusable_count = 0;
-
android_pmem_adsp_pdata.size = pmem_adsp_size;
android_pmem_pdata.size = pmem_mdp_size;
android_pmem_audio_pdata.size = pmem_audio_size;
- fmem_pdata.size = 0;
- fmem_pdata.align = PAGE_SIZE;
-
- /* Find pmem devices that should use FMEM (reusable) memory.
- */
- for (i = 0; i < ARRAY_SIZE(pmem_pdata_array); ++i) {
- struct android_pmem_platform_data *pdata = pmem_pdata_array[i];
-
- if (!reusable_count && pdata->reusable)
- fmem_pdata.size += pdata->size;
-
- reusable_count += (pdata->reusable) ? 1 : 0;
-
- if (pdata->reusable && reusable_count > 1) {
- pr_err("%s: Too many PMEM devices specified as reusable. PMEM device %s was not configured as reusable.\n",
- __func__, pdata->name);
- pdata->reusable = 0;
- }
- }
#endif
#endif
}
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 1b21c23..50b1ac5 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -468,26 +468,6 @@
},
};
-/*
- * The smc91x configuration varies depending on platform.
- * The resources data structure is filled in at runtime.
- */
-static struct resource smc91x_resources[] = {
- [0] = {
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device smc91x_device = {
- .name = "smc91x",
- .id = 0,
- .num_resources = ARRAY_SIZE(smc91x_resources),
- .resource = smc91x_resources,
-};
-
static struct resource smsc911x_resources[] = {
[0] = {
.flags = IORESOURCE_MEM,
@@ -4208,72 +4188,6 @@
.id = -1,
};
-static struct platform_device *rumi_sim_devices[] __initdata = {
- &smc91x_device,
- &msm_device_uart_dm12,
-#ifdef CONFIG_I2C_QUP
- &msm_gsbi3_qup_i2c_device,
- &msm_gsbi4_qup_i2c_device,
- &msm_gsbi7_qup_i2c_device,
- &msm_gsbi8_qup_i2c_device,
- &msm_gsbi9_qup_i2c_device,
- &msm_gsbi12_qup_i2c_device,
-#endif
-#ifdef CONFIG_I2C_SSBI
- &msm_device_ssbi3,
-#endif
-#ifdef CONFIG_ANDROID_PMEM
-#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
- &android_pmem_device,
- &android_pmem_adsp_device,
- &android_pmem_smipool_device,
- &android_pmem_audio_device,
-#endif /*CONFIG_MSM_MULTIMEDIA_USE_ION*/
-#endif /*CONFIG_ANDROID_PMEM*/
-#ifdef CONFIG_MSM_ROTATOR
- &msm_rotator_device,
-#endif
- &msm_fb_device,
- &msm_kgsl_3d0,
- &msm_kgsl_2d0,
- &msm_kgsl_2d1,
- &lcdc_samsung_panel_device,
-#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL
- &hdmi_msm_device,
-#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL */
-#ifdef CONFIG_MSM_CAMERA
-#ifndef CONFIG_MSM_CAMERA_V4L2
-#ifdef CONFIG_MT9E013
- &msm_camera_sensor_mt9e013,
-#endif
-#ifdef CONFIG_IMX074
- &msm_camera_sensor_imx074,
-#endif
-#ifdef CONFIG_VX6953
- &msm_camera_sensor_vx6953,
-#endif
-#ifdef CONFIG_WEBCAM_OV7692
- &msm_camera_sensor_webcam_ov7692,
-#endif
-#ifdef CONFIG_WEBCAM_OV9726
- &msm_camera_sensor_webcam_ov9726,
-#endif
-#ifdef CONFIG_QS_S5K4E1
- &msm_camera_sensor_qs_s5k4e1,
-#endif
-#endif
-#endif
-#ifdef CONFIG_MSM_GEMINI
- &msm_gemini_device,
-#endif
-#ifdef CONFIG_MSM_VPE
-#ifndef CONFIG_MSM_CAMERA_V4L2
- &msm_vpe_device,
-#endif
-#endif
- &msm_device_vidc,
-};
-
#if defined(CONFIG_GPIO_SX150X) || defined(CONFIG_GPIO_SX150X_MODULE)
enum {
SX150X_CORE,
@@ -7452,10 +7366,6 @@
mach_mask = I2C_SURF;
else if (machine_is_msm8x60_ffa() || machine_is_msm8x60_fusn_ffa())
mach_mask = I2C_FFA;
- else if (machine_is_msm8x60_rumi3())
- mach_mask = I2C_RUMI;
- else if (machine_is_msm8x60_sim())
- mach_mask = I2C_SIM;
else if (machine_is_msm8x60_fluid())
mach_mask = I2C_FLUID;
else if (machine_is_msm8x60_dragon())
@@ -7642,10 +7552,6 @@
machine_is_msm8x60_fluid() ||
machine_is_msm8x60_dragon())
ebi2_cfg |= (1 << 4) | (1 << 5); /* CS2, CS3 */
- else if (machine_is_msm8x60_sim())
- ebi2_cfg |= (1 << 4); /* CS2 */
- else if (machine_is_msm8x60_rumi3())
- ebi2_cfg |= (1 << 5); /* CS3 */
writel_relaxed(ebi2_cfg, ebi2_cfg_ptr);
iounmap(ebi2_cfg_ptr);
@@ -7686,32 +7592,6 @@
}
}
-static void __init msm8x60_configure_smc91x(void)
-{
- if (machine_is_msm8x60_sim()) {
-
- smc91x_resources[0].start = 0x1b800300;
- smc91x_resources[0].end = 0x1b8003ff;
-
- smc91x_resources[1].start = (NR_MSM_IRQS + 40);
- smc91x_resources[1].end = (NR_MSM_IRQS + 40);
-
- } else if (machine_is_msm8x60_rumi3()) {
-
- smc91x_resources[0].start = 0x1d000300;
- smc91x_resources[0].end = 0x1d0003ff;
-
- smc91x_resources[1].start = TLMM_MSM_DIR_CONN_IRQ_0;
- smc91x_resources[1].end = TLMM_MSM_DIR_CONN_IRQ_0;
- }
-}
-
-static void __init msm8x60_init_tlmm(void)
-{
- if (machine_is_msm8x60_rumi3())
- msm_gpio_install_direct_irq(0, 0, 1);
-}
-
#if (defined(CONFIG_MMC_MSM_SDC1_SUPPORT)\
|| defined(CONFIG_MMC_MSM_SDC2_SUPPORT)\
|| defined(CONFIG_MMC_MSM_SDC3_SUPPORT)\
@@ -9875,10 +9755,7 @@
mdp_pdata.num_mdp_clk = 0;
mdp_pdata.mdp_core_clk_rate = 200000000;
#endif
- if (machine_is_msm8x60_rumi3())
- msm_fb_register_device("mdp", NULL);
- else
- msm_fb_register_device("mdp", &mdp_pdata);
+ msm_fb_register_device("mdp", &mdp_pdata);
msm_fb_register_device("lcdc", &lcdc_pdata);
msm_fb_register_device("mipi_dsi", &mipi_dsi_pdata);
@@ -10300,14 +10177,6 @@
struct msm_gpiomux_configs *gpiomux_cfgs;
};
-static struct msm_board_data msm8x60_rumi3_board_data __initdata = {
- .gpiomux_cfgs = msm8x60_surf_ffa_gpiomux_cfgs,
-};
-
-static struct msm_board_data msm8x60_sim_board_data __initdata = {
- .gpiomux_cfgs = msm8x60_surf_ffa_gpiomux_cfgs,
-};
-
static struct msm_board_data msm8x60_surf_board_data __initdata = {
.gpiomux_cfgs = msm8x60_surf_ffa_gpiomux_cfgs,
};
@@ -10418,12 +10287,9 @@
* it disabled for all others for additional power savings.
*/
if (machine_is_msm8x60_surf() || machine_is_msm8x60_ffa() ||
- machine_is_msm8x60_rumi3() ||
- machine_is_msm8x60_sim() ||
machine_is_msm8x60_fluid() ||
machine_is_msm8x60_dragon())
msm8x60_init_ebi2();
- msm8x60_init_tlmm();
msm8x60_init_gpiomux(board_data->gpiomux_cfgs);
msm8x60_init_uart12dm();
#ifdef CONFIG_MSM_CAMERA_V4L2
@@ -10513,10 +10379,6 @@
#endif
platform_add_devices(asoc_devices,
ARRAY_SIZE(asoc_devices));
- } else {
- msm8x60_configure_smc91x();
- platform_add_devices(rumi_sim_devices,
- ARRAY_SIZE(rumi_sim_devices));
}
#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
if (machine_is_msm8x60_surf() || machine_is_msm8x60_ffa() ||
@@ -10540,8 +10402,7 @@
if (machine_is_msm8x60_fluid())
cyttsp_set_params();
#endif
- if (!machine_is_msm8x60_sim())
- msm_fb_add_devices();
+ msm_fb_add_devices();
fixup_i2c_configs();
register_i2c_devices();
@@ -10608,16 +10469,6 @@
msm_fusion_setup_pinctrl();
}
-static void __init msm8x60_rumi3_init(void)
-{
- msm8x60_init(&msm8x60_rumi3_board_data);
-}
-
-static void __init msm8x60_sim_init(void)
-{
- msm8x60_init(&msm8x60_sim_board_data);
-}
-
static void __init msm8x60_surf_init(void)
{
msm8x60_init(&msm8x60_surf_board_data);
@@ -10653,28 +10504,6 @@
msm8x60_init(&msm8x60_dragon_board_data);
}
-MACHINE_START(MSM8X60_RUMI3, "QCT MSM8X60 RUMI3")
- .map_io = msm8x60_map_io,
- .reserve = msm8x60_reserve,
- .init_irq = msm8x60_init_irq,
- .handle_irq = gic_handle_irq,
- .init_machine = msm8x60_rumi3_init,
- .timer = &msm_timer,
- .init_early = msm8x60_charm_init_early,
- .restart = msm_restart,
-MACHINE_END
-
-MACHINE_START(MSM8X60_SIM, "QCT MSM8X60 SIMULATOR")
- .map_io = msm8x60_map_io,
- .reserve = msm8x60_reserve,
- .init_irq = msm8x60_init_irq,
- .handle_irq = gic_handle_irq,
- .init_machine = msm8x60_sim_init,
- .timer = &msm_timer,
- .init_early = msm8x60_charm_init_early,
- .restart = msm_restart,
-MACHINE_END
-
MACHINE_START(MSM8X60_SURF, "QCT MSM8X60 SURF")
.map_io = msm8x60_map_io,
.reserve = msm8x60_reserve,
diff --git a/arch/arm/mach-msm/board-qrd7627a.c b/arch/arm/mach-msm/board-qrd7627a.c
index 8182fc4..1921cc3 100644
--- a/arch/arm/mach-msm/board-qrd7627a.c
+++ b/arch/arm/mach-msm/board-qrd7627a.c
@@ -31,7 +31,6 @@
#include <linux/memblock.h>
#include <linux/input/ft5x06_ts.h>
#include <linux/msm_adc.h>
-#include <linux/fmem.h>
#include <linux/regulator/msm-gpio-regulator.h>
#include <linux/ion.h>
#include <asm/mach/mmc.h>
@@ -394,9 +393,6 @@
.allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
.cached = 1,
.memory_type = MEMTYPE_EBI1,
- .request_region = request_fmem_c_region,
- .release_region = release_fmem_c_region,
- .reusable = 1,
};
static struct platform_device android_pmem_adsp_device = {
@@ -593,9 +589,9 @@
static u32 msm_calculate_batt_capacity(u32 current_voltage);
static struct msm_psy_batt_pdata msm_psy_batt_data = {
- .voltage_min_design = 3200,
+ .voltage_min_design = 3500,
.voltage_max_design = 4200,
- .voltage_fail_safe = 3340,
+ .voltage_fail_safe = 3598,
.avail_chg_sources = AC_CHG | USB_CHG ,
.batt_technology = POWER_SUPPLY_TECHNOLOGY_LION,
.calculate_capacity = &msm_calculate_batt_capacity,
@@ -639,14 +635,6 @@
},
};
-static struct fmem_platform_data fmem_pdata;
-
-static struct platform_device fmem_device = {
- .name = "fmem",
- .id = 1,
- .dev = { .platform_data = &fmem_pdata },
-};
-
#define GPIO_VREG_INIT(_id, _reg_name, _gpio_label, _gpio, _active_low) \
[GPIO_VREG_ID_##_id] = { \
.init_data = { \
@@ -717,7 +705,6 @@
&asoc_msm_dai0,
&asoc_msm_dai1,
&msm_adc_device,
- &fmem_device,
#ifdef CONFIG_ION_MSM
&ion_dev,
#endif
@@ -785,6 +772,7 @@
*/
static struct ion_platform_data ion_pdata = {
.nr = MSM_ION_HEAP_NUM,
+ .has_outer_cache = 1,
.heaps = {
{
.id = ION_SYSTEM_HEAP_ID,
@@ -799,7 +787,6 @@
.name = ION_CAMERA_HEAP_NAME,
.size = MSM_ION_CAMERA_SIZE,
.memory_type = ION_EBI_TYPE,
- .has_outer_cache = 1,
.extra_data = (void *)&co_ion_pdata,
},
/* PMEM_AUDIO */
@@ -809,7 +796,6 @@
.name = ION_AUDIO_HEAP_NAME,
.size = MSM_ION_AUDIO_SIZE,
.memory_type = ION_EBI_TYPE,
- .has_outer_cache = 1,
.extra_data = (void *)&co_ion_pdata,
},
/* PMEM_MDP = SF */
@@ -819,7 +805,6 @@
.name = ION_SF_HEAP_NAME,
.size = MSM_ION_SF_SIZE,
.memory_type = ION_EBI_TYPE,
- .has_outer_cache = 1,
.extra_data = (void *)&co_ion_pdata,
},
#endif
@@ -858,32 +843,9 @@
{
#ifdef CONFIG_ANDROID_PMEM
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
- unsigned int i;
- unsigned int reusable_count = 0;
-
android_pmem_adsp_pdata.size = pmem_adsp_size;
android_pmem_pdata.size = pmem_mdp_size;
android_pmem_audio_pdata.size = pmem_audio_size;
-
- fmem_pdata.size = 0;
- fmem_pdata.align = PAGE_SIZE;
-
- /* Find pmem devices that should use FMEM (reusable) memory.
- */
- for (i = 0; i < ARRAY_SIZE(pmem_pdata_array); ++i) {
- struct android_pmem_platform_data *pdata = pmem_pdata_array[i];
-
- if (!reusable_count && pdata->reusable)
- fmem_pdata.size += pdata->size;
-
- reusable_count += (pdata->reusable) ? 1 : 0;
-
- if (pdata->reusable && reusable_count > 1) {
- pr_err("%s: Too many PMEM devices specified as reusable. PMEM device %s was not configured as reusable.\n",
- __func__, pdata->name);
- pdata->reusable = 0;
- }
- }
#endif
#endif
}
diff --git a/arch/arm/mach-msm/cache_erp.c b/arch/arm/mach-msm/cache_erp.c
index c3302ec..8a73c84 100644
--- a/arch/arm/mach-msm/cache_erp.c
+++ b/arch/arm/mach-msm/cache_erp.c
@@ -253,22 +253,11 @@
int print_regs = cesr & CESR_PRINT_MASK;
int log_event = cesr & CESR_LOG_EVENT_MASK;
- void *const saw_bases[] = {
- MSM_SAW0_BASE,
- MSM_SAW1_BASE,
- MSM_SAW2_BASE,
- MSM_SAW3_BASE,
- };
-
if (print_regs) {
pr_alert("L1 / TLB Error detected on CPU %d!\n", cpu);
pr_alert("\tCESR = 0x%08x\n", cesr);
pr_alert("\tCPU speed = %lu\n", acpuclk_get_rate(cpu));
pr_alert("\tMIDR = 0x%08x\n", read_cpuid_id());
- pr_alert("\tPTE fuses = 0x%08x\n",
- readl_relaxed(MSM_QFPROM_BASE + 0xC0));
- pr_alert("\tPMIC_VREG = 0x%08x\n",
- readl_relaxed(saw_bases[cpu] + 0x14));
}
if (cesr & CESR_DCTPE) {
@@ -550,12 +539,18 @@
return 0;
}
+static struct of_device_id cache_erp_match_table[] = {
+ { .compatible = "qcom,cache_erp", },
+ {}
+};
+
static struct platform_driver msm_cache_erp_driver = {
.probe = msm_cache_erp_probe,
.remove = msm_cache_erp_remove,
.driver = {
.name = MODULE_NAME,
.owner = THIS_MODULE,
+ .of_match_table = cache_erp_match_table,
},
};
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 2a4566e..02b28b6 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -194,6 +194,7 @@
#define DSI2_ESC_CC_REG REG_MM(0x013C)
#define DSI_PIXEL_CC_REG REG_MM(0x0130)
#define DSI2_PIXEL_CC_REG REG_MM(0x0094)
+#define DSI2_PIXEL_CC2_REG REG_MM(0x0264)
#define DBG_BUS_VEC_A_REG REG_MM(0x01C8)
#define DBG_BUS_VEC_B_REG REG_MM(0x01CC)
#define DBG_BUS_VEC_C_REG REG_MM(0x01D0)
@@ -419,16 +420,16 @@
.fmax[VDD_DIG_##l2] = (f2), \
.fmax[VDD_DIG_##l3] = (f3)
-enum vdd_sr2_pll_levels {
- VDD_SR2_PLL_OFF,
- VDD_SR2_PLL_ON
+enum vdd_sr2_hdmi_pll_levels {
+ VDD_SR2_HDMI_PLL_OFF,
+ VDD_SR2_HDMI_PLL_ON
};
-static int set_vdd_sr2_pll_8960(struct clk_vdd_class *vdd_class, int level)
+static int set_vdd_sr2_hdmi_pll_8960(struct clk_vdd_class *vdd_class, int level)
{
int rc = 0;
- if (level == VDD_SR2_PLL_OFF) {
+ if (level == VDD_SR2_HDMI_PLL_OFF) {
rc = rpm_vreg_set_voltage(RPM_VREG_ID_PM8921_L23,
RPM_VREG_VOTER3, 0, 0, 1);
if (rc)
@@ -453,20 +454,20 @@
return rc;
}
-static DEFINE_VDD_CLASS(vdd_sr2_pll, set_vdd_sr2_pll_8960);
+static DEFINE_VDD_CLASS(vdd_sr2_hdmi_pll, set_vdd_sr2_hdmi_pll_8960);
static int sr2_lreg_uv[] = {
- [VDD_SR2_PLL_OFF] = 0,
- [VDD_SR2_PLL_ON] = 1800000,
+ [VDD_SR2_HDMI_PLL_OFF] = 0,
+ [VDD_SR2_HDMI_PLL_ON] = 1800000,
};
-static int set_vdd_sr2_pll_8064(struct clk_vdd_class *vdd_class, int level)
+static int set_vdd_sr2_hdmi_pll_8064(struct clk_vdd_class *vdd_class, int level)
{
return rpm_vreg_set_voltage(RPM_VREG_ID_PM8921_LVS7, RPM_VREG_VOTER3,
sr2_lreg_uv[level], sr2_lreg_uv[level], 1);
}
-static int set_vdd_sr2_pll_8930(struct clk_vdd_class *vdd_class, int level)
+static int set_vdd_sr2_hdmi_pll_8930(struct clk_vdd_class *vdd_class, int level)
{
return rpm_vreg_set_voltage(RPM_VREG_ID_PM8038_L23, RPM_VREG_VOTER3,
sr2_lreg_uv[level], sr2_lreg_uv[level], 1);
@@ -498,8 +499,8 @@
.dbg_name = "pll3_clk",
.rate = 1200000000,
.ops = &clk_ops_local_pll,
- .vdd_class = &vdd_sr2_pll,
- .fmax[VDD_SR2_PLL_ON] = ULONG_MAX,
+ .vdd_class = &vdd_sr2_hdmi_pll,
+ .fmax[VDD_SR2_HDMI_PLL_ON] = ULONG_MAX,
CLK_INIT(pll3_clk.c),
.warned = true,
},
@@ -785,7 +786,7 @@
};
/* gfx3d_axi_clk is set as a dependency of gmem_axi_clk at runtime */
-static struct branch_clk gfx3d_axi_clk_8064 = {
+static struct branch_clk gfx3d_axi_clk = {
.b = {
.ctl_reg = MAXI_EN5_REG,
.en_mask = BIT(25),
@@ -799,7 +800,7 @@
.c = {
.dbg_name = "gfx3d_axi_clk",
.ops = &clk_ops_branch,
- CLK_INIT(gfx3d_axi_clk_8064.c),
+ CLK_INIT(gfx3d_axi_clk.c),
},
};
@@ -2688,6 +2689,7 @@
.ops = &clk_ops_rcg,
VDD_DIG_FMAX_MAP2(LOW, 86000000, NOMINAL, 178000000),
CLK_INIT(csi0_src_clk.c),
+ .warned = true,
},
};
@@ -2705,6 +2707,7 @@
.dbg_name = "csi0_clk",
.ops = &clk_ops_branch,
CLK_INIT(csi0_clk.c),
+ .warned = true,
},
};
@@ -2744,6 +2747,7 @@
.ops = &clk_ops_rcg,
VDD_DIG_FMAX_MAP2(LOW, 86000000, NOMINAL, 178000000),
CLK_INIT(csi1_src_clk.c),
+ .warned = true,
},
};
@@ -2761,6 +2765,7 @@
.dbg_name = "csi1_clk",
.ops = &clk_ops_branch,
CLK_INIT(csi1_clk.c),
+ .warned = true,
},
};
@@ -2800,6 +2805,7 @@
.ops = &clk_ops_rcg,
VDD_DIG_FMAX_MAP2(LOW, 86000000, NOMINAL, 178000000),
CLK_INIT(csi2_src_clk.c),
+ .warned = true,
},
};
@@ -2817,6 +2823,7 @@
.dbg_name = "csi2_clk",
.ops = &clk_ops_branch,
CLK_INIT(csi2_clk.c),
+ .warned = true,
},
};
@@ -3406,6 +3413,27 @@
.ctl_val = CC_BANKED(9, 6, n), \
}
+static struct clk_freq_tbl clk_tbl_gfx3d_8960ab[] = {
+ F_GFX3D( 0, gnd, 0, 0),
+ F_GFX3D( 27000000, pxo, 0, 0),
+ F_GFX3D( 48000000, pll8, 1, 8),
+ F_GFX3D( 54857000, pll8, 1, 7),
+ F_GFX3D( 64000000, pll8, 1, 6),
+ F_GFX3D( 76800000, pll8, 1, 5),
+ F_GFX3D( 96000000, pll8, 1, 4),
+ F_GFX3D(128000000, pll8, 1, 3),
+ F_GFX3D(145455000, pll2, 2, 11),
+ F_GFX3D(160000000, pll2, 1, 5),
+ F_GFX3D(177778000, pll2, 2, 9),
+ F_GFX3D(200000000, pll2, 1, 4),
+ F_GFX3D(228571000, pll2, 2, 7),
+ F_GFX3D(266667000, pll2, 1, 3),
+ F_GFX3D(320000000, pll2, 2, 5),
+ F_GFX3D(325000000, pll3, 1, 2),
+ F_GFX3D(400000000, pll2, 1, 2),
+ F_END
+};
+
static struct clk_freq_tbl clk_tbl_gfx3d_8960[] = {
F_GFX3D( 0, gnd, 0, 0),
F_GFX3D( 27000000, pxo, 0, 0),
@@ -3713,6 +3741,27 @@
.ns_val = NS_MND_BANKED8(22, 14, n, m, 3, 0, s##_to_mm_mux), \
.ctl_val = CC_BANKED(9, 6, n), \
}
+static struct clk_freq_tbl clk_tbl_mdp_8960ab[] = {
+ F_MDP( 0, gnd, 0, 0),
+ F_MDP( 9600000, pll8, 1, 40),
+ F_MDP( 13710000, pll8, 1, 28),
+ F_MDP( 27000000, pxo, 0, 0),
+ F_MDP( 29540000, pll8, 1, 13),
+ F_MDP( 34910000, pll8, 1, 11),
+ F_MDP( 38400000, pll8, 1, 10),
+ F_MDP( 59080000, pll8, 2, 13),
+ F_MDP( 76800000, pll8, 1, 5),
+ F_MDP( 85330000, pll8, 2, 9),
+ F_MDP( 96000000, pll8, 1, 4),
+ F_MDP(128000000, pll8, 1, 3),
+ F_MDP(160000000, pll2, 1, 5),
+ F_MDP(177780000, pll2, 2, 9),
+ F_MDP(200000000, pll2, 1, 4),
+ F_MDP(228571000, pll2, 2, 7),
+ F_MDP(266667000, pll2, 1, 3),
+ F_END
+};
+
static struct clk_freq_tbl clk_tbl_mdp[] = {
F_MDP( 0, gnd, 0, 0),
F_MDP( 9600000, pll8, 1, 40),
@@ -3912,11 +3961,6 @@
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
-static unsigned long hdmi_pll_clk_get_rate(struct clk *c)
-{
- return hdmi_pll_get_rate();
-}
-
static struct clk *hdmi_pll_clk_get_parent(struct clk *c)
{
return &pxo_clk.c;
@@ -3925,13 +3969,14 @@
static struct clk_ops clk_ops_hdmi_pll = {
.enable = hdmi_pll_clk_enable,
.disable = hdmi_pll_clk_disable,
- .get_rate = hdmi_pll_clk_get_rate,
.get_parent = hdmi_pll_clk_get_parent,
};
static struct clk hdmi_pll_clk = {
.dbg_name = "hdmi_pll_clk",
.ops = &clk_ops_hdmi_pll,
+ .vdd_class = &vdd_sr2_hdmi_pll,
+ .fmax[VDD_SR2_HDMI_PLL_ON] = ULONG_MAX,
CLK_INIT(hdmi_pll_clk),
};
@@ -3975,8 +4020,10 @@
void set_rate_tv(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
unsigned long pll_rate = (unsigned long)nf->extra_freq_data;
- if (pll_rate)
+ if (pll_rate) {
hdmi_pll_set_rate(pll_rate);
+ hdmi_pll_clk.rate = pll_rate;
+ }
set_rate_mnd(rcg, nf);
}
@@ -4338,7 +4385,23 @@
.md_val = MD8(8, m, 0, n), \
.ns_val = NS(31, 24, n, m, 5, 4, 3, d, 2, 0, s##_to_lpa_mux), \
}
-static struct clk_freq_tbl clk_tbl_aif_osr[] = {
+static struct clk_freq_tbl clk_tbl_aif_osr_492[] = {
+ F_AIF_OSR( 0, gnd, 1, 0, 0),
+ F_AIF_OSR( 512000, pll4, 4, 1, 240),
+ F_AIF_OSR( 768000, pll4, 4, 1, 160),
+ F_AIF_OSR( 1024000, pll4, 4, 1, 120),
+ F_AIF_OSR( 1536000, pll4, 4, 1, 80),
+ F_AIF_OSR( 2048000, pll4, 4, 1, 60),
+ F_AIF_OSR( 3072000, pll4, 4, 1, 40),
+ F_AIF_OSR( 4096000, pll4, 4, 1, 30),
+ F_AIF_OSR( 6144000, pll4, 4, 1, 20),
+ F_AIF_OSR( 8192000, pll4, 4, 1, 15),
+ F_AIF_OSR(12288000, pll4, 4, 1, 10),
+ F_AIF_OSR(24576000, pll4, 4, 1, 5),
+ F_END
+};
+
+static struct clk_freq_tbl clk_tbl_aif_osr_393[] = {
F_AIF_OSR( 0, gnd, 1, 0, 0),
F_AIF_OSR( 512000, pll4, 4, 1, 192),
F_AIF_OSR( 768000, pll4, 4, 1, 128),
@@ -4371,7 +4434,7 @@
.ns_mask = (BM(31, 24) | BM(6, 0)), \
.mnd_en_mask = BIT(8), \
.set_rate = set_rate_mnd, \
- .freq_tbl = clk_tbl_aif_osr, \
+ .freq_tbl = clk_tbl_aif_osr_393, \
.current_freq = &rcg_dummy_freq, \
.c = { \
.dbg_name = #i "_clk", \
@@ -4397,7 +4460,7 @@
.ns_mask = (BM(31, 24) | BM(6, 0)), \
.mnd_en_mask = BIT(8), \
.set_rate = set_rate_mnd, \
- .freq_tbl = clk_tbl_aif_osr, \
+ .freq_tbl = clk_tbl_aif_osr_393, \
.current_freq = &rcg_dummy_freq, \
.c = { \
.dbg_name = #i "_clk", \
@@ -4478,8 +4541,26 @@
.md_val = MD16(m, n), \
.ns_val = NS(31, 16, n, m, 5, 4, 3, d, 2, 0, s##_to_lpa_mux), \
}
-static struct clk_freq_tbl clk_tbl_pcm[] = {
+static struct clk_freq_tbl clk_tbl_pcm_492[] = {
{ .ns_val = BIT(10) /* external input */ },
+ F_PCM( 256000, pll4, 4, 1, 480),
+ F_PCM( 512000, pll4, 4, 1, 240),
+ F_PCM( 768000, pll4, 4, 1, 160),
+ F_PCM( 1024000, pll4, 4, 1, 120),
+ F_PCM( 1536000, pll4, 4, 1, 80),
+ F_PCM( 2048000, pll4, 4, 1, 60),
+ F_PCM( 3072000, pll4, 4, 1, 40),
+ F_PCM( 4096000, pll4, 4, 1, 30),
+ F_PCM( 6144000, pll4, 4, 1, 20),
+ F_PCM( 8192000, pll4, 4, 1, 15),
+ F_PCM(12288000, pll4, 4, 1, 10),
+ F_PCM(24576000, pll4, 4, 1, 5),
+ F_END
+};
+
+static struct clk_freq_tbl clk_tbl_pcm_393[] = {
+ { .ns_val = BIT(10) /* external input */ },
+ F_PCM( 256000, pll4, 4, 1, 384),
F_PCM( 512000, pll4, 4, 1, 192),
F_PCM( 768000, pll4, 4, 1, 128),
F_PCM( 1024000, pll4, 4, 1, 96),
@@ -4510,7 +4591,7 @@
.ns_mask = BM(31, 16) | BIT(10) | BM(6, 0),
.mnd_en_mask = BIT(8),
.set_rate = set_rate_mnd,
- .freq_tbl = clk_tbl_pcm,
+ .freq_tbl = clk_tbl_pcm_393,
.current_freq = &rcg_dummy_freq,
.c = {
.dbg_name = "pcm_clk",
@@ -4537,7 +4618,7 @@
.ns_mask = (BM(31, 24) | BM(6, 0)),
.mnd_en_mask = BIT(8),
.set_rate = set_rate_mnd,
- .freq_tbl = clk_tbl_aif_osr,
+ .freq_tbl = clk_tbl_aif_osr_393,
.current_freq = &rcg_dummy_freq,
.c = {
.dbg_name = "audio_slimbus_clk",
@@ -4823,7 +4904,7 @@
{ TEST_MM_HS(0x35), &vcap_axi_clk.c },
{ TEST_MM_HS(0x36), &rgb_tv_clk.c },
{ TEST_MM_HS(0x37), &npl_tv_clk.c },
- { TEST_MM_HS(0x38), &gfx3d_axi_clk_8064.c },
+ { TEST_MM_HS(0x38), &gfx3d_axi_clk.c },
{ TEST_LPA(0x0F), &mi2s_bit_clk.c },
{ TEST_LPA(0x10), &codec_i2s_mic_bit_clk.c },
@@ -5201,7 +5282,7 @@
CLK_LOOKUP("core_clk", gfx3d_clk.c, "kgsl-3d0.0"),
CLK_LOOKUP("core_clk", gfx3d_clk.c, "footswitch-8x60.2"),
CLK_LOOKUP("bus_clk",
- gfx3d_axi_clk_8064.c, "footswitch-8x60.2"),
+ gfx3d_axi_clk.c, "footswitch-8x60.2"),
CLK_LOOKUP("iface_clk", vcap_p_clk.c, ""),
CLK_LOOKUP("iface_clk", vcap_p_clk.c, "msm_vcap.0"),
CLK_LOOKUP("iface_clk", vcap_p_clk.c, "footswitch-8x60.10"),
@@ -5307,7 +5388,7 @@
CLK_LOOKUP("core_clk", vfe_axi_clk.c, ""),
CLK_LOOKUP("core_clk", vcodec_axi_a_clk.c, ""),
CLK_LOOKUP("core_clk", vcodec_axi_b_clk.c, ""),
- CLK_LOOKUP("core_clk", gfx3d_axi_clk_8064.c, ""),
+ CLK_LOOKUP("core_clk", gfx3d_axi_clk.c, ""),
CLK_LOOKUP("dfab_dsps_clk", dfab_dsps_clk.c, NULL),
CLK_LOOKUP("core_clk", dfab_usb_hs_clk.c, "msm_otg"),
@@ -5337,8 +5418,9 @@
CLK_LOOKUP("core_clk", vfe_axi_clk.c, "msm_iommu.6"),
CLK_LOOKUP("core_clk", vcodec_axi_a_clk.c, "msm_iommu.7"),
CLK_LOOKUP("core_clk", vcodec_axi_b_clk.c, "msm_iommu.8"),
- CLK_LOOKUP("core_clk", gfx3d_axi_clk_8064.c, "msm_iommu.9"),
- CLK_LOOKUP("core_clk", gfx3d_axi_clk_8064.c, "msm_iommu.10"),
+ CLK_LOOKUP("core_clk", gfx3d_axi_clk.c, "msm_iommu.9"),
+ CLK_LOOKUP("core_clk", gfx3d_axi_clk.c, "msm_iommu.10"),
+
CLK_LOOKUP("core_clk", vcap_axi_clk.c, "msm_iommu.11"),
CLK_LOOKUP("mdp_iommu_clk", mdp_axi_clk.c, "msm_vidc.0"),
@@ -5360,7 +5442,7 @@
CLK_LOOKUP("krait3_mclk", krait3_m_clk, ""),
};
-static struct clk_lookup msm_clocks_8960[] = {
+static struct clk_lookup msm_clocks_8960_common[] __initdata = {
CLK_LOOKUP("xo", cxo_a_clk.c, ""),
CLK_LOOKUP("xo", pxo_a_clk.c, ""),
CLK_LOOKUP("cxo", cxo_clk.c, "wcnss_wlan.0"),
@@ -5428,7 +5510,10 @@
CLK_LOOKUP("core_clk", gsbi5_uart_clk.c, "msm_serial_hsl.0"),
CLK_LOOKUP("core_clk", gsbi6_uart_clk.c, "msm_serial_hs.0"),
CLK_LOOKUP("core_clk", gsbi7_uart_clk.c, ""),
+ /* used on 8960 SGLTE for console */
CLK_LOOKUP("core_clk", gsbi8_uart_clk.c, "msm_serial_hsl.1"),
+ /* used on 8960 standalone with Atheros Bluetooth */
+ CLK_LOOKUP("core_clk", gsbi8_uart_clk.c, "msm_serial_hs.2"),
CLK_LOOKUP("core_clk", gsbi9_uart_clk.c, "msm_serial_hs.1"),
CLK_LOOKUP("core_clk", gsbi10_uart_clk.c, ""),
CLK_LOOKUP("core_clk", gsbi11_uart_clk.c, ""),
@@ -5480,7 +5565,10 @@
CLK_LOOKUP("iface_clk", gsbi5_p_clk.c, "msm_serial_hsl.0"),
CLK_LOOKUP("iface_clk", gsbi6_p_clk.c, "msm_serial_hs.0"),
CLK_LOOKUP("iface_clk", gsbi7_p_clk.c, ""),
+ /* used on 8960 SGLTE for serial console */
CLK_LOOKUP("iface_clk", gsbi8_p_clk.c, "msm_serial_hsl.1"),
+ /* used on 8960 standalone with Atheros Bluetooth */
+ CLK_LOOKUP("iface_clk", gsbi8_p_clk.c, "msm_serial_hs.2"),
CLK_LOOKUP("iface_clk", gsbi9_p_clk.c, "msm_serial_hs.1"),
CLK_LOOKUP("iface_clk", gsbi10_p_clk.c, "qup_i2c.10"),
CLK_LOOKUP("iface_clk", gsbi11_p_clk.c, ""),
@@ -5539,10 +5627,6 @@
CLK_LOOKUP("byte_clk", dsi2_byte_clk.c, "mipi_dsi.2"),
CLK_LOOKUP("esc_clk", dsi1_esc_clk.c, "mipi_dsi.1"),
CLK_LOOKUP("esc_clk", dsi2_esc_clk.c, "mipi_dsi.2"),
- CLK_LOOKUP("core_clk", gfx2d0_clk.c, "kgsl-2d0.0"),
- CLK_LOOKUP("core_clk", gfx2d0_clk.c, "footswitch-8x60.0"),
- CLK_LOOKUP("core_clk", gfx2d1_clk.c, "kgsl-2d1.1"),
- CLK_LOOKUP("core_clk", gfx2d1_clk.c, "footswitch-8x60.1"),
CLK_LOOKUP("core_clk", gfx3d_clk.c, "kgsl-3d0.0"),
CLK_LOOKUP("core_clk", gfx3d_clk.c, "footswitch-8x60.2"),
CLK_LOOKUP("bus_clk", ijpeg_axi_clk.c, "footswitch-8x60.3"),
@@ -5562,8 +5646,6 @@
CLK_LOOKUP("src_clk", tv_src_clk.c, "dtv.0"),
CLK_LOOKUP("src_clk", tv_src_clk.c, "tvenc.0"),
CLK_LOOKUP("tv_src_clk", tv_src_clk.c, "footswitch-8x60.4"),
- CLK_LOOKUP("enc_clk", tv_enc_clk.c, "tvenc.0"),
- CLK_LOOKUP("dac_clk", tv_dac_clk.c, "tvenc.0"),
CLK_LOOKUP("core_clk", vcodec_clk.c, "msm_vidc.0"),
CLK_LOOKUP("core_clk", vcodec_clk.c, "footswitch-8x60.7"),
CLK_LOOKUP("mdp_clk", mdp_tv_clk.c, "dtv.0"),
@@ -5592,10 +5674,6 @@
CLK_LOOKUP("slave_iface_clk", dsi1_s_p_clk.c, "mipi_dsi.1"),
CLK_LOOKUP("master_iface_clk", dsi2_m_p_clk.c, "mipi_dsi.2"),
CLK_LOOKUP("slave_iface_clk", dsi2_s_p_clk.c, "mipi_dsi.2"),
- CLK_LOOKUP("iface_clk", gfx2d0_p_clk.c, "kgsl-2d0.0"),
- CLK_LOOKUP("iface_clk", gfx2d0_p_clk.c, "footswitch-8x60.0"),
- CLK_LOOKUP("iface_clk", gfx2d1_p_clk.c, "kgsl-2d1.1"),
- CLK_LOOKUP("iface_clk", gfx2d1_p_clk.c, "footswitch-8x60.1"),
CLK_LOOKUP("iface_clk", gfx3d_p_clk.c, "kgsl-3d0.0"),
CLK_LOOKUP("iface_clk", gfx3d_p_clk.c, "footswitch-8x60.2"),
CLK_LOOKUP("master_iface_clk", hdmi_m_p_clk.c, "hdmi_msm.1"),
@@ -5609,7 +5687,6 @@
CLK_LOOKUP("iface_clk", smmu_p_clk.c, "msm_iommu"),
CLK_LOOKUP("iface_clk", rot_p_clk.c, "msm_rotator.0"),
CLK_LOOKUP("iface_clk", rot_p_clk.c, "footswitch-8x60.6"),
- CLK_LOOKUP("iface_clk", tv_enc_p_clk.c, "tvenc.0"),
CLK_LOOKUP("iface_clk", vcodec_p_clk.c, "msm_vidc.0"),
CLK_LOOKUP("iface_clk", vcodec_p_clk.c, "footswitch-8x60.7"),
CLK_LOOKUP("vfe_pclk", vfe_p_clk.c, "msm_vfe.0"),
@@ -5650,8 +5727,6 @@
CLK_LOOKUP("core_clk", vcodec_axi_a_clk.c, "msm_iommu.7"),
CLK_LOOKUP("core_clk", vcodec_axi_b_clk.c, "msm_iommu.8"),
CLK_LOOKUP("core_clk", gfx3d_clk.c, "msm_iommu.9"),
- CLK_LOOKUP("core_clk", gfx2d0_clk.c, "msm_iommu.10"),
- CLK_LOOKUP("core_clk", gfx2d1_clk.c, "msm_iommu.11"),
CLK_LOOKUP("mdp_iommu_clk", mdp_axi_clk.c, "msm_vidc.0"),
CLK_LOOKUP("rot_iommu_clk", rot_axi_clk.c, "msm_vidc.0"),
@@ -5685,6 +5760,32 @@
CLK_LOOKUP("q6_func_clk", q6_func_clk, ""),
};
+static struct clk_lookup msm_clocks_8960_only[] __initdata = {
+ CLK_LOOKUP("enc_clk", tv_enc_clk.c, "tvenc.0"),
+ CLK_LOOKUP("dac_clk", tv_dac_clk.c, "tvenc.0"),
+ CLK_LOOKUP("iface_clk", tv_enc_p_clk.c, "tvenc.0"),
+
+ CLK_LOOKUP("core_clk", gfx2d0_clk.c, "kgsl-2d0.0"),
+ CLK_LOOKUP("core_clk", gfx2d0_clk.c, "footswitch-8x60.0"),
+ CLK_LOOKUP("core_clk", gfx2d1_clk.c, "kgsl-2d1.1"),
+ CLK_LOOKUP("core_clk", gfx2d1_clk.c, "footswitch-8x60.1"),
+ CLK_LOOKUP("iface_clk", gfx2d0_p_clk.c, "kgsl-2d0.0"),
+ CLK_LOOKUP("iface_clk", gfx2d0_p_clk.c, "footswitch-8x60.0"),
+ CLK_LOOKUP("iface_clk", gfx2d1_p_clk.c, "kgsl-2d1.1"),
+ CLK_LOOKUP("iface_clk", gfx2d1_p_clk.c, "footswitch-8x60.1"),
+ CLK_LOOKUP("core_clk", gfx2d0_clk.c, "msm_iommu.10"),
+ CLK_LOOKUP("core_clk", gfx2d1_clk.c, "msm_iommu.11"),
+};
+
+static struct clk_lookup msm_clocks_8960ab_only[] __initdata = {
+ CLK_LOOKUP("bus_clk", gfx3d_axi_clk.c, "footswitch-8x60.2"),
+ CLK_LOOKUP("div_clk", tv_src_div_clk.c, ""),
+};
+
+static struct clk_lookup msm_clocks_8960[ARRAY_SIZE(msm_clocks_8960_common)
+ + ARRAY_SIZE(msm_clocks_8960_only)
+ + ARRAY_SIZE(msm_clocks_8960ab_only)];
+
static struct clk_lookup msm_clocks_8930[] = {
CLK_LOOKUP("xo", cxo_clk.c, "msm_xo"),
CLK_LOOKUP("cxo", cxo_clk.c, "wcnss_wlan.0"),
@@ -6004,7 +6105,7 @@
.mode_reg = LCC_PLL0_MODE_REG,
};
-static struct pll_config pll4_config __initdata = {
+static struct pll_config pll4_config_393 __initdata = {
.l = 0xE,
.m = 0x27A,
.n = 0x465,
@@ -6082,12 +6183,12 @@
*/
/*
* Initialize MM AHB registers: Enable the FPB clock and disable HW
- * gating on 8627 for all clocks. Also set VFE_AHB's
+ * gating on 8627 and 8960 for all clocks. Also set VFE_AHB's
* FORCE_CORE_ON bit to prevent its memory from being collapsed when
* the clock is halted. The sleep and wake-up delays are set to safe
* values.
*/
- if (cpu_is_msm8627()) {
+ if (cpu_is_msm8627() || cpu_is_msm8960ab()) {
rmwreg(0x00000003, AHB_EN_REG, 0x6C000103);
writel_relaxed(0x000007F9, AHB_EN2_REG);
} else {
@@ -6105,7 +6206,7 @@
/* Initialize MM AXI registers: Enable HW gating for all clocks that
* support it. Also set FORCE_CORE_ON bits, and any sleep and wake-up
* delays to safe values. */
- if ((cpu_is_msm8960() &&
+ if (cpu_is_msm8960ab() || (cpu_is_msm8960() &&
SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 3) ||
cpu_is_msm8627()) {
rmwreg(0x000007F9, MAXI_EN_REG, 0x0803FFFF);
@@ -6122,8 +6223,13 @@
rmwreg(0x019FECFF, MAXI_EN5_REG, 0x01FFEFFF);
if (cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627())
rmwreg(0x000004FF, MAXI_EN5_REG, 0x00000FFF);
+ if (cpu_is_msm8960ab())
+ rmwreg(0x009FE000, MAXI_EN5_REG, 0x01FFE000);
+
if (cpu_is_msm8627())
rmwreg(0x000003C7, SAXI_EN_REG, 0x00003FFF);
+ else if (cpu_is_msm8960ab())
+ rmwreg(0x000001C6, SAXI_EN_REG, 0x00001DF6);
else
rmwreg(0x00003C38, SAXI_EN_REG, 0x00003FFF);
@@ -6151,14 +6257,19 @@
rmwreg(0x80FF0000, VFE_CC_REG, 0xE0FF4010);
rmwreg(0x800000FF, VFE_CC2_REG, 0xE00000FF);
rmwreg(0x80FF0000, VPE_CC_REG, 0xE0FF0010);
- if (cpu_is_msm8960() || cpu_is_apq8064()) {
+ if (cpu_is_msm8960ab() || cpu_is_msm8960() || cpu_is_apq8064()) {
rmwreg(0x80FF0000, DSI2_BYTE_CC_REG, 0xE0FF0010);
rmwreg(0x80FF0000, DSI2_PIXEL_CC_REG, 0xE0FF0010);
rmwreg(0x80FF0000, JPEGD_CC_REG, 0xE0FF0010);
}
+ if (cpu_is_msm8960ab())
+ rmwreg(0x00000001, DSI2_PIXEL_CC2_REG, 0x00000001);
+
if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
cpu_is_msm8627())
- rmwreg(0x80FF0000, TV_CC_REG, 0xE1FFC010);
+ rmwreg(0x80FF0000, TV_CC_REG, 0xE1FFC010);
+ if (cpu_is_msm8960ab())
+ rmwreg(0x00000000, TV_CC_REG, 0x00004010);
if (cpu_is_msm8960()) {
rmwreg(0x80FF0000, GFX2D0_CC_REG, 0xE0FF0010);
@@ -6192,12 +6303,12 @@
writel_relaxed(BIT(15), PDM_CLK_NS_REG);
/* Source SLIMBus xo src from slimbus reference clock */
- if (cpu_is_msm8960())
+ if (cpu_is_msm8960ab() || cpu_is_msm8960())
writel_relaxed(0x3, SLIMBUS_XO_SRC_CLK_CTL_REG);
/* Source the dsi_byte_clks from the DSI PHY PLLs */
rmwreg(0x1, DSI1_BYTE_NS_REG, 0x7);
- if (cpu_is_msm8960() || cpu_is_apq8064())
+ if (cpu_is_msm8960ab() || cpu_is_msm8960() || cpu_is_apq8064())
rmwreg(0x2, DSI2_BYTE_NS_REG, 0x7);
/* Source the dsi1_esc_clk from the DSI1 PHY PLLs */
@@ -6235,7 +6346,7 @@
is_pll_enabled = readl_relaxed(LCC_PLL0_STATUS_REG) & BIT(16);
if (!is_pll_enabled)
/* Ref clk = 27MHz and program pll4 to 393.2160MHz */
- configure_pll(&pll4_config, &pll4_regs, 1);
+ configure_pll(&pll4_config_393, &pll4_regs, 1);
/* Enable PLL4 source on the LPASS Primary PLL Mux */
writel_relaxed(0x1, LCC_PRI_PLL_CLK_CTL_REG);
@@ -6259,15 +6370,54 @@
}
}
+struct clock_init_data msm8960_clock_init_data __initdata;
static void __init msm8960_clock_pre_init(void)
{
+ /* Initialize clock registers. */
+ reg_init();
+
if (cpu_is_apq8064()) {
- vdd_sr2_pll.set_vdd = set_vdd_sr2_pll_8064;
+ vdd_sr2_hdmi_pll.set_vdd = set_vdd_sr2_hdmi_pll_8064;
} else if (cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627()) {
vdd_dig.set_vdd = set_vdd_dig_8930;
- vdd_sr2_pll.set_vdd = set_vdd_sr2_pll_8930;
+ vdd_sr2_hdmi_pll.set_vdd = set_vdd_sr2_hdmi_pll_8930;
}
+ /* Detect PLL4 programmed for alternate 491.52MHz clock plan. */
+ if (readl_relaxed(LCC_PLL0_L_VAL_REG) == 0x12) {
+ pll4_clk.c.rate = 491520000;
+ audio_slimbus_clk.freq_tbl = clk_tbl_aif_osr_492;
+ mi2s_osr_clk.freq_tbl = clk_tbl_aif_osr_492;
+ codec_i2s_mic_osr_clk.freq_tbl = clk_tbl_aif_osr_492;
+ spare_i2s_mic_osr_clk.freq_tbl = clk_tbl_aif_osr_492;
+ codec_i2s_spkr_osr_clk.freq_tbl = clk_tbl_aif_osr_492;
+ spare_i2s_spkr_osr_clk.freq_tbl = clk_tbl_aif_osr_492;
+ pcm_clk.freq_tbl = clk_tbl_pcm_492;
+ }
+
+ if (cpu_is_msm8960() || cpu_is_msm8960ab())
+ memcpy(msm_clocks_8960, msm_clocks_8960_common,
+ sizeof(msm_clocks_8960_common));
+ if (cpu_is_msm8960ab()) {
+ pll3_clk.c.rate = 650000000;
+ gfx3d_clk.freq_tbl = clk_tbl_gfx3d_8960ab;
+ gfx3d_clk.c.fmax[VDD_DIG_LOW] = 192000000;
+ gfx3d_clk.c.fmax[VDD_DIG_NOMINAL] = 325000000;
+ gfx3d_clk.c.fmax[VDD_DIG_HIGH] = 400000000;
+ mdp_clk.freq_tbl = clk_tbl_mdp_8960ab;
+ mdp_clk.c.fmax[VDD_DIG_LOW] = 128000000;
+ mdp_clk.c.fmax[VDD_DIG_NOMINAL] = 266667000;
+
+ memcpy(msm_clocks_8960 + ARRAY_SIZE(msm_clocks_8960_common),
+ msm_clocks_8960ab_only, sizeof(msm_clocks_8960ab_only));
+ msm8960_clock_init_data.size -=
+ ARRAY_SIZE(msm_clocks_8960_only);
+ } else if (cpu_is_msm8960()) {
+ memcpy(msm_clocks_8960 + ARRAY_SIZE(msm_clocks_8960_common),
+ msm_clocks_8960_only, sizeof(msm_clocks_8960_only));
+ msm8960_clock_init_data.size -=
+ ARRAY_SIZE(msm_clocks_8960ab_only);
+ }
/*
* Change the freq tables for and voltage requirements for
* clocks which differ between 8960 and 8064.
@@ -6286,7 +6436,7 @@
memcpy(vfe_clk.c.fmax, fmax_vfe_8064,
sizeof(vfe_clk.c.fmax));
- gmem_axi_clk.c.depends = &gfx3d_axi_clk_8064.c;
+ gmem_axi_clk.c.depends = &gfx3d_axi_clk.c;
}
/*
@@ -6308,9 +6458,6 @@
vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
clk_ops_local_pll.enable = sr_pll_clk_enable;
-
- /* Initialize clock registers. */
- reg_init();
}
static void __init msm8960_clock_post_init(void)
@@ -6338,8 +6485,8 @@
clk_set_rate(&usb_hs4_xcvr_clk.c, 60000000);
}
clk_set_rate(&usb_fs1_src_clk.c, 60000000);
- if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
- cpu_is_msm8627())
+ if (cpu_is_msm8960ab() || cpu_is_msm8960() || cpu_is_msm8930() ||
+ cpu_is_msm8930aa() || cpu_is_msm8627())
clk_set_rate(&usb_fs2_src_clk.c, 60000000);
clk_set_rate(&usb_hsic_xcvr_fs_clk.c, 60000000);
clk_set_rate(&usb_hsic_hsic_src_clk.c, 480000000);
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 930de81..8076571 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -20,6 +20,8 @@
#include <linux/clk.h>
#include <mach/clk.h>
+#include <mach/rpm-regulator-smd.h>
+#include <mach/socinfo.h>
#include "clock-local2.h"
#include "clock-pll.h"
@@ -197,6 +199,7 @@
#define AHB_CMD_RCGR 0x5000
#define AXI_CMD_RCGR 0x5040
#define OCMEMNOC_CMD_RCGR 0x5090
+#define OCMEMCX_OCMEMNOC_CBCR 0x4058
#define MMSS_BCR 0x0240
#define USB_30_BCR 0x03C0
@@ -580,17 +583,26 @@
VDD_DIG_HIGH
};
+static const int vdd_corner[] = {
+ [VDD_DIG_NONE] = RPM_REGULATOR_CORNER_NONE,
+ [VDD_DIG_LOW] = RPM_REGULATOR_CORNER_SVS_SOC,
+ [VDD_DIG_NOMINAL] = RPM_REGULATOR_CORNER_NORMAL,
+ [VDD_DIG_HIGH] = RPM_REGULATOR_CORNER_SUPER_TURBO,
+};
+
+static struct rpm_regulator *vdd_dig_reg;
+
static int set_vdd_dig(struct clk_vdd_class *vdd_class, int level)
{
- /* TODO: Actually call into regulator APIs to set VDD_DIG here. */
- return 0;
+ return rpm_regulator_set_voltage(vdd_dig_reg, vdd_corner[level],
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
}
static DEFINE_VDD_CLASS(vdd_dig, set_vdd_dig);
-#define RPM_MISC_CLK_TYPE 0x306b6c63
-#define RPM_BUS_CLK_TYPE 0x316b6c63
-#define RPM_MEM_CLK_TYPE 0x326b6c63
+#define RPM_MISC_CLK_TYPE 0x306b6c63
+#define RPM_BUS_CLK_TYPE 0x316b6c63
+#define RPM_MEM_CLK_TYPE 0x326b6c63
#define CXO_ID 0x0
#define QDSS_ID 0x1
@@ -603,6 +615,14 @@
#define BIMC_ID 0x0
#define OCMEM_ID 0x1
+enum {
+ D0_ID = 1,
+ D1_ID,
+ A0_ID,
+ A1_ID,
+ A2_ID,
+};
+
DEFINE_CLK_RPM_SMD(pnoc_clk, pnoc_a_clk, RPM_BUS_CLK_TYPE, PNOC_ID, NULL);
DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_ID, NULL);
DEFINE_CLK_RPM_SMD(cnoc_clk, cnoc_a_clk, RPM_BUS_CLK_TYPE, CNOC_ID, NULL);
@@ -617,9 +637,20 @@
RPM_MISC_CLK_TYPE, CXO_ID, 19200000);
DEFINE_CLK_RPM_SMD_QDSS(qdss_clk, qdss_a_clk, RPM_MISC_CLK_TYPE, QDSS_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_d0, cxo_d0_a, D0_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_d1, cxo_d1_a, D1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_a0, cxo_a0_a, A0_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_a1, cxo_a1_a, A1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_a2, cxo_a2_a, A2_ID);
+
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_d0_pin, cxo_d0_a_pin, D0_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_d1_pin, cxo_d1_a_pin, D1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_a0_pin, cxo_a0_a_pin, A0_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_a1_pin, cxo_a1_a_pin, A1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_a2_pin, cxo_a2_a_pin, A2_ID);
+
static struct pll_vote_clk gpll0_clk_src = {
.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
- .en_mask = BIT(0),
.status_reg = (void __iomem *)GPLL0_STATUS_REG,
.status_mask = BIT(17),
.parent = &cxo_clk_src.c,
@@ -690,7 +721,7 @@
.base = &virt_bases[MMSS_BASE],
.c = {
.dbg_name = "mmpll1_clk_src",
- .rate = 1000000000,
+ .rate = 846000000,
.ops = &clk_ops_pll_vote,
.warned = true,
CLK_INIT(mmpll1_clk_src.c),
@@ -706,6 +737,7 @@
.dbg_name = "mmpll3_clk_src",
.rate = 1000000000,
.ops = &clk_ops_local_pll,
+ .warned = true,
CLK_INIT(mmpll3_clk_src.c),
},
};
@@ -1239,6 +1271,12 @@
F_END
};
+static struct clk_freq_tbl ftbl_gcc_sdcc_apps_rumi_clk[] = {
+ F( 400000, cxo, 12, 1, 4),
+ F( 19200000, cxo, 1, 0, 0),
+ F_END
+};
+
static struct rcg_clk sdcc1_apps_clk_src = {
.cmd_rcgr_reg = SDCC1_APPS_CMD_RCGR,
.set_rate = set_rate_mnd,
@@ -2116,6 +2154,7 @@
static struct branch_clk gcc_usb30_master_clk = {
.cbcr_reg = USB30_MASTER_CBCR,
+ .bcr_reg = USB_30_BCR,
.parent = &usb30_master_clk_src.c,
.has_sibling = 1,
.base = &virt_bases[GCC_BASE],
@@ -2150,6 +2189,7 @@
static struct branch_clk gcc_usb_hs_system_clk = {
.cbcr_reg = USB_HS_SYSTEM_CBCR,
+ .bcr_reg = USB_HS_BCR,
.parent = &usb_hs_system_clk_src.c,
.base = &virt_bases[GCC_BASE],
.c = {
@@ -2172,6 +2212,7 @@
static struct branch_clk gcc_usb_hsic_clk = {
.cbcr_reg = USB_HSIC_CBCR,
+ .bcr_reg = USB_HS_HSIC_BCR,
.parent = &usb_hsic_clk_src.c,
.base = &virt_bases[GCC_BASE],
.c = {
@@ -2203,6 +2244,28 @@
},
};
+struct branch_clk gcc_mmss_noc_cfg_ahb_clk = {
+ .cbcr_reg = MMSS_NOC_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_mmss_noc_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_mmss_noc_cfg_ahb_clk.c),
+ },
+};
+
+struct branch_clk gcc_ocmem_noc_cfg_ahb_clk = {
+ .cbcr_reg = OCMEM_NOC_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_ocmem_noc_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ocmem_noc_cfg_ahb_clk.c),
+ },
+};
+
static struct branch_clk gcc_mss_cfg_ahb_clk = {
.cbcr_reg = MSS_CFG_AHB_CBCR,
.has_sibling = 1,
@@ -2215,10 +2278,11 @@
};
static struct clk_freq_tbl ftbl_mmss_axi_clk[] = {
- F_MM( 19200000, cxo, 1, 0, 0),
- F_MM(150000000, gpll0, 4, 0, 0),
- F_MM(333330000, mmpll1, 3, 0, 0),
- F_MM(400000000, mmpll0, 2, 0, 0),
+ F_MM( 19200000, cxo, 1, 0, 0),
+ F_MM(150000000, gpll0, 4, 0, 0),
+ F_MM(282000000, mmpll1, 3, 0, 0),
+ F_MM(320000000, mmpll0, 2.5, 0, 0),
+ F_MM(400000000, mmpll0, 2, 0, 0),
F_END
};
@@ -2231,8 +2295,8 @@
.c = {
.dbg_name = "axi_clk_src",
.ops = &clk_ops_rcg,
- VDD_DIG_FMAX_MAP3(LOW, 150000000, NOMINAL, 333330000,
- HIGH, 400000000),
+ VDD_DIG_FMAX_MAP3(LOW, 150000000, NOMINAL, 282000000,
+ HIGH, 320000000),
CLK_INIT(axi_clk_src.c),
},
};
@@ -2240,7 +2304,7 @@
static struct clk_freq_tbl ftbl_ocmemnoc_clk[] = {
F_MM( 19200000, cxo, 1, 0, 0),
F_MM(150000000, gpll0, 4, 0, 0),
- F_MM(333330000, mmpll1, 3, 0, 0),
+ F_MM(282000000, mmpll1, 3, 0, 0),
F_MM(400000000, mmpll0, 2, 0, 0),
F_END
};
@@ -2254,7 +2318,7 @@
.c = {
.dbg_name = "ocmemnoc_clk_src",
.ops = &clk_ops_rcg,
- VDD_DIG_FMAX_MAP3(LOW, 150000000, NOMINAL, 333330000,
+ VDD_DIG_FMAX_MAP3(LOW, 150000000, NOMINAL, 282000000,
HIGH, 400000000),
CLK_INIT(ocmemnoc_clk_src.c),
},
@@ -3733,6 +3797,18 @@
},
};
+struct branch_clk ocmemcx_ocmemnoc_clk = {
+ .cbcr_reg = OCMEMCX_OCMEMNOC_CBCR,
+ .parent = &ocmemnoc_clk_src.c,
+ .has_sibling = 1,
+ .base = &virt_bases[MMSS_BASE],
+ .c = {
+ .dbg_name = "ocmemcx_ocmemnoc_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(ocmemcx_ocmemnoc_clk.c),
+ },
+};
+
static struct branch_clk venus0_ahb_clk = {
.cbcr_reg = VENUS0_AHB_CBCR,
.has_sibling = 1,
@@ -3968,15 +4044,29 @@
},
};
+struct rcg_clk audio_core_lpaif_pcmoe_clk_src = {
+ .cmd_rcgr_reg = LPAIF_PCMOE_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_audio_core_lpaif_clock,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[LPASS_BASE],
+ .c = {
+ .dbg_name = "audio_core_lpaif_pcmoe_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP1(LOW, 12290000),
+ CLK_INIT(audio_core_lpaif_pcmoe_clk_src.c),
+ },
+};
+
static struct branch_clk audio_core_lpaif_codec_spkr_osr_clk = {
.cbcr_reg = AUDIO_CORE_LPAIF_CODEC_SPKR_OSR_CBCR,
.parent = &audio_core_lpaif_codec_spkr_clk_src.c,
.has_sibling = 1,
.base = &virt_bases[LPASS_BASE],
.c = {
- .dbg_name = "audio_core_lpaif_codec_spkr_clk_src",
+ .dbg_name = "audio_core_lpaif_codec_spkr_osr_clk",
.ops = &clk_ops_branch,
- CLK_INIT(audio_core_lpaif_codec_spkr_clk_src.c),
+ CLK_INIT(audio_core_lpaif_codec_spkr_osr_clk.c),
},
};
@@ -3985,9 +4075,9 @@
.has_sibling = 1,
.base = &virt_bases[LPASS_BASE],
.c = {
- .dbg_name = "audio_core_lpaif_codec_spkr_clk_src",
+ .dbg_name = "audio_core_lpaif_codec_spkr_ebit_clk",
.ops = &clk_ops_branch,
- CLK_INIT(audio_core_lpaif_codec_spkr_clk_src.c),
+ CLK_INIT(audio_core_lpaif_codec_spkr_ebit_clk.c),
},
};
@@ -3998,9 +4088,9 @@
.max_div = 15,
.base = &virt_bases[LPASS_BASE],
.c = {
- .dbg_name = "audio_core_lpaif_codec_spkr_clk_src",
+ .dbg_name = "audio_core_lpaif_codec_spkr_ibit_clk",
.ops = &clk_ops_branch,
- CLK_INIT(audio_core_lpaif_codec_spkr_clk_src.c),
+ CLK_INIT(audio_core_lpaif_codec_spkr_ibit_clk.c),
},
};
@@ -4195,6 +4285,17 @@
},
};
+struct branch_clk audio_core_lpaif_pcmoe_clk = {
+ .cbcr_reg = AUDIO_CORE_LPAIF_PCM_DATA_OE_CBCR,
+ .parent = &audio_core_lpaif_pcmoe_clk_src.c,
+ .base = &virt_bases[LPASS_BASE],
+ .c = {
+ .dbg_name = "audio_core_lpaif_pcmoe_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(audio_core_lpaif_pcmoe_clk.c),
+ },
+};
+
static struct branch_clk q6ss_ahb_lfabif_clk = {
.cbcr_reg = LPASS_Q6SS_AHB_LFABIF_CBCR,
.has_sibling = 1,
@@ -4292,6 +4393,8 @@
{&gcc_blsp2_uart5_apps_clk.c, GCC_BASE, 0x00c6},
{&gcc_blsp2_uart6_apps_clk.c, GCC_BASE, 0x00cb},
{&gcc_boot_rom_ahb_clk.c, GCC_BASE, 0x0100},
+ {&gcc_ocmem_noc_cfg_ahb_clk.c, GCC_BASE, 0x0029},
+ {&gcc_mmss_noc_cfg_ahb_clk.c, GCC_BASE, 0x002A},
{&gcc_mss_cfg_ahb_clk.c, GCC_BASE, 0x0030},
{&gcc_ce1_clk.c, GCC_BASE, 0x0140},
{&gcc_ce2_clk.c, GCC_BASE, 0x0148},
@@ -4319,6 +4422,7 @@
{&mmss_mmssnoc_ahb_clk.c, MMSS_BASE, 0x0001},
{&mmss_mmssnoc_axi_clk.c, MMSS_BASE, 0x0004},
{&ocmemnoc_clk.c, MMSS_BASE, 0x0007},
+ {&ocmemcx_ocmemnoc_clk.c, MMSS_BASE, 0x0009},
{&camss_cci_cci_ahb_clk.c, MMSS_BASE, 0x002e},
{&camss_cci_cci_clk.c, MMSS_BASE, 0x002d},
{&camss_csi0_ahb_clk.c, MMSS_BASE, 0x0042},
@@ -4391,6 +4495,7 @@
{&audio_core_lpaif_quad_clk_src.c, LPASS_BASE, 0x0014},
{&audio_core_lpaif_pcm0_clk_src.c, LPASS_BASE, 0x0013},
{&audio_core_lpaif_pcm1_clk_src.c, LPASS_BASE, 0x0012},
+ {&audio_core_lpaif_pcmoe_clk_src.c, LPASS_BASE, 0x000f},
{&audio_core_slimbus_core_clk.c, LPASS_BASE, 0x003d},
{&audio_core_slimbus_lfabif_clk.c, LPASS_BASE, 0x003e},
{&q6ss_xo_clk.c, LPASS_BASE, 0x002b},
@@ -4583,6 +4688,48 @@
.multiplier = 1,
};
+
+static struct clk_lookup msm_clocks_8974_rumi[] = {
+ CLK_LOOKUP("iface_clk", gcc_sdcc1_ahb_clk.c, "msm_sdcc.1"),
+ CLK_LOOKUP("core_clk", gcc_sdcc1_apps_clk.c, "msm_sdcc.1"),
+ CLK_LOOKUP("bus_clk", pnoc_sdcc1_clk.c, "msm_sdcc.1"),
+ CLK_LOOKUP("iface_clk", gcc_sdcc2_ahb_clk.c, "msm_sdcc.2"),
+ CLK_LOOKUP("core_clk", gcc_sdcc2_apps_clk.c, "msm_sdcc.2"),
+ CLK_LOOKUP("bus_clk", pnoc_sdcc2_clk.c, "msm_sdcc.2"),
+ CLK_LOOKUP("iface_clk", gcc_sdcc3_ahb_clk.c, "msm_sdcc.3"),
+ CLK_LOOKUP("core_clk", gcc_sdcc3_apps_clk.c, "msm_sdcc.3"),
+ CLK_LOOKUP("bus_clk", pnoc_sdcc3_clk.c, "msm_sdcc.3"),
+ CLK_LOOKUP("iface_clk", gcc_sdcc4_ahb_clk.c, "msm_sdcc.4"),
+ CLK_LOOKUP("core_clk", gcc_sdcc4_apps_clk.c, "msm_sdcc.4"),
+ CLK_LOOKUP("bus_clk", pnoc_sdcc4_clk.c, "msm_sdcc.4"),
+ CLK_DUMMY("xo", XO_CLK, NULL, OFF),
+ CLK_DUMMY("xo", XO_CLK, "pil_pronto", OFF),
+ CLK_DUMMY("core_clk", BLSP2_UART_CLK, "msm_serial_hsl.0", OFF),
+ CLK_DUMMY("iface_clk", BLSP2_UART_CLK, "msm_serial_hsl.0", OFF),
+ CLK_DUMMY("core_clk", SDC1_CLK, NULL, OFF),
+ CLK_DUMMY("iface_clk", SDC1_P_CLK, NULL, OFF),
+ CLK_DUMMY("core_clk", SDC3_CLK, NULL, OFF),
+ CLK_DUMMY("iface_clk", SDC3_P_CLK, NULL, OFF),
+ CLK_DUMMY("phy_clk", NULL, "msm_otg", OFF),
+ CLK_DUMMY("core_clk", NULL, "msm_otg", OFF),
+ CLK_DUMMY("iface_clk", NULL, "msm_otg", OFF),
+ CLK_DUMMY("xo", NULL, "msm_otg", OFF),
+ CLK_DUMMY("dfab_clk", DFAB_CLK, NULL, 0),
+ CLK_DUMMY("dma_bam_pclk", DMA_BAM_P_CLK, NULL, 0),
+ CLK_DUMMY("mem_clk", NULL, NULL, 0),
+ CLK_DUMMY("core_clk", SPI_CLK, "spi_qsd.1", OFF),
+ CLK_DUMMY("iface_clk", SPI_P_CLK, "spi_qsd.1", OFF),
+ CLK_DUMMY("core_clk", NULL, "f9966000.i2c", 0),
+ CLK_DUMMY("iface_clk", NULL, "f9966000.i2c", 0),
+ CLK_DUMMY("core_clk", NULL, "fe12f000.slim", OFF),
+ CLK_DUMMY("core_clk", "mdp.0", NULL, 0),
+ CLK_DUMMY("core_clk_src", "mdp.0", NULL, 0),
+ CLK_DUMMY("lut_clk", "mdp.0", NULL, 0),
+ CLK_DUMMY("vsync_clk", "mdp.0", NULL, 0),
+ CLK_DUMMY("iface_clk", "mdp.0", NULL, 0),
+ CLK_DUMMY("bus_clk", "mdp.0", NULL, 0),
+};
+
static struct clk_lookup msm_clocks_8974[] = {
CLK_LOOKUP("xo", cxo_clk_src.c, "msm_otg"),
CLK_LOOKUP("xo", cxo_clk_src.c, "pil-q6v5-lpass"),
@@ -4641,6 +4788,16 @@
CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, ""),
CLK_LOOKUP("bus_clk", gcc_ce2_axi_clk.c, ""),
+ CLK_LOOKUP("core_clk", gcc_ce2_clk.c, "qcedev.0"),
+ CLK_LOOKUP("iface_clk", gcc_ce2_ahb_clk.c, "qcedev.0"),
+ CLK_LOOKUP("bus_clk", gcc_ce2_axi_clk.c, "qcedev.0"),
+ CLK_LOOKUP("core_clk_src", ce2_clk_src.c, "qcedev.0"),
+
+ CLK_LOOKUP("core_clk", gcc_ce2_clk.c, "qcrypto.0"),
+ CLK_LOOKUP("iface_clk", gcc_ce2_ahb_clk.c, "qcrypto.0"),
+ CLK_LOOKUP("bus_clk", gcc_ce2_axi_clk.c, "qcrypto.0"),
+ CLK_LOOKUP("core_clk_src", ce2_clk_src.c, "qcrypto.0"),
+
CLK_LOOKUP("core_clk", gcc_gp1_clk.c, ""),
CLK_LOOKUP("core_clk", gcc_gp2_clk.c, ""),
CLK_LOOKUP("core_clk", gcc_gp3_clk.c, ""),
@@ -4737,8 +4894,8 @@
CLK_LOOKUP("core_clk", camss_phy1_csi1phytimer_clk.c, ""),
CLK_LOOKUP("core_clk", camss_phy2_csi2phytimer_clk.c, ""),
CLK_LOOKUP("iface_clk", camss_top_ahb_clk.c, ""),
- CLK_LOOKUP("iface_clk", camss_vfe_cpp_ahb_clk.c, ""),
- CLK_LOOKUP("core_clk", camss_vfe_cpp_clk.c, ""),
+ CLK_LOOKUP("iface_clk", camss_vfe_cpp_ahb_clk.c, "fda44000.qcom,iommu"),
+ CLK_LOOKUP("core_clk", camss_vfe_cpp_clk.c, "fda44000.qcom,iommu"),
CLK_LOOKUP("camss_vfe_vfe0_clk", camss_vfe_vfe0_clk.c, ""),
CLK_LOOKUP("camss_vfe_vfe1_clk", camss_vfe_vfe1_clk.c, ""),
CLK_LOOKUP("vfe0_clk_src", vfe0_clk_src.c, ""),
@@ -4752,9 +4909,13 @@
CLK_LOOKUP("bus_clk", mdss_axi_clk.c, "mdp.0"),
CLK_LOOKUP("core_clk", oxili_gfx3d_clk.c, "fdb00000.qcom,kgsl-3d0"),
CLK_LOOKUP("iface_clk", oxilicx_ahb_clk.c, "fdb00000.qcom,kgsl-3d0"),
+ CLK_LOOKUP("mem_iface_clk", ocmemcx_ocmemnoc_clk.c,
+ "fdb00000.qcom,kgsl-3d0"),
CLK_LOOKUP("core_clk", oxilicx_axi_clk.c, "fdb10000.qcom,iommu"),
CLK_LOOKUP("iface_clk", oxilicx_ahb_clk.c, "fdb10000.qcom,iommu"),
+ CLK_LOOKUP("alt_core_clk", oxili_gfx3d_clk.c, "fdb10000.qcom,iommu"),
CLK_LOOKUP("iface_clk", venus0_ahb_clk.c, "fdc84000.qcom,iommu"),
+ CLK_LOOKUP("alt_core_clk", venus0_vcodec0_clk.c, "fdc84000.qcom,iommu"),
CLK_LOOKUP("core_clk", venus0_axi_clk.c, "fdc84000.qcom,iommu"),
CLK_LOOKUP("bus_clk", venus0_axi_clk.c, ""),
CLK_LOOKUP("src_clk", vcodec0_clk_src.c, "fdce0000.qcom,venus"),
@@ -4762,6 +4923,10 @@
CLK_LOOKUP("iface_clk", venus0_ahb_clk.c, "fdce0000.qcom,venus"),
CLK_LOOKUP("bus_clk", venus0_axi_clk.c, "fdce0000.qcom,venus"),
CLK_LOOKUP("mem_clk", venus0_ocmemnoc_clk.c, "fdce0000.qcom,venus"),
+ CLK_LOOKUP("core_clk", venus0_vcodec0_clk.c, "fdc00000.qcom,vidc"),
+ CLK_LOOKUP("iface_clk", venus0_ahb_clk.c, "fdc00000.qcom,vidc"),
+ CLK_LOOKUP("bus_clk", venus0_axi_clk.c, "fdc00000.qcom,vidc"),
+ CLK_LOOKUP("mem_clk", venus0_ocmemnoc_clk.c, "fdc00000.qcom,vidc"),
/* LPASS clocks */
@@ -4794,6 +4959,8 @@
CLK_LOOKUP("core_clk", audio_core_lpaif_pcm1_clk_src.c, ""),
CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm1_ebit_clk.c, ""),
CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm1_ibit_clk.c, ""),
+ CLK_LOOKUP("core_clk_src", audio_core_lpaif_pcmoe_clk_src.c, ""),
+ CLK_LOOKUP("core_clk", audio_core_lpaif_pcmoe_clk.c, ""),
CLK_LOOKUP("core_clk", mss_xo_q6_clk.c, "pil-q6v5-mss"),
CLK_LOOKUP("bus_clk", mss_bus_q6_clk.c, "pil-q6v5-mss"),
@@ -4835,6 +5002,8 @@
CLK_LOOKUP("bus_a_clk", ocmemnoc_clk.c, "msm_ocmem_noc"),
CLK_LOOKUP("bus_clk", mmss_mmssnoc_axi_clk.c, "msm_mmss_noc"),
CLK_LOOKUP("bus_a_clk", mmss_mmssnoc_axi_clk.c, "msm_mmss_noc"),
+ CLK_LOOKUP("iface_clk", gcc_mmss_noc_cfg_ahb_clk.c, ""),
+ CLK_LOOKUP("iface_clk", gcc_ocmem_noc_cfg_ahb_clk.c, ""),
CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tmc-etr"),
CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tpiu"),
@@ -4954,9 +5123,9 @@
/* MMPLL1 at 1000 MHz, main output enabled. */
static struct pll_config mmpll1_config __initdata = {
- .l = 0x34,
+ .l = 0x2C,
.m = 0x1,
- .n = 0xC,
+ .n = 0x10,
.vco_val = 0x0,
.vco_mask = BM(21, 20),
.pre_div_val = 0x0,
@@ -5021,7 +5190,7 @@
.main_output_mask = BIT(0),
};
-#define PLL_AUX_OUTPUT BIT(1)
+#define PLL_AUX_OUTPUT_BIT 1
static void __init reg_init(void)
{
@@ -5042,7 +5211,7 @@
/* Active GPLL0's aux output. This is needed by acpuclock. */
regval = readl_relaxed(GCC_REG_BASE(GPLL0_USER_CTL_REG));
- regval |= BIT(PLL_AUX_OUTPUT);
+ regval |= BIT(PLL_AUX_OUTPUT_BIT);
writel_relaxed(regval, GCC_REG_BASE(GPLL0_USER_CTL_REG));
/* Vote for GPLL0 to turn on. Needed by acpuclock. */
@@ -5059,8 +5228,8 @@
static void __init msm8974_clock_post_init(void)
{
- clk_set_rate(&axi_clk_src.c, 333330000);
- clk_set_rate(&ocmemnoc_clk_src.c, 333330000);
+ clk_set_rate(&axi_clk_src.c, 282000000);
+ clk_set_rate(&ocmemnoc_clk_src.c, 282000000);
/*
* Hold an active set vote at a rate of 40MHz for the MMSS NOC AHB
@@ -5075,6 +5244,13 @@
*/
clk_prepare_enable(&cxo_a_clk_src.c);
+ /*
+ * TODO: Temporarily enable NOC configuration AHB clocks. Remove when
+ * the bus driver is ready.
+ */
+ clk_prepare_enable(&gcc_mmss_noc_cfg_ahb_clk.c);
+ clk_prepare_enable(&gcc_ocmem_noc_cfg_ahb_clk.c);
+
/* Set rates for single-rate clocks. */
clk_set_rate(&usb30_master_clk_src.c,
usb30_master_clk_src.freq_tbl[0].freq_hz);
@@ -5136,12 +5312,63 @@
clk_ops_local_pll.enable = msm8974_pll_clk_enable;
+ vdd_dig_reg = rpm_regulator_get(NULL, "vdd_dig");
+ if (IS_ERR(vdd_dig_reg))
+ panic("clock-8974: Unable to get the vdd_dig regulator!");
+
+ /*
+ * TODO: Set a voltage and enable vdd_dig, leaving the voltage high
+ * until late_init. This may not be necessary with clock handoff;
+ * Investigate this code on a real non-simulator target to determine
+ * its necessity.
+ */
+ vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
+ rpm_regulator_enable(vdd_dig_reg);
+
reg_init();
}
+static int __init msm8974_clock_late_init(void)
+{
+ return unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
+}
+
+static void __init msm8974_rumi_clock_pre_init(void)
+{
+ virt_bases[GCC_BASE] = ioremap(GCC_CC_PHYS, GCC_CC_SIZE);
+ if (!virt_bases[GCC_BASE])
+ panic("clock-8974: Unable to ioremap GCC memory!");
+
+ /* SDCC clocks are partially emulated in the RUMI */
+ sdcc1_apps_clk_src.freq_tbl = ftbl_gcc_sdcc_apps_rumi_clk;
+ sdcc2_apps_clk_src.freq_tbl = ftbl_gcc_sdcc_apps_rumi_clk;
+ sdcc3_apps_clk_src.freq_tbl = ftbl_gcc_sdcc_apps_rumi_clk;
+ sdcc4_apps_clk_src.freq_tbl = ftbl_gcc_sdcc_apps_rumi_clk;
+
+ vdd_dig_reg = rpm_regulator_get(NULL, "vdd_dig");
+ if (IS_ERR(vdd_dig_reg))
+ panic("clock-8974: Unable to get the vdd_dig regulator!");
+
+ /*
+ * TODO: Set a voltage and enable vdd_dig, leaving the voltage high
+ * until late_init. This may not be necessary with clock handoff;
+ * Investigate this code on a real non-simulator target to determine
+ * its necessity.
+ */
+ vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
+ rpm_regulator_enable(vdd_dig_reg);
+}
+
struct clock_init_data msm8974_clock_init_data __initdata = {
.table = msm_clocks_8974,
.size = ARRAY_SIZE(msm_clocks_8974),
.pre_init = msm8974_clock_pre_init,
.post_init = msm8974_clock_post_init,
+ .late_init = msm8974_clock_late_init,
+};
+
+struct clock_init_data msm8974_rumi_clock_init_data __initdata = {
+ .table = msm_clocks_8974_rumi,
+ .size = ARRAY_SIZE(msm_clocks_8974_rumi),
+ .pre_init = msm8974_rumi_clock_pre_init,
};
diff --git a/arch/arm/mach-msm/clock-dss-8960.c b/arch/arm/mach-msm/clock-dss-8960.c
index 4e17b29..ca1a3e1 100644
--- a/arch/arm/mach-msm/clock-dss-8960.c
+++ b/arch/arm/mach-msm/clock-dss-8960.c
@@ -90,7 +90,6 @@
#define PLL_PWRDN_B BIT(3)
#define PD_PLL BIT(1)
-static unsigned current_rate;
static unsigned hdmi_pll_on;
int hdmi_pll_enable(void)
@@ -219,11 +218,6 @@
hdmi_pll_on = 0;
}
-unsigned hdmi_pll_get_rate(void)
-{
- return current_rate;
-}
-
int hdmi_pll_set_rate(unsigned rate)
{
unsigned int set_power_dwn = 0;
@@ -378,7 +372,6 @@
if (set_power_dwn)
hdmi_pll_enable();
- current_rate = rate;
if (!ahb_enabled)
writel_relaxed(ahb_en_reg & ~BIT(4), AHB_EN_REG);
diff --git a/arch/arm/mach-msm/clock-dss-8960.h b/arch/arm/mach-msm/clock-dss-8960.h
index 4734cde..72e70fc 100644
--- a/arch/arm/mach-msm/clock-dss-8960.h
+++ b/arch/arm/mach-msm/clock-dss-8960.h
@@ -15,7 +15,6 @@
int hdmi_pll_enable(void);
void hdmi_pll_disable(void);
-unsigned hdmi_pll_get_rate(void);
int hdmi_pll_set_rate(unsigned rate);
#endif
diff --git a/arch/arm/mach-msm/clock-local.c b/arch/arm/mach-msm/clock-local.c
index ca913dc..51e5703 100644
--- a/arch/arm/mach-msm/clock-local.c
+++ b/arch/arm/mach-msm/clock-local.c
@@ -355,7 +355,7 @@
{
u32 reg_val;
- reg_val = readl_relaxed(b->ctl_reg);
+ reg_val = b->ctl_reg ? readl_relaxed(b->ctl_reg) : 0;
if (b->en_mask) {
reg_val &= ~(b->en_mask);
writel_relaxed(reg_val, b->ctl_reg);
@@ -564,7 +564,7 @@
if (!branch_in_hwcg_mode(b)) {
b->hwcg_mask = 0;
c->flags &= ~CLKFLAG_HWCG;
- if (readl_relaxed(b->ctl_reg) & b->en_mask)
+ if (b->ctl_reg && readl_relaxed(b->ctl_reg) & b->en_mask)
return HANDOFF_ENABLED_CLK;
} else {
c->flags |= CLKFLAG_HWCG;
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index 2938135..d5831e2 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -319,6 +319,7 @@
{60, 1152000000},
{62, 1200000000},
{63, 1209600000},
+ {73, 1401600000},
{0, 0},
};
diff --git a/arch/arm/mach-msm/clock-rpm.h b/arch/arm/mach-msm/clock-rpm.h
index 22691c5..e203028 100644
--- a/arch/arm/mach-msm/clock-rpm.h
+++ b/arch/arm/mach-msm/clock-rpm.h
@@ -21,6 +21,10 @@
#define RPM_SMD_KEY_ENABLE 0x62616E45
#define RPM_SMD_KEY_STATE 0x54415453
+#define RPM_CLK_BUFFER_A_REQ 0x616B6C63
+#define RPM_KEY_SOFTWARE_ENABLE 0x6E657773
+#define RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
+
struct clk_ops;
struct clk_rpmrs_data;
extern struct clk_ops clk_ops_rpm;
@@ -187,5 +191,17 @@
#define DEFINE_CLK_RPM_SMD_QDSS(name, active, type, r_id) \
__DEFINE_CLK_RPM_QDSS(name, active, type, r_id, \
0, RPM_SMD_KEY_STATE, &clk_rpmrs_data_smd)
+/*
+ * The RPM XO buffer clock management code aggregates votes for pin-control mode
+ * and software mode separately. Software-enable has higher priority over pin-
+ * control, and if the software-mode aggregation results in a 'disable', the
+ * buffer will be left in pin-control mode if a pin-control vote is in place.
+ */
+#define DEFINE_CLK_RPM_SMD_XO_BUFFER(name, active, r_id) \
+ __DEFINE_CLK_RPM_BRANCH(name, active, RPM_CLK_BUFFER_A_REQ, r_id, 0, \
+ 1000, RPM_KEY_SOFTWARE_ENABLE, &clk_rpmrs_data_smd)
+#define DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(name, active, r_id) \
+ __DEFINE_CLK_RPM_BRANCH(name, active, RPM_CLK_BUFFER_A_REQ, r_id, 0, \
+ 1000, RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY, &clk_rpmrs_data_smd)
#endif
diff --git a/arch/arm/mach-msm/clock.h b/arch/arm/mach-msm/clock.h
index df2aa4e..d236e13 100644
--- a/arch/arm/mach-msm/clock.h
+++ b/arch/arm/mach-msm/clock.h
@@ -171,6 +171,7 @@
extern struct clock_init_data msm8625_dummy_clock_init_data;
extern struct clock_init_data msm8930_clock_init_data;
extern struct clock_init_data msm8974_clock_init_data;
+extern struct clock_init_data msm8974_rumi_clock_init_data;
void msm_clock_init(struct clock_init_data *data);
int vote_vdd_level(struct clk_vdd_class *vdd_class, int level);
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 8a41a7c..ac26acf 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/msm_rotator.h>
+#include <linux/gpio.h>
#include <linux/clkdev.h>
#include <linux/dma-mapping.h>
#include <linux/coresight.h>
@@ -27,6 +28,7 @@
#include <mach/msm_dsps.h>
#include <sound/msm-dai-q6.h>
#include <sound/apr_audio.h>
+#include <mach/msm_tsif.h>
#include <mach/msm_bus_board.h>
#include <mach/rpm.h>
#include <mach/mdm2.h>
@@ -464,6 +466,112 @@
.id = 0x4009,
};
+#define MSM_TSIF0_PHYS (0x18200000)
+#define MSM_TSIF1_PHYS (0x18201000)
+#define MSM_TSIF_SIZE (0x200)
+
+#define TSIF_0_CLK GPIO_CFG(55, 1, GPIO_CFG_INPUT, \
+ GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_0_EN GPIO_CFG(56, 1, GPIO_CFG_INPUT, \
+ GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_0_DATA GPIO_CFG(57, 1, GPIO_CFG_INPUT, \
+ GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_0_SYNC GPIO_CFG(62, 1, GPIO_CFG_INPUT, \
+ GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_1_CLK GPIO_CFG(59, 1, GPIO_CFG_INPUT, \
+ GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_1_EN GPIO_CFG(60, 1, GPIO_CFG_INPUT, \
+ GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_1_DATA GPIO_CFG(61, 1, GPIO_CFG_INPUT, \
+ GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_1_SYNC GPIO_CFG(58, 1, GPIO_CFG_INPUT, \
+ GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+
+static const struct msm_gpio tsif0_gpios[] = {
+ { .gpio_cfg = TSIF_0_CLK, .label = "tsif_clk", },
+ { .gpio_cfg = TSIF_0_EN, .label = "tsif_en", },
+ { .gpio_cfg = TSIF_0_DATA, .label = "tsif_data", },
+ { .gpio_cfg = TSIF_0_SYNC, .label = "tsif_sync", },
+};
+
+static const struct msm_gpio tsif1_gpios[] = {
+ { .gpio_cfg = TSIF_1_CLK, .label = "tsif_clk", },
+ { .gpio_cfg = TSIF_1_EN, .label = "tsif_en", },
+ { .gpio_cfg = TSIF_1_DATA, .label = "tsif_data", },
+ { .gpio_cfg = TSIF_1_SYNC, .label = "tsif_sync", },
+};
+
+struct msm_tsif_platform_data tsif1_8064_platform_data = {
+ .num_gpios = ARRAY_SIZE(tsif1_gpios),
+ .gpios = tsif1_gpios,
+ .tsif_pclk = "iface_clk",
+ .tsif_ref_clk = "ref_clk",
+};
+
+struct resource tsif1_8064_resources[] = {
+ [0] = {
+ .flags = IORESOURCE_IRQ,
+ .start = TSIF2_IRQ,
+ .end = TSIF2_IRQ,
+ },
+ [1] = {
+ .flags = IORESOURCE_MEM,
+ .start = MSM_TSIF1_PHYS,
+ .end = MSM_TSIF1_PHYS + MSM_TSIF_SIZE - 1,
+ },
+ [2] = {
+ .flags = IORESOURCE_DMA,
+ .start = DMOV8064_TSIF_CHAN,
+ .end = DMOV8064_TSIF_CRCI,
+ },
+};
+
+struct msm_tsif_platform_data tsif0_8064_platform_data = {
+ .num_gpios = ARRAY_SIZE(tsif0_gpios),
+ .gpios = tsif0_gpios,
+ .tsif_pclk = "iface_clk",
+ .tsif_ref_clk = "ref_clk",
+};
+
+struct resource tsif0_8064_resources[] = {
+ [0] = {
+ .flags = IORESOURCE_IRQ,
+ .start = TSIF1_IRQ,
+ .end = TSIF1_IRQ,
+ },
+ [1] = {
+ .flags = IORESOURCE_MEM,
+ .start = MSM_TSIF0_PHYS,
+ .end = MSM_TSIF0_PHYS + MSM_TSIF_SIZE - 1,
+ },
+ [2] = {
+ .flags = IORESOURCE_DMA,
+ .start = DMOV_TSIF_CHAN,
+ .end = DMOV_TSIF_CRCI,
+ },
+};
+
+struct platform_device msm_8064_device_tsif[2] = {
+ {
+ .name = "msm_tsif",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(tsif0_8064_resources),
+ .resource = tsif0_8064_resources,
+ .dev = {
+ .platform_data = &tsif0_8064_platform_data
+ },
+ },
+ {
+ .name = "msm_tsif",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(tsif1_8064_resources),
+ .resource = tsif1_8064_resources,
+ .dev = {
+ .platform_data = &tsif1_8064_platform_data
+ },
+ }
+};
+
/*
* Machine specific data for AUX PCM Interface
* which the driver will be unware of.
@@ -852,6 +960,11 @@
},
};
+struct platform_device apq8064_device_acpuclk = {
+ .name = "acpuclk-8064",
+ .id = -1,
+};
+
#define SHARED_IMEM_TZ_BASE 0x2a03f720
static struct resource tzlog_resources[] = {
{
@@ -1224,19 +1337,19 @@
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC1_DML_BASE,
.end = MSM_SDC1_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC1_BAM_BASE,
.end = MSM_SDC1_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC1_BAM_IRQ,
.end = SDC1_BAM_IRQ,
.flags = IORESOURCE_IRQ,
@@ -1259,19 +1372,19 @@
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC2_DML_BASE,
.end = MSM_SDC2_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC2_BAM_BASE,
.end = MSM_SDC2_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC2_BAM_IRQ,
.end = SDC2_BAM_IRQ,
.flags = IORESOURCE_IRQ,
@@ -1294,19 +1407,19 @@
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC3_DML_BASE,
.end = MSM_SDC3_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC3_BAM_BASE,
.end = MSM_SDC3_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC3_BAM_IRQ,
.end = SDC3_BAM_IRQ,
.flags = IORESOURCE_IRQ,
@@ -1329,19 +1442,19 @@
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC4_DML_BASE,
.end = MSM_SDC4_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC4_BAM_BASE,
.end = MSM_SDC4_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC4_BAM_IRQ,
.end = SDC4_BAM_IRQ,
.flags = IORESOURCE_IRQ,
@@ -2231,6 +2344,7 @@
RIVA_APPS_WLAN_SMSM_IRQ,
RIVA_APPS_WLAN_RX_DATA_AVAIL_IRQ,
RIVA_APPS_WLAN_DATA_XFER_DONE_IRQ,
+ PM8821_SEC_IRQ_N,
};
struct msm_mpm_device_data apq8064_mpm_dev_data __initdata = {
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index a36e7d7..7cb6e95 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -30,6 +30,7 @@
#include "devices.h"
#include "rpm_log.h"
#include "rpm_stats.h"
+#include "rpm_rbcpr_stats.h"
#include "footswitch.h"
#ifdef CONFIG_MSM_MPM
@@ -287,6 +288,31 @@
},
};
+static struct resource msm_rpm_rbcpr_resource = {
+ .start = 0x0010CB00,
+ .end = 0x0010CB00 + SZ_8K - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct msm_rpmrbcpr_platform_data msm_rpm_rbcpr_pdata = {
+ .rbcpr_data = {
+ .upside_steps = 1,
+ .downside_steps = 2,
+ .svs_voltage = 1050000,
+ .nominal_voltage = 1162500,
+ .turbo_voltage = 1287500,
+ },
+};
+
+struct platform_device msm8930_rpm_rbcpr_device = {
+ .name = "msm_rpm_rbcpr",
+ .id = -1,
+ .dev = {
+ .platform_data = &msm_rpm_rbcpr_pdata,
+ },
+ .resource = &msm_rpm_rbcpr_resource,
+};
+
static int msm8930_LPM_latency = 1000; /* >100 usec for WFI */
struct platform_device msm8930_cpu_idle_device = {
@@ -355,6 +381,21 @@
.id = MSM_BUS_FAB_CPSS_FPB,
};
+struct platform_device msm8627_device_acpuclk = {
+ .name = "acpuclk-8627",
+ .id = -1,
+};
+
+struct platform_device msm8930_device_acpuclk = {
+ .name = "acpuclk-8930",
+ .id = -1,
+};
+
+struct platform_device msm8930aa_device_acpuclk = {
+ .name = "acpuclk-8930aa",
+ .id = -1,
+};
+
static struct fs_driver_data gfx3d_fs_data = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk", .reset_rate = 27000000 },
@@ -626,6 +667,58 @@
.ib = 10000000,
},
};
+static struct msm_bus_vectors vidc_venc_1080p_turbo_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_HD_CODEC_PORT0,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 222298112,
+ .ib = 3522000000U,
+ },
+ {
+ .src = MSM_BUS_MASTER_HD_CODEC_PORT1,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 330301440,
+ .ib = 3522000000U,
+ },
+ {
+ .src = MSM_BUS_MASTER_AMPSS_M0,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 2500000,
+ .ib = 700000000,
+ },
+ {
+ .src = MSM_BUS_MASTER_AMPSS_M0,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 2500000,
+ .ib = 10000000,
+ },
+};
+static struct msm_bus_vectors vidc_vdec_1080p_turbo_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_HD_CODEC_PORT0,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 222298112,
+ .ib = 3522000000U,
+ },
+ {
+ .src = MSM_BUS_MASTER_HD_CODEC_PORT1,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 330301440,
+ .ib = 3522000000U,
+ },
+ {
+ .src = MSM_BUS_MASTER_AMPSS_M0,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 2500000,
+ .ib = 700000000,
+ },
+ {
+ .src = MSM_BUS_MASTER_AMPSS_M0,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 2500000,
+ .ib = 10000000,
+ },
+};
static struct msm_bus_paths vidc_bus_client_config[] = {
{
@@ -656,6 +749,14 @@
ARRAY_SIZE(vidc_vdec_1080p_vectors),
vidc_vdec_1080p_vectors,
},
+ {
+ ARRAY_SIZE(vidc_venc_1080p_turbo_vectors),
+ vidc_vdec_1080p_turbo_vectors,
+ },
+ {
+ ARRAY_SIZE(vidc_vdec_1080p_turbo_vectors),
+ vidc_vdec_1080p_turbo_vectors,
+ },
};
static struct msm_bus_scale_pdata vidc_bus_client_data = {
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 79f8c88..3d1926c 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -292,6 +292,52 @@
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
+
+/* GSBI 8 used into UARTDM Mode */
+static struct resource msm_uart_dm8_resources[] = {
+ {
+ .start = MSM_UART8DM_PHYS,
+ .end = MSM_UART8DM_PHYS + PAGE_SIZE - 1,
+ .name = "uartdm_resource",
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = GSBI8_UARTDM_IRQ,
+ .end = GSBI8_UARTDM_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = MSM_GSBI8_PHYS,
+ .end = MSM_GSBI8_PHYS + 4 - 1,
+ .name = "gsbi_resource",
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = DMOV_HSUART_GSBI8_TX_CHAN,
+ .end = DMOV_HSUART_GSBI8_RX_CHAN,
+ .name = "uartdm_channels",
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .start = DMOV_HSUART_GSBI8_TX_CRCI,
+ .end = DMOV_HSUART_GSBI8_RX_CRCI,
+ .name = "uartdm_crci",
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static u64 msm_uart_dm8_dma_mask = DMA_BIT_MASK(32);
+struct platform_device msm_device_uart_dm8 = {
+ .name = "msm_serial_hs",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(msm_uart_dm8_resources),
+ .resource = msm_uart_dm8_resources,
+ .dev = {
+ .dma_mask = &msm_uart_dm8_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
/*
* GSBI 9 used into UARTDM Mode
* For 8960 Fusion 2.2 Primary IPC
@@ -773,19 +819,19 @@
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC1_DML_BASE,
.end = MSM_SDC1_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC1_BAM_BASE,
.end = MSM_SDC1_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC1_BAM_IRQ,
.end = SDC1_BAM_IRQ,
.flags = IORESOURCE_IRQ,
@@ -808,19 +854,19 @@
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC2_DML_BASE,
.end = MSM_SDC2_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC2_BAM_BASE,
.end = MSM_SDC2_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC2_BAM_IRQ,
.end = SDC2_BAM_IRQ,
.flags = IORESOURCE_IRQ,
@@ -843,19 +889,19 @@
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC3_DML_BASE,
.end = MSM_SDC3_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC3_BAM_BASE,
.end = MSM_SDC3_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC3_BAM_IRQ,
.end = SDC3_BAM_IRQ,
.flags = IORESOURCE_IRQ,
@@ -878,19 +924,19 @@
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC4_DML_BASE,
.end = MSM_SDC4_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC4_BAM_BASE,
.end = MSM_SDC4_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC4_BAM_IRQ,
.end = SDC4_BAM_IRQ,
.flags = IORESOURCE_IRQ,
@@ -913,19 +959,19 @@
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC5_DML_BASE,
.end = MSM_SDC5_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC5_BAM_BASE,
.end = MSM_SDC5_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC5_BAM_IRQ,
.end = SDC5_BAM_IRQ,
.flags = IORESOURCE_IRQ,
@@ -1742,8 +1788,8 @@
struct msm_tsif_platform_data tsif1_platform_data = {
.num_gpios = ARRAY_SIZE(tsif1_gpios),
.gpios = tsif1_gpios,
- .tsif_pclk = "tsif_pclk",
- .tsif_ref_clk = "tsif_ref_clk",
+ .tsif_pclk = "iface_clk",
+ .tsif_ref_clk = "ref_clk",
};
struct resource tsif1_resources[] = {
@@ -1767,8 +1813,8 @@
struct msm_tsif_platform_data tsif0_platform_data = {
.num_gpios = ARRAY_SIZE(tsif0_gpios),
.gpios = tsif0_gpios,
- .tsif_pclk = "tsif_pclk",
- .tsif_ref_clk = "tsif_ref_clk",
+ .tsif_pclk = "iface_clk",
+ .tsif_ref_clk = "ref_clk",
};
struct resource tsif0_resources[] = {
[0] = {
@@ -3812,6 +3858,7 @@
#define AP2MDM_PMIC_PWR_EN 22
#define AP2MDM_KPDPWR_N 79
#define AP2MDM_SOFT_RESET 78
+#define USB_SW 25
static struct resource sglte_resources[] = {
{
@@ -3856,6 +3903,12 @@
.name = "AP2MDM_SOFT_RESET",
.flags = IORESOURCE_IO,
},
+ {
+ .start = USB_SW,
+ .end = USB_SW,
+ .name = "USB_SW",
+ .flags = IORESOURCE_IO,
+ },
};
struct platform_device mdm_sglte_device = {
diff --git a/arch/arm/mach-msm/devices-9615.c b/arch/arm/mach-msm/devices-9615.c
index 9c2b26a..9f03878 100644
--- a/arch/arm/mach-msm/devices-9615.c
+++ b/arch/arm/mach-msm/devices-9615.c
@@ -845,19 +845,19 @@
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC1_DML_BASE,
.end = MSM_SDC1_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC1_BAM_BASE,
.end = MSM_SDC1_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC1_BAM_IRQ,
.end = SDC1_BAM_IRQ,
.flags = IORESOURCE_IRQ,
@@ -880,19 +880,19 @@
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC2_DML_BASE,
.end = MSM_SDC2_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC2_BAM_BASE,
.end = MSM_SDC2_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC2_BAM_IRQ,
.end = SDC2_BAM_IRQ,
.flags = IORESOURCE_IRQ,
diff --git a/arch/arm/mach-msm/devices-msm7x25.c b/arch/arm/mach-msm/devices-msm7x25.c
index 2be7d5e..99b2960 100644
--- a/arch/arm/mach-msm/devices-msm7x25.c
+++ b/arch/arm/mach-msm/devices-msm7x25.c
@@ -439,23 +439,25 @@
#define MSM_SDC4_BASE 0xA0700000
static struct resource resources_sdc1[] = {
{
+ .name = "core_mem",
.start = MSM_SDC1_BASE,
.end = MSM_SDC1_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC1_0,
.end = INT_SDC1_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC1_CHAN,
.end = DMOV_SDC1_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC1_CRCI,
.end = DMOV_SDC1_CRCI,
.flags = IORESOURCE_DMA,
@@ -464,23 +466,25 @@
static struct resource resources_sdc2[] = {
{
+ .name = "core_mem",
.start = MSM_SDC2_BASE,
.end = MSM_SDC2_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC2_0,
.end = INT_SDC2_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC2_CHAN,
.end = DMOV_SDC2_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC2_CRCI,
.end = DMOV_SDC2_CRCI,
.flags = IORESOURCE_DMA,
@@ -489,23 +493,25 @@
static struct resource resources_sdc3[] = {
{
+ .name = "core_mem",
.start = MSM_SDC3_BASE,
.end = MSM_SDC3_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC3_0,
.end = INT_SDC3_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC3_CHAN,
.end = DMOV_SDC3_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC3_CRCI,
.end = DMOV_SDC3_CRCI,
.flags = IORESOURCE_DMA,
@@ -514,23 +520,25 @@
static struct resource resources_sdc4[] = {
{
+ .name = "core_mem",
.start = MSM_SDC4_BASE,
.end = MSM_SDC4_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC4_0,
.end = INT_SDC4_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC4_CHAN,
.end = DMOV_SDC4_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC4_CRCI,
.end = DMOV_SDC4_CRCI,
.flags = IORESOURCE_DMA,
diff --git a/arch/arm/mach-msm/devices-msm7x27.c b/arch/arm/mach-msm/devices-msm7x27.c
index 69d7430..82c5eed 100644
--- a/arch/arm/mach-msm/devices-msm7x27.c
+++ b/arch/arm/mach-msm/devices-msm7x27.c
@@ -448,23 +448,25 @@
#define MSM_SDC4_BASE 0xA0700000
static struct resource resources_sdc1[] = {
{
+ .name = "core_mem",
.start = MSM_SDC1_BASE,
.end = MSM_SDC1_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC1_0,
.end = INT_SDC1_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC1_CHAN,
.end = DMOV_SDC1_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC1_CRCI,
.end = DMOV_SDC1_CRCI,
.flags = IORESOURCE_DMA,
@@ -473,23 +475,25 @@
static struct resource resources_sdc2[] = {
{
+ .name = "core_mem",
.start = MSM_SDC2_BASE,
.end = MSM_SDC2_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC2_0,
.end = INT_SDC2_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC2_CHAN,
.end = DMOV_SDC2_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC2_CRCI,
.end = DMOV_SDC2_CRCI,
.flags = IORESOURCE_DMA,
@@ -498,23 +502,25 @@
static struct resource resources_sdc3[] = {
{
+ .name = "core_mem",
.start = MSM_SDC3_BASE,
.end = MSM_SDC3_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC3_0,
.end = INT_SDC3_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC3_CHAN,
.end = DMOV_SDC3_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC3_CRCI,
.end = DMOV_SDC3_CRCI,
.flags = IORESOURCE_DMA,
@@ -523,23 +529,25 @@
static struct resource resources_sdc4[] = {
{
+ .name = "core_mem",
.start = MSM_SDC4_BASE,
.end = MSM_SDC4_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC4_0,
.end = INT_SDC4_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC4_CHAN,
.end = DMOV_SDC4_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC4_CRCI,
.end = DMOV_SDC4_CRCI,
.flags = IORESOURCE_DMA,
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index 8912e96..8fef953 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -236,12 +236,22 @@
.max_speed_delta_khz = 604800,
};
+static struct acpuclk_pdata msm8625ab_acpuclk_pdata = {
+ .max_speed_delta_khz = 801600,
+};
+
struct platform_device msm8625_device_acpuclk = {
.name = "acpuclk-7627",
.id = -1,
.dev.platform_data = &msm8625_acpuclk_pdata,
};
+struct platform_device msm8625ab_device_acpuclk = {
+ .name = "acpuclk-7627",
+ .id = -1,
+ .dev.platform_data = &msm8625ab_acpuclk_pdata,
+};
+
struct platform_device msm_device_smd = {
.name = "msm_smd",
.id = -1,
@@ -497,23 +507,25 @@
#define MSM_SDC4_BASE 0xA0700000
static struct resource resources_sdc1[] = {
{
+ .name = "core_mem",
.start = MSM_SDC1_BASE,
.end = MSM_SDC1_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC1_0,
.end = INT_SDC1_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC1_CHAN,
.end = DMOV_SDC1_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC1_CRCI,
.end = DMOV_SDC1_CRCI,
.flags = IORESOURCE_DMA,
@@ -522,23 +534,25 @@
static struct resource resources_sdc2[] = {
{
+ .name = "core_mem",
.start = MSM_SDC2_BASE,
.end = MSM_SDC2_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC2_0,
.end = INT_SDC2_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC2_CHAN,
.end = DMOV_SDC2_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC2_CRCI,
.end = DMOV_SDC2_CRCI,
.flags = IORESOURCE_DMA,
@@ -547,23 +561,25 @@
static struct resource resources_sdc3[] = {
{
+ .name = "core_mem",
.start = MSM_SDC3_BASE,
.end = MSM_SDC3_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC3_0,
.end = INT_SDC3_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_NAND_CHAN,
.end = DMOV_NAND_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC3_CRCI,
.end = DMOV_SDC3_CRCI,
.flags = IORESOURCE_DMA,
@@ -572,23 +588,25 @@
static struct resource resources_sdc4[] = {
{
+ .name = "core_mem",
.start = MSM_SDC4_BASE,
.end = MSM_SDC4_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC4_0,
.end = INT_SDC4_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC4_CHAN,
.end = DMOV_SDC4_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC4_CRCI,
.end = DMOV_SDC4_CRCI,
.flags = IORESOURCE_DMA,
@@ -1165,23 +1183,25 @@
static struct resource msm8625_resources_sdc1[] = {
{
+ .name = "core_mem",
.start = MSM_SDC1_BASE,
.end = MSM_SDC1_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = MSM8625_INT_SDC1_0,
.end = MSM8625_INT_SDC1_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC1_CHAN,
.end = DMOV_SDC1_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC1_CRCI,
.end = DMOV_SDC1_CRCI,
.flags = IORESOURCE_DMA,
@@ -1190,23 +1210,25 @@
static struct resource msm8625_resources_sdc2[] = {
{
+ .name = "core_mem",
.start = MSM_SDC2_BASE,
.end = MSM_SDC2_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = MSM8625_INT_SDC2_0,
.end = MSM8625_INT_SDC2_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC2_CHAN,
.end = DMOV_SDC2_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC2_CRCI,
.end = DMOV_SDC2_CRCI,
.flags = IORESOURCE_DMA,
@@ -1215,23 +1237,25 @@
static struct resource msm8625_resources_sdc3[] = {
{
+ .name = "core_mem",
.start = MSM_SDC3_BASE,
.end = MSM_SDC3_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = MSM8625_INT_SDC3_0,
.end = MSM8625_INT_SDC3_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC3_CHAN,
.end = DMOV_SDC3_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC3_CRCI,
.end = DMOV_SDC3_CRCI,
.flags = IORESOURCE_DMA,
@@ -1240,23 +1264,25 @@
static struct resource msm8625_resources_sdc4[] = {
{
+ .name = "core_mem",
.start = MSM_SDC4_BASE,
.end = MSM_SDC4_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = MSM8625_INT_SDC4_0,
.end = MSM8625_INT_SDC4_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC4_CHAN,
.end = DMOV_SDC4_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC4_CRCI,
.end = DMOV_SDC4_CRCI,
.flags = IORESOURCE_DMA,
@@ -1623,6 +1649,7 @@
enum {
MSM8625,
MSM8625A,
+ MSM8625AB,
};
static int __init msm8625_cpu_id(void)
@@ -1643,6 +1670,11 @@
case 0x781:
cpu = MSM8625A;
break;
+ case 0x775:
+ case 0x776:
+ case 0x782:
+ cpu = MSM8625AB;
+ break;
default:
pr_err("Invalid Raw ID\n");
return -ENODEV;
@@ -1665,10 +1697,11 @@
platform_device_register(&msm7x27aa_device_acpuclk);
else if (msm8625_cpu_id() == MSM8625A)
platform_device_register(&msm8625_device_acpuclk);
+ else if (msm8625_cpu_id() == MSM8625AB)
+ platform_device_register(&msm8625ab_device_acpuclk);
} else {
platform_device_register(&msm7x27a_device_acpuclk);
}
-
return 0;
}
diff --git a/arch/arm/mach-msm/devices-msm7x2xa.h b/arch/arm/mach-msm/devices-msm7x2xa.h
index 4184a86..8febe26 100644
--- a/arch/arm/mach-msm/devices-msm7x2xa.h
+++ b/arch/arm/mach-msm/devices-msm7x2xa.h
@@ -33,4 +33,5 @@
void __init msm8x25_spm_device_init(void);
void __init msm8x25_kgsl_3d0_init(void);
void __iomem *core1_reset_base(void);
+extern void setup_mm_for_reboot(void);
#endif
diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c
index f04ef9d..8a5d0e8 100644
--- a/arch/arm/mach-msm/devices-msm7x30.c
+++ b/arch/arm/mach-msm/devices-msm7x30.c
@@ -784,23 +784,25 @@
#define MSM_SDC4_BASE 0xA3100000
static struct resource resources_sdc1[] = {
{
+ .name = "core_mem",
.start = MSM_SDC1_BASE,
.end = MSM_SDC1_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC1_0,
.end = INT_SDC1_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC1_CHAN,
.end = DMOV_SDC1_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC1_CRCI,
.end = DMOV_SDC1_CRCI,
.flags = IORESOURCE_DMA,
@@ -809,23 +811,25 @@
static struct resource resources_sdc2[] = {
{
+ .name = "core_mem",
.start = MSM_SDC2_BASE,
.end = MSM_SDC2_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC2_0,
.end = INT_SDC2_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_NAND_CHAN,
.end = DMOV_NAND_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC2_CRCI,
.end = DMOV_SDC2_CRCI,
.flags = IORESOURCE_DMA,
@@ -834,23 +838,25 @@
static struct resource resources_sdc3[] = {
{
+ .name = "core_mem",
.start = MSM_SDC3_BASE,
.end = MSM_SDC3_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC3_0,
.end = INT_SDC3_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC3_CHAN,
.end = DMOV_SDC3_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC3_CRCI,
.end = DMOV_SDC3_CRCI,
.flags = IORESOURCE_DMA,
@@ -859,23 +865,25 @@
static struct resource resources_sdc4[] = {
{
+ .name = "core_mem",
.start = MSM_SDC4_BASE,
.end = MSM_SDC4_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC4_0,
.end = INT_SDC4_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC4_CHAN,
.end = DMOV_SDC4_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC4_CRCI,
.end = DMOV_SDC4_CRCI,
.flags = IORESOURCE_DMA,
diff --git a/arch/arm/mach-msm/devices-msm8x60.c b/arch/arm/mach-msm/devices-msm8x60.c
index 3920abe..9ea817f 100644
--- a/arch/arm/mach-msm/devices-msm8x60.c
+++ b/arch/arm/mach-msm/devices-msm8x60.c
@@ -1091,43 +1091,45 @@
static struct resource resources_sdc1[] = {
{
+ .name = "core_mem",
.start = MSM_SDC1_BASE,
.end = MSM_SDC1_DML_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = SDC1_IRQ_0,
.end = SDC1_IRQ_0,
.flags = IORESOURCE_IRQ,
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC1_DML_BASE,
.end = MSM_SDC1_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC1_BAM_BASE,
.end = MSM_SDC1_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC1_BAM_IRQ,
.end = SDC1_BAM_IRQ,
.flags = IORESOURCE_IRQ,
},
#else
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC1_CHAN,
.end = DMOV_SDC1_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC1_CRCI,
.end = DMOV_SDC1_CRCI,
.flags = IORESOURCE_DMA,
@@ -1137,43 +1139,45 @@
static struct resource resources_sdc2[] = {
{
+ .name = "core_mem",
.start = MSM_SDC2_BASE,
.end = MSM_SDC2_DML_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = SDC2_IRQ_0,
.end = SDC2_IRQ_0,
.flags = IORESOURCE_IRQ,
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC2_DML_BASE,
.end = MSM_SDC2_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC2_BAM_BASE,
.end = MSM_SDC2_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC2_BAM_IRQ,
.end = SDC2_BAM_IRQ,
.flags = IORESOURCE_IRQ,
},
#else
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC2_CHAN,
.end = DMOV_SDC2_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC2_CRCI,
.end = DMOV_SDC2_CRCI,
.flags = IORESOURCE_DMA,
@@ -1183,43 +1187,45 @@
static struct resource resources_sdc3[] = {
{
+ .name = "core_mem",
.start = MSM_SDC3_BASE,
.end = MSM_SDC3_DML_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = SDC3_IRQ_0,
.end = SDC3_IRQ_0,
.flags = IORESOURCE_IRQ,
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC3_DML_BASE,
.end = MSM_SDC3_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC3_BAM_BASE,
.end = MSM_SDC3_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC3_BAM_IRQ,
.end = SDC3_BAM_IRQ,
.flags = IORESOURCE_IRQ,
},
#else
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC3_CHAN,
.end = DMOV_SDC3_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC3_CRCI,
.end = DMOV_SDC3_CRCI,
.flags = IORESOURCE_DMA,
@@ -1229,43 +1235,45 @@
static struct resource resources_sdc4[] = {
{
+ .name = "core_mem",
.start = MSM_SDC4_BASE,
.end = MSM_SDC4_DML_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = SDC4_IRQ_0,
.end = SDC4_IRQ_0,
.flags = IORESOURCE_IRQ,
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC4_DML_BASE,
.end = MSM_SDC4_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC4_BAM_BASE,
.end = MSM_SDC4_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC4_BAM_IRQ,
.end = SDC4_BAM_IRQ,
.flags = IORESOURCE_IRQ,
},
#else
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC4_CHAN,
.end = DMOV_SDC4_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC4_CRCI,
.end = DMOV_SDC4_CRCI,
.flags = IORESOURCE_DMA,
@@ -1275,43 +1283,45 @@
static struct resource resources_sdc5[] = {
{
+ .name = "core_mem",
.start = MSM_SDC5_BASE,
.end = MSM_SDC5_DML_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = SDC5_IRQ_0,
.end = SDC5_IRQ_0,
.flags = IORESOURCE_IRQ,
},
#ifdef CONFIG_MMC_MSM_SPS_SUPPORT
{
- .name = "sdcc_dml_addr",
+ .name = "dml_mem",
.start = MSM_SDC5_DML_BASE,
.end = MSM_SDC5_BAM_BASE - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_addr",
+ .name = "bam_mem",
.start = MSM_SDC5_BAM_BASE,
.end = MSM_SDC5_BAM_BASE + (2 * SZ_4K) - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "sdcc_bam_irq",
+ .name = "bam_irq",
.start = SDC5_BAM_IRQ,
.end = SDC5_BAM_IRQ,
.flags = IORESOURCE_IRQ,
},
#else
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC5_CHAN,
.end = DMOV_SDC5_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC5_CRCI,
.end = DMOV_SDC5_CRCI,
.flags = IORESOURCE_DMA,
diff --git a/arch/arm/mach-msm/devices-qsd8x50.c b/arch/arm/mach-msm/devices-qsd8x50.c
index ec4a14f..03ffa2f 100644
--- a/arch/arm/mach-msm/devices-qsd8x50.c
+++ b/arch/arm/mach-msm/devices-qsd8x50.c
@@ -490,23 +490,25 @@
#define MSM_SDC4_BASE 0xA0600000
static struct resource resources_sdc1[] = {
{
+ .name = "core_mem",
.start = MSM_SDC1_BASE,
.end = MSM_SDC1_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC1_0,
.end = INT_SDC1_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC1_CHAN,
.end = DMOV_SDC1_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC1_CRCI,
.end = DMOV_SDC1_CRCI,
.flags = IORESOURCE_DMA,
@@ -515,23 +517,25 @@
static struct resource resources_sdc2[] = {
{
+ .name = "core_mem",
.start = MSM_SDC2_BASE,
.end = MSM_SDC2_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC2_0,
.end = INT_SDC2_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC2_CHAN,
.end = DMOV_SDC2_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC2_CRCI,
.end = DMOV_SDC2_CRCI,
.flags = IORESOURCE_DMA,
@@ -540,23 +544,25 @@
static struct resource resources_sdc3[] = {
{
+ .name = "core_mem",
.start = MSM_SDC3_BASE,
.end = MSM_SDC3_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC3_0,
.end = INT_SDC3_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC3_CHAN,
.end = DMOV_SDC3_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC3_CRCI,
.end = DMOV_SDC3_CRCI,
.flags = IORESOURCE_DMA,
@@ -565,23 +571,25 @@
static struct resource resources_sdc4[] = {
{
+ .name = "core_mem",
.start = MSM_SDC4_BASE,
.end = MSM_SDC4_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
+ .name = "core_irq",
.start = INT_SDC4_0,
.end = INT_SDC4_1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "sdcc_dma_chnl",
+ .name = "dma_chnl",
.start = DMOV_SDC4_CHAN,
.end = DMOV_SDC4_CHAN,
.flags = IORESOURCE_DMA,
},
{
- .name = "sdcc_dma_crci",
+ .name = "dma_crci",
.start = DMOV_SDC4_CRCI,
.end = DMOV_SDC4_CRCI,
.flags = IORESOURCE_DMA,
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index 045dfb9..886a9ae 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -53,6 +53,7 @@
extern struct platform_device msm_device_uart_dm12;
extern struct platform_device *msm_device_uart_gsbi9;
extern struct platform_device msm_device_uart_dm6;
+extern struct platform_device msm_device_uart_dm8;
extern struct platform_device msm_device_uart_dm9;
extern struct platform_device msm8960_device_uart_gsbi2;
@@ -188,6 +189,7 @@
#endif
extern struct platform_device msm_device_tsif[2];
+extern struct platform_device msm_8064_device_tsif[2];
extern struct platform_device msm_device_ssbi_pmic1;
extern struct platform_device msm_device_ssbi_pmic2;
@@ -327,6 +329,7 @@
extern struct platform_device msm8930_rpm_device;
extern struct platform_device msm8930_rpm_stat_device;
extern struct platform_device msm8930_rpm_log_device;
+extern struct platform_device msm8930_rpm_rbcpr_device;
extern struct platform_device msm8660_rpm_device;
extern struct platform_device msm8660_rpm_stat_device;
@@ -419,8 +422,13 @@
extern struct platform_device msm7x27a_device_acpuclk;
extern struct platform_device msm7x27aa_device_acpuclk;
extern struct platform_device msm7x30_device_acpuclk;
+extern struct platform_device apq8064_device_acpuclk;
extern struct platform_device msm8625_device_acpuclk;
+extern struct platform_device msm8627_device_acpuclk;
+extern struct platform_device msm8625ab_device_acpuclk;
extern struct platform_device msm8x50_device_acpuclk;
extern struct platform_device msm8x60_device_acpuclk;
+extern struct platform_device msm8930_device_acpuclk;
+extern struct platform_device msm8930aa_device_acpuclk;
extern struct platform_device msm8960_device_acpuclk;
extern struct platform_device msm9615_device_acpuclk;
diff --git a/arch/arm/mach-msm/gss-8064.c b/arch/arm/mach-msm/gss-8064.c
index b9b877a..e528650 100644
--- a/arch/arm/mach-msm/gss-8064.c
+++ b/arch/arm/mach-msm/gss-8064.c
@@ -43,6 +43,8 @@
static int crash_shutdown;
+static struct subsys_device *gss_8064_dev;
+
#define MAX_SSR_REASON_LEN 81U
static void log_gss_sfr(void)
@@ -72,7 +74,7 @@
static void restart_gss(void)
{
log_gss_sfr();
- subsystem_restart("gss");
+ subsystem_restart_dev(gss_8064_dev);
}
static void smsm_state_cb(void *data, uint32_t old_state, uint32_t new_state)
@@ -91,7 +93,7 @@
#define Q6_FW_WDOG_ENABLE 0x08882024
#define Q6_SW_WDOG_ENABLE 0x08982024
-static int gss_shutdown(const struct subsys_data *subsys)
+static int gss_shutdown(const struct subsys_desc *desc)
{
pil_force_shutdown("gss");
disable_irq_nosync(GSS_A5_WDOG_EXPIRED);
@@ -99,14 +101,14 @@
return 0;
}
-static int gss_powerup(const struct subsys_data *subsys)
+static int gss_powerup(const struct subsys_desc *desc)
{
pil_force_boot("gss");
enable_irq(GSS_A5_WDOG_EXPIRED);
return 0;
}
-void gss_crash_shutdown(const struct subsys_data *subsys)
+void gss_crash_shutdown(const struct subsys_desc *desc)
{
crash_shutdown = 1;
smsm_reset_modem(SMSM_RESET);
@@ -122,7 +124,7 @@
};
static int gss_ramdump(int enable,
- const struct subsys_data *crashed_subsys)
+ const struct subsys_desc *crashed_subsys)
{
int ret = 0;
@@ -157,7 +159,7 @@
return IRQ_HANDLED;
}
-static struct subsys_data gss_8064 = {
+static struct subsys_desc gss_8064 = {
.name = "gss",
.shutdown = gss_shutdown,
.powerup = gss_powerup,
@@ -167,7 +169,10 @@
static int gss_subsystem_restart_init(void)
{
- return ssr_register_subsystem(&gss_8064);
+ gss_8064_dev = subsys_register(&gss_8064);
+ if (IS_ERR(gss_8064_dev))
+ return PTR_ERR(gss_8064_dev);
+ return 0;
}
static int gss_open(struct inode *inode, struct file *filep)
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 8607177..1770b97 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -184,10 +184,11 @@
REG_LDO,
REG_VS,
REG_GPIO,
+ REG_MAX
};
struct camera_vreg_t {
- char *reg_name;
+ const char *reg_name;
enum camera_vreg_type type;
int min_voltage;
int max_voltage;
@@ -201,8 +202,8 @@
};
struct msm_camera_csi_lane_params {
- uint8_t csi_lane_assign;
- uint8_t csi_lane_mask;
+ uint16_t csi_lane_assign;
+ uint16_t csi_lane_mask;
};
struct msm_camera_gpio_conf {
@@ -475,6 +476,7 @@
struct msm_fb_platform_data {
int (*detect_client)(const char *name);
int mddi_prescan;
+ unsigned char ext_resolution;
int (*allow_set_offset)(void);
char prim_panel_name[PANEL_NAME_MAX_LEN];
char ext_panel_name[PANEL_NAME_MAX_LEN];
diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h
index 4d66d88..a51a6b5 100644
--- a/arch/arm/mach-msm/include/mach/camera.h
+++ b/arch/arm/mach-msm/include/mach/camera.h
@@ -85,13 +85,12 @@
VFE_MSG_SYNC_TIMER1,
VFE_MSG_SYNC_TIMER2,
VFE_MSG_COMMON,
- VFE_MSG_V32_START,
- VFE_MSG_V32_START_RECORDING, /* 20 */
- VFE_MSG_V32_CAPTURE,
- VFE_MSG_V32_JPEG_CAPTURE,
+ VFE_MSG_START,
+ VFE_MSG_START_RECORDING, /* 20 */
+ VFE_MSG_CAPTURE,
+ VFE_MSG_JPEG_CAPTURE,
VFE_MSG_OUTPUT_IRQ,
- VFE_MSG_V2X_PREVIEW,
- VFE_MSG_V2X_CAPTURE,
+ VFE_MSG_PREVIEW,
VFE_MSG_OUTPUT_PRIMARY,
VFE_MSG_OUTPUT_SECONDARY,
VFE_MSG_OUTPUT_TERTIARY1,
@@ -135,14 +134,16 @@
struct msm_camera_csid_params {
uint8_t lane_cnt;
- uint8_t lane_assign;
+ uint16_t lane_assign;
+ uint8_t phy_sel;
struct msm_camera_csid_lut_params lut_params;
};
struct msm_camera_csiphy_params {
uint8_t lane_cnt;
uint8_t settle_cnt;
- uint8_t lane_mask;
+ uint16_t lane_mask;
+ uint8_t combo_mode;
};
struct msm_camera_csi2_params {
@@ -289,6 +290,109 @@
int (*strobe_flash_charge)(int32_t, int32_t, uint32_t);
};
+enum cci_i2c_master_t {
+ MASTER_0,
+ MASTER_1,
+};
+
+enum cci_i2c_queue_t {
+ QUEUE_0,
+ QUEUE_1,
+};
+
+struct msm_camera_cci_client {
+ struct v4l2_subdev *cci_subdev;
+ uint32_t freq;
+ enum cci_i2c_master_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+ uint32_t timeout;
+ uint16_t retries;
+ uint16_t id_map;
+};
+
+enum msm_cci_cmd_type {
+ MSM_CCI_INIT,
+ MSM_CCI_RELEASE,
+ MSM_CCI_SET_SID,
+ MSM_CCI_SET_FREQ,
+ MSM_CCI_SET_SYNC_CID,
+ MSM_CCI_I2C_READ,
+ MSM_CCI_I2C_WRITE,
+ MSM_CCI_GPIO_WRITE,
+};
+
+struct msm_camera_cci_wait_sync_cfg {
+ uint16_t line;
+ uint16_t delay;
+};
+
+struct msm_camera_cci_gpio_cfg {
+ uint16_t gpio_queue;
+ uint16_t i2c_queue;
+};
+
+enum msm_camera_i2c_reg_addr_type {
+ MSM_CAMERA_I2C_BYTE_ADDR = 1,
+ MSM_CAMERA_I2C_WORD_ADDR,
+};
+
+enum msm_camera_i2c_data_type {
+ MSM_CAMERA_I2C_BYTE_DATA = 1,
+ MSM_CAMERA_I2C_WORD_DATA,
+ MSM_CAMERA_I2C_SET_BYTE_MASK,
+ MSM_CAMERA_I2C_UNSET_BYTE_MASK,
+ MSM_CAMERA_I2C_SET_WORD_MASK,
+ MSM_CAMERA_I2C_UNSET_WORD_MASK,
+ MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA,
+};
+
+enum msm_camera_i2c_cmd_type {
+ MSM_CAMERA_I2C_CMD_WRITE,
+ MSM_CAMERA_I2C_CMD_POLL,
+};
+
+struct msm_camera_i2c_reg_conf {
+ uint16_t reg_addr;
+ uint16_t reg_data;
+ enum msm_camera_i2c_data_type dt;
+ enum msm_camera_i2c_cmd_type cmd_type;
+ int16_t mask;
+};
+
+struct msm_camera_cci_i2c_write_cfg {
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ enum msm_camera_i2c_data_type data_type;
+ uint16_t size;
+};
+
+struct msm_camera_cci_i2c_read_cfg {
+ uint16_t addr;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ uint8_t *data;
+ uint16_t num_byte;
+};
+
+struct msm_camera_cci_i2c_queue_info {
+ uint32_t max_queue_size;
+ uint32_t report_id;
+ uint32_t irq_en;
+ uint32_t capture_rep_data;
+};
+
+struct msm_camera_cci_ctrl {
+ int32_t status;
+ struct msm_camera_cci_client *cci_info;
+ enum msm_cci_cmd_type cmd;
+ union {
+ struct msm_camera_cci_i2c_write_cfg cci_i2c_write_cfg;
+ struct msm_camera_cci_i2c_read_cfg cci_i2c_read_cfg;
+ struct msm_camera_cci_wait_sync_cfg cci_wait_sync_cfg;
+ struct msm_camera_cci_gpio_cfg gpio_cfg;
+ } cfg;
+};
+
/* this structure is used in kernel */
struct msm_queue_cmd {
struct list_head list_config;
@@ -565,6 +669,7 @@
S_STEREO_VIDEO,
S_STEREO_CAPTURE,
S_DEFAULT,
+ S_LIVESHOT,
S_EXIT
};
diff --git a/arch/arm/mach-msm/include/mach/dma.h b/arch/arm/mach-msm/include/mach/dma.h
index 381ea12..6a389de 100644
--- a/arch/arm/mach-msm/include/mach/dma.h
+++ b/arch/arm/mach-msm/include/mach/dma.h
@@ -188,6 +188,12 @@
#define DMOV_HSUART_GSBI6_RX_CHAN 8
#define DMOV_HSUART_GSBI6_RX_CRCI 11
+#define DMOV_HSUART_GSBI8_TX_CHAN 7
+#define DMOV_HSUART_GSBI8_TX_CRCI 10
+
+#define DMOV_HSUART_GSBI8_RX_CHAN 8
+#define DMOV_HSUART_GSBI8_RX_CRCI 9
+
#define DMOV_HSUART_GSBI9_TX_CHAN 4
#define DMOV_HSUART_GSBI9_TX_CRCI 13
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index 28c53db..b14f145 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -72,6 +72,7 @@
* @irq: Interrupt number
* @clk: The bus clock for this IOMMU hardware instance
* @pclk: The clock for the IOMMU bus interconnect
+ * @aclk: Alternate clock for this IOMMU core, if any
* @name: Human-readable name of this IOMMU device
* @gdsc: Regulator needed to power this HW block (v2 only)
* @nsmr: Size of the SMT on this HW block (v2 only)
@@ -85,6 +86,7 @@
int ttbr_split;
struct clk *clk;
struct clk *pclk;
+ struct clk *aclk;
const char *name;
struct regulator *gdsc;
unsigned int nsmr;
diff --git a/arch/arm/mach-msm/include/mach/irqs-8064.h b/arch/arm/mach-msm/include/mach/irqs-8064.h
index a5f78f5..973034b 100644
--- a/arch/arm/mach-msm/include/mach/irqs-8064.h
+++ b/arch/arm/mach-msm/include/mach/irqs-8064.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -287,13 +287,14 @@
#define PCIE20_INTC (GIC_SPI_START + 241)
#define PCIE20_INTD (GIC_SPI_START + 242)
#define PCIE20_INT_PLS_HP (GIC_SPI_START + 243)
-#define PCIE20_INT_PLS_PME (GIC_SPI_START + 244)
-#define PCIE20_INT_LINK_UP (GIC_SPI_START + 245)
-#define PCIE20_INT_LINK_DOWN (GIC_SPI_START + 246)
-#define PCIE20_INT_HP_LEGACY (GIC_SPI_START + 247)
-#define PCIE20_AER_LEGACY (GIC_SPI_START + 248)
-#define PCIE20_INT_PME_LEGACY (GIC_SPI_START + 249)
-#define PCIE20_INT_BRIDGE_FLUSH_N (GIC_SPI_START + 250)
+#define PCIE20_INT_PLS_ERR (GIC_SPI_START + 244)
+#define PCIE20_INT_PLS_PME (GIC_SPI_START + 245)
+#define PCIE20_INT_LINK_UP (GIC_SPI_START + 246)
+#define PCIE20_INT_LINK_DOWN (GIC_SPI_START + 247)
+#define PCIE20_INT_HP_LEGACY (GIC_SPI_START + 248)
+#define PCIE20_INT_AER_LEGACY (GIC_SPI_START + 249)
+#define PCIE20_INT_PME_LEGACY (GIC_SPI_START + 250)
+#define PCIE20_INT_BRIDGE_FLUSH_N (GIC_SPI_START + 251)
/* Backwards compatible IRQ macros. */
#define INT_ADM_AARM ADM_0_SCSS_0_IRQ
diff --git a/arch/arm/mach-msm/include/mach/mdm2.h b/arch/arm/mach-msm/include/mach/mdm2.h
index 637a3cc..c4877cc 100644
--- a/arch/arm/mach-msm/include/mach/mdm2.h
+++ b/arch/arm/mach-msm/include/mach/mdm2.h
@@ -31,6 +31,7 @@
struct mdm_vddmin_resource *vddmin_resource;
struct platform_device *peripheral_platform_device;
const unsigned int ramdump_timeout_ms;
+ int image_upgrade_supported;
};
#endif
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index 2596364..bf92f7d 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -118,6 +118,23 @@
(virt) - MEMBANK0_PAGE_OFFSET + MEMBANK0_PHYS_OFFSET)
#endif
+/*
+ * Need a temporary unique variable that no one will ever see to
+ * hold the compat string. Line number gives this easily.
+ * Need another layer of indirection to get __LINE__ to expand
+ * properly as opposed to appending and ending up with
+ * __compat___LINE__
+ */
+#define __CONCAT(a, b) ___CONCAT(a, b)
+#define ___CONCAT(a, b) a ## b
+
+#define EXPORT_COMPAT(com) \
+static char *__CONCAT(__compat_, __LINE__) __used \
+ __attribute((__section__(".exportcompat.init"))) = com
+
+extern char *__compat_exports_start[];
+extern char *__compat_exports_end[];
+
#endif
#if defined CONFIG_ARCH_MSM_SCORPION || defined CONFIG_ARCH_MSM_KRAIT
@@ -135,4 +152,5 @@
#ifndef CONFIG_ARCH_MSM7X27
#define CONSISTENT_DMA_SIZE (SZ_1M * 14)
+
#endif
diff --git a/arch/arm/mach-msm/include/mach/mpm.h b/arch/arm/mach-msm/include/mach/mpm.h
index 10a6fb0..fabaa09 100644
--- a/arch/arm/mach-msm/include/mach/mpm.h
+++ b/arch/arm/mach-msm/include/mach/mpm.h
@@ -38,14 +38,109 @@
void msm_mpm_irq_extn_init(struct msm_mpm_device_data *mpm_data);
-#ifdef CONFIG_MSM_MPM
+#if defined(CONFIG_MSM_MPM) || defined(CONFIG_MSM_MPM_OF)
+/**
+ * msm_mpm_enable_pin() - Enable/Disable a MPM pin for idle wakeups.
+ *
+ * @pin: MPM pin to set
+ * @enable: enable/disable the pin
+ *
+ * returns 0 on success or an error number
+ *
+ * Drivers can call the function to configure MPM pins for wakeup from idle low
+ * power modes. The API provides direct access for configuring MPM pins
+ * that are not connected to an IRQ/GPIO.
+ */
int msm_mpm_enable_pin(unsigned int pin, unsigned int enable);
+
+/**
+ * msm_mpm_set_pin_wake() - Enable/Disable a MPM pin during suspend
+ *
+ * @pin: MPM pin to set
+ * @enable: enable/disable the pin as wakeup
+ *
+ * returns 0 on success or an error number
+ *
+ * Drivers can call the function to configure MPM pins for wakeup from suspend
+ * low power modes. The API provides direct access for configuring MPM pins
+ * that are not connected to an IRQ/GPIO.
+ */
int msm_mpm_set_pin_wake(unsigned int pin, unsigned int on);
+/**
+ * msm_mpm_set_pin_type() - Set the flowtype of a MPM pin.
+ *
+ * @pin: MPM pin to configure
+ * @flow_type: flowtype of the MPM pin.
+ *
+ * returns 0 on success or an error number
+ *
+ * Drivers can call the function to configure the flowtype of the MPM pins.
+ * The API provides direct access for configuring MPM pins that are not
+ * connected to an IRQ/GPIO.
+ */
int msm_mpm_set_pin_type(unsigned int pin, unsigned int flow_type);
+/**
+ * msm_mpm_irqs_detectable() - Check if active irqs can be monitored by MPM
+ *
+ * @from_idle: indicates if the system is entering low power mode as a part of
+ * suspend/idle task.
+ *
+ * returns true if all active interrupts can be monitored by the MPM
+ *
+ * Low power management code calls into this API to check if all active
+ * interrupts can be monitored by MPM and choose a level such that all active
+ * interrupts can wake the system up from low power mode.
+ */
bool msm_mpm_irqs_detectable(bool from_idle);
+/**
+ * msm_mpm_gpio_detectable() - Check if active gpio irqs can be monitored by
+ * MPM
+ *
+ * @from_idle: indicates if the system is entering low power mode as a part of
+ * suspend/idle task.
+ *
+ * returns true if all active GPIO interrupts can be monitored by the MPM
+ *
+ * Low power management code calls into this API to check if all active
+ * GPIO interrupts can be monitored by MPM and choose a level such that all
+ * active interrupts can wake the system up from low power mode.
+ */
bool msm_mpm_gpio_irqs_detectable(bool from_idle);
-void msm_mpm_enter_sleep(bool from_idle);
+/**
+ * msm_mpm_enter_sleep() -Called from PM code before entering low power mode
+ *
+ * @sclk_count: wakeup time in sclk counts for programmed RPM wakeup
+ * @from_idle: indicates if the system is entering low power mode as a part of
+ * suspend/idle task.
+ *
+ * Low power management code calls into this API to configure the MPM to
+ * monitor the active irqs before going to sleep.
+ */
+void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle);
+/**
+ * msm_mpm_exit_sleep() -Called from PM code after resuming from low power mode
+ *
+ * @from_idle: indicates if the system is entering low power mode as a part of
+ * suspend/idle task.
+ *
+ * Low power management code calls into this API to query the MPM for the
+ * wakeup source and retrigger the appropriate interrupt.
+ */
void msm_mpm_exit_sleep(bool from_idle);
+/**
+ * of_mpm_init() - Device tree initialization function
+ *
+ * @node: MPM device tree node.
+ *
+ * The initialization function is called from the machine irq function after
+ * GPIO/GIC device initialization routines are called. MPM driver keeps track
+ * of all enabled/wakeup interrupts in the system to be able to configure MPM
+ * when entering a system-wide low power mode. The MPM is an always-on low-power
+ * hardware block that monitors 64 wakeup interrupts when the system is in a
+ * low power mode. The initialization function constructs the MPM mapping
+ * between the IRQs and the MPM pin based on data in the device tree.
+ */
+void __init of_mpm_init(struct device_node *node);
#else
static inline int msm_mpm_enable_irq(unsigned int irq, unsigned int enable)
{ return -ENODEV; }
@@ -64,10 +159,8 @@
{ return false; }
static inline bool msm_mpm_gpio_irqs_detectable(bool from_idle)
{ return false; }
-static inline void msm_mpm_enter_sleep(bool from_idle) {}
+static inline void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle) {}
static inline void msm_mpm_exit_sleep(bool from_idle) {}
+static inline void __init of_mpm_init(struct device_node *node) {}
#endif
-
-
-
#endif /* __ARCH_ARM_MACH_MSM_MPM_H */
diff --git a/arch/arm/mach-msm/include/mach/msm_bus_board.h b/arch/arm/mach-msm/include/mach/msm_bus_board.h
index d95e4a4..0a53b46 100644
--- a/arch/arm/mach-msm/include/mach/msm_bus_board.h
+++ b/arch/arm/mach-msm/include/mach/msm_bus_board.h
@@ -65,6 +65,7 @@
extern struct msm_bus_fabric_registration msm_bus_8960_apps_fabric_pdata;
extern struct msm_bus_fabric_registration msm_bus_8960_sys_fabric_pdata;
extern struct msm_bus_fabric_registration msm_bus_8960_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata;
extern struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata;
extern struct msm_bus_fabric_registration msm_bus_8960_cpss_fpb_pdata;
diff --git a/arch/arm/mach-msm/include/mach/msm_memory_dump.h b/arch/arm/mach-msm/include/mach/msm_memory_dump.h
new file mode 100644
index 0000000..3b8c9fd
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_memory_dump.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_MEMORY_DUMP_H
+#define __MSM_MEMORY_DUMP_H
+
+#include <linux/types.h>
+
+enum dump_client_type {
+ MSM_CPU_CTXT = 0,
+ MSM_CACHE,
+ MSM_OCMEM,
+ MSM_ETB,
+ MSM_ETM,
+ MSM_TMC,
+ MAX_NUM_CLIENTS,
+};
+
+struct msm_client_dump {
+ enum dump_client_type id;
+ unsigned long start_addr;
+ unsigned long end_addr;
+};
+
+struct msm_dump_table {
+ u32 version;
+ u32 num_entries;
+ struct msm_client_dump client_entries[MAX_NUM_CLIENTS];
+};
+
+struct msm_memory_dump {
+ unsigned long dump_table_phys;
+ struct msm_dump_table *dump_table_ptr;
+};
+
+#define TABLE_MAJOR(val) (val >> 20)
+#define TABLE_MINOR(val) (val & 0xFFFFF)
+#define MK_TABLE(ma, mi) ((ma << 20) | mi)
+
+#ifndef CONFIG_MSM_MEMORY_DUMP
+static inline int msm_dump_table_register(struct msm_client_dump *entry)
+{
+ return -EIO;
+}
+#else
+int msm_dump_table_register(struct msm_client_dump *client_entry);
+#endif
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_memtypes.h b/arch/arm/mach-msm/include/mach/msm_memtypes.h
index 7afb38d..5ca5861 100644
--- a/arch/arm/mach-msm/include/mach/msm_memtypes.h
+++ b/arch/arm/mach-msm/include/mach/msm_memtypes.h
@@ -66,5 +66,8 @@
extern struct reserve_info *reserve_info;
+int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
+ int depth, void *data);
+
unsigned long __init reserve_memory_for_fmem(unsigned long, unsigned long);
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_pcie.h b/arch/arm/mach-msm/include/mach/msm_pcie.h
index 8bc4317..74f0f5b 100644
--- a/arch/arm/mach-msm/include/mach/msm_pcie.h
+++ b/arch/arm/mach-msm/include/mach/msm_pcie.h
@@ -22,7 +22,7 @@
MSM_PCIE_MAX_GPIO
};
-/* gpio info structrue */
+/* gpio info structure */
struct msm_pcie_gpio_info_t {
char *name;
uint32_t num;
@@ -32,8 +32,10 @@
/* msm pcie platfrom data */
struct msm_pcie_platform {
struct msm_pcie_gpio_info_t *gpio;
+
uint32_t axi_addr;
uint32_t axi_size;
+ uint32_t wake_n;
};
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_smd.h b/arch/arm/mach-msm/include/mach/msm_smd.h
index dc633fb..97c03e7 100644
--- a/arch/arm/mach-msm/include/mach/msm_smd.h
+++ b/arch/arm/mach-msm/include/mach/msm_smd.h
@@ -223,6 +223,19 @@
*/
void smd_disable_read_intr(smd_channel_t *ch);
+/**
+ * Enable/disable receive interrupts for the remote processor used by a
+ * particular channel.
+ * @ch: open channel handle to use for the edge
+ * @mask: 1 = mask interrupts; 0 = unmask interrupts
+ * @returns: 0 for success; < 0 for failure
+ *
+ * Note that this enables/disables all interrupts from the remote subsystem for
+ * all channels. As such, it should be used with care and only for specific
+ * use cases such as power-collapse sequencing.
+ */
+int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask);
+
/* Starts a packet transaction. The size of the packet may exceed the total
* size of the smd ring buffer.
*
@@ -389,6 +402,11 @@
{
}
+static inline int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
+{
+ return -ENODEV;
+}
+
static inline int smd_write_start(smd_channel_t *ch, int len)
{
return -ENODEV;
diff --git a/arch/arm/mach-msm/include/mach/ocmem.h b/arch/arm/mach-msm/include/mach/ocmem.h
index b0475ed..415f8ed 100644
--- a/arch/arm/mach-msm/include/mach/ocmem.h
+++ b/arch/arm/mach-msm/include/mach/ocmem.h
@@ -22,7 +22,9 @@
/* Maximum number of slots in DM */
#define OCMEM_MAX_CHUNKS 32
-#define MIN_CHUNK_SIZE (SZ_1K/8)
+#define MIN_CHUNK_SIZE 128
+
+struct ocmem_notifier;
struct ocmem_buf {
unsigned long addr;
@@ -59,7 +61,7 @@
/* IMEM Clients */
OCMEM_LP_AUDIO,
OCMEM_SENSORS,
- OCMEM_BLAST,
+ OCMEM_OTHER_OS,
OCMEM_CLIENT_MAX,
};
@@ -80,9 +82,11 @@
/* APIS */
/* Notification APIs */
-void *ocmem_notifier_register(int client_id, struct notifier_block *nb);
+struct ocmem_notifier *ocmem_notifier_register(int client_id,
+ struct notifier_block *nb);
-int ocmem_notifier_unregister(void *notif_hndl, struct notifier_block *nb);
+int ocmem_notifier_unregister(struct ocmem_notifier *notif_hndl,
+ struct notifier_block *nb);
/* Obtain the maximum quota for the client */
unsigned long get_max_quota(int client_id);
@@ -104,8 +108,13 @@
int ocmem_shrink(int client_id, struct ocmem_buf *buf,
unsigned long new_size);
-int ocmem_expand(int client_id, struct ocmem_buf *buf,
- unsigned long new_size);
+/* Transfer APIs */
+int ocmem_map(int client_id, struct ocmem_buf *buffer,
+ struct ocmem_map_list *list);
+
+
+int ocmem_unmap(int client_id, struct ocmem_buf *buffer,
+ struct ocmem_map_list *list);
/* Priority Enforcement APIs */
int ocmem_evict(int client_id);
diff --git a/arch/arm/mach-msm/include/mach/ocmem_priv.h b/arch/arm/mach-msm/include/mach/ocmem_priv.h
index dd976ea..70b5a45 100644
--- a/arch/arm/mach-msm/include/mach/ocmem_priv.h
+++ b/arch/arm/mach-msm/include/mach/ocmem_priv.h
@@ -20,12 +20,13 @@
#include "ocmem.h"
#include <mach/msm_iomap.h>
#include <asm/io.h>
+#include <linux/platform_device.h>
#define OCMEM_PHYS_BASE 0xFEC00000
#define OCMEM_PHYS_SIZE 0x180000
-#define TO_OCMEM 0x1
-#define TO_DDR 0x2
+#define TO_OCMEM 0x0
+#define TO_DDR 0x1
struct ocmem_zone;
@@ -38,7 +39,7 @@
int owner;
int active_regions;
int max_regions;
- struct list_head region_list;
+ struct list_head req_list;
unsigned long z_start;
unsigned long z_end;
unsigned long z_head;
@@ -61,12 +62,41 @@
SCHED_DUMP,
};
+struct ocmem_plat_data {
+ void __iomem *vbase;
+ unsigned long size;
+ unsigned long base;
+ struct ocmem_partition *parts;
+ int nr_parts;
+ void __iomem *reg_base;
+ void __iomem *br_base;
+ void __iomem *dm_base;
+ unsigned nr_regions;
+ unsigned nr_macros;
+ unsigned nr_ports;
+ int ocmem_irq;
+ int dm_irq;
+ bool interleaved;
+};
+
+struct ocmem_eviction_data {
+ struct completion completion;
+ struct list_head victim_list;
+ struct list_head req_list;
+ struct work_struct work;
+ int prio;
+ int pending;
+ bool passive;
+};
+
struct ocmem_req {
struct rw_semaphore rw_sem;
/* Chain in sched queue */
struct list_head sched_list;
/* Chain in zone list */
struct list_head zone_list;
+ /* Chain in eviction list */
+ struct list_head eviction_list;
int owner;
int prio;
uint32_t req_id;
@@ -83,6 +113,7 @@
unsigned long req_start;
unsigned long req_end;
unsigned long req_sz;
+ struct ocmem_eviction_data *edata;
};
struct ocmem_handle {
@@ -133,13 +164,20 @@
int ocmem_notifier_init(void);
int check_notifier(int);
+const char *get_name(int);
+int check_id(int);
int dispatch_notification(int, enum ocmem_notif_type, struct ocmem_buf *);
int ocmem_sched_init(void);
+int ocmem_rdm_init(struct platform_device *);
int process_allocate(int, struct ocmem_handle *, unsigned long, unsigned long,
unsigned long, bool, bool);
int process_free(int, struct ocmem_handle *);
-int process_xfer(int, struct ocmem_handle *, struct ocmem_map_list *,
- int direction);
+int process_xfer(int, struct ocmem_handle *, struct ocmem_map_list *, int);
+int process_evict(int);
+int process_restore(int);
+int process_shrink(int, struct ocmem_handle *, unsigned long);
+int ocmem_rdm_transfer(int, struct ocmem_map_list *,
+ unsigned long, int);
unsigned long process_quota(int);
#endif
diff --git a/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h b/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
index 12bf2b7..a32e168 100644
--- a/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
+++ b/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
@@ -29,8 +29,8 @@
* in this enum correspond to MSM8974 for PMIC PM8841 SMPS 2 (VDD_Dig).
*/
enum rpm_regulator_voltage_corner {
- RPM_REGULATOR_CORNER_RETENTION = 1,
- RPM_REGULATOR_CORNER_NONE = RPM_REGULATOR_CORNER_RETENTION,
+ RPM_REGULATOR_CORNER_NONE = 1,
+ RPM_REGULATOR_CORNER_RETENTION,
RPM_REGULATOR_CORNER_SVS_KRAIT,
RPM_REGULATOR_CORNER_SVS_SOC,
RPM_REGULATOR_CORNER_NORMAL,
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 9110632..f7ba507 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -94,7 +94,9 @@
int __init socinfo_init(void) __must_check;
const int read_msm_cpu_type(void);
const int get_core_count(void);
+const int cpu_is_krait(void);
const int cpu_is_krait_v1(void);
+const int cpu_is_krait_v2(void);
static inline int cpu_is_msm7x01(void)
{
@@ -328,4 +330,16 @@
#endif
}
+static inline int cpu_is_msm8974(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974;
+#else
+ return 0;
+#endif
+}
+
#endif
diff --git a/arch/arm/mach-msm/include/mach/subsystem_restart.h b/arch/arm/mach-msm/include/mach/subsystem_restart.h
index 51ace96..6d15f47 100644
--- a/arch/arm/mach-msm/include/mach/subsystem_restart.h
+++ b/arch/arm/mach-msm/include/mach/subsystem_restart.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,8 @@
#define SUBSYS_NAME_MAX_LENGTH 40
+struct subsys_device;
+
enum {
RESET_SOC = 1,
RESET_SUBSYS_COUPLED,
@@ -25,29 +27,23 @@
RESET_LEVEL_MAX
};
-struct subsys_data {
+struct subsys_desc {
const char *name;
- int (*shutdown) (const struct subsys_data *);
- int (*powerup) (const struct subsys_data *);
- void (*crash_shutdown) (const struct subsys_data *);
- int (*ramdump) (int, const struct subsys_data *);
- /* Internal use only */
- struct list_head list;
- void *notif_handle;
-
- struct mutex shutdown_lock;
- struct mutex powerup_lock;
-
- void *restart_order;
- struct subsys_data *single_restart_list[1];
+ int (*shutdown)(const struct subsys_desc *desc);
+ int (*powerup)(const struct subsys_desc *desc);
+ void (*crash_shutdown)(const struct subsys_desc *desc);
+ int (*ramdump)(int, const struct subsys_desc *desc);
};
#if defined(CONFIG_MSM_SUBSYSTEM_RESTART)
-int get_restart_level(void);
-int subsystem_restart(const char *subsys_name);
-int ssr_register_subsystem(struct subsys_data *subsys);
+extern int get_restart_level(void);
+extern int subsystem_restart_dev(struct subsys_device *dev);
+extern int subsystem_restart(const char *name);
+
+extern struct subsys_device *subsys_register(struct subsys_desc *desc);
+extern void subsys_unregister(struct subsys_device *dev);
#else
@@ -56,16 +52,24 @@
return 0;
}
-static inline int subsystem_restart(const char *subsystem_name)
+static inline int subsystem_restart_dev(struct subsys_device *dev)
{
return 0;
}
-static inline int ssr_register_subsystem(struct subsys_data *subsys)
+static inline int subsystem_restart(const char *name)
{
return 0;
}
+static inline
+struct subsys_device *subsys_register(struct subsys_desc *desc)
+{
+ return NULL;
+}
+
+static inline void subsys_unregister(struct subsys_device *dev) { }
+
#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
#endif
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index 42f0438..8f9464c 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -41,6 +41,11 @@
DEFINE_MUTEX(domain_mutex);
static atomic_t domain_nums = ATOMIC_INIT(-1);
+int msm_use_iommu()
+{
+ return iommu_present(&platform_bus_type);
+}
+
int msm_iommu_map_extra(struct iommu_domain *domain,
unsigned long start_iova,
unsigned long size,
@@ -165,6 +170,11 @@
if (size & (align - 1))
return -EINVAL;
+ if (!msm_use_iommu()) {
+ *iova_val = phys;
+ return 0;
+ }
+
ret = msm_allocate_iova_address(domain_no, partition_no, size, align,
&iova);
@@ -187,6 +197,9 @@
unsigned int partition_no,
unsigned long size)
{
+ if (!msm_use_iommu())
+ return;
+
iommu_unmap_range(msm_get_iommu_domain(domain_no), iova, size);
msm_free_iova_address(iova, domain_no, partition_no, size);
}
@@ -390,11 +403,6 @@
return -EINVAL;
}
-int msm_use_iommu()
-{
- return iommu_present(&platform_bus_type);
-}
-
static int __init iommu_domain_probe(struct platform_device *pdev)
{
struct iommu_domains_pdata *p = pdev->dev.platform_data;
diff --git a/arch/arm/mach-msm/lpass-8660.c b/arch/arm/mach-msm/lpass-8660.c
index 1018360..be18b68 100644
--- a/arch/arm/mach-msm/lpass-8660.c
+++ b/arch/arm/mach-msm/lpass-8660.c
@@ -19,6 +19,7 @@
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/err.h>
#include <mach/irqs.h>
#include <mach/scm.h>
@@ -35,6 +36,8 @@
#define MODULE_NAME "lpass_8x60"
#define SCM_Q6_NMI_CMD 0x1
+static struct subsys_device *subsys_8x60_q6_dev;
+
/* Subsystem restart: QDSP6 data, functions */
static void *q6_ramdump_dev;
static void q6_fatal_fn(struct work_struct *);
@@ -44,7 +47,7 @@
static void q6_fatal_fn(struct work_struct *work)
{
pr_err("%s: Watchdog bite received from Q6!\n", MODULE_NAME);
- subsystem_restart("lpass");
+ subsystem_restart_dev(subsys_8x60_q6_dev);
enable_irq(LPASS_Q6SS_WDOG_EXPIRED);
}
@@ -65,7 +68,7 @@
pr_info("subsystem-fatal-8x60: Q6 NMI was sent.\n");
}
-int subsys_q6_shutdown(const struct subsys_data *crashed_subsys)
+int subsys_q6_shutdown(const struct subsys_desc *crashed_subsys)
{
void __iomem *q6_wdog_addr =
ioremap_nocache(Q6SS_WDOG_ENABLE, 8);
@@ -82,7 +85,7 @@
return 0;
}
-int subsys_q6_powerup(const struct subsys_data *crashed_subsys)
+int subsys_q6_powerup(const struct subsys_desc *crashed_subsys)
{
int ret = pil_force_boot("q6");
enable_irq(LPASS_Q6SS_WDOG_EXPIRED);
@@ -93,7 +96,7 @@
static struct ramdump_segment q6_segments[] = { {0x46700000, 0x47F00000 -
0x46700000}, {0x28400000, 0x12800} };
static int subsys_q6_ramdump(int enable,
- const struct subsys_data *crashed_subsys)
+ const struct subsys_desc *crashed_subsys)
{
if (enable)
return do_ramdump(q6_ramdump_dev, q6_segments,
@@ -102,7 +105,7 @@
return 0;
}
-void subsys_q6_crash_shutdown(const struct subsys_data *crashed_subsys)
+void subsys_q6_crash_shutdown(const struct subsys_desc *crashed_subsys)
{
send_q6_nmi();
}
@@ -117,7 +120,7 @@
return IRQ_HANDLED;
}
-static struct subsys_data subsys_8x60_q6 = {
+static struct subsys_desc subsys_8x60_q6 = {
.name = "lpass",
.shutdown = subsys_q6_shutdown,
.powerup = subsys_q6_powerup,
@@ -127,6 +130,7 @@
static void __exit lpass_fatal_exit(void)
{
+ subsys_unregister(subsys_8x60_q6_dev);
iounmap(q6_wakeup_intr);
free_irq(LPASS_Q6SS_WDOG_EXPIRED, NULL);
}
@@ -156,7 +160,9 @@
if (!q6_wakeup_intr)
pr_warn("lpass-8660: Unable to ioremap q6 wakeup address.");
- ret = ssr_register_subsystem(&subsys_8x60_q6);
+ subsys_8x60_q6_dev = subsys_register(&subsys_8x60_q6);
+ if (IS_ERR(subsys_8x60_q6_dev))
+ ret = PTR_ERR(subsys_8x60_q6_dev);
out:
return ret;
}
diff --git a/arch/arm/mach-msm/lpass-8960.c b/arch/arm/mach-msm/lpass-8960.c
index c58b0e1..b714a7f 100644
--- a/arch/arm/mach-msm/lpass-8960.c
+++ b/arch/arm/mach-msm/lpass-8960.c
@@ -152,7 +152,7 @@
pr_debug("%s: Q6 NMI was sent.\n", __func__);
}
-static int lpass_shutdown(const struct subsys_data *subsys)
+static int lpass_shutdown(const struct subsys_desc *subsys)
{
send_q6_nmi();
pil_force_shutdown("q6");
@@ -161,7 +161,7 @@
return 0;
}
-static int lpass_powerup(const struct subsys_data *subsys)
+static int lpass_powerup(const struct subsys_desc *subsys)
{
int ret = pil_force_boot("q6");
enable_irq(LPASS_Q6SS_WDOG_EXPIRED);
@@ -170,7 +170,7 @@
/* RAM segments - address and size for 8960 */
static struct ramdump_segment q6_segments[] = { {0x8da00000, 0x8f200000 -
0x8da00000}, {0x28400000, 0x20000} };
-static int lpass_ramdump(int enable, const struct subsys_data *subsys)
+static int lpass_ramdump(int enable, const struct subsys_desc *subsys)
{
pr_debug("%s: enable[%d]\n", __func__, enable);
if (enable)
@@ -181,7 +181,7 @@
return 0;
}
-static void lpass_crash_shutdown(const struct subsys_data *subsys)
+static void lpass_crash_shutdown(const struct subsys_desc *subsys)
{
q6_crash_shutdown = 1;
send_q6_nmi();
@@ -198,7 +198,9 @@
return IRQ_HANDLED;
}
-static struct subsys_data lpass_8960 = {
+static struct subsys_device *lpass_8960_dev;
+
+static struct subsys_desc lpass_8960 = {
.name = "lpass",
.shutdown = lpass_shutdown,
.powerup = lpass_powerup,
@@ -208,7 +210,10 @@
static int __init lpass_restart_init(void)
{
- return ssr_register_subsystem(&lpass_8960);
+ lpass_8960_dev = subsys_register(&lpass_8960);
+ if (IS_ERR(lpass_8960_dev))
+ return PTR_ERR(lpass_8960_dev);
+ return 0;
}
static int __init lpass_fatal_init(void)
@@ -275,6 +280,7 @@
{
subsys_notif_unregister_notifier(ssr_notif_hdle, &rnb);
subsys_notif_unregister_notifier(ssr_modem_notif_hdle, &mnb);
+ subsys_unregister(lpass_8960_dev);
free_irq(LPASS_Q6SS_WDOG_EXPIRED, NULL);
}
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index e65f71c..f7456ef 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -20,6 +20,7 @@
#include <mach/mpm.h>
#include "lpm_resources.h"
#include "pm.h"
+#include "rpm-notifier.h"
static struct msm_rpmrs_level *msm_lpm_levels;
static int msm_lpm_level_count;
@@ -40,17 +41,25 @@
bool from_idle, bool notify_rpm)
{
int ret = 0;
+ struct msm_rpmrs_limits *l = (struct msm_rpmrs_limits *)limits;
- ret = msm_lpmrs_enter_sleep((struct msm_rpmrs_limits *)limits,
- from_idle, notify_rpm);
+ ret = msm_rpm_enter_sleep();
+ if (ret) {
+ pr_warn("%s(): RPM failed to enter sleep err:%d\n",
+ __func__, ret);
+ goto bail;
+ }
+ ret = msm_lpmrs_enter_sleep(sclk_count, l, from_idle, notify_rpm);
+bail:
return ret;
}
static void msm_lpm_exit_sleep(void *limits, bool from_idle,
bool notify_rpm, bool collapsed)
{
- /* TODO */
- return;
+ msm_rpm_exit_sleep();
+ msm_lpmrs_exit_sleep((struct msm_rpmrs_limits *)limits,
+ from_idle, notify_rpm, collapsed);
}
void msm_lpm_show_resources(void)
diff --git a/arch/arm/mach-msm/lpm_resources.c b/arch/arm/mach-msm/lpm_resources.c
index f57f974..0758651 100644
--- a/arch/arm/mach-msm/lpm_resources.c
+++ b/arch/arm/mach-msm/lpm_resources.c
@@ -18,14 +18,14 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/cpu.h>
-#include <mach/mpm.h>
#include <linux/notifier.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
+#include <mach/mpm.h>
+#include <mach/rpm-smd.h>
#include "spm.h"
#include "lpm_resources.h"
#include "rpm-notifier.h"
-#include <mach/rpm-smd.h>
#include "idle.h"
/*Debug Definitions*/
@@ -46,7 +46,7 @@
static bool msm_lpm_get_rpm_notif = true;
/*Macros*/
-#define VDD_DIG_ACTIVE (950000)
+#define VDD_DIG_ACTIVE (5)
#define VDD_MEM_ACTIVE (1050000)
#define MAX_RS_NAME (16)
#define MAX_RS_SIZE (4)
@@ -243,6 +243,7 @@
uint32_t key, uint8_t *value)
{
int ret = 0;
+ int msg_id;
if (!handle)
return ret;
@@ -255,10 +256,18 @@
return ret;
}
- ret = msm_rpm_send_request_noirq(handle);
- if (ret < 0) {
+ msg_id = msm_rpm_send_request_noirq(handle);
+ if (!msg_id) {
pr_err("%s: Error sending RPM request key %u, handle 0x%x\n",
__func__, key, (unsigned int)handle);
+ ret = -EIO;
+ return ret;
+ }
+
+ ret = msm_rpm_wait_for_ack_noirq(msg_id);
+ if (ret < 0) {
+ pr_err("%s: Couldn't get ACK from RPM for Msg %d Error %d",
+ __func__, msg_id, ret);
return ret;
}
if (msm_lpm_debug_mask & MSM_LPMRS_DEBUG_RPM)
@@ -610,12 +619,10 @@
msm_lpm_notify_common(rpm_notifier_cb, rs);
}
-/* MPM
-static bool msm_lpm_use_mpm(struct msm_rpmrs_limits *limits)
+static inline bool msm_lpm_use_mpm(struct msm_rpmrs_limits *limits)
{
- return ((limits->pxo == MSM_LPM_PXO_OFF) ||
- (limits->vdd_dig_lower_bound <= VDD_DIG_RET_HIGH));
-}*/
+ return (limits->pxo == MSM_LPM_PXO_OFF);
+}
/* LPM levels interface */
bool msm_lpm_level_beyond_limit(struct msm_rpmrs_limits *limits)
@@ -638,7 +645,7 @@
return beyond_limit;
}
-int msm_lpmrs_enter_sleep(struct msm_rpmrs_limits *limits,
+int msm_lpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
bool from_idle, bool notify_rpm)
{
int ret = 0;
@@ -659,19 +666,20 @@
}
msm_lpm_get_rpm_notif = true;
- /* MPM Enter sleep
if (msm_lpm_use_mpm(limits))
- msm_mpm_enter_sleep(from_idle);*/
+ msm_mpm_enter_sleep(sclk_count, from_idle);
return ret;
}
-void msm_lpmrs_exit_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
- bool from_idle, bool notify_rpm)
+void msm_lpmrs_exit_sleep(struct msm_rpmrs_limits *limits,
+ bool from_idle, bool notify_rpm, bool collapsed)
{
/* MPM exit sleep
if (msm_lpm_use_mpm(limits))
msm_mpm_exit_sleep(from_idle);*/
+
+ msm_spm_l2_set_low_power_mode(MSM_SPM_MODE_DISABLED, notify_rpm);
}
static int msm_lpm_cpu_callback(struct notifier_block *cpu_nb,
diff --git a/arch/arm/mach-msm/lpm_resources.h b/arch/arm/mach-msm/lpm_resources.h
index 9973fbf..120832f 100644
--- a/arch/arm/mach-msm/lpm_resources.h
+++ b/arch/arm/mach-msm/lpm_resources.h
@@ -71,6 +71,7 @@
* on the low power mode being entered. L2 low power mode is also set in
* this function.
+ * @sclk_count: wakeup counter for RPM.
* @limits: pointer to the resource limits of the low power mode being entered.
* @from_idle: bool to determine if this call being made as a part of
* idle power collapse.
@@ -78,19 +79,19 @@
*
* returns 0 on success.
*/
-int msm_lpmrs_enter_sleep(struct msm_rpmrs_limits *limits,
+int msm_lpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
bool from_idle, bool notify_rpm);
/**
* msm_lpmrs_exit_sleep() - Exit sleep, reset the MPM and L2 mode.
- * @ sclk_count - Sleep Clock count.
* @ limits: pointer to resource limits of the most recent low power mode.
* @from_idle: bool to determine if this call being made as a part of
* idle power collapse.
* @notify_rpm: bool that informs if this is an RPM notified power collapse.
+ * @collapsed: bool that informs if the Krait was power collapsed.
*/
-void msm_lpmrs_exit_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
- bool from_idle, bool notify_rpm);
+void msm_lpmrs_exit_sleep(struct msm_rpmrs_limits *limits,
+ bool from_idle, bool notify_rpm, bool collapsed);
/**
* msm_lpmrs_module_init() - Init function that parses the device tree to
* get the low power resource attributes and registers with RPM driver for
@@ -106,15 +107,14 @@
return true;
}
-static inline int msm_lpmrs_enter_sleep(struct msm_rpmrs_limits *limits,
- bool from_idle, bool notify_rpm)
+static inline int msm_lpmrs_enter_sleep(uint32_t sclk_count,
+ struct msm_rpmrs_limits *limits, bool from_idle, bool notify_rpm)
{
return 0;
}
-static inline void msm_lpmrs_exit_sleep(uint32_t sclk_count,
- struct msm_rpmrs_limits *limits, bool from_idle,
- bool notify_rpm)
+static inline void msm_lpmrs_exit_sleep(struct msm_rpmrs_limits *limits,
+ bool from_idle, bool notify_rpm, bool collapsed)
{
return;
}
diff --git a/arch/arm/mach-msm/mdm.c b/arch/arm/mach-msm/mdm.c
index cbdc92a..4280fb4 100644
--- a/arch/arm/mach-msm/mdm.c
+++ b/arch/arm/mach-msm/mdm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -73,14 +73,14 @@
}
-static int charm_subsys_shutdown(const struct subsys_data *crashed_subsys)
+static int charm_subsys_shutdown(const struct subsys_desc *crashed_subsys)
{
charm_ready = 0;
power_down_charm();
return 0;
}
-static int charm_subsys_powerup(const struct subsys_data *crashed_subsys)
+static int charm_subsys_powerup(const struct subsys_desc *crashed_subsys)
{
power_on_charm();
boot_type = CHARM_NORMAL_BOOT;
@@ -92,7 +92,7 @@
}
static int charm_subsys_ramdumps(int want_dumps,
- const struct subsys_data *crashed_subsys)
+ const struct subsys_desc *crashed_subsys)
{
charm_ram_dump_status = 0;
if (want_dumps) {
@@ -105,7 +105,9 @@
return charm_ram_dump_status;
}
-static struct subsys_data charm_subsystem = {
+static struct subsys_device *charm_subsys;
+
+static struct subsys_desc charm_subsystem = {
.shutdown = charm_subsys_shutdown,
.ramdump = charm_subsys_ramdumps,
.powerup = charm_subsys_powerup,
@@ -229,7 +231,7 @@
static void charm_status_fn(struct work_struct *work)
{
pr_info("Reseting the charm because status changed\n");
- subsystem_restart("external_modem");
+ subsystem_restart_dev(charm_subsys);
}
static DECLARE_WORK(charm_status_work, charm_status_fn);
@@ -239,7 +241,7 @@
pr_info("Reseting the charm due to an errfatal\n");
if (get_restart_level() == RESET_SOC)
pm8xxx_stay_on();
- subsystem_restart("external_modem");
+ subsystem_restart_dev(charm_subsys);
}
static DECLARE_WORK(charm_fatal_work, charm_fatal_fn);
@@ -349,7 +351,11 @@
atomic_notifier_chain_register(&panic_notifier_list, &charm_panic_blk);
charm_debugfs_init();
- ssr_register_subsystem(&charm_subsystem);
+ charm_subsys = subsys_register(&charm_subsystem);
+ if (IS_ERR(charm_subsys)) {
+ ret = PTR_ERR(charm_subsys);
+ goto fatal_err;
+ }
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
diff --git a/arch/arm/mach-msm/mdm2.c b/arch/arm/mach-msm/mdm2.c
index 6e7086e..e74af2e 100644
--- a/arch/arm/mach-msm/mdm2.c
+++ b/arch/arm/mach-msm/mdm2.c
@@ -249,6 +249,33 @@
}
}
+static void mdm_image_upgrade(struct mdm_modem_drv *mdm_drv, int type)
+{
+ switch (type) {
+ case APQ_CONTROLLED_UPGRADE:
+ pr_debug("%s APQ controlled modem image upgrade\n", __func__);
+ mdm_drv->mdm_ready = 0;
+ mdm_toggle_soft_reset(mdm_drv);
+ break;
+ case MDM_CONTROLLED_UPGRADE:
+ pr_debug("%s MDM controlled modem image upgrade\n", __func__);
+ mdm_drv->mdm_ready = 0;
+ /*
+ * If we have no image currently present on the modem, then we
+ * would be in PBL, in which case the status gpio would not go
+ * high.
+ */
+ mdm_drv->disable_status_check = 1;
+ if (mdm_drv->usb_switch_gpio > 0) {
+ pr_info("%s Switching usb control to MDM\n", __func__);
+ gpio_direction_output(mdm_drv->usb_switch_gpio, 1);
+ } else
+ pr_err("%s usb switch gpio unavailable\n", __func__);
+ break;
+ default:
+ pr_err("%s invalid upgrade type\n", __func__);
+ }
+}
static struct mdm_ops mdm_cb = {
.power_on_mdm_cb = mdm_power_on_common,
.reset_mdm_cb = mdm_power_on_common,
@@ -256,6 +283,7 @@
.power_down_mdm_cb = mdm_power_down_common,
.debug_state_changed_cb = debug_state_changed,
.status_cb = mdm_status_changed,
+ .image_upgrade_cb = mdm_image_upgrade,
};
static int __init mdm_modem_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/mdm_common.c b/arch/arm/mach-msm/mdm_common.c
index 5b181e1..6b40cda 100644
--- a/arch/arm/mach-msm/mdm_common.c
+++ b/arch/arm/mach-msm/mdm_common.c
@@ -56,6 +56,7 @@
#define EXTERNAL_MODEM "external_modem"
static struct mdm_modem_drv *mdm_drv;
+static struct subsys_device *mdm_subsys_dev;
DECLARE_COMPLETION(mdm_needs_reload);
DECLARE_COMPLETION(mdm_boot);
@@ -150,11 +151,13 @@
* If the mdm modem did not pull the MDM2AP_STATUS gpio
* high then call subsystem_restart.
*/
- if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0) {
- pr_err("%s: MDM2AP_STATUS gpio did not go high\n",
- __func__);
- mdm_drv->mdm_ready = 0;
- subsystem_restart(EXTERNAL_MODEM);
+ if (!mdm_drv->disable_status_check) {
+ if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0) {
+ pr_err("%s: MDM2AP_STATUS gpio did not go high\n",
+ __func__);
+ mdm_drv->mdm_ready = 0;
+ subsystem_restart_dev(mdm_subsys_dev);
+ }
}
}
@@ -238,6 +241,15 @@
else
put_user(0, (unsigned long __user *) arg);
break;
+ case IMAGE_UPGRADE:
+ pr_debug("%s Image upgrade ioctl recieved\n", __func__);
+ if (mdm_drv->pdata->image_upgrade_supported &&
+ mdm_drv->ops->image_upgrade_cb) {
+ get_user(status, (unsigned long __user *) arg);
+ mdm_drv->ops->image_upgrade_cb(mdm_drv, status);
+ } else
+ pr_debug("%s Image upgrade not supported\n", __func__);
+ break;
default:
pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
ret = -EINVAL;
@@ -271,7 +283,7 @@
(gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 1)) {
pr_info("%s: Reseting the mdm due to an errfatal\n", __func__);
mdm_drv->mdm_ready = 0;
- subsystem_restart(EXTERNAL_MODEM);
+ subsystem_restart_dev(mdm_subsys_dev);
}
return IRQ_HANDLED;
}
@@ -332,7 +344,7 @@
pr_info("%s: unexpected reset external modem\n", __func__);
mdm_drv->mdm_unexpected_reset_occurred = 1;
mdm_drv->mdm_ready = 0;
- subsystem_restart(EXTERNAL_MODEM);
+ subsystem_restart_dev(mdm_subsys_dev);
} else if (value == 1) {
cancel_delayed_work(&mdm2ap_status_check_work);
pr_info("%s: status = 1: mdm is now ready\n", __func__);
@@ -349,7 +361,7 @@
return IRQ_HANDLED;
}
-static int mdm_subsys_shutdown(const struct subsys_data *crashed_subsys)
+static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys)
{
mdm_drv->mdm_ready = 0;
gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
@@ -363,11 +375,10 @@
mdm_drv->ops->reset_mdm_cb(mdm_drv);
else
mdm_drv->mdm_unexpected_reset_occurred = 0;
-
return 0;
}
-static int mdm_subsys_powerup(const struct subsys_data *crashed_subsys)
+static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
{
gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 0);
gpio_direction_output(mdm_drv->ap2mdm_status_gpio, 1);
@@ -390,7 +401,7 @@
}
static int mdm_subsys_ramdumps(int want_dumps,
- const struct subsys_data *crashed_subsys)
+ const struct subsys_desc *crashed_subsys)
{
mdm_drv->mdm_ram_dump_status = 0;
if (want_dumps) {
@@ -411,7 +422,7 @@
return mdm_drv->mdm_ram_dump_status;
}
-static struct subsys_data mdm_subsystem = {
+static struct subsys_desc mdm_subsystem = {
.shutdown = mdm_subsys_shutdown,
.ramdump = mdm_subsys_ramdumps,
.powerup = mdm_subsys_powerup,
@@ -514,6 +525,12 @@
if (pres)
mdm_drv->mdm2ap_pblrdy = pres->start;
+ /*USB_SW*/
+ pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "USB_SW");
+ if (pres)
+ mdm_drv->usb_switch_gpio = pres->start;
+
mdm_drv->boot_type = CHARM_NORMAL_BOOT;
mdm_drv->ops = mdm_ops;
@@ -556,6 +573,13 @@
if (mdm_drv->ap2mdm_wakeup_gpio > 0)
gpio_request(mdm_drv->ap2mdm_wakeup_gpio, "AP2MDM_WAKEUP");
+ if (mdm_drv->usb_switch_gpio > 0) {
+ if (gpio_request(mdm_drv->usb_switch_gpio, "USB_SW")) {
+ pr_err("%s Failed to get usb switch gpio\n", __func__);
+ mdm_drv->usb_switch_gpio = -1;
+ }
+ }
+
gpio_direction_output(mdm_drv->ap2mdm_status_gpio, 1);
gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 0);
@@ -588,7 +612,11 @@
mdm_debugfs_init();
/* Register subsystem handlers */
- ssr_register_subsystem(&mdm_subsystem);
+ mdm_subsys_dev = subsys_register(&mdm_subsystem);
+ if (IS_ERR(mdm_subsys_dev)) {
+ ret = PTR_ERR(mdm_subsys_dev);
+ goto fatal_err;
+ }
/* ERR_FATAL irq. */
irq = MSM_GPIO_TO_INT(mdm_drv->mdm2ap_errfatal_gpio);
diff --git a/arch/arm/mach-msm/mdm_private.h b/arch/arm/mach-msm/mdm_private.h
index 7ac3727..7aba83d 100644
--- a/arch/arm/mach-msm/mdm_private.h
+++ b/arch/arm/mach-msm/mdm_private.h
@@ -23,6 +23,7 @@
void (*power_down_mdm_cb)(struct mdm_modem_drv *mdm_drv);
void (*debug_state_changed_cb)(int value);
void (*status_cb)(struct mdm_modem_drv *mdm_drv, int value);
+ void (*image_upgrade_cb)(struct mdm_modem_drv *mdm_drv, int type);
};
/* Private mdm2 data structure */
@@ -37,6 +38,7 @@
unsigned ap2mdm_soft_reset_gpio;
unsigned ap2mdm_pmic_pwr_en_gpio;
unsigned mdm2ap_pblrdy;
+ unsigned usb_switch_gpio;
int mdm_errfatal_irq;
int mdm_status_irq;
@@ -46,6 +48,7 @@
enum charm_boot_type boot_type;
int mdm_debug_on;
int mdm_unexpected_reset_occurred;
+ int disable_status_check;
struct mdm_ops *ops;
struct mdm_platform_data *pdata;
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index a1b21c5..63c2d3a 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -37,6 +37,7 @@
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <linux/sched.h>
+#include <linux/of_fdt.h>
/* fixme */
#include <asm/tlbflush.h>
@@ -381,3 +382,117 @@
{
return fmem_set_state(FMEM_T_STATE);
}
+
+static char * const memtype_names[] = {
+ [MEMTYPE_SMI_KERNEL] = "SMI_KERNEL",
+ [MEMTYPE_SMI] = "SMI",
+ [MEMTYPE_EBI0] = "EBI0",
+ [MEMTYPE_EBI1] = "EBI1",
+};
+
+static int reserve_memory_type(char *mem_name,
+ struct memtype_reserve *reserve_table,
+ int size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(memtype_names); i++) {
+ if (memtype_names[i] && strcmp(mem_name,
+ memtype_names[i]) == 0) {
+ reserve_table[i].size += size;
+ return 0;
+ }
+ }
+
+ pr_err("Could not find memory type %s\n", mem_name);
+ return -EINVAL;
+}
+
+static int check_for_compat(unsigned long node)
+{
+ char **start = __compat_exports_start;
+
+ for ( ; start < __compat_exports_end; start++)
+ if (of_flat_dt_is_compatible(node, *start))
+ return 1;
+
+ return 0;
+}
+
+int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ char *memory_name_prop;
+ unsigned int *memory_remove_prop;
+ unsigned long memory_name_prop_length;
+ unsigned long memory_remove_prop_length;
+ unsigned long memory_size_prop_length;
+ unsigned int *memory_size_prop;
+ unsigned int memory_size;
+ unsigned int memory_start;
+ int ret;
+
+ memory_name_prop = of_get_flat_dt_prop(node,
+ "qcom,memory-reservation-type",
+ &memory_name_prop_length);
+ memory_remove_prop = of_get_flat_dt_prop(node,
+ "qcom,memblock-remove",
+ &memory_remove_prop_length);
+
+ if (memory_name_prop || memory_remove_prop) {
+ if (!check_for_compat(node))
+ goto out;
+ } else {
+ goto out;
+ }
+
+ if (memory_name_prop) {
+ if (strnlen(memory_name_prop, memory_name_prop_length) == 0) {
+ WARN(1, "Memory name was malformed\n");
+ goto mem_remove;
+ }
+
+ memory_size_prop = of_get_flat_dt_prop(node,
+ "qcom,memory-reservation-size",
+ &memory_size_prop_length);
+
+ if (memory_size_prop &&
+ (memory_size_prop_length == sizeof(unsigned int))) {
+ memory_size = be32_to_cpu(*memory_size_prop);
+
+ if (reserve_memory_type(memory_name_prop,
+ data, memory_size) == 0)
+ pr_info("%s reserved %s size %x\n",
+ uname, memory_name_prop, memory_size);
+ else
+ WARN(1, "Node %s reserve failed\n",
+ uname);
+ } else {
+ WARN(1, "Node %s specified bad/nonexistent size\n",
+ uname);
+ }
+ }
+
+mem_remove:
+
+ if (memory_remove_prop) {
+ if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
+ WARN(1, "Memory remove malformed\n");
+ goto out;
+ }
+
+ memory_start = be32_to_cpu(memory_remove_prop[0]);
+ memory_size = be32_to_cpu(memory_remove_prop[1]);
+
+ ret = memblock_remove(memory_start, memory_size);
+ if (ret)
+ WARN(1, "Failed to remove memory %x-%x\n",
+ memory_start, memory_start+memory_size);
+ else
+ pr_info("Node %s removed memory %x-%x\n", uname,
+ memory_start, memory_start+memory_size);
+ }
+
+out:
+ return 0;
+}
diff --git a/arch/arm/mach-msm/modem-8660.c b/arch/arm/mach-msm/modem-8660.c
index 9c558e4..096ed9c 100644
--- a/arch/arm/mach-msm/modem-8660.c
+++ b/arch/arm/mach-msm/modem-8660.c
@@ -19,6 +19,7 @@
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/err.h>
#include <mach/irqs.h>
#include <mach/scm.h>
@@ -48,6 +49,8 @@
module_param(reset_modem, int, 0644);
#endif
+static struct subsys_device *modem_8660_dev;
+
/* Subsystem restart: Modem data, functions */
static void *modem_ramdump_dev;
static void modem_fatal_fn(struct work_struct *);
@@ -75,7 +78,7 @@
mb();
iounmap(hwio_modem_reset_addr);
- subsystem_restart("modem");
+ subsystem_restart_dev(modem_8660_dev);
enable_irq(MARM_WDOG_EXPIRED);
}
@@ -93,7 +96,7 @@
if (modem_state == 0 || modem_state & panic_smsm_states) {
- subsystem_restart("modem");
+ subsystem_restart_dev(modem_8660_dev);
enable_irq(MARM_WDOG_EXPIRED);
} else if (modem_state & reset_smsm_states) {
@@ -135,13 +138,13 @@
goto out;
}
pr_err("%s: Modem error fatal'ed.", MODULE_NAME);
- subsystem_restart("modem");
+ subsystem_restart_dev(modem_8660_dev);
}
out:
return NOTIFY_DONE;
}
-static int modem_shutdown(const struct subsys_data *crashed_subsys)
+static int modem_shutdown(const struct subsys_desc *crashed_subsys)
{
void __iomem *modem_wdog_addr;
@@ -178,7 +181,7 @@
return 0;
}
-static int modem_powerup(const struct subsys_data *crashed_subsys)
+static int modem_powerup(const struct subsys_desc *crashed_subsys)
{
int ret;
@@ -192,8 +195,7 @@
static struct ramdump_segment modem_segments[] = {
{0x42F00000, 0x46000000 - 0x42F00000} };
-static int modem_ramdump(int enable,
- const struct subsys_data *crashed_subsys)
+static int modem_ramdump(int enable, const struct subsys_desc *crashed_subsys)
{
if (enable)
return do_ramdump(modem_ramdump_dev, modem_segments,
@@ -202,8 +204,7 @@
return 0;
}
-static void modem_crash_shutdown(
- const struct subsys_data *crashed_subsys)
+static void modem_crash_shutdown(const struct subsys_desc *crashed_subsys)
{
/* If modem hasn't already crashed, send SMSM_RESET. */
if (!(smsm_get_state(SMSM_MODEM_STATE) & SMSM_RESET)) {
@@ -225,7 +226,7 @@
return IRQ_HANDLED;
}
-static struct subsys_data subsys_8660_modem = {
+static struct subsys_desc subsys_8660_modem = {
.name = "modem",
.shutdown = modem_shutdown,
.powerup = modem_powerup,
@@ -260,13 +261,16 @@
goto out;
}
- ret = ssr_register_subsystem(&subsys_8660_modem);
+ modem_8660_dev = subsys_register(&subsys_8660_modem);
+ if (IS_ERR(modem_8660_dev))
+ ret = PTR_ERR(modem_8660_dev);
out:
return ret;
}
static void __exit modem_8660_exit(void)
{
+ subsys_unregister(modem_8660_dev);
free_irq(MARM_WDOG_EXPIRED, NULL);
}
diff --git a/arch/arm/mach-msm/modem-8960.c b/arch/arm/mach-msm/modem-8960.c
index fd7b7b5..73b9b1f 100644
--- a/arch/arm/mach-msm/modem-8960.c
+++ b/arch/arm/mach-msm/modem-8960.c
@@ -35,6 +35,8 @@
static int crash_shutdown;
+static struct subsys_device *modem_8960_dev;
+
#define MAX_SSR_REASON_LEN 81U
#define Q6_FW_WDOG_ENABLE 0x08882024
#define Q6_SW_WDOG_ENABLE 0x08982024
@@ -66,7 +68,7 @@
static void restart_modem(void)
{
log_modem_sfr();
- subsystem_restart("modem");
+ subsystem_restart_dev(modem_8960_dev);
}
static void modem_wdog_check(struct work_struct *work)
@@ -101,7 +103,7 @@
}
}
-static int modem_shutdown(const struct subsys_data *subsys)
+static int modem_shutdown(const struct subsys_desc *subsys)
{
void __iomem *q6_fw_wdog_addr;
void __iomem *q6_sw_wdog_addr;
@@ -142,7 +144,7 @@
#define MODEM_WDOG_CHECK_TIMEOUT_MS 10000
-static int modem_powerup(const struct subsys_data *subsys)
+static int modem_powerup(const struct subsys_desc *subsys)
{
pil_force_boot("modem_fw");
pil_force_boot("modem");
@@ -153,7 +155,7 @@
return 0;
}
-void modem_crash_shutdown(const struct subsys_data *subsys)
+void modem_crash_shutdown(const struct subsys_desc *subsys)
{
crash_shutdown = 1;
smsm_reset_modem(SMSM_RESET);
@@ -176,8 +178,7 @@
static void *modemsw_ramdump_dev;
static void *smem_ramdump_dev;
-static int modem_ramdump(int enable,
- const struct subsys_data *crashed_subsys)
+static int modem_ramdump(int enable, const struct subsys_desc *crashed_subsys)
{
int ret = 0;
@@ -234,7 +235,7 @@
return IRQ_HANDLED;
}
-static struct subsys_data modem_8960 = {
+static struct subsys_desc modem_8960 = {
.name = "modem",
.shutdown = modem_shutdown,
.powerup = modem_powerup,
@@ -244,13 +245,16 @@
static int modem_subsystem_restart_init(void)
{
- return ssr_register_subsystem(&modem_8960);
+ modem_8960_dev = subsys_register(&modem_8960);
+ if (IS_ERR(modem_8960_dev))
+ return PTR_ERR(modem_8960_dev);
+ return 0;
}
static int modem_debug_set(void *data, u64 val)
{
if (val == 1)
- subsystem_restart("modem");
+ subsystem_restart_dev(modem_8960_dev);
return 0;
}
diff --git a/arch/arm/mach-msm/mpm-8625.c b/arch/arm/mach-msm/mpm-8625.c
index 954e5cc..3c219be 100644
--- a/arch/arm/mach-msm/mpm-8625.c
+++ b/arch/arm/mach-msm/mpm-8625.c
@@ -221,7 +221,7 @@
/* save the contents of GIC CPU interface and Distributor
* Disable all the Interrupts, if we enter from idle pc
*/
- msm_gic_save(modem_wake, from_idle);
+ msm_gic_save();
irq_set_irq_type(MSM8625_INT_A9_M2A_6, IRQF_TRIGGER_RISING);
enable_irq(MSM8625_INT_A9_M2A_6);
pr_debug("%s going for sleep now\n", __func__);
diff --git a/arch/arm/mach-msm/mpm-of.c b/arch/arm/mach-msm/mpm-of.c
new file mode 100644
index 0000000..e4c0e4e
--- /dev/null
+++ b/arch/arm/mach-msm/mpm-of.c
@@ -0,0 +1,765 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <asm/hardware/gic.h>
+#include <asm/arch_timer.h>
+#include <mach/gpio.h>
+#include <mach/mpm.h>
+
+enum {
+ MSM_MPM_GIC_IRQ_DOMAIN,
+ MSM_MPM_GPIO_IRQ_DOMAIN,
+ MSM_MPM_NR_IRQ_DOMAINS,
+};
+
+enum {
+ MSM_MPM_SET_ENABLED,
+ MSM_MPM_SET_WAKEUP,
+ MSM_NR_IRQS_SET,
+};
+
+struct mpm_irqs_a2m {
+ struct irq_domain *domain;
+ struct device_node *parent;
+ irq_hw_number_t hwirq;
+ unsigned long pin;
+ struct hlist_node node;
+};
+
+struct mpm_irqs {
+ struct irq_domain *domain;
+ unsigned long *enabled_irqs;
+ unsigned long *wakeup_irqs;
+};
+
+static struct mpm_irqs unlisted_irqs[MSM_MPM_NR_IRQ_DOMAINS];
+
+static struct hlist_head irq_hash[MSM_MPM_NR_MPM_IRQS];
+static unsigned int msm_mpm_irqs_m2a[MSM_MPM_NR_MPM_IRQS];
+#define MSM_MPM_REG_WIDTH DIV_ROUND_UP(MSM_MPM_NR_MPM_IRQS, 32)
+
+#define MSM_MPM_IRQ_INDEX(irq) (irq / 32)
+#define MSM_MPM_IRQ_MASK(irq) BIT(irq % 32)
+
+#define MSM_MPM_DETECT_CTL_INDEX(irq) (irq / 16)
+#define MSM_MPM_DETECT_CTL_SHIFT(irq) ((irq % 16) * 2)
+
+#define hashfn(val) (val % MSM_MPM_NR_MPM_IRQS)
+#define SCLK_HZ (32768)
+#define ARCH_TIMER_HZ (19200000)
+static struct msm_mpm_device_data msm_mpm_dev_data;
+
+enum mpm_reg_offsets {
+ MSM_MPM_REG_WAKEUP,
+ MSM_MPM_REG_ENABLE,
+ MSM_MPM_REG_DETECT_CTL,
+ MSM_MPM_REG_DETECT_CTL1,
+ MSM_MPM_REG_POLARITY,
+ MSM_MPM_REG_STATUS,
+};
+
+static DEFINE_SPINLOCK(msm_mpm_lock);
+
+static uint32_t msm_mpm_enabled_irq[MSM_MPM_REG_WIDTH];
+static uint32_t msm_mpm_wake_irq[MSM_MPM_REG_WIDTH];
+static uint32_t msm_mpm_detect_ctl[MSM_MPM_REG_WIDTH * 2];
+static uint32_t msm_mpm_polarity[MSM_MPM_REG_WIDTH];
+
+enum {
+ MSM_MPM_DEBUG_NON_DETECTABLE_IRQ = BIT(0),
+ MSM_MPM_DEBUG_PENDING_IRQ = BIT(1),
+ MSM_MPM_DEBUG_WRITE = BIT(2),
+ MSM_MPM_DEBUG_NON_DETECTABLE_IRQ_IDLE = BIT(3),
+};
+
+static int msm_mpm_debug_mask = 1;
+module_param_named(
+ debug_mask, msm_mpm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+enum mpm_state {
+ MSM_MPM_IRQ_MAPPING_DONE = BIT(0),
+ MSM_MPM_DEVICE_PROBED = BIT(1),
+};
+
+static enum mpm_state msm_mpm_initialized;
+
+static inline bool msm_mpm_is_initialized(void)
+{
+ return msm_mpm_initialized &
+ (MSM_MPM_IRQ_MAPPING_DONE | MSM_MPM_DEVICE_PROBED);
+
+}
+
+static inline uint32_t msm_mpm_read(
+ unsigned int reg, unsigned int subreg_index)
+{
+ unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;
+ return __raw_readl(msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
+}
+
+static inline void msm_mpm_write(
+ unsigned int reg, unsigned int subreg_index, uint32_t value)
+{
+ unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;
+
+ __raw_writel(value, msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
+ if (MSM_MPM_DEBUG_WRITE & msm_mpm_debug_mask)
+ pr_info("%s: reg %u.%u: 0x%08x\n",
+ __func__, reg, subreg_index, value);
+}
+
+static inline void msm_mpm_send_interrupt(void)
+{
+ __raw_writel(msm_mpm_dev_data.mpm_apps_ipc_val,
+ msm_mpm_dev_data.mpm_apps_ipc_reg);
+ /* Ensure the write is complete before returning. */
+ wmb();
+}
+
+static irqreturn_t msm_mpm_irq(int irq, void *dev_id)
+{
+ /*
+ * When the system resumes from deep sleep mode, the RPM hardware wakes
+ * up the Apps processor by triggering this interrupt. This interrupt
+ * has to be enabled and set as wake for the irq to get SPM out of
+ * sleep. Handle the interrupt here to make sure that it gets cleared.
+ */
+ return IRQ_HANDLED;
+}
+
+static void msm_mpm_set(cycle_t wakeup, bool wakeset)
+{
+ uint32_t *irqs;
+ unsigned int reg;
+ int i;
+ uint32_t *expiry_timer;
+
+ expiry_timer = (uint32_t *)&wakeup;
+
+ irqs = wakeset ? msm_mpm_wake_irq : msm_mpm_enabled_irq;
+ for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
+ reg = MSM_MPM_REG_WAKEUP;
+ msm_mpm_write(reg, i, expiry_timer[i]);
+
+ reg = MSM_MPM_REG_ENABLE;
+ msm_mpm_write(reg, i, irqs[i]);
+
+ reg = MSM_MPM_REG_DETECT_CTL;
+ msm_mpm_write(reg, i, msm_mpm_detect_ctl[i]);
+
+ reg = MSM_MPM_REG_DETECT_CTL1;
+ msm_mpm_write(reg, i, msm_mpm_detect_ctl[2+i]);
+
+ reg = MSM_MPM_REG_POLARITY;
+ msm_mpm_write(reg, i, msm_mpm_polarity[i]);
+ }
+
+ /*
+ * Ensure that the set operation is complete before sending the
+ * interrupt
+ */
+ wmb();
+ msm_mpm_send_interrupt();
+}
+
+static inline unsigned int msm_mpm_get_irq_m2a(unsigned int pin)
+{
+ return msm_mpm_irqs_m2a[pin];
+}
+
+static inline uint16_t msm_mpm_get_irq_a2m(struct irq_data *d)
+{
+ struct hlist_node *elem;
+ struct mpm_irqs_a2m *node = NULL;
+
+ hlist_for_each_entry(node, elem, &irq_hash[hashfn(d->hwirq)], node) {
+ if ((node->hwirq == d->hwirq)
+ && (d->domain == node->domain)) {
+ /* Update the linux irq mapping */
+ msm_mpm_irqs_m2a[node->pin] = d->irq;
+ break;
+ }
+ }
+ return node ? node->pin : 0;
+}
+
+static int msm_mpm_enable_irq_exclusive(
+ struct irq_data *d, bool enable, bool wakeset)
+{
+ uint16_t mpm_pin;
+
+ WARN_ON(!d);
+ if (!d)
+ return 0;
+
+ mpm_pin = msm_mpm_get_irq_a2m(d);
+
+ if (mpm_pin == 0xff)
+ return 0;
+
+ if (mpm_pin) {
+ uint32_t *mpm_irq_masks = wakeset ?
+ msm_mpm_wake_irq : msm_mpm_enabled_irq;
+ uint32_t index = MSM_MPM_IRQ_INDEX(mpm_pin);
+ uint32_t mask = MSM_MPM_IRQ_MASK(mpm_pin);
+
+ if (enable)
+ mpm_irq_masks[index] |= mask;
+ else
+ mpm_irq_masks[index] &= ~mask;
+ } else {
+ int i;
+ unsigned long *irq_apps;
+
+ for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
+ if (d->domain == unlisted_irqs[i].domain)
+ break;
+ }
+
+ if (i == MSM_MPM_NR_IRQ_DOMAINS)
+ return 0;
+ irq_apps = wakeset ? unlisted_irqs[i].wakeup_irqs :
+ unlisted_irqs[i].enabled_irqs;
+
+ if (enable)
+ __set_bit(d->hwirq, irq_apps);
+ else
+ __clear_bit(d->hwirq, irq_apps);
+
+ }
+
+ return 0;
+}
+
+static void msm_mpm_set_detect_ctl(int pin, unsigned int flow_type)
+{
+ uint32_t index;
+ uint32_t val = 0;
+ uint32_t shift;
+
+ index = MSM_MPM_DETECT_CTL_INDEX(pin);
+ shift = MSM_MPM_DETECT_CTL_SHIFT(pin);
+
+ if (flow_type & IRQ_TYPE_EDGE_RISING)
+ val |= 0x02;
+
+ if (flow_type & IRQ_TYPE_EDGE_FALLING)
+ val |= 0x01;
+
+ msm_mpm_detect_ctl[index] &= ~(0x3 << shift);
+ msm_mpm_detect_ctl[index] |= (val & 0x03) << shift;
+}
+
+static int msm_mpm_set_irq_type_exclusive(
+ struct irq_data *d, unsigned int flow_type)
+{
+ uint32_t mpm_irq;
+
+ mpm_irq = msm_mpm_get_irq_a2m(d);
+
+ if (mpm_irq == 0xff)
+ return 0;
+
+ if (mpm_irq) {
+ uint32_t index = MSM_MPM_IRQ_INDEX(mpm_irq);
+ uint32_t mask = MSM_MPM_IRQ_MASK(mpm_irq);
+
+ if (index >= MSM_MPM_REG_WIDTH)
+ return -EFAULT;
+
+ msm_mpm_set_detect_ctl(mpm_irq, flow_type);
+
+ if (flow_type & IRQ_TYPE_LEVEL_HIGH)
+ msm_mpm_polarity[index] |= mask;
+ else
+ msm_mpm_polarity[index] &= ~mask;
+ }
+ return 0;
+}
+
+static int __msm_mpm_enable_irq(struct irq_data *d, bool enable)
+{
+ unsigned long flags;
+ int rc;
+
+ if (!msm_mpm_is_initialized())
+ return -EINVAL;
+
+ spin_lock_irqsave(&msm_mpm_lock, flags);
+
+ rc = msm_mpm_enable_irq_exclusive(d, enable, false);
+ spin_unlock_irqrestore(&msm_mpm_lock, flags);
+
+ return rc;
+}
+
+static void msm_mpm_enable_irq(struct irq_data *d)
+{
+ __msm_mpm_enable_irq(d, true);
+}
+
+static void msm_mpm_disable_irq(struct irq_data *d)
+{
+ __msm_mpm_enable_irq(d, false);
+}
+
+static int msm_mpm_set_irq_wake(struct irq_data *d, unsigned int on)
+{
+ unsigned long flags;
+ int rc;
+
+ if (!msm_mpm_is_initialized())
+ return -EINVAL;
+
+ spin_lock_irqsave(&msm_mpm_lock, flags);
+ rc = msm_mpm_enable_irq_exclusive(d, (bool)on, true);
+ spin_unlock_irqrestore(&msm_mpm_lock, flags);
+
+ return rc;
+}
+
+static int msm_mpm_set_irq_type(struct irq_data *d, unsigned int flow_type)
+{
+ unsigned long flags;
+ int rc;
+
+ if (!msm_mpm_is_initialized())
+ return -EINVAL;
+
+ spin_lock_irqsave(&msm_mpm_lock, flags);
+ rc = msm_mpm_set_irq_type_exclusive(d, flow_type);
+ spin_unlock_irqrestore(&msm_mpm_lock, flags);
+
+ return rc;
+}
+
+/******************************************************************************
+ * Public functions
+ *****************************************************************************/
+int msm_mpm_enable_pin(unsigned int pin, unsigned int enable)
+{
+ uint32_t index = MSM_MPM_IRQ_INDEX(pin);
+ uint32_t mask = MSM_MPM_IRQ_MASK(pin);
+ unsigned long flags;
+
+ if (!msm_mpm_is_initialized())
+ return -EINVAL;
+
+ if (pin > MSM_MPM_NR_MPM_IRQS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&msm_mpm_lock, flags);
+
+ if (enable)
+ msm_mpm_enabled_irq[index] |= mask;
+ else
+ msm_mpm_enabled_irq[index] &= ~mask;
+
+ spin_unlock_irqrestore(&msm_mpm_lock, flags);
+ return 0;
+}
+
+int msm_mpm_set_pin_wake(unsigned int pin, unsigned int on)
+{
+ uint32_t index = MSM_MPM_IRQ_INDEX(pin);
+ uint32_t mask = MSM_MPM_IRQ_MASK(pin);
+ unsigned long flags;
+
+ if (!msm_mpm_is_initialized())
+ return -EINVAL;
+
+ if (pin >= MSM_MPM_NR_MPM_IRQS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&msm_mpm_lock, flags);
+
+ if (on)
+ msm_mpm_wake_irq[index] |= mask;
+ else
+ msm_mpm_wake_irq[index] &= ~mask;
+
+ spin_unlock_irqrestore(&msm_mpm_lock, flags);
+ return 0;
+}
+
+int msm_mpm_set_pin_type(unsigned int pin, unsigned int flow_type)
+{
+ uint32_t index = MSM_MPM_IRQ_INDEX(pin);
+ uint32_t mask = MSM_MPM_IRQ_MASK(pin);
+ unsigned long flags;
+
+ if (!msm_mpm_is_initialized())
+ return -EINVAL;
+
+ if (pin >= MSM_MPM_NR_MPM_IRQS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&msm_mpm_lock, flags);
+
+ msm_mpm_set_detect_ctl(pin, flow_type);
+
+ if (flow_type & IRQ_TYPE_LEVEL_HIGH)
+ msm_mpm_polarity[index] |= mask;
+ else
+ msm_mpm_polarity[index] &= ~mask;
+
+ spin_unlock_irqrestore(&msm_mpm_lock, flags);
+ return 0;
+}
+
+bool msm_mpm_irqs_detectable(bool from_idle)
+{
+ /* TODO:
+ * Return true only when the set of unlisted (no MPM pin mapping) irqs is empty
+ */
+
+ if (!msm_mpm_is_initialized())
+ return false;
+
+ return true;
+}
+
+bool msm_mpm_gpio_irqs_detectable(bool from_idle)
+{
+ /* TODO:
+ * Return true only when the set of unlisted (no MPM pin mapping) gpio irqs is empty
+ */
+ if (!msm_mpm_is_initialized())
+ return false;
+ return true;
+}
+
+void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle)
+{
+ cycle_t wakeup = (u64)sclk_count * ARCH_TIMER_HZ;
+
+ if (!msm_mpm_is_initialized()) {
+ pr_err("%s(): MPM not initialized\n", __func__);
+ return;
+ }
+
+ if (sclk_count) {
+ do_div(wakeup, SCLK_HZ);
+ wakeup += arch_counter_get_cntpct();
+ } else {
+ wakeup = (~0ULL);
+ }
+
+ msm_mpm_set(wakeup, !from_idle);
+}
+
+void msm_mpm_exit_sleep(bool from_idle)
+{
+ unsigned long pending;
+ int i;
+ int k;
+
+ if (!msm_mpm_is_initialized()) {
+ pr_err("%s(): MPM not initialized\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
+ pending = msm_mpm_read(MSM_MPM_REG_STATUS, i);
+
+ if (MSM_MPM_DEBUG_PENDING_IRQ & msm_mpm_debug_mask)
+ pr_info("%s: pending.%d: 0x%08lx", __func__,
+ i, pending);
+
+ k = find_first_bit(&pending, 32);
+ while (k < 32) {
+ unsigned int mpm_irq = 32 * i + k;
+ unsigned int apps_irq = msm_mpm_get_irq_m2a(mpm_irq);
+ struct irq_desc *desc = apps_irq ?
+ irq_to_desc(apps_irq) : NULL;
+
+ if (desc && !irqd_is_level_type(&desc->irq_data)) {
+ irq_set_pending(apps_irq);
+ if (from_idle) {
+ raw_spin_lock(&desc->lock);
+ check_irq_resend(desc, apps_irq);
+ raw_spin_unlock(&desc->lock);
+ }
+ }
+
+ k = find_next_bit(&pending, 32, k + 1);
+ }
+ }
+}
+
+static int __devinit msm_mpm_dev_probe(struct platform_device *pdev)
+{
+ struct resource *res = NULL;
+ int offset, ret;
+ struct msm_mpm_device_data *dev = &msm_mpm_dev_data;
+
+ if (msm_mpm_initialized & MSM_MPM_DEVICE_PROBED) {
+ pr_warn("MPM device probed multiple times\n");
+ return 0;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vmpm");
+ if (!res) {
+ pr_err("%s(): Missing RPM memory resource\n", __func__);
+ goto fail;
+ }
+
+ dev->mpm_request_reg_base = devm_request_and_ioremap(&pdev->dev, res);
+
+ if (!dev->mpm_request_reg_base) {
+ pr_err("%s(): Unable to iomap\n", __func__);
+ goto fail;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipc");
+ if (!res) {
+ pr_err("%s(): Missing GCC memory resource\n", __func__);
+ goto failed_irq_get;
+ }
+
+ dev->mpm_apps_ipc_reg = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+
+ if (of_property_read_u32(pdev->dev.of_node,
+ "qcom,ipc-bit-offset", &offset)) {
+ pr_info("%s(): Cannot read ipc bit offset\n", __func__);
+ goto failed_free_irq;
+ }
+
+ dev->mpm_apps_ipc_val = (1 << offset);
+
+ if (!dev->mpm_apps_ipc_reg)
+ goto failed_irq_get;
+
+ dev->mpm_ipc_irq = platform_get_irq(pdev, 0);
+
+ if (dev->mpm_ipc_irq == -ENXIO) {
+ pr_info("%s(): Cannot find IRQ resource\n", __func__);
+ goto failed_irq_get;
+ }
+ ret = request_irq(dev->mpm_ipc_irq, msm_mpm_irq,
+ IRQF_TRIGGER_RISING, pdev->name, msm_mpm_irq);
+
+ if (ret) {
+ pr_info("%s(): request_irq failed errno: %d\n", __func__, ret);
+ goto failed_irq_get;
+ }
+ ret = irq_set_irq_wake(dev->mpm_ipc_irq, 1);
+
+ if (ret) {
+ pr_err("%s: failed to set wakeup irq %u: %d\n",
+ __func__, dev->mpm_ipc_irq, ret);
+ goto failed_irq_get;
+
+ }
+ msm_mpm_initialized |= MSM_MPM_DEVICE_PROBED;
+
+ return 0;
+
+failed_free_irq:
+ free_irq(dev->mpm_ipc_irq, msm_mpm_irq);
+failed_irq_get:
+ if (dev->mpm_apps_ipc_reg)
+ devm_iounmap(&pdev->dev, dev->mpm_apps_ipc_reg);
+ if (dev->mpm_request_reg_base)
+ devm_iounmap(&pdev->dev, dev->mpm_request_reg_base);
+fail:
+ return -EINVAL;
+}
+
+static inline int __init mpm_irq_domain_linear_size(struct irq_domain *d)
+{
+ return d->revmap_data.linear.size;
+}
+
+static inline int __init mpm_irq_domain_legacy_size(struct irq_domain *d)
+{
+ return d->revmap_data.legacy.size;
+}
+
+void __init of_mpm_init(struct device_node *node)
+{
+ const __be32 *list;
+
+ struct mpm_of {
+ char *pkey;
+ char *map;
+ struct irq_chip *chip;
+ int (*get_max_irqs)(struct irq_domain *d);
+ };
+ int i;
+
+ struct mpm_of mpm_of_map[MSM_MPM_NR_IRQ_DOMAINS] = {
+ {
+ "qcom,gic-parent",
+ "qcom,gic-map",
+ &gic_arch_extn,
+ mpm_irq_domain_linear_size,
+ },
+ {
+ "qcom,gpio-parent",
+ "qcom,gpio-map",
+ &msm_gpio_irq_extn,
+ mpm_irq_domain_legacy_size,
+ },
+ };
+
+ if (msm_mpm_initialized & MSM_MPM_IRQ_MAPPING_DONE) {
+ pr_warn("%s(): MPM driver mapping exists\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < MSM_MPM_NR_MPM_IRQS; i++)
+ INIT_HLIST_HEAD(&irq_hash[i]);
+
+ for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
+ struct device_node *parent = NULL;
+ struct mpm_irqs_a2m *mpm_node = NULL;
+ struct irq_domain *domain = NULL;
+ int size;
+
+ parent = of_parse_phandle(node, mpm_of_map[i].pkey, 0);
+
+ if (!parent) {
+ pr_warn("%s(): %s Not found\n", __func__,
+ mpm_of_map[i].pkey);
+ continue;
+ }
+
+ domain = irq_find_host(parent);
+
+ if (!domain) {
+ pr_warn("%s(): Cannot find irq controller for %s\n",
+ __func__, mpm_of_map[i].pkey);
+ continue;
+ }
+
+ size = mpm_of_map[i].get_max_irqs(domain);
+
+ unlisted_irqs[i].enabled_irqs =
+ kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
+ GFP_KERNEL);
+
+ if (!unlisted_irqs[i].enabled_irqs)
+ goto failed_malloc;
+
+ unlisted_irqs[i].wakeup_irqs =
+ kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
+ GFP_KERNEL);
+
+ if (!unlisted_irqs[i].wakeup_irqs)
+ goto failed_malloc;
+
+ unlisted_irqs[i].domain = domain;
+
+ list = of_get_property(node, mpm_of_map[i].map, &size);
+
+ if (!list || !size) {
+ __WARN();
+ continue;
+ }
+
+ /*
+ * Size is in bytes. Convert to size of uint32_t
+ */
+ size /= sizeof(*list);
+
+ /*
+ * The data is represented by a tuple mapping hwirq to a MPM
+ * pin. The number of mappings in the device tree would be
+ * size/2
+ */
+ mpm_node = kzalloc(sizeof(struct mpm_irqs_a2m) * size / 2,
+ GFP_KERNEL);
+ if (!mpm_node)
+ goto failed_malloc;
+
+ while (size) {
+ unsigned long pin = be32_to_cpup(list++);
+ irq_hw_number_t hwirq = be32_to_cpup(list++);
+
+ mpm_node->pin = pin;
+ mpm_node->hwirq = hwirq;
+ mpm_node->parent = parent;
+ mpm_node->domain = domain;
+ INIT_HLIST_NODE(&mpm_node->node);
+
+ hlist_add_head(&mpm_node->node,
+ &irq_hash[hashfn(mpm_node->hwirq)]);
+ size -= 2;
+ mpm_node++;
+ }
+
+ if (mpm_of_map[i].chip) {
+ mpm_of_map[i].chip->irq_mask = msm_mpm_disable_irq;
+ mpm_of_map[i].chip->irq_unmask = msm_mpm_enable_irq;
+ mpm_of_map[i].chip->irq_disable = msm_mpm_disable_irq;
+ mpm_of_map[i].chip->irq_set_type = msm_mpm_set_irq_type;
+ mpm_of_map[i].chip->irq_set_wake = msm_mpm_set_irq_wake;
+ }
+
+ }
+ msm_mpm_initialized |= MSM_MPM_IRQ_MAPPING_DONE;
+
+ return;
+
+failed_malloc:
+ for (i = 0; i < MSM_MPM_NR_MPM_IRQS; i++) {
+ mpm_of_map[i].chip->irq_mask = NULL;
+ mpm_of_map[i].chip->irq_unmask = NULL;
+ mpm_of_map[i].chip->irq_disable = NULL;
+ mpm_of_map[i].chip->irq_set_type = NULL;
+ mpm_of_map[i].chip->irq_set_wake = NULL;
+
+ kfree(unlisted_irqs[i].enabled_irqs);
+ kfree(unlisted_irqs[i].wakeup_irqs);
+
+ }
+}
+
+static struct of_device_id msm_mpm_match_table[] = {
+ {.compatible = "qcom,mpm-v2"},
+ {},
+};
+
+static struct platform_driver msm_mpm_dev_driver = {
+ .probe = msm_mpm_dev_probe,
+ .driver = {
+ .name = "mpm-v2",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_mpm_match_table,
+ },
+};
+
+int __init msm_mpm_device_init(void)
+{
+ return platform_driver_register(&msm_mpm_dev_driver);
+}
+arch_initcall(msm_mpm_device_init);
diff --git a/arch/arm/mach-msm/mpm.c b/arch/arm/mach-msm/mpm.c
index b395b61..1c39415 100644
--- a/arch/arm/mach-msm/mpm.c
+++ b/arch/arm/mach-msm/mpm.c
@@ -421,7 +421,7 @@
MSM_MPM_NR_APPS_IRQS);
}
-void msm_mpm_enter_sleep(bool from_idle)
+void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle)
{
msm_mpm_set(!from_idle);
}
diff --git a/arch/arm/mach-msm/msm-buspm-dev.c b/arch/arm/mach-msm/msm-buspm-dev.c
index 296418d..a818eed 100644
--- a/arch/arm/mach-msm/msm-buspm-dev.c
+++ b/arch/arm/mach-msm/msm-buspm-dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -40,6 +40,13 @@
return (dev) ? dev->vaddr : NULL;
}
+static inline unsigned int msm_buspm_dev_get_buflen(struct file *filp)
+{
+ struct msm_buspm_map_dev *dev = filp->private_data;
+
+ return dev ? dev->buflen : 0;
+}
+
static inline unsigned long msm_buspm_dev_get_paddr(struct file *filp)
{
struct msm_buspm_map_dev *dev = filp->private_data;
@@ -114,6 +121,7 @@
unsigned long paddr;
int retval = 0;
void *buf = msm_buspm_dev_get_vaddr(filp);
+ unsigned int buflen = msm_buspm_dev_get_buflen(filp);
unsigned char *dbgbuf = buf;
switch (cmd) {
@@ -156,7 +164,7 @@
break;
}
- if ((xfer.size <= sizeof(buf)) &&
+ if ((xfer.size <= buflen) &&
(copy_to_user((void __user *)xfer.data, buf,
xfer.size))) {
retval = -EFAULT;
@@ -177,7 +185,7 @@
break;
}
- if ((sizeof(buf) <= xfer.size) &&
+ if ((buflen <= xfer.size) &&
(copy_from_user(buf, (void __user *)xfer.data,
xfer.size))) {
retval = -EFAULT;
diff --git a/arch/arm/mach-msm/msm-krait-l2-accessors.c b/arch/arm/mach-msm/msm-krait-l2-accessors.c
index 3d341e3..3da155a 100644
--- a/arch/arm/mach-msm/msm-krait-l2-accessors.c
+++ b/arch/arm/mach-msm/msm-krait-l2-accessors.c
@@ -14,19 +14,79 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/mach-types.h>
+#include <asm/cputype.h>
DEFINE_RAW_SPINLOCK(l2_access_lock);
+#define L2CPMR 0x500
+#define L2CPUCPMR 0x501
+#define L2CPUVRF8 0x708
+#define CPUNDX_MASK (0x7 << 12)
+
+/*
+ * For Krait versions found in APQ8064v1.x, save L2CPUVRF8 before
+ * L2CPMR or L2CPUCPMR writes and restore it after to work around an
+ * issue where L2CPUVRF8 becomes corrupt.
+ */
+static bool l2cpuvrf8_needs_fix(u32 reg_addr)
+{
+ switch (read_cpuid_id()) {
+ case 0x510F06F0: /* KR28M4A10 */
+ case 0x510F06F1: /* KR28M4A10B */
+ case 0x510F06F2: /* KR28M4A11 */
+ break;
+ default:
+ return false;
+ };
+
+ switch (reg_addr & ~CPUNDX_MASK) {
+ case L2CPMR:
+ case L2CPUCPMR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static u32 l2cpuvrf8_fix_save(u32 reg_addr, u32 *l2cpuvrf8_val)
+{
+ u32 l2cpuvrf8_addr = L2CPUVRF8 | (reg_addr & CPUNDX_MASK);
+
+ mb();
+ asm volatile ("mcr p15, 3, %[l2cpselr], c15, c0, 6\n\t"
+ "isb\n\t"
+ "mrc p15, 3, %[l2cpdr], c15, c0, 7\n\t"
+ : [l2cpdr]"=r" (*l2cpuvrf8_val)
+ : [l2cpselr]"r" (l2cpuvrf8_addr)
+ );
+
+ return l2cpuvrf8_addr;
+}
+
+static void l2cpuvrf8_fix_restore(u32 l2cpuvrf8_addr, u32 l2cpuvrf8_val)
+{
+ mb();
+ asm volatile ("mcr p15, 3, %[l2cpselr], c15, c0, 6\n\t"
+ "isb\n\t"
+ "mcr p15, 3, %[l2cpdr], c15, c0, 7\n\t"
+ "isb\n\t"
+ :
+ : [l2cpselr]"r" (l2cpuvrf8_addr),
+ [l2cpdr]"r" (l2cpuvrf8_val)
+ );
+}
+
u32 set_get_l2_indirect_reg(u32 reg_addr, u32 val)
{
unsigned long flags;
+ u32 uninitialized_var(l2cpuvrf8_val), l2cpuvrf8_addr = 0;
u32 ret_val;
- /* CP15 registers are not emulated on RUMI3. */
- if (machine_is_msm8960_rumi3())
- return 0;
raw_spin_lock_irqsave(&l2_access_lock, flags);
+ if (l2cpuvrf8_needs_fix(reg_addr))
+ l2cpuvrf8_addr = l2cpuvrf8_fix_save(reg_addr, &l2cpuvrf8_val);
+
mb();
asm volatile ("mcr p15, 3, %[l2cpselr], c15, c0, 6\n\t"
"isb\n\t"
@@ -36,6 +96,10 @@
: [l2cpdr_read]"=r" (ret_val)
: [l2cpselr]"r" (reg_addr), [l2cpdr]"r" (val)
);
+
+ if (l2cpuvrf8_addr)
+ l2cpuvrf8_fix_restore(l2cpuvrf8_addr, l2cpuvrf8_val);
+
raw_spin_unlock_irqrestore(&l2_access_lock, flags);
return ret_val;
@@ -45,11 +109,13 @@
void set_l2_indirect_reg(u32 reg_addr, u32 val)
{
unsigned long flags;
- /* CP15 registers are not emulated on RUMI3. */
- if (machine_is_msm8960_rumi3())
- return;
+ u32 uninitialized_var(l2cpuvrf8_val), l2cpuvrf8_addr = 0;
raw_spin_lock_irqsave(&l2_access_lock, flags);
+
+ if (l2cpuvrf8_needs_fix(reg_addr))
+ l2cpuvrf8_addr = l2cpuvrf8_fix_save(reg_addr, &l2cpuvrf8_val);
+
mb();
asm volatile ("mcr p15, 3, %[l2cpselr], c15, c0, 6\n\t"
"isb\n\t"
@@ -58,6 +124,10 @@
:
: [l2cpselr]"r" (reg_addr), [l2cpdr]"r" (val)
);
+
+ if (l2cpuvrf8_addr)
+ l2cpuvrf8_fix_restore(l2cpuvrf8_addr, l2cpuvrf8_val);
+
raw_spin_unlock_irqrestore(&l2_access_lock, flags);
}
EXPORT_SYMBOL(set_l2_indirect_reg);
@@ -66,9 +136,6 @@
{
u32 val;
unsigned long flags;
- /* CP15 registers are not emulated on RUMI3. */
- if (machine_is_msm8960_rumi3())
- return 0;
raw_spin_lock_irqsave(&l2_access_lock, flags);
asm volatile ("mcr p15, 3, %[l2cpselr], c15, c0, 6\n\t"
diff --git a/arch/arm/mach-msm/msm_bus/Makefile b/arch/arm/mach-msm/msm_bus/Makefile
index ab62c20..924577f 100644
--- a/arch/arm/mach-msm/msm_bus/Makefile
+++ b/arch/arm/mach-msm/msm_bus/Makefile
@@ -2,7 +2,9 @@
# Makefile for msm-bus driver specific files
#
obj-y += msm_bus_core.o msm_bus_fabric.o msm_bus_config.o msm_bus_arb.o
-obj-y += msm_bus_rpm.o msm_bus_bimc.o msm_bus_noc.o
+obj-y += msm_bus_bimc.o msm_bus_noc.o
+obj-$(CONFIG_MSM_RPM) += msm_bus_rpm.o
+obj-$(CONFIG_MSM_RPM_SMD) += msm_bus_rpm_smd.o
obj-$(CONFIG_ARCH_MSM8X60) += msm_bus_board_8660.o
obj-$(CONFIG_ARCH_MSM8960) += msm_bus_board_8960.o
obj-$(CONFIG_ARCH_MSM9615) += msm_bus_board_9615.o
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
index 823f14d..2072cb1 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
@@ -1817,44 +1817,45 @@
info->node_info->id, info->node_info->priv_id, add_bw);
binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data;
- if (!info->node_info->qport) {
- MSM_BUS_DBG("No qos ports to update!\n");
- return;
- }
if (info->node_info->num_mports == 0) {
MSM_BUS_DBG("BIMC: Skip Master BW\n");
goto skip_mas_bw;
}
+ ports = info->node_info->num_mports;
bw = INTERLEAVED_BW(fab_pdata, add_bw, ports);
- ports = INTERLEAVED_VAL(fab_pdata, ports);
for (i = 0; i < ports; i++) {
- MSM_BUS_DBG("qport: %d\n", info->node_info->qport[i]);
sel_cd->mas[info->node_info->masterp[i]].bw += bw;
sel_cd->mas[info->node_info->masterp[i]].hw_id =
info->node_info->mas_hw_id;
- qbw.bw = sel_cd->mas[info->node_info->masterp[i]].bw;
- qbw.ws = info->node_info->ws;
- /* Threshold low = 90% of bw */
- qbw.thl = (90 * bw) / 100;
- /* Threshold medium = bw */
- qbw.thm = bw;
- /* Threshold high = 10% more than bw */
- qbw.thh = (110 * bw) / 100;
- /* Check if info is a shared master.
- * If it is, mark it dirty
- * If it isn't, then set QOS Bandwidth
- **/
- MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %ld\n",
+ MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
info->node_info->priv_id,
sel_cd->mas[info->node_info->masterp[i]].bw);
if (info->node_info->hw_sel == MSM_BUS_RPM)
sel_cd->mas[info->node_info->masterp[i]].dirty = 1;
- else
+ else {
+ if (!info->node_info->qport) {
+ MSM_BUS_DBG("No qos ports to update!\n");
+ break;
+ }
+ MSM_BUS_DBG("qport: %d\n", info->node_info->qport[i]);
+ qbw.bw = sel_cd->mas[info->node_info->masterp[i]].bw;
+ qbw.ws = info->node_info->ws;
+ /* Threshold low = 90% of bw */
+ qbw.thl = (90 * bw) / 100;
+ /* Threshold medium = bw */
+ qbw.thm = bw;
+ /* Threshold high = 10% more than bw */
+ qbw.thh = (110 * bw) / 100;
+ /* Check if info is a shared master.
+ * If it is, mark it dirty
+ * If it isn't, then set QOS Bandwidth
+ **/
msm_bus_bimc_set_qos_bw(binfo,
info->node_info->qport[i], &qbw);
+ }
}
skip_mas_bw:
@@ -1870,7 +1871,7 @@
sel_cd->slv[hop->node_info->slavep[i]].bw += bw;
sel_cd->slv[hop->node_info->slavep[i]].hw_id =
hop->node_info->slv_hw_id;
- MSM_BUS_DBG("BIMC: Update slave_bw: ID: %d -> %ld\n",
+ MSM_BUS_DBG("BIMC: Update slave_bw: ID: %d -> %llu\n",
hop->node_info->priv_id,
sel_cd->slv[hop->node_info->slavep[i]].bw);
MSM_BUS_DBG("BIMC: Update slave_bw: index: %d\n",
@@ -1893,6 +1894,7 @@
*fab_pdata, void *hw_data, void **cdata)
{
MSM_BUS_DBG("\nReached BIMC Commit\n");
+ msm_bus_remote_hw_commit(fab_pdata, hw_data, cdata);
return 0;
}
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
index 7ede23d..9ba9f7b1 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,12 @@
MSM_BUS_TIERED_SLAVE_KMPSS_L2,
};
+enum msm_bus_sg_tiered_slaves_type {
+ SG_TIERED_SLAVE_MM_IMEM = 1,
+ SG_MMSS_TIERED_SLAVE_FAB_APPS_0,
+ SG_MMSS_TIERED_SLAVE_FAB_APPS_1,
+};
+
enum msm_bus_8960_master_ports_type {
MSM_BUS_SYSTEM_MASTER_PORT_APPSS_FAB = 0,
MSM_BUS_MASTER_PORT_SPS,
@@ -106,6 +112,23 @@
MSM_BUS_SLAVE_PORT_RIVA,
};
+enum msm_bus_8960_sg_master_ports_type {
+ MSM_BUS_MASTER_PORT_GRAPHICS_3D_PORT0 =
+ MSM_BUS_MMSS_MASTER_PORT_UNUSED_2,
+ MSM_BUS_MASTER_PORT_VIDEO_CAP =
+ MSM_BUS_MASTER_PORT_GRAPHICS_2D_CORE0,
+ MSM_BUS_MASTER_PORT_VIDEO_DEC =
+ MSM_BUS_MASTER_PORT_GRAPHICS_2D_CORE1,
+ MSM_BUS_MASTER_PORT_VIDEO_ENC =
+ MSM_BUS_MASTER_PORT_HD_CODEC_PORT0,
+};
+
+enum msm_bus_8960_sg_slave_ports_type {
+ SG_SLAVE_PORT_MM_IMEM = 0,
+ SG_MMSS_SLAVE_PORT_APPS_FAB_0,
+ SG_MMSS_SLAVE_PORT_APPS_FAB_1,
+};
+
static int tier2[] = {MSM_BUS_BW_TIER2,};
static uint32_t master_iids[NMASTERS];
static uint32_t slave_iids[NSLAVES];
@@ -424,6 +447,10 @@
static int mport_mdp1[] = {MSM_BUS_MASTER_PORT_MDP_PORT1,};
static int mport_rotator[] = {MSM_BUS_MASTER_PORT_ROTATOR,};
static int mport_graphics_3d[] = {MSM_BUS_MASTER_PORT_GRAPHICS_3D,};
+static int pro_mport_graphics_3d[] = {
+ MSM_BUS_MASTER_PORT_GRAPHICS_3D_PORT0,
+ MSM_BUS_MASTER_PORT_GRAPHICS_3D,
+};
static int mport_jpeg_dec[] = {MSM_BUS_MASTER_PORT_JPEG_DEC,};
static int mport_graphics_2d_core0[] = {MSM_BUS_MASTER_PORT_GRAPHICS_2D_CORE0,};
static int mport_vfe[] = {MSM_BUS_MASTER_PORT_VFE,};
@@ -432,6 +459,9 @@
static int mport_graphics_2d_core1[] = {MSM_BUS_MASTER_PORT_GRAPHICS_2D_CORE1,};
static int mport_hd_codec_port0[] = {MSM_BUS_MASTER_PORT_HD_CODEC_PORT0,};
static int mport_hd_codec_port1[] = {MSM_BUS_MASTER_PORT_HD_CODEC_PORT1,};
+static int mport_video_cap[] = {MSM_BUS_MASTER_PORT_VIDEO_CAP};
+static int mport_video_enc[] = {MSM_BUS_MASTER_PORT_VIDEO_ENC};
+static int mport_video_dec[] = {MSM_BUS_MASTER_PORT_VIDEO_DEC};
static int appss_mport_fab_mmss[] = {
MSM_BUS_APPSS_MASTER_PORT_FAB_MMSS_0,
MSM_BUS_APPSS_MASTER_PORT_FAB_MMSS_1
@@ -439,15 +469,25 @@
static int mmss_sport_apps_fab[] = {
MSM_BUS_MMSS_SLAVE_PORT_APPS_FAB_0,
- MSM_BUS_MMSS_SLAVE_PORT_APPS_FAB_1
+ MSM_BUS_MMSS_SLAVE_PORT_APPS_FAB_1,
+};
+static int sg_sport_apps_fab[] = {
+ SG_MMSS_SLAVE_PORT_APPS_FAB_0,
+ SG_MMSS_SLAVE_PORT_APPS_FAB_1,
};
static int sport_mm_imem[] = {MSM_BUS_SLAVE_PORT_MM_IMEM,};
+static int sg_sport_mm_imem[] = {SG_SLAVE_PORT_MM_IMEM,};
static int mmss_tiered_slave_fab_apps[] = {
MSM_BUS_MMSS_TIERED_SLAVE_FAB_APPS_0,
MSM_BUS_MMSS_TIERED_SLAVE_FAB_APPS_1,
};
+static int sg_tiered_slave_fab_apps[] = {
+ SG_MMSS_TIERED_SLAVE_FAB_APPS_0,
+ SG_MMSS_TIERED_SLAVE_FAB_APPS_1,
+};
static int tiered_slave_mm_imem[] = {MSM_BUS_TIERED_SLAVE_MM_IMEM,};
+static int sg_tiered_slave_mm_imem[] = {SG_TIERED_SLAVE_MM_IMEM,};
static struct msm_bus_node_info mmss_fabric_info[] = {
@@ -557,6 +597,106 @@
},
};
+static struct msm_bus_node_info sg_mmss_fabric_info[] = {
+ {
+ .id = MSM_BUS_MASTER_MDP_PORT0,
+ .masterp = mport_mdp,
+ .num_mports = ARRAY_SIZE(mport_mdp),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_MDP_PORT1,
+ .masterp = mport_mdp1,
+ .num_mports = ARRAY_SIZE(mport_mdp1),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_ROTATOR,
+ .masterp = mport_rotator,
+ .num_mports = ARRAY_SIZE(mport_rotator),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_GRAPHICS_3D,
+ .masterp = pro_mport_graphics_3d,
+ .num_mports = ARRAY_SIZE(pro_mport_graphics_3d),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_JPEG_DEC,
+ .masterp = mport_jpeg_dec,
+ .num_mports = ARRAY_SIZE(mport_jpeg_dec),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_VIDEO_CAP,
+ .masterp = mport_video_cap,
+ .num_mports = ARRAY_SIZE(mport_video_cap),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_VFE,
+ .masterp = mport_vfe,
+ .num_mports = ARRAY_SIZE(mport_vfe),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_VPE,
+ .masterp = mport_vpe,
+ .num_mports = ARRAY_SIZE(mport_vpe),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_JPEG_ENC,
+ .masterp = mport_jpeg_enc,
+ .num_mports = ARRAY_SIZE(mport_jpeg_enc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ /* This port has been added for V2. It is absent in V1 */
+ {
+ .id = MSM_BUS_MASTER_VIDEO_DEC,
+ .masterp = mport_video_dec,
+ .num_mports = ARRAY_SIZE(mport_video_dec),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_MASTER_VIDEO_ENC,
+ .masterp = mport_video_enc,
+ .num_mports = ARRAY_SIZE(mport_video_enc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ },
+ {
+ .id = MSM_BUS_FAB_APPSS,
+ .gateway = 1,
+ .slavep = sg_sport_apps_fab,
+ .num_sports = ARRAY_SIZE(sg_sport_apps_fab),
+ .masterp = appss_mport_fab_mmss,
+ .num_mports = ARRAY_SIZE(appss_mport_fab_mmss),
+ .tier = sg_tiered_slave_fab_apps,
+ .num_tiers = ARRAY_SIZE(sg_tiered_slave_fab_apps),
+ .buswidth = 16,
+ },
+ {
+ .id = MSM_BUS_SLAVE_MM_IMEM,
+ .slavep = sg_sport_mm_imem,
+ .num_sports = ARRAY_SIZE(sg_sport_mm_imem),
+ .tier = sg_tiered_slave_mm_imem,
+ .num_tiers = ARRAY_SIZE(sg_tiered_slave_mm_imem),
+ .buswidth = 8,
+ },
+};
+
static struct msm_bus_node_info sys_fpb_fabric_info[] = {
{
.id = MSM_BUS_FAB_SYSTEM,
@@ -919,6 +1059,22 @@
.board_algo = &msm_bus_board_algo,
};
+struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata = {
+ .id = MSM_BUS_FAB_MMSS,
+ .name = "msm_mm_fab",
+ sg_mmss_fabric_info,
+ ARRAY_SIZE(sg_mmss_fabric_info),
+ .ahb = 0,
+ .fabclk[DUAL_CTX] = "bus_clk",
+ .fabclk[ACTIVE_CTX] = "bus_a_clk",
+ .haltid = MSM_RPM_ID_MMSS_FABRIC_CFG_HALT_0,
+ .offset = MSM_RPM_ID_MM_FABRIC_ARB_0,
+ .nmasters = 13,
+ .nslaves = 3,
+ .ntieredslaves = 3,
+ .board_algo = &msm_bus_board_algo,
+};
+
struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata = {
.id = MSM_BUS_FAB_SYSTEM_FPB,
.name = "msm_sys_fpb",
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_core.h b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
index 264afbd..333fe4b 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_core.h
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
@@ -123,7 +123,7 @@
struct msm_bus_node_hw_info {
bool dirty;
unsigned int hw_id;
- unsigned long bw;
+ uint64_t bw;
};
struct msm_bus_hw_algorithm {
@@ -202,6 +202,8 @@
int curr;
};
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+ *fab_pdata, void *hw_data, void **cdata);
int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric);
void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric);
struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid);
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_noc.c b/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
index 5179d2a..2597e27 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
@@ -518,14 +518,12 @@
return;
}
- if (!info->node_info->qport) {
- MSM_BUS_DBG("NOC: No QoS Ports to update bw\n");
- return;
+ if (info->node_info->num_mports == 0) {
+ MSM_BUS_DBG("NOC: Skip Master BW\n");
+ goto skip_mas_bw;
}
ports = info->node_info->num_mports;
- qos_bw.ws = info->node_info->ws;
-
bw = INTERLEAVED_BW(fab_pdata, add_bw, ports);
MSM_BUS_DBG("NOC: Update bw for: %d: %ld\n",
@@ -534,26 +532,36 @@
sel_cd->mas[info->node_info->masterp[i]].bw += bw;
sel_cd->mas[info->node_info->masterp[i]].hw_id =
info->node_info->mas_hw_id;
- qos_bw.bw = sel_cd->mas[info->node_info->masterp[i]].bw;
- MSM_BUS_DBG("NOC: Update mas_bw for ID: %d, BW: %ld, QoS: %u\n",
+ MSM_BUS_DBG("NOC: Update mas_bw: ID: %d, BW: %llu ports:%d\n",
info->node_info->priv_id,
sel_cd->mas[info->node_info->masterp[i]].bw,
- qos_bw.ws);
+ ports);
/* Check if info is a shared master.
* If it is, mark it dirty
* If it isn't, then set QOS Bandwidth
**/
if (info->node_info->hw_sel == MSM_BUS_RPM)
sel_cd->mas[info->node_info->masterp[i]].dirty = 1;
- else
+ else {
+ if (!info->node_info->qport) {
+ MSM_BUS_DBG("No qos ports to update!\n");
+ break;
+ }
+ qos_bw.bw = sel_cd->mas[info->node_info->masterp[i]].
+ bw;
+ qos_bw.ws = info->node_info->ws;
msm_bus_noc_set_qos_bw(ninfo,
info->node_info->qport[i],
info->node_info->perm_mode, &qos_bw);
+ MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n",
+ qos_bw.ws);
+ }
}
+skip_mas_bw:
ports = hop->node_info->num_sports;
if (ports == 0) {
- MSM_BUS_ERR("\nDIVIDE BY 0, hop: %d\n",
+ MSM_BUS_DBG("\nDIVIDE BY 0, hop: %d\n",
hop->node_info->priv_id);
return;
}
@@ -562,7 +570,7 @@
sel_cd->slv[hop->node_info->slavep[i]].bw += bw;
sel_cd->slv[hop->node_info->slavep[i]].hw_id =
hop->node_info->slv_hw_id;
- MSM_BUS_DBG("NOC: Update slave_bw for ID: %d -> %ld\n",
+ MSM_BUS_DBG("NOC: Update slave_bw for ID: %d -> %llu\n",
hop->node_info->priv_id,
sel_cd->slv[hop->node_info->slavep[i]].bw);
MSM_BUS_DBG("NOC: Update slave_bw for hw_id: %d, index: %d\n",
@@ -581,6 +589,7 @@
*fab_pdata, void *hw_data, void **cdata)
{
MSM_BUS_DBG("\nReached NOC Commit\n");
+ msm_bus_remote_hw_commit(fab_pdata, hw_data, cdata);
return 0;
}
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c b/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
index 4653431..2213132 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
@@ -946,6 +946,12 @@
return status;
}
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+ *fab_pdata, void *hw_data, void **cdata)
+{
+ return 0;
+}
+
int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
struct msm_bus_hw_algorithm *hw_algo)
{
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_rpm_smd.c b/arch/arm/mach-msm/msm_bus/msm_bus_rpm_smd.c
new file mode 100644
index 0000000..88fab96
--- /dev/null
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_rpm_smd.c
@@ -0,0 +1,238 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include "msm_bus_core.h"
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/rpm-smd.h>
+
+/* Stubs for backward compatibility */
+void msm_bus_rpm_set_mt_mask()
+{
+}
+
+bool msm_bus_rpm_is_mem_interleaved(void)
+{
+ return true;
+}
+
+struct commit_data {
+ struct msm_bus_node_hw_info *mas_arb;
+ struct msm_bus_node_hw_info *slv_arb;
+};
+
+#ifdef CONFIG_DEBUG_FS
+void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
+ void *cdata, int nmasters, int nslaves, int ntslaves)
+{
+ int c;
+ struct commit_data *cd = (struct commit_data *)cdata;
+
+ *curr += scnprintf(buf + *curr, max_size - *curr, "\nMas BW:\n");
+ for (c = 0; c < nmasters; c++)
+ *curr += scnprintf(buf + *curr, max_size - *curr,
+ "%d: %llu\t", cd->mas_arb[c].hw_id,
+ cd->mas_arb[c].bw);
+ *curr += scnprintf(buf + *curr, max_size - *curr, "\nSlave BW:\n");
+ for (c = 0; c < nslaves; c++) {
+ *curr += scnprintf(buf + *curr, max_size - *curr,
+ "%d: %llu\t", cd->slv_arb[c].hw_id,
+ cd->slv_arb[c].bw);
+ }
+}
+#endif
+
+static int msm_bus_rpm_compare_cdata(
+ struct msm_bus_fabric_registration *fab_pdata,
+ struct commit_data *cd1, struct commit_data *cd2)
+{
+ size_t n;
+ int ret;
+
+ n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nmasters * 2;
+ ret = memcmp(cd1->mas_arb, cd2->mas_arb, n);
+ if (ret) {
+ MSM_BUS_DBG("Master Arb Data not equal\n");
+ return ret;
+ }
+
+ n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nslaves * 2;
+ ret = memcmp(cd1->slv_arb, cd2->slv_arb, n);
+ if (ret) {
+		MSM_BUS_DBG("Slave Arb Data not equal\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int msm_bus_rpm_req(int ctx, uint32_t rsc_type, uint32_t key,
+ struct msm_bus_node_hw_info *hw_info, bool valid)
+{
+ struct msm_rpm_request *rpm_req;
+ int ret = 0, msg_id;
+
+ if (ctx == ACTIVE_CTX)
+ ctx = MSM_RPM_CTX_ACTIVE_SET;
+ else if (ctx == DUAL_CTX)
+ ctx = MSM_RPM_CTX_SLEEP_SET;
+
+ rpm_req = msm_rpm_create_request(ctx, rsc_type, hw_info->hw_id, 1);
+ if (rpm_req == NULL) {
+ MSM_BUS_WARN("RPM: Couldn't create RPM Request\n");
+ return -ENXIO;
+ }
+
+ if (valid) {
+ ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)
+ &hw_info->bw, (int)(sizeof(uint64_t)));
+ if (ret) {
+ MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n",
+ rsc_type);
+ return ret;
+ }
+
+ MSM_BUS_DBG("Added Key: %d, Val: %llu, size: %d\n", key,
+ hw_info->bw, sizeof(uint64_t));
+ } else {
+ /* Invalidate RPM requests */
+ ret = msm_rpm_add_kvp_data(rpm_req, 0, NULL, 0);
+ if (ret) {
+ MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n",
+ rsc_type);
+ return ret;
+ }
+ }
+
+ msg_id = msm_rpm_send_request(rpm_req);
+ if (!msg_id) {
+ MSM_BUS_WARN("RPM: No message ID for req\n");
+ return -ENXIO;
+ }
+
+ ret = msm_rpm_wait_for_ack(msg_id);
+ if (ret) {
+ MSM_BUS_WARN("RPM: Ack failed\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration
+ *fab_pdata, int ctx, void *rpm_data,
+ struct commit_data *cd, bool valid)
+{
+ int i, status = 0, rsc_type, key;
+
+ MSM_BUS_DBG("Context: %d\n", ctx);
+ rsc_type = RPM_BUS_MASTER_REQ;
+ key = RPM_MASTER_FIELD_BW;
+ for (i = 0; i < fab_pdata->nmasters; i++) {
+ if (cd->mas_arb[i].dirty) {
+ MSM_BUS_DBG("MAS HWID: %d, BW: %llu DIRTY: %d\n",
+ cd->mas_arb[i].hw_id,
+ cd->mas_arb[i].bw,
+ cd->mas_arb[i].dirty);
+ status = msm_bus_rpm_req(ctx, rsc_type, key,
+ &cd->mas_arb[i], valid);
+ if (status) {
+ MSM_BUS_ERR("RPM: Req fail: mas:%d, bw:%llu\n",
+ cd->mas_arb[i].hw_id,
+ cd->mas_arb[i].bw);
+ break;
+ } else {
+ cd->mas_arb[i].dirty = false;
+ }
+ }
+ }
+
+ rsc_type = RPM_BUS_SLAVE_REQ;
+ key = RPM_SLAVE_FIELD_BW;
+ for (i = 0; i < fab_pdata->nslaves; i++) {
+ if (cd->slv_arb[i].dirty) {
+ MSM_BUS_DBG("SLV HWID: %d, BW: %llu DIRTY: %d\n",
+ cd->slv_arb[i].hw_id,
+ cd->slv_arb[i].bw,
+ cd->slv_arb[i].dirty);
+ status = msm_bus_rpm_req(ctx, rsc_type, key,
+ &cd->slv_arb[i], valid);
+ if (status) {
+ MSM_BUS_ERR("RPM: Req fail: slv:%d, bw:%llu\n",
+ cd->slv_arb[i].hw_id,
+ cd->slv_arb[i].bw);
+ break;
+ } else {
+ cd->slv_arb[i].dirty = false;
+ }
+ }
+ }
+
+ return status;
+}
+
+/**
+* msm_bus_remote_hw_commit() - Commit the arbitration data to RPM
+* @fab_pdata: Fabric registration data for which the arbitration data is committed
+**/
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+ *fab_pdata, void *hw_data, void **cdata)
+{
+
+ int ret;
+ bool valid;
+ struct commit_data *dual_cd, *act_cd;
+ void *rpm_data = hw_data;
+
+ MSM_BUS_DBG("\nReached RPM Commit\n");
+ dual_cd = (struct commit_data *)cdata[DUAL_CTX];
+ act_cd = (struct commit_data *)cdata[ACTIVE_CTX];
+
+ /*
+ * If the arb data for active set and sleep set is
+ * different, commit both sets.
+ * If the arb data for active set and sleep set is
+ * the same, invalidate the sleep set.
+ */
+ ret = msm_bus_rpm_compare_cdata(fab_pdata, act_cd, dual_cd);
+ if (!ret)
+ /* Invalidate sleep set.*/
+ valid = false;
+ else
+ valid = true;
+
+ ret = msm_bus_rpm_commit_arb(fab_pdata, DUAL_CTX, rpm_data,
+ dual_cd, valid);
+ if (ret)
+		MSM_BUS_ERR("Error committing fabric:%d in %d ctx\n",
+ fab_pdata->id, DUAL_CTX);
+
+ valid = true;
+ ret = msm_bus_rpm_commit_arb(fab_pdata, ACTIVE_CTX, rpm_data, act_cd,
+ valid);
+ if (ret)
+		MSM_BUS_ERR("Error committing fabric:%d in %d ctx\n",
+ fab_pdata->id, ACTIVE_CTX);
+
+ return ret;
+}
+
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo)
+{
+ if (!pdata->ahb)
+ pdata->rpm_enabled = 1;
+ return 0;
+}
diff --git a/arch/arm/mach-msm/msm_dsps.c b/arch/arm/mach-msm/msm_dsps.c
index 200d717..6dde576 100644
--- a/arch/arm/mach-msm/msm_dsps.c
+++ b/arch/arm/mach-msm/msm_dsps.c
@@ -722,6 +722,8 @@
.unlocked_ioctl = dsps_ioctl,
};
+static struct subsys_device *dsps_dev;
+
/**
* Fatal error handler
* Resets DSPS.
@@ -735,7 +737,7 @@
pr_err("%s: DSPS already resetting. Count %d\n", __func__,
atomic_read(&drv->crash_in_progress));
} else {
- subsystem_restart("dsps");
+ subsystem_restart_dev(dsps_dev);
}
}
@@ -765,7 +767,7 @@
* called by the restart notifier
*
*/
-static int dsps_shutdown(const struct subsys_data *subsys)
+static int dsps_shutdown(const struct subsys_desc *subsys)
{
pr_debug("%s\n", __func__);
disable_irq_nosync(drv->wdog_irq);
@@ -779,7 +781,7 @@
* called by the restart notifier
*
*/
-static int dsps_powerup(const struct subsys_data *subsys)
+static int dsps_powerup(const struct subsys_desc *subsys)
{
pr_debug("%s\n", __func__);
dsps_power_on_handler();
@@ -794,7 +796,7 @@
* called by the restart notifier
*
*/
-static void dsps_crash_shutdown(const struct subsys_data *subsys)
+static void dsps_crash_shutdown(const struct subsys_desc *subsys)
{
pr_debug("%s\n", __func__);
dsps_crash_shutdown_g = 1;
@@ -806,7 +808,7 @@
* called by the restart notifier
*
*/
-static int dsps_ramdump(int enable, const struct subsys_data *subsys)
+static int dsps_ramdump(int enable, const struct subsys_desc *subsys)
{
int ret = 0;
pr_debug("%s\n", __func__);
@@ -838,7 +840,7 @@
return ret;
}
-static struct subsys_data dsps_ssrops = {
+static struct subsys_desc dsps_ssrops = {
.name = "dsps",
.shutdown = dsps_shutdown,
.powerup = dsps_powerup,
@@ -919,9 +921,10 @@
goto smsm_register_err;
}
- ret = ssr_register_subsystem(&dsps_ssrops);
- if (ret) {
- pr_err("%s: ssr_register_subsystem fail %d\n", __func__,
+ dsps_dev = subsys_register(&dsps_ssrops);
+ if (IS_ERR(dsps_dev)) {
+ ret = PTR_ERR(dsps_dev);
+ pr_err("%s: subsys_register fail %d\n", __func__,
ret);
goto ssr_register_err;
}
@@ -953,6 +956,7 @@
{
pr_debug("%s.\n", __func__);
+ subsys_unregister(dsps_dev);
dsps_power_off_handler();
dsps_free_resources();
diff --git a/arch/arm/mach-msm/msm_memory_dump.c b/arch/arm/mach-msm/msm_memory_dump.c
new file mode 100644
index 0000000..4f48a0d
--- /dev/null
+++ b/arch/arm/mach-msm/msm_memory_dump.c
@@ -0,0 +1,78 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <asm/cacheflush.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/export.h>
+#include <mach/msm_iomap.h>
+#include <mach/msm_memory_dump.h>
+
+
+/*TODO: Needs to be set to correct value */
+#define DUMP_TABLE_OFFSET 0x20
+#define MSM_DUMP_TABLE_VERSION MK_TABLE(1, 0)
+
+static struct msm_memory_dump mem_dump_data;
+
+static int msm_memory_dump_panic(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ writel_relaxed(0, MSM_IMEM_BASE + DUMP_TABLE_OFFSET);
+ return 0;
+}
+
+static struct notifier_block msm_memory_dump_blk = {
+ .notifier_call = msm_memory_dump_panic,
+};
+
+int msm_dump_table_register(struct msm_client_dump *client_entry)
+{
+ struct msm_client_dump *entry;
+ struct msm_dump_table *table = mem_dump_data.dump_table_ptr;
+
+ if (!table || table->num_entries >= MAX_NUM_CLIENTS)
+ return -EINVAL;
+ entry = &table->client_entries[table->num_entries];
+ entry->id = client_entry->id;
+ entry->start_addr = client_entry->start_addr;
+ entry->end_addr = client_entry->end_addr;
+ table->num_entries++;
+ /* flush cache */
+ dmac_flush_range(table, table + sizeof(struct msm_dump_table));
+ return 0;
+}
+EXPORT_SYMBOL(msm_dump_table_register);
+
+static int __init init_memory_dump(void)
+{
+ struct msm_dump_table *table;
+
+ mem_dump_data.dump_table_ptr = kzalloc(sizeof(struct msm_dump_table),
+ GFP_KERNEL);
+ if (!mem_dump_data.dump_table_ptr) {
+ printk(KERN_ERR "unable to allocate memory for dump table\n");
+ return -ENOMEM;
+ }
+ table = mem_dump_data.dump_table_ptr;
+ table->version = MSM_DUMP_TABLE_VERSION;
+ mem_dump_data.dump_table_phys = virt_to_phys(table);
+ /* TODO: Need to write physical address of table to IMEM */
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &msm_memory_dump_blk);
+ printk(KERN_INFO "MSM Memory Dump table set up\n");
+ return 0;
+}
+
+early_initcall(init_memory_dump);
+
diff --git a/arch/arm/mach-msm/msm_rtb.c b/arch/arm/mach-msm/msm_rtb.c
index 9dbf9c1..a60c213 100644
--- a/arch/arm/mach-msm/msm_rtb.c
+++ b/arch/arm/mach-msm/msm_rtb.c
@@ -16,11 +16,13 @@
#include <linux/kernel.h>
#include <linux/memory_alloc.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/atomic.h>
+#include <linux/of.h>
#include <asm/io.h>
#include <asm-generic/sizes.h>
#include <mach/memory.h>
@@ -31,6 +33,8 @@
#define SENTINEL_BYTE_2 0xAA
#define SENTINEL_BYTE_3 0xFF
+#define RTB_COMPAT_STR "qcom,msm-rtb"
+
/* Write
* 1) 3 bytes sentinel
* 2) 1 bytes of log type
@@ -227,8 +231,22 @@
#if defined(CONFIG_MSM_RTB_SEPARATE_CPUS)
unsigned int cpu;
#endif
+ int ret;
- msm_rtb.size = d->size;
+ if (!pdev->dev.of_node) {
+ msm_rtb.size = d->size;
+ } else {
+ int size;
+
+ ret = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,memory-reservation-size",
+ &size);
+
+ if (ret < 0)
+ return ret;
+
+ msm_rtb.size = size;
+ }
if (msm_rtb.size <= 0 || msm_rtb.size > SZ_1M)
return -EINVAL;
@@ -275,10 +293,17 @@
return 0;
}
+static struct of_device_id msm_match_table[] = {
+ {.compatible = RTB_COMPAT_STR},
+ {},
+};
+EXPORT_COMPAT(RTB_COMPAT_STR);
+
static struct platform_driver msm_rtb_driver = {
.driver = {
.name = "msm_rtb",
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .of_match_table = msm_match_table
},
};
diff --git a/arch/arm/mach-msm/msm_smem_iface.c b/arch/arm/mach-msm/msm_smem_iface.c
new file mode 100644
index 0000000..b09fda5
--- /dev/null
+++ b/arch/arm/mach-msm/msm_smem_iface.c
@@ -0,0 +1,44 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_smem_iface.h"
+
+/**
+ * msm_smem_get_cpr_info() - Copy Core Power Reduction (CPR) driver specific
+ * data from Shared memory (SMEM).
+ * @cpr_info: Pointer to CPR data. Memory to be allocated and freed by
+ * calling function.
+ *
+ * Copy CPR specific data from SMEM to cpr_info.
+ */
+
+void msm_smem_get_cpr_info(struct cpr_info_type *cpr_info)
+{
+ struct boot_info_for_apps *boot_info;
+ struct cpr_info_type *temp_cpr_info;
+ uint32_t smem_boot_info_size;
+
+ boot_info = smem_get_entry(SMEM_BOOT_INFO_FOR_APPS,
+ &smem_boot_info_size);
+ BUG_ON(!boot_info);
+ if (smem_boot_info_size < sizeof(struct boot_info_for_apps)) {
+ pr_err("%s: Shared boot info data structure too small!\n",
+ __func__);
+ BUG();
+ } else {
+ pr_debug("%s: Shared boot info available.\n", __func__);
+ }
+ temp_cpr_info = (struct cpr_info_type *) &(boot_info->cpr_info);
+ cpr_info->ring_osc = temp_cpr_info->ring_osc;
+ cpr_info->turbo_quot = temp_cpr_info->turbo_quot;
+ cpr_info->pvs_fuse = temp_cpr_info->pvs_fuse;
+}
diff --git a/arch/arm/mach-msm/msm_smem_iface.h b/arch/arm/mach-msm/msm_smem_iface.h
new file mode 100644
index 0000000..2da0232
--- /dev/null
+++ b/arch/arm/mach-msm/msm_smem_iface.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_SMEM_IFACE_H
+#define __ARCH_ARM_MACH_MSM_SMEM_IFACE_H
+
+#include <mach/msm_smsm.h>
+#include "smd_private.h"
+
+#define MAX_KEY_EVENTS 10
+#define MAX_SEC_KEY_PAYLOAD 32
+
+struct boot_shared_ssd_status_info {
+ uint32_t update_status; /* To check if process is successful or not */
+ uint32_t bl_error_code; /* To indicate error code in bootloader */
+};
+
+struct boot_symmetric_key_info {
+ uint32_t key_len; /* Encrypted Symmetric Key Length */
+ uint32_t iv_len; /* Initialization Vector Length */
+ uint8_t key[MAX_SEC_KEY_PAYLOAD]; /* Encrypted Symmetric Key */
+ uint8_t iv[MAX_SEC_KEY_PAYLOAD]; /* Initialization Vector */
+};
+
+struct cpr_info_type {
+ uint8_t ring_osc; /* CPR FUSE [0]: TURBO RO SEL BIT */
+ uint8_t turbo_quot; /* CPRFUSE[1:7] : TURBO QUOT*/
+ uint8_t pvs_fuse; /* TURBO PVS FUSE */
+};
+
+struct boot_info_for_apps {
+ uint32_t apps_image_start_addr; /* apps image start address */
+ uint32_t boot_flags; /* bit mask of upto 32 flags */
+ struct boot_shared_ssd_status_info ssd_status_info; /* SSD status */
+ struct boot_symmetric_key_info key_info;
+ uint16_t boot_keys_pressed[MAX_KEY_EVENTS]; /* Log of key presses */
+ uint32_t timetick; /* Modem tick timer value before apps out of reset */
+ struct cpr_info_type cpr_info;
+ uint8_t PAD[25];
+};
+
+void msm_smem_get_cpr_info(struct cpr_info_type *cpr_info);
+
+#endif /* __ARCH_ARM_MACH_MSM_SMEM_IFACE_H */
diff --git a/arch/arm/mach-msm/ocmem.c b/arch/arm/mach-msm/ocmem.c
index 43c7fc8..753f6fb 100644
--- a/arch/arm/mach-msm/ocmem.c
+++ b/arch/arm/mach-msm/ocmem.c
@@ -10,7 +10,6 @@
* GNU General Public License for more details.
*/
-#include <mach/ocmem_priv.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -18,10 +17,12 @@
#include <linux/rbtree.h>
#include <linux/genalloc.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <mach/ocmem_priv.h>
/* This code is to temporarily work around the default state of OCMEM
regions in Virtio. These registers will be read from DT in a subsequent
@@ -29,8 +30,9 @@
*/
#define OCMEM_REGION_CTL_BASE 0xFDD0003C
-#define OCMEM_REGION_CTL_SIZE 0xC
+#define OCMEM_REGION_CTL_SIZE 0xFD0
#define REGION_ENABLE 0x00003333
+#define GRAPHICS_REGION_CTL (0x17F000)
struct ocmem_partition {
const char *name;
@@ -41,14 +43,6 @@
unsigned int p_tail;
};
-struct ocmem_plat_data {
- void __iomem *vbase;
- unsigned long size;
- unsigned long base;
- struct ocmem_partition *parts;
- unsigned nr_parts;
-};
-
struct ocmem_zone zones[OCMEM_CLIENT_MAX];
struct ocmem_zone *get_zone(unsigned id)
@@ -62,6 +56,7 @@
static struct ocmem_plat_data *ocmem_pdata;
#define CLIENT_NAME_MAX 10
+
/* Must be in sync with enum ocmem_client */
static const char *client_names[OCMEM_CLIENT_MAX] = {
"graphics",
@@ -71,7 +66,7 @@
"voice",
"lp_audio",
"sensors",
- "blast",
+ "other_os",
};
struct ocmem_quota_table {
@@ -92,7 +87,7 @@
{ "voice", OCMEM_VOICE, 0x0, 0x0, 0x0, 0 },
{ "hp_audio", OCMEM_HP_AUDIO, 0x0, 0x0, 0x0, 0},
{ "lp_audio", OCMEM_LP_AUDIO, 0x80000, 0xA0000, 0xA0000, 0},
- { "blast", OCMEM_BLAST, 0x120000, 0x20000, 0x20000, 0},
+ { "other_os", OCMEM_OTHER_OS, 0x120000, 0x20000, 0x20000, 0},
{ "sensors", OCMEM_SENSORS, 0x140000, 0x40000, 0x40000, 0},
};
@@ -106,6 +101,18 @@
return -EINVAL;
}
+int check_id(int id)
+{
+ return (id < OCMEM_CLIENT_MAX && id >= OCMEM_GRAPHICS);
+}
+
+const char *get_name(int id)
+{
+ if (!check_id(id))
+ return NULL;
+ return client_names[id];
+}
+
inline unsigned long phys_to_offset(unsigned long addr)
{
if (!ocmem_pdata)
@@ -183,8 +190,216 @@
return pdata;
}
+int __devinit of_ocmem_parse_regions(struct device *dev,
+ struct ocmem_partition **part)
+{
+ const char *name;
+ struct device_node *child = NULL;
+ int nr_parts = 0;
+ int i = 0;
+ int rc = 0;
+ int id = -1;
+
+ /* Compute total partitions */
+ for_each_child_of_node(dev->of_node, child)
+ nr_parts++;
+
+ if (nr_parts == 0)
+ return 0;
+
+ *part = devm_kzalloc(dev, nr_parts * sizeof(**part),
+ GFP_KERNEL);
+
+ if (!*part)
+ return -ENOMEM;
+
+ for_each_child_of_node(dev->of_node, child)
+ {
+ const u32 *addr;
+ u32 min;
+ u64 size;
+ u64 p_start;
+
+ addr = of_get_address(child, 0, &size, NULL);
+
+ if (!addr) {
+ dev_err(dev, "Invalid addr for partition %d, ignored\n",
+ i);
+ continue;
+ }
+
+ rc = of_property_read_u32(child, "qcom,ocmem-part-min", &min);
+
+ if (rc) {
+ dev_err(dev, "No min for partition %d, ignored\n", i);
+ continue;
+ }
+
+ rc = of_property_read_string(child, "qcom,ocmem-part-name",
+ &name);
+
+ if (rc) {
+ dev_err(dev, "No name for partition %d, ignored\n", i);
+ continue;
+ }
+
+ id = get_id(name);
+
+ if (id < 0) {
+ dev_err(dev, "Ignoring invalid partition %s\n", name);
+ continue;
+ }
+
+ p_start = of_translate_address(child, addr);
+
+ if (p_start == OF_BAD_ADDR) {
+ dev_err(dev, "Invalid offset for partition %d\n", i);
+ continue;
+ }
+
+ (*part)[i].p_start = p_start;
+ (*part)[i].p_size = size;
+ (*part)[i].id = id;
+ (*part)[i].name = name;
+ (*part)[i].p_min = min;
+ (*part)[i].p_tail = of_property_read_bool(child, "tail");
+ i++;
+ }
+
+ return i;
+}
+
static struct ocmem_plat_data *parse_dt_config(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct ocmem_plat_data *pdata = NULL;
+ struct ocmem_partition *parts = NULL;
+ struct resource *ocmem_irq;
+ struct resource *dm_irq;
+ struct resource *ocmem_mem;
+ struct resource *reg_base;
+ struct resource *br_base;
+ struct resource *dm_base;
+ struct resource *ocmem_mem_io;
+ unsigned nr_parts = 0;
+ unsigned nr_regions = 0;
+
+ pdata = devm_kzalloc(dev, sizeof(struct ocmem_plat_data),
+ GFP_KERNEL);
+
+ if (!pdata) {
+ dev_err(dev, "Unable to allocate memory for platform data\n");
+ return NULL;
+ }
+
+ ocmem_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ocmem_physical");
+ if (!ocmem_mem) {
+ dev_err(dev, "No OCMEM memory resource\n");
+ return NULL;
+ }
+
+ ocmem_mem_io = request_mem_region(ocmem_mem->start,
+ resource_size(ocmem_mem), pdev->name);
+
+ if (!ocmem_mem_io) {
+ dev_err(dev, "Could not claim OCMEM memory\n");
+ return NULL;
+ }
+
+ pdata->base = ocmem_mem->start;
+ pdata->size = resource_size(ocmem_mem);
+ pdata->vbase = devm_ioremap_nocache(dev, ocmem_mem->start,
+ resource_size(ocmem_mem));
+ if (!pdata->vbase) {
+ dev_err(dev, "Could not ioremap ocmem memory\n");
+ return NULL;
+ }
+
+ reg_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ocmem_ctrl_physical");
+ if (!reg_base) {
+ dev_err(dev, "No OCMEM register resource\n");
+ return NULL;
+ }
+
+ pdata->reg_base = devm_ioremap_nocache(dev, reg_base->start,
+ resource_size(reg_base));
+ if (!pdata->reg_base) {
+ dev_err(dev, "Could not ioremap register map\n");
+ return NULL;
+ }
+
+ br_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "br_ctrl_physical");
+ if (!br_base) {
+ dev_err(dev, "No OCMEM BR resource\n");
+ return NULL;
+ }
+
+ pdata->br_base = devm_ioremap_nocache(dev, br_base->start,
+ resource_size(br_base));
+ if (!pdata->br_base) {
+ dev_err(dev, "Could not ioremap BR resource\n");
+ return NULL;
+ }
+
+ dm_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "dm_ctrl_physical");
+ if (!dm_base) {
+ dev_err(dev, "No OCMEM DM resource\n");
+ return NULL;
+ }
+
+ pdata->dm_base = devm_ioremap_nocache(dev, dm_base->start,
+ resource_size(dm_base));
+ if (!pdata->dm_base) {
+ dev_err(dev, "Could not ioremap DM resource\n");
+ return NULL;
+ }
+
+ ocmem_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "ocmem_irq");
+
+ if (!ocmem_irq) {
+ dev_err(dev, "No OCMEM IRQ resource\n");
+ return NULL;
+ }
+
+ dm_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "dm_irq");
+
+ if (!dm_irq) {
+ dev_err(dev, "No DM IRQ resource\n");
+ return NULL;
+ }
+
+ if (of_property_read_u32(node, "qcom,ocmem-num-regions",
+ &nr_regions)) {
+ dev_err(dev, "No OCMEM memory regions specified\n");
+ }
+
+ if (nr_regions == 0) {
+ dev_err(dev, "No hardware memory regions found\n");
+ return NULL;
+ }
+
+ /* Figure out the number of partitions */
+ nr_parts = of_ocmem_parse_regions(dev, &parts);
+ if (nr_parts <= 0) {
+ dev_err(dev, "No valid OCMEM partitions found\n");
+ goto pdata_error;
+ } else
+ dev_dbg(dev, "Found %d ocmem partitions\n", nr_parts);
+
+ pdata->nr_parts = nr_parts;
+ pdata->parts = parts;
+ pdata->nr_regions = nr_regions;
+ pdata->ocmem_irq = ocmem_irq->start;
+ pdata->dm_irq = dm_irq->start;
+ return pdata;
+pdata_error:
return NULL;
}
@@ -225,7 +440,7 @@
return -EBUSY;
}
- start = pdata->base + part->p_start;
+ start = part->p_start;
ret = gen_pool_add(zone->z_pool, start,
part->p_size, -1);
@@ -254,7 +469,7 @@
zone->owner = part->id;
zone->active_regions = 0;
zone->max_regions = 0;
- INIT_LIST_HEAD(&zone->region_list);
+ INIT_LIST_HEAD(&zone->req_list);
zone->z_ops = z_ops;
if (part->p_tail) {
z_ops->allocate = allocate_tail;
@@ -273,7 +488,7 @@
zone->z_end, part->p_size/SZ_1K);
}
- dev_info(dev, "Total active zones = %d\n", active_zones);
+ dev_dbg(dev, "Total active zones = %d\n", active_zones);
return 0;
}
@@ -282,7 +497,7 @@
struct device *dev = &pdev->dev;
void *ocmem_region_vbase = NULL;
- if (!pdev->dev.of_node->child) {
+ if (!pdev->dev.of_node) {
dev_info(dev, "Missing Configuration in Device Tree\n");
ocmem_pdata = parse_static_config(pdev);
} else {
@@ -297,6 +512,8 @@
BUG_ON(!IS_ALIGNED(ocmem_pdata->size, PAGE_SIZE));
BUG_ON(!IS_ALIGNED(ocmem_pdata->base, PAGE_SIZE));
+ dev_info(dev, "OCMEM Virtual addr %p\n", ocmem_pdata->vbase);
+
platform_set_drvdata(pdev, ocmem_pdata);
if (ocmem_zone_init(pdev))
@@ -316,7 +533,14 @@
writel_relaxed(REGION_ENABLE, ocmem_region_vbase);
writel_relaxed(REGION_ENABLE, ocmem_region_vbase + 4);
writel_relaxed(REGION_ENABLE, ocmem_region_vbase + 8);
- dev_info(dev, "initialized successfully\n");
+ /* Enable the ocmem graphics mpU as a workaround in Virtio */
+ /* This will be programmed by TZ after TZ support is integrated */
+ writel_relaxed(GRAPHICS_REGION_CTL, ocmem_region_vbase + 0xFCC);
+
+ if (ocmem_rdm_init(pdev))
+ return -EBUSY;
+
+ dev_dbg(dev, "initialized successfully\n");
return 0;
}
@@ -326,7 +550,7 @@
}
static struct of_device_id msm_ocmem_dt_match[] = {
- { .compatible = "qcom,msm_ocmem",
+ { .compatible = "qcom,msm-ocmem",
},
{}
};
diff --git a/arch/arm/mach-msm/ocmem_api.c b/arch/arm/mach-msm/ocmem_api.c
index bed13de..bb32fca 100644
--- a/arch/arm/mach-msm/ocmem_api.c
+++ b/arch/arm/mach-msm/ocmem_api.c
@@ -13,10 +13,8 @@
#include <linux/slab.h>
#include <mach/ocmem_priv.h>
-static inline int check_id(int id)
-{
- return (id < OCMEM_CLIENT_MAX && id >= OCMEM_GRAPHICS);
-}
+static DEFINE_MUTEX(ocmem_eviction_lock);
+static DECLARE_BITMAP(evicted, OCMEM_CLIENT_MAX);
static struct ocmem_handle *generate_handle(void)
{
@@ -61,6 +59,24 @@
return 0;
}
+static int __ocmem_shrink(int id, struct ocmem_buf *buf, unsigned long len)
+{
+ int ret = 0;
+ struct ocmem_handle *handle = buffer_to_handle(buf);
+
+ if (!handle)
+ return -EINVAL;
+
+ mutex_lock(&handle->handle_mutex);
+ ret = process_shrink(id, handle, len);
+ mutex_unlock(&handle->handle_mutex);
+
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
static struct ocmem_buf *__ocmem_allocate_range(int id, unsigned long min,
unsigned long max, unsigned long step, bool block, bool wait)
{
@@ -218,6 +234,15 @@
return __ocmem_free(client_id, buffer);
}
+int ocmem_shrink(int client_id, struct ocmem_buf *buffer, unsigned long len)
+{
+ if (!buffer)
+ return -EINVAL;
+ if (len >= buffer->len)
+ return -EINVAL;
+ return __ocmem_shrink(client_id, buffer, len);
+}
+
int pre_validate_chunk_list(struct ocmem_map_list *list)
{
int i = 0;
@@ -236,8 +261,12 @@
for (i = 0; i < list->num_chunks; i++) {
if (!chunks[i].ddr_paddr ||
- chunks[i].size < MIN_CHUNK_SIZE)
+ chunks[i].size < MIN_CHUNK_SIZE ||
+ !IS_ALIGNED(chunks[i].size, MIN_CHUNK_SIZE)) {
+ pr_err("Invalid ocmem chunk at index %d (p: %lx, size %lx)\n",
+ i, chunks[i].ddr_paddr, chunks[i].size);
return -EINVAL;
+ }
}
return 0;
}
@@ -265,7 +294,7 @@
return -EINVAL;
}
- if (!pre_validate_chunk_list(list))
+ if (pre_validate_chunk_list(list) != 0)
return -EINVAL;
handle = buffer_to_handle(buffer);
@@ -303,14 +332,10 @@
return -EINVAL;
}
- if (!pre_validate_chunk_list(list))
+ if (pre_validate_chunk_list(list) != 0)
return -EINVAL;
handle = buffer_to_handle(buffer);
-
- if (!handle)
- return -EINVAL;
-
mutex_lock(&handle->handle_mutex);
ret = process_xfer(client_id, handle, list, TO_DDR);
mutex_unlock(&handle->handle_mutex);
@@ -325,3 +350,52 @@
}
return process_quota(client_id);
}
+
+/* Synchronous eviction/restore calls */
+/* Only a single eviction or restoration is allowed */
+/* Evictions/Restorations cannot be concurrent with other maps */
+int ocmem_evict(int client_id)
+{
+ int ret = 0;
+
+ if (!check_id(client_id)) {
+ pr_err("ocmem: Invalid client id: %d\n", client_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ocmem_eviction_lock);
+ if (test_bit(client_id, evicted)) {
+ pr_err("ocmem: Previous eviction was not restored by %d\n",
+ client_id);
+ mutex_unlock(&ocmem_eviction_lock);
+ return -EINVAL;
+ }
+
+ ret = process_evict(client_id);
+ if (ret == 0)
+ set_bit(client_id, evicted);
+
+ mutex_unlock(&ocmem_eviction_lock);
+ return ret;
+}
+
+int ocmem_restore(int client_id)
+{
+ int ret = 0;
+
+ if (!check_id(client_id)) {
+ pr_err("ocmem: Invalid client id: %d\n", client_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ocmem_eviction_lock);
+ if (!test_bit(client_id, evicted)) {
+ pr_err("ocmem: No previous eviction by %d\n", client_id);
+ mutex_unlock(&ocmem_eviction_lock);
+ return -EINVAL;
+ }
+ ret = process_restore(client_id);
+ clear_bit(client_id, evicted);
+ mutex_unlock(&ocmem_eviction_lock);
+ return ret;
+}
diff --git a/arch/arm/mach-msm/ocmem_notifier.c b/arch/arm/mach-msm/ocmem_notifier.c
index 58ad3d9..9fbcd73 100644
--- a/arch/arm/mach-msm/ocmem_notifier.c
+++ b/arch/arm/mach-msm/ocmem_notifier.c
@@ -24,11 +24,6 @@
unsigned listeners;
} notifiers[OCMEM_CLIENT_MAX];
-static int check_id(int id)
-{
- return (id < OCMEM_CLIENT_MAX && id >= OCMEM_GRAPHICS);
-}
-
int check_notifier(int id)
{
int ret = 0;
@@ -75,7 +70,8 @@
return ret;
}
-void *ocmem_notifier_register(int client_id, struct notifier_block *nb)
+struct ocmem_notifier *ocmem_notifier_register(int client_id,
+ struct notifier_block *nb)
{
int ret = 0;
@@ -115,13 +111,12 @@
}
EXPORT_SYMBOL(ocmem_notifier_register);
-int ocmem_notifier_unregister(void *hndl, struct notifier_block *nb)
+int ocmem_notifier_unregister(struct ocmem_notifier *nc_hndl,
+ struct notifier_block *nb)
{
int ret = 0;
- struct ocmem_notifier *nc_hndl = (struct ocmem_notifier *) hndl;
-
if (!nc_hndl) {
pr_err("ocmem: Invalid notification handle\n");
return -EINVAL;
diff --git a/arch/arm/mach-msm/ocmem_rdm.c b/arch/arm/mach-msm/ocmem_rdm.c
new file mode 100644
index 0000000..6b93d04
--- /dev/null
+++ b/arch/arm/mach-msm/ocmem_rdm.c
@@ -0,0 +1,238 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/genalloc.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <mach/ocmem_priv.h>
+
+#define RDM_MAX_ENTRIES 32
+#define RDM_MAX_CLIENTS 2
+
+/* Data Mover Parameters */
+#define DM_BLOCK_128 0x0
+#define DM_BLOCK_256 0x1
+#define DM_BR_ID_LPASS 0x0
+#define DM_BR_ID_GPS 0x1
+
+#define DM_INTR_CLR (0x8)
+#define DM_INTR_MASK (0xC)
+#define DM_GEN_STATUS (0x10)
+#define DM_STATUS (0x14)
+#define DM_CTRL (0x1000)
+#define DM_TBL_BASE (0x1010)
+#define DM_TBL_IDX(x) ((x) * 0x18)
+#define DM_TBL_n(x) (DM_TBL_BASE + (DM_TBL_IDX(x)))
+#define DM_TBL_n_offset(x) DM_TBL_n(x)
+#define DM_TBL_n_size(x) (DM_TBL_n(x)+0x4)
+#define DM_TBL_n_paddr(x) (DM_TBL_n(x)+0x8)
+#define DM_TBL_n_ctrl(x) (DM_TBL_n(x)+0x10)
+
+#define BR_CTRL (0x0)
+#define BR_CLIENT_BASE (0x4)
+#define BR_CLIENT_n_IDX(x) ((x) * 0x4)
+#define BR_CLIENT_n_ctrl(x) (BR_CLIENT_BASE + (BR_CLIENT_n_IDX(x)))
+#define BR_STATUS (0x14)
+/* 16 entries per client are supported */
+/* Use entries 0 - 15 for client0 */
+#define BR_CLIENT0_MASK (0x1000)
+/* Use entries 16- 31 for client1 */
+#define BR_CLIENT1_MASK (0x2010)
+
+#define BR_TBL_BASE (0x40)
+#define BR_TBL_IDX(x) ((x) * 0x18)
+#define BR_TBL_n(x) (BR_TBL_BASE + (BR_TBL_IDX(x)))
+#define BR_TBL_n_offset(x) BR_TBL_n(x)
+#define BR_TBL_n_size(x) (BR_TBL_n(x)+0x4)
+#define BR_TBL_n_paddr(x) (BR_TBL_n(x)+0x8)
+#define BR_TBL_n_ctrl(x) (BR_TBL_n(x)+0x10)
+
+/* Constants and Shifts */
+#define BR_TBL_ENTRY_ENABLE 0x1
+#define BR_TBL_START 0x0
+#define BR_TBL_END 0x8
+#define BR_RW_SHIFT 0x2
+
+#define DM_TBL_START 0x10
+#define DM_TBL_END 0x18
+#define DM_CLIENT_SHIFT 0x8
+#define DM_BR_ID_SHIFT 0x4
+#define DM_BR_BLK_SHIFT 0x1
+#define DM_DIR_SHIFT 0x0
+
+#define DM_DONE 0x1
+#define DM_INTR_ENABLE 0x0
+#define DM_INTR_DISABLE 0x1
+
+static void *br_base;
+static void *dm_base;
+
+static atomic_t dm_pending;
+static wait_queue_head_t dm_wq;
+/* Shadow tables for debug purposes */
+struct ocmem_br_table {
+ unsigned int offset;
+ unsigned int size;
+ unsigned int ddr_low;
+ unsigned int ddr_high;
+ unsigned int ctrl;
+} br_table[RDM_MAX_ENTRIES];
+
+/* DM Table replicates an entire BR table */
+/* Note: There are more than 1 BRs in the system */
+struct ocmem_dm_table {
+ unsigned int offset;
+ unsigned int size;
+ unsigned int ddr_low;
+ unsigned int ddr_high;
+ unsigned int ctrl;
+} dm_table[RDM_MAX_ENTRIES];
+
+/* Wrapper that will shadow these values later */
+static int ocmem_read(void *at)
+{
+ return readl_relaxed(at);
+}
+
+/* Wrapper that will shadow these values later */
+static int ocmem_write(unsigned long val, void *at)
+{
+ writel_relaxed(val, at);
+ return 0;
+}
+
+static inline int client_ctrl_id(int id)
+{
+ return (id == OCMEM_SENSORS) ? 1 : 0;
+}
+
+static inline int client_slot_start(int id)
+{
+
+ return client_ctrl_id(id) * 16;
+}
+
+static irqreturn_t ocmem_dm_irq_handler(int irq, void *dev_id)
+{
+ atomic_set(&dm_pending, 0);
+ ocmem_write(DM_INTR_DISABLE, dm_base + DM_INTR_CLR);
+ wake_up_interruptible(&dm_wq);
+ return IRQ_HANDLED;
+}
+
+/* Lock during transfers */
+int ocmem_rdm_transfer(int id, struct ocmem_map_list *clist,
+ unsigned long start, int direction)
+{
+ int num_chunks = clist->num_chunks;
+ int slot = client_slot_start(id);
+ int table_start = 0;
+ int table_end = 0;
+ int br_ctrl = 0;
+ int br_id = 0;
+ int dm_ctrl = 0;
+ int i = 0;
+ int j = 0;
+ int status = 0;
+
+ for (i = 0, j = slot; i < num_chunks; i++, j++) {
+
+ struct ocmem_chunk *chunk = &clist->chunks[i];
+ int sz = chunk->size;
+ int paddr = chunk->ddr_paddr;
+ int tbl_n_ctrl = 0;
+
+ tbl_n_ctrl |= BR_TBL_ENTRY_ENABLE;
+ if (chunk->ro)
+ tbl_n_ctrl |= (1 << BR_RW_SHIFT);
+
+ /* Table Entry n of BR and DM */
+ ocmem_write(start, br_base + BR_TBL_n_offset(j));
+ ocmem_write(sz, br_base + BR_TBL_n_size(j));
+ ocmem_write(paddr, br_base + BR_TBL_n_paddr(j));
+ ocmem_write(tbl_n_ctrl, br_base + BR_TBL_n_ctrl(j));
+
+ ocmem_write(start, dm_base + DM_TBL_n_offset(j));
+ ocmem_write(sz, dm_base + DM_TBL_n_size(j));
+ ocmem_write(paddr, dm_base + DM_TBL_n_paddr(j));
+ ocmem_write(tbl_n_ctrl, dm_base + DM_TBL_n_ctrl(j));
+
+ start += sz;
+ }
+
+ br_id = client_ctrl_id(id);
+ table_start = slot;
+ table_end = slot + num_chunks - 1;
+ br_ctrl |= (table_start << BR_TBL_START);
+ br_ctrl |= (table_end << BR_TBL_END);
+
+ ocmem_write(br_ctrl, (br_base + BR_CLIENT_n_ctrl(br_id)));
+ /* Enable BR */
+ ocmem_write(0x1, br_base + BR_CTRL);
+
+ /* Compute DM Control Value */
+ dm_ctrl |= (table_start << DM_TBL_START);
+ dm_ctrl |= (table_end << DM_TBL_END);
+
+ dm_ctrl |= (DM_BR_ID_LPASS << DM_BR_ID_SHIFT);
+ dm_ctrl |= (DM_BLOCK_256 << DM_BR_BLK_SHIFT);
+ dm_ctrl |= (direction << DM_DIR_SHIFT);
+
+ status = ocmem_read(dm_base + DM_STATUS);
+ pr_debug("Transfer status before %x\n", status);
+ atomic_set(&dm_pending, 1);
+ /* Trigger DM */
+ ocmem_write(dm_ctrl, dm_base + DM_CTRL);
+ pr_debug("ocmem: rdm: dm_ctrl %x br_ctrl %x\n", dm_ctrl, br_ctrl);
+
+ wait_event_interruptible(dm_wq,
+ atomic_read(&dm_pending) == 0);
+
+ return 0;
+}
+
+int ocmem_rdm_init(struct platform_device *pdev)
+{
+
+ struct ocmem_plat_data *pdata = NULL;
+ int rc = 0;
+
+ pdata = platform_get_drvdata(pdev);
+
+ br_base = pdata->br_base;
+ dm_base = pdata->dm_base;
+
+ rc = devm_request_irq(&pdev->dev, pdata->dm_irq, ocmem_dm_irq_handler,
+ IRQF_TRIGGER_RISING, "ocmem_dm_irq", pdata);
+
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to request dm irq");
+ return -EINVAL;
+ }
+
+ init_waitqueue_head(&dm_wq);
+ /* enable dm interrupts */
+ ocmem_write(DM_INTR_ENABLE, dm_base + DM_INTR_MASK);
+ return 0;
+}
diff --git a/arch/arm/mach-msm/ocmem_sched.c b/arch/arm/mach-msm/ocmem_sched.c
index 10a267c..f6d066d 100644
--- a/arch/arm/mach-msm/ocmem_sched.c
+++ b/arch/arm/mach-msm/ocmem_sched.c
@@ -54,7 +54,7 @@
MIN_PRIO = 0x0,
NO_PRIO = MIN_PRIO,
PRIO_SENSORS = 0x1,
- PRIO_BLAST = 0x1,
+ PRIO_OTHER_OS = 0x1,
PRIO_LP_AUDIO = 0x1,
PRIO_HP_AUDIO = 0x2,
PRIO_VOICE = 0x3,
@@ -75,6 +75,21 @@
*/
#define SCHED_DELAY 10
+static struct list_head rdm_queue;
+static struct mutex rdm_mutex;
+static struct workqueue_struct *ocmem_rdm_wq;
+static struct workqueue_struct *ocmem_eviction_wq;
+
+static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];
+
+struct ocmem_rdm_work {
+ int id;
+ struct ocmem_map_list *list;
+ struct ocmem_handle *handle;
+ int direction;
+ struct work_struct work;
+};
+
/* OCMEM Operational modes */
enum ocmem_client_modes {
OCMEM_PERFORMANCE = 1,
@@ -107,7 +122,7 @@
{OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED},
{OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC},
{OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
- {OCMEM_BLAST, PRIO_BLAST, OCMEM_LOW_POWER, OCMEM_SYSNOC},
+ {OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
};
static struct rb_root sched_tree;
@@ -119,6 +134,8 @@
struct rb_node region_rb;
/* Hash map of requests */
struct idr region_idr;
+ /* Chain in eviction list */
+ struct list_head eviction_list;
unsigned long r_start;
unsigned long r_end;
unsigned long r_sz;
@@ -244,6 +261,7 @@
if (!p)
return NULL;
idr_init(&p->region_idr);
+ INIT_LIST_HEAD(&p->eviction_list);
p->r_start = p->r_end = p->r_sz = 0x0;
p->max_prio = NO_PRIO;
return p;
@@ -461,10 +479,14 @@
{
int rc = 0;
+ down_write(&req->rw_sem);
+
mutex_lock(&sched_mutex);
rc = __sched_map(req);
mutex_unlock(&sched_mutex);
+ up_write(&req->rw_sem);
+
if (rc == OP_FAIL)
return -EINVAL;
@@ -475,10 +497,14 @@
{
int rc = 0;
+ down_write(&req->rw_sem);
+
mutex_lock(&sched_mutex);
rc = __sched_unmap(req);
mutex_unlock(&sched_mutex);
+ up_write(&req->rw_sem);
+
if (rc == OP_FAIL)
return -EINVAL;
@@ -713,6 +739,104 @@
}
/* Must be called with sched_mutex held */
+static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
+{
+ int owner = req->owner;
+ int ret = 0;
+
+ struct ocmem_req *matched_req = NULL;
+ struct ocmem_region *matched_region = NULL;
+ struct ocmem_region *region = NULL;
+ unsigned long alloc_addr = 0x0;
+
+ struct ocmem_zone *zone = get_zone(owner);
+
+ BUG_ON(!zone);
+
+ /* The shrink should not be called for zero size */
+ BUG_ON(new_sz == 0);
+
+ matched_region = find_region_match(req->req_start, req->req_end);
+ matched_req = find_req_match(req->req_id, matched_region);
+
+ if (!matched_region || !matched_req)
+ goto invalid_op_error;
+ if (matched_req != req)
+ goto invalid_op_error;
+
+
+ ret = zone->z_ops->free(zone,
+ matched_req->req_start, matched_req->req_sz);
+
+ if (ret < 0) {
+ pr_err("Zone Allocation operation failed\n");
+ goto internal_error;
+ }
+
+ alloc_addr = zone->z_ops->allocate(zone, new_sz);
+
+ if (alloc_addr < 0) {
+ pr_err("Zone Allocation operation failed\n");
+ goto internal_error;
+ }
+
+ /* Detach the region from the interval tree */
+ /* This is to guarantee that the change in size
+ * causes the tree to be rebalanced if required */
+
+ detach_req(matched_region, req);
+ if (req_count(matched_region) == 0) {
+ remove_region(matched_region);
+ region = matched_region;
+ } else {
+ region = create_region();
+ if (!region) {
+ pr_err("ocmem: Unable to create region\n");
+ goto internal_error;
+ }
+ }
+ /* update the request */
+ req->req_start = alloc_addr;
+ req->req_sz = new_sz;
+ req->req_end = alloc_addr + req->req_sz;
+
+ if (req_count(region) == 0) {
+ remove_region(matched_region);
+ destroy_region(matched_region);
+ }
+
+ /* update request state */
+ SET_STATE(req, R_MUST_GROW);
+ SET_STATE(req, R_MUST_MAP);
+ req->op = SCHED_MAP;
+
+ /* attach the request to the region */
+ attach_req(region, req);
+ populate_region(region, req);
+ update_region_prio(region);
+
+ /* update the tree with new region */
+ if (insert_region(region)) {
+ pr_err("ocmem: Failed to insert the region\n");
+ zone->z_ops->free(zone, alloc_addr, new_sz);
+ detach_req(region, req);
+ update_region_prio(region);
+ /* req will be destroyed by the caller */
+ goto region_error;
+ }
+ return OP_COMPLETE;
+
+region_error:
+ destroy_region(region);
+internal_error:
+ pr_err("ocmem: shrink: Failed\n");
+ return OP_FAIL;
+invalid_op_error:
+ pr_err("ocmem: shrink: Failed to find matching region\n");
+ return OP_FAIL;
+}
+
+/* Must be called with sched_mutex held */
static int __sched_allocate(struct ocmem_req *req, bool can_block,
bool can_wait)
{
@@ -906,12 +1030,6 @@
return req;
}
-int process_xfer(int id, struct ocmem_handle *handle,
- struct ocmem_map_list *list, int direction)
-{
-
- return 0;
-}
unsigned long process_quota(int id)
{
@@ -989,6 +1107,35 @@
return 0;
}
+static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
+{
+
+ int rc = 0;
+ struct ocmem_buf *buffer = NULL;
+
+ down_write(&req->rw_sem);
+ buffer = req->buffer;
+
+ /* Take the scheduler mutex */
+ mutex_lock(&sched_mutex);
+ rc = __sched_shrink(req, shrink_size);
+ mutex_unlock(&sched_mutex);
+
+ if (rc == OP_FAIL)
+ goto err_op_fail;
+
+ else if (rc == OP_COMPLETE) {
+ buffer->addr = device_address(req->owner, req->req_start);
+ buffer->len = req->req_sz;
+ }
+
+ up_write(&req->rw_sem);
+ return 0;
+err_op_fail:
+ up_write(&req->rw_sem);
+ return -EINVAL;
+}
+
static void ocmem_sched_wk_func(struct work_struct *work);
DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);
@@ -1076,6 +1223,250 @@
return 0;
}
+static void ocmem_rdm_worker(struct work_struct *work)
+{
+ int offset = 0;
+ int rc = 0;
+ int event;
+ struct ocmem_rdm_work *work_data = container_of(work,
+ struct ocmem_rdm_work, work);
+ int id = work_data->id;
+ struct ocmem_map_list *list = work_data->list;
+ int direction = work_data->direction;
+ struct ocmem_handle *handle = work_data->handle;
+ struct ocmem_req *req = handle_to_req(handle);
+ struct ocmem_buf *buffer = handle_to_buffer(handle);
+
+ down_write(&req->rw_sem);
+ offset = phys_to_offset(req->req_start);
+ rc = ocmem_rdm_transfer(id, list, offset, direction);
+ if (work_data->direction == TO_OCMEM)
+ event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
+ else
+ event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
+
+ up_write(&req->rw_sem);
+ kfree(work_data);
+ dispatch_notification(id, event, buffer);
+}
+
+int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
+ struct ocmem_map_list *list, int direction)
+{
+ struct ocmem_rdm_work *work_data = NULL;
+
+ down_write(&req->rw_sem);
+
+ work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
+ if (!work_data)
+ BUG();
+
+ work_data->handle = handle;
+ work_data->list = list;
+ work_data->id = req->owner;
+ work_data->direction = direction;
+ INIT_WORK(&work_data->work, ocmem_rdm_worker);
+ up_write(&req->rw_sem);
+ queue_work(ocmem_rdm_wq, &work_data->work);
+ return 0;
+}
+
+int process_xfer_out(int id, struct ocmem_handle *handle,
+ struct ocmem_map_list *list)
+{
+ struct ocmem_req *req = NULL;
+ int rc = 0;
+
+ req = handle_to_req(handle);
+
+ if (!req)
+ return -EINVAL;
+
+ if (!is_mapped(req)) {
+ pr_err("Buffer is not already mapped\n");
+ goto transfer_out_error;
+ }
+
+ rc = process_unmap(req, req->req_start, req->req_end);
+ if (rc < 0) {
+ pr_err("Unmapping the buffer failed\n");
+ goto transfer_out_error;
+ }
+
+ rc = queue_transfer(req, handle, list, TO_DDR);
+
+ if (rc < 0) {
+ pr_err("Failed to queue rdm transfer to DDR\n");
+ goto transfer_out_error;
+ }
+
+ return 0;
+
+transfer_out_error:
+ return -EINVAL;
+}
+
+int process_xfer_in(int id, struct ocmem_handle *handle,
+ struct ocmem_map_list *list)
+{
+ struct ocmem_req *req = NULL;
+ int rc = 0;
+
+ req = handle_to_req(handle);
+
+ if (!req)
+ return -EINVAL;
+
+ if (is_mapped(req)) {
+ pr_err("Buffer is already mapped\n");
+ goto transfer_in_error;
+ }
+
+ rc = process_map(req, req->req_start, req->req_end);
+ if (rc < 0) {
+ pr_err("Mapping the buffer failed\n");
+ goto transfer_in_error;
+ }
+
+ rc = queue_transfer(req, handle, list, TO_OCMEM);
+
+ if (rc < 0) {
+ pr_err("Failed to queue rdm transfer to OCMEM\n");
+ goto transfer_in_error;
+ }
+
+ return 0;
+transfer_in_error:
+ return -EINVAL;
+}
+
+int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
+{
+ struct ocmem_req *req = NULL;
+ struct ocmem_buf *buffer = NULL;
+ struct ocmem_eviction_data *edata = NULL;
+ int rc = 0;
+
+ if (is_blocked(id)) {
+ pr_err("Client %d cannot request free\n", id);
+ return -EINVAL;
+ }
+
+ req = handle_to_req(handle);
+ buffer = handle_to_buffer(handle);
+
+ if (!req)
+ return -EINVAL;
+
+ if (req->req_start != core_address(id, buffer->addr)) {
+ pr_err("Invalid buffer handle passed for shrink\n");
+ return -EINVAL;
+ }
+
+ edata = req->edata;
+
+ if (is_tcm(req->owner))
+ do_unmap(req);
+
+ if (size == 0) {
+ pr_info("req %p being shrunk to zero\n", req);
+ rc = do_free(req);
+ if (rc < 0)
+ return -EINVAL;
+ } else {
+ rc = do_shrink(req, size);
+ if (rc < 0)
+ return -EINVAL;
+ }
+
+ edata->pending--;
+ if (edata->pending == 0) {
+ pr_debug("All regions evicted");
+ complete(&edata->completion);
+ }
+
+ return 0;
+}
+
+int process_xfer(int id, struct ocmem_handle *handle,
+ struct ocmem_map_list *list, int direction)
+{
+ int rc = 0;
+
+ if (is_tcm(id)) {
+ WARN(1, "Mapping operation is invalid for client\n");
+ return -EINVAL;
+ }
+
+ if (direction == TO_DDR)
+ rc = process_xfer_out(id, handle, list);
+ else if (direction == TO_OCMEM)
+ rc = process_xfer_in(id, handle, list);
+ return rc;
+}
+
+int ocmem_eviction_thread(struct work_struct *work)
+{
+ return 0;
+}
+
+int process_evict(int id)
+{
+ struct ocmem_eviction_data *edata = NULL;
+ int prio = ocmem_client_table[id].priority;
+ struct rb_node *rb_node = NULL;
+ struct ocmem_req *req = NULL;
+ struct ocmem_buf buffer;
+ int j = 0;
+
+ edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);
+
+ INIT_LIST_HEAD(&edata->victim_list);
+ INIT_LIST_HEAD(&edata->req_list);
+ edata->prio = prio;
+ edata->pending = 0;
+ edata->passive = 1;
+ evictions[id] = edata;
+
+ mutex_lock(&sched_mutex);
+
+ for (rb_node = rb_first(&sched_tree); rb_node;
+ rb_node = rb_next(rb_node)) {
+ struct ocmem_region *tmp_region = NULL;
+ tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
+ if (tmp_region->max_prio < prio) {
+ for (j = id - 1; j > NO_PRIO; j--) {
+ req = find_req_match(j, tmp_region);
+ if (req) {
+ pr_info("adding %p to eviction list\n",
+ tmp_region);
+ list_add_tail(
+ &tmp_region->eviction_list,
+ &edata->victim_list);
+ list_add_tail(
+ &req->eviction_list,
+ &edata->req_list);
+ edata->pending++;
+ req->edata = edata;
+ buffer.addr = req->req_start;
+ buffer.len = 0x0;
+ dispatch_notification(req->owner,
+ OCMEM_ALLOC_SHRINK, &buffer);
+ }
+ }
+ } else {
+ pr_info("skipping %p from eviction\n", tmp_region);
+ }
+ }
+ mutex_unlock(&sched_mutex);
+ pr_debug("Waiting for all regions to be shrunk\n");
+ if (edata->pending > 0) {
+ init_completion(&edata->completion);
+ wait_for_completion(&edata->completion);
+ }
+ return 0;
+}
+
static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
{
int rc = 0;
@@ -1113,6 +1504,31 @@
return -EINVAL;
}
+int process_restore(int id)
+{
+ struct ocmem_req *req = NULL;
+ struct ocmem_req *next = NULL;
+ struct ocmem_eviction_data *edata = evictions[id];
+
+ if (!edata)
+ return 0;
+
+ list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
+ {
+ if (req) {
+ pr_debug("ocmem: Fetched evicted request %p\n",
+ req);
+ list_del(&req->sched_list);
+ req->op = SCHED_ALLOCATE;
+ sched_enqueue(req);
+ }
+ }
+ kfree(edata);
+ evictions[id] = NULL;
+ pr_debug("Restore all evicted regions\n");
+ ocmem_schedule_pending();
+ return 0;
+}
int process_allocate(int id, struct ocmem_handle *handle,
unsigned long min, unsigned long max,
@@ -1185,6 +1601,7 @@
rc = do_allocate(req, true, false);
+
if (rc < 0)
goto do_allocate_error;
@@ -1251,5 +1668,13 @@
for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
INIT_LIST_HEAD(&sched_queue[i]);
+ mutex_init(&rdm_mutex);
+ INIT_LIST_HEAD(&rdm_queue);
+ ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
+ if (!ocmem_rdm_wq)
+ return -ENOMEM;
+ ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
+ if (!ocmem_eviction_wq)
+ return -ENOMEM;
return 0;
}
diff --git a/arch/arm/mach-msm/pcie.c b/arch/arm/mach-msm/pcie.c
index f105356..709c8e8 100644
--- a/arch/arm/mach-msm/pcie.c
+++ b/arch/arm/mach-msm/pcie.c
@@ -200,6 +200,16 @@
static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
+ /*
+	 * An attempt to reset the secondary bus causes the PCIE core to reset.
+	 * Disable the secondary bus reset functionality.
+ */
+ if ((bus->number == 0) && (where == PCI_BRIDGE_CONTROL) &&
+ (val & PCI_BRIDGE_CTL_BUS_RESET)) {
+ pr_info("PCIE secondary bus reset not supported\n");
+ val &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ }
+
return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
}
@@ -609,6 +619,7 @@
msm_pcie_dev.pdev = pdev;
pdata = pdev->dev.platform_data;
msm_pcie_dev.gpio = pdata->gpio;
+ msm_pcie_dev.wake_n = pdata->wake_n;
msm_pcie_dev.vreg = msm_pcie_vreg_info;
msm_pcie_dev.clk = msm_pcie_clk_info;
msm_pcie_dev.res = msm_pcie_res_info;
@@ -696,6 +707,26 @@
DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
msm_pcie_fixup_early);
+/* enable wake_n interrupt during suspend */
+static void msm_pcie_fixup_suspend(struct pci_dev *dev)
+{
+ PCIE_DBG("enabling wake_n\n");
+ if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+ enable_irq(msm_pcie_dev.wake_n);
+}
+DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+ msm_pcie_fixup_suspend);
+
+/* disable wake_n interrupt when system is not in suspend */
+static void msm_pcie_fixup_resume(struct pci_dev *dev)
+{
+ PCIE_DBG("disabling wake_n\n");
+ if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+ disable_irq(msm_pcie_dev.wake_n);
+}
+DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+ msm_pcie_fixup_resume);
+
/*
* actual physical (BAR) address of the device resources starts from
* MSM_PCIE_DEV_BAR_ADDR; the system axi address for the device resources starts
diff --git a/arch/arm/mach-msm/pcie.h b/arch/arm/mach-msm/pcie.h
index fba6b11..d7cce3e 100644
--- a/arch/arm/mach-msm/pcie.h
+++ b/arch/arm/mach-msm/pcie.h
@@ -68,6 +68,8 @@
uint32_t axi_bar_end;
struct resource dev_mem_res;
+
+ uint32_t wake_n;
};
extern uint32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev);
diff --git a/arch/arm/mach-msm/pcie_irq.c b/arch/arm/mach-msm/pcie_irq.c
index d915561..5a44a17 100644
--- a/arch/arm/mach-msm/pcie_irq.c
+++ b/arch/arm/mach-msm/pcie_irq.c
@@ -39,7 +39,13 @@
static DECLARE_BITMAP(msi_irq_in_use, NR_PCIE_MSI_IRQS);
-irqreturn_t handle_msi_irq(int irq, void *data)
+static irqreturn_t handle_wake_irq(int irq, void *data)
+{
+ PCIE_DBG("\n");
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_msi_irq(int irq, void *data)
{
int i, j;
unsigned long val;
@@ -87,15 +93,32 @@
/* register handler for physical MSI interrupt line */
rc = request_irq(PCIE20_INT_MSI, handle_msi_irq, IRQF_TRIGGER_RISING,
"msm_pcie_msi", dev);
- if (rc)
+ if (rc) {
pr_err("Unable to allocate msi interrupt\n");
+ goto out;
+ }
+ /* register handler for PCIE_WAKE_N interrupt line */
+ rc = request_irq(dev->wake_n, handle_wake_irq, IRQF_TRIGGER_FALLING,
+ "msm_pcie_wake", dev);
+ if (rc) {
+ pr_err("Unable to allocate wake interrupt\n");
+ free_irq(PCIE20_INT_MSI, dev);
+ goto out;
+ }
+
+ enable_irq_wake(dev->wake_n);
+
+ /* PCIE_WAKE_N should be enabled only during system suspend */
+ disable_irq(dev->wake_n);
+out:
return rc;
}
void __exit msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
{
free_irq(PCIE20_INT_MSI, dev);
+ free_irq(dev->wake_n, dev);
}
void msm_pcie_destroy_irq(unsigned int irq)
diff --git a/arch/arm/mach-msm/perf_event_msm_krait_l2.c b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
index 3635572..103eef0 100644
--- a/arch/arm/mach-msm/perf_event_msm_krait_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
@@ -532,7 +532,7 @@
{
krait_l2_pmu.plat_device = pdev;
- if (!armpmu_register(&krait_l2_pmu, "kraitl2", -1))
+ if (!armpmu_register(&krait_l2_pmu, "msm-l2", -1))
pmu_type = krait_l2_pmu.pmu.type;
return 0;
diff --git a/arch/arm/mach-msm/perf_event_msm_l2.c b/arch/arm/mach-msm/perf_event_msm_l2.c
index aae2552..2ad36df 100644
--- a/arch/arm/mach-msm/perf_event_msm_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_l2.c
@@ -877,7 +877,7 @@
{
scorpion_l2_pmu.plat_device = pdev;
- if (!armpmu_register(&scorpion_l2_pmu, "scorpionl2", -1))
+ if (!armpmu_register(&scorpion_l2_pmu, "msm-l2", -1))
pmu_type = scorpion_l2_pmu.pmu.type;
return 0;
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index 3f6eb95..540ffbb 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -565,18 +565,6 @@
static void msm_pil_debugfs_remove(struct pil_device *pil) { }
#endif
-static int __msm_pil_shutdown(struct device *dev, void *data)
-{
- pil_shutdown(to_pil_device(dev));
- return 0;
-}
-
-static int msm_pil_shutdown_at_boot(void)
-{
- return bus_for_each_dev(&pil_bus_type, NULL, NULL, __msm_pil_shutdown);
-}
-late_initcall(msm_pil_shutdown_at_boot);
-
static void pil_device_release(struct device *dev)
{
struct pil_device *pil = to_pil_device(dev);
diff --git a/arch/arm/mach-msm/pil-pronto.c b/arch/arm/mach-msm/pil-pronto.c
index 58d5176..8897cb5 100644
--- a/arch/arm/mach-msm/pil-pronto.c
+++ b/arch/arm/mach-msm/pil-pronto.c
@@ -117,11 +117,12 @@
void __iomem *base = drv->base;
unsigned long start_addr = drv->start_addr;
- /* Deassert reset to Pronto */
+ /* Deassert reset to subsystem and wait for propagation */
reg = readl_relaxed(drv->reset_base);
reg &= ~CLK_CTL_WCNSS_RESTART_BIT;
writel_relaxed(reg, drv->reset_base);
mb();
+ udelay(2);
/* Configure boot address */
writel_relaxed(start_addr >> 16, base +
diff --git a/arch/arm/mach-msm/pil-q6v4.c b/arch/arm/mach-msm/pil-q6v4.c
index 131a74b..32cce1d 100644
--- a/arch/arm/mach-msm/pil-q6v4.c
+++ b/arch/arm/mach-msm/pil-q6v4.c
@@ -116,7 +116,7 @@
int err;
struct q6v4_data *drv = dev_get_drvdata(dev);
- err = regulator_set_voltage(drv->vreg, 375000, 375000);
+ err = regulator_set_voltage(drv->vreg, 743750, 743750);
if (err) {
dev_err(dev, "Failed to set regulator's voltage step.\n");
return err;
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index e279f99..ff0e792d 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -139,8 +139,10 @@
struct q6v5_data *drv = dev_get_drvdata(pil->dev);
int ret;
+ /* Deassert reset to subsystem and wait for propagation */
writel_relaxed(0, drv->restart_reg);
mb();
+ udelay(2);
/*
* Bring subsystem out of reset and enable required
@@ -155,11 +157,14 @@
goto err_clks;
/* Program Image Address */
- if (drv->self_auth)
+ if (drv->self_auth) {
writel_relaxed(drv->start_addr, drv->rmb_base + RMB_MBA_IMAGE);
- else
+ /* Ensure write to RMB base occurs before reset is released. */
+ mb();
+ } else {
writel_relaxed((drv->start_addr >> 4) & 0x0FFFFFF0,
drv->reg_base + QDSP6SS_RST_EVB);
+ }
ret = pil_q6v5_reset(pil);
if (ret)
diff --git a/arch/arm/mach-msm/pil-riva.c b/arch/arm/mach-msm/pil-riva.c
index 8a16b43..2d1fa80 100644
--- a/arch/arm/mach-msm/pil-riva.c
+++ b/arch/arm/mach-msm/pil-riva.c
@@ -263,17 +263,17 @@
static int pil_riva_init_image_trusted(struct pil_desc *pil,
const u8 *metadata, size_t size)
{
- return pas_init_image(PAS_RIVA, metadata, size);
+ return pas_init_image(PAS_WCNSS, metadata, size);
}
static int pil_riva_reset_trusted(struct pil_desc *pil)
{
- return pas_auth_and_reset(PAS_RIVA);
+ return pas_auth_and_reset(PAS_WCNSS);
}
static int pil_riva_shutdown_trusted(struct pil_desc *pil)
{
- return pas_shutdown(PAS_RIVA);
+ return pas_shutdown(PAS_WCNSS);
}
static struct pil_reset_ops pil_riva_ops_trusted = {
@@ -334,7 +334,7 @@
desc->owner = THIS_MODULE;
desc->proxy_timeout = 10000;
- if (pas_supported(PAS_RIVA) > 0) {
+ if (pas_supported(PAS_WCNSS) > 0) {
desc->ops = &pil_riva_ops_trusted;
dev_info(&pdev->dev, "using secure boot\n");
} else {
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 5e063a1..a11ca95 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -83,14 +83,6 @@
if (!base_ptr)
return -ENODEV;
- if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3()) {
- writel_relaxed(0x10, base_ptr+0x04);
- writel_relaxed(0x80, base_ptr+0x04);
- }
-
- if (machine_is_apq8064_sim())
- writel_relaxed(0xf0000, base_ptr+0x04);
-
if (machine_is_msm8974_sim()) {
writel_relaxed(0x800, base_ptr+0x04);
writel_relaxed(0x3FFF, base_ptr+0x14);
@@ -135,10 +127,6 @@
if (cpu_is_msm8x60())
return scorpion_release_secondary();
- if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3() ||
- machine_is_apq8064_sim())
- return krait_release_secondary_sim(0x02088000, cpu);
-
if (machine_is_msm8974_sim())
return krait_release_secondary_sim(0xf9088000, cpu);
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index 42616c6..e203667 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -24,7 +24,6 @@
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/tick.h>
-#include <linux/delay.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <mach/system.h>
@@ -799,28 +798,6 @@
return 0;
}
-static struct msm_pm_sleep_status_data *msm_pm_slp_sts;
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(enum msm_pm_sleep_mode,
- msm_pm_last_slp_mode);
-
-bool msm_pm_verify_cpu_pc(unsigned int cpu)
-{
- enum msm_pm_sleep_mode mode = per_cpu(msm_pm_last_slp_mode, cpu);
-
- if (msm_pm_slp_sts) {
- int acc_sts = __raw_readl(msm_pm_slp_sts->base_addr
- + cpu * msm_pm_slp_sts->cpu_offset);
-
- if ((acc_sts & msm_pm_slp_sts->mask) &&
- ((mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) ||
- (mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)))
- return true;
- }
-
- return false;
-}
-
void msm_pm_cpu_enter_lowpower(unsigned int cpu)
{
int i;
@@ -836,54 +813,14 @@
if (MSM_PM_DEBUG_HOTPLUG & msm_pm_debug_mask)
pr_notice("CPU%u: %s: shutting down cpu\n", cpu, __func__);
- if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE]) {
- per_cpu(msm_pm_last_slp_mode, cpu)
- = MSM_PM_SLEEP_MODE_POWER_COLLAPSE;
+ if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
msm_pm_power_collapse(false);
- } else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
- per_cpu(msm_pm_last_slp_mode, cpu)
- = MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE;
+ else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE])
msm_pm_power_collapse_standalone(false);
- } else if (allow[MSM_PM_SLEEP_MODE_RETENTION]) {
- per_cpu(msm_pm_last_slp_mode, cpu)
- = MSM_PM_SLEEP_MODE_RETENTION;
+ else if (allow[MSM_PM_SLEEP_MODE_RETENTION])
msm_pm_retention();
- } else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
- per_cpu(msm_pm_last_slp_mode, cpu)
- = MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT;
+ else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT])
msm_pm_swfi();
- } else
- per_cpu(msm_pm_last_slp_mode, cpu) = MSM_PM_SLEEP_MODE_NR;
-}
-
-int msm_pm_wait_cpu_shutdown(unsigned int cpu)
-{
-
- int timeout = 10;
-
- if (!msm_pm_slp_sts)
- return 0;
-
- while (timeout--) {
-
- /*
- * Check for the SPM of the core being hotplugged to set
- * its sleep state.The SPM sleep state indicates that the
- * core has been power collapsed.
- */
-
- int acc_sts = __raw_readl(msm_pm_slp_sts->base_addr
- + cpu * msm_pm_slp_sts->cpu_offset);
- mb();
-
- if (acc_sts & msm_pm_slp_sts->mask)
- return 0;
-
- usleep(100);
- }
- pr_warn("%s(): Timed out waiting for CPU %u SPM to enter sleep state",
- __func__, cpu);
- return -EBUSY;
}
static int msm_pm_enter(suspend_state_t state)
@@ -980,12 +917,6 @@
/******************************************************************************
* Initialization routine
*****************************************************************************/
-void __init msm_pm_init_sleep_status_data(
- struct msm_pm_sleep_status_data *data)
-{
- msm_pm_slp_sts = data;
-}
-
void msm_pm_set_sleep_ops(struct msm_pm_sleep_ops *ops)
{
if (ops)
diff --git a/arch/arm/mach-msm/pm.h b/arch/arm/mach-msm/pm.h
index 70d54da..c722ff6 100644
--- a/arch/arm/mach-msm/pm.h
+++ b/arch/arm/mach-msm/pm.h
@@ -70,12 +70,6 @@
extern struct msm_pm_platform_data msm_pm_sleep_modes[];
-struct msm_pm_sleep_status_data {
- void *base_addr;
- uint32_t cpu_offset;
- uint32_t mask;
-};
-
struct msm_pm_sleep_ops {
void *(*lowest_limits)(bool from_idle,
enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
@@ -93,19 +87,11 @@
int msm_pm_idle_enter(enum msm_pm_sleep_mode sleep_mode);
void msm_pm_cpu_enter_lowpower(unsigned int cpu);
-void __init msm_pm_init_sleep_status_data(
- struct msm_pm_sleep_status_data *sleep_data);
-
-
#ifdef CONFIG_MSM_PM8X60
void msm_pm_set_rpm_wakeup_irq(unsigned int irq);
-int msm_pm_wait_cpu_shutdown(unsigned int cpu);
-bool msm_pm_verify_cpu_pc(unsigned int cpu);
void msm_pm_set_sleep_ops(struct msm_pm_sleep_ops *ops);
#else
static inline void msm_pm_set_rpm_wakeup_irq(unsigned int irq) {}
-static inline int msm_pm_wait_cpu_shutdown(unsigned int cpu) { return 0; }
-static inline bool msm_pm_verify_cpu_pc(unsigned int cpu) { return true; }
static inline void msm_pm_set_sleep_ops(struct msm_pm_sleep_ops *ops) {}
#endif
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/mach-msm/pm2.c b/arch/arm/mach-msm/pm2.c
index 7a8e4c3..8fccda4 100644
--- a/arch/arm/mach-msm/pm2.c
+++ b/arch/arm/mach-msm/pm2.c
@@ -21,10 +21,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/pm.h>
#include <linux/pm_qos.h>
#include <linux/suspend.h>
-#include <linux/reboot.h>
#include <linux/io.h>
#include <linux/tick.h>
#include <linux/memory.h>
@@ -34,7 +32,6 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#endif
-#include <asm/system_misc.h>
#ifdef CONFIG_CACHE_L2X0
#include <asm/hardware/cache-l2x0.h>
#endif
@@ -1577,55 +1574,6 @@
}
}
-/******************************************************************************
- * Restart Definitions
- *****************************************************************************/
-
-static uint32_t restart_reason = 0x776655AA;
-
-static void msm_pm_power_off(void)
-{
- msm_rpcrouter_close();
- msm_proc_comm(PCOM_POWER_DOWN, 0, 0);
- for (;;)
- ;
-}
-
-static void msm_pm_restart(char str, const char *cmd)
-{
- msm_rpcrouter_close();
- msm_proc_comm(PCOM_RESET_CHIP, &restart_reason, 0);
-
- for (;;)
- ;
-}
-
-static int msm_reboot_call
- (struct notifier_block *this, unsigned long code, void *_cmd)
-{
- if ((code == SYS_RESTART) && _cmd) {
- char *cmd = _cmd;
- if (!strcmp(cmd, "bootloader")) {
- restart_reason = 0x77665500;
- } else if (!strcmp(cmd, "recovery")) {
- restart_reason = 0x77665502;
- } else if (!strcmp(cmd, "eraseflash")) {
- restart_reason = 0x776655EF;
- } else if (!strncmp(cmd, "oem-", 4)) {
- unsigned code = simple_strtoul(cmd + 4, 0, 16) & 0xff;
- restart_reason = 0x6f656d00 | code;
- } else {
- restart_reason = 0x77665501;
- }
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block msm_reboot_notifier = {
- .notifier_call = msm_reboot_call,
-};
-
-
/*
* Initialize the power management subsystem.
*
@@ -1693,10 +1641,6 @@
virt_to_phys(&msm_pm_pc_pgd));
#endif
- pm_power_off = msm_pm_power_off;
- arm_pm_restart = msm_pm_restart;
- register_reboot_notifier(&msm_reboot_notifier);
-
msm_pm_smem_data = smem_alloc(SMEM_APPS_DEM_SLAVE_DATA,
sizeof(*msm_pm_smem_data));
if (msm_pm_smem_data == NULL) {
diff --git a/arch/arm/mach-msm/qdsp5/adsp.c b/arch/arm/mach-msm/qdsp5/adsp.c
index eee7e37..11f6b28 100644
--- a/arch/arm/mach-msm/qdsp5/adsp.c
+++ b/arch/arm/mach-msm/qdsp5/adsp.c
@@ -1072,12 +1072,28 @@
int msm_adsp_enable(struct msm_adsp_module *module)
{
int rc = 0;
+ struct msm_adsp_module *module_en = NULL;
if (!module)
return -EINVAL;
MM_INFO("enable '%s'state[%d] id[%d]\n",
module->name, module->state, module->id);
+ if (!strncmp(module->name, "JPEGTASK", sizeof(module->name)))
+ module_en = find_adsp_module_by_name(&adsp_info, "VIDEOTASK");
+ else if (!strncmp(module->name, "VIDEOTASK", sizeof(module->name)))
+ module_en = find_adsp_module_by_name(&adsp_info, "JPEGTASK");
+ if (module_en) {
+ mutex_lock(&module_en->lock);
+ if (module_en->state == ADSP_STATE_ENABLED ||
+ module_en->state == ADSP_STATE_ENABLING) {
+ MM_ERR("both jpeg and video module can't"\
+ " exist at a time\n");
+ mutex_unlock(&module_en->lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&module_en->lock);
+ }
mutex_lock(&module->lock);
switch (module->state) {
diff --git a/arch/arm/mach-msm/qdsp5/audio_out.c b/arch/arm/mach-msm/qdsp5/audio_out.c
index 8eaf829..0c8034c 100644
--- a/arch/arm/mach-msm/qdsp5/audio_out.c
+++ b/arch/arm/mach-msm/qdsp5/audio_out.c
@@ -111,7 +111,7 @@
-#define BUFSZ (960 * 5)
+#define BUFSZ (5248)
#define DMASZ (BUFSZ * 2)
#define COMMON_OBJ_ID 6
@@ -817,7 +817,7 @@
goto done;
audio->out_buffer_size = BUFSZ;
- audio->out_sample_rate = 44100;
+ audio->out_sample_rate = 48000;
audio->out_channel_mode = AUDPP_CMD_PCM_INTF_STEREO_V;
audio->out_weight = 100;
diff --git a/arch/arm/mach-msm/qdsp5/audio_voicememo.c b/arch/arm/mach-msm/qdsp5/audio_voicememo.c
index 03dd295..34e5b81 100644
--- a/arch/arm/mach-msm/qdsp5/audio_voicememo.c
+++ b/arch/arm/mach-msm/qdsp5/audio_voicememo.c
@@ -459,7 +459,7 @@
if (datacb_data->pkt.fw_data.fw_ptr_status &&
be32_to_cpu(datacb_data->pkt.fw_data.rec_length) &&
be32_to_cpu(datacb_data->pkt.fw_data.rec_length)
- <= MAX_FRAME_SIZE) {
+ <= MAX_REC_BUF_SIZE) {
MM_DBG("Copy FW link:rec_buf_size \
= 0x%08x, rec_length=0x%08x\n",
@@ -484,7 +484,7 @@
} else if (datacb_data->pkt.rw_data.rw_ptr_status &&
be32_to_cpu(datacb_data->pkt.rw_data.rec_length) &&
be32_to_cpu(datacb_data->pkt.rw_data.rec_length)
- <= MAX_FRAME_SIZE) {
+ <= MAX_REC_BUF_SIZE) {
MM_DBG("Copy RW link:rec_buf_size \
=0x%08x, rec_length=0x%08x\n",
@@ -509,12 +509,12 @@
} else {
MM_ERR("FW: ptr_status %d, rec_length=0x%08x,"
"RW: ptr_status %d, rec_length=0x%08x\n",
- datacb_data->pkt.rw_data.fw_ptr_status, \
+ datacb_data->pkt.fw_data.fw_ptr_status, \
be32_to_cpu( \
datacb_data->pkt.fw_data.rec_length), \
- datacb_data->pkt.rw_data.fw_ptr_status, \
+ datacb_data->pkt.rw_data.rw_ptr_status, \
be32_to_cpu( \
- datacb_data->pkt.fw_data.rec_length));
+ datacb_data->pkt.rw_data.rec_length));
}
if (rec_status != RPC_VOC_REC_STAT_DONE) {
/* Not end of record */
diff --git a/arch/arm/mach-msm/qdsp6v2/apr.c b/arch/arm/mach-msm/qdsp6v2/apr.c
index 2403c02..a0bfb27 100644
--- a/arch/arm/mach-msm/qdsp6v2/apr.c
+++ b/arch/arm/mach-msm/qdsp6v2/apr.c
@@ -372,10 +372,14 @@
if (q6.state == APR_Q6_NOIMG) {
q6.pil = pil_get("q6");
if (IS_ERR(q6.pil)) {
- rc = PTR_ERR(q6.pil);
- pr_err("APR: Unable to load q6 image, error:%d\n", rc);
- mutex_unlock(&q6.lock);
- return svc;
+ q6.pil = pil_get("adsp");
+ if (IS_ERR(q6.pil)) {
+ rc = PTR_ERR(q6.pil);
+ pr_err("APR: Unable to load q6 image, error:%d\n",
+ rc);
+ mutex_unlock(&q6.lock);
+ return svc;
+ }
}
q6.state = APR_Q6_LOADED;
}
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
index a8773ea..a973b92 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
@@ -1131,13 +1131,12 @@
{
uint16_t ind = 0;
+ usf_unregister_conflicting_events(
+ usf->conflicting_event_types);
+ usf->conflicting_event_types = 0;
for (ind = 0; ind < USF_MAX_EVENT_IND; ++ind) {
if (usf->input_ifs[ind] == NULL)
continue;
-
- usf_unregister_conflicting_events(
- usf->conflicting_event_types);
- usf->conflicting_event_types = 0;
input_unregister_device(usf->input_ifs[ind]);
usf->input_ifs[ind] = NULL;
pr_debug("%s input_unregister_device[%s]\n",
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c
index b99a9b0..f566e82 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c
@@ -39,9 +39,22 @@
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT |
INPUT_DEVICE_ID_MATCH_ABSBIT,
- .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) },
+ .evbit = { BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
- .absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+ /* assumption: ABS_X & ABS_Y are in the same long */
+ .absbit = { [BIT_WORD(ABS_X)] = BIT_MASK(ABS_X) |
+ BIT_MASK(ABS_Y) },
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_KEYBIT |
+ INPUT_DEVICE_ID_MATCH_ABSBIT,
+ .evbit = { BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+ /* assumption: MT_.._X & MT_.._Y are in the same long */
+ .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+ BIT_MASK(ABS_MT_POSITION_X) |
+ BIT_MASK(ABS_MT_POSITION_Y) },
},
{ } /* Terminating entry */
};
@@ -76,12 +89,12 @@
int ind = handler->minor;
pr_debug("%s: name=[%s]; ind=%d\n", __func__, dev->name, ind);
+
if (s_usfcdev_events[ind].registered_event &&
s_usfcdev_events[ind].match_cb) {
rc = (*s_usfcdev_events[ind].match_cb)((uint16_t)ind, dev);
pr_debug("%s: [%s]; rc=%d\n", __func__, dev->name, rc);
}
-
return rc;
}
@@ -128,10 +141,12 @@
{
uint16_t ind = (uint16_t)handle->handler->minor;
- pr_debug("%s: event_type=%d; filter=%d\n",
+ pr_debug("%s: event_type=%d; filter=%d; abs_xy=%ld; abs_y_mt[]=%ld\n",
__func__,
ind,
- s_usfcdev_events[ind].filter);
+ s_usfcdev_events[ind].filter,
+ usfc_tsc_ids[0].absbit[0],
+ usfc_tsc_ids[1].absbit[1]);
return s_usfcdev_events[ind].filter;
}
diff --git a/arch/arm/mach-msm/ramdump.c b/arch/arm/mach-msm/ramdump.c
index a18acd6..21e81dd 100644
--- a/arch/arm/mach-msm/ramdump.c
+++ b/arch/arm/mach-msm/ramdump.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -222,6 +222,17 @@
return (void *)rd_dev;
}
+void destroy_ramdump_device(void *dev)
+{
+ struct ramdump_device *rd_dev = dev;
+
+ if (IS_ERR_OR_NULL(rd_dev))
+ return;
+
+ misc_deregister(&rd_dev->device);
+ kfree(rd_dev);
+}
+
int do_ramdump(void *handle, struct ramdump_segment *segments,
int nsegments)
{
diff --git a/arch/arm/mach-msm/ramdump.h b/arch/arm/mach-msm/ramdump.h
index 0b60a44..9006010 100644
--- a/arch/arm/mach-msm/ramdump.h
+++ b/arch/arm/mach-msm/ramdump.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
};
void *create_ramdump_device(const char *dev_name);
+void destroy_ramdump_device(void *dev);
int do_ramdump(void *handle, struct ramdump_segment *segments,
int nsegments);
diff --git a/arch/arm/mach-msm/restart_7k.c b/arch/arm/mach-msm/restart_7k.c
new file mode 100644
index 0000000..dc9edf4
--- /dev/null
+++ b/arch/arm/mach-msm/restart_7k.c
@@ -0,0 +1,101 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/pm.h>
+#include <asm/system_misc.h>
+#include <mach/proc_comm.h>
+
+#include "devices-msm7x2xa.h"
+#include "smd_rpcrouter.h"
+
+static uint32_t restart_reason = 0x776655AA;
+
+static void msm_pm_power_off(void)
+{
+ msm_rpcrouter_close();
+ msm_proc_comm(PCOM_POWER_DOWN, 0, 0);
+ for (;;)
+ ;
+}
+
+static void msm_pm_restart(char str, const char *cmd)
+{
+ msm_rpcrouter_close();
+ pr_debug("The reset reason is %x\n", restart_reason);
+
+ /* Disable interrupts */
+ local_irq_disable();
+ local_fiq_disable();
+
+ /*
+ * Take out a flat memory mapping and will
+ * insert a 1:1 mapping in place of
+ * the user-mode pages to ensure predictable results
+ * This function takes care of flushing the caches
+ * and flushing the TLB.
+ */
+ setup_mm_for_reboot();
+
+ msm_proc_comm(PCOM_RESET_CHIP, &restart_reason, 0);
+
+ for (;;)
+ ;
+}
+
+static int msm_reboot_call
+ (struct notifier_block *this, unsigned long code, void *_cmd)
+{
+ if ((code == SYS_RESTART) && _cmd) {
+ char *cmd = _cmd;
+ if (!strncmp(cmd, "bootloader", 10)) {
+ restart_reason = 0x77665500;
+ } else if (!strncmp(cmd, "recovery", 8)) {
+ restart_reason = 0x77665502;
+ } else if (!strncmp(cmd, "eraseflash", 10)) {
+ restart_reason = 0x776655EF;
+ } else if (!strncmp(cmd, "oem-", 4)) {
+ unsigned long oem_code = 0;
+ int res;
+ res = kstrtoul(cmd + 4, 16, &oem_code);
+ if (!res)
+ restart_reason = 0x6f656d00 | (oem_code & 0xff);
+ } else {
+ restart_reason = 0x77665501;
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block msm_reboot_notifier = {
+ .notifier_call = msm_reboot_call,
+};
+
+static int __init msm_pm_restart_init(void)
+{
+ int ret;
+
+ pm_power_off = msm_pm_power_off;
+ arm_pm_restart = msm_pm_restart;
+
+ ret = register_reboot_notifier(&msm_reboot_notifier);
+ if (ret)
+ pr_err("Failed to register reboot notifier\n");
+
+ return ret;
+}
+late_initcall(msm_pm_restart_init);
diff --git a/arch/arm/mach-msm/rpm-notifier.h b/arch/arm/mach-msm/rpm-notifier.h
index df8d9b3..33086c6 100644
--- a/arch/arm/mach-msm/rpm-notifier.h
+++ b/arch/arm/mach-msm/rpm-notifier.h
@@ -20,8 +20,31 @@
uint32_t size;
uint8_t *value;
};
-
+/**
+ * msm_rpm_register_notifier - Register for sleep set notifications
+ *
+ * @nb - notifier block to register
+ *
+ * return 0 on success, errno on failure.
+ */
int msm_rpm_register_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_unregister_notifier - Unregister previously registered notifications
+ *
+ * @nb - notifier block to unregister
+ *
+ * return 0 on success, errno on failure.
+ */
int msm_rpm_unregister_notifier(struct notifier_block *nb);
+/**
+ * msm_rpm_enter_sleep - Notify RPM driver to prepare for entering sleep
+ */
+int msm_rpm_enter_sleep(void);
+
+/**
+ * msm_rpm_exit_sleep - Notify RPM driver about resuming from power collapse
+ */
+void msm_rpm_exit_sleep(void);
#endif /*__ARCH_ARM_MACH_MSM_RPM_NOTIF_H */
diff --git a/arch/arm/mach-msm/rpm-regulator-smd.c b/arch/arm/mach-msm/rpm-regulator-smd.c
index fdff231..a8af9e7 100644
--- a/arch/arm/mach-msm/rpm-regulator-smd.c
+++ b/arch/arm/mach-msm/rpm-regulator-smd.c
@@ -68,6 +68,7 @@
RPM_REGULATOR_PARAM_QUIET_MODE,
RPM_REGULATOR_PARAM_FREQ_REASON,
RPM_REGULATOR_PARAM_CORNER,
+ RPM_REGULATOR_PARAM_BYPASS,
RPM_REGULATOR_PARAM_MAX,
};
@@ -111,7 +112,8 @@
PARAM(HEAD_ROOM, 1, 0, 0, 1, "hr", 0, 0x7FFFFFFF, "qcom,init-head-room"),
PARAM(QUIET_MODE, 0, 1, 0, 0, "qm", 0, 2, "qcom,init-quiet-mode"),
PARAM(FREQ_REASON, 0, 1, 0, 1, "resn", 0, 8, "qcom,init-freq-reason"),
- PARAM(CORNER, 0, 1, 0, 0, "corn", 0, 5, "qcom,init-voltage-corner"),
+ PARAM(CORNER, 0, 1, 0, 0, "corn", 0, 6, "qcom,init-voltage-corner"),
+ PARAM(BYPASS, 1, 0, 0, 0, "bypa", 0, 1, "qcom,init-disallow-bypass"),
};
struct rpm_vreg_request {
@@ -440,6 +442,7 @@
RPM_VREG_AGGR_MAX(QUIET_MODE, param_aggr, param_reg);
RPM_VREG_AGGR_MAX(FREQ_REASON, param_aggr, param_reg);
RPM_VREG_AGGR_MAX(CORNER, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(BYPASS, param_aggr, param_reg);
}
static int rpm_vreg_aggregate_requests(struct rpm_regulator *regulator)
@@ -682,7 +685,7 @@
* regulator_set_voltage function to the actual corner values
* sent to the RPM.
*/
- corner = min_uV - RPM_REGULATOR_CORNER_RETENTION;
+ corner = min_uV - RPM_REGULATOR_CORNER_NONE;
if (corner < params[RPM_REGULATOR_PARAM_CORNER].min
|| corner > params[RPM_REGULATOR_PARAM_CORNER].max) {
@@ -716,7 +719,7 @@
struct rpm_regulator *reg = rdev_get_drvdata(rdev);
return reg->req.param[RPM_REGULATOR_PARAM_CORNER]
- + RPM_REGULATOR_CORNER_RETENTION;
+ + RPM_REGULATOR_CORNER_NONE;
}
static int rpm_vreg_set_mode(struct regulator_dev *rdev, unsigned int mode)
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index b8bb27b..0faafc8 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -24,6 +24,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/slab.h>
@@ -36,6 +37,19 @@
#include <mach/rpm-smd.h>
#include "rpm-notifier.h"
+/* Debug Definitions */
+
+enum {
+ MSM_RPM_LOG_REQUEST_PRETTY = BIT(0),
+ MSM_RPM_LOG_REQUEST_RAW = BIT(1),
+ MSM_RPM_LOG_REQUEST_SHOW_MSG_ID = BIT(2),
+};
+
+static int msm_rpm_debug_mask;
+module_param_named(
+ debug_mask, msm_rpm_debug_mask, int, S_IRUGO | S_IWUSR
+);
+
struct msm_rpm_driver_data {
const char *ch_name;
uint32_t ch_type;
@@ -492,6 +506,140 @@
}
}
+#define DEBUG_PRINT_BUFFER_SIZE 512
+
+static void msm_rpm_log_request(struct msm_rpm_request *cdata)
+{
+ char buf[DEBUG_PRINT_BUFFER_SIZE];
+ size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+ char name[5];
+ u32 value;
+ int i, j, prev_valid;
+ int valid_count = 0;
+ int pos = 0;
+
+ name[4] = 0;
+
+ for (i = 0; i < cdata->write_idx; i++)
+ if (cdata->kvp[i].valid)
+ valid_count++;
+
+ pos += scnprintf(buf + pos, buflen - pos, "%sRPM req: ", KERN_INFO);
+ if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_SHOW_MSG_ID)
+ pos += scnprintf(buf + pos, buflen - pos, "msg_id=%u, ",
+ cdata->msg_hdr.msg_id);
+ pos += scnprintf(buf + pos, buflen - pos, "s=%s",
+ (cdata->msg_hdr.set == MSM_RPM_CTX_ACTIVE_SET ? "act" : "slp"));
+
+ if ((msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY)
+ && (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_RAW)) {
+ /* Both pretty and raw formatting */
+ memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t));
+ pos += scnprintf(buf + pos, buflen - pos,
+ ", rsc_type=0x%08X (%s), rsc_id=%u; ",
+ cdata->msg_hdr.resource_type, name,
+ cdata->msg_hdr.resource_id);
+
+ for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+ if (!cdata->kvp[i].valid)
+ continue;
+
+ memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
+ pos += scnprintf(buf + pos, buflen - pos,
+ "[key=0x%08X (%s), value=%s",
+ cdata->kvp[i].key, name,
+ (cdata->kvp[i].nbytes ? "0x" : "null"));
+
+ for (j = 0; j < cdata->kvp[i].nbytes; j++)
+ pos += scnprintf(buf + pos, buflen - pos,
+ "%02X ",
+ cdata->kvp[i].value[j]);
+
+ if (cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos, buflen - pos, "(");
+
+ for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
+ value = 0;
+ memcpy(&value, &cdata->kvp[i].value[j],
+ min(sizeof(uint32_t),
+ cdata->kvp[i].nbytes - j));
+ pos += scnprintf(buf + pos, buflen - pos, "%u",
+ value);
+ if (j + 4 < cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos,
+ buflen - pos, " ");
+ }
+ if (cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos, buflen - pos, ")");
+ pos += scnprintf(buf + pos, buflen - pos, "]");
+ if (prev_valid + 1 < valid_count)
+ pos += scnprintf(buf + pos, buflen - pos, ", ");
+ prev_valid++;
+ }
+ } else if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) {
+ /* Pretty formatting only */
+ memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t));
+ pos += scnprintf(buf + pos, buflen - pos, " %s %u; ", name,
+ cdata->msg_hdr.resource_id);
+
+ for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+ if (!cdata->kvp[i].valid)
+ continue;
+
+ memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
+ pos += scnprintf(buf + pos, buflen - pos, "%s=%s",
+ name, (cdata->kvp[i].nbytes ? "" : "null"));
+
+ for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
+ value = 0;
+ memcpy(&value, &cdata->kvp[i].value[j],
+ min(sizeof(uint32_t),
+ cdata->kvp[i].nbytes - j));
+ pos += scnprintf(buf + pos, buflen - pos, "%u",
+ value);
+
+ if (j + 4 < cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos,
+ buflen - pos, " ");
+ }
+ if (prev_valid + 1 < valid_count)
+ pos += scnprintf(buf + pos, buflen - pos, ", ");
+ prev_valid++;
+ }
+ } else {
+ /* Raw formatting only */
+ pos += scnprintf(buf + pos, buflen - pos,
+ ", rsc_type=0x%08X, rsc_id=%u; ",
+ cdata->msg_hdr.resource_type,
+ cdata->msg_hdr.resource_id);
+
+ for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+ if (!cdata->kvp[i].valid)
+ continue;
+
+ pos += scnprintf(buf + pos, buflen - pos,
+ "[key=0x%08X, value=%s",
+ cdata->kvp[i].key,
+ (cdata->kvp[i].nbytes ? "0x" : "null"));
+ for (j = 0; j < cdata->kvp[i].nbytes; j++) {
+ pos += scnprintf(buf + pos, buflen - pos,
+ "%02X",
+ cdata->kvp[i].value[j]);
+ if (j + 1 < cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos,
+ buflen - pos, " ");
+ }
+ pos += scnprintf(buf + pos, buflen - pos, "]");
+ if (prev_valid + 1 < valid_count)
+ pos += scnprintf(buf + pos, buflen - pos, ", ");
+ prev_valid++;
+ }
+ }
+
+ pos += scnprintf(buf + pos, buflen - pos, "\n");
+ printk(buf);
+}
+
static int msm_rpm_send_data(struct msm_rpm_request *cdata,
int msg_type, bool noirq)
{
@@ -546,6 +694,10 @@
tmpbuff += cdata->kvp[i].nbytes;
}
+ if (msm_rpm_debug_mask
+ & (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW))
+ msm_rpm_log_request(cdata);
+
if (standalone) {
for (i = 0; (i < cdata->write_idx); i++)
cdata->kvp[i].valid = false;
@@ -734,6 +886,27 @@
return rc;
}
EXPORT_SYMBOL(msm_rpm_send_message_noirq);
+
+/**
+ * During power collapse, the rpm driver disables the SMD interrupts to make
+ * sure that the interrupt doesn't wake us from sleep.
+ */
+int msm_rpm_enter_sleep(void)
+{
+ return smd_mask_receive_interrupt(msm_rpm_data.ch_info, true);
+}
+EXPORT_SYMBOL(msm_rpm_enter_sleep);
+
+/**
+ * When the system resumes from power collapse, the SMD interrupt disabled by
+ * enter function has to be re-enabled to continue processing SMD messages.
+ */
+void msm_rpm_exit_sleep(void)
+{
+ smd_mask_receive_interrupt(msm_rpm_data.ch_info, false);
+}
+EXPORT_SYMBOL(msm_rpm_exit_sleep);
+
static bool msm_rpm_set_standalone(void)
{
if (machine_is_msm8974()) {
diff --git a/arch/arm/mach-msm/rpm_log.c b/arch/arm/mach-msm/rpm_log.c
index 3ed55da..4835cef 100644
--- a/arch/arm/mach-msm/rpm_log.c
+++ b/arch/arm/mach-msm/rpm_log.c
@@ -115,13 +115,11 @@
continue;
}
/*
- * Ensure that the reported buffer size is within limits of
- * known maximum size and that all indices are 4 byte aligned.
- * These conditions are required to interact with a ULog buffer
+ * Ensure that all indices are 4 byte aligned.
+ * This condition is required to interact with a ULog buffer
* properly.
*/
- if (tail_idx - head_idx > pdata->log_len ||
- !IS_ALIGNED((tail_idx | head_idx | *read_idx), 4))
+ if (!IS_ALIGNED((tail_idx | head_idx | *read_idx), 4))
break;
msg_len = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_BUFFER,
diff --git a/arch/arm/mach-msm/rpm_rbcpr_stats.c b/arch/arm/mach-msm/rpm_rbcpr_stats.c
new file mode 100644
index 0000000..7f27efc
--- /dev/null
+++ b/arch/arm/mach-msm/rpm_rbcpr_stats.c
@@ -0,0 +1,415 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/sort.h>
+#include <asm/uaccess.h>
+#include <mach/msm_iomap.h>
+#include "timer.h"
+#include "rpm_rbcpr_stats.h"
+
+#define RBCPR_USER_BUF (2000)
+#define STR(a) (#a)
+#define GETFIELD(a) ((strnstr(STR(a), "->", 80) + 2))
+#define PRINTFIELD(buf, buf_size, pos, format, ...) \
+ ((pos < buf_size) ? snprintf((buf + pos), (buf_size - pos), format,\
+ ## __VA_ARGS__) : 0)
+
+enum {
+ RBCPR_CORNER_SVS = 0,
+ RBCPR_CORNER_NOMINAL,
+ RBCPR_CORNER_TURBO,
+ RBCPR_CORNERS_COUNT,
+ RBCPR_CORNER_INVALID = 0x7FFFFFFF,
+};
+
+struct msm_rpmrbcpr_recmnd {
+ uint32_t voltage;
+ uint32_t timestamp;
+};
+
+struct msm_rpmrbcpr_corners {
+ int efuse_adjustment;
+ struct msm_rpmrbcpr_recmnd *rpm_rcmnd;
+ uint32_t programmed_voltage;
+ uint32_t isr_counter;
+ uint32_t min_counter;
+ uint32_t max_counter;
+};
+
+struct msm_rpmrbcpr_stats {
+ uint32_t status_count;
+ uint32_t num_corners;
+ uint32_t num_latest_recommends;
+ struct msm_rpmrbcpr_corners *rbcpr_corners;
+ uint32_t current_corner;
+ uint32_t railway_voltage;
+ uint32_t enable;
+};
+
+struct msm_rpmrbcpr_stats_internal {
+ void __iomem *regbase;
+ uint32_t len;
+ char buf[RBCPR_USER_BUF];
+};
+
+static DEFINE_SPINLOCK(rpm_rbcpr_lock);
+static struct msm_rpmrbcpr_design_data rbcpr_design_data;
+static struct msm_rpmrbcpr_stats rbcpr_stats;
+static struct msm_rpmrbcpr_stats_internal pvtdata;
+
+static inline unsigned long msm_rpmrbcpr_read_data(void __iomem *regbase,
+ int offset)
+{
+ return readl_relaxed(regbase + (offset * 4));
+}
+
+static int msm_rpmrbcpr_cmp_func(const void *a, const void *b)
+{
+ struct msm_rpmrbcpr_recmnd *pa = (struct msm_rpmrbcpr_recmnd *)(a);
+ struct msm_rpmrbcpr_recmnd *pb = (struct msm_rpmrbcpr_recmnd *)(b);
+ return (pa->timestamp > pb->timestamp) - (pa->timestamp < pb->timestamp);
+}
+
+static char *msm_rpmrbcpr_corner_string(uint32_t corner)
+{
+ switch (corner) {
+ case RBCPR_CORNER_SVS:
+ return STR(RBCPR_CORNER_SVS);
+ break;
+ case RBCPR_CORNER_NOMINAL:
+ return STR(RBCPR_CORNER_NOMINAL);
+ break;
+ case RBCPR_CORNER_TURBO:
+ return STR(RBCPR_CORNER_TURBO);
+ break;
+ case RBCPR_CORNERS_COUNT:
+ case RBCPR_CORNER_INVALID:
+ default:
+ return STR(RBCPR_CORNER_INVALID);
+ break;
+ }
+}
+
+static int msm_rpmrbcpr_print_buf(struct msm_rpmrbcpr_stats *pdata,
+ struct msm_rpmrbcpr_design_data *pdesdata,
+ char *buf)
+{
+ int pos = 0;
+ struct msm_rpmrbcpr_corners *corners;
+ struct msm_rpmrbcpr_recmnd *rcmnd;
+ int i, j;
+ int current_timestamp = msm_timer_get_sclk_ticks();
+
+ if (!pdata->enable) {
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ "RBCPR Stats not enabled at RPM");
+ return pos;
+ }
+
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ ":RBCPR Platform Data");
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %u)", GETFIELD(pdesdata->upside_steps),
+ pdesdata->upside_steps);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s:%u)", GETFIELD(pdesdata->downside_steps),
+ pdesdata->downside_steps);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %d)", GETFIELD(pdesdata->svs_voltage),
+ pdesdata->svs_voltage);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %d)", GETFIELD(pdesdata->nominal_voltage),
+ pdesdata->nominal_voltage);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %d)\n", GETFIELD(pdesdata->turbo_voltage),
+ pdesdata->turbo_voltage);
+
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ ":RBCPR Stats");
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %u)", GETFIELD(pdata->status_counter),
+ pdata->status_count);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %s)", GETFIELD(pdata->current_corner),
+ msm_rpmrbcpr_corner_string(pdata->current_corner));
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (current_timestamp: 0x%x)",
+ current_timestamp);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %u)\n", GETFIELD(pdata->railway_voltage),
+ pdata->railway_voltage);
+
+ for (i = 0; i < pdata->num_corners; i++) {
+ corners = &pdata->rbcpr_corners[i];
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ ":\tRBCPR Corner Data");
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (name: %s)", msm_rpmrbcpr_corner_string(i));
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %d)", GETFIELD(corners->efuse_adjustment),
+ corners->efuse_adjustment);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %u)", GETFIELD(corners->programmed_voltage),
+ corners->programmed_voltage);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %u)", GETFIELD(corners->isr_counter),
+ corners->isr_counter);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ "(%s: %u)", GETFIELD(corners->min_counter),
+ corners->min_counter);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ "(%s:%u)\n", GETFIELD(corners->max_counter),
+ corners->max_counter);
+ for (j = 0; j < pdata->num_latest_recommends; j++) {
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ ":\t\tVoltage History[%d]", j);
+ rcmnd = &corners->rpm_rcmnd[j];
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: %u)", GETFIELD(rcmnd->voltage),
+ rcmnd->voltage);
+ pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+ " (%s: 0x%x)\n", GETFIELD(rcmnd->timestamp),
+ rcmnd->timestamp);
+ }
+ }
+ return pos;
+}
+
+
+static void msm_rpmrbcpr_copy_data(struct msm_rpmrbcpr_stats_internal *pdata,
+ struct msm_rpmrbcpr_stats *prbcpr_stats)
+{
+ struct msm_rpmrbcpr_corners *corners;
+ struct msm_rpmrbcpr_recmnd *rcmnd;
+ int i, j;
+ int offset = (offsetof(struct msm_rpmrbcpr_stats, rbcpr_corners) / 4);
+
+ if (!prbcpr_stats)
+ return;
+
+ for (i = 0; i < prbcpr_stats->num_corners; i++) {
+ corners = &prbcpr_stats->rbcpr_corners[i];
+ corners->efuse_adjustment = msm_rpmrbcpr_read_data(
+ pdata->regbase, offset++);
+ for (j = 0; j < prbcpr_stats->num_latest_recommends; j++) {
+ rcmnd = &corners->rpm_rcmnd[j];
+ rcmnd->voltage = msm_rpmrbcpr_read_data(
+ pdata->regbase, offset++);
+ rcmnd->timestamp = msm_rpmrbcpr_read_data(
+ pdata->regbase, offset++);
+ }
+ sort(&corners->rpm_rcmnd[0],
+ prbcpr_stats->num_latest_recommends,
+ sizeof(struct msm_rpmrbcpr_recmnd),
+ msm_rpmrbcpr_cmp_func, NULL);
+ corners->programmed_voltage = msm_rpmrbcpr_read_data(
+ pdata->regbase, offset++);
+ corners->isr_counter = msm_rpmrbcpr_read_data(pdata->regbase,
+ offset++);
+ corners->min_counter = msm_rpmrbcpr_read_data(pdata->regbase,
+ offset++);
+ corners->max_counter = msm_rpmrbcpr_read_data(pdata->regbase,
+ offset++);
+ }
+ prbcpr_stats->current_corner = msm_rpmrbcpr_read_data(pdata->regbase,
+ offset++);
+ prbcpr_stats->railway_voltage = msm_rpmrbcpr_read_data
+ (pdata->regbase, offset++);
+ prbcpr_stats->enable = msm_rpmrbcpr_read_data(pdata->regbase, offset++);
+}
+
+static ssize_t msm_rpmrbcpr_file_read(struct file *file, char __user *bufu,
+ size_t count, loff_t *ppos)
+{
+ struct msm_rpmrbcpr_stats_internal *pdata = file->private_data;
+ int ret;
+ int status_counter;
+
+ if (!pdata) {
+ pr_info("%s pdata is null", __func__);
+ return -EINVAL;
+ }
+
+ if (!bufu) {
+ pr_info("%s count %zu ", __func__, count);
+ return -EINVAL;
+ }
+
+ if (*ppos > pdata->len || !pdata->len) {
+ /* Read RPM stats */
+ status_counter = readl_relaxed(pdata->regbase +
+ offsetof(struct msm_rpmrbcpr_stats, status_count));
+ if (status_counter != rbcpr_stats.status_count) {
+ spin_lock(&rpm_rbcpr_lock);
+ msm_rpmrbcpr_copy_data(pdata, &rbcpr_stats);
+ rbcpr_stats.status_count = status_counter;
+ spin_unlock(&rpm_rbcpr_lock);
+ }
+ pdata->len = msm_rpmrbcpr_print_buf(&rbcpr_stats,
+ &rbcpr_design_data, pdata->buf);
+ *ppos = 0;
+ }
+ /* copy to user data */
+ ret = simple_read_from_buffer(bufu, count, ppos, pdata->buf,
+ pdata->len);
+ return ret;
+}
+
+static void msm_rpmrbcpr_free_mem(struct msm_rpmrbcpr_stats_internal *pvtdata,
+ struct msm_rpmrbcpr_stats *prbcpr_stats)
+{
+ int i;
+ if (pvtdata->regbase)
+ iounmap(pvtdata->regbase);
+
+
+ if (prbcpr_stats) {
+ for (i = 0; i < prbcpr_stats->num_corners; i++) {
+ kfree(prbcpr_stats->rbcpr_corners[i].rpm_rcmnd);
+ prbcpr_stats->rbcpr_corners[i].rpm_rcmnd = NULL;
+ }
+
+ kfree(prbcpr_stats->rbcpr_corners);
+ prbcpr_stats->rbcpr_corners = NULL;
+ }
+}
+
+static int msm_rpmrbcpr_allocate_mem(struct msm_rpmrbcpr_platform_data *pdata,
+ struct resource *res)
+{
+ int i;
+
+ pvtdata.regbase = ioremap(res->start, (res->end - res->start + 1));
+ memcpy(&rbcpr_design_data, &pdata->rbcpr_data,
+ sizeof(struct msm_rpmrbcpr_design_data));
+
+
+ rbcpr_stats.num_corners = readl_relaxed(pvtdata.regbase +
+ offsetof(struct msm_rpmrbcpr_stats, num_corners));
+ rbcpr_stats.num_latest_recommends = readl_relaxed(pvtdata.regbase +
+ offsetof(struct msm_rpmrbcpr_stats,
+ num_latest_recommends));
+
+ rbcpr_stats.rbcpr_corners = kzalloc(
+ sizeof(struct msm_rpmrbcpr_corners)
+ * rbcpr_stats.num_corners, GFP_KERNEL);
+
+ if (!rbcpr_stats.rbcpr_corners) {
+ msm_rpmrbcpr_free_mem(&pvtdata, &rbcpr_stats);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < rbcpr_stats.num_corners; i++) {
+ rbcpr_stats.rbcpr_corners[i].rpm_rcmnd =
+ kzalloc(sizeof(struct msm_rpmrbcpr_recmnd)
+ * rbcpr_stats.num_latest_recommends,
+ GFP_KERNEL);
+
+ if (!rbcpr_stats.rbcpr_corners[i].rpm_rcmnd) {
+ msm_rpmrbcpr_free_mem(&pvtdata, &rbcpr_stats);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static int msm_rpmrbcpr_file_open(struct inode *inode, struct file *file)
+{
+ file->private_data = &pvtdata;
+ pvtdata.len = 0;
+
+ if (!pvtdata.regbase)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int msm_rpmrbcpr_file_close(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations msm_rpmrbcpr_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_rpmrbcpr_file_open,
+ .read = msm_rpmrbcpr_file_read,
+ .release = msm_rpmrbcpr_file_close,
+ .llseek = no_llseek,
+};
+
+static int __devinit msm_rpmrbcpr_probe(struct platform_device *pdev)
+{
+ struct dentry *dent;
+ struct msm_rpmrbcpr_platform_data *pdata;
+ int ret = 0;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata)
+ return -EINVAL;
+ dent = debugfs_create_file("rpm_rbcpr", S_IRUGO, NULL,
+ pdev->dev.platform_data, &msm_rpmrbcpr_fops);
+
+ if (!dent) {
+ pr_err("%s: ERROR debugfs_create_file failed\n", __func__);
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, dent);
+ ret = msm_rpmrbcpr_allocate_mem(pdata, pdev->resource);
+ return ret;
+}
+
+static int __devexit msm_rpmrbcpr_remove(struct platform_device *pdev)
+{
+ struct dentry *dent;
+
+ msm_rpmrbcpr_free_mem(&pvtdata, &rbcpr_stats);
+ dent = platform_get_drvdata(pdev);
+ debugfs_remove(dent);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver msm_rpmrbcpr_driver = {
+ .probe = msm_rpmrbcpr_probe,
+ .remove = __devexit_p(msm_rpmrbcpr_remove),
+ .driver = {
+ .name = "msm_rpm_rbcpr",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msm_rpmrbcpr_init(void)
+{
+ return platform_driver_register(&msm_rpmrbcpr_driver);
+}
+
+static void __exit msm_rpmrbcpr_exit(void)
+{
+ platform_driver_unregister(&msm_rpmrbcpr_driver);
+}
+
+module_init(msm_rpmrbcpr_init);
+module_exit(msm_rpmrbcpr_exit);
diff --git a/arch/arm/mach-msm/rpm_rbcpr_stats.h b/arch/arm/mach-msm/rpm_rbcpr_stats.h
new file mode 100644
index 0000000..55644d0
--- /dev/null
+++ b/arch/arm/mach-msm/rpm_rbcpr_stats.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_RBCPR_STATS_H
+#define __ARCH_ARM_MACH_MSM_RPM_RBCPR_STATS_H
+
+#include <linux/types.h>
+
+struct msm_rpmrbcpr_design_data {
+ u32 upside_steps;
+ u32 downside_steps;
+ int svs_voltage;
+ int nominal_voltage;
+ int turbo_voltage;
+};
+
+struct msm_rpmrbcpr_platform_data {
+ struct msm_rpmrbcpr_design_data rbcpr_data;
+};
+#endif
diff --git a/arch/arm/mach-msm/rpm_resources.c b/arch/arm/mach-msm/rpm_resources.c
index a88e42e..9d794e7 100644
--- a/arch/arm/mach-msm/rpm_resources.c
+++ b/arch/arm/mach-msm/rpm_resources.c
@@ -967,7 +967,7 @@
return rc;
if (msm_rpmrs_use_mpm(limits))
- msm_mpm_enter_sleep(from_idle);
+ msm_mpm_enter_sleep(sclk_count, from_idle);
}
rc = msm_rpmrs_flush_L2(limits, notify_rpm);
diff --git a/arch/arm/mach-msm/scm-pas.c b/arch/arm/mach-msm/scm-pas.c
index 4096d9c..55ae2f8 100644
--- a/arch/arm/mach-msm/scm-pas.c
+++ b/arch/arm/mach-msm/scm-pas.c
@@ -94,7 +94,7 @@
{
int ret = 0;
- if (!scm_perf_client || !scm_bus_clk)
+ if (!scm_perf_client)
return -EINVAL;
mutex_lock(&scm_pas_bw_mutex);
@@ -102,7 +102,7 @@
ret = msm_bus_scale_client_update_request(scm_perf_client, 1);
if (ret) {
pr_err("bandwidth request failed (%d)\n", ret);
- } else {
+ } else if (scm_bus_clk) {
ret = clk_prepare_enable(scm_bus_clk);
if (ret)
pr_err("clock enable failed\n");
@@ -121,7 +121,8 @@
mutex_lock(&scm_pas_bw_mutex);
if (scm_pas_bw_count-- == 1) {
msm_bus_scale_client_update_request(scm_perf_client, 0);
- clk_disable_unprepare(scm_bus_clk);
+ if (scm_bus_clk)
+ clk_disable_unprepare(scm_bus_clk);
}
mutex_unlock(&scm_pas_bw_mutex);
}
@@ -190,16 +191,23 @@
static int __init scm_pas_init(void)
{
+ if (cpu_is_msm8974()) {
+ scm_pas_bw_tbl[0].vectors[0].src = MSM_BUS_MASTER_CRYPTO_CORE0;
+ scm_pas_bw_tbl[1].vectors[0].src = MSM_BUS_MASTER_CRYPTO_CORE0;
+ } else {
+ scm_bus_clk = clk_get_sys("scm", "bus_clk");
+ if (!IS_ERR(scm_bus_clk)) {
+ clk_set_rate(scm_bus_clk, 64000000);
+ } else {
+ scm_bus_clk = NULL;
+ pr_warn("unable to get bus clock\n");
+ }
+ }
+
scm_perf_client = msm_bus_scale_register_client(&scm_pas_bus_pdata);
if (!scm_perf_client)
pr_warn("unable to register bus client\n");
- scm_bus_clk = clk_get_sys("scm", "bus_clk");
- if (!IS_ERR(scm_bus_clk)) {
- clk_set_rate(scm_bus_clk, 64000000);
- } else {
- scm_bus_clk = NULL;
- pr_warn("unable to get bus clock\n");
- }
+
return 0;
}
module_init(scm_pas_init);
diff --git a/arch/arm/mach-msm/scm-pas.h b/arch/arm/mach-msm/scm-pas.h
index 2fe71a9..dd24e20 100644
--- a/arch/arm/mach-msm/scm-pas.h
+++ b/arch/arm/mach-msm/scm-pas.h
@@ -19,7 +19,7 @@
PAS_TZAPPS,
PAS_MODEM_SW,
PAS_MODEM_FW,
- PAS_RIVA,
+ PAS_WCNSS,
PAS_SECAPP,
PAS_GSS,
PAS_VIDC,
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index 54512ab..decee95 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -124,6 +124,7 @@
uint32_t out_bit_pos;
void __iomem *out_base;
uint32_t out_offset;
+ int irq_id;
};
struct interrupt_config {
@@ -559,7 +560,11 @@
* on DEM-based targets. Grabbing a wakelock in this case will
* abort the power-down sequencing.
*/
- smsm_cb_snapshot(0);
+ if (smsm_info.intr_mask &&
+ (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
+ & notify_mask)) {
+ smsm_cb_snapshot(0);
+ }
}
void smd_diag(void)
@@ -2124,6 +2129,56 @@
}
EXPORT_SYMBOL(smd_disable_read_intr);
+/**
+ * Enable/disable receive interrupts for the remote processor used by a
+ * particular channel.
+ * @ch: open channel handle to use for the edge
+ * @mask: 1 = mask interrupts; 0 = unmask interrupts
+ * @returns: 0 for success; < 0 for failure
+ *
+ * Note that this enables/disables all interrupts from the remote subsystem for
+ * all channels. As such, it should be used with care and only for specific
+ * use cases such as power-collapse sequencing.
+ */
+int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
+{
+ struct irq_chip *irq_chip;
+ struct irq_data *irq_data;
+ struct interrupt_config_item *int_cfg;
+
+ if (!ch)
+ return -EINVAL;
+
+ if (ch->type >= ARRAY_SIZE(edge_to_pids))
+ return -ENODEV;
+
+ int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;
+
+ if (int_cfg->irq_id < 0)
+ return -ENODEV;
+
+ irq_chip = irq_get_chip(int_cfg->irq_id);
+ if (!irq_chip)
+ return -ENODEV;
+
+ irq_data = irq_get_irq_data(int_cfg->irq_id);
+ if (!irq_data)
+ return -ENODEV;
+
+ if (mask) {
+ SMx_POWER_INFO("SMD Masking interrupts from %s\n",
+ edge_to_pids[ch->type].subsys_name);
+ irq_chip->irq_mask(irq_data);
+ } else {
+ SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
+ edge_to_pids[ch->type].subsys_name);
+ irq_chip->irq_unmask(irq_data);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(smd_mask_receive_interrupt);
+
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
return -1;
@@ -3242,8 +3297,10 @@
);
if (ret < 0) {
platform_irq->irq_id = ret;
+ private_irq->irq_id = ret;
} else {
platform_irq->irq_id = irq_id;
+ private_irq->irq_id = irq_id;
ret_wake = enable_irq_wake(irq_id);
if (ret_wake < 0) {
pr_err("smd: enable_irq_wake failed on %s",
diff --git a/arch/arm/mach-msm/smd_pkt.c b/arch/arm/mach-msm/smd_pkt.c
index fdbc387..b9fe341 100644
--- a/arch/arm/mach-msm/smd_pkt.c
+++ b/arch/arm/mach-msm/smd_pkt.c
@@ -725,6 +725,8 @@
SMD_APPS_MODEM,
};
#endif
+module_param_named(loopback_edge, smd_ch_edge[LOOPBACK_INX],
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
static int smd_pkt_dummy_probe(struct platform_device *pdev)
{
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 281e7b8..39fbba8 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -628,11 +628,8 @@
static void * __init setup_dummy_socinfo(void)
{
- if (machine_is_msm8960_rumi3() || machine_is_msm8960_sim() ||
- machine_is_msm8960_cdp())
+ if (machine_is_msm8960_cdp())
dummy_socinfo.id = 87;
- else if (machine_is_apq8064_rumi3() || machine_is_apq8064_sim())
- dummy_socinfo.id = 109;
else if (machine_is_msm9615_mtp() || machine_is_msm9615_cdp())
dummy_socinfo.id = 104;
else if (early_machine_is_msm8974()) {
@@ -757,9 +754,7 @@
if (!(read_cpuid_mpidr() & BIT(31)))
return 1;
- if (read_cpuid_mpidr() & BIT(30) &&
- !machine_is_msm8960_sim() &&
- !machine_is_apq8064_sim())
+ if (read_cpuid_mpidr() & BIT(30))
return 1;
/* 1 + the PART[1:0] field of MIDR */
@@ -768,9 +763,6 @@
const int read_msm_cpu_type(void)
{
- if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3())
- return MSM_CPU_8960;
-
if (socinfo_get_msm_cpu() != MSM_CPU_UNKNOWN)
return socinfo_get_msm_cpu();
@@ -793,11 +785,20 @@
case 0x510F06F0:
return MSM_CPU_8064;
+ case 0x511F06F1:
+ case 0x512F06F0:
+ return MSM_CPU_8974;
+
default:
return MSM_CPU_UNKNOWN;
};
}
+const int cpu_is_krait(void)
+{
+ return ((read_cpuid_id() & 0xFF00FC00) == 0x51000400);
+}
+
const int cpu_is_krait_v1(void)
{
switch (read_cpuid_id()) {
@@ -810,3 +811,22 @@
return 0;
};
}
+
+const int cpu_is_krait_v2(void)
+{
+ switch (read_cpuid_id()) {
+ case 0x511F04D0:
+ case 0x511F04D1:
+ case 0x511F04D2:
+ case 0x511F04D3:
+ case 0x511F04D4:
+
+ case 0x510F06F0:
+ case 0x510F06F1:
+ case 0x510F06F2:
+ return 1;
+
+ default:
+ return 0;
+ };
+}
diff --git a/arch/arm/mach-msm/subsystem_restart.c b/arch/arm/mach-msm/subsystem_restart.c
index 0b6f225..65da903 100644
--- a/arch/arm/mach-msm/subsystem_restart.c
+++ b/arch/arm/mach-msm/subsystem_restart.c
@@ -17,7 +17,6 @@
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/fs.h>
-#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/io.h>
@@ -25,11 +24,13 @@
#include <linux/time.h>
#include <linux/wakelock.h>
#include <linux/suspend.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <asm/current.h>
#include <mach/peripheral-loader.h>
-#include <mach/scm.h>
#include <mach/socinfo.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
@@ -42,30 +43,40 @@
struct mutex shutdown_lock;
struct mutex powerup_lock;
- struct subsys_data *subsys_ptrs[];
-};
-
-struct restart_wq_data {
- struct subsys_data *subsys;
- struct wake_lock ssr_wake_lock;
- char wlname[64];
- int use_restart_order;
- struct work_struct work;
+ struct subsys_device *subsys_ptrs[];
};
struct restart_log {
struct timeval time;
- struct subsys_data *subsys;
+ struct subsys_device *dev;
struct list_head list;
};
-static int restart_level;
+struct subsys_device {
+ struct subsys_desc *desc;
+ struct list_head list;
+ struct wake_lock wake_lock;
+ char wlname[64];
+ struct work_struct work;
+ spinlock_t restart_lock;
+ bool restarting;
+
+ void *notify;
+
+ struct mutex shutdown_lock;
+ struct mutex powerup_lock;
+
+ void *restart_order;
+};
+
static int enable_ramdumps;
+module_param(enable_ramdumps, int, S_IRUGO | S_IWUSR);
+
struct workqueue_struct *ssr_wq;
static LIST_HEAD(restart_log_list);
static LIST_HEAD(subsystem_list);
-static DEFINE_SPINLOCK(subsystem_list_lock);
+static DEFINE_MUTEX(subsystem_list_lock);
static DEFINE_MUTEX(soc_order_reg_lock);
static DEFINE_MUTEX(restart_log_mutex);
@@ -122,10 +133,7 @@
static struct subsys_soc_restart_order **restart_orders;
static int n_restart_orders;
-module_param(enable_ramdumps, int, S_IRUGO | S_IWUSR);
-
-static struct subsys_soc_restart_order *_update_restart_order(
- struct subsys_data *subsys);
+static int restart_level = RESET_SOC;
int get_restart_level()
{
@@ -148,18 +156,14 @@
return ret;
switch (restart_level) {
-
case RESET_SOC:
case RESET_SUBSYS_COUPLED:
case RESET_SUBSYS_INDEPENDENT:
pr_info("Phase %d behavior activated.\n", restart_level);
- break;
-
+ break;
default:
restart_level = old_val;
return -EINVAL;
- break;
-
}
return 0;
}
@@ -167,62 +171,29 @@
module_param_call(restart_level, restart_level_set, param_get_int,
&restart_level, 0644);
-static struct subsys_data *_find_subsystem(const char *subsys_name)
-{
- struct subsys_data *subsys;
- unsigned long flags;
-
- spin_lock_irqsave(&subsystem_list_lock, flags);
- list_for_each_entry(subsys, &subsystem_list, list)
- if (!strncmp(subsys->name, subsys_name,
- SUBSYS_NAME_MAX_LENGTH)) {
- spin_unlock_irqrestore(&subsystem_list_lock, flags);
- return subsys;
- }
- spin_unlock_irqrestore(&subsystem_list_lock, flags);
-
- return NULL;
-}
-
-static struct subsys_soc_restart_order *_update_restart_order(
- struct subsys_data *subsys)
+static struct subsys_soc_restart_order *
+update_restart_order(struct subsys_device *dev)
{
int i, j;
-
- if (!subsys)
- return NULL;
-
- if (!subsys->name)
- return NULL;
+ struct subsys_soc_restart_order *order;
+ const char *name = dev->desc->name;
+ int len = SUBSYS_NAME_MAX_LENGTH;
mutex_lock(&soc_order_reg_lock);
for (j = 0; j < n_restart_orders; j++) {
- for (i = 0; i < restart_orders[j]->count; i++)
- if (!strncmp(restart_orders[j]->subsystem_list[i],
- subsys->name, SUBSYS_NAME_MAX_LENGTH)) {
-
- restart_orders[j]->subsys_ptrs[i] =
- subsys;
- mutex_unlock(&soc_order_reg_lock);
- return restart_orders[j];
+ order = restart_orders[j];
+ for (i = 0; i < order->count; i++) {
+ if (!strncmp(order->subsystem_list[i], name, len)) {
+ order->subsys_ptrs[i] = dev;
+ goto found;
}
+ }
}
-
+ order = NULL;
+found:
mutex_unlock(&soc_order_reg_lock);
- return NULL;
-}
-
-static void _send_notification_to_order(struct subsys_data
- **restart_list, int count,
- enum subsys_notif_type notif_type)
-{
- int i;
-
- for (i = 0; i < count; i++)
- if (restart_list[i])
- subsys_notif_queue_notification(
- restart_list[i]->notif_handle, notif_type);
+ return order;
}
static int max_restarts;
@@ -231,7 +202,7 @@
static long max_history_time = 3600;
module_param(max_history_time, long, 0644);
-static void do_epoch_check(struct subsys_data *subsys)
+static void do_epoch_check(struct subsys_device *dev)
{
int n = 0;
struct timeval *time_first = NULL, *curr_time;
@@ -251,7 +222,7 @@
r_log = kmalloc(sizeof(struct restart_log), GFP_KERNEL);
if (!r_log)
goto out;
- r_log->subsys = subsys;
+ r_log->dev = dev;
do_gettimeofday(&r_log->time);
curr_time = &r_log->time;
INIT_LIST_HEAD(&r_log->list);
@@ -289,42 +260,94 @@
mutex_unlock(&restart_log_mutex);
}
+static void for_each_subsys_device(struct subsys_device **list, unsigned count,
+ void *data, void (*fn)(struct subsys_device *, void *))
+{
+ while (count--) {
+ struct subsys_device *dev = *list++;
+ if (!dev)
+ continue;
+ fn(dev, data);
+ }
+}
+
+static void __send_notification_to_order(struct subsys_device *dev, void *data)
+{
+ enum subsys_notif_type type = (enum subsys_notif_type)data;
+
+ subsys_notif_queue_notification(dev->notify, type);
+}
+
+static void send_notification_to_order(struct subsys_device **l, unsigned n,
+ enum subsys_notif_type t)
+{
+ for_each_subsys_device(l, n, (void *)t, __send_notification_to_order);
+}
+
+static void subsystem_shutdown(struct subsys_device *dev, void *data)
+{
+ const char *name = dev->desc->name;
+
+ pr_info("[%p]: Shutting down %s\n", current, name);
+ if (dev->desc->shutdown(dev->desc) < 0)
+ panic("subsys-restart: [%p]: Failed to shutdown %s!",
+ current, name);
+}
+
+static void subsystem_ramdump(struct subsys_device *dev, void *data)
+{
+ const char *name = dev->desc->name;
+
+ if (dev->desc->ramdump)
+ if (dev->desc->ramdump(enable_ramdumps, dev->desc) < 0)
+ pr_warn("%s[%p]: Ramdump failed.\n", name, current);
+}
+
+static void subsystem_powerup(struct subsys_device *dev, void *data)
+{
+ const char *name = dev->desc->name;
+
+ pr_info("[%p]: Powering up %s\n", current, name);
+ if (dev->desc->powerup(dev->desc) < 0)
+ panic("[%p]: Failed to powerup %s!", current, name);
+}
+
static void subsystem_restart_wq_func(struct work_struct *work)
{
- struct restart_wq_data *r_work = container_of(work,
- struct restart_wq_data, work);
- struct subsys_data **restart_list;
- struct subsys_data *subsys = r_work->subsys;
+ struct subsys_device *dev = container_of(work,
+ struct subsys_device, work);
+ struct subsys_device **list;
+ struct subsys_desc *desc = dev->desc;
struct subsys_soc_restart_order *soc_restart_order = NULL;
-
struct mutex *powerup_lock;
struct mutex *shutdown_lock;
+ unsigned count;
+ unsigned long flags;
- int i;
- int restart_list_count = 0;
+ if (restart_level != RESET_SUBSYS_INDEPENDENT)
+ soc_restart_order = dev->restart_order;
- if (r_work->use_restart_order)
- soc_restart_order = subsys->restart_order;
-
- /* It's OK to not take the registration lock at this point.
+ /*
+ * It's OK to not take the registration lock at this point.
* This is because the subsystem list inside the relevant
* restart order is not being traversed.
*/
if (!soc_restart_order) {
- restart_list = subsys->single_restart_list;
- restart_list_count = 1;
- powerup_lock = &subsys->powerup_lock;
- shutdown_lock = &subsys->shutdown_lock;
+ list = &dev;
+ count = 1;
+ powerup_lock = &dev->powerup_lock;
+ shutdown_lock = &dev->shutdown_lock;
} else {
- restart_list = soc_restart_order->subsys_ptrs;
- restart_list_count = soc_restart_order->count;
+ list = soc_restart_order->subsys_ptrs;
+ count = soc_restart_order->count;
powerup_lock = &soc_restart_order->powerup_lock;
shutdown_lock = &soc_restart_order->shutdown_lock;
}
pr_debug("[%p]: Attempting to get shutdown lock!\n", current);
- /* Try to acquire shutdown_lock. If this fails, these subsystems are
+ /*
+ * Try to acquire shutdown_lock. If this fails, these subsystems are
* already being restarted - return.
*/
if (!mutex_trylock(shutdown_lock))
@@ -332,7 +355,8 @@
pr_debug("[%p]: Attempting to get powerup lock!\n", current);
- /* Now that we've acquired the shutdown lock, either we're the first to
+ /*
+ * Now that we've acquired the shutdown lock, either we're the first to
* restart these subsystems or some other thread is doing the powerup
* sequence for these subsystems. In the latter case, panic and bail
* out, since a subsystem died in its powerup sequence.
@@ -341,38 +365,23 @@
panic("%s[%p]: Subsystem died during powerup!",
__func__, current);
- do_epoch_check(subsys);
+ do_epoch_check(dev);
- /* Now it is necessary to take the registration lock. This is because
- * the subsystem list in the SoC restart order will be traversed
- * and it shouldn't be changed until _this_ restart sequence completes.
+ /*
+ * It's necessary to take the registration lock because the subsystem
+ * list in the SoC restart order will be traversed and it shouldn't be
+ * changed until _this_ restart sequence completes.
*/
mutex_lock(&soc_order_reg_lock);
pr_debug("[%p]: Starting restart sequence for %s\n", current,
- r_work->subsys->name);
+ desc->name);
+ send_notification_to_order(list, count, SUBSYS_BEFORE_SHUTDOWN);
+ for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+ send_notification_to_order(list, count, SUBSYS_AFTER_SHUTDOWN);
- _send_notification_to_order(restart_list,
- restart_list_count,
- SUBSYS_BEFORE_SHUTDOWN);
-
- for (i = 0; i < restart_list_count; i++) {
-
- if (!restart_list[i])
- continue;
-
- pr_info("[%p]: Shutting down %s\n", current,
- restart_list[i]->name);
-
- if (restart_list[i]->shutdown(subsys) < 0)
- panic("subsys-restart: %s[%p]: Failed to shutdown %s!",
- __func__, current, restart_list[i]->name);
- }
-
- _send_notification_to_order(restart_list, restart_list_count,
- SUBSYS_AFTER_SHUTDOWN);
-
- /* Now that we've finished shutting down these subsystems, release the
+ /*
+ * Now that we've finished shutting down these subsystems, release the
* shutdown lock. If a subsystem restart request comes in for a
* subsystem in _this_ restart order after the unlock below, and
* before the powerup lock is released, panic and bail out.
@@ -380,40 +389,14 @@
mutex_unlock(shutdown_lock);
/* Collect ram dumps for all subsystems in order here */
- for (i = 0; i < restart_list_count; i++) {
- if (!restart_list[i])
- continue;
+ for_each_subsys_device(list, count, NULL, subsystem_ramdump);
- if (restart_list[i]->ramdump)
- if (restart_list[i]->ramdump(enable_ramdumps,
- subsys) < 0)
- pr_warn("%s[%p]: Ramdump failed.\n",
- restart_list[i]->name, current);
- }
-
- _send_notification_to_order(restart_list,
- restart_list_count,
- SUBSYS_BEFORE_POWERUP);
-
- for (i = restart_list_count - 1; i >= 0; i--) {
-
- if (!restart_list[i])
- continue;
-
- pr_info("[%p]: Powering up %s\n", current,
- restart_list[i]->name);
-
- if (restart_list[i]->powerup(subsys) < 0)
- panic("%s[%p]: Failed to powerup %s!", __func__,
- current, restart_list[i]->name);
- }
-
- _send_notification_to_order(restart_list,
- restart_list_count,
- SUBSYS_AFTER_POWERUP);
+ send_notification_to_order(list, count, SUBSYS_BEFORE_POWERUP);
+ for_each_subsys_device(list, count, NULL, subsystem_powerup);
+ send_notification_to_order(list, count, SUBSYS_AFTER_POWERUP);
pr_info("[%p]: Restart sequence for %s completed.\n",
- current, r_work->subsys->name);
+ current, desc->name);
mutex_unlock(powerup_lock);
@@ -422,123 +405,119 @@
pr_debug("[%p]: Released powerup lock!\n", current);
out:
- wake_unlock(&r_work->ssr_wake_lock);
- wake_lock_destroy(&r_work->ssr_wake_lock);
- kfree(r_work);
+ spin_lock_irqsave(&dev->restart_lock, flags);
+ wake_unlock(&dev->wake_lock);
+ dev->restarting = false;
+ spin_unlock_irqrestore(&dev->restart_lock, flags);
}
-static void __subsystem_restart(struct subsys_data *subsys)
+static void __subsystem_restart_dev(struct subsys_device *dev)
{
- struct restart_wq_data *data = NULL;
- int rc;
+ struct subsys_desc *desc = dev->desc;
+ unsigned long flags;
- pr_debug("Restarting %s [level=%d]!\n", subsys->name,
+ spin_lock_irqsave(&dev->restart_lock, flags);
+ if (!dev->restarting) {
+ pr_debug("Restarting %s [level=%d]!\n", desc->name,
restart_level);
- data = kzalloc(sizeof(struct restart_wq_data), GFP_ATOMIC);
- if (!data)
- panic("%s: Unable to allocate memory to restart %s.",
- __func__, subsys->name);
-
- data->subsys = subsys;
-
- if (restart_level != RESET_SUBSYS_INDEPENDENT)
- data->use_restart_order = 1;
-
- snprintf(data->wlname, sizeof(data->wlname), "ssr(%s)", subsys->name);
- wake_lock_init(&data->ssr_wake_lock, WAKE_LOCK_SUSPEND, data->wlname);
- wake_lock(&data->ssr_wake_lock);
-
- INIT_WORK(&data->work, subsystem_restart_wq_func);
- rc = queue_work(ssr_wq, &data->work);
- if (rc < 0)
- panic("%s: Unable to schedule work to restart %s (%d).",
- __func__, subsys->name, rc);
+ dev->restarting = true;
+ wake_lock(&dev->wake_lock);
+ queue_work(ssr_wq, &dev->work);
+ }
+ spin_unlock_irqrestore(&dev->restart_lock, flags);
}
-int subsystem_restart(const char *subsys_name)
+int subsystem_restart_dev(struct subsys_device *dev)
{
- struct subsys_data *subsys;
-
- if (!subsys_name) {
- pr_err("Invalid subsystem name.\n");
- return -EINVAL;
- }
+ const char *name = dev->desc->name;
pr_info("Restart sequence requested for %s, restart_level = %d.\n",
- subsys_name, restart_level);
-
- /* List of subsystems is protected by a lock. New subsystems can
- * still come in.
- */
- subsys = _find_subsystem(subsys_name);
-
- if (!subsys) {
- pr_warn("Unregistered subsystem %s!\n", subsys_name);
- return -EINVAL;
- }
+ name, restart_level);
switch (restart_level) {
case RESET_SUBSYS_COUPLED:
case RESET_SUBSYS_INDEPENDENT:
- __subsystem_restart(subsys);
+ __subsystem_restart_dev(dev);
break;
-
case RESET_SOC:
- panic("subsys-restart: Resetting the SoC - %s crashed.",
- subsys->name);
+ panic("subsys-restart: Resetting the SoC - %s crashed.", name);
break;
-
default:
panic("subsys-restart: Unknown restart level!\n");
- break;
-
+ break;
}
return 0;
}
+EXPORT_SYMBOL(subsystem_restart_dev);
+
+int subsystem_restart(const char *name)
+{
+ struct subsys_device *dev;
+
+ mutex_lock(&subsystem_list_lock);
+ list_for_each_entry(dev, &subsystem_list, list)
+ if (!strncmp(dev->desc->name, name, SUBSYS_NAME_MAX_LENGTH))
+ goto found;
+ dev = NULL;
+found:
+ mutex_unlock(&subsystem_list_lock);
+ if (dev)
+ return subsystem_restart_dev(dev);
+ return -ENODEV;
+}
EXPORT_SYMBOL(subsystem_restart);
-int ssr_register_subsystem(struct subsys_data *subsys)
+struct subsys_device *subsys_register(struct subsys_desc *desc)
{
- unsigned long flags;
+ struct subsys_device *dev;
- if (!subsys)
- goto err;
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
- if (!subsys->name)
- goto err;
+ dev->desc = desc;
+ dev->notify = subsys_notif_add_subsys(desc->name);
+ dev->restart_order = update_restart_order(dev);
- if (!subsys->powerup || !subsys->shutdown)
- goto err;
+ snprintf(dev->wlname, sizeof(dev->wlname), "ssr(%s)", desc->name);
+ wake_lock_init(&dev->wake_lock, WAKE_LOCK_SUSPEND, dev->wlname);
+ INIT_WORK(&dev->work, subsystem_restart_wq_func);
+ spin_lock_init(&dev->restart_lock);
- subsys->notif_handle = subsys_notif_add_subsys(subsys->name);
- subsys->restart_order = _update_restart_order(subsys);
- subsys->single_restart_list[0] = subsys;
+ mutex_init(&dev->shutdown_lock);
+ mutex_init(&dev->powerup_lock);
- mutex_init(&subsys->shutdown_lock);
- mutex_init(&subsys->powerup_lock);
+ mutex_lock(&subsystem_list_lock);
+ list_add(&dev->list, &subsystem_list);
+ mutex_unlock(&subsystem_list_lock);
- spin_lock_irqsave(&subsystem_list_lock, flags);
- list_add(&subsys->list, &subsystem_list);
- spin_unlock_irqrestore(&subsystem_list_lock, flags);
-
- return 0;
-
-err:
- return -EINVAL;
+ return dev;
}
-EXPORT_SYMBOL(ssr_register_subsystem);
+EXPORT_SYMBOL(subsys_register);
+
+void subsys_unregister(struct subsys_device *dev)
+{
+ if (IS_ERR_OR_NULL(dev))
+ return;
+ mutex_lock(&subsystem_list_lock);
+ list_del(&dev->list);
+ mutex_unlock(&subsystem_list_lock);
+ wake_lock_destroy(&dev->wake_lock);
+ kfree(dev);
+}
+EXPORT_SYMBOL(subsys_unregister);
static int ssr_panic_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
- struct subsys_data *subsys;
+ struct subsys_device *dev;
- list_for_each_entry(subsys, &subsystem_list, list)
- if (subsys->crash_shutdown)
- subsys->crash_shutdown(subsys);
+ list_for_each_entry(dev, &subsystem_list, list)
+ if (dev->desc->crash_shutdown)
+ dev->desc->crash_shutdown(dev->desc);
return NOTIFY_DONE;
}
@@ -595,20 +574,12 @@
static int __init subsys_restart_init(void)
{
- int ret = 0;
-
- restart_level = RESET_SOC;
-
ssr_wq = alloc_workqueue("ssr_wq", 0, 0);
-
if (!ssr_wq)
panic("Couldn't allocate workqueue for subsystem restart.\n");
- ret = ssr_init_soc_restart_orders();
-
- return ret;
+ return ssr_init_soc_restart_orders();
}
-
arch_initcall(subsys_restart_init);
MODULE_DESCRIPTION("Subsystem Restart Driver");
diff --git a/arch/arm/mach-msm/wcnss-ssr-8960.c b/arch/arm/mach-msm/wcnss-ssr-8960.c
index 6e8d57c..318523b 100644
--- a/arch/arm/mach-msm/wcnss-ssr-8960.c
+++ b/arch/arm/mach-msm/wcnss-ssr-8960.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/wcnss_wlan.h>
+#include <linux/err.h>
#include <mach/irqs.h>
#include <mach/scm.h>
#include <mach/subsystem_restart.h>
@@ -37,6 +38,7 @@
static int riva_crash;
static int ss_restart_inprogress;
static int enable_riva_ssr;
+static struct subsys_device *riva_8960_dev;
static void smsm_state_cb_hdlr(void *data, uint32_t old_state,
uint32_t new_state)
@@ -83,7 +85,7 @@
}
ss_restart_inprogress = true;
- subsystem_restart("riva");
+ subsystem_restart_dev(riva_8960_dev);
}
static irqreturn_t riva_wdog_bite_irq_hdlr(int irq, void *dev_id)
@@ -100,7 +102,7 @@
panic(MODULE_NAME ": Watchdog bite received from Riva");
ss_restart_inprogress = true;
- subsystem_restart("riva");
+ subsystem_restart_dev(riva_8960_dev);
return IRQ_HANDLED;
}
@@ -126,16 +128,17 @@
}
/* Subsystem handlers */
-static int riva_shutdown(const struct subsys_data *subsys)
+static int riva_shutdown(const struct subsys_desc *subsys)
{
pil_force_shutdown("wcnss");
flush_delayed_work(&cancel_vote_work);
+ wcnss_flush_delayed_boot_votes();
disable_irq_nosync(RIVA_APSS_WDOG_BITE_RESET_RDY_IRQ);
return 0;
}
-static int riva_powerup(const struct subsys_data *subsys)
+static int riva_powerup(const struct subsys_desc *subsys)
{
struct platform_device *pdev = wcnss_get_platform_device();
struct wcnss_wlan_config *pwlanconfig = wcnss_get_wlan_config();
@@ -162,7 +165,7 @@
static struct ramdump_segment riva_segments[] = {{0x8f200000,
0x8f700000 - 0x8f200000} };
-static int riva_ramdump(int enable, const struct subsys_data *subsys)
+static int riva_ramdump(int enable, const struct subsys_desc *subsys)
{
pr_debug("%s: enable[%d]\n", MODULE_NAME, enable);
if (enable)
@@ -174,14 +177,14 @@
}
/* Riva crash handler */
-static void riva_crash_shutdown(const struct subsys_data *subsys)
+static void riva_crash_shutdown(const struct subsys_desc *subsys)
{
pr_err("%s: crash shutdown : %d\n", MODULE_NAME, riva_crash);
if (riva_crash != true)
smsm_riva_reset();
}
-static struct subsys_data riva_8960 = {
+static struct subsys_desc riva_8960 = {
.name = "riva",
.shutdown = riva_shutdown,
.powerup = riva_powerup,
@@ -208,7 +211,10 @@
static int __init riva_restart_init(void)
{
- return ssr_register_subsystem(&riva_8960);
+ riva_8960_dev = subsys_register(&riva_8960);
+ if (IS_ERR(riva_8960_dev))
+ return PTR_ERR(riva_8960_dev);
+ return 0;
}
static int __init riva_ssr_module_init(void)
@@ -253,6 +259,7 @@
static void __exit riva_ssr_module_exit(void)
{
+ subsys_unregister(riva_8960_dev);
free_irq(RIVA_APSS_WDOG_BITE_RESET_RDY_IRQ, NULL);
}
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 9107231..94b1e61 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -856,8 +856,10 @@
case 0x08000000: /* ldm or stm, or thumb-2 32bit instruction */
if (thumb2_32b)
handler = do_alignment_t32_to_handler(&instr, regs, &offset);
- else
+ else {
handler = do_alignment_ldmstm;
+ offset.un = 0;
+ }
break;
default:
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 32f61f5..b6fb52a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -32,6 +32,7 @@
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
+#include <asm/cputype.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -40,6 +41,8 @@
static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;
+int msm_krait_need_wfe_fixup;
+EXPORT_SYMBOL(msm_krait_need_wfe_fixup);
static int __init early_initrd(char *p)
{
@@ -916,3 +919,17 @@
__setup("keepinitrd", keepinitrd_setup);
#endif
+
+#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
+static int __init msm_krait_wfe_init(void)
+{
+ unsigned int val, midr;
+ midr = read_cpuid_id() & 0xffffff00;
+ if ((midr == 0x511f0400) || (midr == 0x510f0600)) {
+ asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (val));
+ msm_krait_need_wfe_fixup = (val & 0x10000) ? 1 : 0;
+ }
+ return 0;
+}
+pure_initcall(msm_krait_wfe_init);
+#endif
diff --git a/block/test-iosched.c b/block/test-iosched.c
index ce9527f..0a033dc 100644
--- a/block/test-iosched.c
+++ b/block/test-iosched.c
@@ -173,6 +173,9 @@
bio->bi_size = nr_sects << 9;
bio->bi_sector = start_sec;
break;
+ case REQ_UNIQUE_SANITIZE:
+ bio->bi_rw = REQ_WRITE | REQ_SANITIZE;
+ break;
default:
test_pr_err("%s: Invalid request type %d", __func__,
req_unique);
diff --git a/drivers/base/genlock.c b/drivers/base/genlock.c
index aed71c0..f54d820 100644
--- a/drivers/base/genlock.c
+++ b/drivers/base/genlock.c
@@ -116,6 +116,7 @@
struct genlock *genlock_create_lock(struct genlock_handle *handle)
{
struct genlock *lock;
+ void *ret;
if (IS_ERR_OR_NULL(handle)) {
GENLOCK_LOG_ERR("Invalid handle\n");
@@ -145,8 +146,13 @@
* other processes
*/
- lock->file = anon_inode_getfile("genlock", &genlock_fops,
- lock, O_RDWR);
+ ret = anon_inode_getfile("genlock", &genlock_fops, lock, O_RDWR);
+ if (IS_ERR_OR_NULL(ret)) {
+ GENLOCK_LOG_ERR("Unable to create lock inode\n");
+ kfree(lock);
+ return ret;
+ }
+ lock->file = ret;
/* Attach the new lock to the handle */
handle->lock = lock;
@@ -660,12 +666,19 @@
struct genlock_handle *genlock_get_handle(void)
{
+ void *ret;
struct genlock_handle *handle = _genlock_get_handle();
if (IS_ERR(handle))
return handle;
- handle->file = anon_inode_getfile("genlock-handle",
+ ret = anon_inode_getfile("genlock-handle",
&genlock_handle_fops, handle, O_RDWR);
+ if (IS_ERR_OR_NULL(ret)) {
+ GENLOCK_LOG_ERR("Unable to create handle inode\n");
+ kfree(handle);
+ return ret;
+ }
+ handle->file = ret;
return handle;
}
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 4093935..2bb82ce 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -5,6 +5,7 @@
* power management protocol extension to H4 to support AR300x Bluetooth Chip.
*
* Copyright (c) 2009-2010 Atheros Communications Inc.
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_h4.c, which was written
@@ -35,12 +36,53 @@
#include <linux/errno.h>
#include <linux/ioctl.h>
#include <linux/skbuff.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include "hci_uart.h"
+unsigned int enableuartsleep;
+module_param(enableuartsleep, uint, 0644);
+/*
+ * Global variables
+ */
+/** Global state flags */
+static unsigned long flags;
+
+/** Tasklet to respond to change in hostwake line */
+static struct tasklet_struct hostwake_task;
+
+/** Transmission timer */
+static void bluesleep_tx_timer_expire(unsigned long data);
+static DEFINE_TIMER(tx_timer, bluesleep_tx_timer_expire, 0, 0);
+
+/** Lock for state transitions */
+static spinlock_t rw_lock;
+
+#define POLARITY_LOW 0
+#define POLARITY_HIGH 1
+
+struct bluesleep_info {
+ unsigned host_wake; /* wake up host */
+ unsigned ext_wake; /* wake up device */
+ unsigned host_wake_irq;
+ int irq_polarity;
+};
+
+/* 1 second timeout */
+#define TX_TIMER_INTERVAL 1
+
+/* state variable names and bit positions */
+#define BT_TXEXPIRED 0x01
+#define BT_SLEEPENABLE 0x02
+#define BT_SLEEPCMD 0x03
+
+/* global pointer to a single hci device. */
+static struct bluesleep_info *bsi;
+
struct ath_struct {
struct hci_uart *hu;
unsigned int cur_sleep;
@@ -49,35 +91,30 @@
struct work_struct ctxtsw;
};
+static void hostwake_interrupt(unsigned long data)
+{
+ printk(KERN_INFO " wakeup host\n");
+}
+
+static void modify_timer_task(void)
+{
+ spin_lock(&rw_lock);
+ mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL * HZ));
+ clear_bit(BT_TXEXPIRED, &flags);
+ spin_unlock(&rw_lock);
+
+}
+
static int ath_wakeup_ar3k(struct tty_struct *tty)
{
- struct ktermios ktermios;
- int status = tty->driver->ops->tiocmget(tty);
-
- if (status & TIOCM_CTS)
- return status;
-
- /* Disable Automatic RTSCTS */
- memcpy(&ktermios, tty->termios, sizeof(ktermios));
- ktermios.c_cflag &= ~CRTSCTS;
- tty_set_termios(tty, &ktermios);
-
- /* Clear RTS first */
- status = tty->driver->ops->tiocmget(tty);
- tty->driver->ops->tiocmset(tty, 0x00, TIOCM_RTS);
- mdelay(20);
-
- /* Set RTS, wake up board */
- status = tty->driver->ops->tiocmget(tty);
- tty->driver->ops->tiocmset(tty, TIOCM_RTS, 0x00);
- mdelay(20);
-
- status = tty->driver->ops->tiocmget(tty);
-
- /* Disable Automatic RTSCTS */
- ktermios.c_cflag |= CRTSCTS;
- status = tty_set_termios(tty, &ktermios);
-
+ int status = 0;
+ if (test_bit(BT_TXEXPIRED, &flags)) {
+ printk(KERN_INFO "wakeup device\n");
+ gpio_set_value(bsi->ext_wake, 1);
+ msleep(20);
+ gpio_set_value(bsi->ext_wake, 0);
+ }
+ modify_timer_task();
return status;
}
@@ -94,12 +131,8 @@
tty = hu->tty;
/* verify and wake up controller */
- if (ath->cur_sleep) {
+ if (test_bit(BT_SLEEPENABLE, &flags))
status = ath_wakeup_ar3k(tty);
- if (!(status & TIOCM_CTS))
- return;
- }
-
/* Ready to send Data */
clear_bit(HCI_UART_SENDING, &hu->tx_state);
hci_uart_tx_wakeup(hu);
@@ -121,6 +154,11 @@
hu->priv = ath;
ath->hu = hu;
+ ath->cur_sleep = enableuartsleep;
+ if (ath->cur_sleep == 1) {
+ set_bit(BT_SLEEPENABLE, &flags);
+ modify_timer_task();
+ }
INIT_WORK(&ath->ctxtsw, ath_hci_uart_work);
return 0;
@@ -173,9 +211,10 @@
*/
if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
struct hci_command_hdr *hdr = (void *)skb->data;
-
- if (__le16_to_cpu(hdr->opcode) == HCI_OP_ATH_SLEEP)
+ if (__le16_to_cpu(hdr->opcode) == HCI_OP_ATH_SLEEP) {
+ set_bit(BT_SLEEPCMD, &flags);
ath->cur_sleep = skb->data[HCI_COMMAND_HDR_SIZE];
+ }
}
BT_DBG("hu %p skb %p", hu, skb);
@@ -201,17 +240,57 @@
/* Recv data */
static int ath_recv(struct hci_uart *hu, void *data, int count)
{
- int ret;
+ struct ath_struct *ath = hu->priv;
+ unsigned int type;
- ret = hci_recv_stream_fragment(hu->hdev, data, count);
- if (ret < 0) {
+ if (hci_recv_stream_fragment(hu->hdev, data, count) < 0)
BT_ERR("Frame Reassembly Failed");
- return ret;
- }
+ if (count & test_bit(BT_SLEEPCMD, &flags)) {
+ struct sk_buff *skb = hu->hdev->reassembly[0];
+
+ if (!skb) {
+ struct { char type; } *pkt;
+
+ /* Start of the frame */
+ pkt = data;
+ type = pkt->type;
+ } else
+ type = bt_cb(skb)->pkt_type;
+
+ if (type == HCI_EVENT_PKT) {
+ clear_bit(BT_SLEEPCMD, &flags);
+ printk(KERN_INFO "cur_sleep:%d\n", ath->cur_sleep);
+ if (ath->cur_sleep == 1)
+ set_bit(BT_SLEEPENABLE, &flags);
+ else
+ clear_bit(BT_SLEEPENABLE, &flags);
+ }
+ if (test_bit(BT_SLEEPENABLE, &flags))
+ modify_timer_task();
+ }
return count;
}
+static void bluesleep_tx_timer_expire(unsigned long data)
+{
+ unsigned long irq_flags;
+
+ if (!test_bit(BT_SLEEPENABLE, &flags))
+ return;
+ BT_DBG("Tx timer expired");
+ printk(KERN_INFO "Tx timer expired\n");
+
+ set_bit(BT_TXEXPIRED, &flags);
+}
+
+static irqreturn_t bluesleep_hostwake_isr(int irq, void *dev_id)
+{
+ /* schedule a tasklet to handle the change in the host wake line */
+ tasklet_schedule(&hostwake_task);
+ return IRQ_HANDLED;
+}
+
static struct hci_uart_proto athp = {
.id = HCI_UART_ATH3K,
.open = ath_open,
@@ -222,19 +301,159 @@
.flush = ath_flush,
};
+static int __init bluesleep_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *res;
+
+ bsi = kzalloc(sizeof(struct bluesleep_info), GFP_KERNEL);
+ if (!bsi) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "gpio_host_wake");
+ if (!res) {
+ BT_ERR("couldn't find host_wake gpio\n");
+ ret = -ENODEV;
+ goto free_bsi;
+ }
+ bsi->host_wake = res->start;
+
+ ret = gpio_request(bsi->host_wake, "bt_host_wake");
+ if (ret)
+ goto free_bsi;
+
+ /* configure host_wake as input */
+ ret = gpio_direction_input(bsi->host_wake);
+ if (ret < 0) {
+ pr_err("%s: gpio_direction_input failed for GPIO %d, error %d\n",
+ __func__, bsi->host_wake, ret);
+ gpio_free(bsi->host_wake);
+ goto free_bsi;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "gpio_ext_wake");
+ if (!res) {
+ BT_ERR("couldn't find ext_wake gpio\n");
+ ret = -ENODEV;
+ goto free_bt_host_wake;
+ }
+ bsi->ext_wake = res->start;
+
+ ret = gpio_request(bsi->ext_wake, "bt_ext_wake");
+ if (ret)
+ goto free_bt_host_wake;
+
+ /* configure ext_wake as output mode*/
+ ret = gpio_direction_output(bsi->ext_wake, 1);
+ if (ret < 0) {
+ pr_err("%s: gpio_direction_output failed for GPIO %d, error %d\n",
+ __func__, bsi->ext_wake, ret);
+ gpio_free(bsi->ext_wake);
+ goto free_bt_host_wake;
+ }
+ gpio_set_value(bsi->ext_wake, 0);
+
+ bsi->host_wake_irq = platform_get_irq_byname(pdev, "host_wake");
+ if (bsi->host_wake_irq < 0) {
+ BT_ERR("couldn't find host_wake irq\n");
+ ret = -ENODEV;
+ goto free_bt_ext_wake;
+ }
+
+ bsi->irq_polarity = POLARITY_LOW; /* low edge (falling edge) */
+
+ /* Initialize spinlock. */
+ spin_lock_init(&rw_lock);
+
+ /* Initialize timer */
+ init_timer(&tx_timer);
+ tx_timer.function = bluesleep_tx_timer_expire;
+ tx_timer.data = 0;
+
+ /* initialize host wake tasklet */
+ tasklet_init(&hostwake_task, hostwake_interrupt, 0);
+
+ if (bsi->irq_polarity == POLARITY_LOW) {
+ ret = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
+ IRQF_DISABLED | IRQF_TRIGGER_FALLING,
+ "bluetooth hostwake", NULL);
+ } else {
+ ret = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING,
+ "bluetooth hostwake", NULL);
+ }
+ if (ret < 0) {
+ BT_ERR("Couldn't acquire BT_HOST_WAKE IRQ");
+ goto free_bt_timer;
+ }
+
+ ret = enable_irq_wake(bsi->host_wake_irq);
+ if (ret < 0) {
+ BT_ERR("Couldn't enable BT_HOST_WAKE as wakeup interrupt");
+ free_irq(bsi->host_wake_irq, NULL);
+ goto free_bt_timer;
+ }
+
+ return 0;
+
+free_bt_timer:
+ del_timer(&tx_timer);
+free_bt_ext_wake:
+ gpio_free(bsi->ext_wake);
+free_bt_host_wake:
+ gpio_free(bsi->host_wake);
+free_bsi:
+ kfree(bsi);
+failed:
+ return ret;
+}
+
+static int bluesleep_remove(struct platform_device *pdev)
+{
+ /* assert bt wake */
+ gpio_set_value(bsi->ext_wake, 0);
+ if (disable_irq_wake(bsi->host_wake_irq))
+ BT_ERR("Couldn't disable hostwake IRQ wakeup mode\n");
+ free_irq(bsi->host_wake_irq, NULL);
+ del_timer_sync(&tx_timer);
+ gpio_free(bsi->host_wake);
+ gpio_free(bsi->ext_wake);
+ kfree(bsi);
+ return 0;
+}
+
+static struct platform_driver bluesleep_driver = {
+ .remove = bluesleep_remove,
+ .driver = {
+ .name = "bluesleep",
+ .owner = THIS_MODULE,
+ },
+};
+
int __init ath_init(void)
{
- int err = hci_uart_register_proto(&athp);
+ int ret;
- if (!err)
+ ret = hci_uart_register_proto(&athp);
+
+ if (!ret)
BT_INFO("HCIATH3K protocol initialized");
- else
+ else {
BT_ERR("HCIATH3K protocol registration failed");
-
- return err;
+ return ret;
+ }
+ ret = platform_driver_probe(&bluesleep_driver, bluesleep_probe);
+ if (ret)
+ return ret;
+ return 0;
}
int __exit ath_deinit(void)
{
+ platform_driver_unregister(&bluesleep_driver);
return hci_uart_unregister_proto(&athp);
}
diff --git a/drivers/bluetooth/hci_ibs.c b/drivers/bluetooth/hci_ibs.c
index 6845020..fb084f5 100644
--- a/drivers/bluetooth/hci_ibs.c
+++ b/drivers/bluetooth/hci_ibs.c
@@ -37,9 +37,7 @@
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
-#include <linux/ftrace.h>
#include <linux/poll.h>
-#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/tty.h>
@@ -94,15 +92,6 @@
HCI_IBS_RX_VOTE_CLOCK_OFF,
};
-/* HCI_IBS state for the WorkQueue */
-enum hci_ibs_wq_state_e {
- HCI_IBS_WQ_INIT_STATE = 0,
- HCI_IBS_WQ_TX_VOTE_OFF,
- HCI_IBS_WQ_RX_VOTE_OFF,
- HCI_IBS_WQ_AWAKE_RX,
- HCI_IBS_WQ_AWAKE_DEVICE,
-};
-
static unsigned long wake_retrans = 1;
static unsigned long tx_idle_delay = (HZ * 2);
@@ -123,11 +112,6 @@
unsigned long rx_vote; /* clock must be on for RX */
struct timer_list tx_idle_timer;
struct timer_list wake_retrans_timer;
- struct workqueue_struct *workqueue;
- struct work_struct ws_ibs;
- unsigned long ibs_wq_state;
- void *ibs_hu; /* keeps the hci_uart pointer for reference */
-
/* debug */
unsigned long ibs_sent_wacks;
unsigned long ibs_sent_slps;
@@ -258,56 +242,6 @@
return err;
}
-static void ibs_wq(struct work_struct *work)
-{
- unsigned long flags = 0;
- struct ibs_struct *ibs = container_of(work, struct ibs_struct,
- ws_ibs);
- struct hci_uart *hu = (struct hci_uart *)ibs->ibs_hu;
-
- BT_DBG("hu %p, ibs_wq state: %lu\n", hu, ibs->ibs_wq_state);
-
- /* lock hci_ibs state */
- spin_lock_irqsave(&ibs->hci_ibs_lock, flags);
-
- switch (ibs->ibs_wq_state) {
- case HCI_IBS_WQ_AWAKE_DEVICE:
- /* Vote for serial clock */
- ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
-
- /* send wake indication to device */
- if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
- BT_ERR("cannot send WAKE to device");
-
- ibs->ibs_sent_wakes++; /* debug */
-
- /* start retransmit timer */
- mod_timer(&ibs->wake_retrans_timer, jiffies + wake_retrans);
- break;
- case HCI_IBS_WQ_AWAKE_RX:
- ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
- ibs->rx_ibs_state = HCI_IBS_RX_AWAKE;
-
- if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
- BT_ERR("cannot acknowledge device wake up");
-
- ibs->ibs_sent_wacks++; /* debug */
- /* actually send the packets */
- hci_uart_tx_wakeup(hu);
- break;
- case HCI_IBS_WQ_RX_VOTE_OFF:
- ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
- break;
- case HCI_IBS_WQ_TX_VOTE_OFF:
- ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
- break;
- default:
- BT_DBG("Invalid state in ibs workqueue");
- break;
- }
- spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
-}
-
static void hci_ibs_tx_idle_timeout(unsigned long arg)
{
struct hci_uart *hu = (struct hci_uart *) arg;
@@ -348,9 +282,7 @@
spin_lock_irqsave_nested(&ibs->hci_ibs_lock,
flags, SINGLE_DEPTH_NESTING);
- /* vote off tx clock */
- ibs->ibs_wq_state = HCI_IBS_WQ_TX_VOTE_OFF;
- queue_work(ibs->workqueue, &ibs->ws_ibs);
+ ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
out:
spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
}
@@ -404,16 +336,6 @@
skb_queue_head_init(&ibs->txq);
skb_queue_head_init(&ibs->tx_wait_q);
spin_lock_init(&ibs->hci_ibs_lock);
- ibs->workqueue = create_singlethread_workqueue("ibs_wq");
- if (!ibs->workqueue) {
- BT_ERR("IBS Workqueue not initialized properly");
- kfree(ibs);
- return -ENOMEM;
- }
-
- INIT_WORK(&ibs->ws_ibs, ibs_wq);
- ibs->ibs_hu = (void *)hu;
- ibs->ibs_wq_state = HCI_IBS_WQ_INIT_STATE;
/* Assume we start with both sides asleep -- extra wakes OK */
ibs->tx_ibs_state = HCI_IBS_TX_ASLEEP;
@@ -510,8 +432,6 @@
skb_queue_purge(&ibs->txq);
del_timer(&ibs->tx_idle_timer);
del_timer(&ibs->wake_retrans_timer);
- destroy_workqueue(ibs->workqueue);
- ibs->ibs_hu = NULL;
kfree_skb(ibs->rx_skb);
@@ -543,11 +463,9 @@
/* Make sure clock is on - we may have turned clock off since
* receiving the wake up indicator
*/
- /* awake rx clock */
- ibs->ibs_wq_state = HCI_IBS_WQ_AWAKE_RX;
- queue_work(ibs->workqueue, &ibs->ws_ibs);
- spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
- return;
+ ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
+ ibs->rx_ibs_state = HCI_IBS_RX_AWAKE;
+ /* deliberate fall-through */
case HCI_IBS_RX_AWAKE:
/* Always acknowledge device wake up,
* sending IBS message doesn't count as TX ON.
@@ -592,9 +510,7 @@
case HCI_IBS_RX_AWAKE:
/* update state */
ibs->rx_ibs_state = HCI_IBS_RX_ASLEEP;
- /* vote off rx clock under workqueue */
- ibs->ibs_wq_state = HCI_IBS_WQ_RX_VOTE_OFF;
- queue_work(ibs->workqueue, &ibs->ws_ibs);
+ ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
break;
case HCI_IBS_RX_ASLEEP:
/* deliberate fall-through */
@@ -679,12 +595,20 @@
case HCI_IBS_TX_ASLEEP:
BT_DBG("device asleep, waking up and queueing packet");
+ ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
/* save packet for later */
skb_queue_tail(&ibs->tx_wait_q, skb);
+ /* awake device */
+ if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
+ BT_ERR("cannot send WAKE to device");
+ break;
+ }
+ ibs->ibs_sent_wakes++; /* debug */
+
+ /* start retransmit timer */
+ mod_timer(&ibs->wake_retrans_timer, jiffies + wake_retrans);
+
ibs->tx_ibs_state = HCI_IBS_TX_WAKING;
- /* schedule a work queue to wake up device */
- ibs->ibs_wq_state = HCI_IBS_WQ_AWAKE_DEVICE;
- queue_work(ibs->workqueue, &ibs->ws_ibs);
break;
case HCI_IBS_TX_WAKING:
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index c0b82df..b70efe3 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -28,11 +28,6 @@
int signal_type;
};
-#define DIAG_CON_APSS (0x0001) /* Bit mask for APSS */
-#define DIAG_CON_MPSS (0x0002) /* Bit mask for MPSS */
-#define DIAG_CON_LPASS (0x0004) /* Bit mask for LPASS */
-#define DIAG_CON_WCNSS (0x0008) /* Bit mask for WCNSS */
-
enum {
DIAG_DCI_NO_ERROR = 1001, /* No error */
DIAG_DCI_NO_REG, /* Could not register */
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 2f356f0..95a85f2a 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -58,6 +58,11 @@
#define DIAG_CTRL_MSG_F3_MASK 11
#define CONTROL_CHAR 0x7E
+#define DIAG_CON_APSS (0x0001) /* Bit mask for APSS */
+#define DIAG_CON_MPSS (0x0002) /* Bit mask for MPSS */
+#define DIAG_CON_LPASS (0x0004) /* Bit mask for LPASS */
+#define DIAG_CON_WCNSS (0x0008) /* Bit mask for WCNSS */
+
/* Maximum number of pkt reg supported at initialization*/
extern unsigned int diag_max_reg;
extern unsigned int diag_threshold_reg;
@@ -224,6 +229,9 @@
struct work_struct diag_qdsp_mask_update_work;
struct work_struct diag_wcnss_mask_update_work;
struct work_struct diag_read_smd_dci_work;
+ struct work_struct diag_clean_modem_reg_work;
+ struct work_struct diag_clean_lpass_reg_work;
+ struct work_struct diag_clean_wcnss_reg_work;
uint8_t *msg_masks;
uint8_t *log_masks;
int log_masks_length;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 547f42f..240a514 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -1268,6 +1268,12 @@
diag_read_smd_wcnss_cntl_work_fn);
INIT_WORK(&(driver->diag_read_smd_dci_work),
diag_read_smd_dci_work_fn);
+ INIT_WORK(&(driver->diag_clean_modem_reg_work),
+ diag_clean_modem_reg_fn);
+ INIT_WORK(&(driver->diag_clean_lpass_reg_work),
+ diag_clean_lpass_reg_fn);
+ INIT_WORK(&(driver->diag_clean_wcnss_reg_work),
+ diag_clean_wcnss_reg_fn);
diag_debugfs_init();
diagfwd_init();
diagfwd_cntl_init();
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 384c1bf..b228276 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1735,8 +1735,8 @@
static void diag_smd_notify(void *ctxt, unsigned event)
{
if (event == SMD_EVENT_CLOSE) {
- pr_info("diag: clean modem registration\n");
- diag_clear_reg(MODEM_PROC);
+ queue_work(driver->diag_cntl_wq,
+ &(driver->diag_clean_modem_reg_work));
driver->ch = 0;
return;
} else if (event == SMD_EVENT_OPEN) {
@@ -1750,8 +1750,8 @@
static void diag_smd_qdsp_notify(void *ctxt, unsigned event)
{
if (event == SMD_EVENT_CLOSE) {
- pr_info("diag: clean lpass registration\n");
- diag_clear_reg(QDSP_PROC);
+ queue_work(driver->diag_cntl_wq,
+ &(driver->diag_clean_lpass_reg_work));
driver->chqdsp = 0;
return;
} else if (event == SMD_EVENT_OPEN) {
@@ -1765,8 +1765,8 @@
static void diag_smd_wcnss_notify(void *ctxt, unsigned event)
{
if (event == SMD_EVENT_CLOSE) {
- pr_info("diag: clean wcnss registration\n");
- diag_clear_reg(WCNSS_PROC);
+ queue_work(driver->diag_cntl_wq,
+ &(driver->diag_clean_wcnss_reg_work));
driver->ch_wcnss = 0;
return;
} else if (event == SMD_EVENT_OPEN) {
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index de1a5b5..95abd21 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -20,9 +20,34 @@
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
-
+/* tracks which peripheral is undergoing SSR */
+static uint16_t reg_dirty;
#define HDR_SIZ 8
+void diag_clean_modem_reg_fn(struct work_struct *work)
+{
+ pr_debug("diag: clean modem registration\n");
+ reg_dirty |= DIAG_CON_MPSS;
+ diag_clear_reg(MODEM_PROC);
+ reg_dirty ^= DIAG_CON_MPSS;
+}
+
+void diag_clean_lpass_reg_fn(struct work_struct *work)
+{
+ pr_debug("diag: clean lpass registration\n");
+ reg_dirty |= DIAG_CON_LPASS;
+ diag_clear_reg(QDSP_PROC);
+ reg_dirty ^= DIAG_CON_LPASS;
+}
+
+void diag_clean_wcnss_reg_fn(struct work_struct *work)
+{
+ pr_debug("diag: clean wcnss registration\n");
+ reg_dirty |= DIAG_CON_WCNSS;
+ diag_clear_reg(WCNSS_PROC);
+ reg_dirty ^= DIAG_CON_WCNSS;
+}
+
void diag_smd_cntl_notify(void *ctxt, unsigned event)
{
int r1, r2;
@@ -105,6 +130,8 @@
struct bindpkt_params *temp;
void *buf = NULL;
smd_channel_t *smd_ch = NULL;
+ /* tracks which peripheral is sending registration */
+ uint16_t reg_mask = 0;
if (pkt_params == NULL) {
pr_alert("diag: Memory allocation failure\n");
@@ -114,12 +141,15 @@
if (proc_num == MODEM_PROC) {
buf = driver->buf_in_cntl;
smd_ch = driver->ch_cntl;
+ reg_mask = DIAG_CON_MPSS;
} else if (proc_num == QDSP_PROC) {
buf = driver->buf_in_qdsp_cntl;
smd_ch = driver->chqdsp_cntl;
+ reg_mask = DIAG_CON_LPASS;
} else if (proc_num == WCNSS_PROC) {
buf = driver->buf_in_wcnss_cntl;
smd_ch = driver->ch_wcnss_cntl;
+ reg_mask = DIAG_CON_WCNSS;
}
if (!smd_ch || !buf) {
@@ -180,8 +210,16 @@
temp -= pkt_params->count;
pkt_params->params = temp;
flag = 1;
- diagchar_ioctl(NULL, DIAG_IOCTL_COMMAND_REG,
- (unsigned long)pkt_params);
+ /* a peripheral undergoing SSR must not
+ * record a new registration
+ */
+ if (!(reg_dirty & reg_mask))
+ diagchar_ioctl(NULL,
+ DIAG_IOCTL_COMMAND_REG, (unsigned long)
+ pkt_params);
+ else
+ pr_err("diag: drop reg proc %d\n",
+ proc_num);
kfree(temp);
}
buf = buf + HDR_SIZ + data_len;
@@ -275,6 +313,7 @@
void diagfwd_cntl_init(void)
{
+ reg_dirty = 0;
driver->polling_reg_flag = 0;
driver->diag_cntl_wq = create_singlethread_workqueue("diag_cntl_wq");
if (driver->buf_in_cntl == NULL) {
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index 743ddc1..59e5e6b 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -85,7 +85,9 @@
void diag_smd_cntl_notify(void *ctxt, unsigned event);
void diag_smd_qdsp_cntl_notify(void *ctxt, unsigned event);
void diag_smd_wcnss_cntl_notify(void *ctxt, unsigned event);
-
+void diag_clean_modem_reg_fn(struct work_struct *);
+void diag_clean_lpass_reg_fn(struct work_struct *);
+void diag_clean_wcnss_reg_fn(struct work_struct *);
void diag_debugfs_init(void);
void diag_debugfs_cleanup(void);
diff --git a/drivers/coresight/coresight-tpiu.c b/drivers/coresight/coresight-tpiu.c
index 4b52c4d..c0bcfdd 100644
--- a/drivers/coresight/coresight-tpiu.c
+++ b/drivers/coresight/coresight-tpiu.c
@@ -119,9 +119,17 @@
if (ret)
goto err_clk_rate;
+ /* Disable tpiu to support older targets that need this */
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ goto err_clk_enable;
+ __tpiu_disable();
+ clk_disable_unprepare(drvdata->clk);
+
dev_info(drvdata->dev, "TPIU initialized\n");
return 0;
+err_clk_enable:
err_clk_rate:
clk_put(drvdata->clk);
err_clk_get:
diff --git a/drivers/coresight/coresight.c b/drivers/coresight/coresight.c
index 055ef55..a17ac9a 100644
--- a/drivers/coresight/coresight.c
+++ b/drivers/coresight/coresight.c
@@ -20,71 +20,351 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
-#include <linux/mutex.h>
+#include <linux/semaphore.h>
#include <linux/clk.h>
#include <linux/coresight.h>
#include "coresight-priv.h"
+
+#define NO_SINK (-1)
#define MAX_STR_LEN (65535)
+static int curr_sink = NO_SINK;
static LIST_HEAD(coresight_orph_conns);
-static DEFINE_MUTEX(coresight_conns_mutex);
static LIST_HEAD(coresight_devs);
-static DEFINE_MUTEX(coresight_devs_mutex);
+static DEFINE_SEMAPHORE(coresight_mutex);
-int coresight_enable(struct coresight_device *csdev, int port)
+static int coresight_find_link_inport(struct coresight_device *csdev)
{
int i;
- int ret;
+ struct coresight_device *parent;
struct coresight_connection *conn;
- mutex_lock(&csdev->mutex);
- if (csdev->refcnt[port] == 0) {
- for (i = 0; i < csdev->nr_conns; i++) {
- conn = &csdev->conns[i];
- ret = coresight_enable(conn->child_dev,
- conn->child_port);
- if (ret)
- goto err_enable_child;
- }
- if (csdev->ops->enable)
- ret = csdev->ops->enable(csdev, port);
- if (ret)
- goto err_enable;
+ parent = container_of(csdev->path_link.next, struct coresight_device,
+ path_link);
+ for (i = 0; i < parent->nr_conns; i++) {
+ conn = &parent->conns[i];
+ if (conn->child_dev == csdev)
+ return conn->child_port;
}
- csdev->refcnt[port]++;
- mutex_unlock(&csdev->mutex);
+
+ pr_err("coresight: couldn't find inport, parent: %d, child: %d\n",
+ parent->id, csdev->id);
return 0;
-err_enable_child:
- while (i) {
- conn = &csdev->conns[--i];
- coresight_disable(conn->child_dev, conn->child_port);
+}
+
+static int coresight_find_link_outport(struct coresight_device *csdev)
+{
+ int i;
+ struct coresight_device *child;
+ struct coresight_connection *conn;
+
+ child = container_of(csdev->path_link.prev, struct coresight_device,
+ path_link);
+ for (i = 0; i < csdev->nr_conns; i++) {
+ conn = &csdev->conns[i];
+ if (conn->child_dev == child)
+ return conn->outport;
}
-err_enable:
- mutex_unlock(&csdev->mutex);
+
+ pr_err("coresight: couldn't find outport, parent: %d, child: %d\n",
+ csdev->id, child->id);
+ return 0;
+}
+
+static int coresight_enable_sink(struct coresight_device *csdev)
+{
+ int ret;
+
+ if (csdev->refcnt.sink_refcnt == 0) {
+ if (csdev->ops->sink_ops->enable) {
+ ret = csdev->ops->sink_ops->enable(csdev);
+ if (ret)
+ goto err;
+ csdev->enable = true;
+ }
+ }
+ csdev->refcnt.sink_refcnt++;
+
+ return 0;
+err:
+ return ret;
+}
+
+static void coresight_disable_sink(struct coresight_device *csdev)
+{
+ if (csdev->refcnt.sink_refcnt == 1) {
+ if (csdev->ops->sink_ops->disable) {
+ csdev->ops->sink_ops->disable(csdev);
+ csdev->enable = false;
+ }
+ }
+ csdev->refcnt.sink_refcnt--;
+}
+
+static int coresight_enable_link(struct coresight_device *csdev)
+{
+ int ret;
+ int link_subtype;
+ int refport, inport, outport;
+
+ inport = coresight_find_link_inport(csdev);
+ outport = coresight_find_link_outport(csdev);
+
+ link_subtype = csdev->subtype.link_subtype;
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
+ refport = inport;
+ else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
+ refport = outport;
+ else
+ refport = 0;
+
+ if (csdev->refcnt.link_refcnts[refport] == 0) {
+ if (csdev->ops->link_ops->enable) {
+ ret = csdev->ops->link_ops->enable(csdev, inport,
+ outport);
+ if (ret)
+ goto err;
+ csdev->enable = true;
+ }
+ }
+ csdev->refcnt.link_refcnts[refport]++;
+
+ return 0;
+err:
+ return ret;
+}
+
+static void coresight_disable_link(struct coresight_device *csdev)
+{
+ int link_subtype;
+ int refport, inport, outport;
+
+ inport = coresight_find_link_inport(csdev);
+ outport = coresight_find_link_outport(csdev);
+
+ link_subtype = csdev->subtype.link_subtype;
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
+ refport = inport;
+ else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
+ refport = outport;
+ else
+ refport = 0;
+
+ if (csdev->refcnt.link_refcnts[refport] == 1) {
+ if (csdev->ops->link_ops->disable) {
+ csdev->ops->link_ops->disable(csdev, inport, outport);
+ csdev->enable = false;
+ }
+ }
+ csdev->refcnt.link_refcnts[refport]--;
+}
+
+static int coresight_enable_source(struct coresight_device *csdev)
+{
+ int ret;
+
+ if (csdev->refcnt.source_refcnt == 0) {
+ if (csdev->ops->source_ops->enable) {
+ ret = csdev->ops->source_ops->enable(csdev);
+ if (ret)
+ goto err;
+ csdev->enable = true;
+ }
+ }
+ csdev->refcnt.source_refcnt++;
+
+ return 0;
+err:
+ return ret;
+}
+
+static void coresight_disable_source(struct coresight_device *csdev)
+{
+ if (csdev->refcnt.source_refcnt == 1) {
+ if (csdev->ops->source_ops->disable) {
+ csdev->ops->source_ops->disable(csdev);
+ csdev->enable = false;
+ }
+ }
+ csdev->refcnt.source_refcnt--;
+}
+
+static struct list_head *coresight_build_path(struct coresight_device *csdev,
+ struct list_head *path)
+{
+ int i;
+ struct list_head *p;
+ struct coresight_connection *conn;
+
+ if (csdev->id == curr_sink) {
+ list_add_tail(&csdev->path_link, path);
+ return path;
+ }
+
+ for (i = 0; i < csdev->nr_conns; i++) {
+ conn = &csdev->conns[i];
+ p = coresight_build_path(conn->child_dev, path);
+ if (p) {
+ list_add_tail(&csdev->path_link, p);
+ return p;
+ }
+ }
+ return NULL;
+}
+
+static void coresight_release_path(struct list_head *path)
+{
+ struct coresight_device *cd, *temp;
+
+ list_for_each_entry_safe(cd, temp, path, path_link)
+ list_del(&cd->path_link);
+}
+
+static int coresight_enable_path(struct list_head *path, bool incl_source)
+{
+ int ret = 0;
+ struct coresight_device *cd;
+
+ list_for_each_entry(cd, path, path_link) {
+ if (cd == list_first_entry(path, struct coresight_device,
+ path_link)) {
+ ret = coresight_enable_sink(cd);
+ } else if (list_is_last(&cd->path_link, path)) {
+ if (incl_source)
+ ret = coresight_enable_source(cd);
+ } else {
+ ret = coresight_enable_link(cd);
+ }
+ if (ret)
+ goto err;
+ }
+ return 0;
+err:
+ list_for_each_entry_continue_reverse(cd, path, path_link) {
+ if (cd == list_first_entry(path, struct coresight_device,
+ path_link)) {
+ coresight_disable_sink(cd);
+ } else if (list_is_last(&cd->path_link, path)) {
+ if (incl_source)
+ coresight_disable_source(cd);
+ } else {
+ coresight_disable_link(cd);
+ }
+ }
+ return ret;
+}
+
+static void coresight_disable_path(struct list_head *path, bool incl_source)
+{
+ struct coresight_device *cd;
+
+ list_for_each_entry(cd, path, path_link) {
+ if (cd == list_first_entry(path, struct coresight_device,
+ path_link)) {
+ coresight_disable_sink(cd);
+ } else if (list_is_last(&cd->path_link, path)) {
+ if (incl_source)
+ coresight_disable_source(cd);
+ } else {
+ coresight_disable_link(cd);
+ }
+ }
+}
+
+static int coresight_switch_sink(struct coresight_device *csdev)
+{
+ int ret = 0;
+ LIST_HEAD(path);
+ struct coresight_device *cd;
+
+ if (IS_ERR_OR_NULL(csdev))
+ return -EINVAL;
+
+ down(&coresight_mutex);
+ if (csdev->id == curr_sink)
+ goto out;
+
+ list_for_each_entry(cd, &coresight_devs, dev_link) {
+ if (cd->type == CORESIGHT_DEV_TYPE_SOURCE && cd->enable) {
+ coresight_build_path(cd, &path);
+ coresight_disable_path(&path, false);
+ coresight_release_path(&path);
+ }
+ }
+ curr_sink = csdev->id;
+ list_for_each_entry(cd, &coresight_devs, dev_link) {
+ if (cd->type == CORESIGHT_DEV_TYPE_SOURCE && cd->enable) {
+ coresight_build_path(cd, &path);
+ ret = coresight_enable_path(&path, false);
+ coresight_release_path(&path);
+ if (ret)
+ goto err;
+ }
+ }
+out:
+ up(&coresight_mutex);
+ return 0;
+err:
+ list_for_each_entry(cd, &coresight_devs, dev_link) {
+ if (cd->type == CORESIGHT_DEV_TYPE_SOURCE && cd->enable)
+ coresight_disable_source(cd);
+ }
+ pr_err("coresight: sink switch failed, sources disabled; try again\n");
+ return ret;
+}
+
+int coresight_enable(struct coresight_device *csdev)
+{
+ int ret = 0;
+ LIST_HEAD(path);
+
+ if (IS_ERR_OR_NULL(csdev))
+ return -EINVAL;
+
+ down(&coresight_mutex);
+ if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
+ ret = -EINVAL;
+ pr_err("coresight: wrong device type in %s\n", __func__);
+ goto out;
+ }
+ if (csdev->enable)
+ goto out;
+
+ coresight_build_path(csdev, &path);
+ ret = coresight_enable_path(&path, true);
+ coresight_release_path(&path);
+ if (ret)
+ pr_err("coresight: enable failed\n");
+out:
+ up(&coresight_mutex);
return ret;
}
EXPORT_SYMBOL(coresight_enable);
-void coresight_disable(struct coresight_device *csdev, int port)
+void coresight_disable(struct coresight_device *csdev)
{
- int i;
- struct coresight_connection *conn;
+ LIST_HEAD(path);
- mutex_lock(&csdev->mutex);
- if (csdev->refcnt[port] == 1) {
- if (csdev->ops->disable)
- csdev->ops->disable(csdev, port);
- for (i = 0; i < csdev->nr_conns; i++) {
- conn = &csdev->conns[i];
- coresight_disable(conn->child_dev, conn->child_port);
- }
+ if (IS_ERR_OR_NULL(csdev))
+ return;
+
+ down(&coresight_mutex);
+ if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
+ pr_err("coresight: wrong device type in %s\n", __func__);
+ goto out;
}
- csdev->refcnt[port]--;
- mutex_unlock(&csdev->mutex);
+ if (!csdev->enable)
+ goto out;
+
+ coresight_build_path(csdev, &path);
+ coresight_disable_path(&path, true);
+ coresight_release_path(&path);
+out:
+ up(&coresight_mutex);
}
EXPORT_SYMBOL(coresight_disable);
@@ -104,6 +384,39 @@
.dev_attrs = coresight_dev_attrs,
};
+static ssize_t coresight_show_curr_sink(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ csdev->id == curr_sink ? 1 : 0);
+}
+
+static ssize_t coresight_store_curr_sink(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = 0;
+ unsigned long val;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ if (val)
+ ret = coresight_switch_sink(csdev);
+ else
+ ret = -EINVAL;
+
+ if (ret)
+ return ret;
+ return size;
+}
+static DEVICE_ATTR(curr_sink, S_IRUGO | S_IWUSR, coresight_show_curr_sink,
+ coresight_store_curr_sink);
+
static ssize_t coresight_show_enable(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -124,9 +437,9 @@
return -EINVAL;
if (val)
- ret = coresight_enable(csdev, 0);
+ ret = coresight_enable(csdev);
else
- coresight_disable(csdev, 0);
+ coresight_disable(csdev);
if (ret)
return ret;
@@ -135,38 +448,55 @@
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, coresight_show_enable,
coresight_store_enable);
-static struct attribute *coresight_attrs[] = {
+static struct attribute *coresight_attrs_sink[] = {
+ &dev_attr_curr_sink.attr,
+ NULL,
+};
+
+static struct attribute_group coresight_attr_grp_sink = {
+ .attrs = coresight_attrs_sink,
+};
+
+static const struct attribute_group *coresight_attr_grps_sink[] = {
+ &coresight_attr_grp_sink,
+ NULL,
+};
+
+static struct attribute *coresight_attrs_source[] = {
&dev_attr_enable.attr,
NULL,
};
-static struct attribute_group coresight_attr_grp = {
- .attrs = coresight_attrs,
+static struct attribute_group coresight_attr_grp_source = {
+ .attrs = coresight_attrs_source,
};
-static const struct attribute_group *coresight_attr_grps[] = {
- &coresight_attr_grp,
+static const struct attribute_group *coresight_attr_grps_source[] = {
+ &coresight_attr_grp_source,
NULL,
};
-static struct device_type coresight_dev_type[CORESIGHT_DEV_TYPE_MAX] = {
+static struct device_type coresight_dev_type[] = {
{
- .name = "source",
- .groups = coresight_attr_grps,
+ .name = "sink",
+ .groups = coresight_attr_grps_sink,
},
{
.name = "link",
},
{
- .name = "sink",
- .groups = coresight_attr_grps,
+ .name = "linksink",
+ .groups = coresight_attr_grps_sink,
+ },
+ {
+ .name = "source",
+ .groups = coresight_attr_grps_source,
},
};
static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
- mutex_destroy(&csdev->mutex);
kfree(csdev);
}
@@ -174,14 +504,12 @@
{
struct coresight_connection *conn, *temp;
- mutex_lock(&coresight_conns_mutex);
list_for_each_entry_safe(conn, temp, &coresight_orph_conns, link) {
if (conn->child_id == csdev->id) {
conn->child_dev = csdev;
list_del(&conn->link);
}
}
- mutex_unlock(&coresight_conns_mutex);
}
static void coresight_fixup_device_conns(struct coresight_device *csdev)
@@ -192,21 +520,16 @@
for (i = 0; i < csdev->nr_conns; i++) {
found = false;
- mutex_lock(&coresight_devs_mutex);
- list_for_each_entry(cd, &coresight_devs, link) {
+ list_for_each_entry(cd, &coresight_devs, dev_link) {
if (csdev->conns[i].child_id == cd->id) {
csdev->conns[i].child_dev = cd;
found = true;
break;
}
}
- mutex_unlock(&coresight_devs_mutex);
- if (!found) {
- mutex_lock(&coresight_conns_mutex);
+ if (!found)
list_add_tail(&csdev->conns[i].link,
&coresight_orph_conns);
- mutex_unlock(&coresight_conns_mutex);
- }
}
}
@@ -214,7 +537,9 @@
{
int i;
int ret;
- int *refcnt;
+ int link_subtype;
+ int nr_refcnts;
+ int *refcnts = NULL;
struct coresight_device *csdev;
struct coresight_connection *conns;
@@ -224,28 +549,41 @@
goto err_kzalloc_csdev;
}
- mutex_init(&csdev->mutex);
csdev->id = desc->pdata->id;
- refcnt = kzalloc(sizeof(*refcnt) * desc->pdata->nr_ports, GFP_KERNEL);
- if (!refcnt) {
- ret = -ENOMEM;
- goto err_kzalloc_refcnt;
- }
- csdev->refcnt = refcnt;
+ if (desc->type == CORESIGHT_DEV_TYPE_LINK ||
+ desc->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+ link_subtype = desc->subtype.link_subtype;
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
+ nr_refcnts = desc->pdata->nr_inports;
+ else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
+ nr_refcnts = desc->pdata->nr_outports;
+ else
+ nr_refcnts = 1;
- csdev->nr_conns = desc->pdata->nr_children;
+ refcnts = kzalloc(sizeof(*refcnts) * nr_refcnts, GFP_KERNEL);
+ if (!refcnts) {
+ ret = -ENOMEM;
+ goto err_kzalloc_refcnts;
+ }
+ csdev->refcnt.link_refcnts = refcnts;
+ }
+
+ csdev->nr_conns = desc->pdata->nr_outports;
conns = kzalloc(sizeof(*conns) * csdev->nr_conns, GFP_KERNEL);
if (!conns) {
ret = -ENOMEM;
goto err_kzalloc_conns;
}
for (i = 0; i < csdev->nr_conns; i++) {
+ conns[i].outport = desc->pdata->outports[i];
conns[i].child_id = desc->pdata->child_ids[i];
conns[i].child_port = desc->pdata->child_ports[i];
}
csdev->conns = conns;
+ csdev->type = desc->type;
+ csdev->subtype = desc->subtype;
csdev->ops = desc->ops;
csdev->owner = desc->owner;
@@ -256,24 +594,34 @@
csdev->dev.release = coresight_device_release;
dev_set_name(&csdev->dev, "%s", desc->pdata->name);
+ down(&coresight_mutex);
+ if (desc->pdata->default_sink) {
+ if (curr_sink == NO_SINK) {
+ curr_sink = csdev->id;
+ } else {
+ ret = -EINVAL;
+ goto err_default_sink;
+ }
+ }
+
coresight_fixup_device_conns(csdev);
ret = device_register(&csdev->dev);
if (ret)
goto err_dev_reg;
coresight_fixup_orphan_conns(csdev);
- mutex_lock(&coresight_devs_mutex);
- list_add_tail(&csdev->link, &coresight_devs);
- mutex_unlock(&coresight_devs_mutex);
+ list_add_tail(&csdev->dev_link, &coresight_devs);
+ up(&coresight_mutex);
return csdev;
err_dev_reg:
put_device(&csdev->dev);
+err_default_sink:
+ up(&coresight_mutex);
kfree(conns);
err_kzalloc_conns:
- kfree(refcnt);
-err_kzalloc_refcnt:
- mutex_destroy(&csdev->mutex);
+ kfree(refcnts);
+err_kzalloc_refcnts:
kfree(csdev);
err_kzalloc_csdev:
return ERR_PTR(ret);
@@ -286,9 +634,7 @@
return;
if (get_device(&csdev->dev)) {
- mutex_lock(&csdev->mutex);
device_unregister(&csdev->dev);
- mutex_unlock(&csdev->mutex);
put_device(&csdev->dev);
}
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e9d654b..be71347 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -142,7 +142,7 @@
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
-struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, int sysfs)
{
struct cpufreq_policy *data;
unsigned long flags;
@@ -166,7 +166,7 @@
if (!data)
goto err_out_put_module;
- if (!kobject_get(&data->kobj))
+ if (!sysfs && !kobject_get(&data->kobj))
goto err_out_put_module;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -179,16 +179,35 @@
err_out:
return NULL;
}
+
+struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+{
+ return __cpufreq_cpu_get(cpu, 0);
+}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
+static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
+{
+ return __cpufreq_cpu_get(cpu, 1);
+}
+
+static void __cpufreq_cpu_put(struct cpufreq_policy *data, int sysfs)
+{
+ if (!sysfs)
+ kobject_put(&data->kobj);
+ module_put(cpufreq_driver->owner);
+}
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
- kobject_put(&data->kobj);
- module_put(cpufreq_driver->owner);
+ __cpufreq_cpu_put(data, 0);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
+static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
+{
+ __cpufreq_cpu_put(data, 1);
+}
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
@@ -643,7 +662,7 @@
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
- policy = cpufreq_cpu_get(policy->cpu);
+ policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -657,7 +676,7 @@
unlock_policy_rwsem_read(policy->cpu);
fail:
- cpufreq_cpu_put(policy);
+ cpufreq_cpu_put_sysfs(policy);
no_policy:
return ret;
}
@@ -668,7 +687,7 @@
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
- policy = cpufreq_cpu_get(policy->cpu);
+ policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -682,7 +701,7 @@
unlock_policy_rwsem_write(policy->cpu);
fail:
- cpufreq_cpu_put(policy);
+ cpufreq_cpu_put_sysfs(policy);
no_policy:
return ret;
}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 5798c94..785ba6c 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -994,7 +994,6 @@
rc = input_register_handler(&dbs_input_handler);
mutex_unlock(&dbs_mutex);
- mutex_init(&this_dbs_info->timer_mutex);
if (!ondemand_powersave_bias_setspeed(
this_dbs_info->cur_policy,
@@ -1071,6 +1070,9 @@
return -EFAULT;
}
for_each_possible_cpu(i) {
+ struct cpu_dbs_info_s *this_dbs_info =
+ &per_cpu(od_cpu_dbs_info, i);
+ mutex_init(&this_dbs_info->timer_mutex);
INIT_WORK(&per_cpu(dbs_refresh_work, i), dbs_refresh_callback);
}
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index fb1ffd0..4657c37 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -288,6 +288,9 @@
config CRYPTO_DEV_QCE40
bool
+config CRYPTO_DEV_QCE50
+ bool
+
config CRYPTO_DEV_QCRYPTO
tristate "Qualcomm Crypto accelerator"
select CRYPTO_DES
@@ -303,20 +306,22 @@
config CRYPTO_DEV_QCE
tristate "Qualcomm Crypto Engine (QCE) module"
select CRYPTO_DEV_QCE40 if ARCH_MSM8960 || ARCH_MSM9615
+ select CRYPTO_DEV_QCE50 if ARCH_MSM8974
default n
help
This driver supports Qualcomm Crypto Engine in MSM7x30, MSM8660
MSM8x55, MSM8960 and MSM9615
To compile this driver as a module, choose M here: the
For MSM7x30 MSM8660 and MSM8x55 the module is called qce
- For MSM8960 and MSM9615 the module is called qce40
+ For MSM8960, APQ8064 and MSM9615 the module is called qce40
+ For MSM8974 the module is called qce50
config CRYPTO_DEV_QCEDEV
tristate "QCEDEV Interface to CE module"
default n
help
This driver supports Qualcomm QCEDEV Crypto in MSM7x30, MSM8660,
- MSM8960 and MSM9615.
+ MSM8960, MSM9615, APQ8064 and MSM8974.
This exposes the interface to the QCE hardware accelerator via IOCTLs
To compile this driver as a module, choose M here: the
module will be called qcedev.
diff --git a/drivers/crypto/msm/Makefile b/drivers/crypto/msm/Makefile
index 61406b9..df9acf2 100644
--- a/drivers/crypto/msm/Makefile
+++ b/drivers/crypto/msm/Makefile
@@ -1,8 +1,12 @@
obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev.o
-ifeq ($(CONFIG_CRYPTO_DEV_QCE40), y)
- obj-$(CONFIG_CRYPTO_DEV_QCE) += qce40.o
+ifeq ($(CONFIG_CRYPTO_DEV_QCE50), y)
+ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce50.o
else
- obj-$(CONFIG_CRYPTO_DEV_QCE) += qce.o
+ ifeq ($(CONFIG_CRYPTO_DEV_QCE40), y)
+ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce40.o
+ else
+ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce.o
+ endif
endif
obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o
obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o
diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c
index 55cf651..4a9729c 100644
--- a/drivers/crypto/msm/qce.c
+++ b/drivers/crypto/msm/qce.c
@@ -1,6 +1,6 @@
/* Qualcomm Crypto Engine driver.
*
- * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2463,6 +2463,8 @@
ce_support->aes_xts = false;
ce_support->aes_ccm = false;
ce_support->ota = pce_dev->ota;
+ ce_support->aligned_only = false;
+ ce_support->bam = false;
return 0;
}
EXPORT_SYMBOL(qce_hw_support);
@@ -2703,7 +2705,5 @@
EXPORT_SYMBOL(qce_f9_req);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Crypto Engine driver");
-MODULE_VERSION("1.15");
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
index edd2089..037861c 100644
--- a/drivers/crypto/msm/qce.h
+++ b/drivers/crypto/msm/qce.h
@@ -1,6 +1,6 @@
/* Qualcomm Crypto Engine driver API
*
- * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -71,6 +71,7 @@
QCE_HASH_SHA1_HMAC = 2,
QCE_HASH_SHA256_HMAC = 3,
QCE_HASH_AES_CMAC = 4,
+ QCE_AEAD_SHA1_HMAC = 5,
QCE_HASH_LAST
};
@@ -110,6 +111,8 @@
bool aes_xts;
bool aes_ccm;
bool ota;
+ bool aligned_only;
+ bool bam;
};
/* Sha operation parameters */
diff --git a/drivers/crypto/msm/qce40.c b/drivers/crypto/msm/qce40.c
index c203fc5..7a229a5 100644
--- a/drivers/crypto/msm/qce40.c
+++ b/drivers/crypto/msm/qce40.c
@@ -2599,11 +2599,11 @@
ce_support->aes_xts = true;
ce_support->aes_ccm = true;
ce_support->ota = false;
+ ce_support->aligned_only = false;
+ ce_support->bam = false;
return 0;
}
EXPORT_SYMBOL(qce_hw_support);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Crypto Engine driver");
-MODULE_VERSION("2.17");
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
new file mode 100644
index 0000000..4ccd89d
--- /dev/null
+++ b/drivers/crypto/msm/qce50.c
@@ -0,0 +1,2739 @@
+/* Qualcomm Crypto Engine driver.
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/qcedev.h>
+#include <linux/bitops.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <mach/dma.h>
+#include <mach/clk.h>
+#include <mach/socinfo.h>
+
+#include "qce.h"
+#include "qce50.h"
+#include "qcryptohw_50.h"
+
+#define CRYPTO_CONFIG_RESET 0xE001F
+
+static DEFINE_MUTEX(bam_register_cnt);
+struct bam_registration_info {
+ uint32_t handle;
+ uint32_t cnt;
+};
+static struct bam_registration_info bam_registry;
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can only handle one crypto operation at one time. It is up to
+ * the sw above to ensure single threading of operation on an engine.
+ */
+struct qce_device {
+ struct device *pdev; /* Handle to platform_device structure */
+
+ unsigned char *coh_vmem; /* Allocated coherent virtual memory */
+ dma_addr_t coh_pmem; /* Allocated coherent physical memory */
+ int memsize; /* Memory allocated */
+
+ void __iomem *iobase; /* Virtual io base of CE HW */
+ unsigned int phy_iobase; /* Physical io base of CE HW */
+
+ struct clk *ce_core_src_clk; /* Handle to CE src clk*/
+ struct clk *ce_core_clk; /* Handle to CE clk */
+ struct clk *ce_clk; /* Handle to CE clk */
+
+ qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */
+
+ int assoc_nents;
+ int ivsize;
+ int authsize;
+ int src_nents;
+ int dst_nents;
+
+ dma_addr_t phy_iv_in;
+
+ void *areq;
+ enum qce_cipher_mode_enum mode;
+ struct ce_sps_data ce_sps;
+};
+
+/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha1[] = {
+ 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+};
+
+/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha256[] = {
+ 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+ 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
+};
+
+static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
+ unsigned int len)
+{
+ unsigned n;
+
+ n = len / sizeof(uint32_t) ;
+ for (; n > 0; n--) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000) |
+ (((*(b+2)) << 8) & 0xff00) |
+ (*(b+3) & 0xff);
+ b += sizeof(uint32_t);
+ iv++;
+ }
+
+ n = len % sizeof(uint32_t);
+ if (n == 3) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000) |
+ (((*(b+2)) << 8) & 0xff00) ;
+ } else if (n == 2) {
+ *iv = ((*b << 24) & 0xff000000) |
+ (((*(b+1)) << 16) & 0xff0000) ;
+ } else if (n == 1) {
+ *iv = ((*b << 24) & 0xff000000) ;
+ }
+}
+
+static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
+ unsigned int len)
+{
+ unsigned i, j;
+ unsigned char swap_iv[AES_IV_LENGTH];
+
+ memset(swap_iv, 0, AES_IV_LENGTH);
+ for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--)
+ swap_iv[i] = b[j];
+ _byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
+}
+
+static int count_sg(struct scatterlist *sg, int nbytes)
+{
+ int i;
+
+ for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+ nbytes -= sg->length;
+ return i;
+}
+
+static int _probe_ce_engine(struct qce_device *pce_dev)
+{
+ unsigned int rev;
+ unsigned int maj_rev, min_rev, step_rev;
+
+ rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
+ mb();
+ maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
+ min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
+ step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;
+
+ if ((maj_rev != 0x05) || (min_rev > 0x02) || (step_rev > 0x02)) {
+ pr_err("Unknown Qualcomm crypto device at 0x%x, rev %d.%d.%d\n",
+ pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
+ return -EIO;
+ };
+ if ((min_rev > 0) && (step_rev != 0)) {
+ pr_err("Unknown Qualcomm crypto device at 0x%x, rev %d.%d.%d\n",
+ pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
+ return -EIO;
+ };
+ pce_dev->ce_sps.minor_version = min_rev;
+
+ dev_info(pce_dev->pdev, "Qualcomm Crypto %d.%d.%d device found @0x%x\n",
+ maj_rev, min_rev, step_rev, pce_dev->phy_iobase);
+
+ pce_dev->ce_sps.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
+
+ dev_info(pce_dev->pdev,
+ "IO base, CE = 0x%x\n, "
+ "Consumer (IN) PIPE %d, "
+ "Producer (OUT) PIPE %d\n"
+ "IO base BAM = 0x%x\n"
+ "BAM IRQ %d\n",
+ (uint32_t) pce_dev->iobase,
+ pce_dev->ce_sps.dest_pipe_index,
+ pce_dev->ce_sps.src_pipe_index,
+ (uint32_t)pce_dev->ce_sps.bam_iobase,
+ pce_dev->ce_sps.bam_irq);
+ return 0;
+};
+
+static int _ce_get_hash_cmdlistinfo(struct qce_device *pce_dev,
+ struct qce_sha_req *sreq,
+ struct qce_cmdlist_info **cmdplistinfo)
+{
+ struct qce_cmdlistptr_ops *cmdlistptr = &pce_dev->ce_sps.cmdlistptr;
+
+ switch (sreq->alg) {
+ case QCE_HASH_SHA1:
+ *cmdplistinfo = &cmdlistptr->auth_sha1;
+ break;
+
+ case QCE_HASH_SHA256:
+ *cmdplistinfo = &cmdlistptr->auth_sha256;
+ break;
+
+ case QCE_HASH_SHA1_HMAC:
+ *cmdplistinfo = &cmdlistptr->auth_sha1_hmac;
+ break;
+
+ case QCE_HASH_SHA256_HMAC:
+ *cmdplistinfo = &cmdlistptr->auth_sha256_hmac;
+ break;
+
+ case QCE_HASH_AES_CMAC:
+ if (sreq->authklen == AES128_KEY_SIZE)
+ *cmdplistinfo = &cmdlistptr->auth_aes_128_cmac;
+ else
+ *cmdplistinfo = &cmdlistptr->auth_aes_256_cmac;
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int _ce_setup_hash(struct qce_device *pce_dev,
+ struct qce_sha_req *sreq,
+ struct qce_cmdlist_info *cmdlistinfo)
+{
+ uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+ uint32_t diglen;
+ int i;
+ uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ bool sha1 = false;
+ struct sps_command_element *pce = NULL;
+
+ if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+ (sreq->alg == QCE_HASH_SHA256_HMAC) ||
+ (sreq->alg == QCE_HASH_AES_CMAC)) {
+ uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+
+ _byte_stream_to_net_words(mackey32, sreq->authkey,
+ sreq->authklen);
+
+ /* check for null key. If null, use hw key*/
+ for (i = 0; i < authk_size_in_word; i++) {
+ if (mackey32[i] != 0)
+ break;
+ }
+
+ pce = cmdlistinfo->go_proc;
+ if (i == authk_size_in_word) {
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_OEM_KEY_REG +
+ pce_dev->phy_iobase);
+ } else {
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+ pce_dev->phy_iobase);
+ pce = cmdlistinfo->auth_key;
+ for (i = 0; i < authk_size_in_word; i++, pce++)
+ pce->data = mackey32[i];
+ }
+ }
+
+ if (sreq->alg == QCE_HASH_AES_CMAC)
+ goto go_proc;
+
+ /* if not the last, the size has to be on the block boundary */
+ if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+ return -EIO;
+
+ switch (sreq->alg) {
+ case QCE_HASH_SHA1:
+ case QCE_HASH_SHA1_HMAC:
+ diglen = SHA1_DIGEST_SIZE;
+ sha1 = true;
+ break;
+ case QCE_HASH_SHA256:
+ case QCE_HASH_SHA256_HMAC:
+ diglen = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+ if (sreq->first_blk) {
+ if (sha1) {
+ for (i = 0; i < 5; i++)
+ auth32[i] = _std_init_vector_sha1[i];
+ } else {
+ for (i = 0; i < 8; i++)
+ auth32[i] = _std_init_vector_sha256[i];
+ }
+ } else {
+ _byte_stream_to_net_words(auth32, sreq->digest, diglen);
+ }
+
+ pce = cmdlistinfo->auth_iv;
+ for (i = 0; i < 5; i++, pce++)
+ pce->data = auth32[i];
+
+ if ((sreq->alg == QCE_HASH_SHA256) ||
+ (sreq->alg == QCE_HASH_SHA256_HMAC)) {
+ for (i = 5; i < 8; i++, pce++)
+ pce->data = auth32[i];
+ }
+
+ /* write auth_bytecnt 0/1, start with 0 */
+ pce = cmdlistinfo->auth_bytecount;
+ for (i = 0; i < 2; i++, pce++)
+ pce->data = sreq->auth_data[i];
+
+ /* Set/reset last bit in CFG register */
+ pce = cmdlistinfo->auth_seg_cfg;
+ if (sreq->last_blk)
+ pce->data |= 1 << CRYPTO_LAST;
+ else
+ pce->data &= ~(1 << CRYPTO_LAST);
+ if (sreq->first_blk)
+ pce->data |= 1 << CRYPTO_FIRST;
+ else
+ pce->data &= ~(1 << CRYPTO_FIRST);
+go_proc:
+ /* write auth seg size */
+ pce = cmdlistinfo->auth_seg_size;
+ pce->data = sreq->size;
+
+ /* write auth seg size start*/
+ pce = cmdlistinfo->auth_seg_start;
+ pce->data = 0;
+
+ /* write seg size */
+ pce = cmdlistinfo->seg_size;
+ pce->data = sreq->size;
+
+ return 0;
+}
+
+static int _ce_get_cipher_cmdlistinfo(struct qce_device *pce_dev,
+ struct qce_req *creq,
+ struct qce_cmdlist_info **cmdlistinfo)
+{
+ struct qce_cmdlistptr_ops *cmdlistptr = &pce_dev->ce_sps.cmdlistptr;
+
+ if (creq->alg != CIPHER_ALG_AES) {
+ switch (creq->alg) {
+ case CIPHER_ALG_DES:
+ if (creq->mode == QCE_MODE_ECB)
+ *cmdlistinfo = &cmdlistptr->cipher_des_ecb;
+ else
+ *cmdlistinfo = &cmdlistptr->cipher_des_cbc;
+ break;
+
+ case CIPHER_ALG_3DES:
+ if (creq->mode == QCE_MODE_ECB)
+ *cmdlistinfo =
+ &cmdlistptr->cipher_3des_ecb;
+ else
+ *cmdlistinfo =
+ &cmdlistptr->cipher_3des_cbc;
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (creq->mode) {
+ case QCE_MODE_ECB:
+ if (creq->encklen == AES128_KEY_SIZE)
+ *cmdlistinfo = &cmdlistptr->cipher_aes_128_ecb;
+ else
+ *cmdlistinfo = &cmdlistptr->cipher_aes_256_ecb;
+ break;
+
+ case QCE_MODE_CBC:
+ case QCE_MODE_CTR:
+ if (creq->encklen == AES128_KEY_SIZE)
+ *cmdlistinfo =
+ &cmdlistptr->cipher_aes_128_cbc_ctr;
+ else
+ *cmdlistinfo =
+ &cmdlistptr->cipher_aes_256_cbc_ctr;
+ break;
+
+ case QCE_MODE_XTS:
+ if (creq->encklen == AES128_KEY_SIZE)
+ *cmdlistinfo = &cmdlistptr->cipher_aes_128_xts;
+ else
+ *cmdlistinfo = &cmdlistptr->cipher_aes_256_xts;
+ break;
+
+ case QCE_MODE_CCM:
+ if (creq->encklen == AES128_KEY_SIZE)
+ *cmdlistinfo = &cmdlistptr->aead_aes_128_ccm;
+ else
+ *cmdlistinfo = &cmdlistptr->aead_aes_256_ccm;
+ break;
+
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
+ uint32_t totallen_in, uint32_t coffset,
+ struct qce_cmdlist_info *cmdlistinfo)
+{
+ uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+ 0, 0, 0, 0};
+ uint32_t enck_size_in_word = 0;
+ uint32_t key_size;
+ bool use_hw_key = false;
+ uint32_t encr_cfg = 0;
+ uint32_t ivsize = creq->ivsize;
+ int i;
+ struct sps_command_element *pce = NULL;
+
+ if (creq->mode == QCE_MODE_XTS)
+ key_size = creq->encklen/2;
+ else
+ key_size = creq->encklen;
+
+ _byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+
+ /* check for null key. If null, use hw key*/
+ enck_size_in_word = key_size/sizeof(uint32_t);
+ for (i = 0; i < enck_size_in_word; i++) {
+ if (enckey32[i] != 0)
+ break;
+ }
+ pce = cmdlistinfo->go_proc;
+ if (i == enck_size_in_word) {
+ use_hw_key = true;
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_OEM_KEY_REG +
+ pce_dev->phy_iobase);
+ } else {
+ pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+ pce_dev->phy_iobase);
+ }
+
+ if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+ uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+ uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+ uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+ uint32_t auth_cfg = 0;
+
+ /* write nonce */
+ _byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+ pce = cmdlistinfo->auth_nonce_info;
+ for (i = 0; i < noncelen32; i++, pce++)
+ pce->data = nonce32[i];
+
+ /* TBD NEW FEATURE partial AES CCM pkt support set last bit */
+ auth_cfg |= ((1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST));
+ if (creq->dir == QCE_ENCRYPT)
+ auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+ else
+ auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+ auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+ auth_cfg |= (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE);
+ if (creq->authklen == AES128_KEY_SIZE)
+ auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
+ CRYPTO_AUTH_KEY_SIZE);
+ else {
+ if (creq->authklen == AES256_KEY_SIZE)
+ auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
+ CRYPTO_AUTH_KEY_SIZE);
+ }
+ auth_cfg |= (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG);
+ auth_cfg |= ((MAX_NONCE/sizeof(uint32_t)) <<
+ CRYPTO_AUTH_NONCE_NUM_WORDS);
+
+ if (use_hw_key == true) {
+ auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+ } else {
+ auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+ /* write auth key */
+ pce = cmdlistinfo->auth_key;
+ for (i = 0; i < authklen32; i++, pce++)
+ pce->data = enckey32[i];
+ }
+
+ pce = cmdlistinfo->auth_seg_cfg;
+ pce->data = auth_cfg;
+
+ pce = cmdlistinfo->auth_seg_size;
+ pce->data = totallen_in;
+ pce = cmdlistinfo->auth_seg_start;
+ pce->data = 0;
+ }
+
+ switch (creq->mode) {
+ case QCE_MODE_ECB:
+ encr_cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+ break;
+ case QCE_MODE_CBC:
+ encr_cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+ break;
+ case QCE_MODE_XTS:
+ encr_cfg |= (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+ break;
+ case QCE_MODE_CCM:
+ encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE);
+ break;
+ case QCE_MODE_CTR:
+ default:
+ encr_cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+ break;
+ }
+ pce_dev->mode = creq->mode;
+
+ switch (creq->alg) {
+ case CIPHER_ALG_DES:
+ if (creq->mode != QCE_MODE_ECB) {
+ _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+ pce = cmdlistinfo->encr_cntr_iv;
+ pce->data = enciv32[0];
+ pce++;
+ pce->data = enciv32[1];
+ }
+ if (use_hw_key == false) {
+ pce = cmdlistinfo->encr_key;
+ pce->data = enckey32[0];
+ pce++;
+ pce->data = enckey32[1];
+ }
+ break;
+ case CIPHER_ALG_3DES:
+ if (creq->mode != QCE_MODE_ECB) {
+ _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+ pce = cmdlistinfo->encr_cntr_iv;
+ pce->data = enciv32[0];
+ pce++;
+ pce->data = enciv32[1];
+ }
+ if (use_hw_key == false) {
+ /* write encr key */
+ pce = cmdlistinfo->encr_key;
+ for (i = 0; i < 6; i++, pce++)
+ pce->data = enckey32[i];
+ }
+ break;
+ case CIPHER_ALG_AES:
+ default:
+ if (creq->mode == QCE_MODE_XTS) {
+ uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+ = {0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t xtsklen =
+ creq->encklen/(2 * sizeof(uint32_t));
+
+ _byte_stream_to_net_words(xtskey32, (creq->enckey +
+ creq->encklen/2), creq->encklen/2);
+ /* write xts encr key */
+ pce = cmdlistinfo->encr_xts_key;
+ for (i = 0; i < xtsklen; i++, pce++)
+ pce->data = xtskey32[i];
+
+ /* write xts du size */
+ pce = cmdlistinfo->encr_xts_du_size;
+ pce->data = creq->cryptlen;
+ }
+ if (creq->mode != QCE_MODE_ECB) {
+ if (creq->mode == QCE_MODE_XTS)
+ _byte_stream_swap_to_net_words(enciv32,
+ creq->iv, ivsize);
+ else
+ _byte_stream_to_net_words(enciv32, creq->iv,
+ ivsize);
+ /* write encr cntr iv */
+ pce = cmdlistinfo->encr_cntr_iv;
+ for (i = 0; i < 4; i++, pce++)
+ pce->data = enciv32[i];
+
+ if (creq->mode == QCE_MODE_CCM) {
+ /* write cntr iv for ccm */
+ pce = cmdlistinfo->encr_ccm_cntr_iv;
+ for (i = 0; i < 4; i++, pce++)
+ pce->data = enciv32[i];
+ /* update cntr_iv[3] by one */
+ pce = cmdlistinfo->encr_cntr_iv;
+ pce += 3;
+ pce->data += 1;
+ }
+ }
+
+ if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+ encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+ CRYPTO_ENCR_KEY_SZ);
+ encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+ } else {
+ if (use_hw_key == false) {
+ /* write encr key */
+ pce = cmdlistinfo->encr_key;
+ for (i = 0; i < enck_size_in_word; i++, pce++)
+ pce->data = enckey32[i];
+ switch (key_size) {
+ case AES128_KEY_SIZE:
+ encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128
+ << CRYPTO_ENCR_KEY_SZ);
+ break;
+ case AES256_KEY_SIZE:
+ default:
+ encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES256
+ << CRYPTO_ENCR_KEY_SZ);
+ break;
+ } /* end of switch (creq->encklen) */
+ }
+ encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+ } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+ break;
+ } /* end of switch (creq->mode) */
+
+ /* write encr seg cfg */
+ pce = cmdlistinfo->encr_seg_cfg;
+ if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
+ if (creq->dir == QCE_ENCRYPT)
+ pce->data |= (1 << CRYPTO_ENCODE);
+ else
+ pce->data &= ~(1 << CRYPTO_ENCODE);
+ encr_cfg = pce->data;
+ } else {
+ encr_cfg |=
+ ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+ }
+ if (use_hw_key == true)
+ encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+ else
+ encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+ pce->data = encr_cfg;
+
+ /* write encr seg size */
+ pce = cmdlistinfo->encr_seg_size;
+ if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
+ pce->data = (creq->cryptlen + creq->authsize);
+ else
+ pce->data = creq->cryptlen;
+
+ /* write encr seg start */
+ pce = cmdlistinfo->encr_seg_start;
+ pce->data = (coffset & 0xffff);
+
+ /* write seg size */
+ pce = cmdlistinfo->seg_size;
+ pce->data = totallen_in;
+
+ return 0;
+};
+
+static int _aead_complete(struct qce_device *pce_dev)
+{
+ struct aead_request *areq;
+ unsigned char mac[SHA256_DIGEST_SIZE];
+
+ areq = (struct aead_request *) pce_dev->areq;
+ if (areq->src != areq->dst) {
+ dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+ DMA_FROM_DEVICE);
+ }
+ dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+ DMA_TO_DEVICE);
+ /* check MAC */
+ memcpy(mac, (char *)(&pce_dev->ce_sps.result->auth_iv[0]),
+ SHA256_DIGEST_SIZE);
+ if (pce_dev->mode == QCE_MODE_CCM) {
+ uint32_t result_status;
+ result_status = pce_dev->ce_sps.result->status;
+ result_status &= (1 << CRYPTO_MAC_FAILED);
+ result_status |= (pce_dev->ce_sps.consumer_status |
+ pce_dev->ce_sps.producer_status);
+ pce_dev->qce_cb(areq, mac, NULL, result_status);
+ } else {
+ uint32_t ivsize = 0;
+ struct crypto_aead *aead;
+ unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+
+ aead = crypto_aead_reqtfm(areq);
+ ivsize = crypto_aead_ivsize(aead);
+ dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
+ ivsize, DMA_TO_DEVICE);
+ memcpy(iv, (char *)(pce_dev->ce_sps.result->encr_cntr_iv),
+ sizeof(iv));
+ pce_dev->qce_cb(areq, mac, iv, pce_dev->ce_sps.consumer_status |
+ pce_dev->ce_sps.producer_status);
+
+ }
+ return 0;
+};
+
+static void _sha_complete(struct qce_device *pce_dev)
+{
+ struct ahash_request *areq;
+ unsigned char digest[SHA256_DIGEST_SIZE];
+
+ areq = (struct ahash_request *) pce_dev->areq;
+ dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+ DMA_TO_DEVICE);
+ memcpy(digest, (char *)(&pce_dev->ce_sps.result->auth_iv[0]),
+ SHA256_DIGEST_SIZE);
+ pce_dev->qce_cb(areq, digest,
+ (char *)pce_dev->ce_sps.result->auth_byte_count,
+ pce_dev->ce_sps.consumer_status);
+};
+
+static int _ablk_cipher_complete(struct qce_device *pce_dev)
+{
+ struct ablkcipher_request *areq;
+ unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+
+ areq = (struct ablkcipher_request *) pce_dev->areq;
+
+ if (areq->src != areq->dst) {
+ dma_unmap_sg(pce_dev->pdev, areq->dst,
+ pce_dev->dst_nents, DMA_FROM_DEVICE);
+ }
+ dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+
+ if (pce_dev->mode == QCE_MODE_ECB) {
+ pce_dev->qce_cb(areq, NULL, NULL,
+ pce_dev->ce_sps.consumer_status |
+ pce_dev->ce_sps.producer_status);
+ } else {
+ if (pce_dev->ce_sps.minor_version == 0) {
+ if (pce_dev->mode == QCE_MODE_CBC)
+ memcpy(iv, (char *)sg_virt(areq->src),
+ sizeof(iv));
+
+ if ((pce_dev->mode == QCE_MODE_CTR) ||
+ (pce_dev->mode == QCE_MODE_XTS)) {
+ uint32_t num_blk = 0;
+ uint32_t cntr_iv = 0;
+
+ memcpy(iv, areq->info, sizeof(iv));
+ if (pce_dev->mode == QCE_MODE_CTR)
+ num_blk = areq->nbytes/16;
+ cntr_iv = (u32)(((u32)(*(iv + 14))) << 8) |
+ (u32)(*(iv + 15));
+ *(iv + 14) = (char)((cntr_iv + num_blk) >> 8);
+ *(iv + 15) = (char)((cntr_iv + num_blk) & 0xFF);
+ }
+ } else {
+ memcpy(iv,
+ (char *)(pce_dev->ce_sps.result->encr_cntr_iv),
+ sizeof(iv));
+ }
+ pce_dev->qce_cb(areq, NULL, iv,
+ pce_dev->ce_sps.consumer_status |
+ pce_dev->ce_sps.producer_status);
+ }
+ return 0;
+};
+
+#ifdef QCE_DEBUG
+static void _qce_dump_descr_fifos(struct qce_device *pce_dev)
+{
+ int i, j, ents;
+ struct sps_iovec *iovec = pce_dev->ce_sps.in_transfer.iovec;
+ uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_NWD;
+
+ printk(KERN_INFO "==============================================\n");
+ printk(KERN_INFO "CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
+ printk(KERN_INFO "==============================================\n");
+ for (i = 0; i < pce_dev->ce_sps.in_transfer.iovec_count; i++) {
+ printk(KERN_INFO " [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
+ iovec->addr, iovec->size, iovec->flags);
+ if (iovec->flags & cmd_flags) {
+ struct sps_command_element *pced;
+
+ pced = (struct sps_command_element *)
+ (GET_VIRT_ADDR(iovec->addr));
+ ents = iovec->size/(sizeof(struct sps_command_element));
+ for (j = 0; j < ents; j++) {
+ printk(KERN_INFO " [%d] [0x%x] 0x%x\n", j,
+ pced->addr, pced->data);
+ pced++;
+ }
+ }
+ iovec++;
+ }
+
+ printk(KERN_INFO "==============================================\n");
+ printk(KERN_INFO "PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
+ printk(KERN_INFO "==============================================\n");
+ iovec = pce_dev->ce_sps.out_transfer.iovec;
+ for (i = 0; i < pce_dev->ce_sps.out_transfer.iovec_count; i++) {
+ printk(KERN_INFO " [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
+ iovec->addr, iovec->size, iovec->flags);
+ iovec++;
+ }
+}
+
+#else
+static void _qce_dump_descr_fifos(struct qce_device *pce_dev)
+{
+}
+#endif
+
+static void _qce_sps_iovec_count_init(struct qce_device *pce_dev)
+{
+ pce_dev->ce_sps.in_transfer.iovec_count = 0;
+ pce_dev->ce_sps.out_transfer.iovec_count = 0;
+}
+
+static void _qce_set_eot_flag(struct sps_transfer *sps_bam_pipe)
+{
+ struct sps_iovec *iovec = sps_bam_pipe->iovec +
+ (sps_bam_pipe->iovec_count - 1);
+ iovec->flags |= SPS_IOVEC_FLAG_EOT;
+}
+
+static void _qce_sps_add_data(uint32_t addr, uint32_t len,
+ struct sps_transfer *sps_bam_pipe)
+{
+ struct sps_iovec *iovec = sps_bam_pipe->iovec +
+ sps_bam_pipe->iovec_count;
+ if (len) {
+ iovec->size = len;
+ iovec->addr = addr;
+ iovec->flags = 0;
+ sps_bam_pipe->iovec_count++;
+ }
+}
+
+static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
+ struct scatterlist *sg_src, uint32_t nbytes,
+ struct sps_transfer *sps_bam_pipe)
+{
+ uint32_t addr, data_cnt, len;
+ struct sps_iovec *iovec = sps_bam_pipe->iovec +
+ sps_bam_pipe->iovec_count;
+
+ while (nbytes > 0) {
+ len = min(nbytes, sg_dma_len(sg_src));
+ nbytes -= len;
+ addr = sg_dma_address(sg_src);
+ if (pce_dev->ce_sps.minor_version == 0)
+ len = ALIGN(len, pce_dev->ce_sps.ce_burst_size);
+ while (len > 0) {
+ if (len > SPS_MAX_PKT_SIZE) {
+ data_cnt = SPS_MAX_PKT_SIZE;
+ iovec->size = data_cnt;
+ iovec->addr = addr;
+ iovec->flags = 0;
+ } else {
+ data_cnt = len;
+ iovec->size = data_cnt;
+ iovec->addr = addr;
+ iovec->flags = 0;
+ }
+ iovec++;
+ sps_bam_pipe->iovec_count++;
+ addr += data_cnt;
+ len -= data_cnt;
+ }
+ sg_src++;
+ }
+ return 0;
+}
+
+static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
+ struct qce_cmdlist_info *cmdptr,
+ struct sps_transfer *sps_bam_pipe)
+{
+ struct sps_iovec *iovec = sps_bam_pipe->iovec +
+ sps_bam_pipe->iovec_count;
+ iovec->size = cmdptr->size;
+ iovec->addr = GET_PHYS_ADDR(cmdptr->cmdlist);
+ iovec->flags = SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_NWD | flag;
+ sps_bam_pipe->iovec_count++;
+
+ return 0;
+}
+
+static int _qce_sps_transfer(struct qce_device *pce_dev)
+{
+ int rc = 0;
+
+ _qce_dump_descr_fifos(pce_dev);
+ rc = sps_transfer(pce_dev->ce_sps.consumer.pipe,
+ &pce_dev->ce_sps.in_transfer);
+ if (rc) {
+ pr_err("sps_xfr() fail (consumer pipe=0x%x) rc = %d,",
+ (u32)pce_dev->ce_sps.consumer.pipe, rc);
+ return rc;
+ }
+ rc = sps_transfer(pce_dev->ce_sps.producer.pipe,
+ &pce_dev->ce_sps.out_transfer);
+ if (rc) {
+ pr_err("sps_xfr() fail (producer pipe=0x%x) rc = %d,",
+ (u32)pce_dev->ce_sps.producer.pipe, rc);
+ return rc;
+ }
+ return rc;
+}
+
+/**
+ * Allocate and Connect a CE peripheral's SPS endpoint
+ *
+ * This function allocates endpoint context and
+ * connect it with memory endpoint by calling
+ * appropriate SPS driver APIs.
+ *
+ * Also registers a SPS callback function with
+ * SPS driver
+ *
+ * This function should only be called once typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep - Pointer to sps endpoint data structure
+ * @is_produce - 1 means Producer endpoint
+ * 0 means Consumer endpoint
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
+ struct qce_sps_ep_conn_data *ep,
+ bool is_producer)
+{
+ int rc = 0;
+ struct sps_pipe *sps_pipe_info;
+ struct sps_connect *sps_connect_info = &ep->connect;
+ struct sps_register_event *sps_event = &ep->event;
+
+ /* Allocate endpoint context */
+ sps_pipe_info = sps_alloc_endpoint();
+ if (!sps_pipe_info) {
+ pr_err("sps_alloc_endpoint() failed!!! is_producer=%d",
+ is_producer);
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* Now save the sps pipe handle */
+ ep->pipe = sps_pipe_info;
+
+ /* Get default connection configuration for an endpoint */
+ rc = sps_get_config(sps_pipe_info, sps_connect_info);
+ if (rc) {
+ pr_err("sps_get_config() fail pipe_handle=0x%x, rc = %d\n",
+ (u32)sps_pipe_info, rc);
+ goto get_config_err;
+ }
+
+ /* Modify the default connection configuration */
+ if (is_producer) {
+ /*
+ * For CE producer transfer, source should be
+ * CE peripheral where as destination should
+ * be system memory.
+ */
+ sps_connect_info->source = pce_dev->ce_sps.bam_handle;
+ sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
+ /* Producer pipe will handle this connection */
+ sps_connect_info->mode = SPS_MODE_SRC;
+ sps_connect_info->options =
+ SPS_O_AUTO_ENABLE | SPS_O_EOT;
+ } else {
+ /* For CE consumer transfer, source should be
+ * system memory where as destination should
+ * CE peripheral
+ */
+ sps_connect_info->source = SPS_DEV_HANDLE_MEM;
+ sps_connect_info->destination = pce_dev->ce_sps.bam_handle;
+ sps_connect_info->mode = SPS_MODE_DEST;
+ sps_connect_info->options =
+ SPS_O_AUTO_ENABLE | SPS_O_EOT;
+ }
+
+ /* Producer pipe index */
+ sps_connect_info->src_pipe_index = pce_dev->ce_sps.src_pipe_index;
+ /* Consumer pipe index */
+ sps_connect_info->dest_pipe_index = pce_dev->ce_sps.dest_pipe_index;
+ sps_connect_info->event_thresh = 0x10;
+ /*
+ * Max. no of scatter/gather buffers that can
+ * be passed by block layer = 32 (NR_SG).
+ * Each BAM descritor needs 64 bits (8 bytes).
+ * One BAM descriptor is required per buffer transfer.
+ * So we would require total 256 (32 * 8) bytes of descriptor FIFO.
+ * But due to HW limitation we need to allocate atleast one extra
+ * descriptor memory (256 bytes + 8 bytes). But in order to be
+ * in power of 2, we are allocating 512 bytes of memory.
+ */
+ sps_connect_info->desc.size = 512;
+ sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
+ sps_connect_info->desc.size,
+ &sps_connect_info->desc.phys_base,
+ GFP_KERNEL);
+ if (sps_connect_info->desc.base == NULL) {
+ rc = -ENOMEM;
+ pr_err("Can not allocate coherent memory for sps data\n");
+ goto get_config_err;
+ }
+
+ memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+
+ /* Establish connection between peripheral and memory endpoint */
+ rc = sps_connect(sps_pipe_info, sps_connect_info);
+ if (rc) {
+ pr_err("sps_connect() fail pipe_handle=0x%x, rc = %d\n",
+ (u32)sps_pipe_info, rc);
+ goto sps_connect_err;
+ }
+
+ sps_event->mode = SPS_TRIGGER_CALLBACK;
+ sps_event->options = SPS_O_EOT;
+ sps_event->xfer_done = NULL;
+ sps_event->user = (void *)pce_dev;
+
+ pr_debug("success, %s : pipe_handle=0x%x, desc fifo base (phy) = 0x%x\n",
+ is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
+ (u32)sps_pipe_info, sps_connect_info->desc.phys_base);
+ goto out;
+
+sps_connect_err:
+ dma_free_coherent(pce_dev->pdev,
+ sps_connect_info->desc.size,
+ sps_connect_info->desc.base,
+ sps_connect_info->desc.phys_base);
+get_config_err:
+ sps_free_endpoint(sps_pipe_info);
+out:
+ return rc;
+}
+
+/**
+ * Disconnect and Deallocate a CE peripheral's SPS endpoint
+ *
+ * This function disconnect endpoint and deallocates
+ * endpoint context.
+ *
+ * This function should only be called once typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep - Pointer to sps endpoint data structure
+ *
+ */
+static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
+ struct qce_sps_ep_conn_data *ep)
+{
+ struct sps_pipe *sps_pipe_info = ep->pipe;
+ struct sps_connect *sps_connect_info = &ep->connect;
+
+ sps_disconnect(sps_pipe_info);
+ dma_free_coherent(pce_dev->pdev,
+ sps_connect_info->desc.size,
+ sps_connect_info->desc.base,
+ sps_connect_info->desc.phys_base);
+ sps_free_endpoint(sps_pipe_info);
+}
+/**
+ * Initialize SPS HW connected with CE core
+ *
+ * This function register BAM HW resources with
+ * SPS driver and then initialize 2 SPS endpoints
+ *
+ * This function should only be called once typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init(struct qce_device *pce_dev)
+{
+ int rc = 0;
+ struct sps_bam_props bam = {0};
+ bool register_bam = false;
+
+ bam.phys_addr = pce_dev->ce_sps.bam_mem;
+ bam.virt_addr = pce_dev->ce_sps.bam_iobase;
+
+ /*
+ * This event thresold value is only significant for BAM-to-BAM
+ * transfer. It's ignored for BAM-to-System mode transfer.
+ */
+ bam.event_threshold = 0x10; /* Pipe event threshold */
+ /*
+ * This threshold controls when the BAM publish
+ * the descriptor size on the sideband interface.
+ * SPS HW will only be used when
+ * data transfer size > 64 bytes.
+ */
+ bam.summing_threshold = 64;
+ /* SPS driver wll handle the crypto BAM IRQ */
+ bam.irq = (u32)pce_dev->ce_sps.bam_irq;
+ bam.manage = SPS_BAM_MGR_LOCAL;
+
+ pr_debug("bam physical base=0x%x\n", (u32)bam.phys_addr);
+ pr_debug("bam virtual base=0x%x\n", (u32)bam.virt_addr);
+
+ mutex_lock(&bam_register_cnt);
+ if ((bam_registry.handle == 0) && (bam_registry.cnt == 0)) {
+ /* Register CE Peripheral BAM device to SPS driver */
+ rc = sps_register_bam_device(&bam, &bam_registry.handle);
+ if (rc) {
+ pr_err("sps_register_bam_device() failed! err=%d", rc);
+ return -EIO;
+ }
+ bam_registry.cnt++;
+ register_bam = true;
+ } else {
+ bam_registry.cnt++;
+ }
+ mutex_unlock(&bam_register_cnt);
+ pce_dev->ce_sps.bam_handle = bam_registry.handle;
+ pr_debug("BAM device registered. bam_handle=0x%x",
+ pce_dev->ce_sps.bam_handle);
+
+ rc = qce_sps_init_ep_conn(pce_dev, &pce_dev->ce_sps.producer, true);
+ if (rc)
+ goto sps_connect_producer_err;
+ rc = qce_sps_init_ep_conn(pce_dev, &pce_dev->ce_sps.consumer, false);
+ if (rc)
+ goto sps_connect_consumer_err;
+
+ pce_dev->ce_sps.out_transfer.user = pce_dev->ce_sps.producer.pipe;
+ pce_dev->ce_sps.in_transfer.user = pce_dev->ce_sps.consumer.pipe;
+ pr_info(" Qualcomm MSM CE-BAM at 0x%016llx irq %d\n",
+ (unsigned long long)pce_dev->ce_sps.bam_mem,
+ (unsigned int)pce_dev->ce_sps.bam_irq);
+ return rc;
+
+sps_connect_consumer_err:
+ qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
+sps_connect_producer_err:
+ if (register_bam)
+ sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
+
+ return rc;
+}
+
+/**
+ * De-initialize SPS HW connected with CE core
+ *
+ * This function deinitialize SPS endpoints and then
+ * deregisters BAM resources from SPS driver.
+ *
+ * This function should only be called once typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ */
+static void qce_sps_exit(struct qce_device *pce_dev)
+{
+ qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.consumer);
+ qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
+ mutex_lock(&bam_register_cnt);
+ if ((bam_registry.handle != 0) && (bam_registry.cnt == 1)) {
+ sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
+ bam_registry.cnt = 0;
+ bam_registry.handle = 0;
+ }
+ if ((bam_registry.handle != 0) && (bam_registry.cnt > 1))
+ bam_registry.cnt--;
+ mutex_unlock(&bam_register_cnt);
+
+ iounmap(pce_dev->ce_sps.bam_iobase);
+}
+
+/*
+ * SPS producer-pipe completion callback for an AEAD request.
+ * An operation is finished only when BOTH pipes have completed;
+ * whichever callback fires second resets both states to IDLE and
+ * invokes _aead_complete().
+ * NOTE(review): no locking is visible around the two state fields —
+ * presumably both callbacks run serialized in the same SPS callback
+ * context; confirm against the SPS driver.
+ */
+static void _aead_sps_producer_callback(struct sps_event_notify *notify)
+{
+ struct qce_device *pce_dev = (struct qce_device *)
+ ((struct sps_event_notify *)notify)->user;
+
+ /* Snapshot the event for the completion handler */
+ pce_dev->ce_sps.notify = *notify;
+ pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+ notify->event_id,
+ notify->data.transfer.iovec.addr,
+ notify->data.transfer.iovec.size,
+ notify->data.transfer.iovec.flags);
+
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
+ if (pce_dev->ce_sps.consumer_state == QCE_PIPE_STATE_COMP) {
+ pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+ /* done */
+ _aead_complete(pce_dev);
+ }
+};
+
+/*
+ * SPS consumer-pipe completion callback for an AEAD request.
+ * Mirror image of _aead_sps_producer_callback(): marks the consumer
+ * side complete and finishes the request if the producer already did.
+ */
+static void _aead_sps_consumer_callback(struct sps_event_notify *notify)
+{
+ struct qce_device *pce_dev = (struct qce_device *)
+ ((struct sps_event_notify *)notify)->user;
+
+ /* Snapshot the event for the completion handler */
+ pce_dev->ce_sps.notify = *notify;
+ pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+ notify->event_id,
+ notify->data.transfer.iovec.addr,
+ notify->data.transfer.iovec.size,
+ notify->data.transfer.iovec.flags);
+
+ pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_COMP;
+ if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
+ pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+ /* done */
+ _aead_complete(pce_dev);
+ }
+};
+
+/*
+ * SPS producer-pipe completion callback for a SHA/hash request.
+ * Completion requires both pipes; the second callback to fire resets
+ * both states and calls _sha_complete().
+ */
+static void _sha_sps_producer_callback(struct sps_event_notify *notify)
+{
+ struct qce_device *pce_dev = (struct qce_device *)
+ ((struct sps_event_notify *)notify)->user;
+
+ /* Snapshot the event for the completion handler */
+ pce_dev->ce_sps.notify = *notify;
+ pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+ notify->event_id,
+ notify->data.transfer.iovec.addr,
+ notify->data.transfer.iovec.size,
+ notify->data.transfer.iovec.flags);
+
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
+ if (pce_dev->ce_sps.consumer_state == QCE_PIPE_STATE_COMP) {
+ pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+ /* done */
+ _sha_complete(pce_dev);
+ }
+};
+
+/*
+ * SPS consumer-pipe completion callback for a SHA/hash request.
+ * Mirror image of _sha_sps_producer_callback().
+ */
+static void _sha_sps_consumer_callback(struct sps_event_notify *notify)
+{
+ struct qce_device *pce_dev = (struct qce_device *)
+ ((struct sps_event_notify *)notify)->user;
+
+ /* Snapshot the event for the completion handler */
+ pce_dev->ce_sps.notify = *notify;
+ pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+ notify->event_id,
+ notify->data.transfer.iovec.addr,
+ notify->data.transfer.iovec.size,
+ notify->data.transfer.iovec.flags);
+
+ pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_COMP;
+ if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
+ pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+ /* done */
+ _sha_complete(pce_dev);
+ }
+};
+
+/*
+ * SPS producer-pipe completion callback for an ablkcipher request.
+ * Completion requires both pipes; the second callback to fire resets
+ * both states and calls _ablk_cipher_complete().
+ */
+static void _ablk_cipher_sps_producer_callback(struct sps_event_notify *notify)
+{
+ struct qce_device *pce_dev = (struct qce_device *)
+ ((struct sps_event_notify *)notify)->user;
+
+ /* Snapshot the event for the completion handler */
+ pce_dev->ce_sps.notify = *notify;
+ pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+ notify->event_id,
+ notify->data.transfer.iovec.addr,
+ notify->data.transfer.iovec.size,
+ notify->data.transfer.iovec.flags);
+
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
+ if (pce_dev->ce_sps.consumer_state == QCE_PIPE_STATE_COMP) {
+ pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+ /* done */
+ _ablk_cipher_complete(pce_dev);
+ }
+};
+
+/*
+ * SPS consumer-pipe completion callback for an ablkcipher request.
+ * Mirror image of _ablk_cipher_sps_producer_callback().
+ */
+static void _ablk_cipher_sps_consumer_callback(struct sps_event_notify *notify)
+{
+ struct qce_device *pce_dev = (struct qce_device *)
+ ((struct sps_event_notify *)notify)->user;
+
+ /* Snapshot the event for the completion handler */
+ pce_dev->ce_sps.notify = *notify;
+ pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+ notify->event_id,
+ notify->data.transfer.iovec.addr,
+ notify->data.transfer.iovec.size,
+ notify->data.transfer.iovec.flags);
+
+ pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_COMP;
+ if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
+ pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+ pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+ /* done */
+ _ablk_cipher_complete(pce_dev);
+ }
+};
+
+/*
+ * Append one command element (a single CE register write) at *cmd_ptr
+ * and advance the pointer.  @addr is a CE register offset; the device
+ * physical I/O base is added to form the absolute address.  When
+ * @populate is non-NULL the caller gets back a pointer to the element
+ * just written, so its data field can be patched per request later.
+ */
+static void qce_add_cmd_element(struct qce_device *pdev,
+ struct sps_command_element **cmd_ptr, u32 addr,
+ u32 data, struct sps_command_element **populate)
+{
+ (*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
+ (*cmd_ptr)->data = data;
+ (*cmd_ptr)->mask = 0xFFFFFFFF;
+ if (populate != NULL)
+ *populate = *cmd_ptr;
+ (*cmd_ptr)++ ;
+}
+
+/*
+ * Build the BAM command list for one AES cipher configuration
+ * (mode x key size) in the coherent memory at *pvaddr, recording the
+ * patchable elements in the matching qce_cmdlist_info.  *pvaddr is
+ * advanced past the generated list.
+ *
+ * @pdev    - Pointer to qce_device structure
+ * @pvaddr  - In/out: next free byte of command-list memory
+ * @mode    - AES mode (CBC/CTR/ECB/XTS)
+ * @key_128 - true for a 128-bit key list, false for 256-bit
+ *
+ * @return  - 0 (always succeeds)
+ */
+static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev,
+ unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
+ bool key_128)
+{
+ struct sps_command_element *ce_vaddr =
+ (struct sps_command_element *)(*pvaddr);
+ uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+ struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t key_reg = 0;
+ uint32_t xts_key_reg = 0;
+ uint32_t iv_reg = 0;
+ uint32_t crypto_cfg = 0;
+ uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
+ uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;
+
+ crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
+ BIT(CRYPTO_MASK_DOUT_INTR) |
+ BIT(CRYPTO_MASK_DIN_INTR) |
+ BIT(CRYPTO_MASK_OP_DONE_INTR) |
+ (0 << CRYPTO_HIGH_SPD_EN_N) |
+ (pipe_pair << CRYPTO_PIPE_SET_SELECT);
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to AES cipher operations defined
+ * in ce_cmdlistptrs_ops structure.
+ */
+ switch (mode) {
+ case QCE_MODE_CBC:
+ case QCE_MODE_CTR:
+ if (key_128 == true) {
+ cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
+ (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
+
+ encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 <<
+ CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES <<
+ CRYPTO_ENCR_ALG);
+ iv_reg = 4;
+ key_reg = 4;
+ xts_key_reg = 0;
+ } else {
+ cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
+ (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
+
+ encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 <<
+ CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES <<
+ CRYPTO_ENCR_ALG);
+ iv_reg = 4;
+ key_reg = 8;
+ xts_key_reg = 0;
+ }
+ break;
+ case QCE_MODE_ECB:
+ if (key_128 == true) {
+ cmdlistptr->cipher_aes_128_ecb.cmdlist =
+ (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
+
+ encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 <<
+ CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES <<
+ CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_ECB <<
+ CRYPTO_ENCR_MODE);
+ iv_reg = 0;
+ key_reg = 4;
+ xts_key_reg = 0;
+ } else {
+ cmdlistptr->cipher_aes_256_ecb.cmdlist =
+ (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
+
+ encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 <<
+ CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES <<
+ CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_ECB <<
+ CRYPTO_ENCR_MODE);
+ iv_reg = 0;
+ key_reg = 8;
+ xts_key_reg = 0;
+ }
+ break;
+ case QCE_MODE_XTS:
+ if (key_128 == true) {
+ cmdlistptr->cipher_aes_128_xts.cmdlist =
+ (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_128_xts);
+
+ encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 <<
+ CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES <<
+ CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_XTS <<
+ CRYPTO_ENCR_MODE);
+ iv_reg = 4;
+ key_reg = 4;
+ xts_key_reg = 4;
+ } else {
+ cmdlistptr->cipher_aes_256_xts.cmdlist =
+ (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_aes_256_xts);
+
+ encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 <<
+ CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES <<
+ CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_XTS <<
+ CRYPTO_ENCR_MODE);
+ iv_reg = 4;
+ key_reg = 8;
+ xts_key_reg = 8;
+ }
+ break;
+ default:
+ break;
+ }
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, crypto_cfg,
+ &pcl_info->crypto_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+ &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ &pcl_info->encr_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ &pcl_info->encr_seg_start);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+ (uint32_t)0xffffffff, &pcl_info->encr_mask);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+ &pcl_info->auth_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+ &pcl_info->encr_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ if (xts_key_reg) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
+ 0, &pcl_info->encr_xts_key);
+ /*
+ * Fix: the remaining XTS key words must target the XTS key
+ * registers.  The original loop wrote them to
+ * CRYPTO_ENCR_KEY0_REG + i*4, clobbering the normal
+ * encryption key instead of filling the XTS key.
+ */
+ for (i = 1; i < xts_key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_XTS_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ CRYPTO_ENCR_XTS_DU_SIZE_REG, 0, NULL);
+ }
+ if (iv_reg) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+ &pcl_info->encr_cntr_iv);
+ for (i = 1; i < iv_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ }
+ /* Add dummy to align size to burst-size multiple */
+ if (mode == QCE_MODE_XTS) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+ 0, &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ 0, &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+ 0, &pcl_info->auth_seg_size);
+
+ }
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ &pcl_info->go_proc);
+
+ pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+/*
+ * Build the BAM command list for one DES/3DES cipher configuration
+ * (algorithm x CBC/ECB) in the coherent memory at *pvaddr, recording
+ * patchable elements in the matching qce_cmdlist_info.  *pvaddr is
+ * advanced past the generated list.
+ *
+ * @pdev     - Pointer to qce_device structure
+ * @pvaddr   - In/out: next free byte of command-list memory
+ * @alg      - CIPHER_ALG_DES or CIPHER_ALG_3DES
+ * @mode_cbc - true for CBC, false for ECB
+ *
+ * @return   - 0 (always succeeds)
+ */
+static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev,
+ unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
+ bool mode_cbc)
+{
+
+ struct sps_command_element *ce_vaddr =
+ (struct sps_command_element *)(*pvaddr);
+ uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+ struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t key_reg = 0;
+ uint32_t iv_reg = 0;
+ uint32_t crypto_cfg = 0;
+ uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
+ uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;
+
+ crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
+ BIT(CRYPTO_MASK_DOUT_INTR) |
+ BIT(CRYPTO_MASK_DIN_INTR) |
+ BIT(CRYPTO_MASK_OP_DONE_INTR) |
+ (0 << CRYPTO_HIGH_SPD_EN_N) |
+ (pipe_pair << CRYPTO_PIPE_SET_SELECT);
+
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to cipher operations defined
+ * in ce_cmdlistptrs_ops structure.
+ */
+ switch (alg) {
+ case CIPHER_ALG_DES:
+ if (mode_cbc) {
+ cmdlistptr->cipher_des_cbc.cmdlist =
+ (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_des_cbc);
+
+ encr_cfg = (CRYPTO_ENCR_KEY_SZ_DES <<
+ CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_DES <<
+ CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CBC <<
+ CRYPTO_ENCR_MODE);
+ iv_reg = 2;
+ key_reg = 2;
+ } else {
+ cmdlistptr->cipher_des_ecb.cmdlist =
+ (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_des_ecb);
+
+ encr_cfg = (CRYPTO_ENCR_KEY_SZ_DES <<
+ CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_DES <<
+ CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_ECB <<
+ CRYPTO_ENCR_MODE);
+ iv_reg = 0;
+ key_reg = 2;
+ }
+ break;
+ case CIPHER_ALG_3DES:
+ if (mode_cbc) {
+ cmdlistptr->cipher_3des_cbc.cmdlist =
+ (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_3des_cbc);
+
+ encr_cfg = (CRYPTO_ENCR_KEY_SZ_3DES <<
+ CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_DES <<
+ CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_CBC <<
+ CRYPTO_ENCR_MODE);
+ iv_reg = 2;
+ key_reg = 6;
+ } else {
+ cmdlistptr->cipher_3des_ecb.cmdlist =
+ (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->cipher_3des_ecb);
+
+ encr_cfg = (CRYPTO_ENCR_KEY_SZ_3DES <<
+ CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_DES <<
+ CRYPTO_ENCR_ALG) |
+ (CRYPTO_ENCR_MODE_ECB <<
+ CRYPTO_ENCR_MODE);
+ iv_reg = 0;
+ key_reg = 6;
+ }
+ break;
+ default:
+ break;
+ }
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, crypto_cfg,
+ &pcl_info->crypto_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+ &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ &pcl_info->encr_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ &pcl_info->encr_seg_start);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+ &pcl_info->auth_seg_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+ &pcl_info->encr_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ if (iv_reg) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+ &pcl_info->encr_cntr_iv);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+ NULL);
+ /* Add 2 dummy to align size to burst-size multiple */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR2_IV2_REG, 0,
+ NULL);
+ }
+ /* Add dummy to align size to burst-size multiple */
+ if (!mode_cbc) {
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+ 0, &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ 0, &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+ 0, &pcl_info->auth_seg_size);
+
+ }
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ &pcl_info->go_proc);
+
+ /* Record the byte size of this list and advance the shared cursor */
+ pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+/*
+ * Build the BAM command list for one hash/HMAC/CMAC configuration in
+ * the coherent memory at *pvaddr, recording patchable elements in the
+ * matching qce_cmdlist_info.  *pvaddr is advanced past the list.
+ *
+ * @pdev    - Pointer to qce_device structure
+ * @pvaddr  - In/out: next free byte of command-list memory
+ * @alg     - Hash algorithm / mode selector
+ * @key_128 - For AES-CMAC only: true for a 128-bit key, false for 256
+ *
+ * @return  - 0 (always succeeds)
+ */
+static int _setup_auth_cmdlistptrs(struct qce_device *pdev,
+ unsigned char **pvaddr, enum qce_hash_alg_enum alg,
+ bool key_128)
+{
+ struct sps_command_element *ce_vaddr =
+ (struct sps_command_element *)(*pvaddr);
+ uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+ struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t key_reg = 0;
+ uint32_t auth_cfg = 0;
+ uint32_t iv_reg = 0;
+ uint32_t crypto_cfg = 0;
+ uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
+ uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;
+
+ crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
+ BIT(CRYPTO_MASK_DOUT_INTR) |
+ BIT(CRYPTO_MASK_DIN_INTR) |
+ BIT(CRYPTO_MASK_OP_DONE_INTR) |
+ (0 << CRYPTO_HIGH_SPD_EN_N) |
+ (pipe_pair << CRYPTO_PIPE_SET_SELECT);
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to authentication operations
+ * defined in ce_cmdlistptrs_ops structure.
+ * The dummy writes in each case pad the list to a burst-size
+ * multiple.
+ */
+ switch (alg) {
+ case QCE_HASH_SHA1:
+ cmdlistptr->auth_sha1.cmdlist = (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_sha1);
+
+ auth_cfg = (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+ iv_reg = 5;
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ crypto_cfg, &pcl_info->crypto_cfg);
+ /* 1 dummy write */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ 0, NULL);
+
+ break;
+ case QCE_HASH_SHA256:
+ cmdlistptr->auth_sha256.cmdlist = (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_sha256);
+
+ auth_cfg = (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+ iv_reg = 8;
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ crypto_cfg, &pcl_info->crypto_cfg);
+ /* 2 dummy writes */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ 0, NULL);
+ break;
+ case QCE_HASH_SHA1_HMAC:
+ cmdlistptr->auth_sha1_hmac.cmdlist = (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_sha1_hmac);
+
+ auth_cfg = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+ key_reg = 16;
+ iv_reg = 5;
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ crypto_cfg, &pcl_info->crypto_cfg);
+ /* 1 dummy write */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ 0, NULL);
+ break;
+ case QCE_AEAD_SHA1_HMAC:
+ cmdlistptr->aead_sha1_hmac.cmdlist = (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->aead_sha1_hmac);
+
+ auth_cfg = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS) |
+ (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
+ key_reg = 16;
+ iv_reg = 5;
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ crypto_cfg, &pcl_info->crypto_cfg);
+ /* 2 dummy writes */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ 0, NULL);
+ break;
+ case QCE_HASH_SHA256_HMAC:
+ cmdlistptr->auth_sha256_hmac.cmdlist = (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_sha256_hmac);
+
+ auth_cfg = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+ key_reg = 16;
+ iv_reg = 8;
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ crypto_cfg, &pcl_info->crypto_cfg);
+ /* 2 dummy writes */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ 0, NULL);
+ break;
+ case QCE_HASH_AES_CMAC:
+ if (key_128 == true) {
+ cmdlistptr->auth_aes_128_cmac.cmdlist =
+ (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_aes_128_cmac);
+
+ auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+ (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_ENUM_16_BYTES <<
+ CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_KEY_SZ_AES128 <<
+ CRYPTO_AUTH_KEY_SIZE) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+ key_reg = 4;
+ } else {
+ cmdlistptr->auth_aes_256_cmac.cmdlist =
+ (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->auth_aes_256_cmac);
+
+ auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST)|
+ (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_SIZE_ENUM_16_BYTES <<
+ CRYPTO_AUTH_SIZE) |
+ (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_KEY_SZ_AES256 <<
+ CRYPTO_AUTH_KEY_SIZE) |
+ (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+ key_reg = 8;
+ }
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ crypto_cfg, &pcl_info->crypto_cfg);
+ /* 2 dummy writes */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ 0, NULL);
+ break;
+ default:
+ break;
+ }
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+ &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ auth_cfg, &pcl_info->auth_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+ &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+ &pcl_info->auth_seg_start);
+
+ if (alg == QCE_HASH_AES_CMAC) {
+ /* reset auth iv, bytecount and key registers */
+ for (i = 0; i < 16; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ for (i = 0; i < 16; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+ 0, NULL);
+ } else {
+ /* Patchable IV and byte-count words for multi-pass hashing */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+ &pcl_info->auth_iv);
+ for (i = 1; i < iv_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+ 0, &pcl_info->auth_bytecount);
+ }
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+ if (key_reg) {
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+ 0, NULL);
+ }
+ /* AEAD lists get their GO written by the cipher half instead */
+ if (alg != QCE_AEAD_SHA1_HMAC)
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ &pcl_info->go_proc);
+
+ /* Record the byte size of this list and advance the shared cursor */
+ pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+/*
+ * Build the BAM command list for an AES-CCM AEAD configuration
+ * (128- or 256-bit key) in the coherent memory at *pvaddr, recording
+ * patchable elements in the matching qce_cmdlist_info.  *pvaddr is
+ * advanced past the generated list.
+ *
+ * @pdev    - Pointer to qce_device structure
+ * @pvaddr  - In/out: next free byte of command-list memory
+ * @key_128 - true for a 128-bit key list, false for 256-bit
+ *
+ * @return  - 0 (always succeeds)
+ *
+ * NOTE(review): the 256-bit branch sets CRYPTO_AUTH_NONCE_NUM_WORDS
+ * in auth_cfg but the 128-bit branch does not — confirm whether that
+ * asymmetry is intentional.
+ */
+static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
+ unsigned char **pvaddr, bool key_128)
+{
+ struct sps_command_element *ce_vaddr =
+ (struct sps_command_element *)(*pvaddr);
+ uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+ struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+ int i = 0;
+ uint32_t encr_cfg = 0;
+ uint32_t auth_cfg = 0;
+ uint32_t key_reg = 0;
+ uint32_t crypto_cfg = 0;
+ uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
+ uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;
+
+ crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
+ BIT(CRYPTO_MASK_DOUT_INTR) |
+ BIT(CRYPTO_MASK_DIN_INTR) |
+ BIT(CRYPTO_MASK_OP_DONE_INTR) |
+ (0 << CRYPTO_HIGH_SPD_EN_N) |
+ (pipe_pair << CRYPTO_PIPE_SET_SELECT);
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to aead operations
+ * defined in ce_cmdlistptrs_ops structure.
+ */
+ if (key_128 == true) {
+ cmdlistptr->aead_aes_128_ccm.cmdlist = (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->aead_aes_128_ccm);
+
+ auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+ (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);
+ auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+ encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ ((CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE));
+ key_reg = 4;
+ } else {
+
+ cmdlistptr->aead_aes_256_ccm.cmdlist = (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->aead_aes_256_ccm);
+
+ auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+ (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+ (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+ (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
+ ((MAX_NONCE/sizeof(uint32_t)) <<
+ CRYPTO_AUTH_NONCE_NUM_WORDS);
+ auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+ encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+ (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+ ((CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE));
+ key_reg = 8;
+ }
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ crypto_cfg, &pcl_info->crypto_cfg);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ NULL);
+ /* add 1 dummy */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+ &pcl_info->seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+ encr_cfg, &pcl_info->encr_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+ &pcl_info->encr_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+ &pcl_info->encr_seg_start);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+ (uint32_t)0xffffffff, &pcl_info->encr_mask);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+ auth_cfg, &pcl_info->auth_seg_cfg);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+ &pcl_info->auth_seg_size);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+ &pcl_info->auth_seg_start);
+ /* reset auth iv, bytecount and key registers */
+ for (i = 0; i < 8; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
+ 0, NULL);
+ for (i = 0; i < 16; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ /* set auth key */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+ &pcl_info->auth_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ /* set NONCE info */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
+ &pcl_info->auth_nonce_info);
+ for (i = 1; i < 4; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_AUTH_INFO_NONCE0_REG +
+ i * sizeof(uint32_t)), 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+ &pcl_info->encr_key);
+ for (i = 1; i < key_reg; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+ &pcl_info->encr_cntr_iv);
+ for (i = 1; i < 4; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
+ &pcl_info->encr_ccm_cntr_iv);
+ for (i = 1; i < 4; i++)
+ qce_add_cmd_element(pdev, &ce_vaddr,
+ (CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
+ 0, NULL);
+
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+ ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+ &pcl_info->go_proc);
+
+ /* Record the byte size of this list and advance the shared cursor */
+ pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+/*
+ * Build the command list that unlocks all crypto pipes by writing
+ * CRYPTO_CONFIG_RESET to CRYPTO_CONFIG_REG four times (one write per
+ * pipe set).  *pvaddr is advanced past the generated list.
+ *
+ * @pdev   - Pointer to qce_device structure
+ * @pvaddr - In/out: next free byte of command-list memory
+ *
+ * @return - 0 (always succeeds)
+ */
+static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
+ unsigned char **pvaddr)
+{
+ struct sps_command_element *ce_vaddr =
+ (struct sps_command_element *)(*pvaddr);
+ uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+ struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+ struct qce_cmdlist_info *pcl_info = NULL;
+
+ cmdlistptr->unlock_all_pipes.cmdlist = (uint32_t)ce_vaddr;
+ pcl_info = &(cmdlistptr->unlock_all_pipes);
+
+ /*
+ * Designate chunks of the allocated memory to command list
+ * to unlock pipes.
+ */
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ CRYPTO_CONFIG_RESET, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ CRYPTO_CONFIG_RESET, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ CRYPTO_CONFIG_RESET, NULL);
+ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+ CRYPTO_CONFIG_RESET, NULL);
+ pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ return 0;
+}
+
+/*
+ * Build every pre-computed BAM command list (all cipher, hash, HMAC,
+ * CMAC, AEAD, and pipe-unlock variants) in the coherent memory at
+ * *pvaddr.  The cursor is 16-byte aligned first and advanced by each
+ * _setup_*_cmdlistptrs() helper in turn.
+ *
+ * @pdev   - Pointer to qce_device structure
+ * @pvaddr - In/out: next free byte of command-list memory
+ *
+ * @return - 0 (always succeeds)
+ */
+static int qce_setup_cmdlistptrs(struct qce_device *pdev,
+ unsigned char **pvaddr)
+{
+ struct sps_command_element *ce_vaddr =
+ (struct sps_command_element *)(*pvaddr);
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to operations defined
+ * in ce_cmdlistptrs_ops structure.
+ */
+ ce_vaddr =
+ (struct sps_command_element *) ALIGN(((unsigned int) ce_vaddr),
+ 16);
+ *pvaddr = (unsigned char *) ce_vaddr;
+
+ _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CBC, true);
+ _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CTR, true);
+ _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_ECB, true);
+ _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_XTS, true);
+ _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CBC, false);
+ _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CTR, false);
+ _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_ECB, false);
+ _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_XTS, false);
+
+ _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, true);
+ _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, false);
+ _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, true);
+ _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, false);
+
+ _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA1, false);
+ _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA256, false);
+
+ _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA1_HMAC, false);
+ _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA256_HMAC, false);
+
+ _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_AES_CMAC, true);
+ _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_AES_CMAC, false);
+
+ _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_AEAD_SHA1_HMAC, false);
+
+ _setup_aead_cmdlistptrs(pdev, pvaddr, true);
+ _setup_aead_cmdlistptrs(pdev, pvaddr, false);
+ _setup_unlock_pipe_cmdlistptrs(pdev, pvaddr);
+
+ return 0;
+}
+
+/*
+ * Lay out the driver's coherent DMA memory (pce_dev->coh_vmem):
+ * input iovec ring, output iovec ring, the pre-built command lists,
+ * and finally a 128-byte result-dump area.  Always returns 0.
+ */
+static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
+{
+	unsigned char *vaddr;
+
+	vaddr = pce_dev->coh_vmem;
+	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr),  16);
+
+	/* Allow for 256 descriptor (cmd and data) entries per pipe */
+	pce_dev->ce_sps.in_transfer.iovec = (struct sps_iovec *)vaddr;
+	pce_dev->ce_sps.in_transfer.iovec_phys =
+					(uint32_t)GET_PHYS_ADDR(vaddr);
+	/* each sps_iovec entry is 8 bytes */
+	vaddr += MAX_BAM_DESCRIPTORS * 8;
+
+	pce_dev->ce_sps.out_transfer.iovec = (struct sps_iovec *)vaddr;
+	pce_dev->ce_sps.out_transfer.iovec_phys =
+					(uint32_t)GET_PHYS_ADDR(vaddr);
+	vaddr += MAX_BAM_DESCRIPTORS * 8;
+
+	qce_setup_cmdlistptrs(pce_dev, &vaddr);
+	/* result_dump and result alias the same 128-byte region */
+	pce_dev->ce_sps.result_dump = (uint32_t)vaddr;
+	pce_dev->ce_sps.result = (struct ce_result_dump_format *)vaddr;
+	vaddr += 128;
+
+	return 0;
+}
+
+/*
+ * Patch a pre-built command list for an SHA1-HMAC AEAD request:
+ * writes the HMAC key (converted to big-endian words), the SHA1
+ * standard IV, and the auth segment size/start.  Always returns 0.
+ *
+ * NOTE(review): cmdlistinfo is dereferenced without a NULL check,
+ * yet the only caller in this file (qce_aead_req) passes a NULL
+ * auth_cmdlistinfo — confirm the intended command list is wired up.
+ */
+int qce_aead_sha1_hmac_setup(struct qce_req *creq, struct crypto_aead *aead,
+				struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t authk_size_in_word = creq->authklen/sizeof(uint32_t);
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	struct sps_command_element *pce = NULL;
+	struct aead_request *areq = (struct aead_request *)creq->areq;
+	int i;
+
+	_byte_stream_to_net_words(mackey32, creq->authkey,
+					creq->authklen);
+	pce = cmdlistinfo->auth_key;
+	for (i = 0; i < authk_size_in_word; i++, pce++)
+		pce->data = mackey32[i];
+	/* SHA1 has a 5-word (160-bit) initial hash value */
+	pce = cmdlistinfo->auth_iv;
+	for (i = 0; i < 5; i++, pce++)
+		pce->data = _std_init_vector_sha1[i];
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = creq->cryptlen + areq->assoclen + crypto_aead_ivsize(aead);
+
+	/* write auth seg size start*/
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	return 0;
+}
+
+/*
+ * Submit an AEAD (cipher + auth) request to the crypto engine over
+ * BAM/SPS.  Computes in/out lengths for encrypt vs decrypt and CCM
+ * vs HMAC modes, DMA-maps the assoc/src/dst scatterlists, patches
+ * the pre-built command list, registers producer/consumer pipe
+ * callbacks and queues the SPS transfer.  Completion is reported
+ * asynchronously through q_req->qce_cb.  Returns 0 on submission
+ * success or a negative errno; on failure all mappings are undone.
+ *
+ * NOTE(review): on the non-CCM path auth_cmdlistinfo is still NULL
+ * when passed to qce_aead_sha1_hmac_setup, which dereferences it —
+ * this looks like a NULL-pointer dereference; confirm which command
+ * list (e.g. cmdlistptr.aead_sha1_hmac) should be used here.
+ * NOTE(review): dma_map_sg return values are not checked.
+ */
+int qce_aead_req(void *handle, struct qce_req *q_req)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct aead_request *areq = (struct aead_request *) q_req->areq;
+	uint32_t authsize = q_req->authsize;
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	uint32_t ivsize = 0;
+	uint32_t totallen_in, out_len;
+	uint32_t hw_pad_out = 0;
+	int rc = 0;
+	int ce_burst_size;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	struct qce_cmdlist_info *auth_cmdlistinfo = NULL;
+
+	/* CCM carries its counter IV inside the command list, not inline */
+	if (q_req->mode != QCE_MODE_CCM)
+		ivsize = crypto_aead_ivsize(aead);
+
+	ce_burst_size = pce_dev->ce_sps.ce_burst_size;
+	if (q_req->dir == QCE_ENCRYPT) {
+		q_req->cryptlen = areq->cryptlen;
+		totallen_in = q_req->cryptlen + areq->assoclen + ivsize;
+		if (q_req->mode == QCE_MODE_CCM) {
+			out_len = areq->cryptlen + authsize;
+			/* MAC is padded up to the engine burst size */
+			hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
+		} else {
+			out_len = areq->cryptlen;
+		}
+	} else {
+		/* decrypt: the trailing MAC is part of areq->cryptlen */
+		q_req->cryptlen = areq->cryptlen - authsize;
+		if (q_req->mode == QCE_MODE_CCM)
+			totallen_in = areq->cryptlen + areq->assoclen;
+		else
+			totallen_in = q_req->cryptlen + areq->assoclen + ivsize;
+		out_len = q_req->cryptlen;
+		hw_pad_out = authsize;
+	}
+
+	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
+	pce_dev->src_nents = count_sg(areq->src, areq->cryptlen);
+	pce_dev->ivsize = q_req->ivsize;
+	pce_dev->authsize = q_req->authsize;
+	pce_dev->phy_iv_in = 0;
+
+	/* associated data input */
+	dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+					 DMA_TO_DEVICE);
+	/* cipher input */
+	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher + mac output  for encryption    */
+	if (areq->src != areq->dst) {
+		pce_dev->dst_nents = count_sg(areq->dst, out_len);
+		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+				DMA_FROM_DEVICE);
+	} else {
+		/* in-place: reuse the bidirectional src mapping */
+		pce_dev->dst_nents = pce_dev->src_nents;
+	}
+
+	_ce_get_cipher_cmdlistinfo(pce_dev, q_req, &cmdlistinfo);
+	/* set up crypto device */
+	rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
+				areq->assoclen + ivsize, cmdlistinfo);
+	if (rc < 0)
+		goto bad;
+
+	if (q_req->mode != QCE_MODE_CCM) {
+		/* NOTE(review): auth_cmdlistinfo is NULL here — see above */
+		rc = qce_aead_sha1_hmac_setup(q_req, aead, auth_cmdlistinfo);
+		if (rc < 0)
+			goto bad;
+		/* overwrite seg size */
+		cmdlistinfo->seg_size->data = totallen_in;
+		/* cipher iv for input */
+		pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
+			ivsize, DMA_TO_DEVICE);
+	}
+
+	/* setup for callback, and issue command to bam */
+	pce_dev->areq = q_req->areq;
+	pce_dev->qce_cb = q_req->qce_cb;
+
+	/* Register callback event for EOT (End of transfer) event. */
+	pce_dev->ce_sps.producer.event.callback = _aead_sps_producer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
+					&pce_dev->ce_sps.producer.event);
+	if (rc) {
+		pr_err("Producer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	/* Register callback event for EOT (End of transfer) event. */
+	pce_dev->ce_sps.consumer.event.callback = _aead_sps_consumer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
+					&pce_dev->ce_sps.consumer.event);
+	if (rc) {
+		pr_err("Consumer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	_qce_sps_iovec_count_init(pce_dev);
+
+	_qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+					&pce_dev->ce_sps.in_transfer);
+
+	if (pce_dev->ce_sps.minor_version == 0) {
+		/* old engine: one contiguous in/out stream */
+		_qce_sps_add_sg_data(pce_dev, areq->src, totallen_in,
+					&pce_dev->ce_sps.in_transfer);
+
+		_qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
+		_qce_sps_add_sg_data(pce_dev, areq->dst, out_len +
+					areq->assoclen + hw_pad_out,
+				&pce_dev->ce_sps.out_transfer);
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_dev->ce_sps.out_transfer);
+	} else {
+		/* newer engine: assoc, iv and payload as separate pieces */
+		_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen,
+					 &pce_dev->ce_sps.in_transfer);
+		_qce_sps_add_data((uint32_t)pce_dev->phy_iv_in, ivsize,
+					&pce_dev->ce_sps.in_transfer);
+		_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen,
+					&pce_dev->ce_sps.in_transfer);
+		_qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
+
+		/* Pass through to ignore associated (+iv, if applicable) data*/
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
+					(ivsize + areq->assoclen),
+					&pce_dev->ce_sps.out_transfer);
+		_qce_sps_add_sg_data(pce_dev, areq->dst, out_len,
+					&pce_dev->ce_sps.out_transfer);
+		/* Pass through to ignore hw_pad (padding of the MAC data) */
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
+				hw_pad_out, &pce_dev->ce_sps.out_transfer);
+
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+			CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer);
+	}
+	rc = _qce_sps_transfer(pce_dev);
+	if (rc)
+		goto bad;
+	return 0;
+
+bad:
+	/* undo every mapping made above before reporting the error */
+	if (pce_dev->assoc_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+				DMA_TO_DEVICE);
+	}
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	}
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+	if (pce_dev->phy_iv_in) {
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
+				ivsize, DMA_TO_DEVICE);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_aead_req);
+
+/*
+ * Submit an ablkcipher (symmetric cipher only) request: DMA-map the
+ * src/dst scatterlists, patch the matching pre-built command list,
+ * register pipe callbacks and queue the SPS transfer.  Completion is
+ * reported asynchronously through c_req->qce_cb.  Returns 0 on
+ * submission success or a negative errno with mappings undone.
+ *
+ * NOTE(review): dma_map_sg return values are not checked.
+ */
+int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct ablkcipher_request *areq = (struct ablkcipher_request *)
+						c_req->areq;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+
+	pce_dev->src_nents = 0;
+	pce_dev->dst_nents = 0;
+	_ce_get_cipher_cmdlistinfo(pce_dev, c_req, &cmdlistinfo);
+
+	/* cipher input */
+	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);
+
+	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher output */
+	if (areq->src != areq->dst) {
+		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
+		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+							DMA_FROM_DEVICE);
+	} else {
+		/* in-place: reuse the bidirectional src mapping */
+		pce_dev->dst_nents = pce_dev->src_nents;
+	}
+	/* set up crypto device */
+	rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0, cmdlistinfo);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for client callback, and issue command to BAM */
+	pce_dev->areq = areq;
+	pce_dev->qce_cb = c_req->qce_cb;
+
+	/* Register callback event for EOT (End of transfer) event. */
+	pce_dev->ce_sps.producer.event.callback =
+				_ablk_cipher_sps_producer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
+					&pce_dev->ce_sps.producer.event);
+	if (rc) {
+		pr_err("Producer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	/* Register callback event for EOT (End of transfer) event. */
+	pce_dev->ce_sps.consumer.event.callback =
+			_ablk_cipher_sps_consumer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
+					&pce_dev->ce_sps.consumer.event);
+	if (rc) {
+		pr_err("Consumer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	_qce_sps_iovec_count_init(pce_dev);
+
+	/* command list first, then the data, EOT on the last input iovec */
+	_qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+					&pce_dev->ce_sps.in_transfer);
+	_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+					&pce_dev->ce_sps.in_transfer);
+	_qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
+
+	_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
+					&pce_dev->ce_sps.out_transfer);
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_dev->ce_sps.out_transfer);
+	rc = _qce_sps_transfer(pce_dev);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	if (pce_dev->dst_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst,
+		pce_dev->dst_nents, DMA_FROM_DEVICE);
+	}
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->src,
+				pce_dev->src_nents,
+				(areq->src == areq->dst) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(qce_ablk_cipher_req);
+
+/*
+ * Submit a SHA/HMAC hash request: DMA-map the source scatterlist,
+ * patch the matching pre-built command list, register pipe callbacks
+ * and queue the SPS transfer.  Completion is reported asynchronously
+ * through sreq->qce_cb.  Returns 0 on submission success or a
+ * negative errno with the src mapping undone.
+ *
+ * NOTE(review): the sg entries are counted with sreq->size but the
+ * input iovec is built with areq->nbytes — confirm the two are
+ * always equal for callers of this API.
+ */
+int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+
+	struct ahash_request *areq = (struct ahash_request *)sreq->areq;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+
+	pce_dev->src_nents = count_sg(sreq->src, sreq->size);
+	_ce_get_hash_cmdlistinfo(pce_dev, sreq, &cmdlistinfo);
+	dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
+							DMA_TO_DEVICE);
+	rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
+	if (rc < 0)
+		goto bad;
+
+	pce_dev->areq = areq;
+	pce_dev->qce_cb = sreq->qce_cb;
+
+	/* Register callback event for EOT (End of transfer) event. */
+	pce_dev->ce_sps.producer.event.callback = _sha_sps_producer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
+					&pce_dev->ce_sps.producer.event);
+	if (rc) {
+		pr_err("Producer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	/* Register callback event for EOT (End of transfer) event. */
+	pce_dev->ce_sps.consumer.event.callback = _sha_sps_consumer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
+					&pce_dev->ce_sps.consumer.event);
+	if (rc) {
+		pr_err("Consumer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	_qce_sps_iovec_count_init(pce_dev);
+
+	_qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+					&pce_dev->ce_sps.in_transfer);
+	_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+						&pce_dev->ce_sps.in_transfer);
+	_qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
+
+	/* hash output goes only to the result-dump area */
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_dev->ce_sps.out_transfer);
+	rc = _qce_sps_transfer(pce_dev);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, sreq->src,
+				pce_dev->src_nents, DMA_TO_DEVICE);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(qce_process_sha_req);
+
+/*
+ * Parse the crypto engine's device-tree data: BAM pipe-pair index,
+ * CRYPTO and BAM register regions (both ioremapped), and the BAM IRQ.
+ * Returns 0 on success; on failure unmaps anything mapped so far and
+ * returns a negative errno.
+ */
+static int __qce_get_device_tree_data(struct platform_device *pdev,
+		struct qce_device *pce_dev)
+{
+	struct resource *resource;
+	int rc = 0;
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,bam-pipe-pair",
+				&pce_dev->ce_sps.pipe_pair_index)) {
+		pr_err("Fail to get bam pipe pair information.\n");
+		return -EINVAL;
+	} else {
+		pr_warn("bam_pipe_pair=0x%x", pce_dev->ce_sps.pipe_pair_index);
+	}
+	/* pipes come in even/odd (dest/src) pairs */
+	pce_dev->ce_sps.dest_pipe_index	= 2 * pce_dev->ce_sps.pipe_pair_index;
+	pce_dev->ce_sps.src_pipe_index	= pce_dev->ce_sps.dest_pipe_index + 1;
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"crypto-base");
+	if (resource) {
+		pce_dev->phy_iobase = resource->start;
+		pce_dev->iobase = ioremap_nocache(resource->start,
+					resource_size(resource));
+		if (!pce_dev->iobase) {
+			pr_err("Can not map CRYPTO io memory\n");
+			return -ENOMEM;
+		}
+	} else {
+		pr_err("CRYPTO HW mem unavailable.\n");
+		return -ENODEV;
+	}
+	pr_warn("ce_phy_reg_base=0x%x ", pce_dev->phy_iobase);
+	pr_warn("ce_virt_reg_base=0x%x\n", (uint32_t)pce_dev->iobase);
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"crypto-bam-base");
+	if (resource) {
+		pce_dev->ce_sps.bam_mem = resource->start;
+		pce_dev->ce_sps.bam_iobase = ioremap_nocache(resource->start,
+					resource_size(resource));
+		/*
+		 * Validate the BAM mapping just created; the original code
+		 * re-checked the already-validated pce_dev->iobase here,
+		 * so a failed BAM ioremap went undetected.
+		 */
+		if (!pce_dev->ce_sps.bam_iobase) {
+			rc = -ENOMEM;
+			pr_err("Can not map BAM io memory\n");
+			goto err_getting_bam_info;
+		}
+	} else {
+		pr_err("CRYPTO BAM mem unavailable.\n");
+		rc = -ENODEV;
+		goto err_getting_bam_info;
+	}
+	pr_warn("ce_bam_phy_reg_base=0x%x ", pce_dev->ce_sps.bam_mem);
+	pr_warn("ce_bam_virt_reg_base=0x%x\n",
+				(uint32_t)pce_dev->ce_sps.bam_iobase);
+
+	resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (resource) {
+		pce_dev->ce_sps.bam_irq = resource->start;
+		pr_warn("CRYPTO BAM IRQ = %d.\n", pce_dev->ce_sps.bam_irq);
+	} else {
+		pr_err("CRYPTO BAM IRQ unavailable.\n");
+		/*
+		 * rc was still 0 here, so this failure path previously
+		 * unmapped everything yet reported success to the caller.
+		 */
+		rc = -ENODEV;
+		goto err_dev;
+	}
+	return rc;
+err_dev:
+	if (pce_dev->ce_sps.bam_iobase)
+		iounmap(pce_dev->ce_sps.bam_iobase);
+
+err_getting_bam_info:
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+
+	return rc;
+}
+
+/*
+ * Acquire and enable the crypto engine clocks: optional core source
+ * clock (set to 100 MHz when present), core clock and interface
+ * clock.  On any failure all clocks obtained so far are released and
+ * a negative errno is returned; on success rc is 0 and control falls
+ * through the err_clk label without printing.
+ */
+static int __qce_init_clk(struct qce_device *pce_dev)
+{
+	int rc = 0;
+	struct clk *ce_core_clk;
+	struct clk *ce_clk;
+	struct clk *ce_core_src_clk;
+
+	/* Get CE3 src core clk. */
+	ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
+	if (!IS_ERR(ce_core_src_clk)) {
+		pce_dev->ce_core_src_clk = ce_core_src_clk;
+
+		/* Set the core src clk @100Mhz */
+		rc = clk_set_rate(pce_dev->ce_core_src_clk, 100000000);
+		if (rc) {
+			clk_put(pce_dev->ce_core_src_clk);
+			pr_err("Unable to set the core src clk @100Mhz.\n");
+			goto err_clk;
+		}
+	} else {
+		/* src clk is optional: continue without it */
+		pr_warn("Unable to get CE core src clk, set to NULL\n");
+		pce_dev->ce_core_src_clk = NULL;
+	}
+
+	/* Get CE core clk */
+	ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
+	if (IS_ERR(ce_core_clk)) {
+		rc = PTR_ERR(ce_core_clk);
+		pr_err("Unable to get CE core clk\n");
+		if (pce_dev->ce_core_src_clk != NULL)
+			clk_put(pce_dev->ce_core_src_clk);
+		goto err_clk;
+	}
+	pce_dev->ce_core_clk = ce_core_clk;
+
+	/* Get CE Interface clk */
+	ce_clk = clk_get(pce_dev->pdev, "iface_clk");
+	if (IS_ERR(ce_clk)) {
+		rc = PTR_ERR(ce_clk);
+		pr_err("Unable to get CE interface clk\n");
+		if (pce_dev->ce_core_src_clk != NULL)
+			clk_put(pce_dev->ce_core_src_clk);
+		clk_put(pce_dev->ce_core_clk);
+		goto err_clk;
+	}
+	pce_dev->ce_clk = ce_clk;
+
+	/* Enable CE core clk */
+	rc = clk_prepare_enable(pce_dev->ce_core_clk);
+	if (rc) {
+		pr_err("Unable to enable/prepare CE core clk\n");
+		if (pce_dev->ce_core_src_clk != NULL)
+			clk_put(pce_dev->ce_core_src_clk);
+		clk_put(pce_dev->ce_core_clk);
+		clk_put(pce_dev->ce_clk);
+		goto err_clk;
+	} else {
+		/* Enable CE clk */
+		rc = clk_prepare_enable(pce_dev->ce_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE iface clk\n");
+			clk_disable_unprepare(pce_dev->ce_core_clk);
+			if (pce_dev->ce_core_src_clk != NULL)
+				clk_put(pce_dev->ce_core_src_clk);
+			clk_put(pce_dev->ce_core_clk);
+			clk_put(pce_dev->ce_clk);
+			goto err_clk;
+		}
+	}
+err_clk:
+	/* reached on success (rc == 0) as well as on failure */
+	if (rc)
+		pr_err("Unable to init CE clks, rc = %d\n", rc);
+	return rc;
+}
+
+/* crypto engine open function. */
+void *qce_open(struct platform_device *pdev, int *rc)
+{
+ struct qce_device *pce_dev;
+
+ pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
+ if (!pce_dev) {
+ *rc = -ENOMEM;
+ pr_err("Can not allocate memory: %d\n", *rc);
+ return NULL;
+ }
+ pce_dev->pdev = &pdev->dev;
+
+ if (pdev->dev.of_node) {
+ *rc = __qce_get_device_tree_data(pdev, pce_dev);
+ if (*rc)
+ goto err_pce_dev;
+ } else {
+ *rc = -EINVAL;
+ pr_err("Device Node not found.\n");
+ goto err_pce_dev;
+ }
+
+ pce_dev->memsize = 9 * PAGE_SIZE;
+ pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
+ pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
+ if (pce_dev->coh_vmem == NULL) {
+ *rc = -ENOMEM;
+ pr_err("Can not allocate coherent memory for sps data\n");
+ goto err_iobase;
+ }
+
+ *rc = __qce_init_clk(pce_dev);
+ if (*rc)
+ goto err_mem;
+
+ if (_probe_ce_engine(pce_dev)) {
+ *rc = -ENXIO;
+ goto err;
+ }
+ *rc = 0;
+ qce_setup_ce_sps_data(pce_dev);
+ qce_sps_init(pce_dev);
+
+ return pce_dev;
+err:
+ clk_disable_unprepare(pce_dev->ce_clk);
+ clk_disable_unprepare(pce_dev->ce_core_clk);
+
+ if (pce_dev->ce_core_src_clk != NULL)
+ clk_put(pce_dev->ce_core_src_clk);
+ clk_put(pce_dev->ce_clk);
+ clk_put(pce_dev->ce_core_clk);
+err_mem:
+ if (pce_dev->coh_vmem)
+ dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+ pce_dev->coh_vmem, pce_dev->coh_pmem);
+err_iobase:
+ if (pce_dev->ce_sps.bam_iobase)
+ iounmap(pce_dev->ce_sps.bam_iobase);
+ if (pce_dev->iobase)
+ iounmap(pce_dev->iobase);
+err_pce_dev:
+ kfree(pce_dev);
+ return NULL;
+}
+EXPORT_SYMBOL(qce_open);
+
+/* crypto engine close function. */
+/*
+ * Release a handle obtained from qce_open(): unmap registers, free
+ * the coherent DMA memory, disable and release the clocks, tear down
+ * the SPS pipes and free the device structure.
+ *
+ * NOTE(review): qce_sps_exit() runs after the coherent memory and
+ * iobase it may still reference have been freed/unmapped — confirm
+ * this teardown ordering is intentional.
+ */
+int qce_close(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+				pce_dev->coh_vmem, pce_dev->coh_pmem);
+
+	clk_disable_unprepare(pce_dev->ce_clk);
+	clk_disable_unprepare(pce_dev->ce_core_clk);
+	if (pce_dev->ce_core_src_clk != NULL)
+		clk_put(pce_dev->ce_core_src_clk);
+	clk_put(pce_dev->ce_clk);
+	clk_put(pce_dev->ce_core_clk);
+
+	qce_sps_exit(pce_dev);
+	kfree(handle);
+
+	return 0;
+}
+EXPORT_SYMBOL(qce_close);
+
+/*
+ * Report the capability flags of this engine to the caller.  This
+ * BAM-based engine always supports SHA-HMAC, CMAC and AES-XTS but
+ * not 192-bit AES keys or OTA; AES-CCM and unaligned buffers are
+ * only available on minor_version > 0 hardware.  Returns 0, or
+ * -EINVAL if ce_support is NULL.
+ */
+int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+
+	if (ce_support == NULL)
+		return -EINVAL;
+
+	ce_support->sha1_hmac_20 = false;
+	ce_support->sha1_hmac = false;
+	ce_support->sha256_hmac = false;
+	ce_support->sha_hmac = true;
+	ce_support->cmac  = true;
+	ce_support->aes_key_192 = false;
+	ce_support->aes_xts = true;
+	ce_support->ota = false;
+	ce_support->bam = true;
+	if (pce_dev->ce_sps.minor_version) {
+		ce_support->aligned_only = false;
+		ce_support->aes_ccm = true;
+	} else {
+		/* first-revision hardware: aligned buffers only, no CCM */
+		ce_support->aligned_only = true;
+		ce_support->aes_ccm = false;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(qce_hw_support);
+
+/* Module init: reset the shared BAM registry (no handle, zero users). */
+static int __init qce_init(void)
+{
+	bam_registry.handle = 0;
+	bam_registry.cnt = 0;
+	return 0;
+}
+
+/* Module exit: clear the shared BAM registry state. */
+static void __exit qce_exit(void)
+{
+	bam_registry.handle = 0;
+	bam_registry.cnt = 0;
+}
+
+module_init(qce_init);
+module_exit(qce_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Crypto Engine driver");
diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h
new file mode 100644
index 0000000..c9eba82
--- /dev/null
+++ b/drivers/crypto/msm/qce50.h
@@ -0,0 +1,148 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _DRIVERS_CRYPTO_MSM_QCE50_H_
+#define _DRIVERS_CRYPTO_MSM_QCE50_H_
+
+#include <mach/sps.h>
+
+/* MAX Data xfer block size between BAM and CE */
+#define MAX_CE_BAM_BURST_SIZE   0x40
+#define QCEBAM_BURST_SIZE	MAX_CE_BAM_BURST_SIZE
+#define MAX_BAM_DESCRIPTORS	(0x40 - 1)
+
+/*
+ * Translate between the kernel virtual and physical (DMA) views of
+ * the coherent region.  Both macros rely on a variable named
+ * pce_dev being in scope at the expansion site.  The macro argument
+ * is parenthesized so expression arguments (e.g. GET_PHYS_ADDR(p + n))
+ * bind correctly; the original expanded the bare parameter, which is
+ * an operator-precedence hazard.
+ */
+#define GET_VIRT_ADDR(x)  \
+		((uint32_t)pce_dev->coh_vmem +			\
+			((uint32_t)(x) - pce_dev->coh_pmem))
+#define GET_PHYS_ADDR(x)  \
+		(pce_dev->coh_pmem + ((x) - (uint32_t)pce_dev->coh_vmem))
+
+#define CRYPTO_REG_SIZE			4
+#define NUM_OF_CRYPTO_AUTH_IV_REG	16
+#define NUM_OF_CRYPTO_CNTR_IV_REG	4
+#define NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG	4
+/* number of registers captured into the result dump area */
+#define CRYPTO_TOTAL_REGISTERS_DUMPED	26
+#define CRYPTO_RESULT_DUMP_SIZE   \
+	ALIGN((CRYPTO_TOTAL_REGISTERS_DUMPED * CRYPTO_REG_SIZE), \
+	QCEBAM_BURST_SIZE)
+
+/* QCE max number of descriptor in a descriptor list */
+#define QCE_MAX_NUM_DESC    128
+#define SPS_MAX_PKT_SIZE	(64 * 1024  - 1)
+
+/* State of consumer/producer Pipe */
+enum qce_pipe_st_enum {
+	QCE_PIPE_STATE_IDLE = 0,
+	QCE_PIPE_STATE_IN_PROG = 1,
+	QCE_PIPE_STATE_COMP = 2,
+	QCE_PIPE_STATE_LAST
+};
+
+/* One SPS endpoint: pipe handle, its connection and event config. */
+struct qce_sps_ep_conn_data {
+	struct sps_pipe			*pipe;
+	struct sps_connect		connect;
+	struct sps_register_event	event;
+};
+
+/* CE Result DUMP format*/
+struct ce_result_dump_format {
+	uint32_t auth_iv[NUM_OF_CRYPTO_AUTH_IV_REG];
+	uint32_t auth_byte_count[NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG];
+	uint32_t encr_cntr_iv[NUM_OF_CRYPTO_CNTR_IV_REG];
+	uint32_t status;
+	uint32_t status2;
+};
+
+/*
+ * One pre-built SPS command list plus direct pointers to the
+ * elements that are patched per request (keys, IVs, segment
+ * sizes, GO).  cmdlist is the list's start address, size its
+ * length in bytes.
+ */
+struct qce_cmdlist_info {
+
+	uint32_t cmdlist;
+	struct sps_command_element *crypto_cfg;
+	struct sps_command_element *encr_seg_cfg;
+	struct sps_command_element *encr_seg_size;
+	struct sps_command_element *encr_seg_start;
+	struct sps_command_element *encr_key;
+	struct sps_command_element *encr_xts_key;
+	struct sps_command_element *encr_cntr_iv;
+	struct sps_command_element *encr_ccm_cntr_iv;
+	struct sps_command_element *encr_mask;
+	struct sps_command_element *encr_xts_du_size;
+
+	struct sps_command_element *auth_seg_cfg;
+	struct sps_command_element *auth_seg_size;
+	struct sps_command_element *auth_seg_start;
+	struct sps_command_element *auth_key;
+	struct sps_command_element *auth_iv;
+	struct sps_command_element *auth_nonce_info;
+	struct sps_command_element *auth_bytecount;
+	struct sps_command_element *seg_size;
+	struct sps_command_element *go_proc;
+	uint32_t size;
+};
+
+/* The full set of pre-built command lists, one per operation. */
+struct qce_cmdlistptr_ops {
+	struct qce_cmdlist_info cipher_aes_128_cbc_ctr;
+	struct qce_cmdlist_info cipher_aes_256_cbc_ctr;
+	struct qce_cmdlist_info cipher_aes_128_ecb;
+	struct qce_cmdlist_info cipher_aes_256_ecb;
+	struct qce_cmdlist_info cipher_aes_128_xts;
+	struct qce_cmdlist_info cipher_aes_256_xts;
+	struct qce_cmdlist_info cipher_des_cbc;
+	struct qce_cmdlist_info cipher_des_ecb;
+	struct qce_cmdlist_info cipher_3des_cbc;
+	struct qce_cmdlist_info cipher_3des_ecb;
+	struct qce_cmdlist_info auth_sha1;
+	struct qce_cmdlist_info auth_sha256;
+	struct qce_cmdlist_info auth_sha1_hmac;
+	struct qce_cmdlist_info auth_sha256_hmac;
+	struct qce_cmdlist_info auth_aes_128_cmac;
+	struct qce_cmdlist_info auth_aes_256_cmac;
+	struct qce_cmdlist_info aead_sha1_hmac;
+	struct qce_cmdlist_info aead_aes_128_ccm;
+	struct qce_cmdlist_info aead_aes_256_ccm;
+	struct qce_cmdlist_info unlock_all_pipes;
+};
+
+
+/* DM data structure with buffers, commandlists & command pointer lists */
+struct ce_sps_data {
+
+	uint32_t			bam_irq;
+	uint32_t			bam_mem;
+	void __iomem			*bam_iobase;
+
+	struct qce_sps_ep_conn_data	producer;
+	struct qce_sps_ep_conn_data	consumer;
+	struct sps_event_notify		notify;
+	struct scatterlist		*src;
+	struct scatterlist		*dst;
+	unsigned int			pipe_pair_index;
+	unsigned int			src_pipe_index;
+	unsigned int			dest_pipe_index;
+	uint32_t			bam_handle;
+
+	enum qce_pipe_st_enum consumer_state;	/* Consumer pipe state */
+	enum qce_pipe_st_enum producer_state;	/* Producer pipe state */
+
+	int consumer_status;		/* consumer pipe status */
+	int producer_status;		/* producer pipe status */
+
+	struct sps_transfer in_transfer;
+	struct sps_transfer out_transfer;
+
+	int ce_burst_size;
+
+	struct qce_cmdlistptr_ops cmdlistptr;
+	uint32_t result_dump;		/* phys/offset of result dump area */
+	uint32_t ignore_buffer;		/* sink for pass-through output */
+	struct ce_result_dump_format *result;
+	uint32_t minor_version;		/* engine revision; gates CCM etc. */
+};
+#endif /* _DRIVERS_CRYPTO_MSM_QCE50_H_ */
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index cc6d744..63f7fd9 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -572,6 +572,7 @@
if (podev->ce_support.sha_hmac) {
sreq.alg = QCE_HASH_SHA1_HMAC;
sreq.authkey = &handle->sha_ctxt.authkey[0];
+ sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
} else {
sreq.alg = QCE_HASH_SHA1;
@@ -582,7 +583,7 @@
if (podev->ce_support.sha_hmac) {
sreq.alg = QCE_HASH_SHA256_HMAC;
sreq.authkey = &handle->sha_ctxt.authkey[0];
-
+ sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
} else {
sreq.alg = QCE_HASH_SHA256;
sreq.authkey = NULL;
@@ -959,7 +960,6 @@
uint8_t *k_buf_src = NULL;
uint8_t *k_align_src = NULL;
- handle->sha_ctxt.first_blk = 0;
handle->sha_ctxt.last_blk = 1;
total = handle->sha_ctxt.trailing_buf_len;
@@ -977,9 +977,6 @@
CACHE_LINE_SIZE);
memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
}
- handle->sha_ctxt.last_blk = 1;
- handle->sha_ctxt.first_blk = 0;
-
qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
sg_mark_end(qcedev_areq->sha_req.sreq.src);
@@ -1071,6 +1068,7 @@
int err = 0;
if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
+ qcedev_sha_init(areq, handle);
/* Verify Source Address */
if (!access_ok(VERIFY_READ,
(void __user *)areq->sha_op_req.authkey,
@@ -1082,6 +1080,7 @@
return -EFAULT;
} else {
struct qcedev_async_req authkey_areq;
+ uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
init_completion(&authkey_areq.complete);
@@ -1091,6 +1090,8 @@
authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
authkey_areq.sha_op_req.diglen = 0;
+ authkey_areq.handle = handle;
+
memset(&authkey_areq.sha_op_req.digest[0], 0,
QCEDEV_MAX_SHA_DIGEST);
if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
@@ -1106,8 +1107,11 @@
err = qcedev_sha_final(&authkey_areq, handle);
else
return err;
- memcpy(&handle->sha_ctxt.authkey[0],
- &handle->sha_ctxt.digest[0],
+ memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
+ handle->sha_ctxt.diglen);
+ qcedev_sha_init(areq, handle);
+
+ memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
handle->sha_ctxt.diglen);
}
return err;
@@ -1209,7 +1213,6 @@
int err;
struct qcedev_control *podev = handle->cntl;
- qcedev_sha_init(areq, handle);
err = qcedev_set_hmac_auth_key(areq, handle);
if (err)
return err;
@@ -2013,21 +2016,8 @@
struct qcedev_control *podev;
struct msm_ce_hw_support *platform_support;
- if (pdev->id >= MAX_QCE_DEVICE) {
- pr_err("%s: device id %d exceeds allowed %d\n",
- __func__, pdev->id, MAX_QCE_DEVICE);
- return -ENOENT;
- }
- podev = &qce_dev[pdev->id];
+ podev = &qce_dev[0];
- platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
- podev->platform_support.ce_shared = platform_support->ce_shared;
- podev->platform_support.shared_ce_resource =
- platform_support->shared_ce_resource;
- podev->platform_support.hw_key_support =
- platform_support->hw_key_support;
- podev->platform_support.bus_scale_table =
- platform_support->bus_scale_table;
podev->ce_lock_count = 0;
podev->high_bw_req_count = 0;
INIT_LIST_HEAD(&podev->ready_commands);
@@ -2047,7 +2037,6 @@
podev->qce = handle;
podev->pdev = pdev;
platform_set_drvdata(pdev, podev);
- qce_hw_support(podev->qce, &podev->ce_support);
if (podev->platform_support.bus_scale_table != NULL) {
podev->bus_scale_handle =
@@ -2062,7 +2051,25 @@
}
}
rc = misc_register(&podev->miscdevice);
-
+ qce_hw_support(podev->qce, &podev->ce_support);
+ if (podev->ce_support.bam) {
+ podev->platform_support.ce_shared = 0;
+ podev->platform_support.shared_ce_resource = 0;
+ podev->platform_support.hw_key_support = 0;
+ podev->platform_support.bus_scale_table = NULL;
+ podev->platform_support.sha_hmac = 1;
+ } else {
+ platform_support =
+ (struct msm_ce_hw_support *)pdev->dev.platform_data;
+ podev->platform_support.ce_shared = platform_support->ce_shared;
+ podev->platform_support.shared_ce_resource =
+ platform_support->shared_ce_resource;
+ podev->platform_support.hw_key_support =
+ platform_support->hw_key_support;
+ podev->platform_support.bus_scale_table =
+ platform_support->bus_scale_table;
+ podev->platform_support.sha_hmac = platform_support->sha_hmac;
+ }
if (rc >= 0)
return 0;
else
@@ -2227,9 +2234,7 @@
}
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
-MODULE_VERSION("1.27");
module_init(qcedev_init);
module_exit(qcedev_exit);
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index c11c36e..7fc5cab 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -228,6 +228,13 @@
enum qce_cipher_alg_enum alg;
enum qce_cipher_dir_enum dir;
enum qce_cipher_mode_enum mode;
+
+ struct scatterlist *orig_src; /* Original src sg ptr */
+ struct scatterlist *orig_dst; /* Original dst sg ptr */
+ struct scatterlist dsg; /* Dest Data sg */
+ struct scatterlist ssg; /* Source Data sg */
+ unsigned char *data; /* Incoming data pointer*/
+
};
#define SHA_MAX_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -275,6 +282,11 @@
};
struct scatterlist *src;
uint32_t nbytes;
+
+ struct scatterlist *orig_src; /* Original src sg ptr */
+ struct scatterlist dsg; /* Data sg */
+ unsigned char *data; /* Incoming data pointer*/
+ unsigned char *data2; /* Updated data pointer*/
};
static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
@@ -497,9 +509,6 @@
&sha_ctx->ahash_req_complete);
crypto_ahash_clear_flags(ahash, ~0);
- if (sha_ctx->cp->platform_support.bus_scale_table != NULL)
- qcrypto_ce_high_bw_req(sha_ctx->cp, true);
-
return 0;
};
@@ -785,7 +794,6 @@
dev_info(&cp->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
areq, ret);
#endif
-
if (digest) {
memcpy(sha_ctx->digest, digest, diglen);
memcpy(areq->result, digest, diglen);
@@ -819,6 +827,10 @@
cp->res = 0;
pstat->sha_op_success++;
}
+ if (cp->ce_support.aligned_only) {
+ areq->src = rctx->orig_src;
+ kfree(rctx->data);
+ }
if (cp->platform_support.ce_shared)
schedule_work(&cp->unlock_ce_ws);
@@ -850,6 +862,24 @@
cp->res = 0;
pstat->ablk_cipher_op_success++;
}
+
+ if (cp->ce_support.aligned_only) {
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct scatterlist *sg;
+ uint32_t bytes = 0;
+
+ rctx = ablkcipher_request_ctx(areq);
+ areq->src = rctx->orig_src;
+ areq->dst = rctx->orig_dst;
+
+ for (sg = areq->dst; bytes != areq->nbytes; sg++) {
+ memcpy(sg_virt(sg), ((char *)rctx->data + bytes),
+ sg->length);
+ bytes += sg->length;
+ }
+ kfree(rctx->data);
+ }
+
if (cp->platform_support.ce_shared)
schedule_work(&cp->unlock_ce_ws);
tasklet_schedule(&cp->done_tasklet);
@@ -871,6 +901,30 @@
rctx = aead_request_ctx(areq);
if (rctx->mode == QCE_MODE_CCM) {
+ if (cp->ce_support.aligned_only) {
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct scatterlist *sg;
+ uint32_t bytes = 0;
+ uint32_t nbytes = 0;
+
+ rctx = aead_request_ctx(areq);
+ areq->src = rctx->orig_src;
+ areq->dst = rctx->orig_dst;
+ if (rctx->dir == QCE_ENCRYPT)
+ nbytes = areq->cryptlen +
+ crypto_aead_authsize(aead);
+ else
+ nbytes = areq->cryptlen -
+ crypto_aead_authsize(aead);
+
+ for (sg = areq->dst; bytes != nbytes; sg++) {
+ memcpy(sg_virt(sg),
+ ((char *)rctx->data + rctx->assoclen + bytes),
+ sg->length);
+ bytes += sg->length;
+ }
+ kfree(rctx->data);
+ }
kzfree(rctx->assoc);
areq->assoc = rctx->assoc_sg;
areq->assoclen = rctx->assoclen;
@@ -997,17 +1051,222 @@
return 0;
}
+static int _qcrypto_process_ablkcipher(struct crypto_priv *cp,
+ struct crypto_async_request *async_req)
+{
+ struct qce_req qreq;
+ int ret;
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *cipher_ctx;
+ struct ablkcipher_request *req;
+ struct crypto_ablkcipher *tfm;
+
+ req = container_of(async_req, struct ablkcipher_request, base);
+ cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+ rctx = ablkcipher_request_ctx(req);
+ tfm = crypto_ablkcipher_reqtfm(req);
+ if (cp->ce_support.aligned_only) {
+ uint32_t bytes = 0;
+ struct scatterlist *sg = req->src;
+
+ rctx->orig_src = req->src;
+ rctx->orig_dst = req->dst;
+ rctx->data = kzalloc((req->nbytes + 64), GFP_KERNEL);
+ for (sg = req->src; bytes != req->nbytes; sg++) {
+ memcpy(((char *)rctx->data + bytes),
+ sg_virt(sg), sg->length);
+ bytes += sg->length;
+ }
+ sg_set_buf(&rctx->dsg, rctx->data, req->nbytes);
+ sg_mark_end(&rctx->dsg);
+ rctx->iv = req->info;
+
+ req->src = &rctx->dsg;
+ req->dst = &rctx->dsg;
+
+ }
+ qreq.op = QCE_REQ_ABLK_CIPHER;
+ qreq.qce_cb = _qce_ablk_cipher_complete;
+ qreq.areq = req;
+ qreq.alg = rctx->alg;
+ qreq.dir = rctx->dir;
+ qreq.mode = rctx->mode;
+ qreq.enckey = cipher_ctx->enc_key;
+ qreq.encklen = cipher_ctx->enc_key_len;
+ qreq.iv = req->info;
+ qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
+ qreq.cryptlen = req->nbytes;
+ qreq.use_pmem = 0;
+
+ if ((cipher_ctx->enc_key_len == 0) &&
+ (cp->platform_support.hw_key_support == 0))
+ ret = -EINVAL;
+ else
+ ret = qce_ablk_cipher_req(cp->qce, &qreq);
+
+ return ret;
+}
+
+static int _qcrypto_process_ahash(struct crypto_priv *cp,
+ struct crypto_async_request *async_req)
+{
+ struct ahash_request *req;
+ struct qce_sha_req sreq;
+ struct qcrypto_sha_ctx *sha_ctx;
+ int ret = 0;
+
+ req = container_of(async_req,
+ struct ahash_request, base);
+ sha_ctx = crypto_tfm_ctx(async_req->tfm);
+
+ sreq.qce_cb = _qce_ahash_complete;
+ sreq.digest = &sha_ctx->digest[0];
+ sreq.src = req->src;
+ sreq.auth_data[0] = sha_ctx->byte_count[0];
+ sreq.auth_data[1] = sha_ctx->byte_count[1];
+ sreq.auth_data[2] = sha_ctx->byte_count[2];
+ sreq.auth_data[3] = sha_ctx->byte_count[3];
+ sreq.first_blk = sha_ctx->first_blk;
+ sreq.last_blk = sha_ctx->last_blk;
+ sreq.size = req->nbytes;
+ sreq.areq = req;
+
+ switch (sha_ctx->alg) {
+ case QCE_HASH_SHA1:
+ sreq.alg = QCE_HASH_SHA1;
+ sreq.authkey = NULL;
+ break;
+ case QCE_HASH_SHA256:
+ sreq.alg = QCE_HASH_SHA256;
+ sreq.authkey = NULL;
+ break;
+ case QCE_HASH_SHA1_HMAC:
+ sreq.alg = QCE_HASH_SHA1_HMAC;
+ sreq.authkey = &sha_ctx->authkey[0];
+ sreq.authklen = SHA_HMAC_KEY_SIZE;
+ break;
+ case QCE_HASH_SHA256_HMAC:
+ sreq.alg = QCE_HASH_SHA256_HMAC;
+ sreq.authkey = &sha_ctx->authkey[0];
+ sreq.authklen = SHA_HMAC_KEY_SIZE;
+ break;
+ default:
+ break;
+ };
+ ret = qce_process_sha_req(cp->qce, &sreq);
+
+ return ret;
+}
+
+static int _qcrypto_process_aead(struct crypto_priv *cp,
+ struct crypto_async_request *async_req)
+{
+ struct qce_req qreq;
+ int ret = 0;
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *cipher_ctx;
+ struct aead_request *req = container_of(async_req,
+ struct aead_request, base);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+ rctx = aead_request_ctx(req);
+ cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+
+ qreq.op = QCE_REQ_AEAD;
+ qreq.qce_cb = _qce_aead_complete;
+
+ qreq.areq = req;
+ qreq.alg = rctx->alg;
+ qreq.dir = rctx->dir;
+ qreq.mode = rctx->mode;
+ qreq.iv = rctx->iv;
+
+ qreq.enckey = cipher_ctx->enc_key;
+ qreq.encklen = cipher_ctx->enc_key_len;
+ qreq.authkey = cipher_ctx->auth_key;
+ qreq.authklen = cipher_ctx->auth_key_len;
+ qreq.authsize = crypto_aead_authsize(aead);
+ qreq.ivsize = crypto_aead_ivsize(aead);
+ if (qreq.mode == QCE_MODE_CCM) {
+ if (qreq.dir == QCE_ENCRYPT)
+ qreq.cryptlen = req->cryptlen;
+ else
+ qreq.cryptlen = req->cryptlen -
+ qreq.authsize;
+ /* Get NONCE */
+ ret = qccrypto_set_aead_ccm_nonce(&qreq);
+ if (ret)
+ return ret;
+
+ /* Format Associated data */
+ ret = qcrypto_aead_ccm_format_adata(&qreq,
+ req->assoclen,
+ req->assoc);
+ if (ret)
+ return ret;
+
+ if (cp->ce_support.aligned_only) {
+ uint32_t bytes = 0;
+ struct scatterlist *sg = req->src;
+
+ rctx->orig_src = req->src;
+ rctx->orig_dst = req->dst;
+ rctx->data = kzalloc((req->cryptlen + qreq.assoclen +
+ qreq.authsize + 64*2), GFP_KERNEL);
+
+ memcpy((char *)rctx->data, qreq.assoc, qreq.assoclen);
+
+ for (sg = req->src; bytes != req->cryptlen; sg++) {
+ memcpy((rctx->data + bytes + qreq.assoclen),
+ sg_virt(sg), sg->length);
+ bytes += sg->length;
+ }
+ sg_set_buf(&rctx->ssg, rctx->data, req->cryptlen +
+ qreq.assoclen);
+ sg_mark_end(&rctx->ssg);
+
+ if (qreq.dir == QCE_ENCRYPT)
+ sg_set_buf(&rctx->dsg, rctx->data,
+ qreq.assoclen + qreq.cryptlen +
+ ALIGN(qreq.authsize, 64));
+ else
+ sg_set_buf(&rctx->dsg, rctx->data,
+ qreq.assoclen + req->cryptlen +
+ qreq.authsize);
+ sg_mark_end(&rctx->dsg);
+
+ req->src = &rctx->ssg;
+ req->dst = &rctx->dsg;
+ }
+ /*
+ * Save the original associated data
+ * length and sg
+ */
+ rctx->assoc_sg = req->assoc;
+ rctx->assoclen = req->assoclen;
+ rctx->assoc = qreq.assoc;
+ /*
+ * update req with new formatted associated
+ * data info
+ */
+ req->assoc = &rctx->asg;
+ req->assoclen = qreq.assoclen;
+ sg_set_buf(req->assoc, qreq.assoc,
+ req->assoclen);
+ sg_mark_end(req->assoc);
+ }
+ ret = qce_aead_req(cp->qce, &qreq);
+
+ return ret;
+}
+
static void _start_qcrypto_process(struct crypto_priv *cp)
{
struct crypto_async_request *async_req = NULL;
struct crypto_async_request *backlog = NULL;
unsigned long flags;
u32 type;
- struct qce_req qreq;
int ret;
- struct qcrypto_cipher_req_ctx *rctx;
- struct qcrypto_cipher_ctx *cipher_ctx;
- struct qcrypto_sha_ctx *sha_ctx;
struct crypto_stat *pstat;
pstat = &_qcrypto_stat[cp->pdev->id];
@@ -1026,139 +1285,21 @@
backlog->complete(backlog, -EINPROGRESS);
type = crypto_tfm_alg_type(async_req->tfm);
- if (type == CRYPTO_ALG_TYPE_ABLKCIPHER) {
- struct ablkcipher_request *req;
- struct crypto_ablkcipher *tfm;
-
- req = container_of(async_req, struct ablkcipher_request, base);
- cipher_ctx = crypto_tfm_ctx(async_req->tfm);
- rctx = ablkcipher_request_ctx(req);
- tfm = crypto_ablkcipher_reqtfm(req);
-
- qreq.op = QCE_REQ_ABLK_CIPHER;
- qreq.qce_cb = _qce_ablk_cipher_complete;
- qreq.areq = req;
- qreq.alg = rctx->alg;
- qreq.dir = rctx->dir;
- qreq.mode = rctx->mode;
- qreq.enckey = cipher_ctx->enc_key;
- qreq.encklen = cipher_ctx->enc_key_len;
- qreq.iv = req->info;
- qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
- qreq.cryptlen = req->nbytes;
- qreq.use_pmem = 0;
-
- if ((cipher_ctx->enc_key_len == 0) &&
- (cp->platform_support.hw_key_support == 0))
- ret = -EINVAL;
- else
- ret = qce_ablk_cipher_req(cp->qce, &qreq);
- } else {
- if (type == CRYPTO_ALG_TYPE_AHASH) {
-
- struct ahash_request *req;
- struct qce_sha_req sreq;
-
- req = container_of(async_req,
- struct ahash_request, base);
- sha_ctx = crypto_tfm_ctx(async_req->tfm);
-
- sreq.qce_cb = _qce_ahash_complete;
- sreq.digest = &sha_ctx->digest[0];
- sreq.src = req->src;
- sreq.auth_data[0] = sha_ctx->byte_count[0];
- sreq.auth_data[1] = sha_ctx->byte_count[1];
- sreq.auth_data[2] = sha_ctx->byte_count[2];
- sreq.auth_data[3] = sha_ctx->byte_count[3];
- sreq.first_blk = sha_ctx->first_blk;
- sreq.last_blk = sha_ctx->last_blk;
- sreq.size = req->nbytes;
- sreq.areq = req;
-
- switch (sha_ctx->alg) {
- case QCE_HASH_SHA1:
- sreq.alg = QCE_HASH_SHA1;
- sreq.authkey = NULL;
- break;
- case QCE_HASH_SHA256:
- sreq.alg = QCE_HASH_SHA256;
- sreq.authkey = NULL;
- break;
- case QCE_HASH_SHA1_HMAC:
- sreq.alg = QCE_HASH_SHA1_HMAC;
- sreq.authkey = &sha_ctx->authkey[0];
- break;
- case QCE_HASH_SHA256_HMAC:
- sreq.alg = QCE_HASH_SHA256_HMAC;
- sreq.authkey = &sha_ctx->authkey[0];
- break;
- default:
- break;
- };
- ret = qce_process_sha_req(cp->qce, &sreq);
-
- } else {
- struct aead_request *req = container_of(async_req,
- struct aead_request, base);
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
-
- rctx = aead_request_ctx(req);
- cipher_ctx = crypto_tfm_ctx(async_req->tfm);
-
- qreq.op = QCE_REQ_AEAD;
- qreq.qce_cb = _qce_aead_complete;
-
- qreq.areq = req;
- qreq.alg = rctx->alg;
- qreq.dir = rctx->dir;
- qreq.mode = rctx->mode;
- qreq.iv = rctx->iv;
-
- qreq.enckey = cipher_ctx->enc_key;
- qreq.encklen = cipher_ctx->enc_key_len;
- qreq.authkey = cipher_ctx->auth_key;
- qreq.authklen = cipher_ctx->auth_key_len;
- qreq.authsize = crypto_aead_authsize(aead);
- qreq.ivsize = crypto_aead_ivsize(aead);
- if (qreq.mode == QCE_MODE_CCM) {
- if (qreq.dir == QCE_ENCRYPT)
- qreq.cryptlen = req->cryptlen;
- else
- qreq.cryptlen = req->cryptlen -
- qreq.authsize;
- /* Get NONCE */
- ret = qccrypto_set_aead_ccm_nonce(&qreq);
- if (ret)
- goto done;
- /* Format Associated data */
- ret = qcrypto_aead_ccm_format_adata(&qreq,
- req->assoclen,
- req->assoc);
- if (ret)
- goto done;
- /*
- * Save the original associated data
- * length and sg
- */
- rctx->assoc_sg = req->assoc;
- rctx->assoclen = req->assoclen;
- rctx->assoc = qreq.assoc;
- /*
- * update req with new formatted associated
- * data info
- */
- req->assoc = &rctx->asg;
- req->assoclen = qreq.assoclen;
- sg_set_buf(req->assoc, qreq.assoc,
- req->assoclen);
- sg_mark_end(req->assoc);
- }
- ret = qce_aead_req(cp->qce, &qreq);
- }
+ switch (type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ ret = _qcrypto_process_ablkcipher(cp, async_req);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ ret = _qcrypto_process_ahash(cp, async_req);
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ ret = _qcrypto_process_aead(cp, async_req);
+ break;
+ default:
+ ret = -EINVAL;
};
-done:
- if (ret) {
+ if (ret) {
spin_lock_irqsave(&cp->lock, flags);
cp->req = NULL;
spin_unlock_irqrestore(&cp->lock, flags);
@@ -2118,6 +2259,26 @@
return 0;
}
+static void _copy_source(struct ahash_request *req)
+{
+ struct qcrypto_sha_req_ctx *srctx = NULL;
+ uint32_t bytes = 0;
+ struct scatterlist *sg = req->src;
+
+ srctx = ahash_request_ctx(req);
+ srctx->orig_src = req->src;
+ srctx->data = kzalloc((req->nbytes + 64), GFP_KERNEL);
+ for (sg = req->src; bytes != req->nbytes;
+ sg++) {
+ memcpy(((char *)srctx->data + bytes),
+ sg_virt(sg), sg->length);
+ bytes += sg->length;
+ }
+ sg_set_buf(&srctx->dsg, srctx->data,
+ req->nbytes);
+ sg_mark_end(&srctx->dsg);
+ req->src = &srctx->dsg;
+}
static int _sha_update(struct ahash_request *req, uint32_t sha_block_size)
{
@@ -2198,23 +2359,55 @@
}
if (sha_ctx->trailing_buf_len) {
- num_sg = end_src + 2;
- sha_ctx->sg = kzalloc(num_sg * (sizeof(struct scatterlist)),
+ if (cp->ce_support.aligned_only) {
+ sha_ctx->sg = kzalloc(sizeof(struct scatterlist),
GFP_KERNEL);
- if (sha_ctx->sg == NULL) {
- pr_err("qcrypto Can't Allocate mem: sha_ctx->sg, error %ld\n",
- PTR_ERR(sha_ctx->sg));
- return -ENOMEM;
- }
-
- sg_set_buf(&sha_ctx->sg[0], sha_ctx->tmp_tbuf,
+ if (sha_ctx->sg == NULL) {
+ pr_err("MemAlloc fail sha_ctx->sg, error %ld\n",
+ PTR_ERR(sha_ctx->sg));
+ return -ENOMEM;
+ }
+ rctx->data2 = kzalloc((req->nbytes + 64), GFP_KERNEL);
+ if (rctx->data2 == NULL) {
+ pr_err("Mem Alloc fail srctx->data2, err %ld\n",
+ PTR_ERR(rctx->data2));
+ kfree(sha_ctx->sg);
+ return -ENOMEM;
+ }
+ memcpy(rctx->data2, sha_ctx->tmp_tbuf,
sha_ctx->trailing_buf_len);
- for (i = 1; i < num_sg; i++)
- sg_set_buf(&sha_ctx->sg[i], sg_virt(&req->src[i-1]),
- req->src[i-1].length);
+ memcpy((rctx->data2 + sha_ctx->trailing_buf_len),
+ rctx->data, req->src[i-1].length);
+ kfree(rctx->data);
+ rctx->data = rctx->data2;
+ sg_set_buf(&sha_ctx->sg[0], rctx->data,
+ (sha_ctx->trailing_buf_len +
+ req->src[i-1].length));
+ req->src = sha_ctx->sg;
+ sg_mark_end(&sha_ctx->sg[0]);
- req->src = sha_ctx->sg;
- sg_mark_end(&sha_ctx->sg[num_sg - 1]);
+ } else {
+ num_sg = end_src + 2;
+
+ sha_ctx->sg = kzalloc(num_sg *
+ (sizeof(struct scatterlist)), GFP_KERNEL);
+ if (sha_ctx->sg == NULL) {
+ pr_err("MEMalloc fail sha_ctx->sg, error %ld\n",
+ PTR_ERR(sha_ctx->sg));
+ return -ENOMEM;
+ }
+
+ sg_set_buf(&sha_ctx->sg[0], sha_ctx->tmp_tbuf,
+ sha_ctx->trailing_buf_len);
+ for (i = 1; i < num_sg; i++)
+ sg_set_buf(&sha_ctx->sg[i],
+ sg_virt(&req->src[i-1]),
+ req->src[i-1].length);
+
+ req->src = sha_ctx->sg;
+ sg_mark_end(&sha_ctx->sg[num_sg - 1]);
+
+ }
} else
sg_mark_end(&req->src[end_src]);
@@ -2232,6 +2425,11 @@
{
struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+
+ if (cp->ce_support.aligned_only)
+ _copy_source(req);
sha_state_ctx->count += req->nbytes;
return _sha_update(req, SHA1_BLOCK_SIZE);
@@ -2241,6 +2439,11 @@
{
struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
+ struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = sha_ctx->cp;
+
+ if (cp->ce_support.aligned_only)
+ _copy_source(req);
sha_state_ctx->count += req->nbytes;
return _sha_update(req, SHA256_BLOCK_SIZE);
@@ -2253,6 +2456,9 @@
struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
int ret = 0;
+ if (cp->ce_support.aligned_only)
+ _copy_source(req);
+
sha_ctx->last_blk = 1;
/* save the original req structure fields*/
@@ -2288,10 +2494,13 @@
struct crypto_priv *cp = sha_ctx->cp;
int ret = 0;
+ if (cp->ce_support.aligned_only)
+ _copy_source(req);
+
/* save the original req structure fields*/
rctx->src = req->src;
rctx->nbytes = req->nbytes;
-
+ sha_ctx->first_blk = 1;
sha_ctx->last_blk = 1;
ret = _qcrypto_queue_req(cp, &req->base);
@@ -2326,7 +2535,7 @@
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
int ret = 0;
- sha_ctx->in_buf = kzalloc(len, GFP_KERNEL);
+ sha_ctx->in_buf = kzalloc(len + 64, GFP_KERNEL);
if (sha_ctx->in_buf == NULL) {
pr_err("qcrypto Can't Allocate mem: sha_ctx->in_buf, error %ld\n",
PTR_ERR(sha_ctx->in_buf));
@@ -3079,17 +3288,27 @@
cp->qce = handle;
cp->pdev = pdev;
qce_hw_support(cp->qce, &cp->ce_support);
- platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
- cp->platform_support.ce_shared = platform_support->ce_shared;
- cp->platform_support.shared_ce_resource =
+ if (cp->ce_support.bam) {
+ cp->platform_support.ce_shared = 0;
+ cp->platform_support.shared_ce_resource = 0;
+ cp->platform_support.hw_key_support = 0;
+ cp->platform_support.bus_scale_table = NULL;
+ cp->platform_support.sha_hmac = 1;
+ } else {
+ platform_support =
+ (struct msm_ce_hw_support *)pdev->dev.platform_data;
+ cp->platform_support.ce_shared = platform_support->ce_shared;
+ cp->platform_support.shared_ce_resource =
platform_support->shared_ce_resource;
- cp->platform_support.hw_key_support =
+ cp->platform_support.hw_key_support =
platform_support->hw_key_support;
- cp->platform_support.bus_scale_table =
+ cp->platform_support.bus_scale_table =
platform_support->bus_scale_table;
+ cp->platform_support.sha_hmac = platform_support->sha_hmac;
+ }
cp->high_bw_req_count = 0;
cp->ce_lock_count = 0;
- cp->platform_support.sha_hmac = platform_support->sha_hmac;
+
if (cp->platform_support.ce_shared)
INIT_WORK(&cp->unlock_ce_ws, qcrypto_unlock_ce);
@@ -3370,6 +3589,4 @@
module_exit(_qcrypto_exit);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm Crypto driver");
-MODULE_VERSION("1.22");
diff --git a/drivers/crypto/msm/qcryptohw_50.h b/drivers/crypto/msm/qcryptohw_50.h
new file mode 100644
index 0000000..898109e
--- /dev/null
+++ b/drivers/crypto/msm/qcryptohw_50.h
@@ -0,0 +1,501 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+
+
+#define QCE_AUTH_REG_BYTE_COUNT 4
+#define CRYPTO_VERSION_REG 0x1A000
+
+#define CRYPTO_DATA_IN0_REG 0x1A010
+#define CRYPTO_DATA_IN1_REG 0x1A014
+#define CRYPTO_DATA_IN2_REG 0x1A018
+#define CRYPTO_DATA_IN3_REG 0x1A01C
+
+#define CRYPTO_DATA_OUT0_REG 0x1A020
+#define CRYPTO_DATA_OUT1_REG 0x1A024
+#define CRYPTO_DATA_OUT2_REG 0x1A028
+#define CRYPTO_DATA_OUT3_REG 0x1A02C
+
+#define CRYPTO_STATUS_REG 0x1A100
+#define CRYPTO_STATUS2_REG			0x1A104
+#define CRYPTO_ENGINES_AVAIL 0x1A108
+#define CRYPTO_FIFO_SIZES_REG 0x1A10C
+
+#define CRYPTO_SEG_SIZE_REG 0x1A110
+#define CRYPTO_GOPROC_REG 0x1A120
+#define CRYPTO_GOPROC_QC_KEY_REG 0x1B000
+#define CRYPTO_GOPROC_OEM_KEY_REG 0x1C000
+
+#define CRYPTO_ENCR_SEG_CFG_REG 0x1A200
+#define CRYPTO_ENCR_SEG_SIZE_REG 0x1A204
+#define CRYPTO_ENCR_SEG_START_REG 0x1A208
+
+#define CRYPTO_ENCR_KEY0_REG 0x1D000
+#define CRYPTO_ENCR_KEY1_REG 0x1D004
+#define CRYPTO_ENCR_KEY2_REG 0x1D008
+#define CRYPTO_ENCR_KEY3_REG 0x1D00C
+#define CRYPTO_ENCR_KEY4_REG 0x1D010
+#define CRYPTO_ENCR_KEY5_REG 0x1D014
+#define CRYPTO_ENCR_KEY6_REG 0x1D018
+#define CRYPTO_ENCR_KEY7_REG 0x1D01C
+
+#define CRYPTO_ENCR_XTS_KEY0_REG 0x1D020
+#define CRYPTO_ENCR_XTS_KEY1_REG 0x1D024
+#define CRYPTO_ENCR_XTS_KEY2_REG 0x1D028
+#define CRYPTO_ENCR_XTS_KEY3_REG 0x1D02C
+#define CRYPTO_ENCR_XTS_KEY4_REG 0x1D030
+#define CRYPTO_ENCR_XTS_KEY5_REG 0x1D034
+#define CRYPTO_ENCR_XTS_KEY6_REG 0x1D038
+#define CRYPTO_ENCR_XTS_KEY7_REG 0x1D03C
+
+#define CRYPTO_ENCR_PIP0_KEY0_REG 0x1E000
+#define CRYPTO_ENCR_PIP0_KEY1_REG 0x1E004
+#define CRYPTO_ENCR_PIP0_KEY2_REG 0x1E008
+#define CRYPTO_ENCR_PIP0_KEY3_REG 0x1E00C
+#define CRYPTO_ENCR_PIP0_KEY4_REG 0x1E010
+#define CRYPTO_ENCR_PIP0_KEY5_REG		0x1E014
+#define CRYPTO_ENCR_PIP0_KEY6_REG		0x1E018
+#define CRYPTO_ENCR_PIP0_KEY7_REG		0x1E01C
+
+#define CRYPTO_ENCR_PIP1_KEY0_REG 0x1E000
+#define CRYPTO_ENCR_PIP1_KEY1_REG 0x1E004
+#define CRYPTO_ENCR_PIP1_KEY2_REG 0x1E008
+#define CRYPTO_ENCR_PIP1_KEY3_REG 0x1E00C
+#define CRYPTO_ENCR_PIP1_KEY4_REG 0x1E010
+#define CRYPTO_ENCR_PIP1_KEY5_REG 0x1E014
+#define CRYPTO_ENCR_PIP1_KEY6_REG 0x1E018
+#define CRYPTO_ENCR_PIP1_KEY7_REG 0x1E01C
+
+#define CRYPTO_ENCR_PIP2_KEY0_REG 0x1E020
+#define CRYPTO_ENCR_PIP2_KEY1_REG 0x1E024
+#define CRYPTO_ENCR_PIP2_KEY2_REG 0x1E028
+#define CRYPTO_ENCR_PIP2_KEY3_REG 0x1E02C
+#define CRYPTO_ENCR_PIP2_KEY4_REG 0x1E030
+#define CRYPTO_ENCR_PIP2_KEY5_REG 0x1E034
+#define CRYPTO_ENCR_PIP2_KEY6_REG 0x1E038
+#define CRYPTO_ENCR_PIP2_KEY7_REG 0x1E03C
+
+#define CRYPTO_ENCR_PIP3_KEY0_REG 0x1E040
+#define CRYPTO_ENCR_PIP3_KEY1_REG 0x1E044
+#define CRYPTO_ENCR_PIP3_KEY2_REG 0x1E048
+#define CRYPTO_ENCR_PIP3_KEY3_REG 0x1E04C
+#define CRYPTO_ENCR_PIP3_KEY4_REG 0x1E050
+#define CRYPTO_ENCR_PIP3_KEY5_REG 0x1E054
+#define CRYPTO_ENCR_PIP3_KEY6_REG 0x1E058
+#define CRYPTO_ENCR_PIP3_KEY7_REG 0x1E05C
+
+#define CRYPTO_ENCR_PIP0_XTS_KEY0_REG 0x1E200
+#define CRYPTO_ENCR_PIP0_XTS_KEY1_REG 0x1E204
+#define CRYPTO_ENCR_PIP0_XTS_KEY2_REG 0x1E208
+#define CRYPTO_ENCR_PIP0_XTS_KEY3_REG 0x1E20C
+#define CRYPTO_ENCR_PIP0_XTS_KEY4_REG 0x1E210
+#define CRYPTO_ENCR_PIP0_XTS_KEY5_REG 0x1E214
+#define CRYPTO_ENCR_PIP0_XTS_KEY6_REG 0x1E218
+#define CRYPTO_ENCR_PIP0_XTS_KEY7_REG 0x1E21C
+
+#define CRYPTO_ENCR_PIP1_XTS_KEY0_REG 0x1E220
+#define CRYPTO_ENCR_PIP1_XTS_KEY1_REG 0x1E224
+#define CRYPTO_ENCR_PIP1_XTS_KEY2_REG 0x1E228
+#define CRYPTO_ENCR_PIP1_XTS_KEY3_REG 0x1E22C
+#define CRYPTO_ENCR_PIP1_XTS_KEY4_REG 0x1E230
+#define CRYPTO_ENCR_PIP1_XTS_KEY5_REG 0x1E234
+#define CRYPTO_ENCR_PIP1_XTS_KEY6_REG 0x1E238
+#define CRYPTO_ENCR_PIP1_XTS_KEY7_REG 0x1E23C
+
+#define CRYPTO_ENCR_PIP2_XTS_KEY0_REG 0x1E240
+#define CRYPTO_ENCR_PIP2_XTS_KEY1_REG 0x1E244
+#define CRYPTO_ENCR_PIP2_XTS_KEY2_REG 0x1E248
+#define CRYPTO_ENCR_PIP2_XTS_KEY3_REG 0x1E24C
+#define CRYPTO_ENCR_PIP2_XTS_KEY4_REG 0x1E250
+#define CRYPTO_ENCR_PIP2_XTS_KEY5_REG 0x1E254
+#define CRYPTO_ENCR_PIP2_XTS_KEY6_REG 0x1E258
+#define CRYPTO_ENCR_PIP2_XTS_KEY7_REG 0x1E25C
+
+#define CRYPTO_ENCR_PIP3_XTS_KEY0_REG 0x1E260
+#define CRYPTO_ENCR_PIP3_XTS_KEY1_REG 0x1E264
+#define CRYPTO_ENCR_PIP3_XTS_KEY2_REG 0x1E268
+#define CRYPTO_ENCR_PIP3_XTS_KEY3_REG 0x1E26C
+#define CRYPTO_ENCR_PIP3_XTS_KEY4_REG 0x1E270
+#define CRYPTO_ENCR_PIP3_XTS_KEY5_REG 0x1E274
+#define CRYPTO_ENCR_PIP3_XTS_KEY6_REG 0x1E278
+#define CRYPTO_ENCR_PIP3_XTS_KEY7_REG 0x1E27C
+
+
+#define CRYPTO_CNTR0_IV0_REG 0x1A20C
+#define CRYPTO_CNTR1_IV1_REG 0x1A210
+#define CRYPTO_CNTR2_IV2_REG 0x1A214
+#define CRYPTO_CNTR3_IV3_REG 0x1A218
+
+#define CRYPTO_CNTR_MASK_REG 0x1A21C
+
+#define CRYPTO_ENCR_CCM_INT_CNTR0_REG 0x1A220
+#define CRYPTO_ENCR_CCM_INT_CNTR1_REG 0x1A224
+#define CRYPTO_ENCR_CCM_INT_CNTR2_REG 0x1A228
+#define CRYPTO_ENCR_CCM_INT_CNTR3_REG 0x1A22C
+
+#define CRYPTO_ENCR_XTS_DU_SIZE_REG		0x1A230
+
+#define CRYPTO_AUTH_SEG_CFG_REG 0x1A300
+#define CRYPTO_AUTH_SEG_SIZE_REG 0x1A304
+#define CRYPTO_AUTH_SEG_START_REG 0x1A308
+
+#define CRYPTO_AUTH_KEY0_REG 0x1D040
+#define CRYPTO_AUTH_KEY1_REG 0x1D044
+#define CRYPTO_AUTH_KEY2_REG 0x1D048
+#define CRYPTO_AUTH_KEY3_REG 0x1D04C
+#define CRYPTO_AUTH_KEY4_REG 0x1D050
+#define CRYPTO_AUTH_KEY5_REG 0x1D054
+#define CRYPTO_AUTH_KEY6_REG 0x1D058
+#define CRYPTO_AUTH_KEY7_REG 0x1D05C
+#define CRYPTO_AUTH_KEY8_REG 0x1D060
+#define CRYPTO_AUTH_KEY9_REG 0x1D064
+#define CRYPTO_AUTH_KEY10_REG 0x1D068
+#define CRYPTO_AUTH_KEY11_REG 0x1D06C
+#define CRYPTO_AUTH_KEY12_REG 0x1D070
+#define CRYPTO_AUTH_KEY13_REG 0x1D074
+#define CRYPTO_AUTH_KEY14_REG 0x1D078
+#define CRYPTO_AUTH_KEY15_REG 0x1D07C
+
+#define CRYPTO_AUTH_PIPE0_KEY0_REG 0x1E800
+#define CRYPTO_AUTH_PIPE0_KEY1_REG 0x1E804
+#define CRYPTO_AUTH_PIPE0_KEY2_REG 0x1E808
+#define CRYPTO_AUTH_PIPE0_KEY3_REG 0x1E80C
+#define CRYPTO_AUTH_PIPE0_KEY4_REG 0x1E810
+#define CRYPTO_AUTH_PIPE0_KEY5_REG 0x1E814
+#define CRYPTO_AUTH_PIPE0_KEY6_REG 0x1E818
+#define CRYPTO_AUTH_PIPE0_KEY7_REG 0x1E81C
+#define CRYPTO_AUTH_PIPE0_KEY8_REG 0x1E820
+#define CRYPTO_AUTH_PIPE0_KEY9_REG 0x1E824
+#define CRYPTO_AUTH_PIPE0_KEY10_REG 0x1E828
+#define CRYPTO_AUTH_PIPE0_KEY11_REG 0x1E82C
+#define CRYPTO_AUTH_PIPE0_KEY12_REG 0x1E830
+#define CRYPTO_AUTH_PIPE0_KEY13_REG 0x1E834
+#define CRYPTO_AUTH_PIPE0_KEY14_REG 0x1E838
+#define CRYPTO_AUTH_PIPE0_KEY15_REG 0x1E83C
+
+#define CRYPTO_AUTH_PIPE1_KEY0_REG 0x1E800
+#define CRYPTO_AUTH_PIPE1_KEY1_REG 0x1E804
+#define CRYPTO_AUTH_PIPE1_KEY2_REG 0x1E808
+#define CRYPTO_AUTH_PIPE1_KEY3_REG 0x1E80C
+#define CRYPTO_AUTH_PIPE1_KEY4_REG 0x1E810
+#define CRYPTO_AUTH_PIPE1_KEY5_REG 0x1E814
+#define CRYPTO_AUTH_PIPE1_KEY6_REG 0x1E818
+#define CRYPTO_AUTH_PIPE1_KEY7_REG 0x1E81C
+#define CRYPTO_AUTH_PIPE1_KEY8_REG 0x1E820
+#define CRYPTO_AUTH_PIPE1_KEY9_REG 0x1E824
+#define CRYPTO_AUTH_PIPE1_KEY10_REG 0x1E828
+#define CRYPTO_AUTH_PIPE1_KEY11_REG 0x1E82C
+#define CRYPTO_AUTH_PIPE1_KEY12_REG 0x1E830
+#define CRYPTO_AUTH_PIPE1_KEY13_REG 0x1E834
+#define CRYPTO_AUTH_PIPE1_KEY14_REG 0x1E838
+#define CRYPTO_AUTH_PIPE1_KEY15_REG 0x1E83C
+
+#define CRYPTO_AUTH_PIPE2_KEY0_REG 0x1E840
+#define CRYPTO_AUTH_PIPE2_KEY1_REG 0x1E844
+#define CRYPTO_AUTH_PIPE2_KEY2_REG 0x1E848
+#define CRYPTO_AUTH_PIPE2_KEY3_REG 0x1E84C
+#define CRYPTO_AUTH_PIPE2_KEY4_REG 0x1E850
+#define CRYPTO_AUTH_PIPE2_KEY5_REG 0x1E854
+#define CRYPTO_AUTH_PIPE2_KEY6_REG 0x1E858
+#define CRYPTO_AUTH_PIPE2_KEY7_REG 0x1E85C
+#define CRYPTO_AUTH_PIPE2_KEY8_REG 0x1E860
+#define CRYPTO_AUTH_PIPE2_KEY9_REG 0x1E864
+#define CRYPTO_AUTH_PIPE2_KEY10_REG 0x1E868
+#define CRYPTO_AUTH_PIPE2_KEY11_REG 0x1E86C
+#define CRYPTO_AUTH_PIPE2_KEY12_REG 0x1E870
+#define CRYPTO_AUTH_PIPE2_KEY13_REG 0x1E874
+#define CRYPTO_AUTH_PIPE2_KEY14_REG 0x1E878
+#define CRYPTO_AUTH_PIPE2_KEY15_REG 0x1E87C
+
+#define CRYPTO_AUTH_PIPE3_KEY0_REG 0x1E880
+#define CRYPTO_AUTH_PIPE3_KEY1_REG 0x1E884
+#define CRYPTO_AUTH_PIPE3_KEY2_REG 0x1E888
+#define CRYPTO_AUTH_PIPE3_KEY3_REG 0x1E88C
+#define CRYPTO_AUTH_PIPE3_KEY4_REG 0x1E890
+#define CRYPTO_AUTH_PIPE3_KEY5_REG 0x1E894
+#define CRYPTO_AUTH_PIPE3_KEY6_REG 0x1E898
+#define CRYPTO_AUTH_PIPE3_KEY7_REG 0x1E89C
+#define CRYPTO_AUTH_PIPE3_KEY8_REG 0x1E8A0
+#define CRYPTO_AUTH_PIPE3_KEY9_REG 0x1E8A4
+#define CRYPTO_AUTH_PIPE3_KEY10_REG 0x1E8A8
+#define CRYPTO_AUTH_PIPE3_KEY11_REG 0x1E8AC
+#define CRYPTO_AUTH_PIPE3_KEY12_REG 0x1E8B0
+#define CRYPTO_AUTH_PIPE3_KEY13_REG 0x1E8B4
+#define CRYPTO_AUTH_PIPE3_KEY14_REG 0x1E8B8
+#define CRYPTO_AUTH_PIPE3_KEY15_REG 0x1E8BC
+
+
+#define CRYPTO_AUTH_IV0_REG 0x1A310
+#define CRYPTO_AUTH_IV1_REG 0x1A314
+#define CRYPTO_AUTH_IV2_REG 0x1A318
+#define CRYPTO_AUTH_IV3_REG 0x1A31C
+#define CRYPTO_AUTH_IV4_REG 0x1A320
+#define CRYPTO_AUTH_IV5_REG 0x1A324
+#define CRYPTO_AUTH_IV6_REG 0x1A328
+#define CRYPTO_AUTH_IV7_REG 0x1A32C
+#define CRYPTO_AUTH_IV8_REG 0x1A330
+#define CRYPTO_AUTH_IV9_REG 0x1A334
+#define CRYPTO_AUTH_IV10_REG 0x1A338
+#define CRYPTO_AUTH_IV11_REG 0x1A33C
+#define CRYPTO_AUTH_IV12_REG 0x1A340
+#define CRYPTO_AUTH_IV13_REG 0x1A344
+#define CRYPTO_AUTH_IV14_REG 0x1A348
+#define CRYPTO_AUTH_IV15_REG 0x1A34C
+
+#define CRYPTO_AUTH_INFO_NONCE0_REG 0x1A350
+#define CRYPTO_AUTH_INFO_NONCE1_REG 0x1A354
+#define CRYPTO_AUTH_INFO_NONCE2_REG 0x1A358
+#define CRYPTO_AUTH_INFO_NONCE3_REG 0x1A35C
+
+#define CRYPTO_AUTH_BYTECNT0_REG 0x1A390
+#define CRYPTO_AUTH_BYTECNT1_REG 0x1A394
+#define CRYPTO_AUTH_BYTECNT2_REG 0x1A398
+#define CRYPTO_AUTH_BYTECNT3_REG 0x1A39C
+
+#define CRYPTO_AUTH_EXP_MAC0_REG 0x1A3A0
+#define CRYPTO_AUTH_EXP_MAC1_REG 0x1A3A4
+#define CRYPTO_AUTH_EXP_MAC2_REG 0x1A3A8
+#define CRYPTO_AUTH_EXP_MAC3_REG 0x1A3AC
+#define CRYPTO_AUTH_EXP_MAC4_REG 0x1A3B0
+#define CRYPTO_AUTH_EXP_MAC5_REG 0x1A3B4
+#define CRYPTO_AUTH_EXP_MAC6_REG 0x1A3B8
+#define CRYPTO_AUTH_EXP_MAC7_REG 0x1A3BC
+
+#define CRYPTO_CONFIG_REG 0x1A400
+#define CRYPTO_DEBUG_ENABLE_REG 0x1AF00
+#define CRYPTO_DEBUG_REG 0x1AF04
+
+
+
+/* Register bits */
+#define CRYPTO_CORE_STEP_REV_MASK 0xFFFF
+#define CRYPTO_CORE_STEP_REV 0 /* bit 15-0 */
+#define CRYPTO_CORE_MAJOR_REV_MASK 0xFF000000
+#define CRYPTO_CORE_MAJOR_REV 24 /* bit 31-24 */
+#define CRYPTO_CORE_MINOR_REV_MASK 0xFF0000
+#define CRYPTO_CORE_MINOR_REV 16 /* bit 23-16 */
+
+/* status reg */
+#define CRYPTO_MAC_FAILED 31
+#define CRYPTO_DOUT_SIZE_AVAIL 26 /* bit 30-26 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK (0x1F << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL		21 /* bit 25-21 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK (0x1F << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_ACCESS_VIOL 19
+#define CRYPTO_PIPE_ACTIVE_ERR 18
+#define CRYPTO_CFG_CHNG_ERR 17
+#define CRYPTO_DOUT_ERR 16
+#define CRYPTO_DIN_ERR 15
+#define CRYPTO_AXI_ERR 14
+#define CRYPTO_CRYPTO_STATE 10 /* bit 13-10 */
+#define CRYPTO_CRYPTO_STATE_MASK (0xF << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY 9
+#define CRYPTO_AUTH_BUSY 8
+#define CRYPTO_DOUT_INTR 7
+#define CRYPTO_DIN_INTR 6
+#define CRYPTO_OP_DONE_INTR 5
+#define CRYPTO_ERR_INTR 4
+#define CRYPTO_DOUT_RDY 3
+#define CRYPTO_DIN_RDY 2
+#define CRYPTO_OPERATION_DONE 1
+#define CRYPTO_SW_ERR 0
+
+/* status2 reg */
+#define CRYPTO_AXI_EXTRA 1
+#define CRYPTO_LOCKED 2
+
+/* config reg */
+#define CRYPTO_REQ_SIZE 17 /* bit 20-17 */
+#define CRYPTO_REQ_SIZE_MASK (0xF << CRYPTO_REQ_SIZE)
+#define CRYPTO_REQ_SIZE_ENUM_1_BEAT 0
+#define CRYPTO_REQ_SIZE_ENUM_2_BEAT 1
+#define CRYPTO_REQ_SIZE_ENUM_3_BEAT 2
+#define CRYPTO_REQ_SIZE_ENUM_4_BEAT 3
+#define CRYPTO_REQ_SIZE_ENUM_5_BEAT 4
+#define CRYPTO_REQ_SIZE_ENUM_6_BEAT 5
+#define CRYPTO_REQ_SIZE_ENUM_7_BEAT 6
+#define CRYPTO_REQ_SIZE_ENUM_8_BEAT 7
+#define CRYPTO_REQ_SIZE_ENUM_9_BEAT 8
+#define CRYPTO_REQ_SIZE_ENUM_10_BEAT 9
+#define CRYPTO_REQ_SIZE_ENUM_11_BEAT 10
+#define CRYPTO_REQ_SIZE_ENUM_12_BEAT 11
+#define CRYPTO_REQ_SIZE_ENUM_13_BEAT 12
+#define CRYPTO_REQ_SIZE_ENUM_14_BEAT 13
+#define CRYPTO_REQ_SIZE_ENUM_15_BEAT 14
+#define CRYPTO_REQ_SIZE_ENUM_16_BEAT 15
+
+#define CRYPTO_MAX_QUEUED_REQ 14 /* bit 16-14 */
+#define CRYPTO_MAX_QUEUED_REQ_MASK (0x7 << CRYPTO_MAX_QUEUED_REQ)
+#define CRYPTO_ENUM_1_QUEUED_REQS 0
+#define CRYPTO_ENUM_2_QUEUED_REQS 1
+#define CRYPTO_ENUM_3_QUEUED_REQS 2
+
+#define CRYPTO_IRQ_ENABLES 10 /* bit 13-10 */
+#define CRYPTO_IRQ_ENABLES_MASK (0xF << CRYPTO_IRQ_ENABLES)
+
+#define CRYPTO_LITTLE_ENDIAN_MODE 9
+#define CRYPTO_PIPE_SET_SELECT 5 /* bit 8-5 */
+#define CRYPTO_PIPE_SET_SELECT_MASK (0xF << CRYPTO_PIPE_SET_SELECT)
+
+#define CRYPTO_HIGH_SPD_EN_N 4
+
+#define CRYPTO_MASK_DOUT_INTR 3
+#define CRYPTO_MASK_DIN_INTR 2
+#define CRYPTO_MASK_OP_DONE_INTR 1
+#define CRYPTO_MASK_ERR_INTR 0
+
+/* auth_seg_cfg reg */
+#define CRYPTO_COMP_EXP_MAC 24
+#define CRYPTO_COMP_EXP_MAC_DISABLED 0
+#define CRYPTO_COMP_EXP_MAC_ENABLED 1
+
+#define CRYPTO_F9_DIRECTION 23
+#define CRYPTO_F9_DIRECTION_UPLINK 0
+#define CRYPTO_F9_DIRECTION_DOWNLINK 1
+
+#define CRYPTO_AUTH_NONCE_NUM_WORDS 20 /* bit 22-20 */
+#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
+ (0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
+
+#define CRYPTO_USE_PIPE_KEY_AUTH 19
+#define CRYPTO_USE_HW_KEY_AUTH 18
+#define CRYPTO_FIRST 17
+#define CRYPTO_LAST 16
+
+#define CRYPTO_AUTH_POS			14 /* bit 15 .. 14*/
+#define CRYPTO_AUTH_POS_MASK (0x3 << CRYPTO_AUTH_POS)
+#define CRYPTO_AUTH_POS_BEFORE 0
+#define CRYPTO_AUTH_POS_AFTER 1
+
+#define CRYPTO_AUTH_SIZE 9 /* bits 13 .. 9*/
+#define CRYPTO_AUTH_SIZE_MASK (0x1F << CRYPTO_AUTH_SIZE)
+#define CRYPTO_AUTH_SIZE_SHA1 0
+#define CRYPTO_AUTH_SIZE_SHA256 1
+#define CRYPTO_AUTH_SIZE_ENUM_1_BYTES 0
+#define CRYPTO_AUTH_SIZE_ENUM_2_BYTES 1
+#define CRYPTO_AUTH_SIZE_ENUM_3_BYTES 2
+#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES 3
+#define CRYPTO_AUTH_SIZE_ENUM_5_BYTES 4
+#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES 5
+#define CRYPTO_AUTH_SIZE_ENUM_7_BYTES 6
+#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES 7
+#define CRYPTO_AUTH_SIZE_ENUM_9_BYTES 8
+#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES 9
+#define CRYPTO_AUTH_SIZE_ENUM_11_BYTES 10
+#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES 11
+#define CRYPTO_AUTH_SIZE_ENUM_13_BYTES 12
+#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES 13
+#define CRYPTO_AUTH_SIZE_ENUM_15_BYTES 14
+#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES 15
+
+
+#define CRYPTO_AUTH_MODE 6 /* bit 8 .. 6*/
+#define CRYPTO_AUTH_MODE_MASK (0x7 << CRYPTO_AUTH_MODE)
+#define CRYPTO_AUTH_MODE_HASH 0
+#define CRYPTO_AUTH_MODE_HMAC 1
+#define CRYPTO_AUTH_MODE_CCM 0
+#define CRYPTO_AUTH_MODE_CMAC 1
+
+#define CRYPTO_AUTH_KEY_SIZE 3 /* bit 5 .. 3*/
+#define CRYPTO_AUTH_KEY_SIZE_MASK (0x7 << CRYPTO_AUTH_KEY_SIZE)
+#define CRYPTO_AUTH_KEY_SZ_AES128 0
+#define CRYPTO_AUTH_KEY_SZ_AES256 2
+
+#define CRYPTO_AUTH_ALG 0 /* bit 2 .. 0*/
+#define CRYPTO_AUTH_ALG_MASK 7
+#define CRYPTO_AUTH_ALG_NONE 0
+#define CRYPTO_AUTH_ALG_SHA 1
+#define CRYPTO_AUTH_ALG_AES 2
+#define CRYPTO_AUTH_ALG_KASUMI 3
+#define CRYPTO_AUTH_ALG_SNOW3G 4
+
+/* encr_xts_du_size reg */
+#define CRYPTO_ENCR_XTS_DU_SIZE 0 /* bit 19-0 */
+#define CRYPTO_ENCR_XTS_DU_SIZE_MASK 0xfffff
+
+/* encr_seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE 17/* bit */
+#define CRYPTO_F8_KEYSTREAM_DISABLED 0
+#define CRYPTO_F8_KEYSTREAM_ENABLED 1
+
+#define CRYPTO_F8_DIRECTION 16 /* bit */
+#define CRYPTO_F8_DIRECTION_UPLINK 0
+#define CRYPTO_F8_DIRECTION_DOWNLINK 1
+
+
+#define CRYPTO_USE_PIPE_KEY_ENCR 15 /* bit */
+#define CRYPTO_USE_PIPE_KEY_ENCR_ENABLED 1
+#define CRYPTO_USE_KEY_REGISTERS 0
+
+
+#define CRYPTO_USE_HW_KEY_ENCR 14
+#define CRYPTO_USE_KEY_REG 0
+#define CRYPTO_USE_HW_KEY 1
+
+#define CRYPTO_LAST_CCM 13
+#define CRYPTO_LAST_CCM_XFR 1
+#define CRYPTO_INTERM_CCM_XFR 0
+
+
+#define CRYPTO_CNTR_ALG 11 /* bit 12-11 */
+#define CRYPTO_CNTR_ALG_MASK (3 << CRYPTO_CNTR_ALG)
+#define CRYPTO_CNTR_ALG_NIST 0
+
+#define CRYPTO_ENCODE 10
+
+#define CRYPTO_ENCR_MODE 6 /* bit 9-6 */
+#define CRYPTO_ENCR_MODE_MASK (0xF << CRYPTO_ENCR_MODE)
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_ECB 0
+#define CRYPTO_ENCR_MODE_CBC 1
+#define CRYPTO_ENCR_MODE_CTR 2
+#define CRYPTO_ENCR_MODE_XTS 3
+#define CRYPTO_ENCR_MODE_CCM 4
+
+#define CRYPTO_ENCR_KEY_SZ 3 /* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK (7 << CRYPTO_ENCR_KEY_SZ)
+#define CRYPTO_ENCR_KEY_SZ_DES 0
+#define CRYPTO_ENCR_KEY_SZ_3DES 1
+#define CRYPTO_ENCR_KEY_SZ_AES128 0
+#define CRYPTO_ENCR_KEY_SZ_AES256 2
+#define CRYPTO_ENCR_KEY_SZ_UEA1 0
+#define CRYPTO_ENCR_KEY_SZ_UEA2 1
+
+#define CRYPTO_ENCR_ALG 0 /* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK (7 << CRYPTO_ENCR_ALG)
+#define CRYPTO_ENCR_ALG_NONE 0
+#define CRYPTO_ENCR_ALG_DES 1
+#define CRYPTO_ENCR_ALG_AES 2
+#define CRYPTO_ENCR_ALG_KASUMI 3
+#define CRYPTO_ENCR_ALG_SNOW_3G 5
+
+/* goproc reg */
+#define CRYPTO_GO 0
+#define CRYPTO_CLR_CNTXT 1
+#define CRYPTO_RESULTS_DUMP 2
+
+
+/* engines_avail */
+#define CRYPTO_ENCR_AES_SEL 0
+#define CRYPTO_DES_SEL 3
+#define CRYPTO_ENCR_SNOW3G_SEL 4
+#define CRYPTO_ENCR_KASUMI_SEL 5
+#define CRYPTO_SHA_SEL 6
+#define CRYPTO_SHA512_SEL 7
+#define CRYPTO_AUTH_AES_SEL 8
+#define CRYPTO_AUTH_SNOW3G_SEL 9
+#define CRYPTO_AUTH_KASUMI_SEL 10
+#define CRYPTO_BAM_SEL 11
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ */
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 741d4fa..4fe1f01 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -1306,10 +1306,9 @@
return PTR_ERR(dmabuf);
}
fd = dma_buf_fd(dmabuf, O_CLOEXEC);
- if (fd < 0) {
+ if (fd < 0)
dma_buf_put(dmabuf);
- ion_buffer_put(buffer);
- }
+
return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 7720df0..3047693 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -289,6 +289,10 @@
KGSL_IOMMU_CONTEXT_USER))
goto done;
+ cmds += __adreno_add_idle_indirect_cmds(cmds,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
if (cpu_is_msm8960())
cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
device->mmu.setstate_memory.gpuaddr +
@@ -299,8 +303,8 @@
device->mmu.setstate_memory.gpuaddr +
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ pt_val = kgsl_mmu_pt_get_base_addr(device->mmu.hwpagetable);
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
- pt_val = kgsl_mmu_pt_get_base_addr(device->mmu.hwpagetable);
/*
* We need to perfrom the following operations for all
* IOMMU units
@@ -334,45 +338,32 @@
reg_pt_val,
device->mmu.setstate_memory.gpuaddr +
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
-
- /* set the asid */
- *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
- *cmds++ = reg_map_desc[i]->gpuaddr +
- (KGSL_IOMMU_CONTEXT_USER <<
- KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_CONTEXTIDR;
- *cmds++ = kgsl_mmu_get_hwpagetable_asid(&device->mmu);
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
- *cmds++ = 0x00000000;
-
- /* Read back asid to ensure above write completes */
- cmds += adreno_add_read_cmds(device, cmds,
- reg_map_desc[i]->gpuaddr +
- (KGSL_IOMMU_CONTEXT_USER <<
- KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_CONTEXTIDR,
- kgsl_mmu_get_hwpagetable_asid(&device->mmu),
- device->mmu.setstate_memory.gpuaddr +
- KGSL_IOMMU_SETSTATE_NOP_OFFSET);
}
/* invalidate all base pointers */
*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
*cmds++ = 0x7fff;
- if (flags & KGSL_MMUFLAGS_TLBFLUSH)
- cmds += __adreno_add_idle_indirect_cmds(cmds,
- device->mmu.setstate_memory.gpuaddr +
- KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ cmds += __adreno_add_idle_indirect_cmds(cmds,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
}
if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
/*
- * tlb flush based on asid, no need to flush entire tlb
+ * tlb flush
*/
for (i = 0; i < num_iommu_units; i++) {
+ reg_pt_val = (pt_val &
+ (KGSL_IOMMU_TTBR0_PA_MASK <<
+ KGSL_IOMMU_TTBR0_PA_SHIFT)) +
+ kgsl_mmu_get_pt_lsb(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER);
+
*cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
*cmds++ = (reg_map_desc[i]->gpuaddr +
(KGSL_IOMMU_CONTEXT_USER <<
KGSL_IOMMU_CTX_SHIFT) +
- KGSL_IOMMU_CTX_TLBIASID);
- *cmds++ = kgsl_mmu_get_hwpagetable_asid(&device->mmu);
+ KGSL_IOMMU_CTX_TLBIALL);
+ *cmds++ = 1;
cmds += __adreno_add_idle_indirect_cmds(cmds,
device->mmu.setstate_memory.gpuaddr +
@@ -381,9 +372,8 @@
cmds += adreno_add_read_cmds(device, cmds,
reg_map_desc[i]->gpuaddr +
(KGSL_IOMMU_CONTEXT_USER <<
- KGSL_IOMMU_CTX_SHIFT) +
- KGSL_IOMMU_CONTEXTIDR,
- kgsl_mmu_get_hwpagetable_asid(&device->mmu),
+ KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_TTBR0,
+ reg_pt_val,
device->mmu.setstate_memory.gpuaddr +
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
}
@@ -793,133 +783,314 @@
return 0;
}
+static void adreno_mark_context_status(struct kgsl_device *device,
+ int recovery_status)
+{
+ struct kgsl_context *context;
+ int next = 0;
+ /*
+ * Set the reset status of all contexts to
+ * INNOCENT_CONTEXT_RESET_EXT except for the bad context
+ * since thats the guilty party, if recovery failed then
+ * mark all as guilty
+ */
+ while ((context = idr_get_next(&device->context_idr, &next))) {
+ struct adreno_context *adreno_context = context->devctxt;
+ if (recovery_status) {
+ context->reset_status =
+ KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
+ adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
+ } else if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
+ context->reset_status) {
+			if (adreno_context->flags & (CTXT_FLAGS_GPU_HANG |
+				CTXT_FLAGS_GPU_HANG_RECOVERED))
+ context->reset_status =
+ KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
+ else
+ context->reset_status =
+ KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT;
+ }
+ next = next + 1;
+ }
+}
+
+static void adreno_set_max_ts_for_bad_ctxs(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ struct kgsl_context *context;
+ struct adreno_context *temp_adreno_context;
+ int next = 0;
+
+ while ((context = idr_get_next(&device->context_idr, &next))) {
+ temp_adreno_context = context->devctxt;
+ if (temp_adreno_context->flags & CTXT_FLAGS_GPU_HANG) {
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id,
+ soptimestamp),
+ rb->timestamp[context->id]);
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id,
+ eoptimestamp),
+ rb->timestamp[context->id]);
+ }
+ next = next + 1;
+ }
+}
+
+static void adreno_destroy_recovery_data(struct adreno_recovery_data *rec_data)
+{
+ vfree(rec_data->rb_buffer);
+ vfree(rec_data->bad_rb_buffer);
+}
+
+static int adreno_setup_recovery_data(struct kgsl_device *device,
+ struct adreno_recovery_data *rec_data)
+{
+ int ret = 0;
+ unsigned int ib1_sz, ib2_sz;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ memset(rec_data, 0, sizeof(*rec_data));
+
+ adreno_regread(device, REG_CP_IB1_BUFSZ, &ib1_sz);
+ adreno_regread(device, REG_CP_IB2_BUFSZ, &ib2_sz);
+ if (ib1_sz || ib2_sz)
+ adreno_regread(device, REG_CP_IB1_BASE, &rec_data->ib1);
+
+ kgsl_sharedmem_readl(&device->memstore, &rec_data->context_id,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ current_context));
+
+ kgsl_sharedmem_readl(&device->memstore,
+ &rec_data->global_eop,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp));
+
+ rec_data->rb_buffer = vmalloc(rb->buffer_desc.size);
+ if (!rec_data->rb_buffer) {
+ KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
+ rb->buffer_desc.size);
+ return -ENOMEM;
+ }
+
+ rec_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size);
+ if (!rec_data->bad_rb_buffer) {
+ KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
+ rb->buffer_desc.size);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+done:
+ if (ret) {
+ vfree(rec_data->rb_buffer);
+ vfree(rec_data->bad_rb_buffer);
+ }
+ return ret;
+}
+
static int
-adreno_recover_hang(struct kgsl_device *device)
+_adreno_recover_hang(struct kgsl_device *device,
+ struct adreno_recovery_data *rec_data,
+ bool try_bad_commands)
{
int ret;
- unsigned int *rb_buffer;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ struct kgsl_context *context;
+ struct adreno_context *adreno_context = NULL;
+ struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
+
+ context = idr_find(&device->context_idr, rec_data->context_id);
+ if (context == NULL) {
+ KGSL_DRV_ERR(device, "Last context unknown id:%d\n",
+ rec_data->context_id);
+ } else {
+ adreno_context = context->devctxt;
+ adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
+ }
+
+ /* Extract valid contents from rb which can still be executed after
+ * hang */
+ ret = adreno_ringbuffer_extract(rb, rec_data);
+ if (ret)
+ goto done;
+
+ /* restart device */
+ ret = adreno_stop(device);
+ if (ret) {
+ KGSL_DRV_ERR(device, "Device stop failed in recovery\n");
+ goto done;
+ }
+
+ ret = adreno_start(device, true);
+ if (ret) {
+ KGSL_DRV_ERR(device, "Device start failed in recovery\n");
+ goto done;
+ }
+
+ if (context)
+ kgsl_mmu_setstate(&device->mmu, adreno_context->pagetable,
+ KGSL_MEMSTORE_GLOBAL);
+
+ /* If iommu is used then we need to make sure that the iommu clocks
+ * are on since there could be commands in pipeline that touch iommu */
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+ ret = kgsl_mmu_enable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER);
+ if (ret)
+ goto done;
+ }
+
+	/* Do not try the bad commands if recovery has failed bad commands
+	 * once already */
+ if (!try_bad_commands)
+ rec_data->bad_rb_size = 0;
+
+ if (rec_data->bad_rb_size) {
+ int idle_ret;
+ /* submit the bad and good context commands and wait for
+ * them to pass */
+ adreno_ringbuffer_restore(rb, rec_data->bad_rb_buffer,
+ rec_data->bad_rb_size);
+ idle_ret = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+ if (idle_ret) {
+ ret = adreno_stop(device);
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Device stop failed in recovery\n");
+ goto done;
+ }
+ ret = adreno_start(device, true);
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Device start failed in recovery\n");
+ goto done;
+ }
+ if (context)
+ kgsl_mmu_setstate(&device->mmu,
+ adreno_context->pagetable,
+ KGSL_MEMSTORE_GLOBAL);
+
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+ ret = kgsl_mmu_enable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER);
+ if (ret)
+ goto done;
+ }
+
+ ret = idle_ret;
+ KGSL_DRV_ERR(device,
+ "Bad context commands hung in recovery\n");
+ } else {
+ KGSL_DRV_ERR(device,
+ "Bad context commands succeeded in recovery\n");
+ if (adreno_context)
+ adreno_context->flags = (adreno_context->flags &
+ ~CTXT_FLAGS_GPU_HANG) |
+ CTXT_FLAGS_GPU_HANG_RECOVERED;
+ adreno_dev->drawctxt_active = last_active_ctx;
+ }
+ }
+ /* If either the bad command sequence failed or we did not play it */
+ if (ret || !rec_data->bad_rb_size) {
+ adreno_ringbuffer_restore(rb, rec_data->rb_buffer,
+ rec_data->rb_size);
+ ret = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+ if (ret) {
+ /* If we fail here we can try to invalidate another
+ * context and try recovering again */
+ ret = -EAGAIN;
+ goto done;
+ }
+ /* ringbuffer now has data from the last valid context id,
+ * so restore the active_ctx to the last valid context */
+ if (rec_data->last_valid_ctx_id) {
+ struct kgsl_context *last_ctx =
+ idr_find(&device->context_idr,
+ rec_data->last_valid_ctx_id);
+ if (last_ctx)
+ adreno_dev->drawctxt_active = last_ctx->devctxt;
+ }
+ }
+done:
+ /* Turn off iommu clocks */
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
+ return ret;
+}
+
+static int
+adreno_recover_hang(struct kgsl_device *device,
+ struct adreno_recovery_data *rec_data)
+{
+ int ret = 0;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
unsigned int timestamp;
- unsigned int num_rb_contents;
- unsigned int reftimestamp;
- unsigned int enable_ts;
- unsigned int soptimestamp;
- unsigned int eoptimestamp;
- unsigned int context_id;
- struct kgsl_context *context;
- struct adreno_context *adreno_context;
- int next = 0;
- KGSL_DRV_ERR(device, "Starting recovery from 3D GPU hang....\n");
- rb_buffer = vmalloc(rb->buffer_desc.size);
- if (!rb_buffer) {
- KGSL_MEM_ERR(device,
- "Failed to allocate memory for recovery: %x\n",
- rb->buffer_desc.size);
- return -ENOMEM;
- }
- /* Extract valid contents from rb which can stil be executed after
- * hang */
- ret = adreno_ringbuffer_extract(rb, rb_buffer, &num_rb_contents);
- if (ret)
- goto done;
- kgsl_sharedmem_readl(&device->memstore, &context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
- context = idr_find(&device->context_idr, context_id);
- if (context == NULL) {
- KGSL_DRV_ERR(device, "Last context unknown id:%d\n",
- context_id);
- context_id = KGSL_MEMSTORE_GLOBAL;
- }
+ KGSL_DRV_ERR(device,
+ "Starting recovery from 3D GPU hang. Recovery parameters: IB1: 0x%X, "
+ "Bad context_id: %u, global_eop: 0x%x\n",
+ rec_data->ib1, rec_data->context_id, rec_data->global_eop);
timestamp = rb->timestamp[KGSL_MEMSTORE_GLOBAL];
KGSL_DRV_ERR(device, "Last issued global timestamp: %x\n", timestamp);
- kgsl_sharedmem_readl(&device->memstore, &reftimestamp,
- KGSL_MEMSTORE_OFFSET(context_id,
- ref_wait_ts));
- kgsl_sharedmem_readl(&device->memstore, &enable_ts,
- KGSL_MEMSTORE_OFFSET(context_id,
- ts_cmp_enable));
- kgsl_sharedmem_readl(&device->memstore, &soptimestamp,
- KGSL_MEMSTORE_OFFSET(context_id,
- soptimestamp));
- kgsl_sharedmem_readl(&device->memstore, &eoptimestamp,
- KGSL_MEMSTORE_OFFSET(context_id,
- eoptimestamp));
- /* Make sure memory is synchronized before restarting the GPU */
- mb();
- KGSL_CTXT_ERR(device,
- "Context id that caused a GPU hang: %d\n", context_id);
- /* restart device */
- ret = adreno_stop(device);
- if (ret)
- goto done;
- ret = adreno_start(device, true);
- if (ret)
- goto done;
- KGSL_DRV_ERR(device, "Device has been restarted after hang\n");
- /* Restore timestamp states */
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id, soptimestamp),
- soptimestamp);
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp),
- eoptimestamp);
+ /* We may need to replay commands multiple times based on whether
+ * multiple contexts hang the GPU */
+ while (true) {
+ if (!ret)
+ ret = _adreno_recover_hang(device, rec_data, true);
+ else
+ ret = _adreno_recover_hang(device, rec_data, false);
- if (num_rb_contents) {
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id, ref_wait_ts),
- reftimestamp);
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable),
- enable_ts);
- }
- /* Make sure all writes are posted before the GPU reads them */
- wmb();
- /* Mark the invalid context so no more commands are accepted from
- * that context */
-
- adreno_context = context->devctxt;
-
- KGSL_CTXT_ERR(device,
- "Context that caused a GPU hang: %d\n", adreno_context->id);
-
- adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
-
- /*
- * Set the reset status of all contexts to
- * INNOCENT_CONTEXT_RESET_EXT except for the bad context
- * since thats the guilty party
- */
- while ((context = idr_get_next(&device->context_idr, &next))) {
- if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
- context->reset_status) {
- if (context->id != context_id)
- context->reset_status =
- KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT;
- else
- context->reset_status =
- KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
+ if (-EAGAIN == ret) {
+ /* setup new recovery parameters and retry, this
+ * means more than 1 contexts are causing hang */
+ adreno_destroy_recovery_data(rec_data);
+ adreno_setup_recovery_data(device, rec_data);
+ KGSL_DRV_ERR(device,
+ "Retry recovery from 3D GPU hang. Recovery parameters: "
+ "IB1: 0x%X, Bad context_id: %u, global_eop: 0x%x\n",
+ rec_data->ib1, rec_data->context_id,
+ rec_data->global_eop);
+ } else {
+ break;
}
- next = next + 1;
}
- /* Restore valid commands in ringbuffer */
- adreno_ringbuffer_restore(rb, rb_buffer, num_rb_contents);
+ if (ret)
+ goto done;
+
+ /* Restore correct states after recovery */
+ if (adreno_dev->drawctxt_active)
+ device->mmu.hwpagetable =
+ adreno_dev->drawctxt_active->pagetable;
+ else
+ device->mmu.hwpagetable = device->mmu.defaultpagetable;
rb->timestamp[KGSL_MEMSTORE_GLOBAL] = timestamp;
- /* wait for idle */
- ret = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp),
+ rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
done:
- vfree(rb_buffer);
+ adreno_set_max_ts_for_bad_ctxs(device);
+ adreno_mark_context_status(device, ret);
+ if (!ret)
+ KGSL_DRV_ERR(device, "Recovery succeeded\n");
+ else
+ KGSL_DRV_ERR(device, "Recovery failed\n");
return ret;
}
-int adreno_dump_and_recover(struct kgsl_device *device)
+int
+adreno_dump_and_recover(struct kgsl_device *device)
{
int result = -ETIMEDOUT;
+ struct adreno_recovery_data rec_data;
if (device->state == KGSL_STATE_HUNG)
goto done;
@@ -934,7 +1105,8 @@
INIT_COMPLETION(device->recovery_gate);
/* Detected a hang */
-
+ /* Get the recovery data as soon as hang is detected */
+ result = adreno_setup_recovery_data(device, &rec_data);
/*
* Trigger an automatic dump of the state to
* the console
@@ -947,11 +1119,14 @@
*/
kgsl_device_snapshot(device, 1);
- result = adreno_recover_hang(device);
- if (result)
+ result = adreno_recover_hang(device, &rec_data);
+ adreno_destroy_recovery_data(&rec_data);
+ if (result) {
kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG);
- else
+ } else {
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+ mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
+ }
complete_all(&device->recovery_gate);
}
done:
@@ -1006,7 +1181,7 @@
/*NOTE: with mmu enabled, gpuaddr doesn't mean
* anything to mmap().
*/
- shadowprop.gpuaddr = device->memstore.physaddr;
+ shadowprop.gpuaddr = device->memstore.gpuaddr;
shadowprop.size = device->memstore.size;
/* GSL needs this to be set, even if it
appears to be meaningless */
@@ -1122,6 +1297,12 @@
memset(prev_reg_val, 0, sizeof(prev_reg_val));
+ /* Restrict timeout value between adreno_dev->wait_timeout and 0 */
+ if ((timeout == 0) || (timeout > adreno_dev->wait_timeout))
+ msecs = adreno_dev->wait_timeout;
+ else
+ msecs = timeout;
+
kgsl_cffdump_regpoll(device->id,
adreno_dev->gpudev->reg_rbbm_status << 2,
0x00000000, 0x80000000);
@@ -1130,7 +1311,6 @@
*/
retry:
if (rb->flags & KGSL_FLAGS_STARTED) {
- msecs = adreno_dev->wait_timeout;
msecs_first = (msecs <= 100) ? ((msecs + 4) / 5) : 100;
wait_time = jiffies + wait_timeout;
wait_time_part = jiffies + msecs_to_jiffies(msecs_first);
@@ -1566,6 +1746,13 @@
} while (time_elapsed < msecs);
hang_dump:
+ /*
+ * Check if timestamp has retired here because we may have hit
+ * recovery which can take some time and cause waiting threads
+ * to timeout
+ */
+ if (kgsl_check_timestamp(device, context, timestamp))
+ goto done;
status = -ETIMEDOUT;
KGSL_DRV_ERR(device,
"Device hang detected while waiting for timestamp: "
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 04dc3d6..57f4859 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -33,7 +33,6 @@
#define KGSL_CMD_FLAGS_NONE 0x00000000
#define KGSL_CMD_FLAGS_PMODE 0x00000001
#define KGSL_CMD_FLAGS_NO_TS_CMP 0x00000002
-#define KGSL_CMD_FLAGS_NOT_KERNEL_CMD 0x00000004
/* Command identifiers */
#define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0x2EADBEEF
@@ -115,6 +114,30 @@
unsigned int (*busy_cycles)(struct adreno_device *);
};
+/*
+ * struct adreno_recovery_data - Structure that contains all information to
+ * perform gpu recovery from hangs
+ * @ib1 - IB1 that the GPU was executing when hang happened
+ * @context_id - Context which caused the hang
+ * @global_eop - eoptimestamp at time of hang
+ * @rb_buffer - Buffer that holds the commands from good contexts
+ * @rb_size - Number of valid dwords in rb_buffer
+ * @bad_rb_buffer - Buffer that holds commands from the hanging context
+ * @bad_rb_size - Number of valid dwords in bad_rb_buffer
+ * @last_valid_ctx_id - The last context from which commands were placed in
+ * ringbuffer before the GPU hung
+ */
+struct adreno_recovery_data {
+ unsigned int ib1;
+ unsigned int context_id;
+ unsigned int global_eop;
+ unsigned int *rb_buffer;
+ unsigned int rb_size;
+ unsigned int *bad_rb_buffer;
+ unsigned int bad_rb_size;
+ unsigned int last_valid_ctx_id;
+};
+
extern struct adreno_gpudev adreno_a2xx_gpudev;
extern struct adreno_gpudev adreno_a3xx_gpudev;
@@ -266,7 +289,6 @@
{
unsigned int *start = cmds;
- cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
*cmds++ = cp_type0_packet(MH_MMU_MPU_END, 1);
*cmds++ = new_phys_limit;
cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
@@ -279,7 +301,6 @@
{
unsigned int *start = cmds;
- cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
*cmds++ = cp_type0_packet(REG_CP_STATE_DEBUG_INDEX, 1);
*cmds++ = (cur_ctx_bank ? 0 : 0x20);
cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 5a76c86..86fe3f5 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1982,7 +1982,13 @@
0x18000000);
}
- adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442);
+ if (adreno_is_a203(adreno_dev))
+ /* For A203 increase number of clocks that RBBM
+ * will wait before de-asserting Register Clock
+ * Active signal */
+ adreno_regwrite(device, REG_RBBM_CNTL, 0x0000FFFF);
+ else
+ adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442);
adreno_regwrite(device, REG_SQ_VS_PROGRAM, 0x00000000);
adreno_regwrite(device, REG_SQ_PS_PROGRAM, 0x00000000);
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 3eb1aba..5b14a69 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -44,6 +44,8 @@
#define CTXT_FLAGS_TRASHSTATE 0x00020000
/* per context timestamps enabled */
#define CTXT_FLAGS_PER_CONTEXT_TS 0x00040000
+/* Context has caused a GPU hang and recovered properly */
+#define CTXT_FLAGS_GPU_HANG_RECOVERED 0x00008000
struct kgsl_device;
struct adreno_device;
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index 7bb65ca..3cc4bcf 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -699,6 +699,10 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_memdesc **reg_map;
+ void *reg_map_array;
+ int num_iommu_units = 0;
+
mb();
if (adreno_is_a2xx(adreno_dev))
@@ -780,6 +784,10 @@
/* extract the latest ib commands from the buffer */
ib_list.count = 0;
i = 0;
+ /* get the register mapped array in case we are using IOMMU */
+ num_iommu_units = kgsl_mmu_get_reg_map_desc(&device->mmu,
+ ®_map_array);
+ reg_map = reg_map_array;
for (read_idx = 0; read_idx < num_item; ) {
uint32_t this_cmd = rb_copy[read_idx++];
if (adreno_cmd_is_ib(this_cmd)) {
@@ -792,7 +800,10 @@
ib_list.offsets[i],
ib_list.bases[i],
ib_list.sizes[i], 0);
- } else if (this_cmd == cp_type0_packet(MH_MMU_PT_BASE, 1)) {
+ } else if (this_cmd == cp_type0_packet(MH_MMU_PT_BASE, 1) ||
+ (num_iommu_units && this_cmd == (reg_map[0]->gpuaddr +
+ (KGSL_IOMMU_CONTEXT_USER << KGSL_IOMMU_CTX_SHIFT) +
+ KGSL_IOMMU_TTBR0))) {
KGSL_LOG_DUMP(device, "Current pagetable: %x\t"
"pagetable base: %x\n",
@@ -808,6 +819,8 @@
cur_pt_base);
}
}
+ if (num_iommu_units)
+ kfree(reg_map_array);
/* Restore cur_pt_base back to the pt_base of
the process in whose context the GPU hung */
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index afcceee..86a349a 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -120,6 +120,8 @@
goto err;
}
+ continue;
+
err:
if (!adreno_dump_and_recover(rb->device))
wait_time = jiffies + wait_timeout;
@@ -493,7 +495,8 @@
*/
total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
- total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;
+ /* 2 dwords to store the start of command sequence */
+ total_sizedwords += 2;
if (adreno_is_a3xx(adreno_dev))
total_sizedwords += 7;
@@ -521,10 +524,9 @@
rcmd_gpu = rb->buffer_desc.gpuaddr
+ sizeof(uint)*(rb->wptr-total_sizedwords);
- if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
- GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
- GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
- }
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
+
if (flags & KGSL_CMD_FLAGS_PMODE) {
/* disable protected mode error checking */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
@@ -926,8 +928,7 @@
adreno_drawctxt_switch(adreno_dev, drawctxt, flags);
*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
- drawctxt,
- KGSL_CMD_FLAGS_NOT_KERNEL_CMD,
+ drawctxt, 0,
&link[0], (cmds - link));
KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
@@ -943,187 +944,347 @@
*/
adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
#endif
+ /* If context hung and recovered then return error so that the
+ * application may handle it */
+ if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_RECOVERED)
+ return -EDEADLK;
+ else
+ return 0;
- return 0;
}
-int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
- unsigned int *temp_rb_buffer,
- int *rb_size)
+static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb,
+ unsigned int *ptr,
+ bool inc)
{
- struct kgsl_device *device = rb->device;
- unsigned int rb_rptr;
- unsigned int retired_timestamp;
- unsigned int temp_idx = 0;
- unsigned int value;
+ int status = -EINVAL;
unsigned int val1;
- unsigned int val2;
- unsigned int val3;
- unsigned int copy_rb_contents = 0;
- struct kgsl_context *context;
- unsigned int context_id;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int start_ptr = *ptr;
- GSL_RB_GET_READPTR(rb, &rb->rptr);
-
- /* current_context is the context that is presently active in the
- * GPU, i.e the context in which the hang is caused */
- kgsl_sharedmem_readl(&device->memstore, &context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
- KGSL_DRV_ERR(device, "Last context id: %d\n", context_id);
- context = idr_find(&device->context_idr, context_id);
- if (context == NULL) {
- KGSL_DRV_ERR(device,
- "GPU recovery from hang not possible because last"
- " context id is invalid.\n");
- return -EINVAL;
- }
- retired_timestamp = kgsl_readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
- KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
- retired_timestamp);
- /*
- * We need to go back in history by 4 dwords from the current location
- * of read pointer as 4 dwords are read to match the end of a command.
- * Also, take care of wrap around when moving back
- */
- if (rb->rptr >= 4)
- rb_rptr = (rb->rptr - 4) * sizeof(unsigned int);
- else
- rb_rptr = rb->buffer_desc.size -
- ((4 - rb->rptr) * sizeof(unsigned int));
- /* Read the rb contents going backwards to locate end of last
- * sucessfully executed command */
- while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
- kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
- if (value == retired_timestamp) {
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val2, rb_rptr);
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
- /* match the pattern found at the end of a command */
- if ((val1 == 2 &&
- val2 == cp_type3_packet(CP_INTERRUPT, 1)
- && val3 == CP_INT_CNTL__RB_INT_MASK) ||
- (val1 == cp_type3_packet(CP_EVENT_WRITE, 3)
- && val2 == CACHE_FLUSH_TS &&
- val3 == (rb->device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(context_id,
- eoptimestamp)))) {
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
- KGSL_DRV_ERR(device,
- "Found end of last executed "
- "command at offset: %x\n",
- rb_rptr / sizeof(unsigned int));
+ while ((start_ptr / sizeof(unsigned int)) != rb->wptr) {
+ if (inc)
+ start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr,
+ size);
+ else
+ start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
+ size);
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
+ if (KGSL_CMD_IDENTIFIER == val1) {
+ if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
+ start_ptr = adreno_ringbuffer_dec_wrapped(
+ start_ptr, size);
+ *ptr = start_ptr;
+ status = 0;
break;
- } else {
- if (rb_rptr < (3 * sizeof(unsigned int)))
- rb_rptr = rb->buffer_desc.size -
- (3 * sizeof(unsigned int))
- + rb_rptr;
- else
- rb_rptr -= (3 * sizeof(unsigned int));
+ }
+ }
+ return status;
+}
+
+static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb,
+ unsigned int *rb_rptr,
+ unsigned int global_eop,
+ bool inc)
+{
+ int status = -EINVAL;
+ unsigned int temp_rb_rptr = *rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int val[3];
+ int i = 0;
+ bool check = false;
+
+ if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr)
+ return status;
+
+ do {
+ /* when decrementing we need to decrement first and
+ * then read make sure we cover all the data */
+ if (!inc)
+ temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
+ temp_rb_rptr, size);
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
+ temp_rb_rptr);
+
+ if (check && ((inc && val[i] == global_eop) ||
+ (!inc && (val[i] ==
+ cp_type3_packet(CP_MEM_WRITE, 2) ||
+ val[i] == CACHE_FLUSH_TS)))) {
+ /* decrement i, i.e i = (i - 1 + 3) % 3 if
+ * we are going forward, else increment i */
+ i = (i + 2) % 3;
+ if (val[i] == rb->device->memstore.gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp)) {
+ int j = ((i + 2) % 3);
+ if ((inc && (val[j] == CACHE_FLUSH_TS ||
+ val[j] == cp_type3_packet(
+ CP_MEM_WRITE, 2))) ||
+ (!inc && val[j] == global_eop)) {
+ /* Found the global eop */
+ status = 0;
+ break;
+ }
}
+ /* if no match found then increment i again
+ * since we decremented before matching */
+ i = (i + 1) % 3;
+ }
+ if (inc)
+ temp_rb_rptr = adreno_ringbuffer_inc_wrapped(
+ temp_rb_rptr, size);
+
+ i = (i + 1) % 3;
+ if (2 == i)
+ check = true;
+ } while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr);
+ /* temp_rb_rptr points to the command stream after global eop,
+ * move backward till the start of command sequence */
+ if (!status) {
+ status = _find_start_of_cmd_seq(rb, &temp_rb_rptr, false);
+ if (!status) {
+ *rb_rptr = temp_rb_rptr;
+ KGSL_DRV_ERR(rb->device,
+ "Offset of cmd sequence after eop timestamp: 0x%x\n",
+ temp_rb_rptr / sizeof(unsigned int));
+ }
+ }
+ if (status)
+ KGSL_DRV_ERR(rb->device,
+ "Failed to find the command sequence after eop timestamp\n");
+ return status;
+}
+
+static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb,
+ unsigned int *rb_rptr,
+ unsigned int ib1)
+{
+ int status = -EINVAL;
+ unsigned int temp_rb_rptr = *rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int val[2];
+ int i = 0;
+ bool check = false;
+ bool ctx_switch = false;
+
+ while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
+
+ if (check && val[i] == ib1) {
+ /* decrement i, i.e i = (i - 1 + 2) % 2 */
+ i = (i + 1) % 2;
+ if (adreno_cmd_is_ib(val[i])) {
+ /* go till start of command sequence */
+ status = _find_start_of_cmd_seq(rb,
+ &temp_rb_rptr, false);
+ KGSL_DRV_ERR(rb->device,
+ "Found the hanging IB at offset 0x%x\n",
+ temp_rb_rptr / sizeof(unsigned int));
+ break;
+ }
+			/* if no match then increment i since we decremented
+			 * before checking */
+ i = (i + 1) % 2;
+ }
+ /* Make sure you do not encounter a context switch twice, we can
+ * encounter it once for the bad context as the start of search
+ * can point to the context switch */
+ if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
+ if (ctx_switch) {
+ KGSL_DRV_ERR(rb->device,
+ "Context switch encountered before bad "
+ "IB found\n");
+ break;
+ }
+ ctx_switch = true;
+ }
+ i = (i + 1) % 2;
+ if (1 == i)
+ check = true;
+ temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
+ size);
+ }
+ if (!status)
+ *rb_rptr = temp_rb_rptr;
+ return status;
+}
+
+static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,
+ unsigned int rb_rptr)
+{
+ unsigned int temp_rb_rptr = rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int val[2];
+ int i = 0;
+ bool check = false;
+ bool cmd_start = false;
+
+ /* Go till the start of the ib sequence and turn on preamble */
+ while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
+ if (check && KGSL_START_OF_IB_IDENTIFIER == val[i]) {
+ /* decrement i */
+ i = (i + 1) % 2;
+ if (val[i] == cp_nop_packet(4)) {
+ temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
+ temp_rb_rptr, size);
+ kgsl_sharedmem_writel(&rb->buffer_desc,
+ temp_rb_rptr, cp_nop_packet(1));
+ }
+ KGSL_DRV_ERR(rb->device,
+ "Turned preamble on at offset 0x%x\n",
+ temp_rb_rptr / 4);
+ break;
+ }
+ /* If you reach beginning of next command sequence then exit
+ * First command encountered is the current one so don't break
+ * on that. */
+ if (KGSL_CMD_IDENTIFIER == val[i]) {
+ if (cmd_start)
+ break;
+ cmd_start = true;
}
- if (rb_rptr == 0)
- rb_rptr = rb->buffer_desc.size - sizeof(unsigned int);
- else
- rb_rptr -= sizeof(unsigned int);
+ i = (i + 1) % 2;
+ if (1 == i)
+ check = true;
+ temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
+ size);
}
+}
- if ((rb_rptr / sizeof(unsigned int)) == rb->wptr) {
- KGSL_DRV_ERR(device,
- "GPU recovery from hang not possible because last"
- " successful timestamp is overwritten\n");
- return -EINVAL;
- }
- /* rb_rptr is now pointing to the first dword of the command following
- * the last sucessfully executed command sequence. Assumption is that
- * GPU is hung in the command sequence pointed by rb_rptr */
- /* make sure the GPU is not hung in a command submitted by kgsl
- * itself */
- kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
- adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size));
- if (val1 == cp_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
- KGSL_DRV_ERR(device,
- "GPU recovery from hang not possible because "
- "of hang in kgsl command\n");
- return -EINVAL;
- }
+static void _copy_valid_rb_content(struct adreno_ringbuffer *rb,
+ unsigned int rb_rptr, unsigned int *temp_rb_buffer,
+ int *rb_size, unsigned int *bad_rb_buffer,
+ int *bad_rb_size,
+ int *last_valid_ctx_id)
+{
+ unsigned int good_rb_idx = 0, cmd_start_idx = 0;
+ unsigned int val1 = 0;
+ struct kgsl_context *k_ctxt;
+ struct adreno_context *a_ctxt;
+ unsigned int bad_rb_idx = 0;
+ int copy_rb_contents = 0;
+ unsigned int temp_rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int good_cmd_start_idx = 0;
+ /* Walk the rb from the context switch. Omit any commands
+ * for an invalid context. */
while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
- kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
+
+ if (KGSL_CMD_IDENTIFIER == val1) {
+ /* Start is the NOP dword that comes before
+ * KGSL_CMD_IDENTIFIER */
+ cmd_start_idx = bad_rb_idx - 1;
+ if (copy_rb_contents)
+ good_cmd_start_idx = good_rb_idx - 1;
+ }
+
/* check for context switch indicator */
- if (value == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
- kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
- BUG_ON(value != cp_type3_packet(CP_MEM_WRITE, 2));
- kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
- BUG_ON(val1 != (device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context)));
- kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
+ if (val1 == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
+ unsigned int temp_idx, val2;
+ /* increment by 3 to get to the context_id */
+ temp_rb_rptr = rb_rptr + (3 * sizeof(unsigned int)) %
+ size;
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
+ temp_rb_rptr);
- /*
- * If other context switches were already lost and
- * and the current context is the one that is hanging,
- * then we cannot recover. Print an error message
- * and leave.
- */
-
- if ((copy_rb_contents == 0) && (value == context_id)) {
- KGSL_DRV_ERR(device, "GPU recovery could not "
- "find the previous context\n");
- return -EINVAL;
- }
-
- /*
- * If we were copying the commands and got to this point
- * then we need to remove the 3 commands that appear
- * before KGSL_CONTEXT_TO_MEM_IDENTIFIER
- */
- if (temp_idx)
- temp_idx -= 3;
/* if context switches to a context that did not cause
* hang then start saving the rb contents as those
* commands can be executed */
- if (value != context_id) {
+ k_ctxt = idr_find(&rb->device->context_idr, val2);
+ if (k_ctxt) {
+ a_ctxt = k_ctxt->devctxt;
+
+ /* If we are changing to a good context and were not
+ * copying commands then copy over commands to the good
+ * context */
+ if (!copy_rb_contents && ((k_ctxt &&
+ !(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) ||
+ !k_ctxt)) {
+ for (temp_idx = cmd_start_idx;
+ temp_idx < bad_rb_idx;
+ temp_idx++)
+ temp_rb_buffer[good_rb_idx++] =
+ bad_rb_buffer[temp_idx];
+ *last_valid_ctx_id = val2;
copy_rb_contents = 1;
- temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
- temp_rb_buffer[temp_idx++] =
- KGSL_CMD_IDENTIFIER;
- temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
- temp_rb_buffer[temp_idx++] =
- KGSL_CONTEXT_TO_MEM_IDENTIFIER;
- temp_rb_buffer[temp_idx++] =
- cp_type3_packet(CP_MEM_WRITE, 2);
- temp_rb_buffer[temp_idx++] = val1;
- temp_rb_buffer[temp_idx++] = value;
- } else {
+ } else if (copy_rb_contents && k_ctxt &&
+ (a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) {
+ /* If we are changing to bad context then remove
+ * the dwords we copied for this sequence from
+ * the good buffer */
+ good_rb_idx = good_cmd_start_idx;
copy_rb_contents = 0;
}
- } else if (copy_rb_contents)
- temp_rb_buffer[temp_idx++] = value;
+ }
+ }
+
+ if (copy_rb_contents)
+ temp_rb_buffer[good_rb_idx++] = val1;
+ /* Copy both good and bad commands for replay to the bad
+ * buffer */
+ bad_rb_buffer[bad_rb_idx++] = val1;
+
+ rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, size);
+ }
+ *rb_size = good_rb_idx;
+ *bad_rb_size = bad_rb_idx;
+}
+
+int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+ struct adreno_recovery_data *rec_data)
+{
+ int status;
+ struct kgsl_device *device = rb->device;
+ unsigned int rb_rptr = rb->wptr * sizeof(unsigned int);
+ struct kgsl_context *context;
+ struct adreno_context *adreno_context;
+
+ context = idr_find(&device->context_idr, rec_data->context_id);
+
+ /* Look for the command stream that is right after the global eop */
+ status = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
+ rec_data->global_eop + 1, false);
+ if (status)
+ goto done;
+
+ if (context) {
+ adreno_context = context->devctxt;
+
+ if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
+ if (rec_data->ib1) {
+ status = _find_hanging_ib_sequence(rb, &rb_rptr,
+ rec_data->ib1);
+ if (status)
+ goto copy_rb_contents;
+ }
+ _turn_preamble_on_for_ib_seq(rb, rb_rptr);
+ } else {
+ status = -EINVAL;
+ }
}
- *rb_size = temp_idx;
- return 0;
+copy_rb_contents:
+ _copy_valid_rb_content(rb, rb_rptr, rec_data->rb_buffer,
+ &rec_data->rb_size,
+ rec_data->bad_rb_buffer,
+ &rec_data->bad_rb_size,
+ &rec_data->last_valid_ctx_id);
+	/* If we failed to get the hanging IB sequence, or preambles are not
+	 * supported, then we cannot execute commands from the bad context */
+ if (status) {
+ rec_data->bad_rb_size = 0;
+ status = 0;
+ }
+ /* If there is no context then that means there are no commands for
+ * good case */
+ if (!context)
+ rec_data->rb_size = 0;
+done:
+ return status;
}
void
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 6429f46..4cc57c2 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -27,6 +27,7 @@
struct kgsl_device;
struct kgsl_device_private;
+struct adreno_recovery_data;
#define GSL_RB_MEMPTRS_SCRATCH_COUNT 8
struct kgsl_rbmemptrs {
@@ -114,8 +115,7 @@
void kgsl_cp_intrcallback(struct kgsl_device *device);
int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
- unsigned int *temp_rb_buffer,
- int *rb_size);
+ struct adreno_recovery_data *rec_data);
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
@@ -139,4 +139,11 @@
return (val + sizeof(unsigned int)) % size;
}
+/* Decrement a value by 4 bytes with wrap-around based on size */
+static inline unsigned int adreno_ringbuffer_dec_wrapped(unsigned int val,
+ unsigned int size)
+{
+ return (val + size - sizeof(unsigned int)) % size;
+}
+
#endif /* __ADRENO_RINGBUFFER_H */
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 2a9f564..62e1521 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -450,6 +450,7 @@
device->state == KGSL_STATE_ACTIVE &&
device->requested_state == KGSL_STATE_NONE) {
kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+ kgsl_pwrscale_idle(device, 1);
if (kgsl_pwrctrl_sleep(device) != 0)
mod_timer(&device->idle_timer,
jiffies +
@@ -897,6 +898,9 @@
{
struct rb_node *node = private->mem_rb.rb_node;
+ if (!kgsl_mmu_gpuaddr_in_range(gpuaddr))
+ return NULL;
+
while (node != NULL) {
struct kgsl_mem_entry *entry;
@@ -1112,6 +1116,19 @@
goto done;
}
+ /*
+ * Put a reasonable upper limit on the number of IBs that can be
+ * submitted
+ */
+
+ if (param->numibs > 10000) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "Too many IBs submitted. count: %d max 10000\n",
+ param->numibs);
+ result = -EINVAL;
+ goto done;
+ }
+
ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
GFP_KERNEL);
if (!ibdesc) {
@@ -2257,7 +2274,8 @@
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- result = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ result = remap_pfn_range(vma, vma->vm_start,
+ device->memstore.physaddr >> PAGE_SHIFT,
vma_size, vma->vm_page_prot);
if (result != 0)
KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
@@ -2311,7 +2329,7 @@
/* Handle leagacy behavior for memstore */
- if (vma_offset == device->memstore.physaddr)
+ if (vma_offset == device->memstore.gpuaddr)
return kgsl_mmap_memstore(device, vma);
/* Find a chunk of GPU memory */
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 998eaab..edccff1 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -732,7 +732,6 @@
.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
.mmu_enable_clk = NULL,
.mmu_disable_clk_on_ts = NULL,
- .mmu_get_hwpagetable_asid = NULL,
.mmu_get_pt_lsb = NULL,
.mmu_get_reg_map_desc = NULL,
};
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 25d0463..016771b 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -292,14 +292,6 @@
struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
if (iommu_pt->domain)
iommu_domain_free(iommu_pt->domain);
- if (iommu_pt->iommu) {
- if ((KGSL_IOMMU_ASID_REUSE == iommu_pt->asid) &&
- iommu_pt->iommu->asid_reuse)
- iommu_pt->iommu->asid_reuse--;
- if (!iommu_pt->iommu->asid_reuse ||
- (KGSL_IOMMU_ASID_REUSE != iommu_pt->asid))
- clear_bit(iommu_pt->asid, iommu_pt->iommu->asids);
- }
kfree(iommu_pt);
}
@@ -621,20 +613,15 @@
unsigned int context_id)
{
if (mmu->flags & KGSL_FLAGS_STARTED) {
- struct kgsl_iommu *iommu = mmu->priv;
- struct kgsl_iommu_pt *iommu_pt = pagetable->priv;
/* page table not current, then setup mmu to use new
* specified page table
*/
if (mmu->hwpagetable != pagetable) {
unsigned int flags = 0;
mmu->hwpagetable = pagetable;
- /* force tlb flush if asid is reused */
- if (iommu->asid_reuse &&
- (KGSL_IOMMU_ASID_REUSE == iommu_pt->asid))
- flags |= KGSL_MMUFLAGS_TLBFLUSH;
flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
- mmu->device->id);
+ mmu->device->id) |
+ KGSL_MMUFLAGS_TLBFLUSH;
kgsl_setstate(mmu, context_id,
KGSL_MMUFLAGS_PTUPDATE | flags);
}
@@ -657,14 +644,6 @@
sizeof(struct kgsl_iommu));
return -ENOMEM;
}
- iommu->asids = kzalloc(BITS_TO_LONGS(KGSL_IOMMU_MAX_ASIDS) *
- sizeof(unsigned long), GFP_KERNEL);
- if (!iommu->asids) {
- KGSL_CORE_ERR("kzalloc(%d) failed\n",
- sizeof(struct kgsl_iommu));
- status = -ENOMEM;
- goto done;
- }
mmu->priv = iommu;
status = kgsl_get_iommu_ctxt(mmu);
@@ -684,7 +663,6 @@
__func__);
done:
if (status) {
- kfree(iommu->asids);
kfree(iommu);
mmu->priv = NULL;
}
@@ -718,7 +696,6 @@
goto err;
}
iommu_pt = mmu->priv_bank_table->priv;
- iommu_pt->asid = 1;
}
mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
/* Return error if the default pagetable doesn't exist */
@@ -740,14 +717,6 @@
goto err;
}
}
- /*
- * The dafault pagetable always has asid 0 assigned by the iommu driver
- * and asid 1 is assigned to the private context.
- */
- iommu_pt = mmu->defaultpagetable->priv;
- iommu_pt->asid = 0;
- set_bit(0, iommu->asids);
- set_bit(1, iommu->asids);
return status;
err:
for (i--; i >= 0; i--) {
@@ -818,12 +787,6 @@
*/
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
- /* Make sure that the ASID of the priv bank is set to 1.
- * When we a different pagetable for the priv bank then the
- * iommu driver sets the ASID to 0 instead of 1 */
- KGSL_IOMMU_SET_IOMMU_REG(iommu->iommu_units[i].reg_map.hostptr,
- KGSL_IOMMU_CONTEXT_PRIV,
- CONTEXTIDR, 1);
for (j = 0; j < iommu_unit->dev_count; j++)
iommu_unit->dev[j].pt_lsb = KGSL_IOMMMU_PT_LSB(
KGSL_IOMMU_GET_IOMMU_REG(
@@ -831,10 +794,6 @@
iommu_unit->dev[j].ctx_id,
TTBR0));
}
- iommu->asid = KGSL_IOMMU_GET_IOMMU_REG(
- iommu->iommu_units[0].reg_map.hostptr,
- KGSL_IOMMU_CONTEXT_USER,
- CONTEXTIDR);
kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
mmu->flags |= KGSL_FLAGS_STARTED;
@@ -955,7 +914,6 @@
kgsl_mmu_putpagetable(mmu->priv_bank_table);
if (mmu->defaultpagetable)
kgsl_mmu_putpagetable(mmu->defaultpagetable);
- kfree(iommu->asids);
kfree(iommu);
return 0;
@@ -981,47 +939,6 @@
}
/*
- * kgsl_iommu_get_hwpagetable_asid - Returns asid(application space ID) for a
- * pagetable
- * @mmu - Pointer to mmu structure
- *
- * Allocates an asid to a IOMMU domain if it does not already have one. asid's
- * are unique identifiers for pagetable that can be used to selectively flush
- * tlb entries of the IOMMU unit.
- * Return - asid to be used with the IOMMU domain
- */
-static int kgsl_iommu_get_hwpagetable_asid(struct kgsl_mmu *mmu)
-{
- struct kgsl_iommu *iommu = mmu->priv;
- struct kgsl_iommu_pt *iommu_pt = mmu->hwpagetable->priv;
-
- /*
- * If the iommu pagetable does not have any asid assigned and is not the
- * default pagetable then assign asid.
- */
- if (!iommu_pt->asid && iommu_pt != mmu->defaultpagetable->priv) {
- iommu_pt->asid = find_first_zero_bit(iommu->asids,
- KGSL_IOMMU_MAX_ASIDS);
- /* No free bits means reuse asid */
- if (iommu_pt->asid >= KGSL_IOMMU_MAX_ASIDS) {
- iommu_pt->asid = KGSL_IOMMU_ASID_REUSE;
- iommu->asid_reuse++;
- }
- set_bit(iommu_pt->asid, iommu->asids);
- /*
- * Store pointer to asids list so that during pagetable destroy
- * the asid assigned to this pagetable may be cleared
- */
- iommu_pt->iommu = iommu;
- }
- /* Return the asid + the constant part of asid that never changes */
- return (iommu_pt->asid & (KGSL_IOMMU_CONTEXTIDR_ASID_MASK <<
- KGSL_IOMMU_CONTEXTIDR_ASID_SHIFT)) +
- (iommu->asid & ~(KGSL_IOMMU_CONTEXTIDR_ASID_MASK <<
- KGSL_IOMMU_CONTEXTIDR_ASID_SHIFT));
-}
-
-/*
* kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush IOMMU tlb
* of the primary context bank
* @mmu - Pointer to mmu structure
@@ -1066,15 +983,6 @@
temp = KGSL_IOMMU_GET_IOMMU_REG(
iommu->iommu_units[i].reg_map.hostptr,
KGSL_IOMMU_CONTEXT_USER, TTBR0);
- /* Set asid */
- KGSL_IOMMU_SET_IOMMU_REG(
- iommu->iommu_units[i].reg_map.hostptr,
- KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR,
- kgsl_iommu_get_hwpagetable_asid(mmu));
- mb();
- temp = KGSL_IOMMU_GET_IOMMU_REG(
- iommu->iommu_units[i].reg_map.hostptr,
- KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR);
}
}
/* Flush tlb */
@@ -1082,8 +990,8 @@
for (i = 0; i < iommu->unit_count; i++) {
KGSL_IOMMU_SET_IOMMU_REG(
iommu->iommu_units[i].reg_map.hostptr,
- KGSL_IOMMU_CONTEXT_USER, CTX_TLBIASID,
- kgsl_iommu_get_hwpagetable_asid(mmu));
+ KGSL_IOMMU_CONTEXT_USER, CTX_TLBIALL,
+ 1);
mb();
}
}
@@ -1139,7 +1047,6 @@
.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
.mmu_enable_clk = kgsl_iommu_enable_clk,
.mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
- .mmu_get_hwpagetable_asid = kgsl_iommu_get_hwpagetable_asid,
.mmu_get_pt_lsb = kgsl_iommu_get_pt_lsb,
.mmu_get_reg_map_desc = kgsl_iommu_get_reg_map_desc,
};
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index 354a5cf..f14db93 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -23,15 +23,8 @@
#define KGSL_IOMMU_TTBR0_PA_MASK 0x0003FFFF
#define KGSL_IOMMU_TTBR0_PA_SHIFT 14
#define KGSL_IOMMU_CTX_TLBIALL 0x800
-#define KGSL_IOMMU_CONTEXTIDR 0x8
-#define KGSL_IOMMU_CONTEXTIDR_ASID_MASK 0xFF
-#define KGSL_IOMMU_CONTEXTIDR_ASID_SHIFT 0
-#define KGSL_IOMMU_CTX_TLBIASID 0x804
#define KGSL_IOMMU_CTX_SHIFT 12
-#define KGSL_IOMMU_MAX_ASIDS 256
-#define KGSL_IOMMU_ASID_REUSE 2
-
/*
* Max number of iommu units that the gpu core can have
* On APQ8064, KGSL can control a maximum of 2 IOMMU units.
@@ -106,10 +99,6 @@
* @clk_event_queued: Indicates whether an event to disable clocks
* is already queued or not
* @device: Pointer to kgsl device
- * @asids: A bit structure indicating which id's are presently used
- * @asid: Contains the initial value of IOMMU_CONTEXTIDR when a domain
- * is first attached
- * asid_reuse: Holds the number of times the reuse asid is reused
*/
struct kgsl_iommu {
struct kgsl_iommu_unit iommu_units[KGSL_IOMMU_MAX_UNITS];
@@ -117,21 +106,16 @@
unsigned int iommu_last_cmd_ts;
bool clk_event_queued;
struct kgsl_device *device;
- unsigned long *asids;
- unsigned int asid;
- unsigned int asid_reuse;
};
/*
* struct kgsl_iommu_pt - Iommu pagetable structure private to kgsl driver
* @domain: Pointer to the iommu domain that contains the iommu pagetable
* @iommu: Pointer to iommu structure
- * @asid: The asid assigned to this domain
*/
struct kgsl_iommu_pt {
struct iommu_domain *domain;
struct kgsl_iommu *iommu;
- unsigned int asid;
};
#endif
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index d06ce45..bc6ec8e 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -136,7 +136,6 @@
(struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
int (*mmu_enable_clk)
(struct kgsl_mmu *mmu, int ctx_id);
- int (*mmu_get_hwpagetable_asid)(struct kgsl_mmu *mmu);
int (*mmu_get_pt_lsb)(struct kgsl_mmu *mmu,
unsigned int unit_id,
enum kgsl_iommu_context_id ctx_id);
@@ -278,14 +277,6 @@
return 0;
}
-static inline int kgsl_mmu_get_hwpagetable_asid(struct kgsl_mmu *mmu)
-{
- if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_hwpagetable_asid)
- return mmu->mmu_ops->mmu_get_hwpagetable_asid(mmu);
- else
- return 0;
-}
-
static inline int kgsl_mmu_enable_clk(struct kgsl_mmu *mmu,
int ctx_id)
{
@@ -302,4 +293,10 @@
mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ts_valid);
}
+static inline int kgsl_mmu_gpuaddr_in_range(unsigned int gpuaddr)
+{
+ return ((gpuaddr >= KGSL_PAGETABLE_BASE) &&
+ (gpuaddr < (KGSL_PAGETABLE_BASE + kgsl_mmu_get_ptsize())));
+}
+
#endif /* __KGSL_MMU_H */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 6df073a..bfe6957 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -121,7 +121,7 @@
return count;
mutex_lock(&device->mutex);
- for (i = 0; i < pwr->num_pwrlevels; i++) {
+ for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
if (max)
pwr->thermal_pwrlevel = i;
@@ -129,7 +129,7 @@
}
}
- if (i == pwr->num_pwrlevels)
+ if (i == (pwr->num_pwrlevels - 1))
goto done;
/*
@@ -640,7 +640,7 @@
mutex_lock(&device->mutex);
if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
- kgsl_pwrscale_idle(device);
+ kgsl_pwrscale_idle(device, 0);
if (kgsl_pwrctrl_sleep(device) != 0) {
mod_timer(&device->idle_timer,
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 6fb9326..f6277b3 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -237,21 +237,18 @@
void kgsl_pwrscale_busy(struct kgsl_device *device)
{
if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->busy)
- if ((!device->pwrscale.gpu_busy) &&
- (device->requested_state != KGSL_STATE_SLUMBER))
+ if (device->requested_state != KGSL_STATE_SLUMBER)
device->pwrscale.policy->busy(device,
&device->pwrscale);
- device->pwrscale.gpu_busy = 1;
}
-void kgsl_pwrscale_idle(struct kgsl_device *device)
+void kgsl_pwrscale_idle(struct kgsl_device *device, unsigned int ignore_idle)
{
if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->idle)
if (device->requested_state != KGSL_STATE_SLUMBER &&
device->requested_state != KGSL_STATE_SLEEP)
device->pwrscale.policy->idle(device,
- &device->pwrscale);
- device->pwrscale.gpu_busy = 0;
+ &device->pwrscale, ignore_idle);
}
EXPORT_SYMBOL(kgsl_pwrscale_idle);
diff --git a/drivers/gpu/msm/kgsl_pwrscale.h b/drivers/gpu/msm/kgsl_pwrscale.h
index 34698cd..ba9b1af 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.h
+++ b/drivers/gpu/msm/kgsl_pwrscale.h
@@ -23,7 +23,8 @@
void (*close)(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale);
void (*idle)(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale);
+ struct kgsl_pwrscale *pwrscale,
+ unsigned int ignore_idle);
void (*busy)(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale);
void (*sleep)(struct kgsl_device *device,
@@ -36,7 +37,6 @@
struct kgsl_pwrscale_policy *policy;
struct kobject kobj;
void *priv;
- int gpu_busy;
int enabled;
};
@@ -64,7 +64,8 @@
struct kgsl_pwrscale_policy *policy);
void kgsl_pwrscale_detach_policy(struct kgsl_device *device);
-void kgsl_pwrscale_idle(struct kgsl_device *device);
+void kgsl_pwrscale_idle(struct kgsl_device *device,
+ unsigned int ignore_idle);
void kgsl_pwrscale_busy(struct kgsl_device *device);
void kgsl_pwrscale_sleep(struct kgsl_device *device);
void kgsl_pwrscale_wake(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/kgsl_pwrscale_idlestats.c b/drivers/gpu/msm/kgsl_pwrscale_idlestats.c
index 4102302..fc58dd1 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_idlestats.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_idlestats.c
@@ -131,7 +131,7 @@
}
static void idlestats_idle(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
+ struct kgsl_pwrscale *pwrscale, unsigned int ignore_idle)
{
int i, nr_cpu;
struct idlestats_priv *priv = pwrscale->priv;
diff --git a/drivers/gpu/msm/kgsl_pwrscale_msm.c b/drivers/gpu/msm/kgsl_pwrscale_msm.c
index 61d4b2d..baa0407 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_msm.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_msm.c
@@ -26,6 +26,7 @@
struct msm_dcvs_idle idle_source;
struct msm_dcvs_freq freq_sink;
struct msm_dcvs_core_info *core_info;
+ int gpu_busy;
};
static int msm_idle_enable(struct msm_dcvs_idle *self,
@@ -89,29 +90,37 @@
struct kgsl_pwrscale *pwrscale)
{
struct msm_priv *priv = pwrscale->priv;
- if (priv->enabled)
+ if (priv->enabled && !priv->gpu_busy) {
msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_EXIT, 0);
+ priv->gpu_busy = 1;
+ }
return;
}
static void msm_idle(struct kgsl_device *device,
- struct kgsl_pwrscale *pwrscale)
+ struct kgsl_pwrscale *pwrscale, unsigned int ignore_idle)
{
struct msm_priv *priv = pwrscale->priv;
- unsigned int rb_rptr, rb_wptr;
- kgsl_regread(device, REG_CP_RB_RPTR, &rb_rptr);
- kgsl_regread(device, REG_CP_RB_WPTR, &rb_wptr);
- if (priv->enabled && (rb_rptr == rb_wptr))
- msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
-
+ if (priv->enabled && priv->gpu_busy)
+ if (device->ftbl->isidle(device)) {
+ msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
+ priv->gpu_busy = 0;
+ }
return;
}
static void msm_sleep(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale)
{
- /* do we need to reset any parameters here? */
+ struct msm_priv *priv = pwrscale->priv;
+
+ if (priv->enabled && priv->gpu_busy) {
+ msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
+ priv->gpu_busy = 0;
+ }
+
+ return;
}
static int msm_init(struct kgsl_device *device,
@@ -159,10 +168,10 @@
ret = msm_dcvs_freq_sink_register(&priv->freq_sink);
if (ret >= 0) {
if (device->ftbl->isidle(device)) {
- device->pwrscale.gpu_busy = 0;
+ priv->gpu_busy = 0;
msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
} else {
- device->pwrscale.gpu_busy = 1;
+ priv->gpu_busy = 1;
}
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
index d6c5e66..1b029b1 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -119,16 +119,19 @@
device->pwrctrl.default_pwrlevel);
}
-static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale,
+ unsigned int ignore_idle)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct tz_priv *priv = pwrscale->priv;
struct kgsl_power_stats stats;
int val, idle;
+ if (ignore_idle)
+ return;
+
/* In "performance" mode the clock speed always stays
the same */
-
if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
return;
diff --git a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h b/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
index 7034cb0..070222e 100644
--- a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
+++ b/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
@@ -21,7 +21,7 @@
#define _MC_DRV_PLATFORM_H_
/** MobiCore Interrupt for Qualcomm */
-#define MC_INTR_SSIQ 218
+#define MC_INTR_SSIQ 280
/** Use SMC for fastcalls */
#define MC_SMC_FASTCALL
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index b050db2..53fec5b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -868,6 +868,16 @@
Provides interface for measuring the current on specific power rails
through the channels on ADC1158 ADC
+config SENSORS_QPNP_ADC_VOLTAGE
+ tristate "Support for Qualcomm QPNP Voltage ADC"
+ depends on SPMI
+ help
+ This is the VADC arbiter driver for Qualcomm QPNP ADC Chip.
+
+ The driver supports reading the HKADC, XOADC through the ADC AMUX arbiter.
+ The VADC includes support for the conversion sequencer. The driver supports
+ reading the ADC through the AMUX channels for external pull-ups simultaneously.
+
config SENSORS_PC87360
tristate "National Semiconductor PC87360 family"
depends on !PPC
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 228c4e9..2ff9454 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -130,6 +130,7 @@
obj-$(CONFIG_SENSORS_MSM_ADC) += msm_adc.o m_adcproc.o
obj-$(CONFIG_SENSORS_PM8XXX_ADC) += pm8xxx-adc.o pm8xxx-adc-scale.o
obj-$(CONFIG_SENSORS_EPM_ADC) += epm_adc.o
+obj-$(CONFIG_SENSORS_QPNP_ADC_VOLTAGE) += qpnp-adc-voltage.o qpnp-adc-common.o
obj-$(CONFIG_PMBUS) += pmbus/
diff --git a/drivers/hwmon/pm8xxx-adc.c b/drivers/hwmon/pm8xxx-adc.c
index aa9acf7..8e35252 100644
--- a/drivers/hwmon/pm8xxx-adc.c
+++ b/drivers/hwmon/pm8xxx-adc.c
@@ -23,7 +23,6 @@
#include <linux/hwmon.h>
#include <linux/module.h>
#include <linux/debugfs.h>
-#include <linux/wakelock.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/hwmon-sysfs.h>
@@ -123,6 +122,7 @@
#define PM8XXX_ADC_PA_THERM_VREG_UA_LOAD 100000
#define PM8XXX_ADC_HWMON_NAME_LENGTH 32
#define PM8XXX_ADC_BTM_INTERVAL_MAX 0x14
+#define PM8XXX_ADC_COMPLETION_TIMEOUT (2 * HZ)
struct pm8xxx_adc {
struct device *dev;
@@ -141,7 +141,6 @@
struct work_struct cool_work;
uint32_t mpp_base;
struct device *hwmon;
- struct wake_lock adc_wakelock;
int msm_suspend_check;
struct pm8xxx_adc_amux_properties *conv;
struct pm8xxx_adc_arb_btm_param batt;
@@ -223,7 +222,6 @@
pr_err("PM8xxx ADC request made after suspend_noirq "
"with channel: %d\n", channel);
data_arb_cntrl |= PM8XXX_ADC_ARB_USRP_CNTRL1_EN_ARB;
- wake_lock(&adc_pmic->adc_wakelock);
}
/* Write twice to the CNTRL register for the arbiter settings
@@ -242,8 +240,7 @@
INIT_COMPLETION(adc_pmic->adc_rslt_completion);
rc = pm8xxx_writeb(adc_pmic->dev->parent,
PM8XXX_ADC_ARB_USRP_CNTRL1, data_arb_cntrl);
- } else
- wake_unlock(&adc_pmic->adc_wakelock);
+ }
return 0;
}
@@ -734,7 +731,23 @@
goto fail;
}
- wait_for_completion(&adc_pmic->adc_rslt_completion);
+ rc = wait_for_completion_timeout(&adc_pmic->adc_rslt_completion,
+ PM8XXX_ADC_COMPLETION_TIMEOUT);
+ if (!rc) {
+ u8 data_arb_usrp_cntrl1 = 0;
+ rc = pm8xxx_adc_read_reg(PM8XXX_ADC_ARB_USRP_CNTRL1,
+ &data_arb_usrp_cntrl1);
+ if (rc < 0)
+ goto fail;
+ if (data_arb_usrp_cntrl1 == (PM8XXX_ADC_ARB_USRP_CNTRL1_EOC |
+ PM8XXX_ADC_ARB_USRP_CNTRL1_EN_ARB))
+ pr_debug("End of conversion status set\n");
+ else {
+ pr_err("EOC interrupt not received\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+ }
rc = pm8xxx_adc_read_adc_code(&result->adc_code);
if (rc) {
@@ -1134,7 +1147,6 @@
struct pm8xxx_adc *adc_pmic = pmic_adc;
int i;
- wake_lock_destroy(&adc_pmic->adc_wakelock);
platform_set_drvdata(pdev, NULL);
pmic_adc = NULL;
if (!pa_therm) {
@@ -1236,8 +1248,6 @@
disable_irq_nosync(adc_pmic->btm_cool_irq);
platform_set_drvdata(pdev, adc_pmic);
- wake_lock_init(&adc_pmic->adc_wakelock, WAKE_LOCK_SUSPEND,
- "pm8xxx_adc_wakelock");
adc_pmic->msm_suspend_check = 0;
pmic_adc = adc_pmic;
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
new file mode 100644
index 0000000..c8fe798
--- /dev/null
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -0,0 +1,258 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/platform_device.h>
+
+/* Min ADC code represents 0V */
+#define QPNP_VADC_MIN_ADC_CODE 0x6000
+/* Max ADC code represents full-scale range of 1.8V */
+#define QPNP_VADC_MAX_ADC_CODE 0xA800
+
+int32_t qpnp_adc_scale_default(int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ bool negative_rawfromoffset = 0, negative_offset = 0;
+ int64_t scale_voltage = 0;
+
+ if (!chan_properties || !chan_properties->offset_gain_numerator ||
+ !chan_properties->offset_gain_denominator || !adc_properties
+ || !adc_chan_result)
+ return -EINVAL;
+
+ scale_voltage = (adc_code -
+ chan_properties->adc_graph[CALIB_ABSOLUTE].adc_gnd)
+ * chan_properties->adc_graph[CALIB_ABSOLUTE].dx;
+ if (scale_voltage < 0) {
+ negative_offset = 1;
+ scale_voltage = -scale_voltage;
+ }
+ do_div(scale_voltage,
+ chan_properties->adc_graph[CALIB_ABSOLUTE].dy);
+ if (negative_offset)
+ scale_voltage = -scale_voltage;
+ scale_voltage += chan_properties->adc_graph[CALIB_ABSOLUTE].dx;
+
+ if (scale_voltage < 0) {
+ if (adc_properties->bipolar) {
+ scale_voltage = -scale_voltage;
+ negative_rawfromoffset = 1;
+ } else {
+ scale_voltage = 0;
+ }
+ }
+
+ adc_chan_result->measurement = scale_voltage *
+ chan_properties->offset_gain_denominator;
+
+	/* do_div only performs positive integer division! */
+ do_div(adc_chan_result->measurement,
+ chan_properties->offset_gain_numerator);
+
+ if (negative_rawfromoffset)
+ adc_chan_result->measurement = -adc_chan_result->measurement;
+
+ /*
+ * Note: adc_chan_result->measurement is in the unit of
+ * adc_properties.adc_reference. For generic channel processing,
+ * channel measurement is a scale/ratio relative to the adc
+ * reference input
+ */
+ adc_chan_result->physical = adc_chan_result->measurement;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qpnp_adc_scale_default);
+
+int32_t qpnp_vadc_check_result(int32_t *data)
+{
+ if (*data < QPNP_VADC_MIN_ADC_CODE)
+ *data = QPNP_VADC_MIN_ADC_CODE;
+ else if (*data > QPNP_VADC_MAX_ADC_CODE)
+ *data = QPNP_VADC_MAX_ADC_CODE;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qpnp_vadc_check_result);
+
+int32_t qpnp_adc_get_devicetree_data(struct spmi_device *spmi,
+ struct qpnp_adc_drv *adc_qpnp)
+{
+ struct device_node *node = spmi->dev.of_node;
+ struct resource *res;
+ struct device_node *child;
+ struct qpnp_vadc_amux *adc_channel_list;
+ struct qpnp_adc_properties *adc_prop;
+ struct qpnp_vadc_amux_properties *amux_prop;
+ int count_adc_channel_list = 0, decimation, rc = 0;
+
+ if (!node)
+ return -EINVAL;
+
+ for_each_child_of_node(node, child)
+ count_adc_channel_list++;
+
+ if (!count_adc_channel_list) {
+ pr_err("No channel listing\n");
+ return -EINVAL;
+ }
+
+ adc_qpnp->spmi = spmi;
+
+ adc_prop = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_properties),
+ GFP_KERNEL);
+ if (!adc_prop) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+ adc_channel_list = devm_kzalloc(&spmi->dev,
+ (sizeof(struct qpnp_vadc_amux) * count_adc_channel_list),
+ GFP_KERNEL);
+ if (!adc_channel_list) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ amux_prop = devm_kzalloc(&spmi->dev,
+ sizeof(struct qpnp_vadc_amux_properties) +
+ sizeof(struct qpnp_vadc_chan_properties), GFP_KERNEL);
+ if (!amux_prop) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(node, child) {
+ int channel_num, scaling, post_scaling, hw_settle_time;
+ int fast_avg_setup, calib_type, i = 0, rc;
+ const char *calibration_param, *channel_name;
+
+ channel_name = of_get_property(child,
+ "label", NULL) ? : child->name;
+ if (!channel_name) {
+ pr_err("Invalid channel name\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(child, "qcom,channel-num",
+ &channel_num);
+ if (rc) {
+ pr_err("Invalid channel num\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(child, "qcom,decimation",
+ &decimation);
+ if (rc) {
+ pr_err("Invalid channel decimation property\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(child,
+ "qcom,pre-div-channel-scaling", &scaling);
+ if (rc) {
+ pr_err("Invalid channel scaling property\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(child,
+ "qcom,scale-function", &post_scaling);
+ if (rc) {
+ pr_err("Invalid channel post scaling property\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(child,
+ "qcom,hw-settle-time", &hw_settle_time);
+ if (rc) {
+ pr_err("Invalid channel hw settle time property\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(child,
+ "qcom,fast-avg-setup", &fast_avg_setup);
+ if (rc) {
+ pr_err("Invalid channel fast average setup\n");
+ return -EINVAL;
+ }
+ calibration_param = of_get_property(child,
+ "qcom,calibration-type", NULL);
+ if (!strncmp(calibration_param, "absolute", 8))
+ calib_type = CALIB_ABSOLUTE;
+ else if (!strncmp(calibration_param, "historical", 9))
+ calib_type = CALIB_RATIOMETRIC;
+ else {
+ pr_err("%s: Invalid calibration property\n", __func__);
+ return -EINVAL;
+ }
+ /* Individual channel properties */
+ adc_channel_list[i].name = (char *)channel_name;
+ adc_channel_list[i].channel_num = channel_num;
+ adc_channel_list[i].chan_path_prescaling = scaling;
+ adc_channel_list[i].adc_decimation = decimation;
+ adc_channel_list[i].adc_scale_fn = post_scaling;
+ adc_channel_list[i].hw_settle_time = hw_settle_time;
+ adc_channel_list[i].fast_avg_setup = fast_avg_setup;
+ i++;
+ }
+ adc_qpnp->adc_channels = adc_channel_list;
+ adc_qpnp->amux_prop = amux_prop;
+
+ /* Get the ADC VDD reference voltage and ADC bit resolution */
+ rc = of_property_read_u32(node, "qcom,adc-vdd-reference",
+ &adc_prop->adc_vdd_reference);
+ if (rc) {
+ pr_err("Invalid adc vdd reference property\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(node, "qcom,adc-bit-resolution",
+ &adc_prop->bitresolution);
+ if (rc) {
+ pr_err("Invalid adc bit resolution property\n");
+ return -EINVAL;
+ }
+ adc_qpnp->adc_prop = adc_prop;
+
+ /* Get the peripheral address */
+ res = spmi_get_resource(spmi, 0, IORESOURCE_MEM, 0);
+ if (!res) {
+ pr_err("No base address definition\n");
+ return -EINVAL;
+ }
+
+ adc_qpnp->slave = spmi->sid;
+ adc_qpnp->offset = res->start;
+
+ /* Register the ADC peripheral interrupt */
+ adc_qpnp->adc_irq = spmi_get_irq(spmi, 0, 0);
+ if (adc_qpnp->adc_irq < 0) {
+ pr_err("Invalid irq\n");
+ return -ENXIO;
+ }
+
+ mutex_init(&adc_qpnp->adc_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_get_devicetree_data);
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
new file mode 100644
index 0000000..8b2cb97
--- /dev/null
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -0,0 +1,784 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/platform_device.h>
+
+/* QPNP VADC register definition */
+#define QPNP_VADC_STATUS1 0x8
+#define QPNP_VADC_STATUS1_OP_MODE 4
+#define QPNP_VADC_STATUS1_MEAS_INTERVAL_EN_STS BIT(2)
+#define QPNP_VADC_STATUS1_REQ_STS BIT(1)
+#define QPNP_VADC_STATUS1_EOC BIT(0)
+#define QPNP_VADC_STATUS2 0x9
+#define QPNP_VADC_STATUS2_CONV_SEQ_STATE 6
+#define QPNP_VADC_STATUS2_FIFO_NOT_EMPTY_FLAG BIT(1)
+#define QPNP_VADC_STATUS2_CONV_SEQ_TIMEOUT_STS BIT(0)
+#define QPNP_VADC_STATUS2_CONV_SEQ_STATE_SHIFT 4
+#define QPNP_VADC_CONV_TIMEOUT_ERR 2
+
+#define QPNP_VADC_INT_SET_TYPE 0x11
+#define QPNP_VADC_INT_POLARITY_HIGH 0x12
+#define QPNP_VADC_INT_POLARITY_LOW 0x13
+#define QPNP_VADC_INT_LATCHED_CLR 0x14
+#define QPNP_VADC_INT_EN_SET 0x15
+#define QPNP_VADC_INT_CLR 0x16
+#define QPNP_VADC_INT_LOW_THR_BIT BIT(4)
+#define QPNP_VADC_INT_HIGH_THR_BIT BIT(3)
+#define QPNP_VADC_INT_CONV_SEQ_TIMEOUT_BIT BIT(2)
+#define QPNP_VADC_INT_FIFO_NOT_EMPTY_BIT BIT(1)
+#define QPNP_VADC_INT_EOC_BIT BIT(0)
+#define QPNP_VADC_INT_CLR_MASK 0x1f
+#define QPNP_VADC_MODE_CTL 0x40
+#define QPNP_VADC_OP_MODE_SHIFT 4
+#define QPNP_VADC_VREF_XO_THM_FORCE BIT(2)
+#define QPNP_VADC_AMUX_TRIM_EN BIT(1)
+#define QPNP_VADC_ADC_TRIM_EN BIT(0)
+#define QPNP_VADC_EN_CTL1 0x46
+#define QPNP_VADC_ADC_EN BIT(7)
+#define QPNP_VADC_ADC_CH_SEL_CTL 0x48
+#define QPNP_VADC_ADC_DIG_PARAM 0x50
+#define QPNP_VADC_ADC_DIG_DEC_RATIO_SEL_SHIFT 3
+#define QPNP_VADC_HW_SETTLE_DELAY 0x51
+#define QPNP_VADC_CONV_REQ 0x52
+#define QPNP_VADC_CONV_REQ_SET BIT(7)
+#define QPNP_VADC_CONV_SEQ_CTL 0x54
+#define QPNP_VADC_CONV_SEQ_HOLDOFF_SHIFT 4
+#define QPNP_VADC_CONV_SEQ_TRIG_CTL 0x55
+#define QPNP_VADC_CONV_SEQ_FALLING_EDGE 0x0
+#define QPNP_VADC_CONV_SEQ_RISING_EDGE 0x1
+#define QPNP_VADC_CONV_SEQ_EDGE_SHIFT 7
+#define QPNP_VADC_FAST_AVG_CTL 0x5a
+
+#define QPNP_VADC_M0_LOW_THR_LSB 0x5c
+#define QPNP_VADC_M0_LOW_THR_MSB 0x5d
+#define QPNP_VADC_M0_HIGH_THR_LSB 0x5e
+#define QPNP_VADC_M0_HIGH_THR_MSB 0x5f
+#define QPNP_VADC_M1_LOW_THR_LSB 0x69
+#define QPNP_VADC_M1_LOW_THR_MSB 0x6a
+#define QPNP_VADC_M1_HIGH_THR_LSB 0x6b
+#define QPNP_VADC_M1_HIGH_THR_MSB 0x6c
+
+#define QPNP_VADC_DATA0 0x60
+#define QPNP_VADC_DATA1 0x61
+#define QPNP_VADC_CONV_TIMEOUT_ERR 2
+#define QPNP_VADC_CONV_TIME_MIN 2000
+#define QPNP_VADC_CONV_TIME_MAX 2100
+
+#define QPNP_ADC_HWMON_NAME_LENGTH 16
+
+struct qpnp_vadc_drv {
+ struct qpnp_adc_drv *adc;
+ struct dentry *dent;
+ struct device *vadc_hwmon;
+ bool vadc_init_calib;
+ struct sensor_device_attribute sens_attr[0];
+};
+
+struct qpnp_vadc_drv *qpnp_vadc;
+
+static struct qpnp_vadc_scale_fn vadc_scale_fn[] = {
+ [SCALE_DEFAULT] = {qpnp_adc_scale_default},
+};
+
+static int32_t qpnp_vadc_read_reg(int16_t reg, u8 *data)
+{
+ struct qpnp_vadc_drv *vadc = qpnp_vadc;
+ int rc;
+
+ rc = spmi_ext_register_readl(vadc->adc->spmi->ctrl, vadc->adc->slave,
+ reg, data, 1);
+ if (rc < 0) {
+ pr_err("qpnp adc read reg %d failed with %d\n", reg, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_vadc_write_reg(int16_t reg, u8 data)
+{
+ struct qpnp_vadc_drv *vadc = qpnp_vadc;
+ int rc;
+ u8 *buf;
+
+ buf = &data;
+
+ rc = spmi_ext_register_writel(vadc->adc->spmi->ctrl, vadc->adc->slave,
+ reg, buf, 1);
+ if (rc < 0) {
+ pr_err("qpnp adc write reg %d failed with %d\n", reg, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_vadc_configure_interrupt(void)
+{
+ int rc = 0;
+ u8 data = 0;
+
+ /* Configure interrupt as an Edge trigger */
+ rc = qpnp_vadc_write_reg(QPNP_VADC_INT_SET_TYPE,
+ QPNP_VADC_INT_CLR_MASK);
+ if (rc < 0) {
+ pr_err("%s Interrupt configure failed\n", __func__);
+ return rc;
+ }
+
+ /* Configure interrupt for rising edge trigger */
+ rc = qpnp_vadc_write_reg(QPNP_VADC_INT_POLARITY_HIGH,
+ QPNP_VADC_INT_CLR_MASK);
+ if (rc < 0) {
+ pr_err("%s Rising edge trigger configure failed\n", __func__);
+ return rc;
+ }
+
+ /* Disable low level interrupt triggering */
+ data = QPNP_VADC_INT_CLR_MASK;
+ rc = qpnp_vadc_write_reg(QPNP_VADC_INT_POLARITY_LOW,
+ (~data & QPNP_VADC_INT_CLR_MASK));
+ if (rc < 0) {
+ pr_err("%s Setting level low to disable failed\n", __func__);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_vadc_enable(bool state)
+{
+ int rc = 0;
+ u8 data = 0;
+
+ data = QPNP_VADC_ADC_EN;
+ if (state) {
+ rc = qpnp_vadc_write_reg(QPNP_VADC_EN_CTL1,
+ data);
+ if (rc < 0) {
+ pr_err("VADC enable failed\n");
+ return rc;
+ }
+ } else {
+ rc = qpnp_vadc_write_reg(QPNP_VADC_EN_CTL1,
+ (~data & QPNP_VADC_ADC_EN));
+ if (rc < 0) {
+ pr_err("VADC disable failed\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int32_t qpnp_vadc_configure(
+ struct qpnp_vadc_amux_properties *chan_prop)
+{
+ u8 decimation = 0, conv_sequence = 0, conv_sequence_trig = 0;
+ int rc = 0;
+
+ rc = qpnp_vadc_write_reg(QPNP_VADC_INT_EN_SET,
+ QPNP_VADC_INT_EOC_BIT);
+ if (rc < 0) {
+ pr_err("qpnp adc configure error for interrupt setup\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_write_reg(QPNP_VADC_MODE_CTL, chan_prop->mode_sel);
+ if (rc < 0) {
+ pr_err("qpnp adc configure error for mode selection\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_write_reg(QPNP_VADC_ADC_CH_SEL_CTL,
+ chan_prop->amux_channel);
+ if (rc < 0) {
+ pr_err("qpnp adc configure error for channel selection\n");
+ return rc;
+ }
+
+ decimation |= chan_prop->decimation <<
+ QPNP_VADC_ADC_DIG_DEC_RATIO_SEL_SHIFT;
+ rc = qpnp_vadc_write_reg(QPNP_VADC_ADC_DIG_PARAM, decimation);
+ if (rc < 0) {
+ pr_err("qpnp adc configure error for digital parameter setup\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_write_reg(QPNP_VADC_HW_SETTLE_DELAY,
+ chan_prop->hw_settle_time);
+ if (rc < 0) {
+ pr_err("qpnp adc configure error for hw settling time setup\n");
+ return rc;
+ }
+
+ if (chan_prop->mode_sel == (ADC_OP_NORMAL_MODE <<
+ QPNP_VADC_OP_MODE_SHIFT)) {
+ rc = qpnp_vadc_write_reg(QPNP_VADC_FAST_AVG_CTL,
+ chan_prop->fast_avg_setup);
+ if (rc < 0) {
+ pr_err("qpnp adc fast averaging configure error\n");
+ return rc;
+ }
+ } else if (chan_prop->mode_sel == (ADC_OP_CONVERSION_SEQUENCER <<
+ QPNP_VADC_OP_MODE_SHIFT)) {
+ conv_sequence = ((ADC_SEQ_HOLD_100US <<
+ QPNP_VADC_CONV_SEQ_HOLDOFF_SHIFT) |
+ ADC_CONV_SEQ_TIMEOUT_5MS);
+ rc = qpnp_vadc_write_reg(QPNP_VADC_CONV_SEQ_CTL,
+ conv_sequence);
+ if (rc < 0) {
+ pr_err("qpnp adc conversion sequence error\n");
+ return rc;
+ }
+
+ conv_sequence_trig = ((QPNP_VADC_CONV_SEQ_RISING_EDGE <<
+ QPNP_VADC_CONV_SEQ_EDGE_SHIFT) |
+ chan_prop->trigger_channel);
+ rc = qpnp_vadc_write_reg(QPNP_VADC_CONV_SEQ_TRIG_CTL,
+ conv_sequence_trig);
+ if (rc < 0) {
+ pr_err("qpnp adc conversion trigger error\n");
+ return rc;
+ }
+ }
+
+ rc = qpnp_vadc_write_reg(QPNP_VADC_CONV_REQ, QPNP_VADC_CONV_REQ_SET);
+ if (rc < 0) {
+ pr_err("qpnp adc request conversion failed\n");
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_vadc_configure);
+
+static int32_t qpnp_vadc_read_conversion_result(int32_t *data)
+{
+ uint8_t rslt_lsb, rslt_msb;
+ int rc = 0;
+
+ rc = qpnp_vadc_read_reg(QPNP_VADC_DATA0, &rslt_lsb);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed for data0 with %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_vadc_read_reg(QPNP_VADC_DATA1, &rslt_msb);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed for data1 with %d\n", rc);
+ return rc;
+ }
+
+ *data = (rslt_msb << 8) | rslt_lsb;
+
+ rc = qpnp_vadc_check_result(data);
+ if (rc < 0) {
+ pr_err("VADC data check failed\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_vadc_read_status(int mode_sel)
+{
+ u8 status1, status2, status2_conv_seq_state;
+ u8 status_err = QPNP_VADC_CONV_TIMEOUT_ERR;
+ int rc;
+
+ switch (mode_sel) {
+ case (ADC_OP_CONVERSION_SEQUENCER << QPNP_VADC_OP_MODE_SHIFT):
+ rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+ if (rc) {
+ pr_err("qpnp_vadc read mask interrupt failed\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS2, &status2);
+ if (rc) {
+ pr_err("qpnp_vadc read mask interrupt failed\n");
+ return rc;
+ }
+
+ if (!(status2 & ~QPNP_VADC_STATUS2_CONV_SEQ_TIMEOUT_STS) &&
+ (status1 & (~QPNP_VADC_STATUS1_REQ_STS |
+ QPNP_VADC_STATUS1_EOC))) {
+ rc = status_err;
+ return rc;
+ }
+
+ status2_conv_seq_state = status2 >>
+ QPNP_VADC_STATUS2_CONV_SEQ_STATE_SHIFT;
+ if (status2_conv_seq_state != ADC_CONV_SEQ_IDLE) {
+ pr_err("qpnp vadc seq error with status %d\n",
+ status2);
+ rc = -EINVAL;
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void qpnp_vadc_work(struct work_struct *work)
+{
+ struct qpnp_vadc_drv *vadc = qpnp_vadc;
+ int rc;
+
+ rc = qpnp_vadc_write_reg(QPNP_VADC_INT_CLR, QPNP_VADC_INT_EOC_BIT);
+ if (rc)
+ pr_err("qpnp_vadc clear mask interrupt failed with %d\n", rc);
+
+ complete(&vadc->adc->adc_rslt_completion);
+
+ return;
+}
+DECLARE_WORK(trigger_completion_work, qpnp_vadc_work);
+
+static irqreturn_t qpnp_vadc_isr(int irq, void *dev_id)
+{
+ schedule_work(&trigger_completion_work);
+
+ return IRQ_HANDLED;
+}
+
+static uint32_t qpnp_vadc_calib_device(void)
+{
+ struct qpnp_vadc_drv *vadc = qpnp_vadc;
+ struct qpnp_vadc_amux_properties conv;
+ int rc, calib_read_1, calib_read_2;
+ u8 status1 = 0;
+
+ conv.amux_channel = REF_125V;
+ conv.decimation = DECIMATION_TYPE2;
+ conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+ conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+ conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+
+ rc = qpnp_vadc_configure(&conv);
+ if (rc) {
+ pr_err("qpnp_vadc configure failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ while (status1 != (~QPNP_VADC_STATUS1_REQ_STS |
+ QPNP_VADC_STATUS1_EOC)) {
+ rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+ if (rc < 0)
+ return rc;
+ usleep_range(QPNP_VADC_CONV_TIME_MIN,
+ QPNP_VADC_CONV_TIME_MAX);
+ }
+
+ rc = qpnp_vadc_read_conversion_result(&calib_read_1);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ conv.amux_channel = REF_625MV;
+ conv.decimation = DECIMATION_TYPE2;
+ conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+ conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+ conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+ rc = qpnp_vadc_configure(&conv);
+ if (rc) {
+ pr_err("qpnp adc configure failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ while (status1 != (~QPNP_VADC_STATUS1_REQ_STS |
+ QPNP_VADC_STATUS1_EOC)) {
+ rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+ if (rc < 0)
+ return rc;
+ usleep_range(QPNP_VADC_CONV_TIME_MIN,
+ QPNP_VADC_CONV_TIME_MAX);
+ }
+
+ rc = qpnp_vadc_read_conversion_result(&calib_read_1);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dy =
+ (calib_read_1 - calib_read_2);
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dx
+ = QPNP_ADC_625_UV;
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].adc_vref =
+ calib_read_1;
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].adc_gnd =
+ calib_read_2;
+ /* Ratiometric Calibration */
+ conv.amux_channel = VDD_VADC;
+ conv.decimation = DECIMATION_TYPE2;
+ conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+ conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+ conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+ rc = qpnp_vadc_configure(&conv);
+ if (rc) {
+ pr_err("qpnp adc configure failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ while (status1 != (~QPNP_VADC_STATUS1_REQ_STS |
+ QPNP_VADC_STATUS1_EOC)) {
+ rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+ if (rc < 0)
+ return rc;
+ usleep_range(QPNP_VADC_CONV_TIME_MIN,
+ QPNP_VADC_CONV_TIME_MAX);
+ }
+
+ rc = qpnp_vadc_read_conversion_result(&calib_read_1);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ conv.amux_channel = VDD_VADC;
+ conv.decimation = DECIMATION_TYPE2;
+ conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+ conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+ conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+ rc = qpnp_vadc_configure(&conv);
+ if (rc) {
+ pr_err("qpnp adc configure failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ while (status1 != (~QPNP_VADC_STATUS1_REQ_STS |
+ QPNP_VADC_STATUS1_EOC)) {
+ rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+ if (rc < 0)
+ return rc;
+ usleep_range(QPNP_VADC_CONV_TIME_MIN,
+ QPNP_VADC_CONV_TIME_MAX);
+ }
+
+ rc = qpnp_vadc_read_conversion_result(&calib_read_1);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dy =
+ (calib_read_1 - calib_read_2);
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dx =
+ vadc->adc->adc_prop->adc_vdd_reference;
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_vref =
+ calib_read_1;
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_gnd =
+ calib_read_2;
+
+calib_fail:
+ return rc;
+}
+
+int32_t qpnp_vadc_conv_seq_request(enum qpnp_vadc_trigger trigger_channel,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result)
+{
+ struct qpnp_vadc_drv *vadc = qpnp_vadc;
+ int rc, scale_type, amux_prescaling;
+
+ if (!vadc->vadc_init_calib) {
+ rc = qpnp_vadc_calib_device();
+ if (rc) {
+ pr_err("Calibration failed\n");
+ return rc;
+ } else
+ vadc->vadc_init_calib = true;
+ }
+
+ mutex_lock(&vadc->adc->adc_lock);
+
+ rc = qpnp_vadc_enable(true);
+ if (rc)
+ goto fail_unlock;
+
+ vadc->adc->amux_prop->amux_channel = channel;
+ vadc->adc->amux_prop->decimation =
+ vadc->adc->adc_channels[channel].adc_decimation;
+ vadc->adc->amux_prop->hw_settle_time =
+ vadc->adc->adc_channels[channel].hw_settle_time;
+ vadc->adc->amux_prop->fast_avg_setup =
+ vadc->adc->adc_channels[channel].fast_avg_setup;
+
+ if (trigger_channel < ADC_SEQ_NONE)
+ vadc->adc->amux_prop->mode_sel = (ADC_OP_CONVERSION_SEQUENCER
+ << QPNP_VADC_OP_MODE_SHIFT);
+ else if (trigger_channel == ADC_SEQ_NONE)
+ vadc->adc->amux_prop->mode_sel = (ADC_OP_NORMAL_MODE
+ << QPNP_VADC_OP_MODE_SHIFT);
+ else {
+ pr_err("Invalid trigger channel:%d\n", trigger_channel);
+ goto fail;
+ }
+
+ vadc->adc->amux_prop->trigger_channel = trigger_channel;
+
+ rc = qpnp_vadc_configure(vadc->adc->amux_prop);
+ if (rc) {
+ pr_info("qpnp vadc configure failed with %d\n", rc);
+ goto fail;
+ }
+
+ wait_for_completion(&vadc->adc->adc_rslt_completion);
+
+ if (trigger_channel < ADC_SEQ_NONE) {
+ rc = qpnp_vadc_read_status(vadc->adc->amux_prop->mode_sel);
+ if (rc)
+ pr_info("Conversion sequence timed out - %d\n", rc);
+ }
+
+ rc = qpnp_vadc_read_conversion_result(&result->adc_code);
+ if (rc) {
+ pr_info("qpnp vadc read adc code failed with %d\n", rc);
+ goto fail;
+ }
+
+ amux_prescaling = vadc->adc->adc_channels[channel].chan_path_prescaling;
+
+ vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
+ qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+ vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
+ qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+
+ scale_type = vadc->adc->adc_channels[channel].adc_scale_fn;
+ if (scale_type >= SCALE_NONE) {
+ rc = -EBADF;
+ goto fail;
+ }
+
+ vadc_scale_fn[scale_type].chan(result->adc_code,
+ vadc->adc->adc_prop, vadc->adc->amux_prop->chan_prop, result);
+
+fail:
+ rc = qpnp_vadc_enable(false);
+ if (rc)
+ pr_err("Disable ADC failed during configuration\n");
+
+fail_unlock:
+ mutex_unlock(&vadc->adc->adc_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_conv_seq_request);
+
+int32_t qpnp_vadc_read(enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result)
+{
+ return qpnp_vadc_conv_seq_request(ADC_SEQ_NONE,
+ channel, result);
+}
+EXPORT_SYMBOL_GPL(qpnp_vadc_read);
+
+static ssize_t qpnp_adc_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct qpnp_vadc_result result;
+ int rc = -1;
+
+ rc = qpnp_vadc_read(attr->index, &result);
+
+ if (rc)
+ return 0;
+
+ return snprintf(buf, QPNP_ADC_HWMON_NAME_LENGTH,
+ "Result:%lld Raw:%d\n", result.physical, result.adc_code);
+}
+
+static struct sensor_device_attribute qpnp_adc_attr =
+ SENSOR_ATTR(NULL, S_IRUGO, qpnp_adc_show, NULL, 0);
+
+static int32_t qpnp_vadc_init_hwmon(struct spmi_device *spmi)
+{
+ struct qpnp_vadc_drv *vadc = qpnp_vadc;
+ struct device_node *child;
+ struct device_node *node = spmi->dev.of_node;
+ int rc = 0, i = 0, channel;
+
+ for_each_child_of_node(node, child) {
+ channel = vadc->adc->adc_channels[i].channel_num;
+ qpnp_adc_attr.index = vadc->adc->adc_channels[i].channel_num;
+ qpnp_adc_attr.dev_attr.attr.name =
+ vadc->adc->adc_channels[i].name;
+ sysfs_attr_init(&vadc->sens_attr[i].dev_attr.attr);
+ memcpy(&vadc->sens_attr[i], &qpnp_adc_attr,
+ sizeof(qpnp_adc_attr));
+ rc = device_create_file(&spmi->dev,
+ &vadc->sens_attr[i].dev_attr);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "device_create_file failed for dev %s\n",
+ vadc->adc->adc_channels[i].name);
+ goto hwmon_err_sens;
+ }
+ i++;
+ }
+
+ return 0;
+hwmon_err_sens:
+ pr_info("Init HWMON failed for qpnp_adc with %d\n", rc);
+ return rc;
+}
+
+static int __devinit qpnp_vadc_probe(struct spmi_device *spmi)
+{
+ struct qpnp_vadc_drv *vadc;
+ struct qpnp_adc_drv *adc_qpnp;
+ struct device_node *node = spmi->dev.of_node;
+ struct device_node *child;
+ int rc, count_adc_channel_list = 0;
+
+ if (!node)
+ return -EINVAL;
+
+ if (qpnp_vadc) {
+ pr_err("VADC already in use\n");
+ return -EBUSY;
+ }
+
+ for_each_child_of_node(node, child)
+ count_adc_channel_list++;
+
+ if (!count_adc_channel_list) {
+ pr_err("No channel listing\n");
+ return -EINVAL;
+ }
+
+ vadc = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_vadc_drv) +
+ (sizeof(struct sensor_device_attribute) *
+ count_adc_channel_list), GFP_KERNEL);
+ if (!vadc) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ adc_qpnp = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_drv),
+ GFP_KERNEL);
+ if (!adc_qpnp) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ vadc->adc = adc_qpnp;
+
+ rc = qpnp_adc_get_devicetree_data(spmi, vadc->adc);
+ if (rc) {
+ dev_err(&spmi->dev, "failed to read device tree\n");
+ return rc;
+ }
+
+ rc = devm_request_irq(&spmi->dev, vadc->adc->adc_irq,
+ qpnp_vadc_isr, IRQF_TRIGGER_RISING,
+ "qpnp_vadc_interrupt", vadc);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "failed to request adc irq with error %d\n", rc);
+ return rc;
+ }
+
+ qpnp_vadc = vadc;
+ dev_set_drvdata(&spmi->dev, vadc);
+ rc = qpnp_vadc_init_hwmon(spmi);
+ if (rc) {
+ dev_err(&spmi->dev, "failed to initialize qpnp hwmon adc\n");
+ goto fail_free_irq;
+ }
+ vadc->vadc_hwmon = hwmon_device_register(&vadc->adc->spmi->dev);
+ vadc->vadc_init_calib = false;
+
+ rc = qpnp_vadc_configure_interrupt();
+ if (rc) {
+ dev_err(&spmi->dev, "failed to configure interrupt");
+ goto fail_free_irq;
+ }
+
+ return 0;
+
+fail_free_irq:
+ free_irq(vadc->adc->adc_irq, vadc);
+
+ return rc;
+}
+
+static int __devexit qpnp_vadc_remove(struct spmi_device *spmi)
+{
+ struct qpnp_vadc_drv *vadc = dev_get_drvdata(&spmi->dev);
+ struct device_node *node = spmi->dev.of_node;
+ struct device_node *child;
+ int i = 0;
+
+ for_each_child_of_node(node, child) {
+ device_remove_file(&spmi->dev,
+ &vadc->sens_attr[i].dev_attr);
+ i++;
+ }
+ free_irq(vadc->adc->adc_irq, vadc);
+ dev_set_drvdata(&spmi->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id qpnp_vadc_match_table[] = {
+ { .compatible = "qcom,qpnp-vadc",
+ },
+ {}
+};
+
+static struct spmi_driver qpnp_vadc_driver = {
+ .driver = {
+ .name = "qcom,qpnp-vadc",
+ .of_match_table = qpnp_vadc_match_table,
+ },
+ .probe = qpnp_vadc_probe,
+ .remove = qpnp_vadc_remove,
+};
+
+static int __init qpnp_vadc_init(void)
+{
+ return spmi_driver_register(&qpnp_vadc_driver);
+}
+module_init(qpnp_vadc_init);
+
+static void __exit qpnp_vadc_exit(void)
+{
+ spmi_driver_unregister(&qpnp_vadc_driver);
+}
+module_exit(qpnp_vadc_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC Voltage ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 8fb19dc..1464dab 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -647,4 +647,11 @@
If you say yes here you get support for Bosch Sensortec's
acceleration sensors SMB380/BMA150.
+config STM_LIS3DH
+ tristate "STM LIS3DH acceleration sensor support"
+ depends on I2C=y
+ help
+ If you say yes here you get support for STMicroelectronics's
+ acceleration sensors LIS3DH.
+
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index eecee4e..96c9288 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -60,3 +60,4 @@
obj-$(CONFIG_PMIC8058_OTHC) += pmic8058-othc.o
obj-$(CONFIG_INPUT_PMIC8058_VIBRA_MEMLESS) += pmic8058-vib-memless.o
obj-$(CONFIG_BOSCH_BMA150) += bma150.o
+obj-$(CONFIG_STM_LIS3DH) += lis3dh_acc.o
diff --git a/drivers/input/misc/lis3dh_acc.c b/drivers/input/misc/lis3dh_acc.c
new file mode 100644
index 0000000..af96d3f
--- /dev/null
+++ b/drivers/input/misc/lis3dh_acc.c
@@ -0,0 +1,1589 @@
+/******************** (C) COPYRIGHT 2010 STMicroelectronics ********************
+ *
+ * File Name : lis3dh_acc.c
+ * Authors : MSH - Motion Mems BU - Application Team
+ * : Matteo Dameno (matteo.dameno@st.com)
+ * : Carmine Iascone (carmine.iascone@st.com)
+ * : Samuel Huo (samuel.huo@st.com)
+ * Version : V.1.1.0
+ * Date : 07/10/2012
+ * Description : LIS3DH accelerometer sensor driver
+ *
+ *******************************************************************************
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THE PRESENT SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
+ * OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, FOR THE SOLE
+ * PURPOSE TO SUPPORT YOUR APPLICATION DEVELOPMENT.
+ * AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
+ * INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
+ * CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
+ * INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
+ *
+ * THIS SOFTWARE IS SPECIFICALLY DESIGNED FOR EXCLUSIVE USE WITH ST PARTS.
+ *
+ ******************************************************************************
+ Revision 1.0.0 05/11/09
+ First Release;
+ Revision 1.0.3 22/01/2010
+ Linux K&R Compliant Release;
+ Revision 1.0.5 16/08/2010
+ modified _get_acceleration_data function;
+ modified _update_odr function;
+ manages 2 interrupts;
+ Revision 1.0.6 15/11/2010
+ supports sysfs;
+ no more support for ioctl;
+ Revision 1.0.7 26/11/2010
+ checks for availability of interrupts pins
+ correction on FUZZ and FLAT values;
+ Revision 1.0.8 2010/Apr/01
+ corrects a bug in interrupt pin management in 1.0.7
+ Revision 1.0.9 07/25/2011
+ Remove several unused functions, add 5ms delay in init, change sysfs attributes.
+ Revision 1.1.0 07/10/2012
+ To replace some deprecated functions for 3.4 kernel;
+ To pass the checkpatch's formatting requirement;
+ To add regulator request;
+
+ ******************************************************************************/
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/pm.h>
+#include <linux/input/lis3dh.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+
+#define DEBUG 1
+
+#define G_MAX 16000
+
+
+#define SENSITIVITY_2G 1 /** mg/LSB */
+#define SENSITIVITY_4G 2 /** mg/LSB */
+#define SENSITIVITY_8G 4 /** mg/LSB */
+#define SENSITIVITY_16G 12 /** mg/LSB */
+
+
+
+
+/* Accelerometer Sensor Operating Mode */
+#define LIS3DH_ACC_ENABLE 0x01
+#define LIS3DH_ACC_DISABLE 0x00
+
+#define HIGH_RESOLUTION 0x08
+
+#define AXISDATA_REG 0x28
+#define WHOAMI_LIS3DH_ACC 0x33 /* Expected content for WAI */
+
+/* CONTROL REGISTERS */
+#define WHO_AM_I 0x0F /* WhoAmI register */
+#define TEMP_CFG_REG 0x1F /* temper sens control reg */
+/* ctrl 1: ODR3 ODR2 ODR1 ODR0 LPen Zenable Yenable Xenable */
+#define CTRL_REG1 0x20 /* control reg 1 */
+#define CTRL_REG2 0x21 /* control reg 2 */
+#define CTRL_REG3 0x22 /* control reg 3 */
+#define CTRL_REG4 0x23 /* control reg 4 */
+#define CTRL_REG5 0x24 /* control reg 5 */
+#define CTRL_REG6 0x25 /* control reg 6 */
+
+#define FIFO_CTRL_REG 0x2E /* FiFo control reg */
+
+#define INT_CFG1 0x30 /* interrupt 1 config */
+#define INT_SRC1 0x31 /* interrupt 1 source */
+#define INT_THS1 0x32 /* interrupt 1 threshold */
+#define INT_DUR1 0x33 /* interrupt 1 duration */
+
+
+#define TT_CFG 0x38 /* tap config */
+#define TT_SRC 0x39 /* tap source */
+#define TT_THS 0x3A /* tap threshold */
+#define TT_LIM 0x3B /* tap time limit */
+#define TT_TLAT 0x3C /* tap time latency */
+#define TT_TW 0x3D /* tap time window */
+/* end CONTROL REGISTERS */
+
+
+#define ENABLE_HIGH_RESOLUTION 1
+
+#define LIS3DH_ACC_PM_OFF 0x00
+#define LIS3DH_ACC_ENABLE_ALL_AXES 0x07
+
+
+#define PMODE_MASK 0x08
+#define ODR_MASK 0XF0
+
+#define ODR1 0x10 /* 1Hz output data rate */
+#define ODR10 0x20 /* 10Hz output data rate */
+#define ODR25 0x30 /* 25Hz output data rate */
+#define ODR50 0x40 /* 50Hz output data rate */
+#define ODR100 0x50 /* 100Hz output data rate */
+#define ODR200 0x60 /* 200Hz output data rate */
+#define ODR400 0x70 /* 400Hz output data rate */
+#define ODR1250 0x90 /* 1250Hz output data rate */
+
+
+
+#define IA 0x40
+#define ZH 0x20
+#define ZL 0x10
+#define YH 0x08
+#define YL 0x04
+#define XH 0x02
+#define XL 0x01
+/* */
+/* CTRL REG BITS*/
+#define CTRL_REG3_I1_AOI1 0x40
+#define CTRL_REG6_I2_TAPEN 0x80
+#define CTRL_REG6_HLACTIVE 0x02
+/* */
+#define NO_MASK 0xFF
+#define INT1_DURATION_MASK 0x7F
+#define INT1_THRESHOLD_MASK 0x7F
+#define TAP_CFG_MASK 0x3F
+#define TAP_THS_MASK 0x7F
+#define TAP_TLIM_MASK 0x7F
+#define TAP_TLAT_MASK NO_MASK
+#define TAP_TW_MASK NO_MASK
+
+
+/* TAP_SOURCE_REG BIT */
+#define DTAP 0x20
+#define STAP 0x10
+#define SIGNTAP 0x08
+#define ZTAP 0x04
+#define YTAP 0x02
+#define XTAZ 0x01
+
+
+#define FUZZ 0
+#define FLAT 0
+#define I2C_RETRY_DELAY 5
+#define I2C_RETRIES 5
+#define I2C_AUTO_INCREMENT 0x80
+
+/* RESUME STATE INDICES */
+#define RES_CTRL_REG1 0
+#define RES_CTRL_REG2 1
+#define RES_CTRL_REG3 2
+#define RES_CTRL_REG4 3
+#define RES_CTRL_REG5 4
+#define RES_CTRL_REG6 5
+
+#define RES_INT_CFG1 6
+#define RES_INT_THS1 7
+#define RES_INT_DUR1 8
+
+#define RES_TT_CFG 9
+#define RES_TT_THS 10
+#define RES_TT_LIM 11
+#define RES_TT_TLAT 12
+#define RES_TT_TW 13
+
+#define RES_TEMP_CFG_REG 14
+#define RES_REFERENCE_REG 15
+#define RES_FIFO_CTRL_REG 16
+
+#define RESUME_ENTRIES 17
+/* end RESUME STATE INDICES */
+
+
+/*
+ * Poll-interval (ms) to output-data-rate mapping, ordered from the
+ * shortest cutoff to the longest.  lis3dh_acc_update_odr() scans this
+ * table backward and picks the slowest ODR that still satisfies the
+ * requested poll interval (e.g. <= 1 ms -> 1250 Hz).
+ */
+struct {
+	unsigned int cutoff_ms;
+	unsigned int mask;
+} lis3dh_acc_odr_table[] = {
+	{ 1, ODR1250 },
+	{ 3, ODR400 },
+	{ 5, ODR200 },
+	{ 10, ODR100 },
+	{ 20, ODR50 },
+	{ 40, ODR25 },
+	{ 100, ODR10 },
+	{ 1000, ODR1 },
+};
+
+/* Per-device driver state. */
+struct lis3dh_acc_data {
+	struct i2c_client *client;
+	struct lis3dh_acc_platform_data *pdata;
+
+	/* Serializes sysfs access and the polled read path. */
+	struct mutex lock;
+	/* Periodic polling work; rescheduled every pdata->poll_interval ms. */
+	struct delayed_work input_work;
+
+	struct input_dev *input_dev;
+
+	int hw_initialized;
+	/* hw_working=-1 means not tested yet */
+	int hw_working;
+	/* 1 while the sensor is powered and polling. */
+	atomic_t enabled;
+	int on_before_suspend;
+
+	/* mg per LSB for the currently selected g range. */
+	u8 sensitivity;
+
+	/* Register shadow written back to the chip in hw_init. */
+	u8 resume_state[RESUME_ENTRIES];
+
+	int irq1;
+	struct work_struct irq1_work;
+	struct workqueue_struct *irq1_work_queue;
+	int irq2;
+	struct work_struct irq2_work;
+	struct workqueue_struct *irq2_work_queue;
+
+#ifdef DEBUG
+	/* Register address used by the reg_value/reg_addr debug attrs. */
+	u8 reg_addr;
+#endif
+};
+
+/* One supply rail handled by lis3dh_acc_config_regulator(). */
+struct sensor_regulator {
+	struct regulator *vreg;
+	const char *name;
+	u32 min_uV;
+	u32 max_uV;
+};
+
+/* Supplies required by the part: core (vdd) and I/O (vddio). */
+struct sensor_regulator lis3dh_acc_vreg[] = {
+	{NULL, "vdd", 1700000, 3600000},
+	{NULL, "vddio", 1700000, 3600000},
+};
+
+/*
+ * Acquire and enable (on=true) or disable and release (on=false) every
+ * supply in lis3dh_acc_vreg[].
+ *
+ * Note the control flow: the disable path sets i = num_reg and then
+ * deliberately falls through into the error_vdd rollback loop, which
+ * walks the array backward releasing each regulator.  The same loop
+ * unwinds partial acquisition when the enable path fails midway.
+ */
+static int lis3dh_acc_config_regulator(struct lis3dh_acc_data *acc, bool on)
+{
+	int rc = 0, i;
+	int num_reg = sizeof(lis3dh_acc_vreg) / sizeof(struct sensor_regulator);
+
+	if (on) {
+		for (i = 0; i < num_reg; i++) {
+			lis3dh_acc_vreg[i].vreg =
+				regulator_get(&acc->client->dev,
+				lis3dh_acc_vreg[i].name);
+			if (IS_ERR(lis3dh_acc_vreg[i].vreg)) {
+				rc = PTR_ERR(lis3dh_acc_vreg[i].vreg);
+				pr_err("%s:regulator get failed rc=%d\n",
+					__func__, rc);
+				goto error_vdd;
+			}
+
+			/* Only voltage-controllable supplies take a range. */
+			if (regulator_count_voltages(
+				lis3dh_acc_vreg[i].vreg) > 0) {
+				rc = regulator_set_voltage(
+					lis3dh_acc_vreg[i].vreg,
+					lis3dh_acc_vreg[i].min_uV,
+					lis3dh_acc_vreg[i].max_uV);
+				if (rc) {
+					pr_err("%s: set voltage failed rc=%d\n",
+						__func__, rc);
+					regulator_put(lis3dh_acc_vreg[i].vreg);
+					goto error_vdd;
+				}
+			}
+
+			rc = regulator_enable(lis3dh_acc_vreg[i].vreg);
+			if (rc) {
+				pr_err("%s: regulator_enable failed rc =%d\n",
+					__func__, rc);
+				if (regulator_count_voltages(
+					lis3dh_acc_vreg[i].vreg) > 0) {
+					regulator_set_voltage(
+						lis3dh_acc_vreg[i].vreg, 0,
+						lis3dh_acc_vreg[i].max_uV);
+				}
+				regulator_put(lis3dh_acc_vreg[i].vreg);
+				goto error_vdd;
+			}
+		}
+		return rc;
+	} else {
+		/* Disable path: fall through and unwind all supplies. */
+		i = num_reg;
+	}
+
+error_vdd:
+	while (--i >= 0) {
+		if (regulator_count_voltages(lis3dh_acc_vreg[i].vreg) > 0) {
+			regulator_set_voltage(lis3dh_acc_vreg[i].vreg, 0,
+				lis3dh_acc_vreg[i].max_uV);
+		}
+		regulator_disable(lis3dh_acc_vreg[i].vreg);
+		regulator_put(lis3dh_acc_vreg[i].vreg);
+	}
+	return rc;
+}
+
+/*
+ * Read @len bytes from the register whose address is in buf[0].
+ * Write-then-read combined transfer; the result overwrites @buf.
+ * Retries up to I2C_RETRIES times with a short sleep between tries.
+ * Returns 0 on success, -EIO after exhausting retries.
+ */
+static int lis3dh_acc_i2c_read(struct lis3dh_acc_data *acc,
+				u8 *buf, int len)
+{
+	int err;
+	int tries = 0;
+
+	struct i2c_msg msgs[] = {
+		{
+			.addr = acc->client->addr,
+			.flags = acc->client->flags & I2C_M_TEN,
+			.len = 1,
+			.buf = buf,
+		},
+		{
+			.addr = acc->client->addr,
+			.flags = (acc->client->flags & I2C_M_TEN) | I2C_M_RD,
+			.len = len,
+			.buf = buf,
+		},
+	};
+
+	do {
+		err = i2c_transfer(acc->client->adapter, msgs, 2);
+		if (err != 2)
+			msleep_interruptible(I2C_RETRY_DELAY);
+	} while ((err != 2) && (++tries < I2C_RETRIES));
+
+	if (err != 2) {
+		dev_err(&acc->client->dev, "read transfer error\n");
+		err = -EIO;
+	} else {
+		err = 0;
+	}
+
+	return err;
+}
+
+/*
+ * Write @len data bytes to the device; buf[0] holds the register
+ * address and buf[1..len] the payload (hence .len = len + 1).
+ * Retries like the read path.  Returns 0 on success, -EIO on failure.
+ */
+static int lis3dh_acc_i2c_write(struct lis3dh_acc_data *acc, u8 *buf, int len)
+{
+	int err;
+	int tries = 0;
+
+	struct i2c_msg msgs[] = {
+		{
+			.addr = acc->client->addr,
+			.flags = acc->client->flags & I2C_M_TEN,
+			.len = len + 1,
+			.buf = buf,
+		},
+	};
+
+	do {
+		err = i2c_transfer(acc->client->adapter, msgs, 1);
+		if (err != 1)
+			msleep_interruptible(I2C_RETRY_DELAY);
+	} while ((err != 1) && (++tries < I2C_RETRIES));
+
+	if (err != 1) {
+		dev_err(&acc->client->dev, "write transfer error\n");
+		err = -EIO;
+	} else {
+		err = 0;
+	}
+
+	return err;
+}
+
+/*
+ * Bring the chip into a known state: verify WHO_AM_I, then replay the
+ * cached resume_state[] into the control/interrupt/tap registers.
+ * Sets hw_working and hw_initialized flags accordingly.
+ * Returns 0 on success, negative errno (or -1 for an unknown chip).
+ */
+static int lis3dh_acc_hw_init(struct lis3dh_acc_data *acc)
+{
+	int err = -1;
+	u8 buf[7];
+
+	printk(KERN_INFO "%s: hw init start\n", LIS3DH_ACC_DEV_NAME);
+
+	/* Probe the identity register first to confirm the part answers. */
+	buf[0] = WHO_AM_I;
+	err = lis3dh_acc_i2c_read(acc, buf, 1);
+	if (err < 0) {
+		dev_warn(&acc->client->dev,
+		"Error reading WHO_AM_I: is device available/working?\n");
+		goto err_firstread;
+	} else
+		acc->hw_working = 1;
+	if (buf[0] != WHOAMI_LIS3DH_ACC) {
+		dev_err(&acc->client->dev,
+			"device unknown. Expected: 0x%x, Replies: 0x%x\n",
+			WHOAMI_LIS3DH_ACC, buf[0]);
+		err = -1; /* choose the right coded error */
+		goto err_unknown_device;
+	}
+
+	buf[0] = CTRL_REG1;
+	buf[1] = acc->resume_state[RES_CTRL_REG1];
+	err = lis3dh_acc_i2c_write(acc, buf, 1);
+	if (err < 0)
+		goto err_resume_state;
+
+	buf[0] = TEMP_CFG_REG;
+	buf[1] = acc->resume_state[RES_TEMP_CFG_REG];
+	err = lis3dh_acc_i2c_write(acc, buf, 1);
+	if (err < 0)
+		goto err_resume_state;
+
+	buf[0] = FIFO_CTRL_REG;
+	buf[1] = acc->resume_state[RES_FIFO_CTRL_REG];
+	err = lis3dh_acc_i2c_write(acc, buf, 1);
+	if (err < 0)
+		goto err_resume_state;
+
+	/* Tap registers TT_THS..TT_TW written in one auto-increment burst. */
+	buf[0] = (I2C_AUTO_INCREMENT | TT_THS);
+	buf[1] = acc->resume_state[RES_TT_THS];
+	buf[2] = acc->resume_state[RES_TT_LIM];
+	buf[3] = acc->resume_state[RES_TT_TLAT];
+	buf[4] = acc->resume_state[RES_TT_TW];
+	err = lis3dh_acc_i2c_write(acc, buf, 4);
+	if (err < 0)
+		goto err_resume_state;
+	buf[0] = TT_CFG;
+	buf[1] = acc->resume_state[RES_TT_CFG];
+	err = lis3dh_acc_i2c_write(acc, buf, 1);
+	if (err < 0)
+		goto err_resume_state;
+
+	/* Interrupt 1 threshold + duration burst, then its config. */
+	buf[0] = (I2C_AUTO_INCREMENT | INT_THS1);
+	buf[1] = acc->resume_state[RES_INT_THS1];
+	buf[2] = acc->resume_state[RES_INT_DUR1];
+	err = lis3dh_acc_i2c_write(acc, buf, 2);
+	if (err < 0)
+		goto err_resume_state;
+	buf[0] = INT_CFG1;
+	buf[1] = acc->resume_state[RES_INT_CFG1];
+	err = lis3dh_acc_i2c_write(acc, buf, 1);
+	if (err < 0)
+		goto err_resume_state;
+
+	/* CTRL_REG2..CTRL_REG6 in a single auto-increment burst. */
+	buf[0] = (I2C_AUTO_INCREMENT | CTRL_REG2);
+	buf[1] = acc->resume_state[RES_CTRL_REG2];
+	buf[2] = acc->resume_state[RES_CTRL_REG3];
+	buf[3] = acc->resume_state[RES_CTRL_REG4];
+	buf[4] = acc->resume_state[RES_CTRL_REG5];
+	buf[5] = acc->resume_state[RES_CTRL_REG6];
+	err = lis3dh_acc_i2c_write(acc, buf, 5);
+	if (err < 0)
+		goto err_resume_state;
+
+	acc->hw_initialized = 1;
+	printk(KERN_INFO "%s: hw init done\n", LIS3DH_ACC_DEV_NAME);
+	return 0;
+
+err_firstread:
+	acc->hw_working = 0;
+err_unknown_device:
+err_resume_state:
+	acc->hw_initialized = 0;
+	/* NOTE(review): buf[1] may be uninitialized on the first-read
+	 * error path; the value printed there is meaningless. */
+	dev_err(&acc->client->dev, "hw init error 0x%x,0x%x: %d\n", buf[0],
+			buf[1], err);
+	return err;
+}
+
+/*
+ * Soft power-down (CTRL_REG1 <- PM_OFF), mask the device interrupts
+ * and drop the supply regulators.
+ *
+ * NOTE(review): the irqs are disabled unconditionally here and then a
+ * second time under the hw_initialized guard, so disable_irq_nosync()
+ * runs twice per irq; this only balances because power_on mirrors it
+ * with a double enable_irq().  Also note the validity test here is
+ * truthiness (gpio_int1 != 0) while power_on uses gpio_int1 >= 0 —
+ * TODO confirm which convention the platform data intends.
+ */
+static void lis3dh_acc_device_power_off(struct lis3dh_acc_data *acc)
+{
+	int err;
+	u8 buf[2] = { CTRL_REG1, LIS3DH_ACC_PM_OFF };
+
+	err = lis3dh_acc_i2c_write(acc, buf, 1);
+	if (err < 0)
+		dev_err(&acc->client->dev, "soft power off failed: %d\n", err);
+
+	if (acc->pdata->gpio_int1)
+		disable_irq_nosync(acc->irq1);
+	if (acc->pdata->gpio_int2)
+		disable_irq_nosync(acc->irq2);
+
+	lis3dh_acc_config_regulator(acc, false);
+
+	if (acc->hw_initialized) {
+		if (acc->pdata->gpio_int1)
+			disable_irq_nosync(acc->irq1);
+		if (acc->pdata->gpio_int2)
+			disable_irq_nosync(acc->irq2);
+		acc->hw_initialized = 0;
+	}
+}
+
+/*
+ * Enable the supply regulators, unmask interrupts and (re)initialize
+ * the chip registers if needed.  Returns 0 on success, negative errno.
+ *
+ * NOTE(review): irqs are enabled once before hw init and a second time
+ * after it — this mirrors the double disable in power_off and keeps
+ * the enable/disable depth balanced; changing either function alone
+ * would unbalance the irq depth counters.
+ */
+static int lis3dh_acc_device_power_on(struct lis3dh_acc_data *acc)
+{
+	int err = -1;
+
+	err = lis3dh_acc_config_regulator(acc, true);
+	if (err < 0) {
+		dev_err(&acc->client->dev,
+				"power_on failed: %d\n", err);
+		return err;
+	}
+
+	if (acc->pdata->gpio_int1 >= 0)
+		enable_irq(acc->irq1);
+	if (acc->pdata->gpio_int2 >= 0)
+		enable_irq(acc->irq2);
+
+	/* Give the supplies time to settle before touching registers. */
+	msleep(20);
+
+	if (!acc->hw_initialized) {
+		err = lis3dh_acc_hw_init(acc);
+		if (acc->hw_working == 1 && err < 0) {
+			lis3dh_acc_device_power_off(acc);
+			return err;
+		}
+	}
+
+	if (acc->hw_initialized) {
+		if (acc->pdata->gpio_int1 >= 0)
+			enable_irq(acc->irq1);
+		if (acc->pdata->gpio_int2 >= 0)
+			enable_irq(acc->irq2);
+	}
+	return 0;
+}
+
+/*
+ * Hard-irq handler for INT1: mask the line and defer the real work to
+ * irq1_work_queue; the work function re-enables the irq when done.
+ */
+static irqreturn_t lis3dh_acc_isr1(int irq, void *dev)
+{
+	struct lis3dh_acc_data *acc = dev;
+
+	disable_irq_nosync(irq);
+	queue_work(acc->irq1_work_queue, &acc->irq1_work);
+#ifdef DEBUG
+	printk(KERN_INFO "%s: isr1 queued\n", LIS3DH_ACC_DEV_NAME);
+#endif
+	return IRQ_HANDLED;
+}
+
+/*
+ * Hard-irq handler for INT2: same deferral pattern as isr1, using
+ * irq2_work_queue.
+ */
+static irqreturn_t lis3dh_acc_isr2(int irq, void *dev)
+{
+	struct lis3dh_acc_data *acc = dev;
+
+	disable_irq_nosync(irq);
+	queue_work(acc->irq2_work_queue, &acc->irq2_work);
+#ifdef DEBUG
+	printk(KERN_INFO "%s: isr2 queued\n", LIS3DH_ACC_DEV_NAME);
+#endif
+	return IRQ_HANDLED;
+}
+
+/*
+ * Bottom half for INT1.  Currently only logs the event; the real
+ * service routine is still TODO (e.g. lis3dh_acc_get_int1_source()).
+ * Always re-enables the irq that the isr masked.
+ */
+static void lis3dh_acc_irq1_work_func(struct work_struct *work)
+{
+	struct lis3dh_acc_data *acc =
+		container_of(work, struct lis3dh_acc_data, irq1_work);
+
+	printk(KERN_INFO "%s: IRQ1 triggered\n", LIS3DH_ACC_DEV_NAME);
+	enable_irq(acc->irq1);
+}
+
+/*
+ * Bottom half for INT2 (tap interrupt).  Only logs for now; the real
+ * service routine is still TODO (e.g. lis3dh_acc_get_tap_source()).
+ * Always re-enables the irq that the isr masked.
+ */
+static void lis3dh_acc_irq2_work_func(struct work_struct *work)
+{
+	struct lis3dh_acc_data *acc =
+		container_of(work, struct lis3dh_acc_data, irq2_work);
+
+	printk(KERN_INFO "%s: IRQ2 triggered\n", LIS3DH_ACC_DEV_NAME);
+	enable_irq(acc->irq2);
+}
+
+/*
+ * Select a new full-scale range (one of LIS3DH_ACC_G_*) and the
+ * matching mg/LSB sensitivity.  The CTRL_REG4 write only happens while
+ * the device is enabled; when it is disabled nothing fails, so return
+ * 0 instead of the stale -1 the old code leaked on that path.
+ * Returns 0 on success, -EINVAL for an unknown range, or the I2C error.
+ */
+int lis3dh_acc_update_g_range(struct lis3dh_acc_data *acc, u8 new_g_range)
+{
+	int err = 0;
+
+	u8 sensitivity;
+	u8 buf[2];
+	u8 updated_val;
+	u8 init_val;
+	u8 new_val;
+	u8 mask = LIS3DH_ACC_FS_MASK | HIGH_RESOLUTION;
+
+	switch (new_g_range) {
+	case LIS3DH_ACC_G_2G:
+		sensitivity = SENSITIVITY_2G;
+		break;
+	case LIS3DH_ACC_G_4G:
+		sensitivity = SENSITIVITY_4G;
+		break;
+	case LIS3DH_ACC_G_8G:
+		sensitivity = SENSITIVITY_8G;
+		break;
+	case LIS3DH_ACC_G_16G:
+		sensitivity = SENSITIVITY_16G;
+		break;
+	default:
+		dev_err(&acc->client->dev, "invalid g range requested: %u\n",
+				new_g_range);
+		return -EINVAL;
+	}
+
+	if (atomic_read(&acc->enabled)) {
+		/* Read-modify-write CTRL_REG4, which holds the FS bits
+		 * and the high-resolution enable. */
+		buf[0] = CTRL_REG4;
+		err = lis3dh_acc_i2c_read(acc, buf, 1);
+		if (err < 0)
+			goto error;
+		init_val = buf[0];
+		acc->resume_state[RES_CTRL_REG4] = init_val;
+		new_val = new_g_range | HIGH_RESOLUTION;
+		updated_val = ((mask & new_val) | ((~mask) & init_val));
+		buf[1] = updated_val;
+		buf[0] = CTRL_REG4;
+		err = lis3dh_acc_i2c_write(acc, buf, 1);
+		if (err < 0)
+			goto error;
+		acc->resume_state[RES_CTRL_REG4] = updated_val;
+		acc->sensitivity = sensitivity;
+	}
+
+	return err;
+error:
+	dev_err(&acc->client->dev, "update g range failed 0x%x,0x%x: %d\n",
+			buf[0], buf[1], err);
+
+	return err;
+}
+
+/*
+ * Pick the slowest output data rate that still satisfies
+ * poll_interval_ms and program CTRL_REG1 with it (all axes enabled).
+ * The register write only happens while the device is enabled; when it
+ * is disabled nothing fails, so return 0 instead of the stale -1 the
+ * old code leaked on that path.
+ */
+int lis3dh_acc_update_odr(struct lis3dh_acc_data *acc, int poll_interval_ms)
+{
+	int err = 0;
+	int i;
+	u8 config[2];
+
+	/* Scan the odr table from the shortest cutoff backward and stop
+	 * at the longest interval that still covers the requested poll
+	 * interval. */
+	for (i = ARRAY_SIZE(lis3dh_acc_odr_table) - 1; i >= 0; i--) {
+		if (lis3dh_acc_odr_table[i].cutoff_ms <= poll_interval_ms)
+			break;
+	}
+	config[1] = lis3dh_acc_odr_table[i].mask;
+
+	config[1] |= LIS3DH_ACC_ENABLE_ALL_AXES;
+
+	/* If device is currently enabled, we need to write new
+	 * configuration out to it */
+	if (atomic_read(&acc->enabled)) {
+		config[0] = CTRL_REG1;
+		err = lis3dh_acc_i2c_write(acc, config, 1);
+		if (err < 0)
+			goto error;
+		acc->resume_state[RES_CTRL_REG1] = config[1];
+	}
+
+	return err;
+
+error:
+	dev_err(&acc->client->dev, "update odr failed 0x%x,0x%x: %d\n",
+			config[0], config[1], err);
+
+	return err;
+}
+
+
+
+/*
+ * Straight overwrite of the register at reg_address with new_value,
+ * using the caller-supplied scratch buffer.  Returns the I2C result.
+ */
+static int lis3dh_acc_register_write(struct lis3dh_acc_data *acc, u8 *buf,
+		u8 reg_address, u8 new_value)
+{
+	buf[0] = reg_address;
+	buf[1] = new_value;
+	return lis3dh_acc_i2c_write(acc, buf, 1);
+}
+
+/*
+ * Burst-read the six output registers starting at OUT_X_L and convert
+ * them into the caller's xyz[3] (in mg), applying the platform axis
+ * remap and negation.  The >> 4 arithmetic shift assumes left-justified
+ * 12-bit samples — consistent with the HIGH_RESOLUTION mode this driver
+ * configures; TODO confirm for other operating modes.
+ * Returns 0 on success or the I2C error code.
+ */
+static int lis3dh_acc_get_acceleration_data(struct lis3dh_acc_data *acc,
+		int *xyz)
+{
+	int err = -1;
+	/* Data bytes from hardware xL, xH, yL, yH, zL, zH */
+	u8 acc_data[6];
+	/* x,y,z hardware data */
+	s16 hw_d[3] = { 0 };
+
+	acc_data[0] = (I2C_AUTO_INCREMENT | AXISDATA_REG);
+	err = lis3dh_acc_i2c_read(acc, acc_data, 6);
+	if (err < 0)
+		return err;
+
+	hw_d[0] = (((s16) ((acc_data[1] << 8) | acc_data[0])) >> 4);
+	hw_d[1] = (((s16) ((acc_data[3] << 8) | acc_data[2])) >> 4);
+	hw_d[2] = (((s16) ((acc_data[5] << 8) | acc_data[4])) >> 4);
+
+	/* Scale raw counts to mg using the current range's sensitivity. */
+	hw_d[0] = hw_d[0] * acc->sensitivity;
+	hw_d[1] = hw_d[1] * acc->sensitivity;
+	hw_d[2] = hw_d[2] * acc->sensitivity;
+
+	/* Apply the board-specific axis mapping and sign flips. */
+	xyz[0] = ((acc->pdata->negate_x) ? (-hw_d[acc->pdata->axis_map_x])
+		   : (hw_d[acc->pdata->axis_map_x]));
+	xyz[1] = ((acc->pdata->negate_y) ? (-hw_d[acc->pdata->axis_map_y])
+		   : (hw_d[acc->pdata->axis_map_y]));
+	xyz[2] = ((acc->pdata->negate_z) ? (-hw_d[acc->pdata->axis_map_z])
+		   : (hw_d[acc->pdata->axis_map_z]));
+
+	#ifdef DEBUG
+	/*
+	printk(KERN_INFO "%s read x=%d, y=%d, z=%d\n",
+			LIS3DH_ACC_DEV_NAME, xyz[0], xyz[1], xyz[2]);
+	*/
+	#endif
+	return err;
+}
+
+/* Push one xyz sample (in mg) to the input subsystem and sync. */
+static void lis3dh_acc_report_values(struct lis3dh_acc_data *acc,
+					int *xyz)
+{
+	input_report_abs(acc->input_dev, ABS_X, xyz[0]);
+	input_report_abs(acc->input_dev, ABS_Y, xyz[1]);
+	input_report_abs(acc->input_dev, ABS_Z, xyz[2]);
+	input_sync(acc->input_dev);
+}
+
+/*
+ * Transition 0 -> 1 on acc->enabled (atomically, so concurrent callers
+ * race safely): power the device on and start the polling work.
+ * A failed power-on rolls the flag back.  Idempotent when already on.
+ */
+static int lis3dh_acc_enable(struct lis3dh_acc_data *acc)
+{
+	int err;
+
+	if (!atomic_cmpxchg(&acc->enabled, 0, 1)) {
+		err = lis3dh_acc_device_power_on(acc);
+		if (err < 0) {
+			atomic_set(&acc->enabled, 0);
+			return err;
+		}
+		schedule_delayed_work(&acc->input_work,
+			msecs_to_jiffies(acc->pdata->poll_interval));
+	}
+
+	return 0;
+}
+
+/*
+ * Transition 1 -> 0 on acc->enabled: stop the polling work (waiting for
+ * a running instance) and power the device off.  Idempotent when off.
+ */
+static int lis3dh_acc_disable(struct lis3dh_acc_data *acc)
+{
+	if (atomic_cmpxchg(&acc->enabled, 1, 0)) {
+		cancel_delayed_work_sync(&acc->input_work);
+		lis3dh_acc_device_power_off(acc);
+	}
+
+	return 0;
+}
+
+
+/*
+ * sysfs helper: read one register and format it as "0xNN\n".
+ * Fix: the old snprintf size of 4 truncated the output — the format
+ * produces 5 characters plus the NUL, so at least 6 bytes are needed.
+ * Use 8 to match attr_reg_get().
+ * Returns the number of characters formatted, or the I2C error.
+ */
+static ssize_t read_single_reg(struct device *dev, char *buf, u8 reg)
+{
+	ssize_t ret;
+	struct lis3dh_acc_data *acc = dev_get_drvdata(dev);
+	int err;
+
+	u8 data = reg;
+	err = lis3dh_acc_i2c_read(acc, &data, 1);
+	if (err < 0)
+		return err;
+	ret = snprintf(buf, 8, "0x%02x\n", data);
+	return ret;
+}
+
+/*
+ * sysfs helper: parse a hex value from @buf, mask it, write it to @reg
+ * and mirror it into resume_state[resumeIndex] so hw_init replays it.
+ * Returns 0 on success, -EINVAL on parse failure, or the I2C error.
+ */
+static int write_reg(struct device *dev, const char *buf, u8 reg,
+		u8 mask, int resumeIndex)
+{
+	int err = -1;
+	struct lis3dh_acc_data *acc = dev_get_drvdata(dev);
+	u8 x[2];
+	u8 new_val;
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	new_val = ((u8) val & mask);
+	x[0] = reg;
+	x[1] = new_val;
+	err = lis3dh_acc_register_write(acc, x, reg, new_val);
+	if (err < 0)
+		return err;
+	acc->resume_state[resumeIndex] = new_val;
+	return err;
+}
+
+/* Show the current poll interval in milliseconds. */
+static ssize_t attr_get_polling_rate(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	int val;
+	struct lis3dh_acc_data *acc = dev_get_drvdata(dev);
+	mutex_lock(&acc->lock);
+	val = acc->pdata->poll_interval;
+	mutex_unlock(&acc->lock);
+	return snprintf(buf, 8, "%d\n", val);
+}
+
+/*
+ * Store a new poll interval (ms, decimal, nonzero) and retune the ODR.
+ * NOTE(review): the lis3dh_acc_update_odr() return value is ignored, so
+ * an I2C failure here is silent.
+ */
+static ssize_t attr_set_polling_rate(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	struct lis3dh_acc_data *acc = dev_get_drvdata(dev);
+	unsigned long interval_ms;
+
+	if (kstrtoul(buf, 10, &interval_ms))
+		return -EINVAL;
+	if (!interval_ms)
+		return -EINVAL;
+	mutex_lock(&acc->lock);
+	acc->pdata->poll_interval = interval_ms;
+	lis3dh_acc_update_odr(acc, interval_ms);
+	mutex_unlock(&acc->lock);
+	return size;
+}
+
+/*
+ * Show the current full-scale range in g (2/4/8/16), translated from
+ * the cached LIS3DH_ACC_G_* register constant.
+ */
+static ssize_t attr_get_range(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	char val;
+	struct lis3dh_acc_data *acc = dev_get_drvdata(dev);
+	char range = 2;
+	mutex_lock(&acc->lock);
+	val = acc->pdata->g_range ;
+	switch (val) {
+	case LIS3DH_ACC_G_2G:
+		range = 2;
+		break;
+	case LIS3DH_ACC_G_4G:
+		range = 4;
+		break;
+	case LIS3DH_ACC_G_8G:
+		range = 8;
+		break;
+	case LIS3DH_ACC_G_16G:
+		range = 16;
+		break;
+	}
+	mutex_unlock(&acc->lock);
+	return snprintf(buf, 4, "%d\n", range);
+}
+
+/*
+ * Store a new full-scale range.
+ * NOTE(review): asymmetric with attr_get_range — the show side prints
+ * 2/4/8/16 (g) but this store side passes the raw value straight to
+ * lis3dh_acc_update_g_range(), which expects the LIS3DH_ACC_G_*
+ * register constants.  Also pdata->g_range is updated before the
+ * hardware update is validated, so an invalid value is still cached.
+ */
+static ssize_t attr_set_range(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t size)
+{
+	struct lis3dh_acc_data *acc = dev_get_drvdata(dev);
+	unsigned long val;
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+	mutex_lock(&acc->lock);
+	acc->pdata->g_range = val;
+	lis3dh_acc_update_g_range(acc, val);
+	mutex_unlock(&acc->lock);
+	return size;
+}
+
+/* Show 1 while the sensor is enabled, 0 otherwise. */
+static ssize_t attr_get_enable(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct lis3dh_acc_data *acc = dev_get_drvdata(dev);
+	int val = atomic_read(&acc->enabled);
+	return snprintf(buf, sizeof(val) + 2, "%d\n", val);
+}
+
+/* Enable (nonzero) or disable (0) the sensor from sysfs. */
+static ssize_t attr_set_enable(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t size)
+{
+	struct lis3dh_acc_data *acc = dev_get_drvdata(dev);
+	unsigned long val;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	if (val)
+		lis3dh_acc_enable(acc);
+	else
+		lis3dh_acc_disable(acc);
+
+	return size;
+}
+
+/*
+ * Thin sysfs wrappers: each store delegates to write_reg() with the
+ * register, its writable-bits mask and resume_state slot; each show
+ * delegates to read_single_reg().
+ */
+
+/* INT1 configuration register. */
+static ssize_t attr_set_intconfig1(struct device *dev,
+		struct device_attribute *attr,	const char *buf, size_t size)
+{
+	return write_reg(dev, buf, INT_CFG1, NO_MASK, RES_INT_CFG1);
+}
+
+static ssize_t attr_get_intconfig1(struct device *dev,
+		struct device_attribute *attr,	char *buf)
+{
+	return read_single_reg(dev, buf, INT_CFG1);
+}
+
+/* INT1 duration register (7 writable bits). */
+static ssize_t attr_set_duration1(struct device *dev,
+		struct device_attribute *attr,	const char *buf, size_t size)
+{
+	return write_reg(dev, buf, INT_DUR1, INT1_DURATION_MASK, RES_INT_DUR1);
+}
+
+static ssize_t attr_get_duration1(struct device *dev,
+		struct device_attribute *attr,	char *buf)
+{
+	return read_single_reg(dev, buf, INT_DUR1);
+}
+
+/* INT1 threshold register (7 writable bits). */
+static ssize_t attr_set_thresh1(struct device *dev,
+		struct device_attribute *attr,	const char *buf, size_t size)
+{
+	return write_reg(dev, buf, INT_THS1, INT1_THRESHOLD_MASK, RES_INT_THS1);
+}
+
+static ssize_t attr_get_thresh1(struct device *dev,
+		struct device_attribute *attr,	char *buf)
+{
+	return read_single_reg(dev, buf, INT_THS1);
+}
+
+/* INT1 source register (read-only). */
+static ssize_t attr_get_source1(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return read_single_reg(dev, buf, INT_SRC1);
+}
+
+/* Tap/click configuration register. */
+static ssize_t attr_set_click_cfg(struct device *dev,
+		struct device_attribute *attr,	const char *buf, size_t size)
+{
+	return write_reg(dev, buf, TT_CFG, TAP_CFG_MASK, RES_TT_CFG);
+}
+
+static ssize_t attr_get_click_cfg(struct device *dev,
+		struct device_attribute *attr,	char *buf)
+{
+	return read_single_reg(dev, buf, TT_CFG);
+}
+
+/* Tap/click source register (read-only). */
+static ssize_t attr_get_click_source(struct device *dev,
+		struct device_attribute *attr,	char *buf)
+{
+	return read_single_reg(dev, buf, TT_SRC);
+}
+
+/* Tap/click threshold register. */
+static ssize_t attr_set_click_ths(struct device *dev,
+		struct device_attribute *attr,	const char *buf, size_t size)
+{
+	return write_reg(dev, buf, TT_THS, TAP_THS_MASK, RES_TT_THS);
+}
+
+static ssize_t attr_get_click_ths(struct device *dev,
+		struct device_attribute *attr,	char *buf)
+{
+	return read_single_reg(dev, buf, TT_THS);
+}
+
+/* Tap/click time-limit register. */
+static ssize_t attr_set_click_tlim(struct device *dev,
+		struct device_attribute *attr,	const char *buf, size_t size)
+{
+	return write_reg(dev, buf, TT_LIM, TAP_TLIM_MASK, RES_TT_LIM);
+}
+
+static ssize_t attr_get_click_tlim(struct device *dev,
+		struct device_attribute *attr,	char *buf)
+{
+	return read_single_reg(dev, buf, TT_LIM);
+}
+
+/* Tap/click time-latency register. */
+static ssize_t attr_set_click_tlat(struct device *dev,
+		struct device_attribute *attr,	const char *buf, size_t size)
+{
+	return write_reg(dev, buf, TT_TLAT, TAP_TLAT_MASK, RES_TT_TLAT);
+}
+
+static ssize_t attr_get_click_tlat(struct device *dev,
+		struct device_attribute *attr,	char *buf)
+{
+	return read_single_reg(dev, buf, TT_TLAT);
+}
+
+/*
+ * Tap/click time-window register.
+ * Fix copy/paste bug: this attribute must write TT_TW (and cache it at
+ * RES_TT_TW), not TT_TLAT/RES_TT_TLAT as the original code did.
+ */
+static ssize_t attr_set_click_tw(struct device *dev,
+		struct device_attribute *attr,	const char *buf, size_t size)
+{
+	return write_reg(dev, buf, TT_TW, TAP_TW_MASK, RES_TT_TW);
+}
+
+/*
+ * Show the tap/click time-window register.
+ * Fix copy/paste bug: read TT_TW (time window), not TT_TLAT (latency).
+ */
+static ssize_t attr_get_click_tw(struct device *dev,
+		struct device_attribute *attr,	char *buf)
+{
+	return read_single_reg(dev, buf, TT_TW);
+}
+
+
+#ifdef DEBUG
+/* PAY ATTENTION: These DEBUG functions don't manage resume_state */
+
+/* Write a hex value to the register selected via the reg_addr attr. */
+static ssize_t attr_reg_set(struct device *dev, struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	int rc;
+	struct lis3dh_acc_data *acc = dev_get_drvdata(dev);
+	u8 x[2];
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	mutex_lock(&acc->lock);
+	x[0] = acc->reg_addr;
+	mutex_unlock(&acc->lock);
+	x[1] = val;
+	rc = lis3dh_acc_i2c_write(acc, x, 1);
+	/*TODO: error need to be managed */
+	return size;
+}
+
+/* Read the register selected via the reg_addr attr as "0xNN\n". */
+static ssize_t attr_reg_get(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	ssize_t ret;
+	struct lis3dh_acc_data *acc = dev_get_drvdata(dev);
+	int rc;
+	u8 data;
+
+	mutex_lock(&acc->lock);
+	data = acc->reg_addr;
+	mutex_unlock(&acc->lock);
+	rc = lis3dh_acc_i2c_read(acc, &data, 1);
+	/* TODO: error need to be managed */
+	ret = snprintf(buf, 8, "0x%02x\n", data);
+	return ret;
+}
+
+/* Select the register (hex) that reg_value will subsequently access. */
+static ssize_t attr_addr_set(struct device *dev, struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct lis3dh_acc_data *acc = dev_get_drvdata(dev);
+	unsigned long val;
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	mutex_lock(&acc->lock);
+	acc->reg_addr = val;
+	mutex_unlock(&acc->lock);
+	return size;
+}
+#endif
+
+/*
+ * sysfs attribute table, registered by create_sysfs_interfaces().
+ * NOTE(review): 0666 makes these world-writable; modern kernels reject
+ * such permissions on device attributes — confirm policy for this tree.
+ */
+static struct device_attribute attributes[] = {
+
+	__ATTR(pollrate_ms, 0666, attr_get_polling_rate,
+			attr_set_polling_rate),
+	__ATTR(range, 0666, attr_get_range, attr_set_range),
+	__ATTR(enable, 0666, attr_get_enable, attr_set_enable),
+	__ATTR(int1_config, 0666, attr_get_intconfig1, attr_set_intconfig1),
+	__ATTR(int1_duration, 0666, attr_get_duration1, attr_set_duration1),
+	__ATTR(int1_threshold, 0666, attr_get_thresh1, attr_set_thresh1),
+	__ATTR(int1_source, 0444, attr_get_source1, NULL),
+	__ATTR(click_config, 0666, attr_get_click_cfg, attr_set_click_cfg),
+	__ATTR(click_source, 0444, attr_get_click_source, NULL),
+	__ATTR(click_threshold, 0666, attr_get_click_ths, attr_set_click_ths),
+	__ATTR(click_timelimit, 0666, attr_get_click_tlim,
+			attr_set_click_tlim),
+	__ATTR(click_timelatency, 0666, attr_get_click_tlat,
+			attr_set_click_tlat),
+	__ATTR(click_timewindow, 0666, attr_get_click_tw, attr_set_click_tw),
+
+#ifdef DEBUG
+	__ATTR(reg_value, 0666, attr_reg_get, attr_reg_set),
+	__ATTR(reg_addr, 0222, NULL, attr_addr_set),
+#endif
+};
+
+/*
+ * Register every attribute from attributes[] on @dev.  On failure,
+ * remove only the attributes created so far.  Fix: the old rollback
+ * loop (`for ( ; i >= 0; i--)`) also removed attributes[i], the entry
+ * whose device_create_file() had just failed.
+ * Returns 0 on success or the device_create_file() error.
+ */
+static int create_sysfs_interfaces(struct device *dev)
+{
+	int i;
+	int err;
+
+	for (i = 0; i < ARRAY_SIZE(attributes); i++) {
+		err = device_create_file(dev, attributes + i);
+		if (err)
+			goto error;
+	}
+	return 0;
+
+error:
+	while (--i >= 0)
+		device_remove_file(dev, attributes + i);
+	dev_err(dev, "%s:Unable to create interface\n", __func__);
+	return err;
+}
+
+/* Remove every attribute from attributes[] on @dev.  Always returns 0. */
+static int remove_sysfs_interfaces(struct device *dev)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(attributes); i++)
+		device_remove_file(dev, attributes + i);
+	return 0;
+}
+
+/*
+ * Polling work: read one sample, report it to the input layer and
+ * reschedule itself after poll_interval ms.  Read failures are logged
+ * but polling continues.
+ */
+static void lis3dh_acc_input_work_func(struct work_struct *work)
+{
+	struct lis3dh_acc_data *acc;
+
+	int xyz[3] = { 0 };
+	int err;
+
+	acc = container_of((struct delayed_work *)work,
+			struct lis3dh_acc_data, input_work);
+
+	mutex_lock(&acc->lock);
+	err = lis3dh_acc_get_acceleration_data(acc, xyz);
+	if (err < 0)
+		dev_err(&acc->client->dev, "get_acceleration_data failed\n");
+	else
+		lis3dh_acc_report_values(acc, xyz);
+
+	schedule_delayed_work(&acc->input_work, msecs_to_jiffies(
+			acc->pdata->poll_interval));
+	mutex_unlock(&acc->lock);
+}
+
+/* input_dev open callback: power up and start polling. */
+int lis3dh_acc_input_open(struct input_dev *input)
+{
+	struct lis3dh_acc_data *acc = input_get_drvdata(input);
+
+	return lis3dh_acc_enable(acc);
+}
+
+/* input_dev close callback: stop polling and power down. */
+void lis3dh_acc_input_close(struct input_dev *dev)
+{
+	struct lis3dh_acc_data *acc = input_get_drvdata(dev);
+
+	lis3dh_acc_disable(acc);
+}
+
+/*
+ * Sanity-check the copied platform data: clamp the poll interval to the
+ * minimum, and require axis_map_* in 0..2 and negate_* in 0..1.
+ * Returns 0 when valid, -EINVAL otherwise.
+ */
+static int lis3dh_acc_validate_pdata(struct lis3dh_acc_data *acc)
+{
+	acc->pdata->poll_interval = max(acc->pdata->poll_interval,
+			acc->pdata->min_interval);
+
+	if (acc->pdata->axis_map_x > 2 ||
+		acc->pdata->axis_map_y > 2 ||
+		 acc->pdata->axis_map_z > 2) {
+		dev_err(&acc->client->dev,
+			"invalid axis_map value x:%u y:%u z%u\n",
+			acc->pdata->axis_map_x,
+			acc->pdata->axis_map_y, acc->pdata->axis_map_z);
+		return -EINVAL;
+	}
+
+	/* Only allow 0 and 1 for negation boolean flag */
+	if (acc->pdata->negate_x > 1 || acc->pdata->negate_y > 1
+			|| acc->pdata->negate_z > 1) {
+		dev_err(&acc->client->dev,
+			"invalid negate value x:%u y:%u z:%u\n",
+			acc->pdata->negate_x,
+			acc->pdata->negate_y, acc->pdata->negate_z);
+		return -EINVAL;
+	}
+
+	/* Enforce minimum polling interval */
+	/* NOTE(review): dead check — the max() clamp above guarantees
+	 * poll_interval >= min_interval, so this branch never fires. */
+	if (acc->pdata->poll_interval < acc->pdata->min_interval) {
+		dev_err(&acc->client->dev, "minimum poll interval violated\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate and register the input device, wire open/close to
+ * enable/disable, and declare the ABS axes (X/Y/Z for acceleration,
+ * ABS_MISC/ABS_WHEEL reserved for future interrupt source reporting).
+ * Returns 0 on success; frees the device on registration failure.
+ */
+static int lis3dh_acc_input_init(struct lis3dh_acc_data *acc)
+{
+	int err;
+
+	INIT_DELAYED_WORK(&acc->input_work, lis3dh_acc_input_work_func);
+	acc->input_dev = input_allocate_device();
+	if (!acc->input_dev) {
+		err = -ENOMEM;
+		dev_err(&acc->client->dev, "input device allocation failed\n");
+		goto err0;
+	}
+
+	acc->input_dev->open = lis3dh_acc_input_open;
+	acc->input_dev->close = lis3dh_acc_input_close;
+	acc->input_dev->name = LIS3DH_ACC_DEV_NAME;
+	acc->input_dev->id.bustype = BUS_I2C;
+	acc->input_dev->dev.parent = &acc->client->dev;
+
+	input_set_drvdata(acc->input_dev, acc);
+
+	set_bit(EV_ABS, acc->input_dev->evbit);
+	/* next is used for interruptA sources data if the case */
+	set_bit(ABS_MISC, acc->input_dev->absbit);
+	/* next is used for interruptB sources data if the case */
+	set_bit(ABS_WHEEL, acc->input_dev->absbit);
+
+	input_set_abs_params(acc->input_dev, ABS_X, -G_MAX, G_MAX, FUZZ, FLAT);
+	input_set_abs_params(acc->input_dev, ABS_Y, -G_MAX, G_MAX, FUZZ, FLAT);
+	input_set_abs_params(acc->input_dev, ABS_Z, -G_MAX, G_MAX, FUZZ, FLAT);
+	/* next is used for interruptA sources data if the case */
+	input_set_abs_params(acc->input_dev, ABS_MISC, INT_MIN, INT_MAX, 0, 0);
+	/* next is used for interruptB sources data if the case */
+	input_set_abs_params(acc->input_dev, ABS_WHEEL, INT_MIN, INT_MAX, 0, 0);
+
+	err = input_register_device(acc->input_dev);
+	if (err) {
+		dev_err(&acc->client->dev,
+				"unable to register input device %s\n",
+				acc->input_dev->name);
+		goto err1;
+	}
+
+	return 0;
+
+err1:
+	input_free_device(acc->input_dev);
+err0:
+	return err;
+}
+
+/*
+ * lis3dh_acc_input_cleanup() - tear down the input device.
+ *
+ * input_unregister_device() drops the last reference to a device that
+ * was successfully registered and frees it; calling
+ * input_free_device() afterwards (as the previous version did) is a
+ * double free.  Only unregister here.
+ */
+static void lis3dh_acc_input_cleanup(struct lis3dh_acc_data *acc)
+{
+	input_unregister_device(acc->input_dev);
+}
+
+/*
+ * lis3dh_acc_probe() - i2c probe entry point.
+ *
+ * Validates platform data, powers the part up, registers the input
+ * device and sysfs attributes, and optionally sets up the two
+ * interrupt work queues.  Returns 0 on success or a negative errno;
+ * every error label releases exactly what was acquired before it.
+ */
+static int lis3dh_acc_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct lis3dh_acc_data *acc;
+	int err = -1;
+
+	pr_info("%s: probe start.\n", LIS3DH_ACC_DEV_NAME);
+
+	if (client->dev.platform_data == NULL) {
+		dev_err(&client->dev, "platform data is NULL. exiting.\n");
+		err = -ENODEV;
+		goto exit_check_functionality_failed;
+	}
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "client not i2c capable\n");
+		err = -ENODEV;
+		goto exit_check_functionality_failed;
+	}
+
+	acc = kzalloc(sizeof(struct lis3dh_acc_data), GFP_KERNEL);
+	if (acc == NULL) {
+		err = -ENOMEM;
+		dev_err(&client->dev,
+				"failed to allocate memory for module data: "
+				"%d\n", err);
+		goto exit_check_functionality_failed;
+	}
+
+	mutex_init(&acc->lock);
+	mutex_lock(&acc->lock);
+
+	acc->client = client;
+	i2c_set_clientdata(client, acc);
+
+	acc->pdata = kmalloc(sizeof(*acc->pdata), GFP_KERNEL);
+	if (acc->pdata == NULL) {
+		err = -ENOMEM;
+		dev_err(&client->dev,
+				"failed to allocate memory for pdata: %d\n",
+				err);
+		goto err_mutexunlock;
+	}
+
+	memcpy(acc->pdata, client->dev.platform_data, sizeof(*acc->pdata));
+
+	err = lis3dh_acc_validate_pdata(acc);
+	if (err < 0) {
+		dev_err(&client->dev, "failed to validate platform data\n");
+		goto exit_kfree_pdata;
+	}
+
+	if (acc->pdata->init) {
+		err = acc->pdata->init();
+		if (err < 0) {
+			dev_err(&client->dev, "init failed: %d\n", err);
+			goto err_pdata_init;
+		}
+	}
+
+	if (acc->pdata->gpio_int1 >= 0) {
+		acc->irq1 = gpio_to_irq(acc->pdata->gpio_int1);
+		printk(KERN_INFO "%s: %s has set irq1 to irq: %d\n",
+			LIS3DH_ACC_DEV_NAME, __func__, acc->irq1);
+		printk(KERN_INFO "%s: %s has mapped irq1 on gpio: %d\n",
+			LIS3DH_ACC_DEV_NAME, __func__,
+			acc->pdata->gpio_int1);
+	}
+
+	if (acc->pdata->gpio_int2 >= 0) {
+		acc->irq2 = gpio_to_irq(acc->pdata->gpio_int2);
+		printk(KERN_INFO "%s: %s has set irq2 to irq: %d\n",
+			LIS3DH_ACC_DEV_NAME, __func__, acc->irq2);
+		printk(KERN_INFO "%s: %s has mapped irq2 on gpio: %d\n",
+			LIS3DH_ACC_DEV_NAME, __func__,
+			acc->pdata->gpio_int2);
+	}
+
+	/* Clear by byte count (sizeof), not element count (ARRAY_SIZE):
+	 * the two only coincide while resume_state elements are one
+	 * byte wide.
+	 */
+	memset(acc->resume_state, 0, sizeof(acc->resume_state));
+
+	acc->resume_state[RES_CTRL_REG1] = LIS3DH_ACC_ENABLE_ALL_AXES;
+	acc->resume_state[RES_CTRL_REG2] = 0x00;
+	acc->resume_state[RES_CTRL_REG3] = 0x00;
+	acc->resume_state[RES_CTRL_REG4] = 0x00;
+	acc->resume_state[RES_CTRL_REG5] = 0x00;
+	acc->resume_state[RES_CTRL_REG6] = 0x00;
+
+	acc->resume_state[RES_TEMP_CFG_REG] = 0x00;
+	acc->resume_state[RES_FIFO_CTRL_REG] = 0x00;
+	acc->resume_state[RES_INT_CFG1] = 0x00;
+	acc->resume_state[RES_INT_THS1] = 0x00;
+	acc->resume_state[RES_INT_DUR1] = 0x00;
+
+	acc->resume_state[RES_TT_CFG] = 0x00;
+	acc->resume_state[RES_TT_THS] = 0x00;
+	acc->resume_state[RES_TT_LIM] = 0x00;
+	acc->resume_state[RES_TT_TLAT] = 0x00;
+	acc->resume_state[RES_TT_TW] = 0x00;
+
+	err = lis3dh_acc_device_power_on(acc);
+	if (err < 0) {
+		dev_err(&client->dev, "power on failed: %d\n", err);
+		goto err_pdata_init;
+	}
+
+	atomic_set(&acc->enabled, 1);
+
+	err = lis3dh_acc_update_g_range(acc, acc->pdata->g_range);
+	if (err < 0) {
+		dev_err(&client->dev, "update_g_range failed\n");
+		goto err_power_off;
+	}
+
+	err = lis3dh_acc_update_odr(acc, acc->pdata->poll_interval);
+	if (err < 0) {
+		dev_err(&client->dev, "update_odr failed\n");
+		goto err_power_off;
+	}
+
+	err = lis3dh_acc_input_init(acc);
+	if (err < 0) {
+		dev_err(&client->dev, "input init failed\n");
+		goto err_power_off;
+	}
+
+	err = create_sysfs_interfaces(&client->dev);
+	if (err < 0) {
+		dev_err(&client->dev,
+			"device LIS3DH_ACC_DEV_NAME sysfs register failed\n");
+		goto err_input_cleanup;
+	}
+
+	lis3dh_acc_device_power_off(acc);
+
+	/* As default, do not report information */
+	atomic_set(&acc->enabled, 0);
+
+	if (acc->pdata->gpio_int1 >= 0) {
+		INIT_WORK(&acc->irq1_work, lis3dh_acc_irq1_work_func);
+		acc->irq1_work_queue =
+			create_singlethread_workqueue("lis3dh_acc_wq1");
+		if (!acc->irq1_work_queue) {
+			err = -ENOMEM;
+			dev_err(&client->dev,
+				"cannot create work queue1: %d\n", err);
+			goto err_remove_sysfs_int;
+		}
+		err = request_irq(acc->irq1, lis3dh_acc_isr1,
+			IRQF_TRIGGER_RISING, "lis3dh_acc_irq1", acc);
+		if (err < 0) {
+			dev_err(&client->dev, "request irq1 failed: %d\n", err);
+			goto err_destroyworkqueue1;
+		}
+		disable_irq_nosync(acc->irq1);
+	}
+
+	if (acc->pdata->gpio_int2 >= 0) {
+		INIT_WORK(&acc->irq2_work, lis3dh_acc_irq2_work_func);
+		acc->irq2_work_queue =
+			create_singlethread_workqueue("lis3dh_acc_wq2");
+		if (!acc->irq2_work_queue) {
+			err = -ENOMEM;
+			dev_err(&client->dev,
+				"cannot create work queue2: %d\n", err);
+			goto err_free_irq1;
+		}
+		err = request_irq(acc->irq2, lis3dh_acc_isr2,
+			IRQF_TRIGGER_RISING, "lis3dh_acc_irq2", acc);
+		if (err < 0) {
+			dev_err(&client->dev, "request irq2 failed: %d\n", err);
+			goto err_destroyworkqueue2;
+		}
+		disable_irq_nosync(acc->irq2);
+	}
+
+	mutex_unlock(&acc->lock);
+
+	dev_info(&client->dev, "%s: probed\n", LIS3DH_ACC_DEV_NAME);
+
+	return 0;
+
+err_destroyworkqueue2:
+	/* only reachable from inside the gpio_int2 block above */
+	destroy_workqueue(acc->irq2_work_queue);
+err_free_irq1:
+	/* irq1 is only requested when gpio_int1 is valid; the previous
+	 * unconditional free_irq() here freed an IRQ that was never
+	 * requested when only gpio_int2 was configured.
+	 */
+	if (acc->pdata->gpio_int1 >= 0)
+		free_irq(acc->irq1, acc);
+err_destroyworkqueue1:
+	if (acc->pdata->gpio_int1 >= 0)
+		destroy_workqueue(acc->irq1_work_queue);
+err_remove_sysfs_int:
+	remove_sysfs_interfaces(&client->dev);
+err_input_cleanup:
+	lis3dh_acc_input_cleanup(acc);
+err_power_off:
+	lis3dh_acc_device_power_off(acc);
+err_pdata_init:
+	if (acc->pdata->exit)
+		acc->pdata->exit();
+exit_kfree_pdata:
+	kfree(acc->pdata);
+err_mutexunlock:
+	mutex_unlock(&acc->lock);
+	kfree(acc);
+exit_check_functionality_failed:
+	printk(KERN_ERR "%s: Driver Init failed\n", LIS3DH_ACC_DEV_NAME);
+	return err;
+}
+
+/*
+ * lis3dh_acc_remove() - i2c remove: release IRQs, GPIOs and work
+ * queues, unregister the input device, power the part down, run the
+ * platform exit hook and free driver memory.
+ */
+static int __devexit lis3dh_acc_remove(struct i2c_client *client)
+{
+ struct lis3dh_acc_data *acc = i2c_get_clientdata(client);
+
+ if (acc->pdata->gpio_int1 >= 0) {
+ free_irq(acc->irq1, acc);
+ gpio_free(acc->pdata->gpio_int1);
+ destroy_workqueue(acc->irq1_work_queue);
+ }
+
+ if (acc->pdata->gpio_int2 >= 0) {
+ free_irq(acc->irq2, acc);
+ gpio_free(acc->pdata->gpio_int2);
+ destroy_workqueue(acc->irq2_work_queue);
+ }
+
+ lis3dh_acc_input_cleanup(acc);
+ lis3dh_acc_device_power_off(acc);
+ remove_sysfs_interfaces(&client->dev);
+
+ if (acc->pdata->exit)
+ acc->pdata->exit();
+ kfree(acc->pdata);
+ kfree(acc);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/* Re-enable the accelerometer on resume only if it was enabled when
+ * the system went to suspend.
+ */
+static int lis3dh_acc_resume(struct i2c_client *client)
+{
+ struct lis3dh_acc_data *acc = i2c_get_clientdata(client);
+
+ if (acc->on_before_suspend)
+ return lis3dh_acc_enable(acc);
+ return 0;
+}
+
+/* Remember the current enable state, then disable the device for
+ * suspend.
+ */
+static int lis3dh_acc_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct lis3dh_acc_data *acc = i2c_get_clientdata(client);
+
+ acc->on_before_suspend = atomic_read(&acc->enabled);
+ return lis3dh_acc_disable(acc);
+}
+#else
+/* No power management configured: leave the legacy i2c hooks unset. */
+#define lis3dh_acc_suspend NULL
+#define lis3dh_acc_resume NULL
+#endif /* CONFIG_PM */
+
+/* Device-name table used for i2c device matching. */
+static const struct i2c_device_id lis3dh_acc_id[]
+ = { { LIS3DH_ACC_DEV_NAME, 0 }, { }, };
+
+MODULE_DEVICE_TABLE(i2c, lis3dh_acc_id);
+
+static struct i2c_driver lis3dh_acc_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = LIS3DH_ACC_DEV_NAME,
+ },
+ .probe = lis3dh_acc_probe,
+ .remove = __devexit_p(lis3dh_acc_remove),
+ .suspend = lis3dh_acc_suspend,
+ .resume = lis3dh_acc_resume,
+ .id_table = lis3dh_acc_id,
+};
+
+/* Register the driver with the i2c core at module load. */
+static int __init lis3dh_acc_init(void)
+{
+ printk(KERN_INFO "%s accelerometer driver: init\n",
+ LIS3DH_ACC_DEV_NAME);
+ return i2c_add_driver(&lis3dh_acc_driver);
+}
+
+/* Unregister the driver at module unload. */
+static void __exit lis3dh_acc_exit(void)
+{
+#ifdef DEBUG
+ printk(KERN_INFO "%s accelerometer driver exit\n",
+ LIS3DH_ACC_DEV_NAME);
+#endif /* DEBUG */
+ i2c_del_driver(&lis3dh_acc_driver);
+ return;
+}
+
+module_init(lis3dh_acc_init);
+module_exit(lis3dh_acc_exit);
+
+MODULE_DESCRIPTION("lis3dh digital accelerometer sysfs driver");
+MODULE_AUTHOR("Matteo Dameno, Carmine Iascone, Samuel Huo, STMicroelectronics");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/input/touchscreen/msm_ts.c b/drivers/input/touchscreen/msm_ts.c
index eb2e73b..e66120e 100644
--- a/drivers/input/touchscreen/msm_ts.c
+++ b/drivers/input/touchscreen/msm_ts.c
@@ -387,6 +387,7 @@
input_set_capability(ts->input_dev, EV_KEY, BTN_TOUCH);
set_bit(EV_ABS, ts->input_dev->evbit);
+ set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
input_set_abs_params(ts->input_dev, ABS_X, pdata->min_x, pdata->max_x,
0, 0);
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index e59ca17..55639e0 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -415,6 +415,7 @@
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
input_set_abs_params(input_dev, ABS_X, ts->min_x,
ts->max_x, pdata->fuzzx, 0);
diff --git a/drivers/iommu/msm_iommu-v2.c b/drivers/iommu/msm_iommu-v2.c
index 26e967d..28ad0ff 100644
--- a/drivers/iommu/msm_iommu-v2.c
+++ b/drivers/iommu/msm_iommu-v2.c
@@ -51,10 +51,16 @@
if (ret)
goto fail;
- if (drvdata->clk) {
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ if (drvdata->aclk) {
+ ret = clk_prepare_enable(drvdata->aclk);
+ if (ret) {
+ clk_disable_unprepare(drvdata->clk);
clk_disable_unprepare(drvdata->pclk);
+ }
}
fail:
return ret;
@@ -62,11 +68,23 @@
static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
- if (drvdata->clk)
- clk_disable_unprepare(drvdata->clk);
+ if (drvdata->aclk)
+ clk_disable_unprepare(drvdata->aclk);
+ clk_disable_unprepare(drvdata->clk);
clk_disable_unprepare(drvdata->pclk);
}
+/* Issue a TLB sync for context bank @ctx and busy-wait until the
+ * hardware clears TLBSTATUS.SACTIVE, i.e. the invalidation completed.
+ */
+static void __sync_tlb(void __iomem *base, int ctx)
+{
+ SET_TLBSYNC(base, ctx, 0);
+
+ /* No barrier needed due to register proximity */
+ while (GET_CB_TLBSTATUS_SACTIVE(base, ctx))
+ cpu_relax();
+
+ /* No barrier needed due to read dependency */
+}
+
static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
{
struct msm_priv *priv = domain->priv;
@@ -92,6 +110,7 @@
SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
asid | (va & CB_TLBIVA_VA));
mb();
+ __sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
__disable_clocks(iommu_drvdata);
}
fail:
@@ -121,6 +140,7 @@
SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
mb();
+ __sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
__disable_clocks(iommu_drvdata);
}
diff --git a/drivers/iommu/msm_iommu_dev-v2.c b/drivers/iommu/msm_iommu_dev-v2.c
index d3a088a..8c26f95 100644
--- a/drivers/iommu/msm_iommu_dev-v2.c
+++ b/drivers/iommu/msm_iommu_dev-v2.c
@@ -69,7 +69,7 @@
{
struct msm_iommu_drvdata *drvdata;
struct resource *r;
- int ret;
+ int ret, needs_alt_core_clk;
if (msm_iommu_root_dev == pdev)
return 0;
@@ -93,55 +93,42 @@
if (IS_ERR(drvdata->gdsc))
return -EINVAL;
- drvdata->pclk = clk_get(&pdev->dev, "iface_clk");
+ drvdata->pclk = devm_clk_get(&pdev->dev, "iface_clk");
if (IS_ERR(drvdata->pclk))
return PTR_ERR(drvdata->pclk);
- ret = clk_prepare_enable(drvdata->pclk);
- if (ret)
- goto fail_enable;
+ drvdata->clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(drvdata->clk))
+ return PTR_ERR(drvdata->clk);
- drvdata->clk = clk_get(&pdev->dev, "core_clk");
- if (!IS_ERR(drvdata->clk)) {
- if (clk_get_rate(drvdata->clk) == 0) {
- ret = clk_round_rate(drvdata->clk, 1);
- clk_set_rate(drvdata->clk, ret);
- }
+ needs_alt_core_clk = of_property_read_bool(pdev->dev.of_node,
+ "qcom,needs-alt-core-clk");
+ if (needs_alt_core_clk) {
+ drvdata->aclk = devm_clk_get(&pdev->dev, "alt_core_clk");
+ if (IS_ERR(drvdata->aclk))
+ return PTR_ERR(drvdata->aclk);
+ }
- ret = clk_prepare_enable(drvdata->clk);
- if (ret) {
- clk_put(drvdata->clk);
- goto fail_pclk;
- }
- } else
- drvdata->clk = NULL;
+ if (clk_get_rate(drvdata->clk) == 0) {
+ ret = clk_round_rate(drvdata->clk, 1);
+ clk_set_rate(drvdata->clk, ret);
+ }
+
+ if (drvdata->aclk && clk_get_rate(drvdata->aclk) == 0) {
+ ret = clk_round_rate(drvdata->aclk, 1);
+ clk_set_rate(drvdata->aclk, ret);
+ }
ret = msm_iommu_parse_dt(pdev, drvdata);
if (ret)
- goto fail_clk;
+ return ret;
pr_info("device %s mapped at %p, with %d ctx banks\n",
drvdata->name, drvdata->base, drvdata->ncb);
platform_set_drvdata(pdev, drvdata);
- if (drvdata->clk)
- clk_disable_unprepare(drvdata->clk);
-
- clk_disable_unprepare(drvdata->pclk);
-
return 0;
-
-fail_clk:
- if (drvdata->clk) {
- clk_disable_unprepare(drvdata->clk);
- clk_put(drvdata->clk);
- }
-fail_pclk:
- clk_disable_unprepare(drvdata->pclk);
-fail_enable:
- clk_put(drvdata->pclk);
- return ret;
}
static int __devexit msm_iommu_remove(struct platform_device *pdev)
@@ -192,7 +179,7 @@
*/
ctx_drvdata->num = ((r->start - rp.start) >> CTX_SHIFT) - 8;
- if (of_property_read_string(pdev->dev.of_node, "qcom,iommu-ctx-name",
+ if (of_property_read_string(pdev->dev.of_node, "label",
&ctx_drvdata->name))
ctx_drvdata->name = dev_name(&pdev->dev);
@@ -232,7 +219,7 @@
ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata);
if (!ret)
dev_info(&pdev->dev, "context %s using bank %d\n",
- dev_name(&pdev->dev), ctx_drvdata->num);
+ ctx_drvdata->name, ctx_drvdata->num);
return ret;
}
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index d49bfa6..9240605 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -181,6 +181,20 @@
To compile this driver as a module, choose M here: the
module will be called leds-pmic-mpp.
+config LEDS_MSM_TRICOLOR
+ tristate "LED Support for Qualcomm tricolor LEDs"
+ depends on LEDS_CLASS && MSM_SMD
+ help
+ This option enables support for tricolor LEDs connected to
+ Qualcomm reference boards. Red, green and blue LEDs are
+ supported. These LEDs are turned on/off and blink on/off
+ by the modem upon receiving commands over RPC from this driver.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called leds-msm-tricolor.
+
config LEDS_GPIO_PLATFORM
bool "Platform device bindings for GPIO LEDs"
depends on LEDS_GPIO
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index aa518d4..8edd465 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -52,6 +52,7 @@
obj-$(CONFIG_LEDS_PMIC_MPP) += leds-pmic-mpp.o
obj-$(CONFIG_LEDS_QCIBL) += leds-qci-backlight.o
obj-$(CONFIG_LEDS_MSM_PDM) += leds-msm-pdm.o
+obj-$(CONFIG_LEDS_MSM_TRICOLOR) += leds-msm-tricolor.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/leds-msm-tricolor.c b/drivers/leds/leds-msm-tricolor.c
new file mode 100644
index 0000000..d0715ce
--- /dev/null
+++ b/drivers/leds/leds-msm-tricolor.c
@@ -0,0 +1,410 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+
+#include <linux/leds-msm-tricolor.h>
+#include <mach/msm_rpcrouter.h>
+
+#define LED_RPC_PROG 0x30000091
+#define LED_RPC_VER 0x00030001
+
+#define LED_SUBSCRIBE_PROC 0x03
+#define LED_SUBS_RCV_EVNT 0x01
+#define LED_SUBS_REGISTER 0x00
+#define LED_EVNT_CLASS_ALL 0x00
+#define LINUX_HOST 0x04
+#define LED_CMD_PROC 0x02
+#define TRICOLOR_LED_ID 0x0A
+
+/* Command values sent to the modem; each entry selects which LED(s)
+ * change state and whether the change is solid on/off or blink.
+ */
+enum tricolor_led_status {
+ ALL_OFF,
+ ALL_ON,
+ BLUE_ON,
+ BLUE_OFF,
+ RED_ON,
+ RED_OFF,
+ GREEN_ON,
+ GREEN_OFF,
+ BLUE_BLINK,
+ RED_BLINK,
+ GREEN_BLINK,
+ BLUE_BLINK_OFF,
+ RED_BLINK_OFF,
+ GREEN_BLINK_OFF,
+ LED_MAX,
+};
+
+/* Wire layout of the LED command RPC request (fields big-endian). */
+struct led_cmd_data_type {
+ u32 cmd_data_type_ptr; /* cmd_data_type ptr */
+ u32 ver; /* version */
+ u32 id; /* command id */
+ u32 handle; /* handle returned from subscribe proc */
+ u32 disc_id1; /* discriminator id */
+ u32 input_ptr; /* input ptr length */
+ u32 input_val; /* command specific data */
+ u32 input_len; /* length of command input */
+ u32 disc_id2; /* discriminator id */
+ u32 output_len; /* length of output data */
+ u32 delayed; /* execution context for modem */
+};
+
+/* Wire layout of the event-subscription RPC request. */
+struct led_subscribe_req {
+ u32 subs_ptr; /* subscribe ptr */
+ u32 ver; /* version */
+ u32 srvc; /* command or event */
+ u32 req; /* subscribe or unsubscribe */
+ u32 host_os; /* host operating system */
+ u32 disc_id; /* discriminator id */
+ u32 event; /* event */
+ u32 cb_id; /* callback id */
+ u32 handle_ptr; /* handle ptr */
+ u32 handle_data; /* handle data */
+};
+
+/* Per-LED driver state; rpc_client is shared by all LEDs of a board. */
+struct tricolor_led_data {
+ struct led_classdev cdev;
+ struct msm_rpc_client *rpc_client;
+ bool blink_status;
+ struct mutex lock;
+ u8 color;
+};
+
+/* NOTE(review): points at the marshalling buffer filled in
+ * led_rpc_register_subs_arg(); later commands read handle_data through
+ * it -- confirm the RPC layer keeps that buffer alive.
+ */
+static struct led_subscribe_req *led_subs_req;
+
+/* Marshal one LED command into the RPC request buffer; the status
+ * value passed via @data selects the LED operation.  Returns the
+ * number of bytes written.
+ */
+static int led_send_cmd_arg(struct msm_rpc_client *client,
+ void *buffer, void *data)
+{
+ struct led_cmd_data_type *led_cmd = buffer;
+ enum tricolor_led_status status = *(enum tricolor_led_status *) data;
+
+ led_cmd->cmd_data_type_ptr = cpu_to_be32(0x01);
+ led_cmd->ver = cpu_to_be32(0x03);
+ led_cmd->id = cpu_to_be32(TRICOLOR_LED_ID);
+ led_cmd->handle = cpu_to_be32(led_subs_req->handle_data);
+ led_cmd->disc_id1 = cpu_to_be32(TRICOLOR_LED_ID);
+ led_cmd->input_ptr = cpu_to_be32(0x01);
+ led_cmd->input_val = cpu_to_be32(status);
+ led_cmd->input_len = cpu_to_be32(0x01);
+ led_cmd->disc_id2 = cpu_to_be32(TRICOLOR_LED_ID);
+ led_cmd->output_len = cpu_to_be32(0x00);
+ led_cmd->delayed = cpu_to_be32(0x00);
+
+ return sizeof(*led_cmd);
+}
+
+/* RPC reply callback: decode the 32-bit result word for debug logging
+ * only; always reports success back to the RPC layer.
+ */
+static int led_rpc_res(struct msm_rpc_client *client,
+ void *buffer, void *data)
+{
+ uint32_t result;
+
+ result = be32_to_cpu(*((uint32_t *)buffer));
+ pr_debug("%s: request completed: 0x%x\n", __func__, result);
+
+ return 0;
+}
+
+/* Send a single LED status command to the modem.  Failures are only
+ * logged; callers have no way to recover them.
+ */
+static void led_rpc_set_status(struct msm_rpc_client *client,
+ enum tricolor_led_status status)
+{
+ int rc;
+
+ rc = msm_rpc_client_req(client, LED_CMD_PROC,
+ led_send_cmd_arg, &status, led_rpc_res, NULL, -1);
+ if (rc)
+ pr_err("%s: RPC client request for led failed", __func__);
+
+}
+
+/*
+ * sysfs "blink" show: print the cached blink state (0 or 1).
+ *
+ * The previous snprintf(buf, 2, "%d\n", ...) truncated the newline
+ * (only one byte plus NUL fits in a 2-byte bound) while still
+ * returning 2, so userspace read "1\0".  Print into the full sysfs
+ * page instead, as is conventional for show() callbacks.
+ */
+static ssize_t led_blink_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct tricolor_led_data *led = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", led->blink_status);
+}
+
+/*
+ * sysfs "blink" store: parse a small numeric value and send the
+ * matching <color>_BLINK / <color>_BLINK_OFF command to the modem.
+ * Any non-zero value within max_brightness enables blinking; a blink
+ * write also clears the cached solid brightness.
+ */
+static ssize_t led_blink_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct tricolor_led_data *led = dev_get_drvdata(dev);
+ enum tricolor_led_status status;
+ unsigned long value;
+ int rc;
+
+ /* accept at most one digit plus a trailing newline */
+ if (size > 2)
+ return -EINVAL;
+
+ rc = kstrtoul(buf, 10, &value);
+ if (rc)
+ return rc;
+
+
+ if (value < LED_OFF || value > led->cdev.max_brightness) {
+ dev_err(dev, "invalid brightness\n");
+ return -EINVAL;
+ }
+
+ switch (led->color) {
+ case LED_COLOR_RED:
+ status = value ? RED_BLINK : RED_BLINK_OFF;
+ break;
+ case LED_COLOR_GREEN:
+ status = value ? GREEN_BLINK : GREEN_BLINK_OFF;
+ break;
+ case LED_COLOR_BLUE:
+ status = value ? BLUE_BLINK : BLUE_BLINK_OFF;
+ break;
+ default:
+ dev_err(dev, "unknown led device\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&led->lock);
+ led->blink_status = !!value;
+ /* blinking and solid brightness are mutually exclusive */
+ led->cdev.brightness = 0;
+
+ /* program the led blink */
+ led_rpc_set_status(led->rpc_client, status);
+ mutex_unlock(&led->lock);
+
+ return size;
+}
+
+static DEVICE_ATTR(blink, 0644, led_blink_show, led_blink_store);
+
+/* led_classdev brightness_set hook: map brightness to the matching
+ * <color>_ON / <color>_OFF RPC command; any brightness write cancels
+ * blinking.
+ */
+static void tricolor_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct tricolor_led_data *led;
+ enum tricolor_led_status status;
+
+ led = container_of(led_cdev, struct tricolor_led_data, cdev);
+
+ if (value < LED_OFF || value > led->cdev.max_brightness) {
+ dev_err(led->cdev.dev, "invalid brightness\n");
+ return;
+ }
+
+ switch (led->color) {
+ case LED_COLOR_RED:
+ status = value ? RED_ON : RED_OFF;
+ break;
+ case LED_COLOR_GREEN:
+ status = value ? GREEN_ON : GREEN_OFF;
+ break;
+ case LED_COLOR_BLUE:
+ status = value ? BLUE_ON : BLUE_OFF;
+ break;
+ default:
+ dev_err(led->cdev.dev, "unknown led device\n");
+ return;
+ }
+
+ mutex_lock(&led->lock);
+ led->blink_status = 0;
+ led->cdev.brightness = value;
+
+ /* program the led brightness */
+ led_rpc_set_status(led->rpc_client, status);
+ mutex_unlock(&led->lock);
+}
+
+/* led_classdev brightness_get hook: return the cached brightness. */
+static enum led_brightness tricolor_led_get(struct led_classdev *led_cdev)
+{
+ struct tricolor_led_data *led;
+
+ led = container_of(led_cdev, struct tricolor_led_data, cdev);
+
+ return led->cdev.brightness;
+}
+
+/* Marshal the event-subscription request; also caches the buffer
+ * pointer in led_subs_req so later commands can reuse handle_data.
+ * Returns the number of bytes written.
+ */
+static int led_rpc_register_subs_arg(struct msm_rpc_client *client,
+ void *buffer, void *data)
+{
+ led_subs_req = buffer;
+
+ led_subs_req->subs_ptr = cpu_to_be32(0x1);
+ led_subs_req->ver = cpu_to_be32(0x1);
+ led_subs_req->srvc = cpu_to_be32(LED_SUBS_RCV_EVNT);
+ led_subs_req->req = cpu_to_be32(LED_SUBS_REGISTER);
+ led_subs_req->host_os = cpu_to_be32(LINUX_HOST);
+ led_subs_req->disc_id = cpu_to_be32(LED_SUBS_RCV_EVNT);
+ led_subs_req->event = cpu_to_be32(LED_EVNT_CLASS_ALL);
+ led_subs_req->cb_id = cpu_to_be32(0x1);
+ led_subs_req->handle_ptr = cpu_to_be32(0x1);
+ led_subs_req->handle_data = cpu_to_be32(0x0);
+
+ return sizeof(*led_subs_req);
+}
+
+/* Callback for RPC requests arriving from the modem: byte-swap the
+ * request header in place and acknowledge with an empty accepted
+ * reply.
+ */
+static int led_cb_func(struct msm_rpc_client *client, void *buffer, int in_size)
+{
+ struct rpc_request_hdr *hdr = buffer;
+ int rc;
+
+ hdr->type = be32_to_cpu(hdr->type);
+ hdr->xid = be32_to_cpu(hdr->xid);
+ hdr->rpc_vers = be32_to_cpu(hdr->rpc_vers);
+ hdr->prog = be32_to_cpu(hdr->prog);
+ hdr->vers = be32_to_cpu(hdr->vers);
+ hdr->procedure = be32_to_cpu(hdr->procedure);
+
+ msm_rpc_start_accepted_reply(client, hdr->xid,
+ RPC_ACCEPTSTAT_SUCCESS);
+ rc = msm_rpc_send_accepted_reply(client, 0);
+ if (rc)
+ pr_err("%s: sending reply failed: %d\n", __func__, rc);
+
+ return rc;
+}
+
+/*
+ * Probe: create the shared RPC client, subscribe for LED events, then
+ * register one led_classdev and a "blink" sysfs attribute per LED
+ * described in the platform data.  Errors unwind in reverse order.
+ */
+static int __devinit tricolor_led_probe(struct platform_device *pdev)
+{
+ const struct led_platform_data *pdata = pdev->dev.platform_data;
+ struct msm_rpc_client *rpc_client;
+ struct led_info *curr_led;
+ struct tricolor_led_data *led, *tmp_led;
+ int rc, i, j;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform data not supplied\n");
+ return -EINVAL;
+ }
+
+ /* initialize rpc client */
+ rpc_client = msm_rpc_register_client("led", LED_RPC_PROG,
+ LED_RPC_VER, 0, led_cb_func);
+ rc = IS_ERR(rpc_client);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to initialize rpc_client\n");
+ return -EINVAL;
+ }
+
+ /* subscribe */
+ rc = msm_rpc_client_req(rpc_client, LED_SUBSCRIBE_PROC,
+ led_rpc_register_subs_arg, NULL,
+ led_rpc_res, NULL, -1);
+ if (rc) {
+ pr_err("%s: RPC client request failed for subscribe services\n",
+ __func__);
+ goto fail_mem_alloc;
+ }
+
+ led = devm_kzalloc(&pdev->dev, pdata->num_leds * sizeof(*led),
+ GFP_KERNEL);
+ if (!led) {
+ dev_err(&pdev->dev, "failed to alloc memory\n");
+ rc = -ENOMEM;
+ goto fail_mem_alloc;
+ }
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ curr_led = &pdata->leds[i];
+ tmp_led = &led[i];
+
+ tmp_led->cdev.name = curr_led->name;
+ tmp_led->cdev.default_trigger = curr_led->default_trigger;
+ tmp_led->cdev.brightness_set = tricolor_led_set;
+ tmp_led->cdev.brightness_get = tricolor_led_get;
+ tmp_led->cdev.brightness = LED_OFF;
+ tmp_led->cdev.max_brightness = LED_FULL;
+ tmp_led->color = curr_led->flags;
+ tmp_led->rpc_client = rpc_client;
+ tmp_led->blink_status = false;
+
+ mutex_init(&tmp_led->lock);
+
+ rc = led_classdev_register(&pdev->dev, &tmp_led->cdev);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to register led %s(%d)\n",
+ tmp_led->cdev.name, rc);
+ goto fail_led_reg;
+ }
+
+ /* Add blink attributes */
+ rc = device_create_file(tmp_led->cdev.dev, &dev_attr_blink);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to create blink attr\n");
+ goto fail_blink_attr;
+ }
+ dev_set_drvdata(tmp_led->cdev.dev, tmp_led);
+ }
+
+ platform_set_drvdata(pdev, led);
+
+ return 0;
+
+/* Remove blink attrs for leds 0..i-1, then bump i so the classdev of
+ * led i (already registered) is unregistered below as well.
+ */
+fail_blink_attr:
+ j = i;
+ while (j)
+ device_remove_file(led[--j].cdev.dev, &dev_attr_blink);
+ i++;
+fail_led_reg:
+ while (i) {
+ led_classdev_unregister(&led[--i].cdev);
+ mutex_destroy(&led[i].lock);
+ }
+/* NOTE(review): label name is historical -- it also unwinds subscribe
+ * failures, not just allocation failures.
+ */
+fail_mem_alloc:
+ msm_rpc_unregister_client(rpc_client);
+ return rc;
+}
+
+/* Remove: unregister each LED's classdev and blink attribute, then
+ * drop the shared RPC client (the led array itself is devm-managed).
+ */
+static int __devexit tricolor_led_remove(struct platform_device *pdev)
+{
+ const struct led_platform_data *pdata = pdev->dev.platform_data;
+ struct tricolor_led_data *led = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ led_classdev_unregister(&led[i].cdev);
+ device_remove_file(led[i].cdev.dev, &dev_attr_blink);
+ mutex_destroy(&led[i].lock);
+ }
+
+ msm_rpc_unregister_client(led->rpc_client);
+
+ return 0;
+}
+
+static struct platform_driver tricolor_led_driver = {
+ .probe = tricolor_led_probe,
+ .remove = __devexit_p(tricolor_led_remove),
+ .driver = {
+ .name = "msm-tricolor-leds",
+ .owner = THIS_MODULE,
+ },
+};
+
+/* NOTE(review): registered via late_initcall, presumably so the RPC
+ * router is up before the first LED command -- confirm ordering.
+ */
+static int __init tricolor_led_init(void)
+{
+ return platform_driver_register(&tricolor_led_driver);
+}
+late_initcall(tricolor_led_init);
+
+static void __exit tricolor_led_exit(void)
+{
+ platform_driver_unregister(&tricolor_led_driver);
+}
+module_exit(tricolor_led_exit);
+
+MODULE_DESCRIPTION("MSM Tri-color LEDs driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:tricolor-led");
diff --git a/drivers/leds/leds-pm8xxx.c b/drivers/leds/leds-pm8xxx.c
index 3261037..04041e4 100644
--- a/drivers/leds/leds-pm8xxx.c
+++ b/drivers/leds/leds-pm8xxx.c
@@ -323,12 +323,17 @@
}
static void
-led_rgb_set(struct pm8xxx_led_data *led, enum led_brightness value)
+led_rgb_write(struct pm8xxx_led_data *led, u16 addr, enum led_brightness value)
{
int rc;
u8 val, mask;
- rc = pm8xxx_readb(led->dev->parent, SSBI_REG_ADDR_RGB_CNTL2, &val);
+ if (led->id != PM8XXX_ID_RGB_LED_BLUE &&
+ led->id != PM8XXX_ID_RGB_LED_RED &&
+ led->id != PM8XXX_ID_RGB_LED_GREEN)
+ return;
+
+ rc = pm8xxx_readb(led->dev->parent, addr, &val);
if (rc) {
dev_err(led->cdev.dev, "can't read rgb ctrl register rc=%d\n",
rc);
@@ -354,12 +359,24 @@
else
val &= ~mask;
- rc = pm8xxx_writeb(led->dev->parent, SSBI_REG_ADDR_RGB_CNTL2, val);
+ rc = pm8xxx_writeb(led->dev->parent, addr, val);
if (rc < 0)
dev_err(led->cdev.dev, "can't set rgb led %d level rc=%d\n",
led->id, rc);
}
+static void
+led_rgb_set(struct pm8xxx_led_data *led, enum led_brightness value)
+{
+ if (value) {
+ led_rgb_write(led, SSBI_REG_ADDR_RGB_CNTL1, value);
+ led_rgb_write(led, SSBI_REG_ADDR_RGB_CNTL2, value);
+ } else {
+ led_rgb_write(led, SSBI_REG_ADDR_RGB_CNTL2, value);
+ led_rgb_write(led, SSBI_REG_ADDR_RGB_CNTL1, value);
+ }
+}
+
static int pm8xxx_led_pwm_work(struct pm8xxx_led_data *led)
{
int duty_us;
@@ -369,12 +386,23 @@
duty_us = (led->pwm_period_us * led->cdev.brightness) /
LED_FULL;
rc = pwm_config(led->pwm_dev, duty_us, led->pwm_period_us);
- if (led->cdev.brightness)
+ if (led->cdev.brightness) {
+ led_rgb_write(led, SSBI_REG_ADDR_RGB_CNTL1,
+ led->cdev.brightness);
rc = pwm_enable(led->pwm_dev);
- else
+ } else {
pwm_disable(led->pwm_dev);
+ led_rgb_write(led, SSBI_REG_ADDR_RGB_CNTL1,
+ led->cdev.brightness);
+ }
} else {
+ if (led->cdev.brightness)
+ led_rgb_write(led, SSBI_REG_ADDR_RGB_CNTL1,
+ led->cdev.brightness);
rc = pm8xxx_pwm_lut_enable(led->pwm_dev, led->cdev.brightness);
+ if (!led->cdev.brightness)
+ led_rgb_write(led, SSBI_REG_ADDR_RGB_CNTL1,
+ led->cdev.brightness);
}
return rc;
@@ -658,35 +686,6 @@
return 0;
}
-static int __devinit init_rgb_led(struct pm8xxx_led_data *led)
-{
- int rc;
- u8 val;
-
- rc = pm8xxx_readb(led->dev->parent, SSBI_REG_ADDR_RGB_CNTL1, &val);
- if (rc) {
- dev_err(led->cdev.dev, "can't read rgb ctrl1 register rc=%d\n",
- rc);
- return rc;
- }
-
- switch (led->id) {
- case PM8XXX_ID_RGB_LED_RED:
- val |= PM8XXX_DRV_RGB_RED_LED;
- break;
- case PM8XXX_ID_RGB_LED_GREEN:
- val |= PM8XXX_DRV_RGB_GREEN_LED;
- break;
- case PM8XXX_ID_RGB_LED_BLUE:
- val |= PM8XXX_DRV_RGB_BLUE_LED;
- break;
- default:
- return -EINVAL;
- }
-
- return pm8xxx_writeb(led->dev->parent, SSBI_REG_ADDR_RGB_CNTL1, val);
-}
-
static int __devinit get_init_value(struct pm8xxx_led_data *led, u8 *val)
{
int rc, offset;
@@ -717,12 +716,6 @@
case PM8XXX_ID_RGB_LED_RED:
case PM8XXX_ID_RGB_LED_GREEN:
case PM8XXX_ID_RGB_LED_BLUE:
- rc = init_rgb_led(led);
- if (rc) {
- dev_err(led->cdev.dev, "can't initialize rgb rc=%d\n",
- rc);
- return rc;
- }
addr = SSBI_REG_ADDR_RGB_CNTL1;
break;
default:
diff --git a/drivers/media/dvb/dvb-core/demux.h b/drivers/media/dvb/dvb-core/demux.h
index e22bc64..071d209 100644
--- a/drivers/media/dvb/dvb-core/demux.h
+++ b/drivers/media/dvb/dvb-core/demux.h
@@ -63,15 +63,52 @@
*/
enum dmx_success {
- DMX_OK = 0, /* Received Ok */
- DMX_LENGTH_ERROR, /* Incorrect length */
- DMX_OVERRUN_ERROR, /* Receiver ring buffer overrun */
- DMX_CRC_ERROR, /* Incorrect CRC */
- DMX_FRAME_ERROR, /* Frame alignment error */
- DMX_FIFO_ERROR, /* Receiver FIFO overrun */
- DMX_MISSED_ERROR /* Receiver missed packet */
+ DMX_OK = 0, /* Received Ok */
+ DMX_OK_PES_END, /* Received ok, data reached end of PES packet */
+ DMX_OK_PCR, /* Received OK, data with new PCR/STC pair */
+ DMX_LENGTH_ERROR, /* Incorrect length */
+ DMX_OVERRUN_ERROR, /* Receiver ring buffer overrun */
+ DMX_CRC_ERROR, /* Incorrect CRC */
+ DMX_FRAME_ERROR, /* Frame alignment error */
+ DMX_FIFO_ERROR, /* Receiver FIFO overrun */
+ DMX_MISSED_ERROR /* Receiver missed packet */
} ;
+
+/*
+ * struct dmx_data_ready: Parameters for event notification callback.
+ * Event notification notifies demux device that data is written
+ * and available in the device's output buffer or provides
+ * notification on errors and other events. In the latter case
+ * data_length is zero.
+ */
+struct dmx_data_ready {
+ enum dmx_success status;
+
+ /*
+ * data_length may be 0 in case of DMX_OK_PES_END
+ * and in non-DMX_OK_XXX events. In DMX_OK_PES_END,
+ * data_length is for data coming after the end of PES.
+ */
+ int data_length;
+
+ /* payload valid per status: pes_end for DMX_OK_PES_END,
+ * pcr for DMX_OK_PCR
+ */
+ union {
+ struct {
+ int start_gap;
+ int actual_length;
+ int disc_indicator_set;
+ int pes_length_mismatch;
+ u64 stc;
+ } pes_end;
+
+ struct {
+ u64 pcr;
+ u64 stc;
+ int disc_indicator_set;
+ } pcr;
+ };
+};
+
/*--------------------------------------------------------------------------*/
/* TS packet reception */
/*--------------------------------------------------------------------------*/
@@ -123,10 +160,15 @@
#define DMX_TS_PES_SUBTITLE DMX_TS_PES_SUBTITLE0
#define DMX_TS_PES_PCR DMX_TS_PES_PCR0
+struct dmx_ts_feed;
+typedef int (*dmx_ts_data_ready_cb)(
+ struct dmx_ts_feed *source,
+ struct dmx_data_ready *dmx_data_ready);
struct dmx_ts_feed {
int is_filtering; /* Set to non-zero when filtering in progress */
struct dmx_demux *parent; /* Back-pointer */
+ const struct dvb_ringbuffer *buffer;
void *priv; /* Pointer to private data of the API client */
int (*set) (struct dmx_ts_feed *feed,
u16 pid,
@@ -141,6 +183,10 @@
int (*get_decoder_buff_status)(
struct dmx_ts_feed *feed,
struct dmx_buffer_status *dmx_buffer_status);
+ int (*data_ready_cb)(struct dmx_ts_feed *feed,
+ dmx_ts_data_ready_cb callback);
+ int (*notify_data_read)(struct dmx_ts_feed *feed,
+ u32 bytes_num);
};
/*--------------------------------------------------------------------------*/
@@ -152,9 +198,15 @@
u8 filter_mask [DMX_MAX_FILTER_SIZE];
u8 filter_mode [DMX_MAX_FILTER_SIZE];
struct dmx_section_feed* parent; /* Back-pointer */
+ const struct dvb_ringbuffer *buffer;
void* priv; /* Pointer to private data of the API client */
};
+struct dmx_section_feed;
+typedef int (*dmx_section_data_ready_cb)(
+ struct dmx_section_filter *source,
+ struct dmx_data_ready *dmx_data_ready);
+
struct dmx_section_feed {
int is_filtering; /* Set to non-zero when filtering in progress */
struct dmx_demux* parent; /* Back-pointer */
@@ -177,6 +229,10 @@
struct dmx_section_filter* filter);
int (*start_filtering) (struct dmx_section_feed* feed);
int (*stop_filtering) (struct dmx_section_feed* feed);
+ int (*data_ready_cb)(struct dmx_section_feed *feed,
+ dmx_section_data_ready_cb callback);
+ int (*notify_data_read)(struct dmx_section_filter *filter,
+ u32 bytes_num);
};
/*--------------------------------------------------------------------------*/
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 8e5127a..8353f6f 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -60,9 +60,331 @@
return dvb_ringbuffer_write(buf, src, len);
}
+static inline void dvb_dmxdev_notify_data_read(struct dmxdev_filter *filter,
+ int bytes_read)
+{
+ if (!filter)
+ return;
+
+ if (filter->type == DMXDEV_TYPE_SEC) {
+ if (filter->feed.sec->notify_data_read)
+ filter->feed.sec->notify_data_read(
+ filter->filter.sec,
+ bytes_read);
+ } else {
+ struct dmxdev_feed *feed;
+
+ /*
+ * All feeds of same demux-handle share the same output
+ * buffer, it is enough to notify on the buffer status
+ * on one of the feeds
+ */
+ feed = list_first_entry(&filter->feed.ts,
+ struct dmxdev_feed, next);
+
+ if (feed->ts->notify_data_read)
+ feed->ts->notify_data_read(
+ feed->ts,
+ bytes_read);
+ }
+}
+
+static inline u32 dvb_dmxdev_advance_event_idx(u32 index)
+{
+ index++;
+ if (index >= DMX_EVENT_QUEUE_SIZE)
+ index = 0;
+
+ return index;
+}
+
+static inline void dvb_dmxdev_flush_events(struct dmxdev_events_queue *events)
+{
+ events->read_index = 0;
+ events->write_index = 0;
+ events->notified_index = 0;
+ events->bytes_read_no_event = 0;
+ events->current_event_data_size = 0;
+}
+
+static inline void dvb_dmxdev_flush_output(struct dvb_ringbuffer *buffer,
+ struct dmxdev_events_queue *events)
+{
+ dvb_dmxdev_flush_events(events);
+ dvb_ringbuffer_flush(buffer);
+}
+
+static int dvb_dmxdev_update_pes_event(struct dmx_filter_event *event,
+ int bytes_read)
+{
+ int start_delta;
+
+ if (event->params.pes.total_length <= bytes_read)
+ return event->params.pes.total_length;
+
+ /*
+ * only part of the data relevant to this event was read.
+ * Update the event's information to reflect the new state.
+ */
+ event->params.pes.total_length -= bytes_read;
+
+ start_delta = event->params.pes.start_offset -
+ event->params.pes.base_offset;
+
+ if (bytes_read <= start_delta) {
+ event->params.pes.base_offset +=
+ bytes_read;
+ } else {
+ start_delta =
+ bytes_read - start_delta;
+
+ event->params.pes.start_offset += start_delta;
+ event->params.pes.actual_length -= start_delta;
+
+ event->params.pes.base_offset =
+ event->params.pes.start_offset;
+ }
+
+ return 0;
+}
+
+static int dvb_dmxdev_update_section_event(struct dmx_filter_event *event,
+ int bytes_read)
+{
+ int start_delta;
+
+ if (event->params.section.total_length <= bytes_read)
+ return event->params.section.total_length;
+
+ /*
+ * only part of the data relevant to this event was read.
+ * Update the event's information to reflect the new state.
+ */
+
+ event->params.section.total_length -= bytes_read;
+
+ start_delta = event->params.section.start_offset -
+ event->params.section.base_offset;
+
+ if (bytes_read <= start_delta) {
+ event->params.section.base_offset +=
+ bytes_read;
+ } else {
+ start_delta =
+ bytes_read - start_delta;
+
+ event->params.section.start_offset += start_delta;
+ event->params.section.actual_length -= start_delta;
+
+ event->params.section.base_offset =
+ event->params.section.start_offset;
+ }
+
+ return 0;
+}
+
+static int dvb_dmxdev_update_rec_event(struct dmx_filter_event *event,
+ int bytes_read)
+{
+ if (event->params.recording_chunk.size <= bytes_read)
+ return event->params.recording_chunk.size;
+
+ /*
+ * only part of the data relevant to this event was read.
+ * Update the event's information to reflect the new state.
+ */
+ event->params.recording_chunk.size -= bytes_read;
+ event->params.recording_chunk.offset += bytes_read;
+
+ return 0;
+}
+
+static int dvb_dmxdev_add_event(struct dmxdev_events_queue *events,
+ struct dmx_filter_event *event)
+{
+ int res;
+ int new_write_index;
+ int data_event;
+
+ /* Check if we are adding an event that user already read its data */
+ if (events->bytes_read_no_event) {
+ data_event = 1;
+
+ if (event->type == DMX_EVENT_NEW_PES)
+ res = dvb_dmxdev_update_pes_event(event,
+ events->bytes_read_no_event);
+ else if (event->type == DMX_EVENT_NEW_SECTION)
+ res = dvb_dmxdev_update_section_event(event,
+ events->bytes_read_no_event);
+ else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+ res = dvb_dmxdev_update_rec_event(event,
+ events->bytes_read_no_event);
+ else
+ data_event = 0;
+
+ if (data_event) {
+ if (res) {
+ /*
+				 * Data relevant to this event was fully
+ * consumed already, discard event.
+ */
+ events->bytes_read_no_event -= res;
+ return 0;
+ }
+ events->bytes_read_no_event = 0;
+ } else {
+ /*
+ * data was read beyond the non-data event,
+ * making it not relevant anymore
+ */
+ return 0;
+ }
+ }
+
+ new_write_index = dvb_dmxdev_advance_event_idx(events->write_index);
+ if (new_write_index == events->read_index) {
+ printk(KERN_ERR "dmxdev: events overflow\n");
+ return -EOVERFLOW;
+ }
+
+ events->queue[events->write_index] = *event;
+ events->write_index = new_write_index;
+
+ return 0;
+}
+
+static int dvb_dmxdev_remove_event(struct dmxdev_events_queue *events,
+ struct dmx_filter_event *event)
+{
+ if (events->notified_index == events->write_index)
+ return -ENODATA;
+
+ *event = events->queue[events->notified_index];
+
+ events->notified_index =
+ dvb_dmxdev_advance_event_idx(events->notified_index);
+
+ return 0;
+}
+
+static int dvb_dmxdev_update_events(struct dmxdev_events_queue *events,
+ int bytes_read)
+{
+ struct dmx_filter_event *event;
+ int res;
+ int data_event;
+
+ /*
+ * Go through all events that were notified and
+ * remove them from the events queue if their respective
+ * data was read.
+ */
+ while ((events->read_index != events->notified_index) &&
+ (bytes_read)) {
+ event = events->queue + events->read_index;
+
+ data_event = 1;
+
+ if (event->type == DMX_EVENT_NEW_PES)
+ res = dvb_dmxdev_update_pes_event(event, bytes_read);
+ else if (event->type == DMX_EVENT_NEW_SECTION)
+ res = dvb_dmxdev_update_section_event(event,
+ bytes_read);
+ else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+ res = dvb_dmxdev_update_rec_event(event, bytes_read);
+ else
+ data_event = 0;
+
+ if (data_event) {
+ if (res) {
+ /*
+				 * Data relevant to this event was
+ * fully consumed, remove it from the queue.
+ */
+ bytes_read -= res;
+ events->read_index =
+ dvb_dmxdev_advance_event_idx(
+ events->read_index);
+ } else {
+ bytes_read = 0;
+ }
+ } else {
+ /*
+ * non-data event was already notified,
+ * no need to keep it
+ */
+ events->read_index = dvb_dmxdev_advance_event_idx(
+ events->read_index);
+ }
+ }
+
+ if (!bytes_read)
+ return 0;
+
+ /*
+ * If we reached here it means:
+ * bytes_read != 0
+ * events->read_index == events->notified_index
+ * Check if there are pending events in the queue
+ * which the user didn't read while their relevant data
+ * was read.
+ */
+ while ((events->notified_index != events->write_index) &&
+ (bytes_read)) {
+ event = events->queue + events->notified_index;
+
+ data_event = 1;
+
+ if (event->type == DMX_EVENT_NEW_PES)
+ res = dvb_dmxdev_update_pes_event(event, bytes_read);
+ else if (event->type == DMX_EVENT_NEW_SECTION)
+ res = dvb_dmxdev_update_section_event(event,
+ bytes_read);
+ else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+ res = dvb_dmxdev_update_rec_event(event, bytes_read);
+ else
+ data_event = 0;
+
+ if (data_event) {
+ if (res) {
+ /*
+				 * Data relevant to this event was
+ * fully consumed, remove it from the queue.
+ */
+ bytes_read -= res;
+ events->notified_index =
+ dvb_dmxdev_advance_event_idx(
+ events->notified_index);
+ } else {
+ bytes_read = 0;
+ }
+ } else {
+ if (bytes_read)
+ /*
+ * data was read beyond the non-data event,
+ * making it not relevant anymore
+ */
+ events->notified_index =
+ dvb_dmxdev_advance_event_idx(
+ events->notified_index);
+ }
+
+ events->read_index = events->notified_index;
+ }
+
+ /*
+ * Check if data was read without having a respective
+ * event in the events-queue
+ */
+ if (bytes_read)
+ events->bytes_read_no_event += bytes_read;
+
+ return 0;
+}
+
static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
- int non_blocking, char __user *buf,
- size_t count, loff_t *ppos)
+ int non_blocking, char __user *buf,
+ size_t count, loff_t *ppos)
{
size_t todo;
ssize_t avail;
@@ -73,7 +395,7 @@
if (src->error) {
ret = src->error;
- dvb_ringbuffer_flush(src);
+ src->error = 0;
return ret;
}
@@ -94,7 +416,7 @@
if (src->error) {
ret = src->error;
- dvb_ringbuffer_flush(src);
+ src->error = 0;
break;
}
@@ -166,6 +488,9 @@
return -ENOMEM;
}
dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
+ dvb_dmxdev_flush_events(&dmxdev->dvr_output_events);
+ dmxdev->dvr_feeds_count = 0;
+
dvbdev->readers--;
} else if (!dvbdev->writers) {
dmxdev->dvr_in_exit = 0;
@@ -412,15 +737,37 @@
static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
+ ssize_t res;
struct dvb_device *dvbdev = file->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
if (dmxdev->exit)
return -ENODEV;
- return dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
+ res = dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
file->f_flags & O_NONBLOCK,
buf, count, ppos);
+
+ if (res > 0) {
+ dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, res);
+ spin_lock_irq(&dmxdev->lock);
+ dvb_dmxdev_update_events(&dmxdev->dvr_output_events, res);
+ spin_unlock_irq(&dmxdev->lock);
+ } else if (res == -EOVERFLOW) {
+ /*
+ * When buffer overflowed, demux-dev flushed the
+ * buffer and marked the buffer in error state.
+ * Data from underlying driver is discarded until
+ * user gets notified that buffer has overflowed.
+ * Now that the user is notified, notify underlying
+ * driver that data was flushed from output buffer.
+ */
+ dvb_dmxdev_notify_data_read(dmxdev->dvr_feed,
+ dmxdev->dvr_flush_data_len);
+ dmxdev->dvr_flush_data_len = 0;
+ }
+
+ return res;
}
static void dvr_input_work_func(struct work_struct *worker)
@@ -556,6 +903,39 @@
return 0;
}
+static int dvb_dvr_get_event(struct dmxdev *dmxdev,
+ unsigned int f_flags,
+ struct dmx_filter_event *event)
+{
+ int res;
+
+ if (!((f_flags & O_ACCMODE) == O_RDONLY))
+ return -EINVAL;
+
+ spin_lock_irq(&dmxdev->lock);
+
+ res = dvb_dmxdev_remove_event(&dmxdev->dvr_output_events, event);
+
+ if (event->type == DMX_EVENT_BUFFER_OVERFLOW) {
+ /*
+ * When buffer overflowed, demux-dev flushed the
+ * buffer and marked the buffer in error state.
+ * Data from underlying driver is discarded until
+ * user gets notified that buffer has overflowed.
+ * Now that the user is notified, notify underlying
+ * driver that data was flushed from output buffer.
+ */
+ dvb_dmxdev_notify_data_read(dmxdev->dvr_feed,
+ dmxdev->dvr_flush_data_len);
+ dmxdev->dvr_flush_data_len = 0;
+ dmxdev->dvr_buffer.error = 0;
+ }
+
+ spin_unlock_irq(&dmxdev->lock);
+
+ return res;
+}
+
static int dvb_dvr_get_buffer_status(struct dmxdev *dmxdev,
unsigned int f_flags,
struct dmx_buffer_status *dmx_buffer_status)
@@ -574,8 +954,23 @@
spin_lock_irq(lock);
dmx_buffer_status->error = buf->error;
- if (buf->error)
- dvb_ringbuffer_flush(buf);
+ if (buf->error) {
+ if (buf->error == -EOVERFLOW) {
+ /*
+ * When buffer overflowed, demux-dev flushed the
+ * buffer and marked the buffer in error state.
+ * Data from underlying driver is discarded until
+ * user gets notified that buffer has overflowed.
+ * Now that the user is notified, notify underlying
+ * driver that data was flushed from output buffer.
+ */
+ dvb_dmxdev_notify_data_read(dmxdev->dvr_feed,
+ dmxdev->dvr_flush_data_len);
+ dmxdev->dvr_flush_data_len = 0;
+ }
+
+ buf->error = 0;
+ }
dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
@@ -594,7 +989,7 @@
{
ssize_t buff_fullness;
- if (!(f_flags & O_ACCMODE) == O_RDONLY)
+ if (!((f_flags & O_ACCMODE) == O_RDONLY))
return -EINVAL;
if (!bytes_count)
@@ -606,6 +1001,12 @@
return -EINVAL;
DVB_RINGBUFFER_SKIP(&dmxdev->dvr_buffer, bytes_count);
+
+ dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, bytes_count);
+ spin_lock_irq(&dmxdev->lock);
+ dvb_dmxdev_update_events(&dmxdev->dvr_output_events, bytes_count);
+ spin_unlock_irq(&dmxdev->lock);
+
wake_up_all(&dmxdev->dvr_buffer.queue);
return 0;
}
@@ -854,40 +1255,39 @@
struct dmxdev_feed *feed;
int ret;
- /*
- * Ask for status of decoder's buffer from underlying HW.
- * In case of PCR/STC extraction, the filter's ring-buffer
- * is used to gather the PCR/STC data and not using
- * an internal decoder buffer.
- */
- if (!(dmxdevfilter->dev->capabilities &
- DMXDEV_CAP_PCR_EXTRACTION) ||
- ((dmxdevfilter->params.pes.pes_type != DMX_PES_PCR0) &&
- (dmxdevfilter->params.pes.pes_type != DMX_PES_PCR1) &&
- (dmxdevfilter->params.pes.pes_type != DMX_PES_PCR2) &&
- (dmxdevfilter->params.pes.pes_type != DMX_PES_PCR3))) {
- list_for_each_entry(feed, &dmxdevfilter->feed.ts,
- next) {
- if (feed->ts->get_decoder_buff_status)
- ret = feed->ts->get_decoder_buff_status(
- feed->ts,
- dmx_buffer_status);
- else
- ret = -ENODEV;
+ /* Only one feed should be in the list in case of decoder */
+ feed = list_first_entry(&dmxdevfilter->feed.ts,
+ struct dmxdev_feed, next);
- /*
- * There should not be more than one ts feed
- * in the list as this is DECODER feed.
- */
- spin_unlock_irq(&dmxdevfilter->dev->lock);
- return ret;
- }
- }
+ /* Ask for status of decoder's buffer from underlying HW */
+ if (feed->ts->get_decoder_buff_status)
+ ret = feed->ts->get_decoder_buff_status(
+ feed->ts,
+ dmx_buffer_status);
+ else
+ ret = -ENODEV;
+
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+ return ret;
}
dmx_buffer_status->error = buf->error;
- if (buf->error)
- dvb_ringbuffer_flush(buf);
+ if (buf->error) {
+ if (buf->error == -EOVERFLOW) {
+ /*
+ * When buffer overflowed, demux-dev flushed the
+ * buffer and marked the buffer in error state.
+ * Data from underlying driver is discarded until
+ * user gets notified that buffer has overflowed.
+ * Now that the user is notified, notify underlying
+ * driver that data was flushed from output buffer.
+ */
+ dvb_dmxdev_notify_data_read(dmxdevfilter,
+ dmxdevfilter->flush_data_len);
+ dmxdevfilter->flush_data_len = 0;
+ }
+ buf->error = 0;
+ }
dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
@@ -918,11 +1318,46 @@
DVB_RINGBUFFER_SKIP(&dmxdevfilter->buffer, bytes_count);
+ dvb_dmxdev_notify_data_read(dmxdevfilter, bytes_count);
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ dvb_dmxdev_update_events(&dmxdevfilter->events, bytes_count);
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
wake_up_all(&dmxdevfilter->buffer.queue);
return 0;
}
+static int dvb_dmxdev_get_event(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_filter_event *event)
+{
+ int res;
+
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+
+ res = dvb_dmxdev_remove_event(&dmxdevfilter->events, event);
+
+ if (event->type == DMX_EVENT_BUFFER_OVERFLOW) {
+ /*
+ * When buffer overflowed, demux-dev flushed the
+ * buffer and marked the buffer in error state.
+ * Data from underlying driver is discarded until
+ * user gets notified that buffer has overflowed.
+ * Now that the user is notified, notify underlying
+ * driver that data was flushed from output buffer.
+ */
+ dvb_dmxdev_notify_data_read(dmxdevfilter,
+ dmxdevfilter->flush_data_len);
+ dmxdevfilter->flush_data_len = 0;
+ dmxdevfilter->buffer.error = 0;
+ }
+
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ return res;
+
+}
+
static void dvb_dmxdev_filter_timeout(unsigned long data)
{
struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data;
@@ -954,6 +1389,7 @@
enum dmx_success success)
{
struct dmxdev_filter *dmxdevfilter = filter->priv;
+ struct dmx_filter_event event;
int ret;
if (dmxdevfilter->buffer.error) {
@@ -965,20 +1401,59 @@
spin_unlock(&dmxdevfilter->dev->lock);
return 0;
}
+
+ if ((buffer1_len + buffer2_len) == 0) {
+ if (DMX_CRC_ERROR == success) {
+ /* Section was dropped due to CRC error */
+ event.type = DMX_EVENT_SECTION_CRC_ERROR;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ }
+
+ return 0;
+ }
+
+ event.params.section.base_offset = dmxdevfilter->buffer.pwrite;
+ event.params.section.start_offset = dmxdevfilter->buffer.pwrite;
+
del_timer(&dmxdevfilter->timer);
dprintk("dmxdev: section callback %02x %02x %02x %02x %02x %02x\n",
buffer1[0], buffer1[1],
buffer1[2], buffer1[3], buffer1[4], buffer1[5]);
ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1,
buffer1_len);
- if (ret == buffer1_len) {
+ if (ret == buffer1_len)
ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2,
buffer2_len);
- }
+
if (ret < 0) {
- dvb_ringbuffer_flush(&dmxdevfilter->buffer);
+ dmxdevfilter->flush_data_len =
+ dvb_ringbuffer_avail(&dmxdevfilter->buffer);
+ dvb_dmxdev_flush_output(&dmxdevfilter->buffer,
+ &dmxdevfilter->events);
dmxdevfilter->buffer.error = ret;
+
+ event.type = DMX_EVENT_BUFFER_OVERFLOW;
+ } else {
+ event.type = DMX_EVENT_NEW_SECTION;
+ event.params.section.total_length =
+ buffer1_len + buffer2_len;
+ event.params.section.actual_length =
+ event.params.section.total_length;
+
+ if (success == DMX_MISSED_ERROR)
+ event.params.section.flags =
+ DMX_FILTER_CC_ERROR;
+ else
+ event.params.section.flags = 0;
}
+
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
dmxdevfilter->state = DMXDEV_STATE_DONE;
spin_unlock(&dmxdevfilter->dev->lock);
@@ -993,46 +1468,310 @@
{
struct dmxdev_filter *dmxdevfilter = feed->priv;
struct dvb_ringbuffer *buffer;
+ struct dmxdev_events_queue *events;
+ struct dmx_filter_event event;
int ret;
+ u32 *flush_data_len;
spin_lock(&dmxdevfilter->dev->lock);
if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
- if ((dmxdevfilter->dev->capabilities &
- DMXDEV_CAP_PCR_EXTRACTION) &&
- ((dmxdevfilter->params.pes.pes_type == DMX_PES_PCR0) ||
- (dmxdevfilter->params.pes.pes_type == DMX_PES_PCR1) ||
- (dmxdevfilter->params.pes.pes_type == DMX_PES_PCR2) ||
- (dmxdevfilter->params.pes.pes_type == DMX_PES_PCR3))) {
- /*
- * Support for reporting PCR and STC pairs to user.
- * Reported data should have the following format:
- * <8 bit flags><64 bits of STC> <64bits of PCR>
- * STC and PCR values are in 27MHz.
- * The current flags that are defined:
- * 0x00000001: discontinuity_indicator
- */
- buffer = &dmxdevfilter->buffer;
- } else {
- spin_unlock(&dmxdevfilter->dev->lock);
- return 0;
- }
- } else if (dmxdevfilter->params.pes.output == DMX_OUT_TAP
- || dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ if (dmxdevfilter->params.pes.output == DMX_OUT_TAP
+ || dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) {
buffer = &dmxdevfilter->buffer;
- else
+ events = &dmxdevfilter->events;
+ flush_data_len = &dmxdevfilter->flush_data_len;
+ } else {
buffer = &dmxdevfilter->dev->dvr_buffer;
+ events = &dmxdevfilter->dev->dvr_output_events;
+ flush_data_len = &dmxdevfilter->dev->dvr_flush_data_len;
+ }
if (buffer->error) {
spin_unlock(&dmxdevfilter->dev->lock);
wake_up_all(&buffer->queue);
return 0;
}
- ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
- if (ret == buffer1_len)
- ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
- if (ret < 0) {
- dvb_ringbuffer_flush(buffer);
- buffer->error = ret;
+
+ if (dmxdevfilter->state != DMXDEV_STATE_GO) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ if (dmxdevfilter->params.pes.output == DMX_OUT_TAP) {
+ if ((success == DMX_OK) &&
+ (!events->current_event_data_size)) {
+ events->current_event_start_offset = buffer->pwrite;
+ } else if (success == DMX_OK_PES_END) {
+ event.type = DMX_EVENT_NEW_PES;
+
+ event.params.pes.actual_length =
+ events->current_event_data_size;
+ event.params.pes.total_length =
+ events->current_event_data_size;
+
+ event.params.pes.base_offset =
+ events->current_event_start_offset;
+ event.params.pes.start_offset =
+ events->current_event_start_offset;
+
+ event.params.pes.flags = 0;
+ event.params.pes.stc = 0;
+
+ dvb_dmxdev_add_event(events, &event);
+ events->current_event_data_size = 0;
+ }
+ } else {
+ if (!events->current_event_data_size) {
+ events->current_event_start_offset =
+ buffer->pwrite;
+ }
+ }
+
+	if (buffer1_len + buffer2_len) {
+		ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
+		if (ret == buffer1_len)
+			ret = dvb_dmxdev_buffer_write(buffer, buffer2,
+				buffer2_len);
+		if (ret < 0) {
+			*flush_data_len =
+				dvb_ringbuffer_avail(buffer);
+			dvb_dmxdev_flush_output(buffer, events);
+			buffer->error = ret;
+
+			event.type = DMX_EVENT_BUFFER_OVERFLOW;
+			dvb_dmxdev_add_event(events, &event);
+		} else {
+			events->current_event_data_size +=
+				(buffer1_len + buffer2_len);
+
+			if (((dmxdevfilter->params.pes.output ==
+				DMX_OUT_TS_TAP) ||
+				(dmxdevfilter->params.pes.output ==
+				DMX_OUT_TSDEMUX_TAP)) &&
+				(events->current_event_data_size >=
+				dmxdevfilter->params.pes.rec_chunk_size)) {
+
+				event.type = DMX_EVENT_NEW_REC_CHUNK;
+				event.params.recording_chunk.offset =
+					events->current_event_start_offset;
+
+				event.params.recording_chunk.size =
+					events->current_event_data_size;
+
+				dvb_dmxdev_add_event(events, &event);
+				events->current_event_data_size = 0;
+			}
+		}
+	}
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+}
+
+static int dvb_dmxdev_section_event_cb(struct dmx_section_filter *filter,
+ struct dmx_data_ready *dmx_data_ready)
+{
+ int res;
+ struct dmxdev_filter *dmxdevfilter = filter->priv;
+ struct dmx_filter_event event;
+ int free;
+
+ if (dmxdevfilter->buffer.error) {
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ return 0;
+ }
+
+ spin_lock(&dmxdevfilter->dev->lock);
+
+ if (dmxdevfilter->state != DMXDEV_STATE_GO) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ if (dmx_data_ready->data_length == 0) {
+ if (DMX_CRC_ERROR == dmx_data_ready->status) {
+ /* Section was dropped due to CRC error */
+ event.type = DMX_EVENT_SECTION_CRC_ERROR;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ }
+ return 0;
+ }
+
+ free = dvb_ringbuffer_free(&dmxdevfilter->buffer);
+
+ if ((DMX_OVERRUN_ERROR == dmx_data_ready->status) ||
+ (dmx_data_ready->data_length > free)) {
+ dmxdevfilter->flush_data_len =
+ dvb_ringbuffer_avail(&dmxdevfilter->buffer);
+ dvb_dmxdev_flush_output(&dmxdevfilter->buffer,
+ &dmxdevfilter->events);
+
+ dprintk("dmxdev: buffer overflow\n");
+
+ dmxdevfilter->buffer.error = -EOVERFLOW;
+
+ event.type = DMX_EVENT_BUFFER_OVERFLOW;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ return 0;
+ }
+
+ event.type = DMX_EVENT_NEW_SECTION;
+ event.params.section.base_offset = dmxdevfilter->buffer.pwrite;
+ event.params.section.start_offset = dmxdevfilter->buffer.pwrite;
+ event.params.section.total_length = dmx_data_ready->data_length;
+ event.params.section.actual_length = dmx_data_ready->data_length;
+
+ if (dmx_data_ready->status == DMX_MISSED_ERROR)
+ event.params.section.flags = DMX_FILTER_CC_ERROR;
+ else
+ event.params.section.flags = 0;
+
+ res = dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ DVB_RINGBUFFER_PUSH(&dmxdevfilter->buffer, dmx_data_ready->data_length);
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+
+ return res;
+}
+
+static int dvb_dmxdev_ts_event_cb(struct dmx_ts_feed *feed,
+ struct dmx_data_ready *dmx_data_ready)
+{
+ struct dmxdev_filter *dmxdevfilter = feed->priv;
+ struct dvb_ringbuffer *buffer;
+ struct dmxdev_events_queue *events;
+ struct dmx_filter_event event;
+ u32 *flush_data_len;
+ int free;
+
+ spin_lock(&dmxdevfilter->dev->lock);
+
+ if (dmxdevfilter->state != DMXDEV_STATE_GO) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
+ buffer = &dmxdevfilter->buffer;
+ events = &dmxdevfilter->events;
+ flush_data_len = &dmxdevfilter->flush_data_len;
+ } else {
+ buffer = &dmxdevfilter->dev->dvr_buffer;
+ events = &dmxdevfilter->dev->dvr_output_events;
+ flush_data_len = &dmxdevfilter->dev->dvr_flush_data_len;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_PCR) {
+ event.type = DMX_EVENT_NEW_PCR;
+ event.params.pcr.pcr = dmx_data_ready->pcr.pcr;
+ event.params.pcr.stc = dmx_data_ready->pcr.stc;
+ if (dmx_data_ready->pcr.disc_indicator_set)
+ event.params.pcr.flags =
+ DMX_FILTER_DISCONTINUITY_INDEICATOR;
+ else
+ event.params.pcr.flags = 0;
+
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if ((dmxdevfilter->params.pes.output == DMX_OUT_DECODER) ||
+ (buffer->error)) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+	free = dvb_ringbuffer_free(buffer);
+
+	if ((DMX_OVERRUN_ERROR == dmx_data_ready->status) ||
+		(dmx_data_ready->data_length > free)) {
+		*flush_data_len =
+			dvb_ringbuffer_avail(buffer);
+		dvb_dmxdev_flush_output(buffer, events);
+
+		dprintk("dmxdev: buffer overflow\n");
+
+		buffer->error = -EOVERFLOW;
+
+		event.type = DMX_EVENT_BUFFER_OVERFLOW;
+		dvb_dmxdev_add_event(events, &event);
+
+		spin_unlock(&dmxdevfilter->dev->lock);
+		return 0;
+	}
+
+	if (dmxdevfilter->params.pes.output == DMX_OUT_TAP) {
+		if ((dmx_data_ready->status == DMX_OK) &&
+			(!events->current_event_data_size)) {
+			events->current_event_start_offset =
+				buffer->pwrite;
+		} else if (dmx_data_ready->status == DMX_OK_PES_END) {
+			event.type = DMX_EVENT_NEW_PES;
+
+			event.params.pes.base_offset =
+				events->current_event_start_offset;
+			event.params.pes.start_offset =
+				events->current_event_start_offset +
+				dmx_data_ready->pes_end.start_gap;
+
+			event.params.pes.actual_length =
+				dmx_data_ready->pes_end.actual_length;
+			event.params.pes.total_length =
+				events->current_event_data_size;
+
+			event.params.pes.flags = 0;
+			if (dmx_data_ready->pes_end.disc_indicator_set)
+				event.params.pes.flags |=
+					DMX_FILTER_DISCONTINUITY_INDEICATOR;
+			if (dmx_data_ready->pes_end.pes_length_mismatch)
+				event.params.pes.flags |=
+					DMX_FILTER_PES_LENGTH_ERROR;
+
+			event.params.pes.stc = dmx_data_ready->pes_end.stc;
+			dvb_dmxdev_add_event(events, &event);
+
+			events->current_event_data_size = 0;
+		}
+	} else {
+		if (!events->current_event_data_size)
+			events->current_event_start_offset =
+				buffer->pwrite;
+	}
+
+	events->current_event_data_size += dmx_data_ready->data_length;
+	DVB_RINGBUFFER_PUSH(buffer, dmx_data_ready->data_length);
+
+ if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) ||
+ (dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)) {
+ if (events->current_event_data_size >=
+ dmxdevfilter->params.pes.rec_chunk_size) {
+ event.type = DMX_EVENT_NEW_REC_CHUNK;
+ event.params.recording_chunk.offset =
+ events->current_event_start_offset;
+
+ event.params.recording_chunk.size =
+ events->current_event_data_size;
+
+ dvb_dmxdev_add_event(events, &event);
+
+ events->current_event_data_size = 0;
+ }
}
spin_unlock(&dmxdevfilter->dev->lock);
wake_up_all(&buffer->queue);
@@ -1133,6 +1872,12 @@
case DMXDEV_TYPE_PES:
dvb_dmxdev_feed_stop(dmxdevfilter);
demux = dmxdevfilter->dev->demux;
+ if (dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) {
+ dmxdevfilter->dev->dvr_feeds_count--;
+ if (!dmxdevfilter->dev->dvr_feeds_count)
+ dmxdevfilter->dev->dvr_feed = NULL;
+ }
+
list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
demux->release_ts_feed(demux, feed->ts);
feed->ts = NULL;
@@ -1144,7 +1889,10 @@
return -EINVAL;
}
- dvb_ringbuffer_flush(&dmxdevfilter->buffer);
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ dvb_dmxdev_flush_output(&dmxdevfilter->buffer, &dmxdevfilter->events);
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
wake_up_all(&dmxdevfilter->buffer.queue);
return 0;
@@ -1213,6 +1961,24 @@
tsfeed = feed->ts;
tsfeed->priv = filter;
+ if (filter->params.pes.output == DMX_OUT_TS_TAP) {
+ tsfeed->buffer = &dmxdev->dvr_buffer;
+ if (!dmxdev->dvr_feeds_count)
+ dmxdev->dvr_feed = filter;
+ dmxdev->dvr_feeds_count++;
+ } else {
+ tsfeed->buffer = &filter->buffer;
+ }
+
+ if (tsfeed->data_ready_cb) {
+ ret = tsfeed->data_ready_cb(tsfeed, dvb_dmxdev_ts_event_cb);
+
+ if (ret < 0) {
+ dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
+ return ret;
+ }
+ }
+
ret = tsfeed->set(tsfeed, feed->pid,
ts_type, ts_pes,
filter->pes_buffer_size, timeout);
@@ -1270,7 +2036,9 @@
spin_unlock_irq(&filter->dev->lock);
}
- dvb_ringbuffer_flush(&filter->buffer);
+ spin_lock_irq(&filter->dev->lock);
+ dvb_dmxdev_flush_output(&filter->buffer, &filter->events);
+ spin_unlock_irq(&filter->dev->lock);
switch (filter->type) {
case DMXDEV_TYPE_SEC:
@@ -1295,14 +2063,27 @@
/* if no feed found, try to allocate new one */
if (!*secfeed) {
ret = dmxdev->demux->allocate_section_feed(dmxdev->demux,
- secfeed,
- dvb_dmxdev_section_callback);
+ secfeed,
+ dvb_dmxdev_section_callback);
if (ret < 0) {
printk("DVB (%s): could not alloc feed\n",
__func__);
return ret;
}
+ if ((*secfeed)->data_ready_cb) {
+ ret = (*secfeed)->data_ready_cb(
+ *secfeed,
+ dvb_dmxdev_section_event_cb);
+
+ if (ret < 0) {
+ printk(KERN_ERR "DVB (%s): could not set event cb\n",
+ __func__);
+ dvb_dmxdev_feed_restart(filter);
+ return ret;
+ }
+ }
+
ret = (*secfeed)->set(*secfeed, para->pid, 32768,
(para->flags & DMX_CHECK_CRC) ? 1 : 0);
if (ret < 0) {
@@ -1324,6 +2105,7 @@
}
(*secfilter)->priv = filter;
+ (*secfilter)->buffer = &filter->buffer;
memcpy(&((*secfilter)->filter_value[3]),
&(para->filter.filter[1]), DMX_FILTER_SIZE - 1);
@@ -1348,6 +2130,16 @@
break;
}
case DMXDEV_TYPE_PES:
+ if (filter->params.pes.rec_chunk_size <
+ DMX_REC_BUFF_CHUNK_MIN_SIZE)
+ filter->params.pes.rec_chunk_size =
+ DMX_REC_BUFF_CHUNK_MIN_SIZE;
+
+ if (filter->params.pes.rec_chunk_size >=
+ filter->buffer.size)
+ filter->params.pes.rec_chunk_size =
+ filter->buffer.size >> 2;
+
list_for_each_entry(feed, &filter->feed.ts, next) {
ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
if (ret < 0) {
@@ -1391,6 +2183,8 @@
file->private_data = dmxdevfilter;
dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
+ dvb_dmxdev_flush_events(&dmxdevfilter->events);
+
dmxdevfilter->type = DMXDEV_TYPE_NONE;
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
init_timer(&dmxdevfilter->timer);
@@ -1607,8 +2401,27 @@
ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
else
ret = dvb_dmxdev_buffer_read(&dmxdevfilter->buffer,
- file->f_flags & O_NONBLOCK,
- buf, count, ppos);
+ file->f_flags & O_NONBLOCK,
+ buf, count, ppos);
+
+ if (ret > 0) {
+ dvb_dmxdev_notify_data_read(dmxdevfilter, ret);
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ dvb_dmxdev_update_events(&dmxdevfilter->events, ret);
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+ } else if (ret == -EOVERFLOW) {
+ /*
+ * When buffer overflowed, demux-dev flushed the
+ * buffer and marked the buffer in error state.
+ * Data from underlying driver is discarded until
+ * user gets notified that buffer has overflowed.
+ * Now that the user is notified, notify underlying
+ * driver that data was flushed from output buffer.
+ */
+ dvb_dmxdev_notify_data_read(dmxdevfilter->dev->dvr_feed,
+ dmxdevfilter->flush_data_len);
+ dmxdevfilter->flush_data_len = 0;
+ }
mutex_unlock(&dmxdevfilter->mutex);
return ret;
@@ -1764,6 +2577,15 @@
*(enum dmx_playback_mode_t *)parg);
break;
+ case DMX_GET_EVENT:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_get_event(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
case DMX_GET_STC:
if (!dmxdev->demux->get_stc) {
ret = -EINVAL;
@@ -1823,10 +2645,15 @@
return 0;
if (dmxdevfilter->buffer.error)
- mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);
+ mask |= (POLLIN | POLLRDNORM | POLLERR);
if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
- mask |= (POLLIN | POLLRDNORM | POLLPRI);
+ mask |= (POLLIN | POLLRDNORM);
+
+ if (dmxdevfilter->events.notified_index !=
+ dmxdevfilter->events.write_index) {
+ mask |= POLLPRI;
+ }
return mask;
}
@@ -1952,6 +2779,10 @@
ret = dvb_dvr_feed_data(dmxdev, file->f_flags, arg);
break;
+ case DMX_GET_EVENT:
+ ret = dvb_dvr_get_event(dmxdev, file->f_flags, parg);
+ break;
+
default:
ret = -EINVAL;
break;
@@ -1978,10 +2809,14 @@
poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
if (dmxdev->dvr_buffer.error)
- mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);
+ mask |= (POLLIN | POLLRDNORM | POLLERR);
if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
- mask |= (POLLIN | POLLRDNORM | POLLPRI);
+ mask |= (POLLIN | POLLRDNORM);
+
+ if (dmxdev->dvr_output_events.notified_index !=
+ dmxdev->dvr_output_events.write_index)
+ mask |= POLLPRI;
} else {
poll_wait(file, &dmxdev->dvr_input_buffer.queue, wait);
if (dmxdev->dvr_input_buffer.error)
diff --git a/drivers/media/dvb/dvb-core/dmxdev.h b/drivers/media/dvb/dvb-core/dmxdev.h
index 4c52e84..9fd900e 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.h
+++ b/drivers/media/dvb/dvb-core/dmxdev.h
@@ -62,6 +62,35 @@
struct list_head next;
};
+struct dmxdev_events_queue {
+#define DMX_EVENT_QUEUE_SIZE 500 /* number of events */
+ /*
+ * indices used to manage events queue.
+ * read_index advanced when relevant data is read
+ * from the buffer.
+ * notified_index is the index from which next events
+ * are returned.
+ * read_index <= notified_index <= write_index
+ *
+ * If user reads the data without getting the respective
+ * event first, the read/notified indices are updated
+ * automatically to reflect the actual data that exist
+ * in the buffer.
+ */
+ u32 read_index;
+ u32 write_index;
+ u32 notified_index;
+
+ /* Bytes read by user without having respective event in the queue */
+ u32 bytes_read_no_event;
+
+ /* internal tracking of PES and recording events */
+ u32 current_event_data_size;
+ u32 current_event_start_offset;
+
+ struct dmx_filter_event queue[DMX_EVENT_QUEUE_SIZE];
+};
+
struct dmxdev_filter {
union {
struct dmx_section_filter *sec;
@@ -78,16 +107,21 @@
struct dmx_pes_filter_params pes;
} params;
+ struct dmxdev_events_queue events;
+
enum dmxdev_type type;
enum dmxdev_state state;
struct dmxdev *dev;
struct dvb_ringbuffer buffer;
+ u32 flush_data_len;
struct mutex mutex;
/* relevent for decoder PES */
unsigned long pes_buffer_size;
+ u32 rec_chunk_size;
+
/* only for sections */
struct timer_list timer;
int todo;
@@ -105,10 +139,9 @@
int filternum;
int capabilities;
-#define DMXDEV_CAP_DUPLEX 0x1
-#define DMXDEV_CAP_PULL_MODE 0x2
-#define DMXDEV_CAP_PCR_EXTRACTION 0x4
-#define DMXDEV_CAP_INDEXING 0x8
+#define DMXDEV_CAP_DUPLEX 0x1
+#define DMXDEV_CAP_PULL_MODE 0x2
+#define DMXDEV_CAP_INDEXING 0x4
enum dmx_playback_mode_t playback_mode;
dmx_source_t source;
@@ -120,6 +153,11 @@
struct dmx_frontend *dvr_orig_fe;
struct dvb_ringbuffer dvr_buffer;
+ struct dmxdev_events_queue dvr_output_events;
+ struct dmxdev_filter *dvr_feed;
+ u32 dvr_flush_data_len;
+ int dvr_feeds_count;
+
struct dvb_ringbuffer dvr_input_buffer;
struct workqueue_struct *dvr_input_workqueue;
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index bc72fee..df71f76 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -137,7 +137,12 @@
/*
cc = buf[3] & 0x0f;
- ccok = ((feed->cc + 1) & 0x0f) == cc;
+ if (feed->first_cc)
+ ccok = 1;
+ else
+ ccok = ((feed->cc + 1) & 0x0f) == cc;
+
+ feed->first_cc = 0;
feed->cc = cc;
if (!ccok)
printk("missed packet!\n");
@@ -145,8 +150,15 @@
/* PUSI ? */
if (buf[1] & 0x40) {
- feed->peslen = 0xfffa;
+ if (feed->pusi_seen)
+ /* We had seen PUSI before, this means
+ * that previous PES can be closed now.
+ */
+ feed->cb.ts(NULL, 0, NULL, 0,
+ &feed->feed.ts, DMX_OK_PES_END);
+
feed->pusi_seen = 1;
+ feed->peslen = 0;
}
if (feed->pusi_seen == 0)
@@ -204,6 +216,11 @@
if (dvb_demux_performancecheck)
demux->total_crc_time +=
dvb_dmx_calc_time_delta(pre_crc_time);
+
+ /* Notify on CRC error */
+ feed->cb.sec(NULL, 0, NULL, 0,
+ &f->filter, DMX_CRC_ERROR);
+
return -1;
}
@@ -339,7 +356,12 @@
p = 188 - count; /* payload start */
cc = buf[3] & 0x0f;
- ccok = ((feed->cc + 1) & 0x0f) == cc;
+ if (feed->first_cc)
+ ccok = 1;
+ else
+ ccok = ((feed->cc + 1) & 0x0f) == cc;
+
+ feed->first_cc = 0;
feed->cc = cc;
if (buf[3] & 0x20) {
@@ -663,6 +685,52 @@
}
}
+void dvb_dmx_swfilter_section_packets(struct dvb_demux *demux, const u8 *buf,
+ size_t count)
+{
+ struct dvb_demux_feed *feed;
+ u16 pid = ts_pid(buf);
+ struct timespec pre_time;
+
+ if (dvb_demux_performancecheck)
+ pre_time = current_kernel_time();
+
+ spin_lock(&demux->lock);
+
+ demux->sw_filter_abort = 0;
+
+ while (count--) {
+ if (buf[0] != 0x47) {
+ buf += 188;
+ continue;
+ }
+
+ if (demux->playback_mode == DMX_PB_MODE_PULL)
+ if (dvb_dmx_swfilter_buffer_check(demux, pid) < 0)
+ break;
+
+ list_for_each_entry(feed, &demux->feed_list, list_head) {
+ if (feed->pid != pid)
+ continue;
+
+ if (!feed->feed.sec.is_filtering)
+ continue;
+
+ if (dvb_dmx_swfilter_section_packet(feed, buf) < 0) {
+ feed->feed.sec.seclen = 0;
+ feed->feed.sec.secbufp = 0;
+ }
+ }
+ buf += 188;
+ }
+
+ spin_unlock(&demux->lock);
+
+ if (dvb_demux_performancecheck)
+ demux->total_process_time += dvb_dmx_calc_time_delta(pre_time);
+}
+EXPORT_SYMBOL(dvb_dmx_swfilter_section_packets);
+
void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
size_t count)
{
@@ -984,6 +1052,8 @@
return -ENODEV;
}
+ feed->first_cc = 1;
+
if ((ret = demux->start_feed(feed)) < 0) {
mutex_unlock(&demux->mutex);
return ret;
@@ -1053,6 +1123,25 @@
return ret;
}
+static int dmx_ts_feed_data_ready_cb(struct dmx_ts_feed *feed,
+ dmx_ts_data_ready_cb callback)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (dvbdmxfeed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ dvbdmxfeed->data_ready_cb.ts = callback;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
static int dmx_ts_set_indexing_params(
struct dmx_ts_feed *ts_feed,
struct dmx_indexing_video_params *params)
@@ -1084,7 +1173,7 @@
feed->cb.ts = callback;
feed->demux = demux;
feed->pid = 0xffff;
- feed->peslen = 0xfffa;
+ feed->peslen = 0;
feed->buffer = NULL;
memset(&feed->indexing_params, 0,
sizeof(struct dmx_indexing_video_params));
@@ -1105,6 +1194,8 @@
(*ts_feed)->set = dmx_ts_feed_set;
(*ts_feed)->set_indexing_params = dmx_ts_set_indexing_params;
(*ts_feed)->get_decoder_buff_status = dmx_ts_feed_decoder_buff_status;
+ (*ts_feed)->data_ready_cb = dmx_ts_feed_data_ready_cb;
+ (*ts_feed)->notify_data_read = NULL;
if (!(feed->filter = dvb_dmx_filter_alloc(demux))) {
feed->state = DMX_STATE_FREE;
@@ -1266,6 +1357,7 @@
dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;
dvbdmxfeed->feed.sec.secbufp = 0;
dvbdmxfeed->feed.sec.seclen = 0;
+ dvbdmxfeed->first_cc = 1;
if (!dvbdmx->start_feed) {
mutex_unlock(&dvbdmx->mutex);
@@ -1312,6 +1404,26 @@
return ret;
}
+
+static int dmx_section_feed_data_ready_cb(struct dmx_section_feed *feed,
+ dmx_section_data_ready_cb callback)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (dvbdmxfeed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ dvbdmxfeed->data_ready_cb.sec = callback;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
static int dmx_section_feed_release_filter(struct dmx_section_feed *feed,
struct dmx_section_filter *filter)
{
@@ -1381,6 +1493,8 @@
(*feed)->start_filtering = dmx_section_feed_start_filtering;
(*feed)->stop_filtering = dmx_section_feed_stop_filtering;
(*feed)->release_filter = dmx_section_feed_release_filter;
+ (*feed)->data_ready_cb = dmx_section_feed_data_ready_cb;
+ (*feed)->notify_data_read = NULL;
mutex_unlock(&dvbdmx->mutex);
return 0;
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index ebe34ad..5a32363 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -78,6 +78,11 @@
dmx_section_cb sec;
} cb;
+ union {
+ dmx_ts_data_ready_cb ts;
+ dmx_section_data_ready_cb sec;
+ } data_ready_cb;
+
struct dvb_demux *demux;
void *priv;
int type;
@@ -93,6 +98,7 @@
enum dmx_ts_pes pes_type;
int cc;
+ int first_cc;
int pusi_seen; /* prevents feeding of garbage from previous section */
u32 peslen;
@@ -173,6 +179,8 @@
int dvb_dmx_init(struct dvb_demux *dvbdemux);
void dvb_dmx_release(struct dvb_demux *dvbdemux);
+void dvb_dmx_swfilter_section_packets(struct dvb_demux *demux, const u8 *buf,
+ size_t count);
void dvb_dmx_swfilter_packets(struct dvb_demux *dvbdmx, const u8 *buf,
size_t count);
void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count);
diff --git a/drivers/media/dvb/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb/dvb-core/dvb_ringbuffer.h
index 8b591a6..4093fa5 100644
--- a/drivers/media/dvb/dvb-core/dvb_ringbuffer.h
+++ b/drivers/media/dvb/dvb-core/dvb_ringbuffer.h
@@ -111,6 +111,10 @@
#define DVB_RINGBUFFER_SKIP(rbuf,num) \
(rbuf)->pread=((rbuf)->pread+(num))%(rbuf)->size
+/* advance write ptr by <num> bytes */
+#define DVB_RINGBUFFER_PUSH(rbuf, num) \
+ ((rbuf)->pwrite = (((rbuf)->pwrite+(num))%(rbuf)->size))
+
/*
** read <len> bytes from ring buffer into <buf>
** <usermem> specifies whether <buf> resides in user space
diff --git a/drivers/media/dvb/mpq/adapter/mpq_stream_buffer.c b/drivers/media/dvb/mpq/adapter/mpq_stream_buffer.c
index 738d730..4b0e7be 100644
--- a/drivers/media/dvb/mpq/adapter/mpq_stream_buffer.c
+++ b/drivers/media/dvb/mpq/adapter/mpq_stream_buffer.c
@@ -192,18 +192,18 @@
struct mpq_streambuffer *sbuff,
u8 *buf, size_t len)
{
- int actual_len;
+ ssize_t actual_len;
actual_len = dvb_ringbuffer_avail(&sbuff->raw_data);
if (actual_len < len)
len = actual_len;
- if (actual_len)
- dvb_ringbuffer_read(&sbuff->raw_data, buf, actual_len);
+ if (len)
+ dvb_ringbuffer_read(&sbuff->raw_data, buf, len);
wake_up_all(&sbuff->raw_data.queue);
- return actual_len;
+ return len;
}
EXPORT_SYMBOL(mpq_streambuffer_data_read);
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
index 7223377..a01cf5b 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
@@ -33,16 +33,6 @@
sizeof(struct mpq_streambuffer_packet_header) + \
sizeof(struct mpq_adapter_video_meta_data)))
-/*
- * PCR/STC information length saved in ring-buffer.
- * PCR / STC are saved in ring-buffer in the following form:
- * <8 bit flags><64 bits of STC> <64bits of PCR>
- * STC and PCR values are in 27MHz.
- * The current flags that are defined:
- * 0x00000001: discontinuity_indicator
- */
-#define PCR_STC_LEN 17
-
/* Number of demux devices, has default of linux configuration */
static int mpq_demux_device_num = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
@@ -839,7 +829,7 @@
ion_alloc(mpq_demux->ion_client,
actual_buffer_size,
SZ_4K,
- ION_HEAP(ION_CP_MM_HEAP_ID));
+ (ION_HEAP(ION_CP_MM_HEAP_ID) | CACHED));
if (IS_ERR_OR_NULL(feed_data->payload_buff_handle)) {
ret = PTR_ERR(feed_data->payload_buff_handle);
@@ -1906,10 +1896,9 @@
struct dvb_demux_feed *feed,
const u8 *buf)
{
- int i;
u64 pcr;
u64 stc;
- u8 output[PCR_STC_LEN];
+ struct dmx_data_ready data;
struct mpq_demux *mpq_demux = feed->demux->priv;
const struct ts_packet_header *ts_header;
const struct ts_adaptation_field *adaptation_field;
@@ -1960,17 +1949,13 @@
stc += buf[188];
stc *= 256; /* convert from 105.47 KHZ to 27MHz */
- output[0] = adaptation_field->discontinuity_indicator;
+ data.data_length = 0;
+ data.pcr.pcr = pcr;
+ data.pcr.stc = stc;
+ data.pcr.disc_indicator_set = adaptation_field->discontinuity_indicator;
+ data.status = DMX_OK_PCR;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
- for (i = 1; i <= 8; i++)
- output[i] = (stc >> ((8-i) << 3)) & 0xFF;
-
- for (i = 9; i <= 16; i++)
- output[i] = (pcr >> ((16-i) << 3)) & 0xFF;
-
- feed->cb.ts(output, PCR_STC_LEN,
- NULL, 0,
- &feed->feed.ts, DMX_OK);
return 0;
}
EXPORT_SYMBOL(mpq_dmx_process_pcr_packet);
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
index e7d6b74..bd1ecfe 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
@@ -641,7 +641,6 @@
mpq_demux->dmxdev.capabilities =
DMXDEV_CAP_DUPLEX |
DMXDEV_CAP_PULL_MODE |
- DMXDEV_CAP_PCR_EXTRACTION |
DMXDEV_CAP_INDEXING;
mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
index f374d91..fd94e80 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
@@ -752,7 +752,6 @@
mpq_demux->dmxdev.capabilities =
DMXDEV_CAP_DUPLEX |
DMXDEV_CAP_PULL_MODE |
- DMXDEV_CAP_PCR_EXTRACTION |
DMXDEV_CAP_INDEXING;
mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c
index 74b7c22..e4858fa 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c
@@ -139,7 +139,6 @@
mpq_demux->dmxdev.capabilities =
DMXDEV_CAP_DUPLEX |
DMXDEV_CAP_PULL_MODE |
- DMXDEV_CAP_PCR_EXTRACTION |
DMXDEV_CAP_INDEXING;
mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index 23d11c3..c0a35f9 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -103,6 +103,7 @@
struct hci_fm_ch_det_threshold ch_det_threshold;
struct hci_fm_data_rd_rsp default_data;
struct hci_fm_spur_data spur_data;
+ unsigned char is_station_valid;
};
static struct video_device *priv_videodev;
@@ -2645,6 +2646,9 @@
ctrl->value = radio->ch_det_threshold.sinr_samples;
break;
+ case V4L2_CID_PRIVATE_VALID_CHANNEL:
+ ctrl->value = radio->is_station_valid;
+ break;
default:
retval = -EINVAL;
}
@@ -2811,6 +2815,8 @@
struct hci_fm_tx_rt tx_rt = {0};
struct hci_fm_def_data_rd_req rd_txgain;
struct hci_fm_def_data_wr_req wr_txgain;
+ char sinr_th, sinr;
+ __u8 intf_det_low_th, intf_det_high_th, intf_det_out;
switch (ctrl->id) {
case V4L2_CID_PRIVATE_IRIS_TX_TONE:
@@ -3281,6 +3287,40 @@
case V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE:
update_spur_table(radio);
break;
+ case V4L2_CID_PRIVATE_VALID_CHANNEL:
+ retval = hci_cmd(HCI_FM_GET_DET_CH_TH_CMD, radio->fm_hdev);
+ if (retval < 0) {
+ FMDERR("%s: Failed to determine channel's validity\n",
+ __func__);
+ return retval;
+ } else {
+ sinr_th = radio->ch_det_threshold.sinr;
+ intf_det_low_th = radio->ch_det_threshold.low_th;
+ intf_det_high_th = radio->ch_det_threshold.high_th;
+ }
+
+ retval = hci_cmd(HCI_FM_GET_STATION_PARAM_CMD, radio->fm_hdev);
+ if (retval < 0) {
+ FMDERR("%s: Failed to determine channel's validity\n",
+ __func__);
+ return retval;
+ } else
+ sinr = radio->fm_st_rsp.station_rsp.sinr;
+
+ retval = hci_cmd(HCI_FM_STATION_DBG_PARAM_CMD, radio->fm_hdev);
+ if (retval < 0) {
+ FMDERR("%s: Failed to determine channel's validity\n",
+ __func__);
+ return retval;
+ } else
+ intf_det_out = radio->st_dbg_param.in_det_out;
+
+ if ((sinr >= sinr_th) && (intf_det_out >= intf_det_low_th) &&
+ (intf_det_out <= intf_det_high_th))
+ radio->is_station_valid = VALID_CHANNEL;
+ else
+ radio->is_station_valid = INVALID_CHANNEL;
+ break;
default:
retval = -EINVAL;
}
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 956a043..e0de38d 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -214,7 +214,7 @@
obj-$(CONFIG_MSM_CAMERA) += msm/
obj-$(CONFIG_ARCH_OMAP) += omap/
-obj-$(CONFIG_MSM_VIDC) += msm_vidc/
+obj-$(CONFIG_MSM_VIDC_V4L2) += msm_vidc/
ccflags-y += -I$(srctree)/drivers/media/dvb/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb/frontends
diff --git a/drivers/media/video/msm/Kconfig b/drivers/media/video/msm/Kconfig
index 9c791e4..c5abd76 100644
--- a/drivers/media/video/msm/Kconfig
+++ b/drivers/media/video/msm/Kconfig
@@ -280,6 +280,16 @@
and cropping image. The driver support V4L2 subdev
APIs.
+config MSM_CCI
+ bool "Qualcomm MSM Camera Control Interface support"
+ depends on MSM_CAMERA
+ ---help---
+ Enable support for Camera Control Interface driver only
+ for those platforms that have hardware support. This driver
+ is responsible for handling I2C read and write on the I2C
+ bus. It is also responsible for synchronization with
+ GPIO and data frames.
+
config QUP_EXCLUSIVE_TO_CAMERA
bool "QUP exclusive to camera"
depends on MSM_CAMERA
@@ -291,6 +301,63 @@
by QUP in the board file as QUP is used by
applications other than camera.
+config MSM_CSI20_HEADER
+ bool "Qualcomm MSM CSI 2.0 Header"
+ depends on MSM_CAMERA
+ ---help---
+ Enable support for CSI drivers to include 2.0
+ header. This header has register macros and its
+ values and bit mask for register configuration bits
+ This config macro is required for targets based on 8960,
+ 8930 and 8064 platforms.
+
+config MSM_CSI30_HEADER
+ bool "Qualcomm MSM CSI 3.0 Header"
+ depends on MSM_CAMERA
+ ---help---
+ Enable support for CSI drivers to include 3.0
+ header. This header has register macros and its
+ values and bit mask for register configuration bits
+ This config macro is required for targets based on
+ 8064 platforms.
+
+config MSM_CSIPHY
+ bool "Qualcomm MSM Camera Serial Interface Physical receiver support"
+ depends on MSM_CAMERA
+ ---help---
+ Enable support for Camera Serial Interface
+ Physical receiver. It deserializes packets and
+ supports detection of packet start and stop
+ signalling.
+
+config MSM_CSID
+ bool "Qualcomm MSM Camera Serial Interface decoder support"
+ depends on MSM_CAMERA
+ ---help---
+ Enable support for Camera Serial Interface decoder.
+ It supports lane merging and decoding of packets
+ based on cid which is mapped to a virtual channel
+ and datatype.
+
+config MSM_CSI2_REGISTER
+ bool "Qualcomm MSM CSI2 Register"
+ depends on MSM_CAMERA
+ ---help---
+ Register CSIPHY, CSID and ISPIF subdevices during
+ msm_open. Different CSI components are registered
+ based on platform. This macro specifies registering
+ of CSIPHY, CSID and ISPIF subdevices to receive data
+ from sensor.
+
+config MSM_ISPIF
+ bool "Qualcomm MSM Image Signal Processing interface support"
+ depends on MSM_CAMERA
+ ---help---
+ Enable support for Image Signal Processing interface module.
+ This module acts as a crossbar between CSID and VFE. Output
+ of any CID of CSID can be routed to one of pixel or raw
+ data interface in VFE.
+
config S5K3L1YX
bool "Sensor S5K3L1YX (BAYER 12M)"
depends on MSM_CAMERA
diff --git a/drivers/media/video/msm/Makefile b/drivers/media/video/msm/Makefile
index 5703d88..3ca4198 100644
--- a/drivers/media/video/msm/Makefile
+++ b/drivers/media/video/msm/Makefile
@@ -1,41 +1,35 @@
GCC_VERSION := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
-ifeq ($(GCC_VERSION),0404)
-CFLAGS_REMOVE_msm_vfe8x.o = -Wframe-larger-than=1024
-endif
-EXTRA_CFLAGS += -Idrivers/media/video/msm/io
+ccflags-y += -Idrivers/media/video/msm/io
+ccflags-y += -Idrivers/media/video/msm/vfe
obj-$(CONFIG_MSM_CAMERA) += io/
ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
+ EXTRA_CFLAGS += -Idrivers/media/video/msm/cci
EXTRA_CFLAGS += -Idrivers/media/video/msm/csi
EXTRA_CFLAGS += -Idrivers/media/video/msm/eeprom
EXTRA_CFLAGS += -Idrivers/media/video/msm/sensors
EXTRA_CFLAGS += -Idrivers/media/video/msm/actuators
EXTRA_CFLAGS += -Idrivers/media/video/msm/server
- obj-$(CONFIG_MSM_CAMERA) += msm_isp.o msm.o msm_mem.o msm_mctl.o msm_mctl_buf.o msm_mctl_pp.o msm_vfe_stats_buf.o
- obj-$(CONFIG_MSM_CAMERA) += server/ eeprom/ sensors/ actuators/ csi/
+ obj-$(CONFIG_MSM_CAMERA) += msm_isp.o msm.o msm_mem.o msm_mctl.o msm_mctl_buf.o msm_mctl_pp.o
+ obj-$(CONFIG_MSM_CAMERA) += server/
+ obj-$(CONFIG_MSM_CAM_IRQ_ROUTER) += msm_camirq_router.o
+ obj-$(CONFIG_MSM_CAMERA) += cci/ eeprom/ sensors/ actuators/ csi/
obj-$(CONFIG_MSM_CPP) += cpp/
obj-$(CONFIG_MSM_CAMERA) += msm_gesture.o
- obj-$(CONFIG_MSM_CAM_IRQ_ROUTER) += msm_camirq_router.o
else
obj-$(CONFIG_MSM_CAMERA) += msm_camera.o
endif
+obj-$(CONFIG_MSM_CAMERA) += vfe/
obj-$(CONFIG_MSM_CAMERA) += msm_axi_qos.o gemini/ mercury/
obj-$(CONFIG_MSM_CAMERA_FLASH) += flash.o
-obj-$(CONFIG_ARCH_MSM_ARM11) += msm_vfe7x.o
ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
- obj-$(CONFIG_ARCH_MSM7X27A) += msm_vfe7x27a_v4l2.o
+ obj-$(CONFIG_ARCH_MSM8X60) += msm_vpe.o
+ obj-$(CONFIG_ARCH_MSM7X30) += msm_vpe.o msm_axi_qos.o
else
- obj-$(CONFIG_ARCH_MSM7X27A) += msm_vfe7x27a.o
+ obj-$(CONFIG_ARCH_MSM8X60) += msm_vpe1.o
+ obj-$(CONFIG_ARCH_MSM7X30) += msm_vpe1.o
endif
-obj-$(CONFIG_ARCH_QSD8X50) += msm_vfe8x.o msm_vfe8x_proc.o
-ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
- obj-$(CONFIG_ARCH_MSM8X60) += msm_vfe31_v4l2.o msm_vpe.o
- obj-$(CONFIG_ARCH_MSM7X30) += msm_vfe31_v4l2.o msm_vpe.o msm_axi_qos.o
-else
- obj-$(CONFIG_ARCH_MSM8X60) += msm_vfe31.o msm_vpe1.o
- obj-$(CONFIG_ARCH_MSM7X30) += msm_vfe31.o msm_vpe1.o
-endif
-obj-$(CONFIG_ARCH_MSM8960) += msm_vfe32.o msm_vpe.o
+obj-$(CONFIG_ARCH_MSM8960) += msm_vpe.o
obj-$(CONFIG_MT9T013) += mt9t013.o mt9t013_reg.o
obj-$(CONFIG_SN12M0PZ) += sn12m0pz.o sn12m0pz_reg.o
obj-$(CONFIG_MT9P012) += mt9p012_reg.o
diff --git a/drivers/media/video/msm/cci/Makefile b/drivers/media/video/msm/cci/Makefile
new file mode 100644
index 0000000..195a1b2
--- /dev/null
+++ b/drivers/media/video/msm/cci/Makefile
@@ -0,0 +1,3 @@
+GCC_VERSION := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm -Idrivers/media/video/msm/server
+obj-$(CONFIG_MSM_CCI) += msm_cci.o
diff --git a/drivers/media/video/msm/cci/msm_cam_cci_hwreg.h b/drivers/media/video/msm/cci/msm_cam_cci_hwreg.h
new file mode 100644
index 0000000..68c78d5
--- /dev/null
+++ b/drivers/media/video/msm/cci/msm_cam_cci_hwreg.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CAM_CCI_HWREG__
+#define __MSM_CAM_CCI_HWREG__
+
+#define CCI_HW_VERSION_ADDR 0x00000000
+#define CCI_RESET_CMD_ADDR 0x00000004
+#define CCI_RESET_CMD_RMSK 0xcf73f3f7
+#define CCI_M0_RESET_RMSK 0x3F1
+#define CCI_M1_RESET_RMSK 0x3F001
+#define CCI_QUEUE_START_ADDR 0x00000008
+#define CCI_SET_CID_SYNC_TIMER_0_ADDR 0x00000010
+#define CCI_I2C_M0_SCL_CTL_ADDR 0x00000100
+#define CCI_I2C_M0_SDA_CTL_0_ADDR 0x00000104
+#define CCI_I2C_M0_SDA_CTL_1_ADDR 0x00000108
+#define CCI_I2C_M0_SDA_CTL_2_ADDR 0x0000010c
+#define CCI_I2C_M0_READ_DATA_ADDR 0x00000118
+#define CCI_I2C_M0_MISC_CTL_ADDR 0x00000110
+#define CCI_HALT_REQ_ADDR 0x00000034
+#define CCI_M0_HALT_REQ_RMSK 0x1
+#define CCI_M1_HALT_REQ_RMSK 0x01
+#define CCI_HALT_REQ_ADDR 0x00000034
+#define CCI_I2C_M1_SCL_CTL_ADDR 0x00000200
+#define CCI_I2C_M1_SDA_CTL_0_ADDR 0x00000204
+#define CCI_I2C_M1_SDA_CTL_1_ADDR 0x00000208
+#define CCI_I2C_M1_SDA_CTL_2_ADDR 0x0000020c
+#define CCI_I2C_M1_MISC_CTL_ADDR 0x00000210
+#define CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR 0x00000304
+#define CCI_I2C_M0_Q0_CUR_CMD_ADDR 0x00000308
+#define CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR 0x00000300
+#define CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x00000310
+#define CCI_IRQ_MASK_0_ADDR 0x00000c04
+#define CCI_IRQ_CLEAR_0_ADDR 0x00000c08
+#define CCI_IRQ_STATUS_0_ADDR 0x00000c0c
+#define CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR_BMSK 0x40000000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR_BMSK 0x20000000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR_BMSK 0x10000000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR_BMSK 0x8000000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK 0x4000000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK 0x2000000
+#define CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK 0x1000000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK 0x100000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK 0x10000
+#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK 0x1000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK 0x100
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK 0x10
+#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK 0x1
+#define CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x00000c00
+#endif /* __MSM_CAM_CCI_HWREG__ */
diff --git a/drivers/media/video/msm/cci/msm_cci.c b/drivers/media/video/msm/cci/msm_cci.c
new file mode 100644
index 0000000..ad3cc6a
--- /dev/null
+++ b/drivers/media/video/msm/cci/msm_cci.c
@@ -0,0 +1,748 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <mach/board.h>
+#include <mach/camera.h>
+#include <media/msm_isp.h>
+#include "msm_cci.h"
+#include "msm.h"
+#include "msm_cam_server.h"
+#include "msm_cam_cci_hwreg.h"
+
+#define V4L2_IDENT_CCI 50005
+#define CCI_I2C_QUEUE_0_SIZE 2
+#define CCI_I2C_QUEUE_1_SIZE 16
+
+#undef CDBG
+#define CDBG pr_debug
+
+/*
+ * msm_cci_set_clk_param() - program identical I2C bus timing parameters
+ * into both CCI masters.
+ */
+static void msm_cci_set_clk_param(struct cci_device *cci_dev)
+{
+	/* I2C timing parameters, in CCI clock cycles */
+	const uint16_t thigh = 78;
+	const uint16_t tlow = 114;
+	const uint16_t tsusto = 28;
+	const uint16_t tsusta = 28;
+	const uint16_t thddat = 10;
+	const uint16_t thdsta = 77;
+	const uint16_t tbuf = 118;
+	const uint8_t hw_scl_stretch_en = 0;	/* SCL clock stretching off */
+	const uint8_t hw_rdhld = 6;	/* SDA internal hold, 1-6 cycles, to
+					 * bridge undefined falling SCL region */
+	const uint8_t hw_tsp = 1;	/* glitch filter, 1-3 cycles */
+	uint32_t off;
+
+	/* The master-1 timing registers sit 0x100 above the master-0
+	 * registers, so program both masters in one pass. */
+	for (off = 0; off <= 0x100; off += 0x100) {
+		msm_camera_io_w(thigh << 16 | tlow,
+			cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR + off);
+		msm_camera_io_w(tsusto << 16 | tsusta,
+			cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR + off);
+		msm_camera_io_w(thddat << 16 | thdsta,
+			cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR + off);
+		msm_camera_io_w(tbuf,
+			cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR + off);
+		msm_camera_io_w(hw_scl_stretch_en << 8 | hw_rdhld << 4 |
+			hw_tsp,
+			cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR + off);
+	}
+}
+
+/*
+ * msm_cci_i2c_config_sync_timer() - program the sync-timer register for
+ * the client's CID (the timer registers are 4 bytes apart).
+ *
+ * NOTE(review): the value written is the CID itself; confirm the register
+ * expects the CID rather than an actual timer value.  Always returns 0.
+ */
+static int32_t msm_cci_i2c_config_sync_timer(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *c_ctrl)
+{
+	struct cci_device *cci_dev;
+	cci_dev = v4l2_get_subdevdata(sd);
+	msm_camera_io_w(c_ctrl->cci_info->cid, cci_dev->base +
+		CCI_SET_CID_SYNC_TIMER_0_ADDR + (c_ctrl->cci_info->cid * 0x4));
+	return 0;
+}
+
+/*
+ * msm_cci_i2c_set_freq() - write the client's frequency setting into the
+ * SCL/SDA/MISC control registers of the selected master (registers for
+ * master 1 are 0x100 above those of master 0).  Always returns 0.
+ *
+ * NOTE(review): the same raw 'freq' value is written to all five
+ * registers, but these registers hold packed timing fields (see
+ * msm_cci_set_clk_param()), so writing a bus frequency verbatim looks
+ * suspect — confirm the intended encoding against the hardware manual.
+ */
+static int32_t msm_cci_i2c_set_freq(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *c_ctrl)
+{
+	struct cci_device *cci_dev;
+	uint32_t val;
+	cci_dev = v4l2_get_subdevdata(sd);
+	val = c_ctrl->cci_info->freq;
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	return 0;
+}
+
+/*
+ * msm_cci_validate_queue() - ensure the (master, queue) command queue has
+ * room for 'len' more words.
+ *
+ * Reads the queue's current word count; if appending len + 1 words would
+ * exceed the queue capacity, a REPORT command is appended and the pending
+ * queue contents are executed, blocking until the report interrupt
+ * completes reset_complete (which doubles as the command-done completion).
+ *
+ * Returns 0 when there is room (or the drain succeeded), otherwise the
+ * negative status recorded by the IRQ handler.
+ *
+ * NOTE(review): wait_for_completion_interruptible() can return early on a
+ * signal without the hardware having finished; its return value is not
+ * checked here — confirm that is acceptable.
+ */
+static int32_t msm_cci_validate_queue(struct cci_device *cci_dev,
+	uint32_t len,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+	uint32_t read_val = 0;
+	/* per-master register banks are 0x200 apart, per-queue banks 0x100 */
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	read_val = msm_camera_io_r(cci_dev->base +
+		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+	CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d\n",
+		__func__, __LINE__, read_val, len,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
+	if ((read_val + len + 1) > cci_dev->
+		cci_i2c_queue_info[master][queue].max_queue_size) {
+		uint32_t reg_val = 0;
+		uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
+		CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
+		msm_camera_io_w(report_val,
+			cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+			reg_offset);
+		read_val++;
+		CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d\n",
+			__func__, __LINE__, read_val);
+		msm_camera_io_w(read_val, cci_dev->base +
+			CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+		/* one start bit per (master, queue) pair */
+		reg_val = 1 << ((master * 2) + queue);
+		CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
+		msm_camera_io_w(reg_val, cci_dev->base + CCI_QUEUE_START_ADDR);
+		CDBG("%s line %d wait_for_completion_interruptible\n",
+			__func__, __LINE__);
+		wait_for_completion_interruptible(&cci_dev->
+			cci_master_info[master].reset_complete);
+		rc = cci_dev->cci_master_info[master].status;
+		if (rc < 0)
+			pr_err("%s failed rc %d\n", __func__, rc);
+	}
+	return rc;
+}
+
+/*
+ * msm_cci_data_queue() - pack an I2C register-write table into CCI command
+ * words and load them into the (master, queue) command queue.
+ *
+ * Consecutive table entries with a zero reg_addr continue a burst from the
+ * previous address.  Each burst is encoded as one CCI_I2C_WRITE_CMD byte
+ * (payload length in its high nibble), a 1- or 2-byte register address and
+ * data bytes — at most 10 command bytes total — then packed four bytes per
+ * 32-bit queue word.
+ *
+ * Returns 0 on success or a negative error code from queue validation.
+ */
+static int32_t msm_cci_data_queue(struct cci_device *cci_dev,
+	struct msm_camera_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue)
+{
+	uint16_t i = 0, j = 0, k = 0, h = 0, len = 0;
+	int32_t rc = 0;
+	uint32_t cmd = 0;
+	uint8_t data[10];
+	uint16_t reg_addr = 0;
+	struct msm_camera_cci_i2c_write_cfg *i2c_msg =
+		&c_ctrl->cfg.cci_i2c_write_cfg;
+	uint16_t cmd_size = i2c_msg->size;
+	struct msm_camera_i2c_reg_conf *i2c_cmd = i2c_msg->reg_conf_tbl;
+	enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
+	CDBG("%s addr type %d data type %d\n", __func__,
+		i2c_msg->addr_type, i2c_msg->data_type);
+	/* assume total size within the max queue */
+	while (cmd_size) {
+		CDBG("%s cmd_size %d addr 0x%x data 0x%x", __func__,
+			cmd_size, i2c_cmd->reg_addr, i2c_cmd->reg_data);
+		data[i++] = CCI_I2C_WRITE_CMD;
+		if (i2c_cmd->reg_addr)
+			reg_addr = i2c_cmd->reg_addr;
+		/* either byte or word addr */
+		if (i2c_msg->addr_type == MSM_CAMERA_I2C_BYTE_ADDR)
+			data[i++] = reg_addr;
+		else {
+			data[i++] = (reg_addr & 0xFF00) >> 8;
+			data[i++] = reg_addr & 0x00FF;
+		}
+		/* max of 10 data bytes */
+		do {
+			if (i2c_msg->data_type == MSM_CAMERA_I2C_BYTE_DATA) {
+				data[i++] = i2c_cmd->reg_data;
+				reg_addr++;
+			} else {
+				/* need room for BOTH bytes of a word: the
+				 * original tested (i + 1) <= 10, letting the
+				 * second write spill into data[10] */
+				if ((i + 2) <= 10) {
+					data[i++] = (i2c_cmd->reg_data &
+						0xFF00) >> 8; /* MSB */
+					data[i++] = i2c_cmd->reg_data &
+						0x00FF; /* LSB */
+					reg_addr += 2;
+				} else
+					break;
+			}
+			i2c_cmd++;
+		/* i < 10: with the original i <= 10, a byte write could
+		 * land at data[10], past the end of the array */
+		} while (--cmd_size && !i2c_cmd->reg_addr && (i < 10));
+		data[0] |= ((i-1) << 4);
+		len = ((i-1)/4) + 1;
+		/* the original ignored validation failures and loaded the
+		 * words into a possibly-full queue anyway */
+		rc = msm_cci_validate_queue(cci_dev, len, master, queue);
+		if (rc < 0) {
+			pr_err("%s failed line %d rc %d\n",
+				__func__, __LINE__, rc);
+			return rc;
+		}
+		for (h = 0, k = 0; h < len; h++) {
+			cmd = 0;
+			for (j = 0; (j < 4 && k < i); j++)
+				cmd |= (data[k++] << (j * 8));
+			CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x\n",
+				__func__, cmd);
+			msm_camera_io_w(cmd, cci_dev->base +
+				CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+				master * 0x200 + queue * 0x100);
+		}
+		i = 0;
+	}
+	return 0;
+}
+
+/*
+ * msm_cci_write_i2c_queue() - load one 32-bit command word into the
+ * (master, queue) command queue, draining the queue first if it is full.
+ *
+ * Returns 0 on success or a negative error code.  (The original ignored
+ * the result of msm_cci_validate_queue() and could overrun a full queue.)
+ */
+static int32_t msm_cci_write_i2c_queue(struct cci_device *cci_dev,
+	uint32_t val,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	CDBG("%s:%d called\n", __func__, __LINE__);
+	rc = msm_cci_validate_queue(cci_dev, 1, master, queue);
+	if (rc < 0) {
+		pr_err("%s failed line %d rc %d\n", __func__, __LINE__, rc);
+		return rc;
+	}
+	CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val %x:%x\n",
+		__func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset, val);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset);
+	return rc;
+}
+
+/*
+ * msm_cci_i2c_read() - read num_byte bytes from an I2C slave through the
+ * CCI command queue (QUEUE_1 of the selected master).
+ *
+ * Queues SET_PARAM, LOCK, a WRITE of the register address, READ and
+ * UNLOCK commands, triggers the queue, waits for the read-done interrupt,
+ * then unpacks the read-data FIFO into read_cfg->data.  The first FIFO
+ * byte echoes the slave id and is skipped.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int32_t msm_cci_i2c_read(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *c_ctrl)
+{
+	/* rc must be SIGNED: it carries negative error codes.  The original
+	 * declared it uint32_t, making every "rc < 0" test below dead code,
+	 * so queue failures were silently ignored. */
+	int32_t rc = 0;
+	uint32_t val = 0;
+	int32_t read_bytes = 0;
+	int32_t index = 0, first_byte = 0;
+	uint32_t i = 0;
+	enum cci_i2c_master_t master;
+	enum cci_i2c_queue_t queue = QUEUE_1;
+	struct cci_device *cci_dev = NULL;
+	struct msm_camera_cci_i2c_read_cfg *read_cfg = NULL;
+	CDBG("%s line %d\n", __func__, __LINE__);
+	cci_dev = v4l2_get_subdevdata(sd);
+	master = c_ctrl->cci_info->cci_i2c_master;
+	read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+	mutex_lock(&cci_dev->cci_master_info[master].mutex);
+	CDBG("%s master %d, queue %d\n", __func__, master, queue);
+	/* slave id, retry count and id-map flag packed into SET_PARAM */
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_LOCK_CMD;
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	/* queue the register address as a 1- or 2-byte write */
+	if (read_cfg->addr_type == MSM_CAMERA_I2C_BYTE_ADDR)
+		val = CCI_I2C_WRITE_CMD | (read_cfg->addr_type << 4) |
+			((read_cfg->addr & 0xFF) << 8);
+	if (read_cfg->addr_type == MSM_CAMERA_I2C_WORD_ADDR)
+		val = CCI_I2C_WRITE_CMD | (read_cfg->addr_type << 4) |
+			(((read_cfg->addr & 0xFF00) >> 8) << 8) |
+			((read_cfg->addr & 0xFF) << 16);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_UNLOCK_CMD;
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	/* execute everything queued so far */
+	val = msm_camera_io_r(cci_dev->base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR +
+		master * 0x200 + queue * 0x100);
+	CDBG("%s cur word cnt %x\n", __func__, val);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR +
+		master * 0x200 + queue * 0x100);
+
+	val = 1 << ((master * 2) + queue);
+	msm_camera_io_w(val, cci_dev->base + CCI_QUEUE_START_ADDR);
+
+	/* NOTE(review): an interrupted wait is not detected; the FIFO would
+	 * then be read before the transfer completes — confirm acceptable */
+	wait_for_completion_interruptible(&cci_dev->
+		cci_master_info[master].reset_complete);
+
+	/* words to drain = ceil((num_byte + 1) / 4), counting the sid byte */
+	read_bytes = (read_cfg->num_byte / 4) + 1;
+	index = 0;
+	CDBG("%s index %d num_type %d\n", __func__, index,
+		read_cfg->num_byte);
+	first_byte = 0;
+	do {
+		val = msm_camera_io_r(cci_dev->base +
+			CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
+		CDBG("%s read val %x\n", __func__, val);
+		for (i = 0; (i < 4) && (index < read_cfg->num_byte); i++) {
+			CDBG("%s i %d index %d\n", __func__, i, index);
+			if (!first_byte) {
+				/* first FIFO byte is the slave id — skip */
+				CDBG("%s sid %x\n", __func__, val & 0xFF);
+				first_byte++;
+			} else {
+				read_cfg->data[index] =
+					(val >> (i * 8)) & 0xFF;
+				CDBG("%s data[%d] %x\n", __func__, index,
+					read_cfg->data[index]);
+				index++;
+			}
+		}
+	} while (--read_bytes > 0);
+ERROR:
+	mutex_unlock(&cci_dev->cci_master_info[master].mutex);
+	return rc;
+}
+
+/*
+ * msm_cci_i2c_write() - perform an I2C register-write sequence through the
+ * CCI command queue (QUEUE_0 of the selected master).
+ *
+ * Queues SET_PARAM and LOCK, loads the packed write data via
+ * msm_cci_data_queue(), then UNLOCK and REPORT, executes the queue and
+ * waits for the report interrupt.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int32_t msm_cci_i2c_write(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	struct cci_device *cci_dev;
+	uint32_t val;
+	enum cci_i2c_master_t master;
+	enum cci_i2c_queue_t queue = QUEUE_0;
+	cci_dev = v4l2_get_subdevdata(sd);
+	master = c_ctrl->cci_info->cci_i2c_master;
+	CDBG("%s master %d, queue %d\n", __func__, master, queue);
+	mutex_lock(&cci_dev->cci_master_info[master].mutex);
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+	CDBG("%s:%d CCI_I2C_SET_PARAM_CMD\n", __func__, __LINE__);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_LOCK_CMD;
+	CDBG("%s:%d CCI_I2C_LOCK_CMD\n", __func__, __LINE__);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	msm_cci_data_queue(cci_dev, c_ctrl, queue);
+	val = CCI_I2C_UNLOCK_CMD;
+	CDBG("%s:%d CCI_I2C_UNLOCK_CMD\n", __func__, __LINE__);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_REPORT_CMD | (1 << 8);
+	CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = msm_camera_io_r(cci_dev->base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR +
+		master * 0x200 + queue * 0x100);
+	CDBG("%s line %d size of queue %d\n", __func__, __LINE__, val);
+	CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR\n", __func__, __LINE__);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR +
+		master * 0x200 + queue * 0x100);
+
+	val = 1 << ((master * 2) + queue);
+	CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
+	/* QUEUE_START is a single trigger register: the (master, queue)
+	 * pair is selected by the bit position in val, not by a register
+	 * offset.  The original added master*0x200 + queue*0x100 here,
+	 * unlike the read path and msm_cci_validate_queue(), so the start
+	 * bit was written to the wrong address for master/queue != 0. */
+	msm_camera_io_w(val, cci_dev->base + CCI_QUEUE_START_ADDR);
+
+	CDBG("%s line %d wait_for_completion_interruptible\n",
+		__func__, __LINE__);
+	wait_for_completion_interruptible(&cci_dev->
+		cci_master_info[master].reset_complete);
+ERROR:
+	mutex_unlock(&cci_dev->cci_master_info[master].mutex);
+	return rc;
+}
+
+/*
+ * msm_cci_subdev_g_chip_ident() - v4l2 g_chip_ident hook; reports the CCI
+ * subdev identity (V4L2_IDENT_CCI) with revision 0.  Always returns 0.
+ */
+static int msm_cci_subdev_g_chip_ident(struct v4l2_subdev *sd,
+	struct v4l2_dbg_chip_ident *chip)
+{
+	BUG_ON(!chip);
+	chip->ident = V4L2_IDENT_CCI;
+	chip->revision = 0;
+	return 0;
+}
+
+static struct msm_cam_clk_info cci_clk_info[] = {
+ {"cci_clk_src", 19200000},
+};
+
+/*
+ * msm_cci_init() - bring up the CCI core: enable the clock, enable the
+ * IRQ line, reset the hardware, program I2C timing and unmask/clear all
+ * interrupts.
+ *
+ * Returns 0 on success or a negative error code.
+ *
+ * NOTE(review): only MASTER_0's reset_pending flag is set before the
+ * reset is issued, and the interruptible wait result is unchecked —
+ * confirm both are intended.
+ */
+static int32_t msm_cci_init(struct v4l2_subdev *sd)
+{
+	int rc = 0;
+	struct cci_device *cci_dev;
+	cci_dev = v4l2_get_subdevdata(sd);
+	CDBG("%s line %d\n", __func__, __LINE__);
+	if (cci_dev == NULL) {
+		rc = -ENOMEM;
+		return rc;
+	}
+	rc = msm_cam_clk_enable(&cci_dev->pdev->dev, cci_clk_info,
+		cci_dev->cci_clk, ARRAY_SIZE(cci_clk_info), 1);
+	if (rc < 0) {
+		CDBG("%s: regulator enable failed\n", __func__);
+		goto clk_enable_failed;
+	}
+
+	enable_irq(cci_dev->irq->start);
+	cci_dev->hw_version = msm_camera_io_r(cci_dev->base +
+		CCI_HW_VERSION_ADDR);
+	cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+	/* two-stage reset: all blocks first, then the core reset bit */
+	msm_camera_io_w(0xFFFFFFFF, cci_dev->base + CCI_RESET_CMD_ADDR);
+	msm_camera_io_w(0x1, cci_dev->base + CCI_RESET_CMD_ADDR);
+	wait_for_completion_interruptible(&cci_dev->cci_master_info[MASTER_0].
+		reset_complete);
+	msm_cci_set_clk_param(cci_dev);
+	/* unmask everything and flush any stale interrupt status */
+	msm_camera_io_w(0xFFFFFFFF, cci_dev->base + CCI_IRQ_MASK_0_ADDR);
+	msm_camera_io_w(0xFFFFFFFF, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+	msm_camera_io_w(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	msm_camera_io_w(0x0, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	msm_camera_io_w(0x0, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+	return 0;
+
+clk_enable_failed:
+	return rc;
+}
+
+/*
+ * msm_cci_release() - power down the CCI: disable the IRQ line and turn
+ * off the clock enabled by msm_cci_init().  Always returns 0.
+ */
+static int32_t msm_cci_release(struct v4l2_subdev *sd)
+{
+	struct cci_device *cci_dev;
+	cci_dev = v4l2_get_subdevdata(sd);
+
+	disable_irq(cci_dev->irq->start);
+
+	/* last argument 0 = disable the clocks enabled in msm_cci_init() */
+	msm_cam_clk_enable(&cci_dev->pdev->dev, cci_clk_info,
+		cci_dev->cci_clk, ARRAY_SIZE(cci_clk_info), 0);
+
+	return 0;
+}
+
+/*
+ * msm_cci_config() - dispatch a VIDIOC_MSM_CCI_CFG command to the matching
+ * handler.  The result is returned and also recorded in cci_ctrl->status;
+ * unknown commands yield -ENOIOCTLCMD.  MSM_CCI_SET_SID and
+ * MSM_CCI_GPIO_WRITE are accepted but currently no-ops.
+ */
+static int32_t msm_cci_config(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *cci_ctrl)
+{
+	int32_t rc = 0;
+	CDBG("%s line %d cmd %d\n", __func__, __LINE__,
+		cci_ctrl->cmd);
+	switch (cci_ctrl->cmd) {
+	case MSM_CCI_INIT:
+		rc = msm_cci_init(sd);
+		break;
+	case MSM_CCI_RELEASE:
+		rc = msm_cci_release(sd);
+		break;
+	case MSM_CCI_SET_SID:
+		break;
+	case MSM_CCI_SET_FREQ:
+		rc = msm_cci_i2c_set_freq(sd, cci_ctrl);
+		break;
+	case MSM_CCI_SET_SYNC_CID:
+		rc = msm_cci_i2c_config_sync_timer(sd, cci_ctrl);
+		break;
+	case MSM_CCI_I2C_READ:
+		rc = msm_cci_i2c_read(sd, cci_ctrl);
+		break;
+	case MSM_CCI_I2C_WRITE:
+		rc = msm_cci_i2c_write(sd, cci_ctrl);
+		break;
+	case MSM_CCI_GPIO_WRITE:
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+	}
+	CDBG("%s line %d rc %d\n", __func__, __LINE__, rc);
+	cci_ctrl->status = rc;
+	return rc;
+}
+
+/*
+ * msm_cci_irq() - hard IRQ handler for the CCI core.
+ *
+ * Reads and clears CCI_IRQ_STATUS_0, then acts on the status bits:
+ *  - reset-done ack: completes reset_complete for any master with a
+ *    pending reset;
+ *  - read-done / queue-report: records success and wakes the waiter
+ *    (reset_complete doubles as the command-done completion);
+ *  - queue NACK: records -EINVAL and requests a halt of that master;
+ *  - halt ack: marks a reset pending and issues the master reset, whose
+ *    completion arrives via the reset-done branch above.
+ *
+ * NOTE(review): the bits are tested in an if/else-if chain, so if one
+ * status read carries events for both masters only the first matching
+ * branch is serviced — confirm the hardware cannot coalesce them.
+ */
+static irqreturn_t msm_cci_irq(int irq_num, void *data)
+{
+	uint32_t irq;
+	struct cci_device *cci_dev = data;
+	irq = msm_camera_io_r(cci_dev->base + CCI_IRQ_STATUS_0_ADDR);
+	/* ack exactly the bits we saw, then pulse the global clear */
+	msm_camera_io_w(irq, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+	msm_camera_io_w(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	msm_camera_io_w(0x0, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	CDBG("%s CCI_I2C_M0_STATUS_ADDR = 0x%x\n", __func__, irq);
+	if (irq & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
+		if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
+			cci_dev->cci_master_info[MASTER_0].reset_pending =
+				FALSE;
+			complete(&cci_dev->cci_master_info[MASTER_0].
+				reset_complete);
+		}
+		if (cci_dev->cci_master_info[MASTER_1].reset_pending == TRUE) {
+			cci_dev->cci_master_info[MASTER_1].reset_pending =
+				FALSE;
+			complete(&cci_dev->cci_master_info[MASTER_1].
+				reset_complete);
+		}
+	} else if ((irq & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK)) {
+		cci_dev->cci_master_info[MASTER_0].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+	} else if ((irq & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK)) {
+		cci_dev->cci_master_info[MASTER_1].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+	} else if ((irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR_BMSK)) {
+		cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
+		msm_camera_io_w(CCI_M0_HALT_REQ_RMSK,
+			cci_dev->base + CCI_HALT_REQ_ADDR);
+	} else if ((irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR_BMSK)) {
+		cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
+		msm_camera_io_w(CCI_M1_HALT_REQ_RMSK,
+			cci_dev->base + CCI_HALT_REQ_ADDR);
+	} else if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
+		cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+		msm_camera_io_w(CCI_M0_RESET_RMSK,
+			cci_dev->base + CCI_RESET_CMD_ADDR);
+	} else if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
+		cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
+		msm_camera_io_w(CCI_M1_RESET_RMSK,
+			cci_dev->base + CCI_RESET_CMD_ADDR);
+	}
+	return IRQ_HANDLED;
+}
+
+/*
+ * msm_cci_irq_routine() - v4l2 interrupt_service_routine hook; forwards to
+ * the hard IRQ handler when interrupts are dispatched through the camera
+ * IRQ router instead of a directly requested IRQ line.  Always returns 0
+ * and marks the interrupt handled.
+ */
+int msm_cci_irq_routine(struct v4l2_subdev *sd, u32 status, bool *handled)
+{
+	struct cci_device *cci_dev = v4l2_get_subdevdata(sd);
+	CDBG("%s line %d\n", __func__, __LINE__);
+	/* msm_cci_irq() always returns IRQ_HANDLED; the original stored it
+	 * in a never-read local, triggering a set-but-unused warning */
+	msm_cci_irq(cci_dev->irq->start, cci_dev);
+	*handled = TRUE;
+	return 0;
+}
+
+/*
+ * msm_cci_subdev_ioctl() - subdev ioctl entry point; only
+ * VIDIOC_MSM_CCI_CFG is supported and is forwarded to msm_cci_config().
+ * Any other command returns -ENOIOCTLCMD.
+ */
+static long msm_cci_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int32_t rc = 0;
+	CDBG("%s line %d\n", __func__, __LINE__);
+	switch (cmd) {
+	case VIDIOC_MSM_CCI_CFG:
+		rc = msm_cci_config(sd, arg);
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+	}
+	CDBG("%s line %d rc %d\n", __func__, __LINE__, rc);
+	return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_cci_subdev_core_ops = {
+ .g_chip_ident = &msm_cci_subdev_g_chip_ident,
+ .ioctl = &msm_cci_subdev_ioctl,
+ .interrupt_service_routine = msm_cci_irq_routine,
+};
+
+static const struct v4l2_subdev_ops msm_cci_subdev_ops = {
+ .core = &msm_cci_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_cci_internal_ops;
+
+/*
+ * msm_cci_initialize_cci_params() - initialise per-master bookkeeping:
+ * status, mutex, completion and the per-queue size limits (queue 0 is
+ * small, queue 1 holds the bulk data).
+ */
+static void msm_cci_initialize_cci_params(struct cci_device *new_cci_dev)
+{
+	uint8_t m = 0, q = 0;
+	for (m = 0; m < NUM_MASTERS; m++) {
+		struct msm_camera_cci_master_info *minfo =
+			&new_cci_dev->cci_master_info[m];
+		minfo->status = 0;
+		mutex_init(&minfo->mutex);
+		init_completion(&minfo->reset_complete);
+		for (q = 0; q < NUM_QUEUES; q++)
+			new_cci_dev->cci_i2c_queue_info[m][q].
+				max_queue_size = (q == QUEUE_0) ?
+					CCI_I2C_QUEUE_0_SIZE :
+					CCI_I2C_QUEUE_1_SIZE;
+	}
+	return;
+}
+
+/*
+ * msm_cci_probe() - platform probe: allocate the device, register the
+ * v4l2 subdev, map the register space and hook up the interrupt (via the
+ * camera IRQ router when present, otherwise request_irq directly).
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int __devinit msm_cci_probe(struct platform_device *pdev)
+{
+	struct cci_device *new_cci_dev;
+	int rc = 0;
+	struct msm_cam_subdev_info sd_info;
+	struct intr_table_entry irq_req;
+	CDBG("%s: pdev %p device id = %d\n", __func__, pdev, pdev->id);
+	new_cci_dev = kzalloc(sizeof(struct cci_device), GFP_KERNEL);
+	if (!new_cci_dev) {
+		CDBG("%s: no enough memory\n", __func__);
+		return -ENOMEM;
+	}
+	v4l2_subdev_init(&new_cci_dev->subdev, &msm_cci_subdev_ops);
+	new_cci_dev->subdev.internal_ops = &msm_cci_internal_ops;
+	new_cci_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	snprintf(new_cci_dev->subdev.name,
+		ARRAY_SIZE(new_cci_dev->subdev.name), "msm_cci");
+	v4l2_set_subdevdata(&new_cci_dev->subdev, new_cci_dev);
+	platform_set_drvdata(pdev, &new_cci_dev->subdev);
+	CDBG("%s sd %p\n", __func__, &new_cci_dev->subdev);
+	if (pdev->dev.of_node)
+		of_property_read_u32((&pdev->dev)->of_node,
+			"cell-index", &pdev->id);
+
+	new_cci_dev->mem = platform_get_resource_byname(pdev,
+		IORESOURCE_MEM, "cci");
+	if (!new_cci_dev->mem) {
+		CDBG("%s: no mem resource?\n", __func__);
+		rc = -ENODEV;
+		goto cci_no_resource;
+	}
+	new_cci_dev->irq = platform_get_resource_byname(pdev,
+		IORESOURCE_IRQ, "cci");
+	/* NULL-check before touching irq->start/end: the original logged
+	 * them before the check, dereferencing a possibly-NULL resource */
+	if (!new_cci_dev->irq) {
+		CDBG("%s: no irq resource?\n", __func__);
+		rc = -ENODEV;
+		goto cci_no_resource;
+	}
+	CDBG("%s line %d cci irq start %d end %d\n", __func__,
+		__LINE__,
+		new_cci_dev->irq->start,
+		new_cci_dev->irq->end);
+	new_cci_dev->io = request_mem_region(new_cci_dev->mem->start,
+		resource_size(new_cci_dev->mem), pdev->name);
+	if (!new_cci_dev->io) {
+		CDBG("%s: no valid mem region\n", __func__);
+		rc = -EBUSY;
+		goto cci_no_resource;
+	}
+
+	new_cci_dev->base = ioremap(new_cci_dev->mem->start,
+		resource_size(new_cci_dev->mem));
+	if (!new_cci_dev->base) {
+		rc = -ENOMEM;
+		goto cci_release_mem;
+	}
+
+	sd_info.sdev_type = CCI_DEV;
+	sd_info.sd_index = pdev->id;
+	sd_info.irq_num = new_cci_dev->irq->start;
+	msm_cam_register_subdev_node(&new_cci_dev->subdev, &sd_info);
+
+	irq_req.cam_hw_idx = MSM_CAM_HW_CCI;
+	irq_req.dev_name = "msm_cci";
+	irq_req.irq_idx = CAMERA_SS_IRQ_1;
+	irq_req.irq_num = new_cci_dev->irq->start;
+	irq_req.is_composite = 0;
+	irq_req.irq_trigger_type = IRQF_TRIGGER_RISING;
+	irq_req.num_hwcore = 1;
+	irq_req.subdev_list[0] = &new_cci_dev->subdev;
+	irq_req.data = (void *)new_cci_dev;
+	rc = msm_cam_server_request_irq(&irq_req);
+	if (rc == -ENXIO) {
+		/* IRQ Router hardware is not present on this hardware.
+		 * Request for the IRQ and register the interrupt handler. */
+		rc = request_irq(new_cci_dev->irq->start, msm_cci_irq,
+			IRQF_TRIGGER_RISING, "cci", new_cci_dev);
+		if (rc < 0) {
+			CDBG("%s: irq request fail\n", __func__);
+			rc = -EBUSY;
+			goto cci_release_mem;
+		}
+		/* stays disabled until msm_cci_init() powers the core up */
+		disable_irq(new_cci_dev->irq->start);
+	} else if (rc < 0) {
+		CDBG("%s Error registering irq ", __func__);
+		rc = -EBUSY;
+		goto cci_release_mem;
+	}
+
+	CDBG("%s line %d cci irq start %d end %d\n", __func__,
+		__LINE__,
+		new_cci_dev->irq->start,
+		new_cci_dev->irq->end);
+
+	new_cci_dev->pdev = pdev;
+	msm_cci_initialize_cci_params(new_cci_dev);
+
+	CDBG("%s line %d\n", __func__, __LINE__);
+	return 0;
+
+cci_release_mem:
+	/* unmap before releasing the region; base is NULL when ioremap
+	 * itself failed (the original leaked the mapping here) */
+	if (new_cci_dev->base)
+		iounmap(new_cci_dev->base);
+	release_mem_region(new_cci_dev->mem->start,
+		resource_size(new_cci_dev->mem));
+cci_no_resource:
+	kfree(new_cci_dev);
+	/* propagate the failure: the original returned 0 here, telling the
+	 * driver core the probe succeeded on every error path */
+	return rc;
+}
+
+/*
+ * msm_cci_exit() - platform remove: undo msm_cci_probe().  Unmaps the
+ * register space (the original leaked the ioremap() mapping), releases
+ * the memory region and frees the device.  Always returns 0.
+ */
+static int __exit msm_cci_exit(struct platform_device *pdev)
+{
+	struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
+	struct cci_device *cci_dev =
+		v4l2_get_subdevdata(subdev);
+	iounmap(cci_dev->base);
+	release_mem_region(cci_dev->mem->start, resource_size(cci_dev->mem));
+	kfree(cci_dev);
+	return 0;
+}
+
+static const struct of_device_id msm_cci_dt_match[] = {
+ {.compatible = "qcom,cci"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_cci_dt_match);
+
+static struct platform_driver cci_driver = {
+ .probe = msm_cci_probe,
+ .remove = msm_cci_exit,
+ .driver = {
+ .name = MSM_CCI_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_cci_dt_match,
+ },
+};
+
+static int __init msm_cci_init_module(void)
+{
+ return platform_driver_register(&cci_driver);
+}
+
+static void __exit msm_cci_exit_module(void)
+{
+ platform_driver_unregister(&cci_driver);
+}
+
+module_init(msm_cci_init_module);
+module_exit(msm_cci_exit_module);
+MODULE_DESCRIPTION("MSM CCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/cci/msm_cci.h b/drivers/media/video/msm/cci/msm_cci.h
new file mode 100644
index 0000000..6955576
--- /dev/null
+++ b/drivers/media/video/msm/cci/msm_cci.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CCI_H
+#define MSM_CCI_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <mach/camera.h>
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE 1
+#define FALSE 0
+
+struct msm_camera_cci_master_info {
+ uint32_t status;
+ uint8_t reset_pending;
+ struct mutex mutex;
+ struct completion reset_complete;
+};
+
+struct cci_device {
+ struct platform_device *pdev;
+ struct v4l2_subdev subdev;
+ struct resource *mem;
+ struct resource *irq;
+ struct resource *io;
+ void __iomem *base;
+ uint32_t hw_version;
+ struct clk *cci_clk[5];
+ struct msm_camera_cci_i2c_queue_info
+ cci_i2c_queue_info[NUM_MASTERS][NUM_QUEUES];
+ struct msm_camera_cci_master_info cci_master_info[NUM_MASTERS];
+};
+
+enum msm_cci_i2c_cmd_type {
+ CCI_I2C_SET_PARAM_CMD = 1,
+ CCI_I2C_WAIT_CMD,
+ CCI_I2C_WAIT_SYNC_CMD,
+ CCI_I2C_WAIT_GPIO_EVENT_CMD,
+ CCI_I2C_TRIG_I2C_EVENT_CMD,
+ CCI_I2C_LOCK_CMD,
+ CCI_I2C_UNLOCK_CMD,
+ CCI_I2C_REPORT_CMD,
+ CCI_I2C_WRITE_CMD,
+ CCI_I2C_READ_CMD,
+ CCI_I2C_WRITE_DISABLE_P_CMD,
+ CCI_I2C_READ_DISABLE_P_CMD,
+ CCI_I2C_WRITE_CMD2,
+ CCI_I2C_WRITE_CMD3,
+ CCI_I2C_REPEAT_CMD,
+ CCI_I2C_INVALID_CMD,
+};
+
+enum msm_cci_gpio_cmd_type {
+ CCI_GPIO_SET_PARAM_CMD = 1,
+ CCI_GPIO_WAIT_CMD,
+ CCI_GPIO_WAIT_SYNC_CMD,
+ CCI_GPIO_WAIT_GPIO_IN_EVENT_CMD,
+ CCI_GPIO_WAIT_I2C_Q_TRIG_EVENT_CMD,
+ CCI_GPIO_OUT_CMD,
+ CCI_GPIO_TRIG_EVENT_CMD,
+ CCI_GPIO_REPORT_CMD,
+ CCI_GPIO_REPEAT_CMD,
+ CCI_GPIO_CONTINUE_CMD,
+ CCI_GPIO_INVALID_CMD,
+};
+
+#define VIDIOC_MSM_CCI_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 23, struct msm_camera_cci_ctrl *)
+
+#endif
+
diff --git a/drivers/media/video/msm/csi/Makefile b/drivers/media/video/msm/csi/Makefile
index f7cb408..547eb13 100644
--- a/drivers/media/video/msm/csi/Makefile
+++ b/drivers/media/video/msm/csi/Makefile
@@ -1,7 +1,15 @@
GCC_VERSION := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
-EXTRA_CFLAGS += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/video/msm
+ifeq ($(CONFIG_MSM_CSI20_HEADER),y)
+ ccflags-y += -Idrivers/media/video/msm/csi/include/csi2.0
+else ifeq ($(CONFIG_MSM_CSI30_HEADER),y)
+ ccflags-y += -Idrivers/media/video/msm/csi/include/csi3.0
+endif
+obj-$(CONFIG_MSM_CSI2_REGISTER) += msm_csi2_register.o
+obj-$(CONFIG_MSM_CSIPHY) += msm_csiphy.o
+obj-$(CONFIG_MSM_CSID) += msm_csid.o
+obj-$(CONFIG_MSM_ISPIF) += msm_ispif.o
obj-$(CONFIG_ARCH_MSM8960) += msm_csi2_register.o msm_csiphy.o msm_csid.o msm_ispif.o
obj-$(CONFIG_ARCH_MSM7X27A) += msm_csic_register.o msm_csic.o
obj-$(CONFIG_ARCH_MSM8X60) += msm_csic_register.o msm_csic.o
obj-$(CONFIG_ARCH_MSM7X30) += msm_csic_register.o msm_csic.o
-
diff --git a/drivers/media/video/msm/csi/include/csi2.0/msm_csid_hwreg.h b/drivers/media/video/msm/csi/include/csi2.0/msm_csid_hwreg.h
new file mode 100644
index 0000000..c79748c
--- /dev/null
+++ b/drivers/media/video/msm/csi/include/csi2.0/msm_csid_hwreg.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_HWREG_H
+#define MSM_CSID_HWREG_H
+
+/* MIPI CSID registers */
+#define CSID_HW_VERSION_ADDR 0x0
+#define CSID_CORE_CTRL_0_ADDR 0x4
+#define CSID_CORE_CTRL_1_ADDR 0x4
+#define CSID_RST_CMD_ADDR 0x8
+#define CSID_CID_LUT_VC_0_ADDR 0xc
+#define CSID_CID_LUT_VC_1_ADDR 0x10
+#define CSID_CID_LUT_VC_2_ADDR 0x14
+#define CSID_CID_LUT_VC_3_ADDR 0x18
+#define CSID_CID_n_CFG_ADDR 0x1C
+#define CSID_IRQ_CLEAR_CMD_ADDR 0x5c
+#define CSID_IRQ_MASK_ADDR 0x60
+#define CSID_IRQ_STATUS_ADDR 0x64
+#define CSID_CAPTURED_UNMAPPED_LONG_PKT_HDR_ADDR 0x68
+#define CSID_CAPTURED_MMAPPED_LONG_PKT_HDR_ADDR 0x6c
+#define CSID_CAPTURED_SHORT_PKT_ADDR 0x70
+#define CSID_CAPTURED_LONG_PKT_HDR_ADDR 0x74
+#define CSID_CAPTURED_LONG_PKT_FTR_ADDR 0x78
+#define CSID_PIF_MISR_DL0_ADDR 0x7C
+#define CSID_PIF_MISR_DL1_ADDR 0x80
+#define CSID_PIF_MISR_DL2_ADDR 0x84
+#define CSID_PIF_MISR_DL3_ADDR 0x88
+#define CSID_STATS_TOTAL_PKTS_RCVD_ADDR 0x8C
+#define CSID_STATS_ECC_ADDR 0x90
+#define CSID_STATS_CRC_ADDR 0x94
+#define CSID_TG_CTRL_ADDR 0x9C
+#define CSID_TG_VC_CFG_ADDR 0xA0
+#define CSID_TG_DT_n_CFG_0_ADDR 0xA8
+#define CSID_TG_DT_n_CFG_1_ADDR 0xAC
+#define CSID_TG_DT_n_CFG_2_ADDR 0xB0
+#define CSID_RST_DONE_IRQ_BITSHIFT 11
+#define CSID_RST_STB_ALL 0x7FFF
+#define CSID_DL_INPUT_SEL_SHIFT 0x2
+#define CSID_PHY_SEL_SHIFT 0x17
+
+#endif
diff --git a/drivers/media/video/msm/csi/include/csi2.0/msm_csiphy_hwreg.h b/drivers/media/video/msm/csi/include/csi2.0/msm_csiphy_hwreg.h
new file mode 100644
index 0000000..93d1fc4
--- /dev/null
+++ b/drivers/media/video/msm/csi/include/csi2.0/msm_csiphy_hwreg.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_HWREG_H
+#define MSM_CSIPHY_HWREG_H
+
+/*MIPI CSI PHY registers*/
+#define MIPI_CSIPHY_HW_VERSION_ADDR 0x180
+#define MIPI_CSIPHY_LNn_CFG1_ADDR 0x0
+#define MIPI_CSIPHY_LNn_CFG2_ADDR 0x4
+#define MIPI_CSIPHY_LNn_CFG3_ADDR 0x8
+#define MIPI_CSIPHY_LNn_CFG4_ADDR 0xC
+#define MIPI_CSIPHY_LNn_CFG5_ADDR 0x10
+#define MIPI_CSIPHY_LNCK_CFG1_ADDR 0x100
+#define MIPI_CSIPHY_LNCK_CFG2_ADDR 0x104
+#define MIPI_CSIPHY_LNCK_CFG3_ADDR 0x108
+#define MIPI_CSIPHY_LNCK_CFG4_ADDR 0x10C
+#define MIPI_CSIPHY_LNCK_CFG5_ADDR 0x110
+#define MIPI_CSIPHY_LNCK_MISC1_ADDR 0x128
+#define MIPI_CSIPHY_GLBL_RESET_ADDR 0x140
+#define MIPI_CSIPHY_GLBL_PWR_CFG_ADDR 0x144
+#define MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR 0x180
+#define MIPI_CSIPHY_INTERRUPT_MASK0_ADDR 0x1A0
+#define MIPI_CSIPHY_INTERRUPT_MASK_VAL 0x6F
+#define MIPI_CSIPHY_INTERRUPT_MASK_ADDR 0x1A4
+#define MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR 0x1C0
+#define MIPI_CSIPHY_INTERRUPT_CLEAR_ADDR 0x1C4
+#define MIPI_CSIPHY_MODE_CONFIG_SHIFT 0x4
+#define MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR 0x1E0
+#define MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR 0x1E8
+
+#endif
diff --git a/drivers/media/video/msm/csi/include/csi2.0/msm_ispif_hwreg.h b/drivers/media/video/msm/csi/include/csi2.0/msm_ispif_hwreg.h
new file mode 100644
index 0000000..c678ea2
--- /dev/null
+++ b/drivers/media/video/msm/csi/include/csi2.0/msm_ispif_hwreg.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_ISPIF_HWREG_H
+#define MSM_ISPIF_HWREG_H
+
+
+/* ISPIF registers */
+
+#define ISPIF_RST_CMD_ADDR 0x00
+#define ISPIF_RST_CMD_1_ADDR 0x00
+#define ISPIF_INTF_CMD_ADDR 0x04
+#define ISPIF_INTF_CMD_1_ADDR 0x30
+#define ISPIF_CTRL_ADDR 0x08
+#define ISPIF_INPUT_SEL_ADDR 0x0C
+#define ISPIF_PIX_0_INTF_CID_MASK_ADDR 0x10
+#define ISPIF_RDI_0_INTF_CID_MASK_ADDR 0x14
+#define ISPIF_PIX_1_INTF_CID_MASK_ADDR 0x38
+#define ISPIF_RDI_1_INTF_CID_MASK_ADDR 0x3C
+#define ISPIF_RDI_2_INTF_CID_MASK_ADDR 0x44
+#define ISPIF_PIX_0_STATUS_ADDR 0x24
+#define ISPIF_RDI_0_STATUS_ADDR 0x28
+#define ISPIF_PIX_1_STATUS_ADDR 0x60
+#define ISPIF_RDI_1_STATUS_ADDR 0x64
+#define ISPIF_RDI_2_STATUS_ADDR 0x6C
+#define ISPIF_IRQ_MASK_ADDR 0x0100
+#define ISPIF_IRQ_CLEAR_ADDR 0x0104
+#define ISPIF_IRQ_STATUS_ADDR 0x0108
+#define ISPIF_IRQ_MASK_1_ADDR 0x010C
+#define ISPIF_IRQ_CLEAR_1_ADDR 0x0110
+#define ISPIF_IRQ_STATUS_1_ADDR 0x0114
+#define ISPIF_IRQ_MASK_2_ADDR 0x0118
+#define ISPIF_IRQ_CLEAR_2_ADDR 0x011C
+#define ISPIF_IRQ_STATUS_2_ADDR 0x0120
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x0124
+
+/*ISPIF RESET BITS*/
+
+#define VFE_CLK_DOMAIN_RST 31
+#define RDI_CLK_DOMAIN_RST 30
+#define PIX_CLK_DOMAIN_RST 29
+#define AHB_CLK_DOMAIN_RST 28
+#define RDI_1_CLK_DOMAIN_RST 27
+#define RDI_2_VFE_RST_STB 19
+#define RDI_2_CSID_RST_STB 18
+#define RDI_1_VFE_RST_STB 13
+#define RDI_1_CSID_RST_STB 12
+#define RDI_0_VFE_RST_STB 7
+#define RDI_0_CSID_RST_STB 6
+#define PIX_1_VFE_RST_STB 10
+#define PIX_1_CSID_RST_STB 9
+#define PIX_0_VFE_RST_STB 4
+#define PIX_0_CSID_RST_STB 3
+#define SW_REG_RST_STB 2
+#define MISC_LOGIC_RST_STB 1
+#define STROBED_RST_EN 0
+
+#define PIX_INTF_0_OVERFLOW_IRQ 12
+#define RAW_INTF_0_OVERFLOW_IRQ 25
+#define RAW_INTF_1_OVERFLOW_IRQ 25
+#define RESET_DONE_IRQ 27
+
+#define ISPIF_IRQ_STATUS_MASK 0xA493249
+#define ISPIF_IRQ_1_STATUS_MASK 0xA493249
+#define ISPIF_IRQ_STATUS_RDI_SOF_MASK 0x492000
+#define ISPIF_IRQ_STATUS_PIX_SOF_MASK 0x249
+#define ISPIF_IRQ_STATUS_SOF_MASK 0x492249
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x1
+
+#endif
diff --git a/drivers/media/video/msm/csi/include/csi3.0/msm_csid_hwreg.h b/drivers/media/video/msm/csi/include/csi3.0/msm_csid_hwreg.h
new file mode 100644
index 0000000..27d5a06
--- /dev/null
+++ b/drivers/media/video/msm/csi/include/csi3.0/msm_csid_hwreg.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_HWREG_H
+#define MSM_CSID_HWREG_H
+
+/* MIPI CSID registers */
+#define CSID_HW_VERSION_ADDR 0x0
+#define CSID_CORE_CTRL_0_ADDR 0x4
+#define CSID_CORE_CTRL_1_ADDR 0x8
+#define CSID_RST_CMD_ADDR 0xC
+#define CSID_CID_LUT_VC_0_ADDR 0x10
+#define CSID_CID_LUT_VC_1_ADDR 0x14
+#define CSID_CID_LUT_VC_2_ADDR 0x18
+#define CSID_CID_LUT_VC_3_ADDR 0x1C
+#define CSID_CID_n_CFG_ADDR 0x20
+#define CSID_IRQ_CLEAR_CMD_ADDR 0x60
+#define CSID_IRQ_MASK_ADDR 0x64
+#define CSID_IRQ_STATUS_ADDR 0x68
+#define CSID_CAPTURED_UNMAPPED_LONG_PKT_HDR_ADDR 0x6C
+#define CSID_CAPTURED_MMAPPED_LONG_PKT_HDR_ADDR 0x70
+#define CSID_CAPTURED_SHORT_PKT_ADDR 0x74
+#define CSID_CAPTURED_LONG_PKT_HDR_ADDR 0x78
+#define CSID_CAPTURED_LONG_PKT_FTR_ADDR 0x7C
+#define CSID_PIF_MISR_DL0_ADDR 0x80
+#define CSID_PIF_MISR_DL1_ADDR 0x84
+#define CSID_PIF_MISR_DL2_ADDR 0x88
+#define CSID_PIF_MISR_DL3_ADDR 0x8C
+#define CSID_STATS_TOTAL_PKTS_RCVD_ADDR 0x90
+#define CSID_STATS_ECC_ADDR 0x94
+#define CSID_STATS_CRC_ADDR 0x98
+#define CSID_TG_CTRL_ADDR 0xA0
+#define CSID_TG_VC_CFG_ADDR 0xA4
+#define CSID_TG_DT_n_CFG_0_ADDR 0xAC
+#define CSID_TG_DT_n_CFG_1_ADDR 0xB0
+#define CSID_TG_DT_n_CFG_2_ADDR 0xB4
+#define CSID_RST_DONE_IRQ_BITSHIFT 11
+#define CSID_RST_STB_ALL 0x7FFF
+#define CSID_DL_INPUT_SEL_SHIFT 0x4
+#define CSID_PHY_SEL_SHIFT 0x17
+
+#endif
diff --git a/drivers/media/video/msm/csi/include/csi3.0/msm_csiphy_hwreg.h b/drivers/media/video/msm/csi/include/csi3.0/msm_csiphy_hwreg.h
new file mode 100644
index 0000000..79791bd
--- /dev/null
+++ b/drivers/media/video/msm/csi/include/csi3.0/msm_csiphy_hwreg.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_HWREG_H
+#define MSM_CSIPHY_HWREG_H
+
+/*MIPI CSI PHY registers*/
+#define MIPI_CSIPHY_LNn_CFG1_ADDR 0x0
+#define MIPI_CSIPHY_LNn_CFG2_ADDR 0x4
+#define MIPI_CSIPHY_LNn_CFG3_ADDR 0x8
+#define MIPI_CSIPHY_LNn_CFG4_ADDR 0xC
+#define MIPI_CSIPHY_LNn_CFG5_ADDR 0x10
+#define MIPI_CSIPHY_LNCK_CFG1_ADDR 0x100
+#define MIPI_CSIPHY_LNCK_CFG2_ADDR 0x104
+#define MIPI_CSIPHY_LNCK_CFG3_ADDR 0x108
+#define MIPI_CSIPHY_LNCK_CFG4_ADDR 0x10C
+#define MIPI_CSIPHY_LNCK_CFG5_ADDR 0x110
+#define MIPI_CSIPHY_LNCK_MISC1_ADDR 0x128
+#define MIPI_CSIPHY_GLBL_RESET_ADDR 0x140
+#define MIPI_CSIPHY_GLBL_PWR_CFG_ADDR 0x144
+#define MIPI_CSIPHY_HW_VERSION_ADDR 0x188
+#define MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR 0x18C
+#define MIPI_CSIPHY_INTERRUPT_MASK0_ADDR 0x1AC
+#define MIPI_CSIPHY_INTERRUPT_MASK_VAL 0x3F
+#define MIPI_CSIPHY_INTERRUPT_MASK_ADDR 0x1B0
+#define MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR 0x1CC
+#define MIPI_CSIPHY_INTERRUPT_CLEAR_ADDR 0x1D0
+#define MIPI_CSIPHY_MODE_CONFIG_SHIFT 0x4
+#define MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR 0x1EC
+#define MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR 0x1F4
+
+#endif
diff --git a/drivers/media/video/msm/csi/include/csi3.0/msm_ispif_hwreg.h b/drivers/media/video/msm/csi/include/csi3.0/msm_ispif_hwreg.h
new file mode 100644
index 0000000..4b17538
--- /dev/null
+++ b/drivers/media/video/msm/csi/include/csi3.0/msm_ispif_hwreg.h
@@ -0,0 +1,94 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_ISPIF_HWREG_H
+#define MSM_ISPIF_HWREG_H
+
+
+/* ISPIF registers */
+
+#define ISPIF_RST_CMD_ADDR 0x08
+#define ISPIF_RST_CMD_1_ADDR 0x0C
+#define ISPIF_INTF_CMD_ADDR 0x248
+#define ISPIF_INTF_CMD_1_ADDR 0x24C
+#define ISPIF_CTRL_ADDR 0x08
+#define ISPIF_INPUT_SEL_ADDR 0x244
+#define ISPIF_PIX_0_INTF_CID_MASK_ADDR 0x254
+#define ISPIF_RDI_0_INTF_CID_MASK_ADDR 0x264
+#define ISPIF_PIX_1_INTF_CID_MASK_ADDR 0x258
+#define ISPIF_RDI_1_INTF_CID_MASK_ADDR 0x268
+#define ISPIF_RDI_2_INTF_CID_MASK_ADDR 0x26C
+#define ISPIF_PIX_0_STATUS_ADDR 0x2C0
+#define ISPIF_RDI_0_STATUS_ADDR 0x2D0
+#define ISPIF_PIX_1_STATUS_ADDR 0x2C4
+#define ISPIF_RDI_1_STATUS_ADDR 0x2D4
+#define ISPIF_RDI_2_STATUS_ADDR 0x2D8
+#define ISPIF_IRQ_MASK_ADDR 0x208
+#define ISPIF_IRQ_CLEAR_ADDR 0x230
+#define ISPIF_IRQ_STATUS_ADDR 0x21C
+#define ISPIF_IRQ_MASK_1_ADDR 0x20C
+#define ISPIF_IRQ_CLEAR_1_ADDR 0x234
+#define ISPIF_IRQ_STATUS_1_ADDR 0x220
+#define ISPIF_IRQ_MASK_2_ADDR 0x210
+#define ISPIF_IRQ_CLEAR_2_ADDR 0x238
+#define ISPIF_IRQ_STATUS_2_ADDR 0x224
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x1C
+
+/* new */
+#define ISPIF_VFE_m_CTRL_0_ADDR 0x200
+#define ISPIF_VFE_m_IRQ_MASK_0 0x208
+#define ISPIF_VFE_m_IRQ_MASK_1 0x20C
+#define ISPIF_VFE_m_IRQ_MASK_2 0x210
+#define ISPIF_VFE_m_IRQ_STATUS_0 0x21C
+#define ISPIF_VFE_m_IRQ_STATUS_1 0x220
+#define ISPIF_VFE_m_IRQ_STATUS_2 0x224
+#define ISPIF_VFE_m_IRQ_CLEAR_0 0x230
+#define ISPIF_VFE_m_IRQ_CLEAR_1 0x234
+#define ISPIF_VFE_m_IRQ_CLEAR_2 0x238
+
+/*ISPIF RESET BITS*/
+
+#define VFE_CLK_DOMAIN_RST 31
+#define RDI_CLK_DOMAIN_RST 26
+#define RDI_1_CLK_DOMAIN_RST 27
+#define RDI_2_CLK_DOMAIN_RST 28
+#define PIX_CLK_DOMAIN_RST 29
+#define PIX_1_CLK_DOMAIN_RST 30
+#define AHB_CLK_DOMAIN_RST 25
+#define RDI_2_VFE_RST_STB 12
+#define RDI_2_CSID_RST_STB 11
+#define RDI_1_VFE_RST_STB 10
+#define RDI_1_CSID_RST_STB 9
+#define RDI_0_VFE_RST_STB 8
+#define RDI_0_CSID_RST_STB 7
+#define PIX_1_VFE_RST_STB 6
+#define PIX_1_CSID_RST_STB 5
+#define PIX_0_VFE_RST_STB 4
+#define PIX_0_CSID_RST_STB 3
+#define SW_REG_RST_STB 2
+#define MISC_LOGIC_RST_STB 1
+#define STROBED_RST_EN 0
+
+#define PIX_INTF_0_OVERFLOW_IRQ 12
+#define RAW_INTF_0_OVERFLOW_IRQ 25
+#define RAW_INTF_1_OVERFLOW_IRQ 25
+#define RESET_DONE_IRQ 27
+
+#define ISPIF_IRQ_STATUS_MASK 0xA493249
+#define ISPIF_IRQ_1_STATUS_MASK 0xA493249
+#define ISPIF_IRQ_STATUS_RDI_SOF_MASK 0x492000
+#define ISPIF_IRQ_STATUS_PIX_SOF_MASK 0x249
+#define ISPIF_IRQ_STATUS_SOF_MASK 0x492249
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x1
+
+
+#endif
diff --git a/drivers/media/video/msm/csi/msm_csi2_register.c b/drivers/media/video/msm/csi/msm_csi2_register.c
index 7b85ded..aa539ff 100644
--- a/drivers/media/video/msm/csi/msm_csi2_register.c
+++ b/drivers/media/video/msm/csi/msm_csi2_register.c
@@ -16,48 +16,28 @@
#include "msm_csi_register.h"
int msm_csi_register_subdevs(struct msm_cam_media_controller *p_mctl,
- int core_index,
- int (*msm_mctl_subdev_match_core)(struct device *, void *))
+ int core_index, struct msm_cam_server_dev *server_dev)
{
int rc = -ENODEV;
- struct device_driver *driver;
- struct device *dev;
/* register csiphy subdev */
- driver = driver_find(MSM_CSIPHY_DRV_NAME, &platform_bus_type);
- if (!driver)
+ p_mctl->csiphy_sdev = server_dev->csiphy_device[core_index];
+ if (!p_mctl->csiphy_sdev)
goto out;
-
- dev = driver_find_device(driver, NULL, (void *)core_index,
- msm_mctl_subdev_match_core);
- if (!dev)
- goto out;
-
- p_mctl->csiphy_sdev = dev_get_drvdata(dev);
+ v4l2_set_subdev_hostdata(p_mctl->csiphy_sdev, p_mctl);
/* register csid subdev */
- driver = driver_find(MSM_CSID_DRV_NAME, &platform_bus_type);
- if (!driver)
+ p_mctl->csid_sdev = server_dev->csid_device[core_index];
+ if (!p_mctl->csid_sdev)
goto out;
-
- dev = driver_find_device(driver, NULL, (void *)core_index,
- msm_mctl_subdev_match_core);
- if (!dev)
- goto out;
-
- p_mctl->csid_sdev = dev_get_drvdata(dev);
+ v4l2_set_subdev_hostdata(p_mctl->csid_sdev, p_mctl);
/* register ispif subdev */
- driver = driver_find(MSM_ISPIF_DRV_NAME, &platform_bus_type);
- if (!driver)
+ p_mctl->ispif_sdev = server_dev->ispif_device[0];
+ if (!p_mctl->ispif_sdev)
goto out;
+ v4l2_set_subdev_hostdata(p_mctl->ispif_sdev, p_mctl);
- dev = driver_find_device(driver, NULL, 0,
- msm_mctl_subdev_match_core);
- if (!dev)
- goto out;
-
- p_mctl->ispif_sdev = dev_get_drvdata(dev);
rc = 0;
return rc;
out:
diff --git a/drivers/media/video/msm/csi/msm_csi_register.h b/drivers/media/video/msm/csi/msm_csi_register.h
index 578b609..ebb001c 100644
--- a/drivers/media/video/msm/csi/msm_csi_register.h
+++ b/drivers/media/video/msm/csi/msm_csi_register.h
@@ -13,4 +13,4 @@
int msm_csi_register_subdevs(struct msm_cam_media_controller *p_mctl,
int core_index,
- int (*msm_mctl_subdev_match_core)(struct device *, void *));
+ struct msm_cam_server_dev *server_dev);
diff --git a/drivers/media/video/msm/csi/msm_csic.c b/drivers/media/video/msm/csi/msm_csic.c
index dbb4f32..bcb7bb3 100644
--- a/drivers/media/video/msm/csi/msm_csic.c
+++ b/drivers/media/video/msm/csi/msm_csic.c
@@ -463,6 +463,12 @@
msm_cam_register_subdev_node(
&new_csic_dev->subdev, &sd_info);
+ media_entity_init(&new_csic_dev->subdev.entity, 0, NULL, 0);
+ new_csic_dev->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ new_csic_dev->subdev.entity.group_id = CSIC_DEV;
+ new_csic_dev->subdev.entity.name = pdev->name;
+ new_csic_dev->subdev.entity.revision =
+ new_csic_dev->subdev.devnode->num;
return 0;
csic_no_resource:
diff --git a/drivers/media/video/msm/csi/msm_csic_register.c b/drivers/media/video/msm/csi/msm_csic_register.c
index 7ccaff2..dc3641a 100644
--- a/drivers/media/video/msm/csi/msm_csic_register.c
+++ b/drivers/media/video/msm/csi/msm_csic_register.c
@@ -16,23 +16,14 @@
#include "msm_csi_register.h"
int msm_csi_register_subdevs(struct msm_cam_media_controller *p_mctl,
- int core_index,
- int (*msm_mctl_subdev_match_core)(struct device *, void *))
+ int core_index, struct msm_cam_server_dev *server_dev)
{
int rc = -ENODEV;
- struct device_driver *driver;
- struct device *dev;
- driver = driver_find(MSM_CSIC_DRV_NAME, &platform_bus_type);
- if (!driver)
+ p_mctl->csic_sdev = server_dev->csic_device[core_index];
+ if (!p_mctl->csic_sdev)
goto out;
-
- dev = driver_find_device(driver, NULL, (void *)core_index,
- msm_mctl_subdev_match_core);
- if (!dev)
- goto out;
-
- p_mctl->csic_sdev = dev_get_drvdata(dev);
+ v4l2_set_subdev_hostdata(p_mctl->csic_sdev, p_mctl);
rc = 0;
p_mctl->ispif_sdev = NULL;
diff --git a/drivers/media/video/msm/csi/msm_csid.c b/drivers/media/video/msm/csi/msm_csid.c
index 1ab4e66..6ee87b7 100644
--- a/drivers/media/video/msm/csi/msm_csid.c
+++ b/drivers/media/video/msm/csi/msm_csid.c
@@ -14,47 +14,16 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <mach/board.h>
#include <mach/camera.h>
#include <media/msm_isp.h>
#include "msm_csid.h"
+#include "msm_csid_hwreg.h"
#include "msm.h"
#define V4L2_IDENT_CSID 50002
-/* MIPI CSID registers */
-#define CSID_HW_VERSION_ADDR 0x0
-#define CSID_CORE_CTRL_ADDR 0x4
-#define CSID_RST_CMD_ADDR 0x8
-#define CSID_CID_LUT_VC_0_ADDR 0xc
-#define CSID_CID_LUT_VC_1_ADDR 0x10
-#define CSID_CID_LUT_VC_2_ADDR 0x14
-#define CSID_CID_LUT_VC_3_ADDR 0x18
-#define CSID_CID_n_CFG_ADDR 0x1C
-#define CSID_IRQ_CLEAR_CMD_ADDR 0x5c
-#define CSID_IRQ_MASK_ADDR 0x60
-#define CSID_IRQ_STATUS_ADDR 0x64
-#define CSID_CAPTURED_UNMAPPED_LONG_PKT_HDR_ADDR 0x68
-#define CSID_CAPTURED_MMAPPED_LONG_PKT_HDR_ADDR 0x6c
-#define CSID_CAPTURED_SHORT_PKT_ADDR 0x70
-#define CSID_CAPTURED_LONG_PKT_HDR_ADDR 0x74
-#define CSID_CAPTURED_LONG_PKT_FTR_ADDR 0x78
-#define CSID_PIF_MISR_DL0_ADDR 0x7C
-#define CSID_PIF_MISR_DL1_ADDR 0x80
-#define CSID_PIF_MISR_DL2_ADDR 0x84
-#define CSID_PIF_MISR_DL3_ADDR 0x88
-#define CSID_STATS_TOTAL_PKTS_RCVD_ADDR 0x8C
-#define CSID_STATS_ECC_ADDR 0x90
-#define CSID_STATS_CRC_ADDR 0x94
-#define CSID_TG_CTRL_ADDR 0x9C
-#define CSID_TG_VC_CFG_ADDR 0xA0
-#define CSID_TG_DT_n_CFG_0_ADDR 0xA8
-#define CSID_TG_DT_n_CFG_1_ADDR 0xAC
-#define CSID_TG_DT_n_CFG_2_ADDR 0xB0
-#define CSID_TG_DT_n_CFG_3_ADDR 0xD8
-#define CSID_RST_DONE_IRQ_BITSHIFT 11
-#define CSID_RST_STB_ALL 0x7FFF
-
#define DBG_CSID 0
static int msm_csid_cid_lut(
@@ -64,7 +33,7 @@
int rc = 0, i = 0;
uint32_t val = 0;
- for (i = 0; i < csid_lut_params->num_cid && i < 4; i++) {
+ for (i = 0; i < csid_lut_params->num_cid && i < 16; i++) {
if (csid_lut_params->vc_cfg[i].dt < 0x12 ||
csid_lut_params->vc_cfg[i].dt > 0x37) {
CDBG("%s: unsupported data type 0x%x\n",
@@ -72,13 +41,13 @@
return rc;
}
val = msm_camera_io_r(csidbase + CSID_CID_LUT_VC_0_ADDR +
- (csid_lut_params->vc_cfg[i].cid >> 2) * 4)
- & ~(0xFF << csid_lut_params->vc_cfg[i].cid * 8);
- val |= csid_lut_params->vc_cfg[i].dt <<
- csid_lut_params->vc_cfg[i].cid * 8;
+ (csid_lut_params->vc_cfg[i].cid >> 2) * 4)
+ & ~(0xFF << ((csid_lut_params->vc_cfg[i].cid % 4) * 8));
+ val |= (csid_lut_params->vc_cfg[i].dt <<
+ ((csid_lut_params->vc_cfg[i].cid % 4) * 8));
msm_camera_io_w(val, csidbase + CSID_CID_LUT_VC_0_ADDR +
(csid_lut_params->vc_cfg[i].cid >> 2) * 4);
- val = csid_lut_params->vc_cfg[i].decode_format << 4 | 0x3;
+ val = (csid_lut_params->vc_cfg[i].decode_format << 4) | 0x3;
msm_camera_io_w(val, csidbase + CSID_CID_n_CFG_ADDR +
(csid_lut_params->vc_cfg[i].cid * 4));
}
@@ -113,13 +82,16 @@
csid_params = cfg_params->parms;
val = csid_params->lane_cnt - 1;
- val |= csid_params->lane_assign << 2;
- val |= 0x1 << 10;
- val |= 0x1 << 11;
- val |= 0x1 << 12;
- val |= 0x1 << 13;
- val |= 0x1 << 28;
- msm_camera_io_w(val, csidbase + CSID_CORE_CTRL_ADDR);
+ val |= csid_params->lane_assign << CSID_DL_INPUT_SEL_SHIFT;
+ if (csid_dev->hw_version < 0x30000000) {
+ val |= (0xF << 10);
+ msm_camera_io_w(val, csidbase + CSID_CORE_CTRL_0_ADDR);
+ } else {
+ msm_camera_io_w(val, csidbase + CSID_CORE_CTRL_0_ADDR);
+ val = csid_params->phy_sel << CSID_PHY_SEL_SHIFT;
+ val |= 0xF;
+ msm_camera_io_w(val, csidbase + CSID_CORE_CTRL_1_ADDR);
+ }
rc = msm_csid_cid_lut(&csid_params->lut_params, csidbase);
if (rc < 0)
@@ -317,6 +289,10 @@
platform_set_drvdata(pdev, &new_csid_dev->subdev);
mutex_init(&new_csid_dev->mutex);
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+
new_csid_dev->mem = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "csid");
if (!new_csid_dev->mem) {
@@ -344,6 +320,13 @@
sd_info.sd_index = pdev->id;
sd_info.irq_num = new_csid_dev->irq->start;
msm_cam_register_subdev_node(&new_csid_dev->subdev, &sd_info);
+
+ media_entity_init(&new_csid_dev->subdev.entity, 0, NULL, 0);
+ new_csid_dev->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ new_csid_dev->subdev.entity.group_id = CSID_DEV;
+ new_csid_dev->subdev.entity.name = pdev->name;
+ new_csid_dev->subdev.entity.revision =
+ new_csid_dev->subdev.devnode->num;
return 0;
csid_no_resource:
@@ -352,11 +335,19 @@
return 0;
}
+static const struct of_device_id msm_csid_dt_match[] = {
+ {.compatible = "qcom,csid"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_csid_dt_match);
+
static struct platform_driver csid_driver = {
.probe = csid_probe,
.driver = {
.name = MSM_CSID_DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = msm_csid_dt_match,
},
};
diff --git a/drivers/media/video/msm/csi/msm_csiphy.c b/drivers/media/video/msm/csi/msm_csiphy.c
index 4693a8a..bec7ae3 100644
--- a/drivers/media/video/msm/csi/msm_csiphy.c
+++ b/drivers/media/video/msm/csi/msm_csiphy.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/module.h>
#include <mach/board.h>
#include <mach/camera.h>
@@ -20,50 +21,19 @@
#include <media/msm_isp.h>
#include "msm_csiphy.h"
#include "msm.h"
-
+#include "msm_csiphy_hwreg.h"
#define DBG_CSIPHY 0
#define V4L2_IDENT_CSIPHY 50003
-
-/*MIPI CSI PHY registers*/
-#define MIPI_CSIPHY_LNn_CFG1_ADDR 0x0
-#define MIPI_CSIPHY_LNn_CFG2_ADDR 0x4
-#define MIPI_CSIPHY_LNn_CFG3_ADDR 0x8
-#define MIPI_CSIPHY_LNn_CFG4_ADDR 0xC
-#define MIPI_CSIPHY_LNn_CFG5_ADDR 0x10
-#define MIPI_CSIPHY_LNCK_CFG1_ADDR 0x100
-#define MIPI_CSIPHY_LNCK_CFG2_ADDR 0x104
-#define MIPI_CSIPHY_LNCK_CFG3_ADDR 0x108
-#define MIPI_CSIPHY_LNCK_CFG4_ADDR 0x10C
-#define MIPI_CSIPHY_LNCK_CFG5_ADDR 0x110
-#define MIPI_CSIPHY_LNCK_MISC1_ADDR 0x128
-#define MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR 0x1E0
-#define MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR 0x1E8
-#define MIPI_CSIPHY_T_WAKEUP_CFG1_ADDR 0x1EC
-#define MIPI_CSIPHY_GLBL_RESET_ADDR 0x0140
-#define MIPI_CSIPHY_GLBL_PWR_CFG_ADDR 0x0144
-#define MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR 0x0180
-#define MIPI_CSIPHY_INTERRUPT_STATUS1_ADDR 0x0184
-#define MIPI_CSIPHY_INTERRUPT_STATUS2_ADDR 0x0188
-#define MIPI_CSIPHY_INTERRUPT_STATUS3_ADDR 0x018C
-#define MIPI_CSIPHY_INTERRUPT_STATUS4_ADDR 0x0190
-#define MIPI_CSIPHY_INTERRUPT_MASK0_ADDR 0x01A0
-#define MIPI_CSIPHY_INTERRUPT_MASK1_ADDR 0x01A4
-#define MIPI_CSIPHY_INTERRUPT_MASK2_ADDR 0x01A8
-#define MIPI_CSIPHY_INTERRUPT_MASK3_ADDR 0x01AC
-#define MIPI_CSIPHY_INTERRUPT_MASK4_ADDR 0x01B0
-#define MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR 0x01C0
-#define MIPI_CSIPHY_INTERRUPT_CLEAR1_ADDR 0x01C4
-#define MIPI_CSIPHY_INTERRUPT_CLEAR2_ADDR 0x01C8
-#define MIPI_CSIPHY_INTERRUPT_CLEAR3_ADDR 0x01CC
-#define MIPI_CSIPHY_INTERRUPT_CLEAR4_ADDR 0x01D0
+#define CSIPHY_VERSION_V3 0x10
int msm_csiphy_config(struct csiphy_cfg_params *cfg_params)
{
int rc = 0;
int j = 0;
uint32_t val = 0;
- uint8_t lane_cnt = 0, lane_mask = 0;
+ uint8_t lane_cnt = 0;
+ uint16_t lane_mask = 0;
struct csiphy_device *csiphy_dev;
struct msm_camera_csiphy_params *csiphy_params;
void __iomem *csiphybase;
@@ -73,7 +43,8 @@
return -ENOMEM;
csiphy_params = cfg_params->parms;
- lane_mask = csiphy_params->lane_mask;
+ csiphy_dev->lane_mask[csiphy_dev->pdev->id] |= csiphy_params->lane_mask;
+ lane_mask = csiphy_dev->lane_mask[csiphy_dev->pdev->id];
lane_cnt = csiphy_params->lane_cnt;
if (csiphy_params->lane_cnt < 1 || csiphy_params->lane_cnt > 4) {
CDBG("%s: unsupported lane cnt %d\n",
@@ -81,13 +52,30 @@
return rc;
}
- val = 0x3;
- msm_camera_io_w((csiphy_params->lane_mask << 2) | val,
- csiphybase + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR);
msm_camera_io_w(0x1, csiphybase + MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR);
msm_camera_io_w(0x1, csiphybase + MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR);
- while (lane_mask & 0xf) {
+ if (csiphy_dev->hw_version != CSIPHY_VERSION_V3) {
+ val = 0x3;
+ msm_camera_io_w((lane_mask << 2) | val,
+ csiphybase + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR);
+ msm_camera_io_w(0x10, csiphybase + MIPI_CSIPHY_LNCK_CFG2_ADDR);
+ msm_camera_io_w(csiphy_params->settle_cnt,
+ csiphybase + MIPI_CSIPHY_LNCK_CFG3_ADDR);
+ msm_camera_io_w(0x24,
+ csiphybase + MIPI_CSIPHY_INTERRUPT_MASK0_ADDR);
+ msm_camera_io_w(0x24,
+ csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR);
+ } else {
+ val = 0x1;
+ msm_camera_io_w((lane_mask << 1) | val,
+ csiphybase + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR);
+ msm_camera_io_w(csiphy_params->combo_mode <<
+ MIPI_CSIPHY_MODE_CONFIG_SHIFT,
+ csiphybase + MIPI_CSIPHY_GLBL_RESET_ADDR);
+ }
+
+ while (lane_mask & 0x1f) {
if (!(lane_mask & 0x1)) {
j++;
lane_mask >>= 1;
@@ -97,66 +85,33 @@
csiphybase + MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*j);
msm_camera_io_w(csiphy_params->settle_cnt,
csiphybase + MIPI_CSIPHY_LNn_CFG3_ADDR + 0x40*j);
- msm_camera_io_w(0x6F,
- csiphybase + MIPI_CSIPHY_INTERRUPT_MASK0_ADDR +
- 0x4*(j+1));
- msm_camera_io_w(0x6F,
- csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR +
- 0x4*(j+1));
+ msm_camera_io_w(MIPI_CSIPHY_INTERRUPT_MASK_VAL, csiphybase +
+ MIPI_CSIPHY_INTERRUPT_MASK_ADDR + 0x4*j);
+ msm_camera_io_w(MIPI_CSIPHY_INTERRUPT_MASK_VAL, csiphybase +
+ MIPI_CSIPHY_INTERRUPT_CLEAR_ADDR + 0x4*j);
j++;
lane_mask >>= 1;
}
- msm_camera_io_w(0x10, csiphybase + MIPI_CSIPHY_LNCK_CFG2_ADDR);
- msm_camera_io_w(csiphy_params->settle_cnt,
- csiphybase + MIPI_CSIPHY_LNCK_CFG3_ADDR);
-
- msm_camera_io_w(0x24,
- csiphybase + MIPI_CSIPHY_INTERRUPT_MASK0_ADDR);
- msm_camera_io_w(0x24,
- csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR);
return rc;
}
static irqreturn_t msm_csiphy_irq(int irq_num, void *data)
{
uint32_t irq;
+ int i;
struct csiphy_device *csiphy_dev = data;
- irq = msm_camera_io_r(
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR);
- msm_camera_io_w(irq,
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR);
- CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS0 = 0x%x\n",
- __func__, csiphy_dev->pdev->id, irq);
-
- irq = msm_camera_io_r(
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS1_ADDR);
- msm_camera_io_w(irq,
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR1_ADDR);
- CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS1 = 0x%x\n",
- __func__, csiphy_dev->pdev->id, irq);
-
- irq = msm_camera_io_r(
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS2_ADDR);
- msm_camera_io_w(irq,
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR2_ADDR);
- CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS2 = 0x%x\n",
- __func__, csiphy_dev->pdev->id, irq);
-
- irq = msm_camera_io_r(
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS3_ADDR);
- msm_camera_io_w(irq,
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR3_ADDR);
- CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS3 = 0x%x\n",
- __func__, csiphy_dev->pdev->id, irq);
-
- irq = msm_camera_io_r(
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS4_ADDR);
- msm_camera_io_w(irq,
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR4_ADDR);
- CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS4 = 0x%x\n",
- __func__, csiphy_dev->pdev->id, irq);
+ for (i = 0; i < 5; i++) {
+ irq = msm_camera_io_r(
+ csiphy_dev->base +
+ MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR + 0x4*i);
+ msm_camera_io_w(irq,
+ csiphy_dev->base +
+ MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR + 0x4*i);
+ CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS%d = 0x%x\n",
+ __func__, csiphy_dev->pdev->id, i, irq);
+ }
msm_camera_io_w(0x1, csiphy_dev->base + 0x164);
msm_camera_io_w(0x0, csiphy_dev->base + 0x164);
return IRQ_HANDLED;
@@ -193,9 +148,16 @@
return rc;
}
+ if (csiphy_dev->ref_count++) {
+ CDBG("%s csiphy refcount = %d\n", __func__,
+ csiphy_dev->ref_count);
+ return rc;
+ }
+
csiphy_dev->base = ioremap(csiphy_dev->mem->start,
resource_size(csiphy_dev->mem));
if (!csiphy_dev->base) {
+ csiphy_dev->ref_count--;
rc = -ENOMEM;
return rc;
}
@@ -204,6 +166,7 @@
csiphy_dev->csiphy_clk, ARRAY_SIZE(csiphy_clk_info), 1);
if (rc < 0) {
+ csiphy_dev->ref_count--;
iounmap(csiphy_dev->base);
csiphy_dev->base = NULL;
return rc;
@@ -214,17 +177,51 @@
#endif
msm_csiphy_reset(csiphy_dev);
+ csiphy_dev->hw_version =
+ msm_camera_io_r(csiphy_dev->base + MIPI_CSIPHY_HW_VERSION_ADDR);
+
return 0;
}
-static int msm_csiphy_release(struct v4l2_subdev *sd)
+static int msm_csiphy_release(struct v4l2_subdev *sd, void *arg)
{
struct csiphy_device *csiphy_dev;
- int i;
+ int i = 0;
+ struct msm_camera_csi_lane_params *csi_lane_params;
+ uint16_t csi_lane_mask;
csiphy_dev = v4l2_get_subdevdata(sd);
- for (i = 0; i < 4; i++)
- msm_camera_io_w(0x0, csiphy_dev->base +
- MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*i);
+ csi_lane_params = (struct msm_camera_csi_lane_params *)arg;
+ csi_lane_mask = csi_lane_params->csi_lane_mask;
+
+ if (!csiphy_dev || !csiphy_dev->ref_count) {
+ pr_err("%s csiphy dev NULL / ref_count ZERO\n", __func__);
+ return 0;
+ }
+
+ if (csiphy_dev->hw_version != CSIPHY_VERSION_V3) {
+ csiphy_dev->lane_mask[csiphy_dev->pdev->id] = 0;
+ for (i = 0; i < 4; i++)
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*i);
+ } else {
+ csiphy_dev->lane_mask[csiphy_dev->pdev->id] &=
+ ~(csi_lane_params->csi_lane_mask);
+ i = 0;
+ while (csi_lane_mask & 0x1F) {
+ if (csi_lane_mask & 0x1) {
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*i);
+ }
+ csi_lane_mask >>= 1;
+ i++;
+ }
+ }
+
+ if (--csiphy_dev->ref_count) {
+ CDBG("%s csiphy refcount = %d\n", __func__,
+ csiphy_dev->ref_count);
+ return 0;
+ }
msm_camera_io_w(0x0, csiphy_dev->base + MIPI_CSIPHY_LNCK_CFG2_ADDR);
msm_camera_io_w(0x0, csiphy_dev->base + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR);
@@ -258,7 +255,7 @@
rc = msm_csiphy_init(sd);
break;
case VIDIOC_MSM_CSIPHY_RELEASE:
- rc = msm_csiphy_release(sd);
+ rc = msm_csiphy_release(sd, arg);
break;
default:
pr_err("%s: command not found\n", __func__);
@@ -301,6 +298,10 @@
mutex_init(&new_csiphy_dev->mutex);
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+
new_csiphy_dev->mem = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "csiphy");
if (!new_csiphy_dev->mem) {
@@ -340,6 +341,13 @@
sd_info.irq_num = new_csiphy_dev->irq->start;
msm_cam_register_subdev_node(
&new_csiphy_dev->subdev, &sd_info);
+
+ media_entity_init(&new_csiphy_dev->subdev.entity, 0, NULL, 0);
+ new_csiphy_dev->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ new_csiphy_dev->subdev.entity.group_id = CSIPHY_DEV;
+ new_csiphy_dev->subdev.entity.name = pdev->name;
+ new_csiphy_dev->subdev.entity.revision =
+ new_csiphy_dev->subdev.devnode->num;
return 0;
csiphy_no_resource:
@@ -348,11 +356,19 @@
return 0;
}
+static const struct of_device_id msm_csiphy_dt_match[] = {
+ {.compatible = "qcom,csiphy"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_csiphy_dt_match);
+
static struct platform_driver csiphy_driver = {
.probe = csiphy_probe,
.driver = {
.name = MSM_CSIPHY_DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = msm_csiphy_dt_match,
},
};
diff --git a/drivers/media/video/msm/csi/msm_csiphy.h b/drivers/media/video/msm/csi/msm_csiphy.h
index 522a1c1..1fab9c1 100644
--- a/drivers/media/video/msm/csi/msm_csiphy.h
+++ b/drivers/media/video/msm/csi/msm_csiphy.h
@@ -17,6 +17,8 @@
#include <linux/io.h>
#include <media/v4l2-subdev.h>
+#define MAX_CSIPHY 3
+
struct csiphy_device {
struct platform_device *pdev;
struct v4l2_subdev subdev;
@@ -25,8 +27,11 @@
struct resource *io;
void __iomem *base;
struct mutex mutex;
+ uint32_t hw_version;
struct clk *csiphy_clk[2];
+ uint8_t ref_count;
+ uint16_t lane_mask[MAX_CSIPHY];
};
struct csiphy_cfg_params {
@@ -35,12 +40,12 @@
};
#define VIDIOC_MSM_CSIPHY_CFG \
- _IOWR('V', BASE_VIDIOC_PRIVATE + 7, struct csiphy_cfg_params)
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 7, void *)
#define VIDIOC_MSM_CSIPHY_INIT \
_IOWR('V', BASE_VIDIOC_PRIVATE + 8, struct v4l2_subdev*)
#define VIDIOC_MSM_CSIPHY_RELEASE \
- _IOWR('V', BASE_VIDIOC_PRIVATE + 9, struct v4l2_subdev*)
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 9, void *)
#endif
diff --git a/drivers/media/video/msm/csi/msm_ispif.c b/drivers/media/video/msm/csi/msm_ispif.c
index 0f16bbf..092ee90 100644
--- a/drivers/media/video/msm/csi/msm_ispif.c
+++ b/drivers/media/video/msm/csi/msm_ispif.c
@@ -14,78 +14,31 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <mach/gpio.h>
#include <mach/camera.h>
#include "msm_ispif.h"
#include "msm.h"
+#include "msm_ispif_hwreg.h"
-#define V4L2_IDENT_ISPIF 50001
-#define CSID_VERSION_V2 0x2000011
-
-/* ISPIF registers */
-
-#define ISPIF_RST_CMD_ADDR 0X00
-#define ISPIF_INTF_CMD_ADDR 0X04
-#define ISPIF_CTRL_ADDR 0X08
-#define ISPIF_INPUT_SEL_ADDR 0X0C
-#define ISPIF_PIX_INTF_CID_MASK_ADDR 0X10
-#define ISPIF_RDI_INTF_CID_MASK_ADDR 0X14
-#define ISPIF_PIX_1_INTF_CID_MASK_ADDR 0X38
-#define ISPIF_RDI_1_INTF_CID_MASK_ADDR 0X3C
-#define ISPIF_PIX_STATUS_ADDR 0X24
-#define ISPIF_RDI_STATUS_ADDR 0X28
-#define ISPIF_RDI_1_STATUS_ADDR 0X64
-#define ISPIF_IRQ_MASK_ADDR 0X0100
-#define ISPIF_IRQ_CLEAR_ADDR 0X0104
-#define ISPIF_IRQ_STATUS_ADDR 0X0108
-#define ISPIF_IRQ_MASK_1_ADDR 0X010C
-#define ISPIF_IRQ_CLEAR_1_ADDR 0X0110
-#define ISPIF_IRQ_STATUS_1_ADDR 0X0114
-#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x0124
-
-/*ISPIF RESET BITS*/
-
-#define VFE_CLK_DOMAIN_RST 31
-#define RDI_CLK_DOMAIN_RST 30
-#define PIX_CLK_DOMAIN_RST 29
-#define AHB_CLK_DOMAIN_RST 28
-#define RDI_1_CLK_DOMAIN_RST 27
-#define RDI_1_VFE_RST_STB 13
-#define RDI_1_CSID_RST_STB 12
-#define RDI_VFE_RST_STB 7
-#define RDI_CSID_RST_STB 6
-#define PIX_VFE_RST_STB 4
-#define PIX_CSID_RST_STB 3
-#define SW_REG_RST_STB 2
-#define MISC_LOGIC_RST_STB 1
-#define STROBED_RST_EN 0
-
-#define PIX_INTF_0_OVERFLOW_IRQ 12
-#define RAW_INTF_0_OVERFLOW_IRQ 25
-#define RAW_INTF_1_OVERFLOW_IRQ 25
-#define RESET_DONE_IRQ 27
-
-#define ISPIF_IRQ_STATUS_MASK 0xA493000
-#define ISPIF_IRQ_1_STATUS_MASK 0xA493000
-#define ISPIF_IRQ_STATUS_RDI_SOF_MASK 0x492000
-#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x1
+#define V4L2_IDENT_ISPIF 50001
+#define CSID_VERSION_V2 0x02000011
+#define CSID_VERSION_V3 0x30000000
#define MAX_CID 15
+static atomic_t ispif_irq_cnt;
+static spinlock_t ispif_tasklet_lock;
+static struct list_head ispif_tasklet_q;
-static struct ispif_device *ispif;
-atomic_t ispif_irq_cnt;
-spinlock_t ispif_tasklet_lock;
-struct list_head ispif_tasklet_q;
-
-static uint32_t global_intf_cmd_mask = 0xFFFFFFFF;
-
-
-static int msm_ispif_intf_reset(uint8_t intfmask)
+static int msm_ispif_intf_reset(struct ispif_device *ispif,
+ uint16_t intfmask, uint8_t vfe_intf)
{
int rc = 0;
- uint32_t data = 0x1;
- uint8_t intfnum = 0, mask = intfmask;
+ uint32_t data = (0x1 << STROBED_RST_EN);
+ uint32_t data1 = (0x1 << STROBED_RST_EN);
+ uint16_t intfnum = 0, mask = intfmask;
+
while (mask != 0) {
if (!(intfmask & (0x1 << intfnum))) {
mask >>= 1;
@@ -94,21 +47,49 @@
}
switch (intfnum) {
case PIX0:
- data = (0x1 << STROBED_RST_EN) +
- (0x1 << PIX_VFE_RST_STB) +
- (0x1 << PIX_CSID_RST_STB);
+ if (vfe_intf == VFE0)
+ data |= (0x1 << PIX_0_VFE_RST_STB) |
+ (0x1 << PIX_0_CSID_RST_STB);
+ else
+ data1 |= (0x1 << PIX_0_VFE_RST_STB) |
+ (0x1 << PIX_0_CSID_RST_STB);
+ ispif->pix_sof_count = 0;
break;
case RDI0:
- data = (0x1 << STROBED_RST_EN) +
- (0x1 << RDI_VFE_RST_STB) +
- (0x1 << RDI_CSID_RST_STB);
+ if (vfe_intf == VFE0)
+ data |= (0x1 << RDI_0_VFE_RST_STB) |
+ (0x1 << RDI_0_CSID_RST_STB);
+ else
+ data1 |= (0x1 << RDI_0_VFE_RST_STB) |
+ (0x1 << RDI_0_CSID_RST_STB);
+ break;
+
+ case PIX1:
+ if (vfe_intf == VFE0)
+ data |= (0x1 << PIX_1_VFE_RST_STB) |
+ (0x1 << PIX_1_CSID_RST_STB);
+ else
+ data1 |= (0x1 << PIX_1_VFE_RST_STB) |
+ (0x1 << PIX_1_CSID_RST_STB);
break;
case RDI1:
- data = (0x1 << STROBED_RST_EN) +
- (0x1 << RDI_1_VFE_RST_STB) +
- (0x1 << RDI_1_CSID_RST_STB);
+ if (vfe_intf == VFE0)
+ data |= (0x1 << RDI_1_VFE_RST_STB) |
+ (0x1 << RDI_1_CSID_RST_STB);
+ else
+ data1 |= (0x1 << RDI_1_VFE_RST_STB) |
+ (0x1 << RDI_1_CSID_RST_STB);
+ break;
+
+ case RDI2:
+ if (vfe_intf == VFE0)
+ data |= (0x1 << RDI_2_VFE_RST_STB) |
+ (0x1 << RDI_2_CSID_RST_STB);
+ else
+ data1 |= (0x1 << RDI_2_VFE_RST_STB) |
+ (0x1 << RDI_2_CSID_RST_STB);
break;
default:
@@ -118,27 +99,44 @@
mask >>= 1;
intfnum++;
} /*end while */
- if (rc >= 0) {
- msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR);
- rc = wait_for_completion_interruptible(&ispif->reset_complete);
+ msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR);
+ rc = wait_for_completion_interruptible(&ispif->reset_complete);
+ if (ispif->csid_version >= CSID_VERSION_V3 && data1 > 0x1) {
+ msm_camera_io_w(data1,
+ ispif->base + ISPIF_RST_CMD_1_ADDR);
+ rc = wait_for_completion_interruptible(&ispif->
+ reset_complete);
}
return rc;
}
-static int msm_ispif_reset(void)
+static int msm_ispif_reset(struct ispif_device *ispif)
{
- uint32_t data = (0x1 << STROBED_RST_EN) +
- (0x1 << SW_REG_RST_STB) +
- (0x1 << MISC_LOGIC_RST_STB) +
- (0x1 << PIX_VFE_RST_STB) +
- (0x1 << PIX_CSID_RST_STB) +
- (0x1 << RDI_VFE_RST_STB) +
- (0x1 << RDI_CSID_RST_STB) +
- (0x1 << RDI_1_VFE_RST_STB) +
+ int rc = 0;
+ uint32_t data = (0x1 << STROBED_RST_EN) |
+ (0x1 << SW_REG_RST_STB) |
+ (0x1 << MISC_LOGIC_RST_STB) |
+ (0x1 << PIX_0_VFE_RST_STB) |
+ (0x1 << PIX_0_CSID_RST_STB) |
+ (0x1 << RDI_0_VFE_RST_STB) |
+ (0x1 << RDI_0_CSID_RST_STB) |
+ (0x1 << RDI_1_VFE_RST_STB) |
(0x1 << RDI_1_CSID_RST_STB);
+
+ if (ispif->csid_version >= CSID_VERSION_V2)
+ data |= (0x1 << PIX_1_VFE_RST_STB) |
+ (0x1 << PIX_1_CSID_RST_STB) |
+ (0x1 << RDI_2_VFE_RST_STB) |
+ (0x1 << RDI_2_CSID_RST_STB);
+ ispif->pix_sof_count = 0;
msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR);
- return wait_for_completion_interruptible(&ispif->reset_complete);
+ rc = wait_for_completion_interruptible(&ispif->reset_complete);
+ if (ispif->csid_version >= CSID_VERSION_V3) {
+ msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_1_ADDR);
+ rc = wait_for_completion_interruptible(&ispif->reset_complete);
+ }
+ return rc;
}
static int msm_ispif_subdev_g_chip_ident(struct v4l2_subdev *sd,
@@ -150,10 +148,12 @@
return 0;
}
-static void msm_ispif_sel_csid_core(uint8_t intftype, uint8_t csid)
+static void msm_ispif_sel_csid_core(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
{
int rc = 0;
uint32_t data;
+
if (ispif->ispif_clk[intftype] == NULL) {
pr_err("%s: ispif NULL clk\n", __func__);
return;
@@ -163,62 +163,156 @@
if (rc < 0)
pr_err("%s: clk_set_rate failed %d\n", __func__, rc);
- data = msm_camera_io_r(ispif->base + ISPIF_INPUT_SEL_ADDR);
- data |= csid<<(intftype*4);
- msm_camera_io_w(data, ispif->base + ISPIF_INPUT_SEL_ADDR);
+ data = msm_camera_io_r(ispif->base + ISPIF_INPUT_SEL_ADDR +
+ (0x200 * vfe_intf));
+ switch (intftype) {
+ case PIX0:
+ data |= csid;
+ break;
+
+ case RDI0:
+ data |= (csid << 4);
+ break;
+
+ case PIX1:
+ data |= (csid << 8);
+ break;
+
+ case RDI1:
+ data |= (csid << 12);
+ break;
+
+ case RDI2:
+ data |= (csid << 20);
+ break;
+ }
+ msm_camera_io_w(data, ispif->base + ISPIF_INPUT_SEL_ADDR +
+ (0x200 * vfe_intf));
}
-static void msm_ispif_enable_intf_cids(uint8_t intftype, uint16_t cid_mask)
+static void msm_ispif_enable_intf_cids(struct ispif_device *ispif,
+ uint8_t intftype, uint16_t cid_mask, uint8_t vfe_intf)
{
uint32_t data;
mutex_lock(&ispif->mutex);
switch (intftype) {
case PIX0:
data = msm_camera_io_r(ispif->base +
- ISPIF_PIX_INTF_CID_MASK_ADDR);
+ ISPIF_PIX_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
data |= cid_mask;
msm_camera_io_w(data, ispif->base +
- ISPIF_PIX_INTF_CID_MASK_ADDR);
+ ISPIF_PIX_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
break;
case RDI0:
data = msm_camera_io_r(ispif->base +
- ISPIF_RDI_INTF_CID_MASK_ADDR);
+ ISPIF_RDI_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
data |= cid_mask;
msm_camera_io_w(data, ispif->base +
- ISPIF_RDI_INTF_CID_MASK_ADDR);
+ ISPIF_RDI_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+ break;
+
+ case PIX1:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_PIX_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+ data |= cid_mask;
+ msm_camera_io_w(data, ispif->base +
+ ISPIF_PIX_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
break;
case RDI1:
data = msm_camera_io_r(ispif->base +
- ISPIF_RDI_1_INTF_CID_MASK_ADDR);
+ ISPIF_RDI_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
data |= cid_mask;
msm_camera_io_w(data, ispif->base +
- ISPIF_RDI_1_INTF_CID_MASK_ADDR);
+ ISPIF_RDI_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+ break;
+
+ case RDI2:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_RDI_2_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+ data |= cid_mask;
+ msm_camera_io_w(data, ispif->base +
+ ISPIF_RDI_2_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
break;
}
mutex_unlock(&ispif->mutex);
}
-static int msm_ispif_config(struct msm_ispif_params_list *params_list)
+static int32_t msm_ispif_validate_intf_status(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t vfe_intf)
+{
+ int32_t rc = 0;
+ uint32_t data;
+ mutex_lock(&ispif->mutex);
+ switch (intftype) {
+ case PIX0:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_PIX_0_STATUS_ADDR + (0x200 * vfe_intf));
+ break;
+
+ case RDI0:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_RDI_0_STATUS_ADDR + (0x200 * vfe_intf));
+ break;
+
+ case PIX1:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_PIX_1_STATUS_ADDR + (0x200 * vfe_intf));
+ break;
+
+ case RDI1:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_RDI_1_STATUS_ADDR + (0x200 * vfe_intf));
+ break;
+
+ case RDI2:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_RDI_2_STATUS_ADDR + (0x200 * vfe_intf));
+ break;
+ }
+ if ((data & 0xf) != 0xf)
+ rc = -EBUSY;
+ mutex_unlock(&ispif->mutex);
+ return rc;
+}
+
+static int msm_ispif_config(struct ispif_device *ispif,
+ struct msm_ispif_params_list *params_list)
{
uint32_t params_len;
struct msm_ispif_params *ispif_params;
- uint32_t data, data1;
int rc = 0, i = 0;
+ uint8_t intftype;
+ uint8_t vfe_intf;
params_len = params_list->len;
ispif_params = params_list->params;
CDBG("Enable interface\n");
- data = msm_camera_io_r(ispif->base + ISPIF_PIX_STATUS_ADDR);
- data1 = msm_camera_io_r(ispif->base + ISPIF_RDI_STATUS_ADDR);
- if (((data & 0xf) != 0xf) || ((data1 & 0xf) != 0xf))
- return -EBUSY;
msm_camera_io_w(0x00000000, ispif->base + ISPIF_IRQ_MASK_ADDR);
for (i = 0; i < params_len; i++) {
- msm_ispif_sel_csid_core(ispif_params[i].intftype,
- ispif_params[i].csid);
- msm_ispif_enable_intf_cids(ispif_params[i].intftype,
- ispif_params[i].cid_mask);
+ intftype = ispif_params[i].intftype;
+ vfe_intf = ispif_params[i].vfe_intf;
+ CDBG("%s intftype %x, vfe_intf %d\n", __func__, intftype,
+ vfe_intf);
+ if ((intftype >= INTF_MAX) ||
+ (ispif->csid_version <= CSID_VERSION_V2 &&
+ vfe_intf > VFE0) ||
+ (ispif->csid_version == CSID_VERSION_V3 &&
+ vfe_intf >= VFE_MAX)) {
+ pr_err("%s: intftype / vfe intf not valid\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rc = msm_ispif_validate_intf_status(ispif, intftype, vfe_intf);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ msm_ispif_sel_csid_core(ispif, intftype, ispif_params[i].csid,
+ vfe_intf);
+ msm_ispif_enable_intf_cids(ispif, intftype,
+ ispif_params[i].cid_mask, vfe_intf);
}
msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
@@ -230,23 +324,34 @@
return rc;
}
-static uint32_t msm_ispif_get_cid_mask(uint8_t intftype)
+static uint32_t msm_ispif_get_cid_mask(struct ispif_device *ispif,
+ uint16_t intftype, uint8_t vfe_intf)
{
uint32_t mask = 0;
switch (intftype) {
case PIX0:
mask = msm_camera_io_r(ispif->base +
- ISPIF_PIX_INTF_CID_MASK_ADDR);
+ ISPIF_PIX_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
break;
case RDI0:
mask = msm_camera_io_r(ispif->base +
- ISPIF_RDI_INTF_CID_MASK_ADDR);
+ ISPIF_RDI_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+ break;
+
+ case PIX1:
+ mask = msm_camera_io_r(ispif->base +
+ ISPIF_PIX_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
break;
case RDI1:
mask = msm_camera_io_r(ispif->base +
- ISPIF_RDI_1_INTF_CID_MASK_ADDR);
+ ISPIF_RDI_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+ break;
+
+ case RDI2:
+ mask = msm_camera_io_r(ispif->base +
+ ISPIF_RDI_2_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
break;
default:
@@ -255,12 +360,14 @@
return mask;
}
-static void
-msm_ispif_intf_cmd(uint8_t intfmask, uint8_t intf_cmd_mask)
+static void msm_ispif_intf_cmd(struct ispif_device *ispif, uint16_t intfmask,
+ uint8_t intf_cmd_mask, uint8_t vfe_intf)
{
- uint8_t vc = 0, val = 0;
- uint8_t mask = intfmask, intfnum = 0;
+ uint8_t vc = 0;
+ uint16_t mask = intfmask, intfnum = 0;
uint32_t cid_mask = 0;
+ uint32_t global_intf_cmd_mask = 0xFFFFFFFF;
+ uint32_t global_intf_cmd_mask1 = 0xFFFFFFFF;
while (mask != 0) {
if (!(intfmask & (0x1 << intfnum))) {
mask >>= 1;
@@ -268,17 +375,20 @@
continue;
}
- cid_mask = msm_ispif_get_cid_mask(intfnum);
+ cid_mask = msm_ispif_get_cid_mask(ispif, intfnum, vfe_intf);
vc = 0;
while (cid_mask != 0) {
if ((cid_mask & 0xf) != 0x0) {
- val = (intf_cmd_mask>>(vc*2)) & 0x3;
- global_intf_cmd_mask |=
- (0x3 << ((vc * 2) + (intfnum * 8)));
- global_intf_cmd_mask &= ~((0x3 & ~val)
- << ((vc * 2) +
- (intfnum * 8)));
+ if (intfnum != RDI2)
+ global_intf_cmd_mask &=
+ ~((0x3 & ~intf_cmd_mask)
+ << ((vc * 2) +
+ (intfnum * 8)));
+ else
+ global_intf_cmd_mask1 &=
+ ~((0x3 & ~intf_cmd_mask)
+ << ((vc * 2) + 8));
}
vc++;
cid_mask >>= 4;
@@ -287,50 +397,57 @@
intfnum++;
}
msm_camera_io_w(global_intf_cmd_mask,
- ispif->base + ISPIF_INTF_CMD_ADDR);
+ ispif->base + ISPIF_INTF_CMD_ADDR + (0x200 * vfe_intf));
+ if (global_intf_cmd_mask1 != 0xFFFFFFFF)
+ msm_camera_io_w(global_intf_cmd_mask1,
+ ispif->base + ISPIF_INTF_CMD_1_ADDR +
+ (0x200 * vfe_intf));
}
-static int msm_ispif_abort_intf_transfer(uint8_t intfmask)
+static int msm_ispif_abort_intf_transfer(struct ispif_device *ispif,
+ uint16_t intfmask, uint8_t vfe_intf)
{
int rc = 0;
- uint8_t intf_cmd_mask = 0xAA;
- uint8_t intfnum = 0, mask = intfmask;
+ uint8_t intf_cmd_mask = 0x02;
mutex_lock(&ispif->mutex);
- msm_ispif_intf_cmd(intfmask, intf_cmd_mask);
- while (mask != 0) {
- if (intfmask & (0x1 << intfnum))
- global_intf_cmd_mask |= (0xFF << (intfnum * 8));
- mask >>= 1;
- intfnum++;
- }
+ CDBG("%s intfmask %x intf_cmd_mask %x\n", __func__, intfmask,
+ intf_cmd_mask);
+ msm_ispif_intf_cmd(ispif, intfmask, intf_cmd_mask, vfe_intf);
mutex_unlock(&ispif->mutex);
return rc;
}
-static int msm_ispif_start_intf_transfer(uint8_t intfmask)
+static int msm_ispif_start_intf_transfer(struct ispif_device *ispif,
+ uint16_t intfmask, uint8_t vfe_intf)
{
- uint8_t intf_cmd_mask = 0x55;
+ uint8_t intf_cmd_mask = 0x01;
int rc = 0;
mutex_lock(&ispif->mutex);
- rc = msm_ispif_intf_reset(intfmask);
- msm_ispif_intf_cmd(intfmask, intf_cmd_mask);
+ rc = msm_ispif_intf_reset(ispif, intfmask, vfe_intf);
+ CDBG("%s intfmask %x intf_cmd_mask %x\n", __func__, intfmask,
+ intf_cmd_mask);
+ msm_ispif_intf_cmd(ispif, intfmask, intf_cmd_mask, vfe_intf);
mutex_unlock(&ispif->mutex);
return rc;
}
-static int msm_ispif_stop_intf_transfer(uint8_t intfmask)
+static int msm_ispif_stop_intf_transfer(struct ispif_device *ispif,
+ uint16_t intfmask, uint8_t vfe_intf)
{
int rc = 0;
uint8_t intf_cmd_mask = 0x00;
- uint8_t intfnum = 0, mask = intfmask;
+ uint16_t intfnum = 0, mask = intfmask;
mutex_lock(&ispif->mutex);
- msm_ispif_intf_cmd(intfmask, intf_cmd_mask);
+ CDBG("%s intfmask %x intf_cmd_mask %x\n", __func__, intfmask,
+ intf_cmd_mask);
+ msm_ispif_intf_cmd(ispif, intfmask, intf_cmd_mask, vfe_intf);
while (mask != 0) {
if (intfmask & (0x1 << intfnum)) {
switch (intfnum) {
case PIX0:
while ((msm_camera_io_r(ispif->base +
- ISPIF_PIX_STATUS_ADDR)
+ ISPIF_PIX_0_STATUS_ADDR +
+ (0x200 * vfe_intf))
& 0xf) != 0xf) {
CDBG("Wait for pix0 Idle\n");
}
@@ -338,24 +455,43 @@
case RDI0:
while ((msm_camera_io_r(ispif->base +
- ISPIF_RDI_STATUS_ADDR)
+ ISPIF_RDI_0_STATUS_ADDR +
+ (0x200 * vfe_intf))
& 0xf) != 0xf) {
CDBG("Wait for rdi0 Idle\n");
}
break;
+ case PIX1:
+ while ((msm_camera_io_r(ispif->base +
+ ISPIF_PIX_1_STATUS_ADDR +
+ (0x200 * vfe_intf))
+ & 0xf) != 0xf) {
+ CDBG("Wait for pix1 Idle\n");
+ }
+ break;
+
case RDI1:
while ((msm_camera_io_r(ispif->base +
- ISPIF_RDI_1_STATUS_ADDR)
+ ISPIF_RDI_1_STATUS_ADDR +
+ (0x200 * vfe_intf))
& 0xf) != 0xf) {
CDBG("Wait for rdi1 Idle\n");
}
break;
+ case RDI2:
+ while ((msm_camera_io_r(ispif->base +
+ ISPIF_RDI_2_STATUS_ADDR +
+ (0x200 * vfe_intf))
+ & 0xf) != 0xf) {
+ CDBG("Wait for rdi2 Idle\n");
+ }
+ break;
+
default:
break;
}
- global_intf_cmd_mask |= (0xFF << (intfnum * 8));
}
mask >>= 1;
intfnum++;
@@ -364,24 +500,33 @@
return rc;
}
-static int msm_ispif_subdev_video_s_stream(struct v4l2_subdev *sd, int enable)
+static int msm_ispif_subdev_video_s_stream(struct v4l2_subdev *sd,
+ int enable)
{
struct ispif_device *ispif =
(struct ispif_device *)v4l2_get_subdevdata(sd);
- int32_t cmd = enable & ((1<<ISPIF_S_STREAM_SHIFT)-1);
- enum msm_ispif_intftype intf = enable >> ISPIF_S_STREAM_SHIFT;
+ uint32_t cmd = enable & ((1<<ISPIF_S_STREAM_SHIFT)-1);
+ uint16_t intf = enable >> ISPIF_S_STREAM_SHIFT;
+ uint8_t vfe_intf = enable >> ISPIF_VFE_INTF_SHIFT;
int rc = -EINVAL;
-
+ CDBG("%s enable %x, cmd %x, intf %x\n", __func__, enable, cmd, intf);
BUG_ON(!ispif);
+ if ((ispif->csid_version <= CSID_VERSION_V2 && vfe_intf > VFE0) ||
+ (ispif->csid_version == CSID_VERSION_V3 &&
+ vfe_intf >= VFE_MAX)) {
+ pr_err("%s invalid csid version %x && vfe intf %d\n", __func__,
+ ispif->csid_version, vfe_intf);
+ return rc;
+ }
switch (cmd) {
case ISPIF_ON_FRAME_BOUNDARY:
- rc = msm_ispif_start_intf_transfer(intf);
+ rc = msm_ispif_start_intf_transfer(ispif, intf, vfe_intf);
break;
case ISPIF_OFF_FRAME_BOUNDARY:
- rc = msm_ispif_stop_intf_transfer(intf);
+ rc = msm_ispif_stop_intf_transfer(ispif, intf, vfe_intf);
break;
case ISPIF_OFF_IMMEDIATELY:
- rc = msm_ispif_abort_intf_transfer(intf);
+ rc = msm_ispif_abort_intf_transfer(ispif, intf, vfe_intf);
break;
default:
break;
@@ -425,7 +570,8 @@
DECLARE_TASKLET(ispif_tasklet, ispif_do_tasklet, 0);
-static void ispif_process_irq(struct ispif_irq_status *out)
+static void ispif_process_irq(struct ispif_device *ispif,
+ struct ispif_irq_status *out)
{
unsigned long flags;
struct ispif_isr_queue_cmd *qcmd;
@@ -440,6 +586,13 @@
qcmd->ispifInterruptStatus0 = out->ispifIrqStatus0;
qcmd->ispifInterruptStatus1 = out->ispifIrqStatus1;
+ if (qcmd->ispifInterruptStatus0 & ISPIF_IRQ_STATUS_PIX_SOF_MASK) {
+ CDBG("%s: ispif PIX sof irq\n", __func__);
+ ispif->pix_sof_count++;
+ v4l2_subdev_notify(&ispif->subdev, NOTIFY_VFE_SOF_COUNT,
+ (void *)&ispif->pix_sof_count);
+ }
+
spin_lock_irqsave(&ispif_tasklet_lock, flags);
list_add_tail(&qcmd->list, &ispif_tasklet_q);
@@ -449,8 +602,10 @@
return;
}
-static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out)
+static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out,
+ void *data)
{
+ struct ispif_device *ispif = (struct ispif_device *)data;
out->ispifIrqStatus0 = msm_camera_io_r(ispif->base +
ISPIF_IRQ_STATUS_ADDR);
out->ispifIrqStatus1 = msm_camera_io_r(ispif->base +
@@ -460,7 +615,7 @@
msm_camera_io_w(out->ispifIrqStatus1,
ispif->base + ISPIF_IRQ_CLEAR_1_ADDR);
- CDBG("ispif->irq: Irq_status0 = 0x%x\n",
+ CDBG("%s: irq ispif->irq: Irq_status0 = 0x%x\n", __func__,
out->ispifIrqStatus0);
if (out->ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
if (out->ispifIrqStatus0 & (0x1 << RESET_DONE_IRQ))
@@ -469,10 +624,10 @@
pr_err("%s: pix intf 0 overflow.\n", __func__);
if (out->ispifIrqStatus0 & (0x1 << RAW_INTF_0_OVERFLOW_IRQ))
pr_err("%s: rdi intf 0 overflow.\n", __func__);
- if ((out->ispifIrqStatus0 & ISPIF_IRQ_STATUS_RDI_SOF_MASK) ||
+ if ((out->ispifIrqStatus0 & ISPIF_IRQ_STATUS_SOF_MASK) ||
(out->ispifIrqStatus1 &
- ISPIF_IRQ_STATUS_RDI_SOF_MASK)) {
- ispif_process_irq(out);
+ ISPIF_IRQ_STATUS_SOF_MASK)) {
+ ispif_process_irq(ispif, out);
}
}
msm_camera_io_w(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
@@ -482,7 +637,7 @@
static irqreturn_t msm_io_ispif_irq(int irq_num, void *data)
{
struct ispif_irq_status irq;
- msm_ispif_read_irq_status(&irq);
+ msm_ispif_read_irq_status(&irq, data);
return IRQ_HANDLED;
}
@@ -494,19 +649,19 @@
{"csi_rdi2_clk", 0},
};
-static int msm_ispif_init(const uint32_t *csid_version)
+static int msm_ispif_init(struct ispif_device *ispif,
+ const uint32_t *csid_version)
{
int rc = 0;
+ CDBG("%s called %d\n", __func__, __LINE__);
spin_lock_init(&ispif_tasklet_lock);
INIT_LIST_HEAD(&ispif_tasklet_q);
rc = request_irq(ispif->irq->start, msm_io_ispif_irq,
- IRQF_TRIGGER_RISING, "ispif", 0);
-
- global_intf_cmd_mask = 0xFFFFFFFF;
+ IRQF_TRIGGER_RISING, "ispif", ispif);
init_completion(&ispif->reset_complete);
ispif->csid_version = *csid_version;
- if (ispif->csid_version == CSID_VERSION_V2) {
+ if (ispif->csid_version >= CSID_VERSION_V2) {
rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_clk_info,
ispif->ispif_clk, ARRAY_SIZE(ispif_clk_info), 1);
if (rc < 0)
@@ -517,16 +672,12 @@
if (rc < 0)
return rc;
}
-
- rc = msm_ispif_reset();
+ rc = msm_ispif_reset(ispif);
return rc;
}
-static void msm_ispif_release(struct v4l2_subdev *sd)
+static void msm_ispif_release(struct ispif_device *ispif)
{
- struct ispif_device *ispif =
- (struct ispif_device *)v4l2_get_subdevdata(sd);
-
CDBG("%s, free_irq\n", __func__);
free_irq(ispif->irq->start, 0);
tasklet_kill(&ispif_tasklet);
@@ -539,43 +690,12 @@
ispif->ispif_clk, 2, 0);
}
-void msm_ispif_vfe_get_cid(uint8_t intftype, char *cids, int *num)
-{
- uint32_t data = 0;
- int i = 0, j = 0;
- switch (intftype) {
- case PIX0:
- data = msm_camera_io_r(ispif->base +
- ISPIF_PIX_INTF_CID_MASK_ADDR);
- break;
-
- case RDI0:
- data = msm_camera_io_r(ispif->base +
- ISPIF_RDI_INTF_CID_MASK_ADDR);
- break;
-
- case RDI1:
- data = msm_camera_io_r(ispif->base +
- ISPIF_RDI_1_INTF_CID_MASK_ADDR);
- break;
-
- default:
- break;
- }
- for (i = 0; i <= MAX_CID; i++) {
- if ((data & 0x1) == 0x1) {
- cids[j++] = i;
- (*num)++;
- }
- data >>= 1;
- }
-}
-
static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg)
{
long rc = 0;
struct ispif_cfg_data cdata;
-
+ struct ispif_device *ispif =
+ (struct ispif_device *)v4l2_get_subdevdata(sd);
if (copy_from_user(&cdata, (void *)arg, sizeof(struct ispif_cfg_data)))
return -EFAULT;
CDBG("%s cfgtype = %d\n", __func__, cdata.cfgtype);
@@ -583,7 +703,7 @@
case ISPIF_INIT:
CDBG("%s csid_version = %x\n", __func__,
cdata.cfg.csid_version);
- rc = msm_ispif_init(&cdata.cfg.csid_version);
+ rc = msm_ispif_init(ispif, &cdata.cfg.csid_version);
break;
case ISPIF_SET_CFG:
CDBG("%s len = %d, intftype = %d,.cid_mask = %d, csid = %d\n",
@@ -592,7 +712,7 @@
cdata.cfg.ispif_params.params[0].intftype,
cdata.cfg.ispif_params.params[0].cid_mask,
cdata.cfg.ispif_params.params[0].csid);
- rc = msm_ispif_config(&cdata.cfg.ispif_params);
+ rc = msm_ispif_config(ispif, &cdata.cfg.ispif_params);
break;
case ISPIF_SET_ON_FRAME_BOUNDARY:
@@ -601,7 +721,7 @@
rc = msm_ispif_subdev_video_s_stream(sd, cdata.cfg.cmd);
break;
case ISPIF_RELEASE:
- msm_ispif_release(sd);
+ msm_ispif_release(ispif);
break;
default:
break;
@@ -641,6 +761,7 @@
{
int rc = 0;
struct msm_cam_subdev_info sd_info;
+ struct ispif_device *ispif;
CDBG("%s\n", __func__);
ispif = kzalloc(sizeof(struct ispif_device), GFP_KERNEL);
@@ -660,6 +781,10 @@
"ispif");
mutex_init(&ispif->mutex);
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+
ispif->mem = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "ispif");
if (!ispif->mem) {
@@ -693,6 +818,12 @@
sd_info.sd_index = pdev->id;
sd_info.irq_num = ispif->irq->start;
msm_cam_register_subdev_node(&ispif->subdev, &sd_info);
+
+ media_entity_init(&ispif->subdev.entity, 0, NULL, 0);
+ ispif->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ ispif->subdev.entity.group_id = ISPIF_DEV;
+ ispif->subdev.entity.name = pdev->name;
+ ispif->subdev.entity.revision = ispif->subdev.devnode->num;
return 0;
ispif_no_mem:
@@ -704,11 +835,18 @@
return rc;
}
+static const struct of_device_id msm_ispif_dt_match[] = {
+ {.compatible = "qcom,ispif"},
+};
+
+MODULE_DEVICE_TABLE(of, msm_ispif_dt_match);
+
static struct platform_driver ispif_driver = {
.probe = ispif_probe,
.driver = {
.name = MSM_ISPIF_DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = msm_ispif_dt_match,
},
};
diff --git a/drivers/media/video/msm/csi/msm_ispif.h b/drivers/media/video/msm/csi/msm_ispif.h
index 7b301ba..f4ad661 100644
--- a/drivers/media/video/msm/csi/msm_ispif.h
+++ b/drivers/media/video/msm/csi/msm_ispif.h
@@ -34,6 +34,7 @@
struct completion reset_complete;
uint32_t csid_version;
struct clk *ispif_clk[5];
+ uint32_t pix_sof_count;
};
struct ispif_isr_queue_cmd {
@@ -45,6 +46,4 @@
#define VIDIOC_MSM_ISPIF_CFG \
_IOWR('V', BASE_VIDIOC_PRIVATE + 18, struct ispif_cfg_data*)
-void msm_ispif_vfe_get_cid(uint8_t intftype, char *cids, int *num);
-
#endif
diff --git a/drivers/media/video/msm/gemini/msm_gemini_sync.c b/drivers/media/video/msm/gemini/msm_gemini_sync.c
index ae3de13..97bb611 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_sync.c
+++ b/drivers/media/video/msm/gemini/msm_gemini_sync.c
@@ -218,6 +218,7 @@
return -EAGAIN;
}
+ memset(&ctrl_cmd, 0, sizeof(struct msm_gemini_ctrl_cmd));
ctrl_cmd.type = buf_p->vbuf.type;
kfree(buf_p);
@@ -485,10 +486,12 @@
} else {
buf_p->y_buffer_addr = msm_gemini_platform_v2p(buf_cmd.fd,
buf_cmd.y_len + buf_cmd.cbcr_len, &buf_p->file,
- &buf_p->handle) + buf_cmd.offset;
+ &buf_p->handle) + buf_cmd.offset + buf_cmd.y_off;
}
buf_p->y_len = buf_cmd.y_len;
- buf_p->cbcr_buffer_addr = buf_p->y_buffer_addr + buf_cmd.y_len;
+
+ buf_p->cbcr_buffer_addr = buf_p->y_buffer_addr + buf_cmd.y_len +
+ buf_cmd.cbcr_off;
buf_p->cbcr_len = buf_cmd.cbcr_len;
buf_p->num_of_mcu_rows = buf_cmd.num_of_mcu_rows;
GMN_DBG("%s: y_addr=%x,y_len=%x,cbcr_addr=%x,cbcr_len=%x\n", __func__,
diff --git a/drivers/media/video/msm/io/Makefile b/drivers/media/video/msm/io/Makefile
index 611eecd..fdff226 100644
--- a/drivers/media/video/msm/io/Makefile
+++ b/drivers/media/video/msm/io/Makefile
@@ -1,6 +1,6 @@
GCC_VERSION := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
-EXTRA_CFLAGS += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/video/msm -Idrivers/media/video/msm/cci
obj-$(CONFIG_MSM_CAMERA) += msm_camera_io_util.o msm_camera_i2c.o
ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
obj-$(CONFIG_MSM_CAMERA) += msm_camera_i2c_mux.o
diff --git a/drivers/media/video/msm/io/msm_camera_i2c.c b/drivers/media/video/msm/io/msm_camera_i2c.c
index e946569..82bca02 100644
--- a/drivers/media/video/msm/io/msm_camera_i2c.c
+++ b/drivers/media/video/msm/io/msm_camera_i2c.c
@@ -10,7 +10,10 @@
* GNU General Public License for more details.
*/
+#include <mach/camera.h>
#include "msm_camera_i2c.h"
+#include "msm.h"
+#include "msm_cci.h"
int32_t msm_camera_i2c_rxdata(struct msm_camera_i2c_client *dev_client,
unsigned char *rxdata, int data_length)
@@ -63,6 +66,7 @@
int32_t rc = -EFAULT;
unsigned char buf[client->addr_type+data_type];
uint8_t len = 0;
+
if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
&& client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
|| (data_type != MSM_CAMERA_I2C_BYTE_DATA
@@ -71,33 +75,51 @@
S_I2C_DBG("%s reg addr = 0x%x data type: %d\n",
__func__, addr, data_type);
- if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
- buf[0] = addr;
- S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len, buf[len]);
- len = 1;
- } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
- buf[0] = addr >> BITS_PER_BYTE;
- buf[1] = addr;
- S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len, buf[len]);
- S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len+1, buf[len+1]);
- len = 2;
+ if (client->cci_client) {
+ struct msm_camera_cci_ctrl cci_ctrl;
+ struct msm_camera_i2c_reg_conf reg_conf_tbl;
+ reg_conf_tbl.reg_addr = addr;
+ reg_conf_tbl.reg_data = data;
+ cci_ctrl.cmd = MSM_CCI_I2C_WRITE;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_write_cfg.reg_conf_tbl = ®_conf_tbl;
+ cci_ctrl.cfg.cci_i2c_write_cfg.data_type = data_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.size = 1;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+ rc = cci_ctrl.status;
+ } else {
+ if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+ buf[0] = addr;
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len, buf[len]);
+ len = 1;
+ } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+ buf[0] = addr >> BITS_PER_BYTE;
+ buf[1] = addr;
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len, buf[len]);
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len+1, buf[len+1]);
+ len = 2;
+ }
+ S_I2C_DBG("Data: 0x%x\n", data);
+ if (data_type == MSM_CAMERA_I2C_BYTE_DATA) {
+ buf[len] = data;
+ S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
+ len += 1;
+ } else if (data_type == MSM_CAMERA_I2C_WORD_DATA) {
+ buf[len] = data >> BITS_PER_BYTE;
+ buf[len+1] = data;
+ S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
+ S_I2C_DBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+ len += 2;
+ }
+ rc = msm_camera_i2c_txdata(client, buf, len);
+ if (rc < 0)
+ S_I2C_DBG("%s fail\n", __func__);
}
- S_I2C_DBG("Data: 0x%x\n", data);
- if (data_type == MSM_CAMERA_I2C_BYTE_DATA) {
- buf[len] = data;
- S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
- len += 1;
- } else if (data_type == MSM_CAMERA_I2C_WORD_DATA) {
- buf[len] = data >> BITS_PER_BYTE;
- buf[len+1] = data;
- S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
- S_I2C_DBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
- len += 2;
- }
-
- rc = msm_camera_i2c_txdata(client, buf, len);
- if (rc < 0)
- S_I2C_DBG("%s fail\n", __func__);
return rc;
}
@@ -115,26 +137,44 @@
S_I2C_DBG("%s reg addr = 0x%x num bytes: %d\n",
__func__, addr, num_byte);
- if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
- buf[0] = addr;
- S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len, buf[len]);
- len = 1;
- } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
- buf[0] = addr >> BITS_PER_BYTE;
- buf[1] = addr;
- S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len, buf[len]);
- S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len+1, buf[len+1]);
- len = 2;
+ if (client->cci_client) {
+ struct msm_camera_cci_ctrl cci_ctrl;
+ struct msm_camera_i2c_reg_conf reg_conf_tbl[num_byte];
+ reg_conf_tbl[0].reg_addr = addr;
+ for (i = 0; i < num_byte; i++)
+ reg_conf_tbl[i].reg_data = data[i];
+ cci_ctrl.cmd = MSM_CCI_I2C_WRITE;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_write_cfg.reg_conf_tbl = reg_conf_tbl;
+ cci_ctrl.cfg.cci_i2c_write_cfg.size = num_byte;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+ rc = cci_ctrl.status;
+ } else {
+ if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+ buf[0] = addr;
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len, buf[len]);
+ len = 1;
+ } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+ buf[0] = addr >> BITS_PER_BYTE;
+ buf[1] = addr;
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len, buf[len]);
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len+1, buf[len+1]);
+ len = 2;
+ }
+ for (i = 0; i < num_byte; i++) {
+ buf[i+len] = data[i];
+ S_I2C_DBG("Byte %d: 0x%x\n", i+len, buf[i+len]);
+ S_I2C_DBG("Data: 0x%x\n", data[i]);
+ }
+ rc = msm_camera_i2c_txdata(client, buf, len+num_byte);
+ if (rc < 0)
+ S_I2C_DBG("%s fail\n", __func__);
}
- for (i = 0; i < num_byte; i++) {
- buf[i+len] = data[i];
- S_I2C_DBG("Byte %d: 0x%x\n", i+len, buf[i+len]);
- S_I2C_DBG("Data: 0x%x\n", data[i]);
- }
-
- rc = msm_camera_i2c_txdata(client, buf, len+num_byte);
- if (rc < 0)
- S_I2C_DBG("%s fail\n", __func__);
return rc;
}
@@ -302,65 +342,80 @@
{
int i;
int32_t rc = -EFAULT;
- for (i = 0; i < size; i++) {
- enum msm_camera_i2c_data_type dt;
- if (reg_conf_tbl->cmd_type == MSM_CAMERA_I2C_CMD_POLL) {
- rc = msm_camera_i2c_poll(client, reg_conf_tbl->reg_addr,
- reg_conf_tbl->reg_data, reg_conf_tbl->dt);
- } else {
- if (reg_conf_tbl->dt == 0)
- dt = data_type;
- else
- dt = reg_conf_tbl->dt;
-
- switch (dt) {
- case MSM_CAMERA_I2C_BYTE_DATA:
- case MSM_CAMERA_I2C_WORD_DATA:
- rc = msm_camera_i2c_write(
- client,
- reg_conf_tbl->reg_addr,
- reg_conf_tbl->reg_data, dt);
- break;
- case MSM_CAMERA_I2C_SET_BYTE_MASK:
- rc = msm_camera_i2c_set_mask(client,
+ if (client->cci_client) {
+ struct msm_camera_cci_ctrl cci_ctrl;
+ cci_ctrl.cmd = MSM_CCI_I2C_WRITE;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_write_cfg.reg_conf_tbl = reg_conf_tbl;
+ cci_ctrl.cfg.cci_i2c_write_cfg.data_type = data_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.size = size;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+ rc = cci_ctrl.status;
+ } else {
+ for (i = 0; i < size; i++) {
+ enum msm_camera_i2c_data_type dt;
+ if (reg_conf_tbl->cmd_type == MSM_CAMERA_I2C_CMD_POLL) {
+ rc = msm_camera_i2c_poll(client,
reg_conf_tbl->reg_addr,
reg_conf_tbl->reg_data,
- MSM_CAMERA_I2C_BYTE_DATA, 1);
- break;
- case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
- rc = msm_camera_i2c_set_mask(client,
- reg_conf_tbl->reg_addr,
- reg_conf_tbl->reg_data,
- MSM_CAMERA_I2C_BYTE_DATA, 0);
- break;
- case MSM_CAMERA_I2C_SET_WORD_MASK:
- rc = msm_camera_i2c_set_mask(client,
- reg_conf_tbl->reg_addr,
- reg_conf_tbl->reg_data,
- MSM_CAMERA_I2C_WORD_DATA, 1);
- break;
- case MSM_CAMERA_I2C_UNSET_WORD_MASK:
- rc = msm_camera_i2c_set_mask(client,
- reg_conf_tbl->reg_addr,
- reg_conf_tbl->reg_data,
- MSM_CAMERA_I2C_WORD_DATA, 0);
- break;
- case MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA:
- rc = msm_camera_i2c_set_write_mask_data(client,
- reg_conf_tbl->reg_addr,
- reg_conf_tbl->reg_data,
- reg_conf_tbl->mask,
- MSM_CAMERA_I2C_BYTE_DATA);
- break;
- default:
- pr_err("%s: Unsupport data type: %d\n",
- __func__, dt);
- break;
+ reg_conf_tbl->dt);
+ } else {
+ if (reg_conf_tbl->dt == 0)
+ dt = data_type;
+ else
+ dt = reg_conf_tbl->dt;
+ switch (dt) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ rc = msm_camera_i2c_write(
+ client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data, dt);
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ rc = msm_camera_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_BYTE_DATA, 1);
+ break;
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ rc = msm_camera_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_BYTE_DATA, 0);
+ break;
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ rc = msm_camera_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_WORD_DATA, 1);
+ break;
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ rc = msm_camera_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_WORD_DATA, 0);
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA:
+ rc = msm_camera_i2c_set_write_mask_data(
+ client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ reg_conf_tbl->mask,
+ MSM_CAMERA_I2C_BYTE_DATA);
+ break;
+ default:
+ pr_err("%s: Unsupport data type: %d\n",
+ __func__, dt);
+ break;
+ }
}
+ if (rc < 0)
+ break;
+ reg_conf_tbl++;
}
- if (rc < 0)
- break;
- reg_conf_tbl++;
}
return rc;
}
@@ -378,16 +433,30 @@
&& data_type != MSM_CAMERA_I2C_WORD_DATA))
return rc;
- if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
- buf[0] = addr;
- } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
- buf[0] = addr >> BITS_PER_BYTE;
- buf[1] = addr;
- }
- rc = msm_camera_i2c_rxdata(client, buf, data_type);
- if (rc < 0) {
- S_I2C_DBG("%s fail\n", __func__);
- return rc;
+ if (client->cci_client) {
+ struct msm_camera_cci_ctrl cci_ctrl;
+ cci_ctrl.cmd = MSM_CCI_I2C_READ;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = client->addr_type;
+ cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+ cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = data_type;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+ rc = cci_ctrl.status;
+ } else {
+ if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+ buf[0] = addr;
+ } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+ buf[0] = addr >> BITS_PER_BYTE;
+ buf[1] = addr;
+ }
+ rc = msm_camera_i2c_rxdata(client, buf, data_type);
+ if (rc < 0) {
+ S_I2C_DBG("%s fail\n", __func__);
+ return rc;
+ }
}
if (data_type == MSM_CAMERA_I2C_BYTE_DATA)
*data = buf[0];
@@ -410,16 +479,30 @@
|| num_byte == 0)
return rc;
- if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
- buf[0] = addr;
- } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
- buf[0] = addr >> BITS_PER_BYTE;
- buf[1] = addr;
- }
- rc = msm_camera_i2c_rxdata(client, buf, num_byte);
- if (rc < 0) {
- S_I2C_DBG("%s fail\n", __func__);
- return rc;
+ if (client->cci_client) {
+ struct msm_camera_cci_ctrl cci_ctrl;
+ cci_ctrl.cmd = MSM_CCI_I2C_READ;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = client->addr_type;
+ cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+ cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+ rc = cci_ctrl.status;
+ } else {
+ if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+ buf[0] = addr;
+ } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+ buf[0] = addr >> BITS_PER_BYTE;
+ buf[1] = addr;
+ }
+ rc = msm_camera_i2c_rxdata(client, buf, num_byte);
+ if (rc < 0) {
+ S_I2C_DBG("%s fail\n", __func__);
+ return rc;
+ }
}
S_I2C_DBG("%s addr = 0x%x", __func__, addr);
@@ -486,3 +569,17 @@
return rc;
}
+int32_t msm_sensor_cci_util(struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd)
+{
+ int32_t rc = 0;
+ struct msm_camera_cci_ctrl cci_ctrl;
+
+ CDBG("%s line %d\n", __func__, __LINE__);
+ cci_ctrl.cmd = cci_cmd;
+ cci_ctrl.cci_info = client->cci_client;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+ return cci_ctrl.status;
+}
diff --git a/drivers/media/video/msm/io/msm_camera_i2c.h b/drivers/media/video/msm/io/msm_camera_i2c.h
index 188a176..169a0b3 100644
--- a/drivers/media/video/msm/io/msm_camera_i2c.h
+++ b/drivers/media/video/msm/io/msm_camera_i2c.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <mach/camera.h>
+#include <media/v4l2-subdev.h>
#define CONFIG_MSM_CAMERA_I2C_DBG 0
@@ -25,39 +26,12 @@
#define S_I2C_DBG(fmt, args...) CDBG(fmt, ##args)
#endif
-enum msm_camera_i2c_reg_addr_type {
- MSM_CAMERA_I2C_BYTE_ADDR = 1,
- MSM_CAMERA_I2C_WORD_ADDR,
-};
-
struct msm_camera_i2c_client {
struct i2c_client *client;
+ struct msm_camera_cci_client *cci_client;
enum msm_camera_i2c_reg_addr_type addr_type;
};
-enum msm_camera_i2c_data_type {
- MSM_CAMERA_I2C_BYTE_DATA = 1,
- MSM_CAMERA_I2C_WORD_DATA,
- MSM_CAMERA_I2C_SET_BYTE_MASK,
- MSM_CAMERA_I2C_UNSET_BYTE_MASK,
- MSM_CAMERA_I2C_SET_WORD_MASK,
- MSM_CAMERA_I2C_UNSET_WORD_MASK,
- MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA,
-};
-
-enum msm_camera_i2c_cmd_type {
- MSM_CAMERA_I2C_CMD_WRITE,
- MSM_CAMERA_I2C_CMD_POLL,
-};
-
-struct msm_camera_i2c_reg_conf {
- uint16_t reg_addr;
- uint16_t reg_data;
- enum msm_camera_i2c_data_type dt;
- enum msm_camera_i2c_cmd_type cmd_type;
- int16_t mask;
-};
-
struct msm_camera_i2c_reg_tbl {
uint16_t reg_addr;
uint16_t reg_data;
@@ -130,4 +104,7 @@
int32_t msm_sensor_write_all_conf_array(struct msm_camera_i2c_client *client,
struct msm_camera_i2c_conf_array *array, uint16_t size);
+
+int32_t msm_sensor_cci_util(struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd);
#endif
diff --git a/drivers/media/video/msm/io/msm_io_8960.c b/drivers/media/video/msm/io/msm_io_8960.c
index f9c454a..699425a 100644
--- a/drivers/media/video/msm/io/msm_io_8960.c
+++ b/drivers/media/video/msm/io/msm_io_8960.c
@@ -87,6 +87,14 @@
} else
CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
break;
+ case S_LIVESHOT:
+ if (bus_perf_client) {
+ rc = msm_bus_scale_client_update_request(
+ bus_perf_client, 5);
+ CDBG("%s: S_LIVESHOT rc = %d\n", __func__, rc);
+ } else
+ CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
+ break;
case S_DEFAULT:
break;
default:
diff --git a/drivers/media/video/msm/msm.c b/drivers/media/video/msm/msm.c
index b67af4f..b14d4f6 100644
--- a/drivers/media/video/msm/msm.c
+++ b/drivers/media/video/msm/msm.c
@@ -92,6 +92,33 @@
return rc;
}
+static int msm_camera_v4l2_private_g_ctrl(struct file *f, void *pctx,
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
+{
+ int rc = -EINVAL;
+ struct msm_cam_v4l2_device *pcam = video_drvdata(f);
+ struct msm_cam_v4l2_dev_inst *pcam_inst;
+ pcam_inst = container_of(f->private_data,
+ struct msm_cam_v4l2_dev_inst, eventHandle);
+
+ WARN_ON(pctx != f->private_data);
+
+ mutex_lock(&pcam->vid_lock);
+ switch (ioctl_ptr->id) {
+ case MSM_V4L2_PID_INST_HANDLE:
+ COPY_TO_USER(rc, (void __user *)ioctl_ptr->ioctl_ptr,
+ (void *)&pcam_inst->inst_handle, sizeof(uint32_t));
+ if (rc)
+ ERR_COPY_TO_USER();
+ break;
+ default:
+ pr_err("%s Unsupported ioctl %d ", __func__, ioctl_ptr->id);
+ break;
+ }
+ mutex_unlock(&pcam->vid_lock);
+ return rc;
+}
+
static int msm_camera_v4l2_g_ctrl(struct file *f, void *pctx,
struct v4l2_control *c)
{
@@ -686,9 +713,13 @@
struct msm_cam_v4l2_dev_inst *pcam_inst;
pcam_inst = container_of(f->private_data,
struct msm_cam_v4l2_dev_inst, eventHandle);
- pcam_inst->image_mode = a->parm.capture.extendedmode;
+ pcam_inst->image_mode = (a->parm.capture.extendedmode & 0x7F);
+ SET_IMG_MODE(pcam_inst->inst_handle, pcam_inst->image_mode);
+ SET_VIDEO_INST_IDX(pcam_inst->inst_handle, pcam_inst->my_index);
pcam_inst->pcam->dev_inst_map[pcam_inst->image_mode] = pcam_inst;
pcam_inst->path = msm_vidbuf_get_path(pcam_inst->image_mode);
+ rc = msm_cam_server_config_interface_map(pcam_inst->image_mode,
+ pcam_inst->pcam->mctl_handle);
D("%spath=%d,rc=%d\n", __func__,
pcam_inst->path, rc);
return rc;
@@ -745,6 +776,12 @@
case MSM_CAM_V4L2_IOCTL_PRIVATE_S_CTRL:
rc = msm_camera_v4l2_private_s_ctrl(file, fh, ioctl_ptr);
break;
+ case MSM_CAM_V4L2_IOCTL_PRIVATE_G_CTRL:
+ rc = msm_camera_v4l2_private_g_ctrl(file, fh, ioctl_ptr);
+ break;
+ default:
+ pr_err("%s Unsupported ioctl cmd %d ", __func__, cmd);
+ break;
}
return rc;
}
@@ -799,7 +836,6 @@
int ion_client_created = 0;
#endif
int server_q_idx = 0;
- /*struct msm_isp_ops *p_isp = 0;*/
/* get the video device */
struct msm_cam_v4l2_device *pcam = video_drvdata(f);
struct msm_cam_v4l2_dev_inst *pcam_inst;
@@ -853,7 +889,10 @@
goto msm_cam_server_begin_session_failed;
}
pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
-
+ if (!pmctl) {
+ pr_err("%s mctl ptr is null ", __func__);
+ goto msm_cam_server_begin_session_failed;
+ }
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
pmctl->client = msm_ion_client_create(-1, "camera");
kref_init(&pmctl->refcount);
@@ -876,13 +915,8 @@
}
pmctl->pcam_ptr = pcam;
- rc = msm_setup_v4l2_event_queue(&pcam_inst->eventHandle,
+ msm_setup_v4l2_event_queue(&pcam_inst->eventHandle,
pcam->pvdev);
- if (rc < 0) {
- pr_err("%s: msm_setup_v4l2_event_queue failed %d",
- __func__, rc);
- goto mctl_event_q_setup_failed;
- }
}
pcam_inst->vbqueue_initialized = 0;
rc = 0;
@@ -905,12 +939,10 @@
return rc;
msm_send_open_server_failed:
- v4l2_fh_del(&pcam_inst->eventHandle);
- v4l2_fh_exit(&pcam_inst->eventHandle);
-mctl_event_q_setup_failed:
+ msm_destroy_v4l2_event_queue(&pcam_inst->eventHandle);
+
if (pmctl->mctl_release)
- if (pmctl->mctl_release(pmctl) < 0)
- pr_err("%s: mctl_release failed\n", __func__);
+ pmctl->mctl_release(pmctl);
mctl_open_failed:
if (pcam->use_count == 1) {
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
@@ -1034,11 +1066,8 @@
if (pcam_inst->streamon) {
/*something went wrong since instance
is closing without streamoff*/
- if (pmctl->mctl_release) {
- rc = pmctl->mctl_release(pmctl);
- if (rc < 0)
- pr_err("mctl_release fails %d\n", rc);
- }
+ if (pmctl->mctl_release)
+ pmctl->mctl_release(pmctl);
pmctl->mctl_release = NULL;/*so that it isn't closed again*/
}
@@ -1051,10 +1080,11 @@
D("%s index %d nodeid %d count %d\n", __func__, pcam_inst->my_index,
pcam->vnode_id, pcam->use_count);
pcam->dev_inst[pcam_inst->my_index] = NULL;
- if (pcam_inst->my_index == 0) {
- v4l2_fh_del(&pcam_inst->eventHandle);
- v4l2_fh_exit(&pcam_inst->eventHandle);
- }
+ if (pcam_inst->my_index == 0)
+ msm_destroy_v4l2_event_queue(&pcam_inst->eventHandle);
+
+ CLR_VIDEO_INST_IDX(pcam_inst->inst_handle);
+ CLR_IMG_MODE(pcam_inst->inst_handle);
mutex_unlock(&pcam_inst->inst_lock);
mutex_destroy(&pcam_inst->inst_lock);
kfree(pcam_inst);
@@ -1067,11 +1097,8 @@
pr_err("msm_send_close_server failed %d\n", rc);
}
- if (pmctl->mctl_release) {
- rc = pmctl->mctl_release(pmctl);
- if (rc < 0)
- pr_err("mctl_release fails %d\n", rc);
- }
+ if (pmctl->mctl_release)
+ pmctl->mctl_release(pmctl);
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
kref_put(&pmctl->refcount, msm_release_ion_client);
@@ -1137,18 +1164,6 @@
return 0;
}
-int msm_setup_v4l2_event_queue(struct v4l2_fh *eventHandle,
- struct video_device *pvdev)
-{
- int rc = 0;
- /* v4l2_fh support */
- spin_lock_init(&pvdev->fh_lock);
- INIT_LIST_HEAD(&pvdev->fh_list);
-
- v4l2_fh_init(eventHandle, pvdev);
- v4l2_fh_add(eventHandle);
- return rc;
-}
static struct v4l2_file_operations g_msm_fops = {
.owner = THIS_MODULE,
@@ -1201,7 +1216,7 @@
pvdev->fops = &g_msm_fops;
pvdev->ioctl_ops = &g_msm_ioctl_ops;
pvdev->minor = -1;
- pvdev->vfl_type = 1;
+ pvdev->vfl_type = VFL_TYPE_GRABBER;
media_entity_init(&pvdev->entity, 0, NULL, 0);
pvdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
diff --git a/drivers/media/video/msm/msm.h b/drivers/media/video/msm/msm.h
index 08278ff..a2c21bd 100644
--- a/drivers/media/video/msm/msm.h
+++ b/drivers/media/video/msm/msm.h
@@ -60,7 +60,9 @@
#define MSM_I2C_MUX_DRV_NAME "msm_cam_i2c_mux"
#define MSM_IRQ_ROUTER_DRV_NAME "msm_cam_irq_router"
#define MSM_CPP_DRV_NAME "msm_cpp"
+#define MSM_CCI_DRV_NAME "msm_cci"
+#define MAX_NUM_SENSOR_DEV 3
#define MAX_NUM_CSIPHY_DEV 3
#define MAX_NUM_CSID_DEV 4
#define MAX_NUM_CSIC_DEV 3
@@ -70,22 +72,7 @@
#define MAX_NUM_VPE_DEV 1
#define MAX_NUM_JPEG_DEV 3
#define MAX_NUM_CPP_DEV 1
-
-enum msm_cam_subdev_type {
- CSIPHY_DEV,
- CSID_DEV,
- CSIC_DEV,
- ISPIF_DEV,
- VFE_DEV,
- AXI_DEV,
- VPE_DEV,
- SENSOR_DEV,
- ACTUATOR_DEV,
- EEPROM_DEV,
- GESTURE_DEV,
- IRQ_ROUTER_DEV,
- CPP_DEV,
-};
+#define MAX_NUM_CCI_DEV 1
/* msm queue management APIs*/
@@ -137,7 +124,7 @@
struct msm_free_buf {
uint8_t num_planes;
- int32_t image_mode;
+ uint32_t inst_handle;
uint32_t ch_paddr[VIDEO_MAX_PLANES];
uint32_t vb;
};
@@ -161,11 +148,11 @@
NOTIFY_VFE_MSG_COMP_STATS, /* arg = struct msm_stats_buf */
NOTIFY_VFE_BUF_EVT, /* arg = struct msm_vfe_resp */
NOTIFY_VFE_CAMIF_ERROR,
+ NOTIFY_VFE_SOF_COUNT, /*arg = int*/
NOTIFY_PCLK_CHANGE, /* arg = pclk */
NOTIFY_CSIPHY_CFG, /* arg = msm_camera_csiphy_params */
NOTIFY_CSID_CFG, /* arg = msm_camera_csid_params */
NOTIFY_CSIC_CFG, /* arg = msm_camera_csic_params */
- NOTIFY_VFE_BUF_FREE_EVT, /* arg = msm_camera_csic_params */
NOTIFY_VFE_IRQ,
NOTIFY_AXI_IRQ,
NOTIFY_GESTURE_EVT, /* arg = v4l2_event */
@@ -250,7 +237,7 @@
int (*mctl_cb)(void);
int (*mctl_cmd)(struct msm_cam_media_controller *p_mctl,
unsigned int cmd, unsigned long arg);
- int (*mctl_release)(struct msm_cam_media_controller *p_mctl);
+ void (*mctl_release)(struct msm_cam_media_controller *p_mctl);
int (*mctl_buf_init)(struct msm_cam_v4l2_dev_inst *pcam);
int (*mctl_vbqueue_init)(struct msm_cam_v4l2_dev_inst *pcam,
struct vb2_queue *q, enum v4l2_buf_type type);
@@ -301,21 +288,15 @@
struct msm_isp_ops {
char *config_dev_name;
- /*int (*isp_init)(struct msm_cam_v4l2_device *pcam);*/
- int (*isp_open)(struct v4l2_subdev *sd,
- struct msm_cam_media_controller *mctl);
int (*isp_config)(struct msm_cam_media_controller *pmctl,
unsigned int cmd, unsigned long arg);
int (*isp_notify)(struct v4l2_subdev *sd,
unsigned int notification, void *arg);
- void (*isp_release)(struct msm_cam_media_controller *mctl,
- struct v4l2_subdev *sd);
int (*isp_pp_cmd)(struct msm_cam_media_controller *pmctl,
struct msm_mctl_pp_cmd, void *data);
/* vfe subdevice */
struct v4l2_subdev *sd;
- struct v4l2_subdev *sd_vpe;
};
struct msm_isp_buf_info {
@@ -339,7 +320,7 @@
enum v4l2_mbus_pixelcode sensor_pxlcode;
struct msm_cam_v4l2_device *pcam;
int my_index;
- int image_mode;
+ uint32_t image_mode;
int path;
int buf_count;
/* buffer offsets, if any */
@@ -351,6 +332,7 @@
struct img_plane_info plane_info;
int vbqueue_initialized;
struct mutex inst_lock;
+ uint32_t inst_handle;
};
struct msm_cam_mctl_node {
@@ -411,9 +393,9 @@
struct cdev config_cdev;
struct v4l2_queue_util config_stat_event_queue;
int use_count;
- /*struct msm_isp_ops* isp_subdev;*/
struct msm_cam_media_controller *p_mctl;
struct msm_mem_map_info mem_map;
+ int dev_num;
};
struct msm_cam_subdev_info {
@@ -503,6 +485,15 @@
struct intr_table_entry comp_intr_tbl[CAMERA_SS_IRQ_MAX];
};
+struct interface_map {
+ /* The interafce a particular stream belongs to.
+ * PIX0, RDI0, RDI1, or RDI2
+ */
+ int interface;
+ /* The handle of the mctl intstance interface runs on */
+ uint32_t mctl_handle;
+};
+
/* abstract camera server device for all sensor successfully probed*/
struct msm_cam_server_dev {
@@ -518,7 +509,7 @@
/* info of configs successfully created*/
struct msm_cam_config_dev_info config_info;
/* active working camera device - only one allowed at this time*/
- struct msm_cam_v4l2_device *pcam_active;
+ struct msm_cam_v4l2_device *pcam_active[MAX_NUM_ACTIVE_CAMERA];
/* number of camera devices opened*/
atomic_t number_pcam_active;
struct v4l2_queue_util server_command_queue;
@@ -532,6 +523,8 @@
struct msm_cam_server_mctl_inst mctl[MAX_NUM_ACTIVE_CAMERA];
uint32_t mctl_handle_cnt;
+ struct interface_map interface_map_table[INTF_MAX];
+
int use_count;
/* all the registered ISP subdevice*/
struct msm_isp_ops *isp_subdev[MSM_MAX_CAMERA_CONFIGS];
@@ -540,16 +533,18 @@
struct mutex server_lock;
struct mutex server_queue_lock;
/*v4l2 subdevs*/
+ struct v4l2_subdev *sensor_device[MAX_NUM_SENSOR_DEV];
struct v4l2_subdev *csiphy_device[MAX_NUM_CSIPHY_DEV];
struct v4l2_subdev *csid_device[MAX_NUM_CSID_DEV];
struct v4l2_subdev *csic_device[MAX_NUM_CSIC_DEV];
- struct v4l2_subdev *ispif_device;
+ struct v4l2_subdev *ispif_device[MAX_NUM_ISPIF_DEV];
struct v4l2_subdev *vfe_device[MAX_NUM_VFE_DEV];
struct v4l2_subdev *axi_device[MAX_NUM_AXI_DEV];
struct v4l2_subdev *vpe_device[MAX_NUM_VPE_DEV];
struct v4l2_subdev *gesture_device;
struct v4l2_subdev *cpp_device[MAX_NUM_CPP_DEV];
struct v4l2_subdev *irqr_device;
+ struct v4l2_subdev *cci_device;
spinlock_t intr_table_lock;
struct irqmgr_intr_lkup_table irq_lkup_table;
@@ -562,6 +557,18 @@
struct msm_cam_server_irqmap_entry hw_irqmap[CAMERA_SS_IRQ_MAX];
};
+enum msm_cam_buf_lookup_type {
+ BUF_LOOKUP_INVALID,
+ BUF_LOOKUP_BY_IMG_MODE,
+ BUF_LOOKUP_BY_INST_HANDLE,
+};
+
+struct msm_cam_buf_handle {
+ uint16_t buf_lookup_type;
+ uint32_t image_mode;
+ uint32_t inst_handle;
+};
+
/* ISP related functions */
void msm_isp_vfe_dev_init(struct v4l2_subdev *vd);
/*
@@ -577,78 +584,69 @@
int msm_mctl_buf_init(struct msm_cam_v4l2_device *pcam);
int msm_mctl_init_user_formats(struct msm_cam_v4l2_device *pcam);
int msm_mctl_buf_done(struct msm_cam_media_controller *pmctl,
- int msg_type, struct msm_free_buf *buf,
- uint32_t frame_id);
+ struct msm_cam_buf_handle *buf_handle,
+ struct msm_free_buf *buf,
+ uint32_t frame_id);
int msm_mctl_buf_done_pp(struct msm_cam_media_controller *pmctl,
- int msg_type, struct msm_free_buf *frame, int dirty, int node_type);
+ struct msm_cam_buf_handle *buf_handle,
+ struct msm_free_buf *frame, int dirty, int node_type);
int msm_mctl_reserve_free_buf(struct msm_cam_media_controller *pmctl,
- struct msm_cam_v4l2_dev_inst *pcam_inst,
- int path, struct msm_free_buf *free_buf);
+ struct msm_cam_v4l2_dev_inst *pcam_inst,
+ struct msm_cam_buf_handle *buf_handle,
+ struct msm_free_buf *free_buf);
int msm_mctl_release_free_buf(struct msm_cam_media_controller *pmctl,
- struct msm_cam_v4l2_dev_inst *pcam_inst,
- int path, struct msm_free_buf *free_buf);
+ struct msm_cam_v4l2_dev_inst *pcam_inst,
+ struct msm_free_buf *free_buf);
/*Memory(PMEM) functions*/
int msm_register_pmem(struct hlist_head *ptype, void __user *arg,
- struct ion_client *client);
+ struct ion_client *client);
int msm_pmem_table_del(struct hlist_head *ptype, void __user *arg,
- struct ion_client *client);
+ struct ion_client *client);
int msm_pmem_region_get_phy_addr(struct hlist_head *ptype,
struct msm_mem_map_info *mem_map, int32_t *phyaddr);
uint8_t msm_pmem_region_lookup(struct hlist_head *ptype,
int pmem_type, struct msm_pmem_region *reg, uint8_t maxcount);
uint8_t msm_pmem_region_lookup_2(struct hlist_head *ptype,
- int pmem_type,
- struct msm_pmem_region *reg,
- uint8_t maxcount);
+ int pmem_type, struct msm_pmem_region *reg,
+ uint8_t maxcount);
unsigned long msm_pmem_stats_vtop_lookup(
- struct msm_cam_media_controller *mctl,
- unsigned long buffer,
- int fd);
+ struct msm_cam_media_controller *mctl,
+ unsigned long buffer, int fd);
unsigned long msm_pmem_stats_ptov_lookup(
struct msm_cam_media_controller *mctl,
unsigned long addr, int *fd);
-int msm_vfe_subdev_init(struct v4l2_subdev *sd,
- struct msm_cam_media_controller *mctl);
+int msm_vfe_subdev_init(struct v4l2_subdev *sd);
void msm_vfe_subdev_release(struct v4l2_subdev *sd);
int msm_isp_subdev_ioctl(struct v4l2_subdev *sd,
struct msm_vfe_cfg_cmd *cfgcmd, void *data);
-int msm_vpe_subdev_init(struct v4l2_subdev *sd,
- struct msm_cam_media_controller *mctl);
+int msm_vpe_subdev_init(struct v4l2_subdev *sd);
int msm_gemini_subdev_init(struct v4l2_subdev *gemini_sd);
void msm_vpe_subdev_release(void);
void msm_gemini_subdev_release(struct v4l2_subdev *gemini_sd);
int msm_mctl_is_pp_msg_type(struct msm_cam_media_controller *p_mctl,
int msg_type);
int msm_mctl_do_pp(struct msm_cam_media_controller *p_mctl,
- int msg_type, uint32_t y_phy, uint32_t frame_id);
+ int msg_type, uint32_t y_phy, uint32_t frame_id);
int msm_mctl_pp_ioctl(struct msm_cam_media_controller *p_mctl,
- unsigned int cmd, unsigned long arg);
+ unsigned int cmd, unsigned long arg);
int msm_mctl_pp_notify(struct msm_cam_media_controller *pmctl,
- struct msm_mctl_pp_frame_info *pp_frame_info);
+ struct msm_mctl_pp_frame_info *pp_frame_info);
int msm_mctl_img_mode_to_inst_index(struct msm_cam_media_controller *pmctl,
- int out_type, int node_type);
+ int out_type, int node_type);
struct msm_frame_buffer *msm_mctl_buf_find(
struct msm_cam_media_controller *pmctl,
struct msm_cam_v4l2_dev_inst *pcam_inst, int del_buf,
- int msg_type, struct msm_free_buf *fbuf);
+ struct msm_free_buf *fbuf);
void msm_mctl_gettimeofday(struct timeval *tv);
-struct msm_frame_buffer *msm_mctl_get_free_buf(
- struct msm_cam_media_controller *pmctl,
- int msg_type);
-int msm_mctl_put_free_buf(
- struct msm_cam_media_controller *pmctl,
- int msg_type, struct msm_frame_buffer *buf);
int msm_mctl_check_pp(struct msm_cam_media_controller *p_mctl,
- int msg_type, int *pp_divert_type, int *pp_type);
+ int msg_type, int *pp_divert_type, int *pp_type);
int msm_mctl_do_pp_divert(
struct msm_cam_media_controller *p_mctl,
- int msg_type, struct msm_free_buf *fbuf,
+ struct msm_cam_buf_handle *buf_handle,
+ struct msm_free_buf *fbuf,
uint32_t frame_id, int pp_type);
-int msm_mctl_buf_del(struct msm_cam_media_controller *pmctl,
- int msg_type,
- struct msm_frame_buffer *my_buf);
int msm_mctl_pp_release_free_frame(
struct msm_cam_media_controller *p_mctl,
void __user *arg);
@@ -656,26 +654,29 @@
struct msm_cam_media_controller *p_mctl,
void __user *arg);
int msm_mctl_set_pp_key(struct msm_cam_media_controller *p_mctl,
- void __user *arg);
+ void __user *arg);
int msm_mctl_pp_done(
struct msm_cam_media_controller *p_mctl,
void __user *arg);
int msm_mctl_pp_divert_done(
struct msm_cam_media_controller *p_mctl,
void __user *arg);
-int msm_setup_v4l2_event_queue(struct v4l2_fh *eventHandle,
- struct video_device *pvdev);
+void msm_setup_v4l2_event_queue(struct v4l2_fh *eventHandle,
+ struct video_device *pvdev);
+void msm_destroy_v4l2_event_queue(struct v4l2_fh *eventHandle);
int msm_setup_mctl_node(struct msm_cam_v4l2_device *pcam);
struct msm_cam_v4l2_dev_inst *msm_mctl_get_pcam_inst(
- struct msm_cam_media_controller *pmctl,
- int image_mode);
+ struct msm_cam_media_controller *pmctl,
+ struct msm_cam_buf_handle *buf_handle);
int msm_mctl_buf_return_buf(struct msm_cam_media_controller *pmctl,
- int image_mode, struct msm_frame_buffer *buf);
+ int image_mode, struct msm_frame_buffer *buf);
int msm_mctl_pp_mctl_divert_done(struct msm_cam_media_controller *p_mctl,
- void __user *arg);
+ void __user *arg);
void msm_release_ion_client(struct kref *ref);
int msm_cam_register_subdev_node(struct v4l2_subdev *sd,
struct msm_cam_subdev_info *sd_info);
+int msm_mctl_find_sensor_subdevs(struct msm_cam_media_controller *p_mctl,
+ int core_index);
int msm_server_open_client(int *p_qidx);
int msm_server_send_ctrl(struct msm_ctrl_cmd *out, int ctrl_id);
int msm_server_close_client(int idx);
@@ -683,7 +684,7 @@
int *p_active);
int msm_cam_server_close_mctl_session(struct msm_cam_v4l2_device *pcam);
long msm_v4l2_evt_notify(struct msm_cam_media_controller *mctl,
- unsigned int cmd, unsigned long evt);
+ unsigned int cmd, unsigned long evt);
int msm_mctl_pp_get_vpe_buf_info(struct msm_mctl_pp_frame_info *zoom);
void msm_queue_init(struct msm_device_queue *queue, const char *name);
void msm_enqueue(struct msm_device_queue *queue, struct list_head *entry);
diff --git a/drivers/media/video/msm/msm_camirq_router.c b/drivers/media/video/msm/msm_camirq_router.c
index 52dd175..25a561f 100644
--- a/drivers/media/video/msm/msm_camirq_router.c
+++ b/drivers/media/video/msm/msm_camirq_router.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <mach/irqs.h>
#include <media/msm_isp.h>
#include <media/v4l2-device.h>
@@ -207,6 +208,10 @@
v4l2_set_subdevdata(&irqrouter_ctrl->subdev, irqrouter_ctrl);
irqrouter_ctrl->pdev = pdev;
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+
msm_irqrouter_send_default_irqmap(irqrouter_ctrl);
media_entity_init(&irqrouter_ctrl->subdev.entity, 0, NULL, 0);
@@ -237,16 +242,27 @@
static int __exit irqrouter_exit(struct platform_device *pdev)
{
+ struct v4l2_subdev *subdev = dev_get_drvdata(&pdev->dev);
+ struct irqrouter_ctrl_type *irqrouter_ctrl =
+ v4l2_get_subdevdata(subdev);
kfree(irqrouter_ctrl);
return 0;
}
+static const struct of_device_id msm_irqrouter_dt_match[] = {
+ {.compatible = "qcom,irqrouter"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_irqrouter_dt_match);
+
static struct platform_driver msm_irqrouter_driver = {
.probe = irqrouter_probe,
.remove = irqrouter_exit,
.driver = {
.name = MSM_IRQ_ROUTER_DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = msm_irqrouter_dt_match,
},
};
diff --git a/drivers/media/video/msm/msm_isp.c b/drivers/media/video/msm/msm_isp.c
index 5fcb62b..3d94afd 100644
--- a/drivers/media/video/msm/msm_isp.c
+++ b/drivers/media/video/msm/msm_isp.c
@@ -67,16 +67,16 @@
}
}
-static int msm_isp_notify_VFE_BUF_FREE_EVT(struct v4l2_subdev *sd, void *arg)
+static int msm_isp_notify_VFE_SOF_COUNT_EVT(struct v4l2_subdev *sd, void *arg)
{
struct msm_vfe_cfg_cmd cfgcmd;
struct msm_camvfe_params vfe_params;
int rc;
- cfgcmd.cmd_type = CMD_VFE_BUFFER_RELEASE;
+ cfgcmd.cmd_type = CMD_VFE_SOF_COUNT_UPDATE;
cfgcmd.value = NULL;
vfe_params.vfe_cfg = &cfgcmd;
- vfe_params.data = NULL;
+ vfe_params.data = arg;
rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
return 0;
}
@@ -168,7 +168,9 @@
struct msm_cam_v4l2_device *pcam = pmctl->pcam_ptr;
struct msm_frame_info *frame_info =
(struct msm_frame_info *)vdata->evt_msg.data;
- uint32_t vfe_id, image_mode;
+ uint32_t vfe_id;
+ struct msm_cam_buf_handle buf_handle;
+
if (!pcam) {
pr_debug("%s pcam is null. return\n", __func__);
msm_isp_sync_free(vdata);
@@ -176,39 +178,41 @@
}
if (frame_info) {
vfe_id = frame_info->path;
- image_mode = frame_info->image_mode;
+ buf_handle.buf_lookup_type = BUF_LOOKUP_BY_INST_HANDLE;
+ buf_handle.inst_handle = frame_info->inst_handle;
} else {
vfe_id = vdata->evt_msg.msg_id;
- image_mode = msm_isp_vfe_msg_to_img_mode(pmctl, vfe_id);
+ buf_handle.buf_lookup_type = BUF_LOOKUP_BY_IMG_MODE;
+ buf_handle.image_mode =
+ msm_isp_vfe_msg_to_img_mode(pmctl, vfe_id);
}
switch (vdata->type) {
- case VFE_MSG_V32_START:
- case VFE_MSG_V32_START_RECORDING:
- case VFE_MSG_V2X_PREVIEW:
+ case VFE_MSG_START:
+ case VFE_MSG_START_RECORDING:
+ case VFE_MSG_PREVIEW:
D("%s Got V32_START_*: Getting ping addr id = %d",
__func__, vfe_id);
msm_mctl_reserve_free_buf(pmctl, NULL,
- image_mode, &free_buf);
+ &buf_handle, &free_buf);
cfgcmd.cmd_type = CMD_CONFIG_PING_ADDR;
cfgcmd.value = &vfe_id;
vfe_params.vfe_cfg = &cfgcmd;
vfe_params.data = (void *)&free_buf;
rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
msm_mctl_reserve_free_buf(pmctl, NULL,
- image_mode, &free_buf);
+ &buf_handle, &free_buf);
cfgcmd.cmd_type = CMD_CONFIG_PONG_ADDR;
cfgcmd.value = &vfe_id;
vfe_params.vfe_cfg = &cfgcmd;
vfe_params.data = (void *)&free_buf;
rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
break;
- case VFE_MSG_V32_CAPTURE:
- case VFE_MSG_V2X_CAPTURE:
+ case VFE_MSG_CAPTURE:
pr_debug("%s Got V32_CAPTURE: getting buffer for id = %d",
__func__, vfe_id);
msm_mctl_reserve_free_buf(pmctl, NULL,
- image_mode, &free_buf);
+ &buf_handle, &free_buf);
cfgcmd.cmd_type = CMD_CONFIG_PING_ADDR;
cfgcmd.value = &vfe_id;
vfe_params.vfe_cfg = &cfgcmd;
@@ -216,7 +220,7 @@
rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
temp_free_buf = free_buf;
if (msm_mctl_reserve_free_buf(pmctl, NULL,
- image_mode, &free_buf)) {
+ &buf_handle, &free_buf)) {
/* Write the same buffer into PONG */
free_buf = temp_free_buf;
}
@@ -226,8 +230,8 @@
vfe_params.data = (void *)&free_buf;
rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
break;
- case VFE_MSG_V32_JPEG_CAPTURE:
- D("%s:VFE_MSG_V32_JPEG_CAPTURE vdata->type %d\n", __func__,
+ case VFE_MSG_JPEG_CAPTURE:
+ D("%s:VFE_MSG_JPEG_CAPTURE vdata->type %d\n", __func__,
vdata->type);
free_buf.num_planes = 2;
free_buf.ch_paddr[0] = pmctl->ping_imem_y;
@@ -236,7 +240,7 @@
cfgcmd.value = &vfe_id;
vfe_params.vfe_cfg = &cfgcmd;
vfe_params.data = (void *)&free_buf;
- D("%s:VFE_MSG_V32_JPEG_CAPTURE y_ping=%x cbcr_ping=%x\n",
+ D("%s:VFE_MSG_JPEG_CAPTURE y_ping=%x cbcr_ping=%x\n",
__func__, free_buf.ch_paddr[0], free_buf.ch_paddr[1]);
rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
/* Write the same buffer into PONG */
@@ -246,7 +250,7 @@
cfgcmd.value = &vfe_id;
vfe_params.vfe_cfg = &cfgcmd;
vfe_params.data = (void *)&free_buf;
- D("%s:VFE_MSG_V32_JPEG_CAPTURE y_pong=%x cbcr_pong=%x\n",
+ D("%s:VFE_MSG_JPEG_CAPTURE y_pong=%x cbcr_pong=%x\n",
__func__, free_buf.ch_paddr[0], free_buf.ch_paddr[1]);
rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
break;
@@ -254,7 +258,7 @@
D("%s Got OUTPUT_IRQ: Getting free buf id = %d",
__func__, vfe_id);
msm_mctl_reserve_free_buf(pmctl, NULL,
- image_mode, &free_buf);
+ &buf_handle, &free_buf);
cfgcmd.cmd_type = CMD_CONFIG_FREE_BUF_ADDR;
cfgcmd.value = &vfe_id;
vfe_params.vfe_cfg = &cfgcmd;
@@ -290,8 +294,8 @@
if (notification == NOTIFY_VFE_BUF_EVT)
return msm_isp_notify_VFE_BUF_EVT(sd, arg);
- if (notification == NOTIFY_VFE_BUF_FREE_EVT)
- return msm_isp_notify_VFE_BUF_FREE_EVT(sd, arg);
+ if (notification == NOTIFY_VFE_SOF_COUNT)
+ return msm_isp_notify_VFE_SOF_COUNT_EVT(sd, arg);
isp_event = kzalloc(sizeof(struct msm_isp_event_ctrl), GFP_ATOMIC);
if (!isp_event) {
@@ -320,10 +324,10 @@
}
case NOTIFY_VFE_MSG_OUT: {
uint8_t msgid;
- int32_t image_mode = -1;
+ struct msm_cam_buf_handle buf_handle;
struct isp_msg_output *isp_output =
(struct isp_msg_output *)arg;
- if (isp_output->buf.image_mode < 0) {
+ if (!isp_output->buf.inst_handle) {
switch (isp_output->output_id) {
case MSG_ID_OUTPUT_P:
msgid = VFE_MSG_OUTPUT_P;
@@ -356,19 +360,22 @@
rc = -EINVAL;
break;
}
- if (!rc)
- image_mode =
+ if (!rc) {
+ buf_handle.buf_lookup_type =
+ BUF_LOOKUP_BY_IMG_MODE;
+ buf_handle.image_mode =
msm_isp_vfe_msg_to_img_mode(pmctl, msgid);
+ }
} else {
- image_mode = isp_output->buf.image_mode;
+ buf_handle.buf_lookup_type = BUF_LOOKUP_BY_INST_HANDLE;
+ buf_handle.inst_handle = isp_output->buf.inst_handle;
}
isp_event->isp_data.isp_msg.msg_id =
isp_output->output_id;
isp_event->isp_data.isp_msg.frame_id =
isp_output->frameCounter;
buf = isp_output->buf;
- BUG_ON(image_mode < 0);
- msm_mctl_buf_done(pmctl, image_mode,
+ msm_mctl_buf_done(pmctl, &buf_handle,
&buf, isp_output->frameCounter);
}
break;
@@ -416,10 +423,12 @@
stats.buf_idx = isp_stats->buf_idx;
switch (isp_stats->id) {
case MSG_ID_STATS_AEC:
+ case MSG_ID_STATS_BG:
stats.aec.buff = stats.buffer;
stats.aec.fd = stats.fd;
break;
case MSG_ID_STATS_AF:
+ case MSG_ID_STATS_BF:
stats.af.buff = stats.buffer;
stats.af.fd = stats.fd;
break;
@@ -439,6 +448,10 @@
stats.cs.buff = stats.buffer;
stats.cs.fd = stats.fd;
break;
+ case MSG_ID_STATS_BHIST:
+ stats.skin.buff = stats.buffer;
+ stats.skin.fd = stats.fd;
+ break;
case MSG_ID_STATS_AWB_AEC:
break;
default:
@@ -486,68 +499,6 @@
return msm_isp_notify_vfe(sd, notification, arg);
}
-/* This function is called by open() function, so we need to init HW*/
-static int msm_isp_open(struct v4l2_subdev *sd,
- struct msm_cam_media_controller *mctl)
-{
- /* init vfe and senor, register sync callbacks for init*/
- int rc = 0;
- D("%s\n", __func__);
- if (!mctl) {
- pr_err("%s: param is NULL", __func__);
- return -EINVAL;
- }
-
- rc = msm_iommu_map_contig_buffer(
- (unsigned long)IMEM_Y_PING_OFFSET, CAMERA_DOMAIN, GEN_POOL,
- ((IMEM_Y_SIZE + IMEM_CBCR_SIZE + 4095) & (~4095)),
- SZ_4K, IOMMU_WRITE | IOMMU_READ,
- (unsigned long *)&mctl->ping_imem_y);
- mctl->ping_imem_cbcr = mctl->ping_imem_y + IMEM_Y_SIZE;
- if (rc < 0) {
- pr_err("%s: ping iommu mapping returned error %d\n",
- __func__, rc);
- mctl->ping_imem_y = 0;
- mctl->ping_imem_cbcr = 0;
- }
- msm_iommu_map_contig_buffer(
- (unsigned long)IMEM_Y_PONG_OFFSET, CAMERA_DOMAIN, GEN_POOL,
- ((IMEM_Y_SIZE + IMEM_CBCR_SIZE + 4095) & (~4095)),
- SZ_4K, IOMMU_WRITE | IOMMU_READ,
- (unsigned long *)&mctl->pong_imem_y);
- mctl->pong_imem_cbcr = mctl->pong_imem_y + IMEM_Y_SIZE;
- if (rc < 0) {
- pr_err("%s: pong iommu mapping returned error %d\n",
- __func__, rc);
- mctl->pong_imem_y = 0;
- mctl->pong_imem_cbcr = 0;
- }
-
- rc = msm_vfe_subdev_init(sd, mctl);
- if (rc < 0) {
- pr_err("%s: vfe_init failed at %d\n",
- __func__, rc);
- }
- return rc;
-}
-
-static void msm_isp_release(struct msm_cam_media_controller *mctl,
- struct v4l2_subdev *sd)
-{
- D("%s\n", __func__);
- msm_vfe_subdev_release(sd);
- msm_iommu_unmap_contig_buffer(mctl->ping_imem_y,
- CAMERA_DOMAIN, GEN_POOL,
- ((IMEM_Y_SIZE + IMEM_CBCR_SIZE + 4095) & (~4095)));
- msm_iommu_unmap_contig_buffer(mctl->pong_imem_y,
- CAMERA_DOMAIN, GEN_POOL,
- ((IMEM_Y_SIZE + IMEM_CBCR_SIZE + 4095) & (~4095)));
- mctl->ping_imem_y = 0;
- mctl->ping_imem_cbcr = 0;
- mctl->pong_imem_y = 0;
- mctl->pong_imem_cbcr = 0;
-}
-
static int msm_config_vfe(struct v4l2_subdev *sd,
struct msm_cam_media_controller *mctl, void __user *arg)
{
@@ -562,6 +513,9 @@
memset(&axi_data, 0, sizeof(axi_data));
CDBG("%s: cmd_type %d\n", __func__, cfgcmd.cmd_type);
switch (cfgcmd.cmd_type) {
+ case CMD_STATS_BG_ENABLE:
+ case CMD_STATS_BF_ENABLE:
+ case CMD_STATS_BHIST_ENABLE:
case CMD_STATS_AF_ENABLE:
case CMD_STATS_AEC_ENABLE:
case CMD_STATS_AWB_ENABLE:
@@ -654,6 +608,12 @@
cfgcmd.cmd_type = CMD_STATS_CS_BUF_RELEASE;
else if (buf.type == STAT_AEAW)
cfgcmd.cmd_type = CMD_STATS_BUF_RELEASE;
+ else if (buf.type == STAT_BG)
+ cfgcmd.cmd_type = CMD_STATS_BG_BUF_RELEASE;
+ else if (buf.type == STAT_BF)
+ cfgcmd.cmd_type = CMD_STATS_BF_BUF_RELEASE;
+ else if (buf.type == STAT_BHIST)
+ cfgcmd.cmd_type = CMD_STATS_BHIST_BUF_RELEASE;
else {
pr_err("%s: invalid buf type %d\n",
@@ -698,7 +658,6 @@
}
case MSM_CAM_IOCTL_STATS_ENQUEUEBUF: {
struct msm_stats_buf_info buf_info;
-
if (copy_from_user(&buf_info, arg,
sizeof(struct msm_stats_buf_info))) {
ERR_COPY_FROM_USER();
@@ -712,18 +671,30 @@
}
case MSM_CAM_IOCTL_STATS_FLUSH_BUFQ: {
struct msm_stats_flush_bufq bufq_info;
-
if (copy_from_user(&bufq_info, arg,
sizeof(struct msm_stats_flush_bufq))) {
ERR_COPY_FROM_USER();
return -EFAULT;
- }
+ }
cfgcmd.cmd_type = VFE_CMD_STATS_FLUSH_BUFQ;
cfgcmd.value = (void *)&bufq_info;
cfgcmd.length = sizeof(struct msm_stats_flush_bufq);
rc = msm_isp_subdev_ioctl(sd, &cfgcmd, NULL);
break;
}
+ case MSM_CAM_IOCTL_STATS_UNREG_BUF: {
+ struct msm_stats_reqbuf reqbuf;
+ if (copy_from_user(&reqbuf, arg,
+ sizeof(struct msm_stats_reqbuf))) {
+ ERR_COPY_FROM_USER();
+ return -EFAULT;
+ }
+ cfgcmd.cmd_type = VFE_CMD_STATS_UNREGBUF;
+ cfgcmd.value = (void *)&reqbuf;
+ cfgcmd.length = sizeof(struct msm_stats_reqbuf);
+ rc = msm_isp_subdev_ioctl(sd, &cfgcmd, (void *)mctl->client);
+ break;
+ }
default:
rc = -1;
break;
@@ -759,6 +730,7 @@
case MSM_CAM_IOCTL_STATS_REQBUF:
case MSM_CAM_IOCTL_STATS_ENQUEUEBUF:
case MSM_CAM_IOCTL_STATS_FLUSH_BUFQ:
+ case MSM_CAM_IOCTL_STATS_UNREG_BUF:
rc = msm_vfe_stats_buf_ioctl(sd, cmd, pmctl, argp);
break;
@@ -779,9 +751,7 @@
int i = 0;
for (i = 0; i < g_num_config_nodes; i++) {
- isp_subdev[i].isp_open = msm_isp_open;
isp_subdev[i].isp_config = msm_isp_config;
- isp_subdev[i].isp_release = msm_isp_release;
isp_subdev[i].isp_notify = msm_isp_notify;
}
return 0;
diff --git a/drivers/media/video/msm/msm_mctl.c b/drivers/media/video/msm/msm_mctl.c
index be6c543..a87b074 100644
--- a/drivers/media/video/msm/msm_mctl.c
+++ b/drivers/media/video/msm/msm_mctl.c
@@ -418,94 +418,6 @@
return rc;
}
-static int msm_mctl_subdev_match_core(struct device *dev, void *data)
-{
- int core_index = (int)data;
- struct platform_device *pdev = to_platform_device(dev);
-
- if (pdev->id == core_index)
- return 1;
- else
- return 0;
-}
-
-static int msm_mctl_register_subdevs(struct msm_cam_media_controller *p_mctl,
- int core_index)
-{
- struct device_driver *driver;
- struct device *dev;
- int rc = -ENODEV;
-
- struct msm_sensor_ctrl_t *s_ctrl = get_sctrl(p_mctl->sensor_sdev);
- struct msm_camera_sensor_info *sinfo =
- (struct msm_camera_sensor_info *) s_ctrl->sensordata;
- struct msm_camera_device_platform_data *pdata = sinfo->pdata;
-
- rc = msm_csi_register_subdevs(p_mctl, core_index,
- msm_mctl_subdev_match_core);
-
- if (rc < 0)
- goto out;
-
- /* register vfe subdev */
- driver = driver_find(MSM_VFE_DRV_NAME, &platform_bus_type);
- if (!driver)
- goto out;
-
- dev = driver_find_device(driver, NULL, 0,
- msm_mctl_subdev_match_core);
- if (!dev)
- goto out;
-
- p_mctl->isp_sdev->sd = dev_get_drvdata(dev);
-
- if (pdata->is_vpe) {
- /* register vfe subdev */
- driver = driver_find(MSM_VPE_DRV_NAME, &platform_bus_type);
- if (!driver)
- goto out;
-
- dev = driver_find_device(driver, NULL, 0,
- msm_mctl_subdev_match_core);
- if (!dev)
- goto out;
-
- p_mctl->vpe_sdev = dev_get_drvdata(dev);
- }
-
- rc = 0;
-
-
- /* register gemini subdev */
- driver = driver_find(MSM_GEMINI_DRV_NAME, &platform_bus_type);
- if (!driver) {
- pr_err("%s:%d:Gemini: Failure: goto out\n",
- __func__, __LINE__);
- goto out;
- }
- pr_debug("%s:%d:Gemini: driver_find_device Gemini driver 0x%x\n",
- __func__, __LINE__, (uint32_t)driver);
- dev = driver_find_device(driver, NULL, NULL,
- msm_mctl_subdev_match_core);
- if (!dev) {
- pr_err("%s:%d:Gemini: Failure goto out\n",
- __func__, __LINE__);
- goto out;
- }
- p_mctl->gemini_sdev = dev_get_drvdata(dev);
- pr_debug("%s:%d:Gemini: After dev_get_drvdata gemini_sdev=0x%x\n",
- __func__, __LINE__, (uint32_t)p_mctl->gemini_sdev);
-
- if (p_mctl->gemini_sdev == NULL) {
- pr_err("%s:%d:Gemini: Failure gemini_sdev is null\n",
- __func__, __LINE__);
- goto out;
- }
- rc = 0;
-out:
- return rc;
-}
-
static int msm_mctl_open(struct msm_cam_media_controller *p_mctl,
const char *const apps_id)
{
@@ -525,13 +437,13 @@
/* open sub devices - once only*/
if (!p_mctl->opencnt) {
struct msm_sensor_csi_info csi_info;
- uint32_t csid_version;
+ uint32_t csid_version = 0;
wake_lock(&p_mctl->wake_lock);
csid_core = camdev->csid_core;
- rc = msm_mctl_register_subdevs(p_mctl, csid_core);
+ rc = msm_mctl_find_sensor_subdevs(p_mctl, csid_core);
if (rc < 0) {
- pr_err("%s: msm_mctl_register_subdevs failed:%d\n",
+ pr_err("%s: msm_mctl_find_sensor_subdevs failed:%d\n",
__func__, rc);
goto register_sdev_failed;
}
@@ -592,38 +504,6 @@
goto msm_csi_version;
}
- /* ISP first*/
- if (p_mctl->isp_sdev && p_mctl->isp_sdev->isp_open) {
- rc = p_mctl->isp_sdev->isp_open(
- p_mctl->isp_sdev->sd, p_mctl);
- if (rc < 0) {
- pr_err("%s: isp init failed: %d\n",
- __func__, rc);
- goto isp_open_failed;
- }
- }
-
- if (p_mctl->axi_sdev) {
- rc = v4l2_subdev_call(p_mctl->axi_sdev, core, ioctl,
- VIDIOC_MSM_AXI_INIT, p_mctl);
- if (rc < 0) {
- pr_err("%s: axi initialization failed %d\n",
- __func__, rc);
- goto axi_init_failed;
- }
- }
-
- if (camdev->is_vpe) {
- rc = v4l2_subdev_call(p_mctl->vpe_sdev, core, ioctl,
- VIDIOC_MSM_VPE_INIT, p_mctl);
- if (rc < 0) {
- pr_err("%s: vpe initialization failed %d\n",
- __func__, rc);
- goto vpe_init_failed;
- }
- }
-
-
pm_qos_add_request(&p_mctl->pm_qos_req_list,
PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
pm_qos_update_request(&p_mctl->pm_qos_req_list,
@@ -635,19 +515,9 @@
D("%s: camera is already open", __func__);
}
mutex_unlock(&p_mctl->lock);
-
return rc;
-vpe_init_failed:
- if (p_mctl->axi_sdev)
- if (v4l2_subdev_call(p_mctl->axi_sdev, core, ioctl,
- VIDIOC_MSM_AXI_RELEASE, NULL) < 0)
- pr_err("%s: axi release failed %d\n", __func__, rc);
-axi_init_failed:
- if (p_mctl->isp_sdev && p_mctl->isp_sdev->isp_release)
- p_mctl->isp_sdev->isp_release(p_mctl, p_mctl->isp_sdev->sd);
msm_csi_version:
-isp_open_failed:
if (p_mctl->csic_sdev)
if (v4l2_subdev_call(p_mctl->csic_sdev, core, ioctl,
VIDIOC_MSM_CSIC_RELEASE, NULL) < 0)
@@ -660,7 +530,8 @@
csid_init_failed:
if (p_mctl->csiphy_sdev)
if (v4l2_subdev_call(p_mctl->csiphy_sdev, core, ioctl,
- VIDIOC_MSM_CSIPHY_RELEASE, NULL) < 0)
+ VIDIOC_MSM_CSIPHY_RELEASE,
+ sinfo->sensor_platform_info->csi_lane_params) < 0)
pr_err("%s: csiphy release failed %d\n", __func__, rc);
csiphy_init_failed:
if (p_mctl->act_sdev)
@@ -677,13 +548,11 @@
return rc;
}
-static int msm_mctl_release(struct msm_cam_media_controller *p_mctl)
+static void msm_mctl_release(struct msm_cam_media_controller *p_mctl)
{
- int rc = 0;
struct msm_sensor_ctrl_t *s_ctrl = get_sctrl(p_mctl->sensor_sdev);
struct msm_camera_sensor_info *sinfo =
(struct msm_camera_sensor_info *) s_ctrl->sensordata;
- struct msm_camera_device_platform_data *camdev = sinfo->pdata;
v4l2_subdev_call(p_mctl->sensor_sdev, core, ioctl,
VIDIOC_MSM_SENSOR_RELEASE, NULL);
@@ -693,7 +562,7 @@
VIDIOC_MSM_CSIC_RELEASE, NULL);
}
- if (camdev->is_vpe) {
+ if (p_mctl->vpe_sdev) {
v4l2_subdev_call(p_mctl->vpe_sdev, core, ioctl,
VIDIOC_MSM_VPE_RELEASE, NULL);
}
@@ -703,10 +572,6 @@
VIDIOC_MSM_AXI_RELEASE, NULL);
}
- if (p_mctl->isp_sdev && p_mctl->isp_sdev->isp_release)
- p_mctl->isp_sdev->isp_release(p_mctl,
- p_mctl->isp_sdev->sd);
-
if (p_mctl->csid_sdev) {
v4l2_subdev_call(p_mctl->csid_sdev, core, ioctl,
VIDIOC_MSM_CSID_RELEASE, NULL);
@@ -714,7 +579,8 @@
if (p_mctl->csiphy_sdev) {
v4l2_subdev_call(p_mctl->csiphy_sdev, core, ioctl,
- VIDIOC_MSM_CSIPHY_RELEASE, NULL);
+ VIDIOC_MSM_CSIPHY_RELEASE,
+ sinfo->sensor_platform_info->csi_lane_params);
}
if (p_mctl->act_sdev) {
@@ -729,7 +595,6 @@
pm_qos_remove_request(&p_mctl->pm_qos_req_list);
wake_unlock(&p_mctl->wake_lock);
- return rc;
}
int msm_mctl_init_user_formats(struct msm_cam_v4l2_device *pcam)
@@ -830,6 +695,7 @@
pmctl->eeprom_sdev = pcam->eeprom_sdev;
pmctl->sensor_sdev = pcam->sensor_sdev;
pmctl->sdata = pcam->sdata;
+ v4l2_set_subdev_hostdata(pcam->sensor_sdev, pmctl);
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
pmctl->client = msm_ion_client_create(-1, "camera");
@@ -853,6 +719,8 @@
mutex_destroy(&pmctl->lock);
wake_lock_destroy(&pmctl->wake_lock);
+ /*clear out mctl fields*/
+ memset(pmctl, 0, sizeof(struct msm_cam_media_controller));
msm_cam_server_free_mctl(pcam->mctl_handle);
return rc;
}
@@ -917,12 +785,9 @@
}
D("%s active %d\n", __func__, pcam->mctl_node.active);
- rc = msm_setup_v4l2_event_queue(&pcam_inst->eventHandle,
- pcam->mctl_node.pvdev);
- if (rc < 0) {
- mutex_unlock(&pcam->mctl_node.dev_lock);
- return rc;
- }
+ msm_setup_v4l2_event_queue(&pcam_inst->eventHandle,
+ pcam->mctl_node.pvdev);
+
pcam_inst->vbqueue_initialized = 0;
kref_get(&pmctl->refcount);
f->private_data = &pcam_inst->eventHandle;
@@ -1005,8 +870,10 @@
vb2_queue_release(&pcam_inst->vid_bufq);
D("%s Closing down instance %p ", __func__, pcam_inst);
pcam->mctl_node.dev_inst[pcam_inst->my_index] = NULL;
- v4l2_fh_del(&pcam_inst->eventHandle);
- v4l2_fh_exit(&pcam_inst->eventHandle);
+ msm_destroy_v4l2_event_queue(&pcam_inst->eventHandle);
+ CLR_MCTLPP_INST_IDX(pcam_inst->inst_handle);
+ CLR_IMG_MODE(pcam_inst->inst_handle);
+
mutex_destroy(&pcam_inst->inst_lock);
kfree(pcam_inst);
@@ -1453,6 +1320,10 @@
WARN_ON(pctx != f->private_data);
pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+ if (!pmctl) {
+ pr_err("%s mctl ptr is null ", __func__);
+ return -EINVAL;
+ }
if (!pcam_inst->vbqueue_initialized) {
pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
V4L2_BUF_TYPE_VIDEO_CAPTURE);
@@ -1477,6 +1348,10 @@
WARN_ON(pctx != f->private_data);
pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+ if (!pmctl) {
+ pr_err("%s mctl ptr is null ", __func__);
+ return -EINVAL;
+ }
if (!pcam_inst->vbqueue_initialized) {
pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
@@ -1579,10 +1454,15 @@
struct msm_cam_v4l2_dev_inst *pcam_inst;
pcam_inst = container_of(f->private_data,
struct msm_cam_v4l2_dev_inst, eventHandle);
- pcam_inst->image_mode = a->parm.capture.extendedmode;
+ pcam_inst->image_mode = (a->parm.capture.extendedmode & 0x7F);
+ SET_IMG_MODE(pcam_inst->inst_handle, pcam_inst->image_mode);
+ SET_MCTLPP_INST_IDX(pcam_inst->inst_handle, pcam_inst->my_index);
pcam_inst->pcam->mctl_node.dev_inst_map[pcam_inst->image_mode] =
pcam_inst;
pcam_inst->path = msm_mctl_vidbuf_get_path(pcam_inst->image_mode);
+
+ rc = msm_cam_server_config_interface_map(pcam_inst->image_mode,
+ pcam_inst->pcam->mctl_handle);
D("%s path=%d, image mode = %d rc=%d\n", __func__,
pcam_inst->path, pcam_inst->image_mode, rc);
return rc;
@@ -1624,6 +1504,51 @@
return rc;
}
+static int msm_mctl_v4l2_private_g_ctrl(struct file *f, void *pctx,
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
+{
+ int rc = -EINVAL;
+ struct msm_cam_v4l2_device *pcam = video_drvdata(f);
+ struct msm_cam_v4l2_dev_inst *pcam_inst;
+ pcam_inst = container_of(f->private_data,
+ struct msm_cam_v4l2_dev_inst, eventHandle);
+
+ WARN_ON(pctx != f->private_data);
+
+ mutex_lock(&pcam->mctl_node.dev_lock);
+ switch (ioctl_ptr->id) {
+ case MSM_V4L2_PID_INST_HANDLE:
+ COPY_TO_USER(rc, (void __user *)ioctl_ptr->ioctl_ptr,
+ (void *)&pcam_inst->inst_handle, sizeof(uint32_t));
+ if (rc)
+ ERR_COPY_TO_USER();
+ break;
+ default:
+ pr_err("%s Unsupported ioctl %d ", __func__, ioctl_ptr->id);
+ break;
+ }
+ mutex_unlock(&pcam->mctl_node.dev_lock);
+ return rc;
+}
+
+static long msm_mctl_v4l2_private_ioctl(struct file *file, void *fh,
+ bool valid_prio, int cmd, void *arg)
+{
+ int rc = -EINVAL;
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
+ D("%s: cmd %d\n", __func__, _IOC_NR(cmd));
+
+ switch (cmd) {
+ case MSM_CAM_V4L2_IOCTL_PRIVATE_G_CTRL:
+ rc = msm_mctl_v4l2_private_g_ctrl(file, fh, ioctl_ptr);
+ break;
+ default:
+ pr_err("%s Unsupported ioctl cmd %d ", __func__, cmd);
+ break;
+ }
+ return rc;
+}
+
/* mctl node v4l2_ioctl_ops */
static const struct v4l2_ioctl_ops g_msm_mctl_ioctl_ops = {
.vidioc_querycap = msm_mctl_v4l2_querycap,
@@ -1663,6 +1588,7 @@
/* event subscribe/unsubscribe */
.vidioc_subscribe_event = msm_mctl_v4l2_subscribe_event,
.vidioc_unsubscribe_event = msm_mctl_v4l2_unsubscribe_event,
+ .vidioc_default = msm_mctl_v4l2_private_ioctl,
};
int msm_setup_mctl_node(struct msm_cam_v4l2_device *pcam)
diff --git a/drivers/media/video/msm/msm_mctl_buf.c b/drivers/media/video/msm/msm_mctl_buf.c
index cd86a80..a69858a 100644
--- a/drivers/media/video/msm/msm_mctl_buf.c
+++ b/drivers/media/video/msm/msm_mctl_buf.c
@@ -368,7 +368,7 @@
struct msm_frame_buffer *msm_mctl_buf_find(
struct msm_cam_media_controller *pmctl,
struct msm_cam_v4l2_dev_inst *pcam_inst, int del_buf,
- int image_mode, struct msm_free_buf *fbuf)
+ struct msm_free_buf *fbuf)
{
struct msm_frame_buffer *buf = NULL, *tmp;
uint32_t buf_phyaddr = 0;
@@ -409,14 +409,13 @@
int msm_mctl_buf_done_proc(
struct msm_cam_media_controller *pmctl,
struct msm_cam_v4l2_dev_inst *pcam_inst,
- int image_mode, struct msm_free_buf *fbuf,
+ struct msm_free_buf *fbuf,
uint32_t *frame_id, int gen_timestamp)
{
struct msm_frame_buffer *buf = NULL;
int del_buf = 1;
- buf = msm_mctl_buf_find(pmctl, pcam_inst, del_buf,
- image_mode, fbuf);
+ buf = msm_mctl_buf_find(pmctl, pcam_inst, del_buf, fbuf);
if (!buf) {
pr_err("%s: buf=0x%x not found\n",
__func__, fbuf->ch_paddr[0]);
@@ -434,48 +433,81 @@
int msm_mctl_buf_done(struct msm_cam_media_controller *p_mctl,
- int image_mode, struct msm_free_buf *fbuf,
- uint32_t frame_id)
+ struct msm_cam_buf_handle *buf_handle,
+ struct msm_free_buf *fbuf,
+ uint32_t frame_id)
{
struct msm_cam_v4l2_dev_inst *pcam_inst;
int idx, rc;
int pp_divert_type = 0, pp_type = 0;
+ uint32_t image_mode;
+
+ if (!p_mctl || !buf_handle || !fbuf) {
+ pr_err("%s Invalid argument. ", __func__);
+ return -EINVAL;
+ }
+ if (buf_handle->buf_lookup_type == BUF_LOOKUP_BY_IMG_MODE)
+ image_mode = buf_handle->image_mode;
+ else
+ image_mode = GET_IMG_MODE(buf_handle->inst_handle);
+
+ if (image_mode > MSM_V4L2_EXT_CAPTURE_MODE_MAX) {
+ pr_err("%s Invalid image mode %d ", __func__, image_mode);
+ return -EINVAL;
+ }
msm_mctl_check_pp(p_mctl, image_mode, &pp_divert_type, &pp_type);
D("%s: pp_type=%d, pp_divert_type = %d, frame_id = 0x%x image_mode %d",
__func__, pp_type, pp_divert_type, frame_id, image_mode);
- if (pp_type || pp_divert_type)
- rc = msm_mctl_do_pp_divert(p_mctl,
- image_mode, fbuf, frame_id, pp_type);
- else {
- idx = msm_mctl_img_mode_to_inst_index(
- p_mctl, image_mode, 0);
- if (idx < 0) {
- /* check mctl node */
- if ((image_mode >= 0) &&
- p_mctl->pcam_ptr->mctl_node.
- dev_inst_map[image_mode]) {
- int index = p_mctl->pcam_ptr->mctl_node.
- dev_inst_map[image_mode]->my_index;
- pcam_inst = p_mctl->pcam_ptr->mctl_node.
- dev_inst[index];
- D("%s: Mctl node index %d inst %p",
- __func__, index, pcam_inst);
- rc = msm_mctl_buf_done_proc(p_mctl, pcam_inst,
- image_mode, fbuf,
- &frame_id, 1);
- D("%s mctl node buf done %d\n", __func__, 0);
- return rc;
+ if (pp_type || pp_divert_type) {
+ rc = msm_mctl_do_pp_divert(p_mctl, buf_handle,
+ fbuf, frame_id, pp_type);
+ } else {
+ /* Find the instance on which vb2_buffer_done() needs to be
+ * called, so that the user can get the buffer.
+ * If the lookup type is
+ * - By instance handle:
+ * Either mctl_pp inst idx or video inst idx should be set.
+ * Try to get the MCTL_PP inst idx first, if its not set,
+ * fall back to video inst idx. Once we get the inst idx,
+ * get the pcam_inst from the corresponding dev_inst[] map.
+ * If neither are set, its a serious error, trigger a BUG_ON.
+ * - By image mode:
+ * Legacy usecase. Use the image mode and get the pcam_inst
+ * from the video node.
+ */
+ if (buf_handle->buf_lookup_type == BUF_LOOKUP_BY_INST_HANDLE) {
+ idx = GET_MCTLPP_INST_IDX(buf_handle->inst_handle);
+ if (idx > MSM_DEV_INST_MAX) {
+ idx = GET_VIDEO_INST_IDX(
+ buf_handle->inst_handle);
+ BUG_ON(idx > MSM_DEV_INST_MAX);
+ pcam_inst = p_mctl->pcam_ptr->dev_inst[idx];
} else {
- pr_err("%s Invalid instance, dropping buffer\n",
- __func__);
- return idx;
+ pcam_inst = p_mctl->pcam_ptr->mctl_node.
+ dev_inst[idx];
}
+ } else if (buf_handle->buf_lookup_type ==
+ BUF_LOOKUP_BY_IMG_MODE) {
+ idx = msm_mctl_img_mode_to_inst_index(p_mctl,
+ buf_handle->image_mode, 0);
+ if (idx < 0) {
+ pr_err("%s Invalid idx %d ", __func__, idx);
+ return -EINVAL;
+ }
+ pcam_inst = p_mctl->pcam_ptr->dev_inst[idx];
+ } else {
+ pr_err("%s Invalid buffer lookup type %d", __func__,
+ buf_handle->buf_lookup_type);
+ return -EINVAL;
}
- pcam_inst = p_mctl->pcam_ptr->dev_inst[idx];
+ if (!pcam_inst) {
+ pr_err("%s Invalid instance, Dropping buffer. ",
+ __func__);
+ return -EINVAL;
+ }
rc = msm_mctl_buf_done_proc(p_mctl, pcam_inst,
- image_mode, fbuf,
- &frame_id, 1);
+ fbuf, &frame_id, 1);
}
return rc;
}
@@ -508,58 +540,100 @@
return ret;
}
-struct msm_cam_v4l2_dev_inst *msm_mctl_get_pcam_inst(
- struct msm_cam_media_controller *pmctl,
- int image_mode)
+struct msm_cam_v4l2_dev_inst *msm_mctl_get_inst_by_img_mode(
+ struct msm_cam_media_controller *pmctl, uint32_t img_mode)
{
struct msm_cam_v4l2_dev_inst *pcam_inst = NULL;
struct msm_cam_v4l2_device *pcam = pmctl->pcam_ptr;
int idx;
- if (image_mode >= 0) {
- /* Valid image mode. Search the mctl node first.
- * If mctl node doesnt have the instance, then
- * search in the user's video node */
- if (pmctl->vfe_output_mode == VFE_OUTPUTS_MAIN_AND_THUMB
- || pmctl->vfe_output_mode == VFE_OUTPUTS_THUMB_AND_MAIN) {
- if (pcam->mctl_node.dev_inst_map[image_mode]
- && is_buffer_queued(pcam, image_mode)) {
- idx =
- pcam->mctl_node.dev_inst_map[image_mode]
- ->my_index;
- pcam_inst = pcam->mctl_node.dev_inst[idx];
- D("%s Found instance %p in mctl node device\n",
- __func__, pcam_inst);
- } else if (pcam->dev_inst_map[image_mode]) {
- idx = pcam->dev_inst_map[image_mode]->my_index;
- pcam_inst = pcam->dev_inst[idx];
- D("%s Found instance %p in video device\n",
+ /* Valid image mode. Search the mctl node first.
+ * If mctl node doesnt have the instance, then
+ * search in the user's video node */
+ if (pmctl->vfe_output_mode == VFE_OUTPUTS_MAIN_AND_THUMB
+ || pmctl->vfe_output_mode == VFE_OUTPUTS_THUMB_AND_MAIN
+ || pmctl->vfe_output_mode == VFE_OUTPUTS_MAIN_AND_PREVIEW) {
+ if (pcam->mctl_node.dev_inst_map[img_mode]
+ && is_buffer_queued(pcam, img_mode)) {
+ idx = pcam->mctl_node.dev_inst_map[img_mode]->my_index;
+ pcam_inst = pcam->mctl_node.dev_inst[idx];
+ D("%s Found instance %p in mctl node device\n",
__func__, pcam_inst);
- }
- } else {
- if (pcam->mctl_node.dev_inst_map[image_mode]) {
- idx = pcam->mctl_node.dev_inst_map[image_mode]
- ->my_index;
- pcam_inst = pcam->mctl_node.dev_inst[idx];
- D("%s Found instance %p in mctl node device\n",
+ } else if (pcam->dev_inst_map[img_mode]) {
+ idx = pcam->dev_inst_map[img_mode]->my_index;
+ pcam_inst = pcam->dev_inst[idx];
+ D("%s Found instance %p in video device\n",
__func__, pcam_inst);
- } else if (pcam->dev_inst_map[image_mode]) {
- idx = pcam->dev_inst_map[image_mode]->my_index;
- pcam_inst = pcam->dev_inst[idx];
- D("%s Found instance %p in video device\n",
- __func__, pcam_inst);
- }
}
- } else
- pr_err("%s Invalid image mode %d. Return NULL\n",
- __func__, image_mode);
+ } else {
+ if (pcam->mctl_node.dev_inst_map[img_mode]) {
+ idx = pcam->mctl_node.dev_inst_map[img_mode]->my_index;
+ pcam_inst = pcam->mctl_node.dev_inst[idx];
+ D("%s Found instance %p in mctl node device\n",
+ __func__, pcam_inst);
+ } else if (pcam->dev_inst_map[img_mode]) {
+ idx = pcam->dev_inst_map[img_mode]->my_index;
+ pcam_inst = pcam->dev_inst[idx];
+ D("%s Found instance %p in video device\n",
+ __func__, pcam_inst);
+ }
+ }
+ return pcam_inst;
+}
+
+struct msm_cam_v4l2_dev_inst *msm_mctl_get_pcam_inst(
+ struct msm_cam_media_controller *pmctl,
+ struct msm_cam_buf_handle *buf_handle)
+{
+ struct msm_cam_v4l2_dev_inst *pcam_inst = NULL;
+ struct msm_cam_v4l2_device *pcam = pmctl->pcam_ptr;
+ int idx;
+
+ /* Get the pcam instance on based on the following rules:
+ * If the lookup type is
+ * - By instance handle:
+ * Either mctl_pp inst idx or video inst idx should be set.
+ * Try to get the MCTL_PP inst idx first, if its not set,
+ * fall back to video inst idx. Once we get the inst idx,
+ * get the pcam_inst from the corresponding dev_inst[] map.
+ * If neither are set, its a serious error, trigger a BUG_ON.
+ * - By image mode:(Legacy usecase)
+ * If vfe is in configured in snapshot mode, first check if
+ * mctl pp node has a instance created for this image mode
+ * and if there is a buffer queued for that instance.
+ * If so, return that instance, otherwise get the pcam instance
+ * for this image_mode from the video instance.
+ * If the vfe is configured in any other mode, then first check
+ * if mctl pp node has a instance created for this image mode,
+ * otherwise get the pcam instance for this image mode from the
+ * video instance.
+ */
+ if (buf_handle->buf_lookup_type == BUF_LOOKUP_BY_INST_HANDLE) {
+ idx = GET_MCTLPP_INST_IDX(buf_handle->inst_handle);
+ if (idx > MSM_DEV_INST_MAX) {
+ idx = GET_VIDEO_INST_IDX(buf_handle->inst_handle);
+ BUG_ON(idx > MSM_DEV_INST_MAX);
+ pcam_inst = pcam->dev_inst[idx];
+ } else {
+ pcam_inst = pcam->mctl_node.dev_inst[idx];
+ }
+ } else if ((buf_handle->buf_lookup_type == BUF_LOOKUP_BY_IMG_MODE)
+ && (buf_handle->image_mode >= 0 &&
+ buf_handle->image_mode < MSM_V4L2_EXT_CAPTURE_MODE_MAX)) {
+ pcam_inst = msm_mctl_get_inst_by_img_mode(pmctl,
+ buf_handle->image_mode);
+ } else {
+ pr_err("%s Invalid buffer lookup type %d", __func__,
+ buf_handle->buf_lookup_type);
+ }
return pcam_inst;
}
int msm_mctl_reserve_free_buf(
- struct msm_cam_media_controller *pmctl,
- struct msm_cam_v4l2_dev_inst *pref_pcam_inst,
- int image_mode, struct msm_free_buf *free_buf)
+ struct msm_cam_media_controller *pmctl,
+ struct msm_cam_v4l2_dev_inst *pref_pcam_inst,
+ struct msm_cam_buf_handle *buf_handle,
+ struct msm_free_buf *free_buf)
{
struct msm_cam_v4l2_dev_inst *pcam_inst = pref_pcam_inst;
unsigned long flags = 0;
@@ -568,8 +642,8 @@
int rc = -EINVAL, i;
uint32_t buf_idx, plane_offset = 0;
- if (!free_buf || !pmctl) {
- pr_err("%s: free_buf/pmctl is null\n", __func__);
+ if (!free_buf || !pmctl || !buf_handle) {
+ pr_err("%s: Invalid argument passed\n", __func__);
return rc;
}
memset(free_buf, 0, sizeof(struct msm_free_buf));
@@ -579,7 +653,7 @@
* If the preferred camera instance is NULL, get the
* camera instance using the image mode passed */
if (!pcam_inst)
- pcam_inst = msm_mctl_get_pcam_inst(pmctl, image_mode);
+ pcam_inst = msm_mctl_get_pcam_inst(pmctl, buf_handle);
if (!pcam_inst || !pcam_inst->streamon) {
pr_err("%s: stream is turned off\n", __func__);
@@ -641,18 +715,15 @@
int msm_mctl_release_free_buf(struct msm_cam_media_controller *pmctl,
struct msm_cam_v4l2_dev_inst *pcam_inst,
- int image_mode, struct msm_free_buf *free_buf)
+ struct msm_free_buf *free_buf)
{
unsigned long flags = 0;
struct msm_frame_buffer *buf = NULL;
uint32_t buf_phyaddr = 0;
int rc = -EINVAL;
- if (!free_buf)
- return rc;
-
- if (!pcam_inst) {
- pr_err("%s Invalid instance, buffer not released\n",
+ if (!pcam_inst || !free_buf) {
+ pr_err("%s Invalid argument, buffer will not be returned\n",
__func__);
return rc;
}
@@ -662,35 +733,56 @@
buf_phyaddr =
(uint32_t) videobuf2_to_pmem_contig(&buf->vidbuf, 0);
if (free_buf->ch_paddr[0] == buf_phyaddr) {
- D("%s buf = 0x%x \n", __func__, free_buf->ch_paddr[0]);
- buf->state = MSM_BUFFER_STATE_UNUSED;
+ D("%s Return buffer %d and mark it as QUEUED\n",
+ __func__, buf->vidbuf.v4l2_buf.index);
+ buf->state = MSM_BUFFER_STATE_QUEUED;
rc = 0;
break;
}
}
-
- if (rc != 0)
- pr_err("%s invalid buffer address ", __func__);
-
spin_unlock_irqrestore(&pcam_inst->vq_irqlock, flags);
+
+ if (rc)
+ pr_err("%s Cannot find buffer %x", __func__,
+ free_buf->ch_paddr[0]);
+
return rc;
}
int msm_mctl_buf_done_pp(struct msm_cam_media_controller *pmctl,
- int image_mode, struct msm_free_buf *frame, int dirty, int node_type)
+ struct msm_cam_buf_handle *buf_handle,
+ struct msm_free_buf *frame, int dirty, int node_type)
{
- struct msm_cam_v4l2_dev_inst *pcam_inst;
+ struct msm_cam_v4l2_dev_inst *pcam_inst = NULL;
int rc = 0, idx;
- idx = msm_mctl_img_mode_to_inst_index(pmctl, image_mode, node_type);
- if (idx < 0) {
- pr_err("%s Invalid instance, buffer not released\n", __func__);
- return idx;
+ if (!pmctl || !buf_handle) {
+ pr_err("%s Invalid argument ", __func__);
+ return -EINVAL;
}
- if (node_type)
- pcam_inst = pmctl->pcam_ptr->mctl_node.dev_inst[idx];
- else
- pcam_inst = pmctl->pcam_ptr->dev_inst[idx];
+
+ if (buf_handle->buf_lookup_type == BUF_LOOKUP_BY_INST_HANDLE) {
+ idx = GET_MCTLPP_INST_IDX(buf_handle->inst_handle);
+ if (idx > MSM_DEV_INST_MAX) {
+ idx = GET_VIDEO_INST_IDX(buf_handle->inst_handle);
+ BUG_ON(idx > MSM_DEV_INST_MAX);
+ pcam_inst = pmctl->pcam_ptr->dev_inst[idx];
+ } else {
+ pcam_inst = pmctl->pcam_ptr->mctl_node.dev_inst[idx];
+ }
+ } else if (buf_handle->buf_lookup_type == BUF_LOOKUP_BY_IMG_MODE) {
+ idx = msm_mctl_img_mode_to_inst_index(pmctl,
+ buf_handle->image_mode, node_type);
+ if (idx < 0) {
+ pr_err("%s Invalid instance, buffer not released\n",
+ __func__);
+ return idx;
+ }
+ if (node_type)
+ pcam_inst = pmctl->pcam_ptr->mctl_node.dev_inst[idx];
+ else
+ pcam_inst = pmctl->pcam_ptr->dev_inst[idx];
+ }
if (!pcam_inst) {
pr_err("%s Invalid instance, cannot send buf to user",
__func__);
@@ -701,121 +793,12 @@
__func__, pcam_inst, frame->ch_paddr[0], dirty);
if (dirty)
/* the frame is dirty, not going to disptach to app */
- rc = msm_mctl_release_free_buf(pmctl, pcam_inst,
- image_mode, frame);
+ rc = msm_mctl_release_free_buf(pmctl, pcam_inst, frame);
else
- rc = msm_mctl_buf_done_proc(pmctl, pcam_inst,
- image_mode, frame, NULL, 0);
+ rc = msm_mctl_buf_done_proc(pmctl, pcam_inst, frame, NULL, 0);
return rc;
}
-struct msm_frame_buffer *msm_mctl_get_free_buf(
- struct msm_cam_media_controller *pmctl,
- int image_mode)
-{
- struct msm_cam_v4l2_dev_inst *pcam_inst;
- unsigned long flags = 0;
- struct msm_frame_buffer *buf = NULL;
- int rc = -EINVAL, idx;
-
- idx = msm_mctl_img_mode_to_inst_index(pmctl,
- image_mode, 0);
- if (idx < 0) {
- pr_err("%s Invalid instance, cant get buffer\n", __func__);
- return NULL;
- }
- pcam_inst = pmctl->pcam_ptr->dev_inst[idx];
- if (!pcam_inst->streamon) {
- D("%s: stream 0x%p is off\n", __func__, pcam_inst);
- return NULL;
- }
- spin_lock_irqsave(&pcam_inst->vq_irqlock, flags);
- if (!list_empty(&pcam_inst->free_vq)) {
- list_for_each_entry(buf, &pcam_inst->free_vq, list) {
- if (buf->state == MSM_BUFFER_STATE_QUEUED) {
- buf->state = MSM_BUFFER_STATE_RESERVED;
- rc = 0;
- break;
- }
- }
- }
- if (rc != 0) {
- D("%s:No free buffer available: inst = 0x%p ",
- __func__, pcam_inst);
- buf = NULL;
- }
- spin_unlock_irqrestore(&pcam_inst->vq_irqlock, flags);
- return buf;
-}
-
-int msm_mctl_put_free_buf(
- struct msm_cam_media_controller *pmctl,
- int image_mode, struct msm_frame_buffer *my_buf)
-{
- struct msm_cam_v4l2_dev_inst *pcam_inst;
- unsigned long flags = 0;
- int rc = 0, idx;
- struct msm_frame_buffer *buf = NULL;
-
- idx = msm_mctl_img_mode_to_inst_index(pmctl,
- image_mode, 0);
- if (idx < 0) {
- pr_err("%s Invalid instance, cant put buffer\n", __func__);
- return idx;
- }
- pcam_inst = pmctl->pcam_ptr->dev_inst[idx];
- if (!pcam_inst->streamon) {
- D("%s: stream 0x%p is off\n", __func__, pcam_inst);
- return rc;
- }
- spin_lock_irqsave(&pcam_inst->vq_irqlock, flags);
- if (!list_empty(&pcam_inst->free_vq)) {
- list_for_each_entry(buf, &pcam_inst->free_vq, list) {
- if (my_buf == buf) {
- buf->state = MSM_BUFFER_STATE_QUEUED;
- spin_unlock_irqrestore(&pcam_inst->vq_irqlock,
- flags);
- return 0;
- }
- }
- }
- spin_unlock_irqrestore(&pcam_inst->vq_irqlock, flags);
- return rc;
-}
-
-int msm_mctl_buf_del(struct msm_cam_media_controller *pmctl,
- int image_mode,
- struct msm_frame_buffer *my_buf)
-{
- struct msm_cam_v4l2_dev_inst *pcam_inst;
- struct msm_frame_buffer *buf = NULL;
- unsigned long flags = 0;
- int idx;
-
- idx = msm_mctl_img_mode_to_inst_index(pmctl,
- image_mode, 0);
- if (idx < 0) {
- pr_err("%s Invalid instance, cant delete buffer\n", __func__);
- return idx;
- }
- pcam_inst = pmctl->pcam_ptr->dev_inst[idx];
- D("%s: idx = %d, pinst=0x%p", __func__, idx, pcam_inst);
- spin_lock_irqsave(&pcam_inst->vq_irqlock, flags);
- if (!list_empty(&pcam_inst->free_vq)) {
- list_for_each_entry(buf, &pcam_inst->free_vq, list) {
- if (my_buf == buf) {
- list_del_init(&buf->list);
- spin_unlock_irqrestore(&pcam_inst->vq_irqlock,
- flags);
- return 0;
- }
- }
- }
- spin_unlock_irqrestore(&pcam_inst->vq_irqlock, flags);
- pr_err("%s: buf 0x%p not found", __func__, my_buf);
- return -EINVAL;
-}
-
int msm_mctl_buf_return_buf(struct msm_cam_media_controller *pmctl,
int image_mode, struct msm_frame_buffer *rbuf)
{
diff --git a/drivers/media/video/msm/msm_mctl_pp.c b/drivers/media/video/msm/msm_mctl_pp.c
index abcee4b..dcb7c51 100644
--- a/drivers/media/video/msm/msm_mctl_pp.c
+++ b/drivers/media/video/msm/msm_mctl_pp.c
@@ -153,61 +153,96 @@
spin_unlock_irqrestore(&pcam_inst->vq_irqlock, flags);
return 0;
}
+
static struct msm_cam_v4l2_dev_inst *msm_mctl_get_pcam_inst_for_divert(
- struct msm_cam_media_controller *pmctl,
- int image_mode, struct msm_free_buf *fbuf, int *node_type)
+ struct msm_cam_media_controller *pmctl,
+ struct msm_cam_buf_handle *buf_handle,
+ struct msm_free_buf *fbuf, int *node_type)
{
struct msm_cam_v4l2_dev_inst *pcam_inst = NULL;
struct msm_cam_v4l2_device *pcam = pmctl->pcam_ptr;
int idx;
+ uint32_t img_mode;
- if (image_mode >= 0) {
- /* Valid image mode. Search the mctl node first.
- * If mctl node doesnt have the instance, then
- * search in the user's video node */
- if (pcam->mctl_node.dev_inst_map[image_mode]
- && is_buf_in_queue(pcam, fbuf, image_mode)) {
- idx =
- pcam->mctl_node.dev_inst_map[image_mode]->my_index;
- pcam_inst = pcam->mctl_node.dev_inst[idx];
- *node_type = MCTL_NODE;
- D("%s Found instance %p in mctl node device\n",
- __func__, pcam_inst);
- } else if (pcam->dev_inst_map[image_mode]) {
- idx = pcam->dev_inst_map[image_mode]->my_index;
+ if (buf_handle->buf_lookup_type == BUF_LOOKUP_BY_INST_HANDLE) {
+ idx = GET_MCTLPP_INST_IDX(buf_handle->inst_handle);
+ if (idx > MSM_DEV_INST_MAX) {
+ idx = GET_VIDEO_INST_IDX(buf_handle->inst_handle);
+ BUG_ON(idx > MSM_DEV_INST_MAX);
pcam_inst = pcam->dev_inst[idx];
*node_type = VIDEO_NODE;
- D("%s Found instance %p in video device",
- __func__, pcam_inst);
- } else
+ } else {
+ pcam_inst = pcam->mctl_node.dev_inst[idx];
+ *node_type = MCTL_NODE;
+ }
+ } else if (buf_handle->buf_lookup_type == BUF_LOOKUP_BY_IMG_MODE) {
+ img_mode = buf_handle->image_mode;
+ if (img_mode >= 0 && img_mode < MSM_V4L2_EXT_CAPTURE_MODE_MAX) {
+ /* Valid image mode. Search the mctl node first.
+ * If mctl node doesnt have the instance, then
+ * search in the user's video node */
+ if (pcam->mctl_node.dev_inst_map[img_mode]
+ && is_buf_in_queue(pcam, fbuf, img_mode)) {
+ idx = pcam->mctl_node.
+ dev_inst_map[img_mode]->my_index;
+ pcam_inst = pcam->mctl_node.dev_inst[idx];
+ *node_type = MCTL_NODE;
+ D("%s Found instance %p in mctl node device\n",
+ __func__, pcam_inst);
+ } else if (pcam->dev_inst_map[img_mode]) {
+ idx = pcam->dev_inst_map[img_mode]->my_index;
+ pcam_inst = pcam->dev_inst[idx];
+ *node_type = VIDEO_NODE;
+ D("%s Found instance %p in video device",
+ __func__, pcam_inst);
+ } else {
+ pr_err("%s Cannot find instance for %d.\n",
+ __func__, img_mode);
+ }
+ } else {
pr_err("%s Invalid image mode %d. Return NULL\n",
- __func__, image_mode);
+ __func__, buf_handle->image_mode);
+ }
+ } else {
+ pr_err("%s Invalid buffer lookup type ", __func__);
}
- return pcam_inst;
+ return pcam_inst;
}
int msm_mctl_do_pp_divert(
struct msm_cam_media_controller *p_mctl,
- int image_mode, struct msm_free_buf *fbuf,
+ struct msm_cam_buf_handle *buf_handle,
+ struct msm_free_buf *fbuf,
uint32_t frame_id, int pp_type)
{
struct msm_cam_v4l2_dev_inst *pcam_inst;
- int rc = 0, i, buf_idx;
+ int rc = 0, i, buf_idx, node;
int del_buf = 0; /* delete from free queue */
struct msm_cam_evt_divert_frame div;
struct msm_frame_buffer *vb = NULL;
struct videobuf2_contig_pmem *mem;
- int node;
+ uint32_t image_mode;
- pcam_inst = msm_mctl_get_pcam_inst_for_divert
- (p_mctl, image_mode, fbuf, &node);
+ if (buf_handle->buf_lookup_type == BUF_LOOKUP_BY_IMG_MODE) {
+ image_mode = buf_handle->image_mode;
+ div.frame.inst_handle = 0;
+ } else if (buf_handle->buf_lookup_type == BUF_LOOKUP_BY_INST_HANDLE) {
+ image_mode = GET_IMG_MODE(buf_handle->inst_handle);
+ div.frame.inst_handle = buf_handle->inst_handle;
+ } else {
+ pr_err("%s Invalid buffer lookup type %d ", __func__,
+ buf_handle->buf_lookup_type);
+ return -EINVAL;
+ }
+
+ pcam_inst = msm_mctl_get_pcam_inst_for_divert(p_mctl,
+ buf_handle, fbuf, &node);
if (!pcam_inst) {
pr_err("%s Invalid instance. Cannot divert frame.\n",
__func__);
return -EINVAL;
}
- vb = msm_mctl_buf_find(p_mctl, pcam_inst,
- del_buf, image_mode, fbuf);
+ vb = msm_mctl_buf_find(p_mctl, pcam_inst, del_buf, fbuf);
if (!vb)
return -EINVAL;
@@ -392,50 +427,16 @@
struct msm_mctl_pp_cmd *pp_cmd)
{
int rc = 0;
- struct msm_mctl_pp_frame_buffer pp_buffer;
- struct msm_frame_buffer *buf = NULL;
- void __user *argp = (void __user *)pp_cmd->value;
- int img_mode;
unsigned long flags;
switch (pp_cmd->id) {
- case MCTL_CMD_GET_FRAME_BUFFER: {
- if (copy_from_user(&pp_buffer, pp_cmd->value,
- sizeof(pp_buffer)))
- return -EFAULT;
- img_mode = msm_mctl_pp_path_to_img_mode(pp_buffer.path);
- if (img_mode < 0) {
- pr_err("%s Invalid image mode\n", __func__);
- return img_mode;
- }
- buf = msm_mctl_get_free_buf(p_mctl, img_mode);
- pp_buffer.buf_handle = (uint32_t)buf;
- if (copy_to_user((void *)argp,
- &pp_buffer,
- sizeof(struct msm_mctl_pp_frame_buffer))) {
- ERR_COPY_TO_USER();
- rc = -EFAULT;
- }
- break;
- }
- case MCTL_CMD_PUT_FRAME_BUFFER: {
- if (copy_from_user(&pp_buffer, pp_cmd->value,
- sizeof(pp_buffer)))
- return -EFAULT;
- img_mode = msm_mctl_pp_path_to_img_mode(pp_buffer.path);
- if (img_mode < 0) {
- pr_err("%s Invalid image mode\n", __func__);
- return img_mode;
- }
- buf = (struct msm_frame_buffer *)pp_buffer.buf_handle;
- msm_mctl_put_free_buf(p_mctl, img_mode, buf);
- break;
- }
case MCTL_CMD_DIVERT_FRAME_PP_PATH: {
struct msm_mctl_pp_divert_pp divert_pp;
if (copy_from_user(&divert_pp, pp_cmd->value,
- sizeof(divert_pp)))
+ sizeof(divert_pp))) {
+ ERR_COPY_FROM_USER();
return -EFAULT;
+ }
D("%s: PP_PATH, path=%d",
__func__, divert_pp.path);
spin_lock_irqsave(&p_mctl->pp_info.lock, flags);
@@ -490,17 +491,20 @@
struct msm_cam_media_controller *p_mctl,
void __user *arg)
{
- struct msm_cam_evt_divert_frame frame;
+ struct msm_cam_evt_divert_frame div_frame;
int image_mode, rc = 0;
struct msm_free_buf free_buf;
struct msm_cam_v4l2_dev_inst *pcam_inst;
+ struct msm_cam_buf_handle buf_handle;
memset(&free_buf, 0, sizeof(struct msm_free_buf));
- if (copy_from_user(&frame, arg,
- sizeof(struct msm_cam_evt_divert_frame)))
+ if (copy_from_user(&div_frame, arg,
+ sizeof(struct msm_cam_evt_divert_frame))) {
+ ERR_COPY_FROM_USER();
return -EFAULT;
+ }
- image_mode = frame.image_mode;
+ image_mode = div_frame.image_mode;
if (image_mode <= 0) {
pr_err("%s Invalid image mode %d", __func__, image_mode);
return -EINVAL;
@@ -511,17 +515,25 @@
pr_err("%s Instance already closed ", __func__);
return -EINVAL;
}
+ if (div_frame.frame.inst_handle) {
+ buf_handle.buf_lookup_type = BUF_LOOKUP_BY_INST_HANDLE;
+ buf_handle.inst_handle = div_frame.frame.inst_handle;
+ } else {
+ buf_handle.buf_lookup_type = BUF_LOOKUP_BY_IMG_MODE;
+ buf_handle.image_mode = image_mode;
+ }
rc = msm_mctl_reserve_free_buf(p_mctl, pcam_inst,
- image_mode, &free_buf);
+ &buf_handle, &free_buf);
if (rc == 0) {
- msm_mctl_pp_get_phy_addr(pcam_inst, free_buf.vb, &frame.frame);
- if (copy_to_user((void *)arg, &frame, sizeof(frame))) {
+ msm_mctl_pp_get_phy_addr(pcam_inst,
+ free_buf.vb, &div_frame.frame);
+ if (copy_to_user((void *)arg, &div_frame, sizeof(div_frame))) {
ERR_COPY_TO_USER();
rc = -EFAULT;
}
}
D("%s: reserve free buf got buffer %d from %p rc = %d, phy = 0x%x",
- __func__, frame.frame.buf_idx,
+ __func__, div_frame.frame.buf_idx,
pcam_inst, rc, free_buf.ch_paddr[0]);
return rc;
}
@@ -535,10 +547,13 @@
struct msm_pp_frame *frame;
int image_mode, rc = 0;
struct msm_free_buf free_buf;
+ struct msm_cam_buf_handle buf_handle;
if (copy_from_user(&div_frame, arg,
- sizeof(struct msm_cam_evt_divert_frame)))
+ sizeof(struct msm_cam_evt_divert_frame))) {
+ ERR_COPY_FROM_USER();
return -EFAULT;
+ }
image_mode = div_frame.image_mode;
if (image_mode < 0) {
@@ -551,15 +566,21 @@
else
free_buf.ch_paddr[0] = frame->sp.phy_addr;
- pcam_inst = msm_mctl_get_pcam_inst(p_mctl, image_mode);
+ if (div_frame.frame.inst_handle) {
+ buf_handle.buf_lookup_type = BUF_LOOKUP_BY_INST_HANDLE;
+ buf_handle.inst_handle = div_frame.frame.inst_handle;
+ } else {
+ buf_handle.buf_lookup_type = BUF_LOOKUP_BY_IMG_MODE;
+ buf_handle.image_mode = image_mode;
+ }
+ pcam_inst = msm_mctl_get_pcam_inst(p_mctl, &buf_handle);
if (!pcam_inst) {
pr_err("%s Invalid instance. Cannot release frame.\n",
__func__);
return -EINVAL;
}
- rc = msm_mctl_release_free_buf(p_mctl, pcam_inst,
- image_mode, &free_buf);
+ rc = msm_mctl_release_free_buf(p_mctl, pcam_inst, &free_buf);
D("%s: release free buf, rc = %d, phy = 0x%x",
__func__, rc, free_buf.ch_paddr[0]);
@@ -573,11 +594,13 @@
unsigned long flags;
spin_lock_irqsave(&p_mctl->pp_info.lock, flags);
if (copy_from_user(&p_mctl->pp_info.pp_key,
- arg, sizeof(p_mctl->pp_info.pp_key)))
+ arg, sizeof(p_mctl->pp_info.pp_key))) {
+ ERR_COPY_FROM_USER();
rc = -EFAULT;
- else
+ } else {
D("%s: mctl=0x%p, pp_key_setting=0x%x",
__func__, p_mctl, p_mctl->pp_info.pp_key);
+ }
spin_unlock_irqrestore(&p_mctl->pp_info.lock, flags);
return rc;
}
@@ -591,12 +614,24 @@
int dirty = 0;
struct msm_free_buf buf;
unsigned long flags;
+ struct msm_cam_buf_handle buf_handle;
- if (copy_from_user(&frame, arg, sizeof(frame)))
+ if (copy_from_user(&frame, arg, sizeof(frame))) {
+ ERR_COPY_FROM_USER();
return -EFAULT;
+ }
spin_lock_irqsave(&p_mctl->pp_info.lock, flags);
- image_mode = msm_mctl_pp_path_to_img_mode(frame.path);
+ if (frame.inst_handle) {
+ buf_handle.buf_lookup_type = BUF_LOOKUP_BY_INST_HANDLE;
+ buf_handle.inst_handle = frame.inst_handle;
+ image_mode = GET_IMG_MODE(frame.inst_handle);
+ } else {
+ buf_handle.buf_lookup_type = BUF_LOOKUP_BY_IMG_MODE;
+ buf_handle.image_mode =
+ msm_mctl_pp_path_to_img_mode(frame.path);
+ image_mode = buf_handle.image_mode;
+ }
if (image_mode < 0) {
pr_err("%s Invalid image mode\n", __func__);
return image_mode;
@@ -622,8 +657,7 @@
buf.ch_paddr[0] = frame.sp.phy_addr + frame.sp.y_off;
}
spin_unlock_irqrestore(&p_mctl->pp_info.lock, flags);
- /* here buf.addr is phy_addr */
- rc = msm_mctl_buf_done_pp(p_mctl, image_mode, &buf, dirty, 0);
+ rc = msm_mctl_buf_done_pp(p_mctl, &buf_handle, &buf, dirty, 0);
return rc;
}
@@ -636,10 +670,14 @@
int dirty = 0;
struct msm_free_buf buf;
unsigned long flags;
+ struct msm_cam_buf_handle buf_handle;
+
D("%s enter\n", __func__);
- if (copy_from_user(&frame, arg, sizeof(frame)))
+ if (copy_from_user(&frame, arg, sizeof(frame))) {
+ ERR_COPY_FROM_USER();
return -EFAULT;
+ }
spin_lock_irqsave(&p_mctl->pp_info.lock, flags);
D("%s Frame path: %d\n", __func__, frame.path);
@@ -665,6 +703,14 @@
goto err;
}
+ if (frame.inst_handle) {
+ buf_handle.buf_lookup_type = BUF_LOOKUP_BY_INST_HANDLE;
+ buf_handle.inst_handle = frame.inst_handle;
+ } else {
+ buf_handle.buf_lookup_type = BUF_LOOKUP_BY_IMG_MODE;
+ buf_handle.image_mode = image_mode;
+ }
+
if (frame.num_planes > 1)
buf.ch_paddr[0] = frame.mp[0].phy_addr +
frame.mp[0].data_offset;
@@ -673,7 +719,7 @@
spin_unlock_irqrestore(&p_mctl->pp_info.lock, flags);
D("%s Frame done id: %d\n", __func__, frame.frame_id);
- rc = msm_mctl_buf_done_pp(p_mctl, image_mode,
+ rc = msm_mctl_buf_done_pp(p_mctl, &buf_handle,
&buf, dirty, frame.node_type);
return rc;
err:
diff --git a/drivers/media/video/msm/msm_mem.c b/drivers/media/video/msm/msm_mem.c
index 5d412db..e2e9d1b 100644
--- a/drivers/media/video/msm/msm_mem.c
+++ b/drivers/media/video/msm/msm_mem.c
@@ -208,6 +208,9 @@
case MSM_PMEM_IHIST:
case MSM_PMEM_SKIN:
case MSM_PMEM_AEC_AWB:
+ case MSM_PMEM_BAYER_GRID:
+ case MSM_PMEM_BAYER_FOCUS:
+ case MSM_PMEM_BAYER_HIST:
rc = msm_pmem_table_add(ptype, pinfo, client);
break;
@@ -235,6 +238,9 @@
case MSM_PMEM_IHIST:
case MSM_PMEM_SKIN:
case MSM_PMEM_AEC_AWB:
+ case MSM_PMEM_BAYER_GRID:
+ case MSM_PMEM_BAYER_FOCUS:
+ case MSM_PMEM_BAYER_HIST:
hlist_for_each_entry_safe(region, node, n,
ptype, list) {
diff --git a/drivers/media/video/msm/msm_vpe.c b/drivers/media/video/msm/msm_vpe.c
index 2b58f44..5990ca7 100644
--- a/drivers/media/video/msm/msm_vpe.c
+++ b/drivers/media/video/msm/msm_vpe.c
@@ -545,7 +545,8 @@
return rc;
vpe_clk_failed:
- regulator_disable(vpe_ctrl->fs_vpe);
+ if (vpe_ctrl->fs_vpe)
+ regulator_disable(vpe_ctrl->fs_vpe);
vpe_fs_failed:
disable_irq(vpe_ctrl->vpeirq->start);
vpe_ctrl->state = VPE_STATE_IDLE;
@@ -603,10 +604,11 @@
static int msm_vpe_resource_init(void);
-int msm_vpe_subdev_init(struct v4l2_subdev *sd,
- struct msm_cam_media_controller *mctl)
+int msm_vpe_subdev_init(struct v4l2_subdev *sd)
{
int rc = 0;
+ struct msm_cam_media_controller *mctl;
+ mctl = v4l2_get_subdev_hostdata(sd);
D("%s:begin", __func__);
if (atomic_read(&vpe_init_done)) {
pr_err("%s: VPE has been initialized", __func__);
@@ -619,7 +621,6 @@
atomic_set(&vpe_init_done, 0);
return rc;
}
- v4l2_set_subdev_hostdata(sd, mctl);
spin_lock_init(&vpe_ctrl->lock);
D("%s:end", __func__);
return rc;
@@ -843,9 +844,7 @@
switch (cmd) {
case VIDIOC_MSM_VPE_INIT: {
- struct msm_cam_media_controller *mctl =
- (struct msm_cam_media_controller *)arg;
- msm_vpe_subdev_init(sd, mctl);
+ msm_vpe_subdev_init(sd);
break;
}
@@ -980,7 +979,7 @@
platform_set_drvdata(pdev, &vpe_ctrl->subdev);
media_entity_init(&vpe_ctrl->subdev.entity, 0, NULL, 0);
- vpe_ctrl->subdev.entity.type = MEDIA_ENT_T_DEVNODE_V4L;
+ vpe_ctrl->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
vpe_ctrl->subdev.entity.group_id = VPE_DEV;
vpe_ctrl->subdev.entity.name = vpe_ctrl->subdev.name;
diff --git a/drivers/media/video/msm/msm_vpe.h b/drivers/media/video/msm/msm_vpe.h
index 5cf0309..6516ea1 100644
--- a/drivers/media/video/msm/msm_vpe.h
+++ b/drivers/media/video/msm/msm_vpe.h
@@ -180,14 +180,5 @@
int32_t phase_step_y;
};
-#define VIDIOC_MSM_VPE_INIT \
- _IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_cam_media_controller *)
-
-#define VIDIOC_MSM_VPE_RELEASE \
- _IOWR('V', BASE_VIDIOC_PRIVATE + 16, struct msm_cam_media_controller *)
-
-#define VIDIOC_MSM_VPE_CFG \
- _IOWR('V', BASE_VIDIOC_PRIVATE + 17, struct msm_mctl_pp_params *)
-
#endif /*_MSM_VPE_H_*/
diff --git a/drivers/media/video/msm/sensors/msm_sensor.c b/drivers/media/video/msm/sensors/msm_sensor.c
index be1efe0..f687573 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.c
+++ b/drivers/media/video/msm/sensors/msm_sensor.c
@@ -284,7 +284,6 @@
int update_type, int res)
{
int32_t rc = 0;
-
s_ctrl->func_tbl->sensor_stop_stream(s_ctrl);
msleep(30);
if (update_type == MSM_SENSOR_REG_INIT) {
@@ -606,6 +605,781 @@
return 0;
}
+static int32_t msm_sensor_init_flash_data(struct device_node *of_node,
+ struct msm_camera_sensor_info *sensordata)
+{
+ int32_t rc = 0;
+ uint32_t val = 0;
+
+ sensordata->flash_data = kzalloc(sizeof(
+ struct msm_camera_sensor_flash_data), GFP_KERNEL);
+ if (!sensordata->flash_data) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32(of_node, "flash_type", &val);
+ CDBG("%s flash_type %d, rc %d\n", __func__, val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ sensordata->flash_data->flash_type = val;
+ return rc;
+ERROR:
+ kfree(sensordata->flash_data);
+ return rc;
+}
+
+static int32_t msm_sensor_init_vreg_data(struct device_node *of_node,
+ struct msm_camera_sensor_platform_info *pinfo)
+{
+ int32_t rc = 0, i = 0;
+ uint32_t count = 0;
+ uint32_t *val_array = NULL;
+
+ count = of_property_count_strings(of_node, "cam_vreg_name");
+ CDBG("%s cam_vreg_name count %d\n", __func__, count);
+
+ if (!count)
+ return 0;
+
+ pinfo->cam_vreg = kzalloc(sizeof(struct camera_vreg_t) * count,
+ GFP_KERNEL);
+ if (!pinfo->cam_vreg) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ pinfo->num_vreg = count;
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node, "cam_vreg_name", i,
+ &pinfo->cam_vreg[i].reg_name);
+ CDBG("%s reg_name[%d] = %s\n", __func__, i,
+ pinfo->cam_vreg[i].reg_name);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR1;
+ }
+ }
+
+ val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+ if (!val_array) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+
+ rc = of_property_read_u32_array(of_node, "cam_vreg_type", val_array,
+ count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ pinfo->cam_vreg[i].type = val_array[i];
+ CDBG("%s cam_vreg[%d].type = %d\n", __func__, i,
+ pinfo->cam_vreg[i].type);
+ }
+
+ rc = of_property_read_u32_array(of_node, "cam_vreg_min_voltage",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ pinfo->cam_vreg[i].min_voltage = val_array[i];
+ CDBG("%s cam_vreg[%d].min_voltage = %d\n", __func__,
+ i, pinfo->cam_vreg[i].min_voltage);
+ }
+
+ rc = of_property_read_u32_array(of_node, "cam_vreg_max_voltage",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ pinfo->cam_vreg[i].max_voltage = val_array[i];
+ CDBG("%s cam_vreg[%d].max_voltage = %d\n", __func__,
+ i, pinfo->cam_vreg[i].max_voltage);
+ }
+
+ rc = of_property_read_u32_array(of_node, "cam_vreg_op_mode", val_array,
+ count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ pinfo->cam_vreg[i].op_mode = val_array[i];
+ CDBG("%s cam_vreg[%d].op_mode = %d\n", __func__, i,
+ pinfo->cam_vreg[i].op_mode);
+ }
+
+ kfree(val_array);
+ return rc;
+ERROR2:
+ kfree(val_array);
+ERROR1:
+ kfree(pinfo->cam_vreg);
+ pinfo->num_vreg = 0;
+ return rc;
+}
+
+static int32_t msm_sensor_init_gpio_common_tbl_data(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf)
+{
+ int32_t rc = 0, i = 0;
+ uint32_t count = 0;
+ uint32_t *val_array = NULL;
+
+ if (!of_get_property(of_node, "gpio_common_tbl_num", &count))
+ return 0;
+
+ count /= sizeof(uint32_t);
+
+ if (!count)
+ return 0;
+
+ val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+ if (!val_array) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ gconf->cam_gpio_common_tbl = kzalloc(sizeof(struct gpio) * count,
+ GFP_KERNEL);
+ if (!gconf->cam_gpio_common_tbl) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+ gconf->cam_gpio_common_tbl_size = count;
+
+ rc = of_property_read_u32_array(of_node, "gpio_common_tbl_num",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ gconf->cam_gpio_common_tbl[i].gpio = val_array[i];
+ CDBG("%s cam_gpio_common_tbl[%d].gpio = %d\n", __func__, i,
+ gconf->cam_gpio_common_tbl[i].gpio);
+ }
+
+ rc = of_property_read_u32_array(of_node, "gpio_common_tbl_flags",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ gconf->cam_gpio_common_tbl[i].flags = val_array[i];
+ CDBG("%s cam_gpio_common_tbl[%d].flags = %ld\n", __func__, i,
+ gconf->cam_gpio_common_tbl[i].flags);
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "gpio_common_tbl_label", i,
+ &gconf->cam_gpio_common_tbl[i].label);
+ CDBG("%s cam_gpio_common_tbl[%d].label = %s\n", __func__, i,
+ gconf->cam_gpio_common_tbl[i].label);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ }
+
+ kfree(val_array);
+ return rc;
+
+ERROR2:
+ kfree(gconf->cam_gpio_common_tbl);
+ERROR1:
+ kfree(val_array);
+ gconf->cam_gpio_common_tbl_size = 0;
+ return rc;
+}
+
+static int32_t msm_sensor_init_gpio_req_tbl_data(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf)
+{
+ int32_t rc = 0, i = 0;
+ uint32_t count = 0;
+ uint32_t *val_array = NULL;
+
+ if (!of_get_property(of_node, "gpio_req_tbl_num", &count))
+ return 0;
+
+ count /= sizeof(uint32_t);
+
+ if (!count)
+ return 0;
+
+ val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+ if (!val_array) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ gconf->cam_gpio_req_tbl = kzalloc(sizeof(struct gpio) * count,
+ GFP_KERNEL);
+ if (!gconf->cam_gpio_req_tbl) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+ gconf->cam_gpio_req_tbl_size = count;
+
+ rc = of_property_read_u32_array(of_node, "gpio_req_tbl_num",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ gconf->cam_gpio_req_tbl[i].gpio = val_array[i];
+ CDBG("%s cam_gpio_req_tbl[%d].gpio = %d\n", __func__, i,
+ gconf->cam_gpio_req_tbl[i].gpio);
+ }
+
+ rc = of_property_read_u32_array(of_node, "gpio_req_tbl_flags",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ gconf->cam_gpio_req_tbl[i].flags = val_array[i];
+ CDBG("%s cam_gpio_req_tbl[%d].flags = %ld\n", __func__, i,
+ gconf->cam_gpio_req_tbl[i].flags);
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "gpio_req_tbl_label", i,
+ &gconf->cam_gpio_req_tbl[i].label);
+ CDBG("%s cam_gpio_req_tbl[%d].label = %s\n", __func__, i,
+ gconf->cam_gpio_req_tbl[i].label);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ }
+
+ kfree(val_array);
+ return rc;
+
+ERROR2:
+ kfree(gconf->cam_gpio_req_tbl);
+ERROR1:
+ kfree(val_array);
+ gconf->cam_gpio_req_tbl_size = 0;
+ return rc;
+}
+
+static int32_t msm_sensor_init_gpio_set_tbl_data(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf)
+{
+ int32_t rc = 0, i = 0;
+ uint32_t count = 0;
+ uint32_t *val_array = NULL;
+
+ if (!of_get_property(of_node, "gpio_set_tbl_num", &count))
+ return 0;
+
+ count /= sizeof(uint32_t);
+
+ if (!count)
+ return 0;
+
+ val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+ if (!val_array) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ gconf->cam_gpio_set_tbl = kzalloc(sizeof(struct msm_gpio_set_tbl) *
+ count, GFP_KERNEL);
+ if (!gconf->cam_gpio_set_tbl) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+ gconf->cam_gpio_set_tbl_size = count;
+
+ rc = of_property_read_u32_array(of_node, "gpio_set_tbl_num",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ gconf->cam_gpio_set_tbl[i].gpio = val_array[i];
+ CDBG("%s cam_gpio_set_tbl[%d].gpio = %d\n", __func__, i,
+ gconf->cam_gpio_set_tbl[i].gpio);
+ }
+
+ rc = of_property_read_u32_array(of_node, "gpio_set_tbl_flags",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ gconf->cam_gpio_set_tbl[i].flags = val_array[i];
+ CDBG("%s cam_gpio_set_tbl[%d].flags = %ld\n", __func__, i,
+ gconf->cam_gpio_set_tbl[i].flags);
+ }
+
+ rc = of_property_read_u32_array(of_node, "gpio_set_tbl_delay",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ gconf->cam_gpio_set_tbl[i].delay = val_array[i];
+ CDBG("%s cam_gpio_set_tbl[%d].delay = %d\n", __func__, i,
+ gconf->cam_gpio_set_tbl[i].delay);
+ }
+
+ kfree(val_array);
+ return rc;
+
+ERROR2:
+ kfree(gconf->cam_gpio_set_tbl);
+ERROR1:
+ kfree(val_array);
+ gconf->cam_gpio_set_tbl_size = 0;
+ return rc;
+}
+
+static int32_t msm_sensor_init_gpio_tlmm_tbl_data(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf)
+{
+ int32_t rc = 0, i = 0;
+ uint32_t count = 0;
+ uint32_t *val_array = NULL;
+ struct gpio_tlmm_cfg *tlmm_cfg = NULL;
+
+ if (!of_get_property(of_node, "gpio_tlmm_table_num", &count))
+ return 0;
+
+ count /= sizeof(uint32_t);
+
+ if (!count)
+ return 0;
+
+ val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+ if (!val_array) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ tlmm_cfg = kzalloc(sizeof(struct gpio_tlmm_cfg) * count, GFP_KERNEL);
+ if (!tlmm_cfg) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+
+ gconf->camera_off_table = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+ if (!gconf->camera_off_table) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR2;
+ }
+ gconf->camera_off_table_size = count;
+
+ gconf->camera_on_table = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+ if (!gconf->camera_on_table) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR3;
+ }
+ gconf->camera_on_table_size = count;
+
+ rc = of_property_read_u32_array(of_node, "gpio_tlmm_table_num",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR4;
+ }
+ for (i = 0; i < count; i++) {
+ tlmm_cfg[i].gpio = val_array[i];
+ CDBG("%s tlmm_cfg[%d].gpio = %d\n", __func__, i,
+ tlmm_cfg[i].gpio);
+ }
+
+ rc = of_property_read_u32_array(of_node, "gpio_tlmm_table_dir",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR4;
+ }
+ for (i = 0; i < count; i++) {
+ tlmm_cfg[i].dir = val_array[i];
+ CDBG("%s tlmm_cfg[%d].dir = %d\n", __func__, i,
+ tlmm_cfg[i].dir);
+ }
+
+ rc = of_property_read_u32_array(of_node, "gpio_tlmm_table_pull",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR4;
+ }
+ for (i = 0; i < count; i++) {
+ tlmm_cfg[i].pull = val_array[i];
+ CDBG("%s tlmm_cfg[%d].pull = %d\n", __func__, i,
+ tlmm_cfg[i].pull);
+ }
+
+ rc = of_property_read_u32_array(of_node, "gpio_tlmm_table_drvstr",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR4;
+ }
+ for (i = 0; i < count; i++) {
+ tlmm_cfg[i].drvstr = val_array[i];
+ CDBG("%s tlmm_cfg[%d].drvstr = %d\n", __func__, i,
+ tlmm_cfg[i].drvstr);
+ }
+
+ for (i = 0; i < count; i++) {
+ gconf->camera_off_table[i] = GPIO_CFG(tlmm_cfg[i].gpio,
+ 0, tlmm_cfg[i].dir, tlmm_cfg[i].pull,
+ tlmm_cfg[i].drvstr);
+ gconf->camera_on_table[i] = GPIO_CFG(tlmm_cfg[i].gpio,
+ 1, tlmm_cfg[i].dir, tlmm_cfg[i].pull,
+ tlmm_cfg[i].drvstr);
+ }
+
+ kfree(tlmm_cfg);
+ kfree(val_array);
+ return rc;
+
+ERROR4:
+ kfree(gconf->camera_on_table);
+ERROR3:
+ kfree(gconf->camera_off_table);
+ERROR2:
+ kfree(tlmm_cfg);
+ERROR1:
+ kfree(val_array);
+ gconf->camera_off_table_size = 0;
+ gconf->camera_on_table_size = 0;
+ return rc;
+}
+
+static int32_t msm_sensor_init_csi_data(struct device_node *of_node,
+ struct msm_camera_sensor_info *sensordata)
+{
+ int32_t rc = 0, i = 0;
+ uint32_t count = 0, val = 0;
+ uint32_t *val_array = NULL;
+ struct msm_camera_sensor_platform_info *pinfo =
+ sensordata->sensor_platform_info;
+
+ rc = of_property_read_u32(of_node, "csi_if", &count);
+ CDBG("%s csi_if %d, rc %d\n", __func__, count, rc);
+ if (rc < 0 || !count)
+ return rc;
+ sensordata->csi_if = count;
+
+ sensordata->pdata = kzalloc(sizeof(
+ struct msm_camera_device_platform_data) * count, GFP_KERNEL);
+ if (!sensordata->pdata) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+ if (!val_array) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+
+ rc = of_property_read_u32_array(of_node, "csid_core", val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ sensordata->pdata[i].csid_core = val_array[i];
+ CDBG("%s csid_core[%d].csid_core = %d\n", __func__, i,
+ sensordata->pdata[i].csid_core);
+ }
+
+ rc = of_property_read_u32_array(of_node, "is_vpe", val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ sensordata->pdata[i].is_vpe = val_array[i];
+ CDBG("%s csid_core[%d].is_vpe = %d\n", __func__, i,
+ sensordata->pdata[i].is_vpe);
+ }
+
+ pinfo->csi_lane_params = kzalloc(
+ sizeof(struct msm_camera_csi_lane_params), GFP_KERNEL);
+ if (!pinfo->csi_lane_params) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR2;
+ }
+
+ rc = of_property_read_u32(of_node, "csi_lane_assign", &val);
+ CDBG("%s csi_lane_assign %x, rc %d\n", __func__, val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR3;
+ }
+ pinfo->csi_lane_params->csi_lane_assign = val;
+
+ rc = of_property_read_u32(of_node, "csi_lane_mask", &val);
+ CDBG("%s csi_lane_mask %x, rc %d\n", __func__, val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR3;
+ }
+ pinfo->csi_lane_params->csi_lane_mask = val;
+
+ kfree(val_array);
+ return rc;
+ERROR3:
+ kfree(pinfo->csi_lane_params);
+ERROR2:
+ kfree(val_array);
+ERROR1:
+ kfree(sensordata->pdata);
+ sensordata->csi_if = 0;
+ return rc;
+}
+static int32_t msm_sensor_init_actuator_data(struct device_node *of_node,
+ struct msm_camera_sensor_info *sensordata)
+{
+ int32_t rc = 0;
+ uint32_t val = 0;
+
+ rc = of_property_read_u32(of_node, "actuator_cam_name", &val);
+ CDBG("%s actuator_cam_name %d, rc %d\n", __func__, val, rc);
+ if (rc < 0)
+ return 0;
+
+ sensordata->actuator_info = kzalloc(sizeof(struct msm_actuator_info),
+ GFP_KERNEL);
+ if (!sensordata->actuator_info) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR;
+ }
+
+ sensordata->actuator_info->cam_name = val;
+
+ rc = of_property_read_u32(of_node, "actuator_vcm_pwd", &val);
+ CDBG("%s actuator_vcm_pwd %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ sensordata->actuator_info->vcm_pwd = val;
+
+ rc = of_property_read_u32(of_node, "actuator_vcm_enable", &val);
+ CDBG("%s actuator_vcm_enable %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ sensordata->actuator_info->vcm_enable = val;
+
+ return 0;
+ERROR:
+ return rc;
+}
+
+static int32_t msm_sensor_init_sensor_data(struct platform_device *pdev,
+ struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ uint32_t val = 0;
+ struct device_node *of_node = pdev->dev.of_node;
+ struct msm_camera_sensor_platform_info *pinfo = NULL;
+ struct msm_camera_gpio_conf *gconf = NULL;
+ struct msm_camera_sensor_info *sensordata = NULL;
+
+ s_ctrl->sensordata = kzalloc(sizeof(struct msm_camera_sensor_info),
+ GFP_KERNEL);
+ if (!s_ctrl->sensordata) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ sensordata = s_ctrl->sensordata;
+ rc = of_property_read_string(of_node, "sensor_name",
+ &sensordata->sensor_name);
+ CDBG("%s sensor_name %s, rc %d\n", __func__,
+ sensordata->sensor_name, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR1;
+ }
+
+ rc = of_property_read_u32(of_node, "camera_type", &val);
+ CDBG("%s camera_type %d, rc %d\n", __func__, val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR1;
+ }
+ sensordata->camera_type = val;
+
+ rc = of_property_read_u32(of_node, "sensor_type", &val);
+ CDBG("%s sensor_type %d, rc %d\n", __func__, val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR1;
+ }
+ sensordata->sensor_type = val;
+
+ rc = msm_sensor_init_flash_data(of_node, sensordata);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR1;
+ }
+
+ sensordata->sensor_platform_info = kzalloc(sizeof(
+ struct msm_camera_sensor_platform_info), GFP_KERNEL);
+ if (!sensordata->sensor_platform_info) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+
+ pinfo = sensordata->sensor_platform_info;
+
+ rc = of_property_read_u32(of_node, "mount_angle", &pinfo->mount_angle);
+ CDBG("%s mount_angle %d, rc %d\n", __func__, pinfo->mount_angle, rc);
+ if (rc < 0) {
+ /* Set default mount angle */
+ pinfo->mount_angle = 0;
+ rc = 0;
+ }
+
+ rc = msm_sensor_init_csi_data(of_node, sensordata);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+
+ rc = msm_sensor_init_vreg_data(of_node, pinfo);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR3;
+ }
+
+ pinfo->gpio_conf = kzalloc(sizeof(struct msm_camera_gpio_conf),
+ GFP_KERNEL);
+ if (!pinfo->gpio_conf) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR4;
+ }
+ gconf = pinfo->gpio_conf;
+ rc = of_property_read_u32(of_node, "gpio_no_mux", &gconf->gpio_no_mux);
+ CDBG("%s gconf->gpio_no_mux %d, rc %d\n", __func__,
+ gconf->gpio_no_mux, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR5;
+ }
+
+ rc = msm_sensor_init_gpio_common_tbl_data(of_node, gconf);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR5;
+ }
+
+ rc = msm_sensor_init_gpio_req_tbl_data(of_node, gconf);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR6;
+ }
+
+ rc = msm_sensor_init_gpio_set_tbl_data(of_node, gconf);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR7;
+ }
+
+ rc = msm_sensor_init_gpio_tlmm_tbl_data(of_node, gconf);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR8;
+ }
+
+ rc = msm_sensor_init_actuator_data(of_node, sensordata);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR9;
+ }
+
+ return rc;
+
+ERROR9:
+ kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+ camera_on_table);
+ kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+ camera_off_table);
+ERROR8:
+ kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+ cam_gpio_set_tbl);
+ERROR7:
+ kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+ cam_gpio_req_tbl);
+ERROR6:
+ kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+ cam_gpio_common_tbl);
+ERROR5:
+ kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf);
+ERROR4:
+ kfree(s_ctrl->sensordata->sensor_platform_info->cam_vreg);
+ERROR3:
+ kfree(s_ctrl->sensordata->sensor_platform_info->csi_lane_params);
+ kfree(s_ctrl->sensordata->pdata);
+ERROR2:
+ kfree(s_ctrl->sensordata->sensor_platform_info);
+ kfree(s_ctrl->sensordata->flash_data);
+ERROR1:
+ kfree(s_ctrl->sensordata);
+ return rc;
+}
+
+int32_t msm_sensor_free_sensor_data(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ if (!s_ctrl->pdev)
+ return 0;
+ kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+ camera_on_table);
+ kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+ camera_off_table);
+ kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+ cam_gpio_set_tbl);
+ kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+ cam_gpio_req_tbl);
+ kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+ cam_gpio_common_tbl);
+ kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf);
+ kfree(s_ctrl->sensordata->sensor_platform_info->cam_vreg);
+ kfree(s_ctrl->sensordata->sensor_platform_info->csi_lane_params);
+ kfree(s_ctrl->sensordata->pdata);
+ kfree(s_ctrl->sensordata->sensor_platform_info);
+ kfree(s_ctrl->sensordata->flash_data);
+ kfree(s_ctrl->sensordata);
+ return 0;
+}
+
int32_t msm_sensor_power_up(struct msm_sensor_ctrl_t *s_ctrl)
{
int32_t rc = 0;
@@ -667,8 +1441,21 @@
data->sensor_platform_info->i2c_conf->use_i2c_mux)
msm_sensor_enable_i2c_mux(data->sensor_platform_info->i2c_conf);
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ rc = msm_sensor_cci_util(s_ctrl->sensor_i2c_client,
+ MSM_CCI_INIT);
+ if (rc < 0) {
+ pr_err("%s cci_init failed\n", __func__);
+ goto cci_init_failed;
+ }
+ }
return rc;
+cci_init_failed:
+ if (data->sensor_platform_info->i2c_conf &&
+ data->sensor_platform_info->i2c_conf->use_i2c_mux)
+ msm_sensor_disable_i2c_mux(
+ data->sensor_platform_info->i2c_conf);
enable_clk_failed:
msm_camera_config_gpio_table(data, 0);
config_gpio_failed:
@@ -693,6 +1480,11 @@
{
struct msm_camera_sensor_info *data = s_ctrl->sensordata;
CDBG("%s\n", __func__);
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ msm_sensor_cci_util(s_ctrl->sensor_i2c_client,
+ MSM_CCI_RELEASE);
+ }
+
if (data->sensor_platform_info->i2c_conf &&
data->sensor_platform_info->i2c_conf->use_i2c_mux)
msm_sensor_disable_i2c_mux(
@@ -730,7 +1522,8 @@
return rc;
}
- CDBG("msm_sensor id: %d\n", chipid);
+ CDBG("%s msm_sensor id: %x, exp id: %x\n", __func__, chipid,
+ s_ctrl->sensor_id_info->sensor_id);
if (chipid != s_ctrl->sensor_id_info->sensor_id) {
pr_err("msm_sensor_match_id chip id doesnot match\n");
return -ENODEV;
@@ -792,8 +1585,15 @@
sizeof(s_ctrl->sensor_v4l2_subdev.name), "%s", id->name);
v4l2_i2c_subdev_init(&s_ctrl->sensor_v4l2_subdev, client,
s_ctrl->sensor_v4l2_subdev_ops);
-
+ s_ctrl->sensor_v4l2_subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ media_entity_init(&s_ctrl->sensor_v4l2_subdev.entity, 0, NULL, 0);
+ s_ctrl->sensor_v4l2_subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ s_ctrl->sensor_v4l2_subdev.entity.group_id = SENSOR_DEV;
+ s_ctrl->sensor_v4l2_subdev.entity.name =
+ s_ctrl->sensor_v4l2_subdev.name;
msm_sensor_register(&s_ctrl->sensor_v4l2_subdev);
+ s_ctrl->sensor_v4l2_subdev.entity.revision =
+ s_ctrl->sensor_v4l2_subdev.devnode->num;
goto power_down;
probe_fail:
pr_err("%s %s_i2c_probe failed\n", __func__, client->name);
@@ -804,6 +1604,90 @@
return rc;
}
+static int msm_sensor_subdev_match_core(struct device *dev, void *data)
+{
+ int core_index = (int)data;
+ struct platform_device *pdev = to_platform_device(dev);
+ CDBG("%s cci pdev %p\n", __func__, pdev);
+ if (pdev->id == core_index)
+ return 1;
+ else
+ return 0;
+}
+
+int32_t msm_sensor_platform_probe(struct platform_device *pdev, void *data)
+{
+ int32_t rc = 0;
+ struct msm_sensor_ctrl_t *s_ctrl = (struct msm_sensor_ctrl_t *)data;
+ struct device_driver *driver;
+ struct device *dev;
+ s_ctrl->pdev = pdev;
+ CDBG("%s called data %p\n", __func__, data);
+ if (pdev->dev.of_node) {
+ rc = msm_sensor_init_sensor_data(pdev, s_ctrl);
+ if (rc < 0) {
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ return rc;
+ }
+ }
+ s_ctrl->sensor_i2c_client->cci_client = kzalloc(sizeof(
+ struct msm_camera_cci_client), GFP_KERNEL);
+ if (!s_ctrl->sensor_i2c_client->cci_client) {
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ driver = driver_find(MSM_CCI_DRV_NAME, &platform_bus_type);
+ if (!driver) {
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ dev = driver_find_device(driver, NULL, 0,
+ msm_sensor_subdev_match_core);
+ if (!dev) {
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ return -ENODEV;
+ }
+ s_ctrl->sensor_i2c_client->cci_client->cci_subdev =
+ dev_get_drvdata(dev);
+ CDBG("%s sd %p\n", __func__,
+ s_ctrl->sensor_i2c_client->cci_client->cci_subdev);
+ s_ctrl->sensor_i2c_client->cci_client->cci_i2c_master = MASTER_0;
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ s_ctrl->sensor_i2c_addr >> 1;
+ s_ctrl->sensor_i2c_client->cci_client->retries = 0;
+ s_ctrl->sensor_i2c_client->cci_client->id_map = 0;
+
+ rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s %s power up failed\n", __func__,
+ pdev->id_entry->name);
+ return rc;
+ }
+
+ if (s_ctrl->func_tbl->sensor_match_id)
+ rc = s_ctrl->func_tbl->sensor_match_id(s_ctrl);
+ else
+ rc = msm_sensor_match_id(s_ctrl);
+ if (rc < 0)
+ goto probe_fail;
+
+ v4l2_subdev_init(&s_ctrl->sensor_v4l2_subdev,
+ s_ctrl->sensor_v4l2_subdev_ops);
+ snprintf(s_ctrl->sensor_v4l2_subdev.name,
+ sizeof(s_ctrl->sensor_v4l2_subdev.name), "%s",
+ s_ctrl->sensordata->sensor_name);
+ v4l2_set_subdevdata(&s_ctrl->sensor_v4l2_subdev, pdev);
+ msm_sensor_register(&s_ctrl->sensor_v4l2_subdev);
+
+ goto power_down;
+probe_fail:
+ pr_err("%s %s probe failed\n", __func__, pdev->id_entry->name);
+power_down:
+ s_ctrl->func_tbl->sensor_power_down(s_ctrl);
+ return rc;
+}
+
int32_t msm_sensor_power(struct v4l2_subdev *sd, int on)
{
int rc = 0;
diff --git a/drivers/media/video/msm/sensors/msm_sensor.h b/drivers/media/video/msm/sensors/msm_sensor.h
index a3ddaa7..dc394e1 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.h
+++ b/drivers/media/video/msm/sensors/msm_sensor.h
@@ -22,6 +22,9 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/gpio.h>
#include <mach/camera.h>
#include <mach/gpio.h>
#include <media/msm_camera.h>
@@ -35,6 +38,13 @@
#define MSM_SENSOR_MCLK_16HZ 16000000
#define MSM_SENSOR_MCLK_24HZ 24000000
+struct gpio_tlmm_cfg {
+ uint32_t gpio;
+ uint32_t dir;
+ uint32_t pull;
+ uint32_t drvstr;
+};
+
enum msm_sensor_reg_update {
/* Sensor egisters that need to be updated during initialization */
MSM_SENSOR_REG_INIT,
@@ -151,6 +161,7 @@
struct i2c_client *msm_sensor_client;
struct i2c_driver *sensor_i2c_driver;
struct msm_camera_i2c_client *sensor_i2c_client;
+ struct platform_device *pdev;
uint16_t sensor_i2c_addr;
struct msm_sensor_output_reg_addr_t *sensor_output_reg_addr;
@@ -212,6 +223,9 @@
int32_t msm_sensor_match_id(struct msm_sensor_ctrl_t *s_ctrl);
int msm_sensor_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id);
+
+int32_t msm_sensor_platform_probe(struct platform_device *pdev, void *data);
+
int32_t msm_sensor_power(struct v4l2_subdev *sd, int on);
int32_t msm_sensor_v4l2_s_ctrl(struct v4l2_subdev *sd,
@@ -256,6 +270,8 @@
int32_t msm_sensor_get_csi_params(struct msm_sensor_ctrl_t *s_ctrl,
struct csi_lane_params_t *sensor_output_info);
+int32_t msm_sensor_free_sensor_data(struct msm_sensor_ctrl_t *s_ctrl);
+
struct msm_sensor_ctrl_t *get_sctrl(struct v4l2_subdev *sd);
#define VIDIOC_MSM_SENSOR_CFG \
diff --git a/drivers/media/video/msm/sensors/ov2720.c b/drivers/media/video/msm/sensors/ov2720.c
index e4c5061..e08cd0a 100644
--- a/drivers/media/video/msm/sensors/ov2720.c
+++ b/drivers/media/video/msm/sensors/ov2720.c
@@ -749,11 +749,51 @@
.addr_type = MSM_CAMERA_I2C_WORD_ADDR,
};
+
+static const struct of_device_id ov2720_dt_match[] = {
+ {.compatible = "qcom,ov2720", .data = &ov2720_s_ctrl},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, ov2720_dt_match);
+
+static struct platform_driver ov2720_platform_driver = {
+ .driver = {
+ .name = "qcom,ov2720",
+ .owner = THIS_MODULE,
+ .of_match_table = ov2720_dt_match,
+ },
+};
+
+static int32_t ov2720_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ const struct of_device_id *match;
+ match = of_match_device(ov2720_dt_match, &pdev->dev);
+ rc = msm_sensor_platform_probe(pdev, match->data);
+ return rc;
+}
+
static int __init msm_sensor_init_module(void)
{
+ int32_t rc = 0;
+ rc = platform_driver_probe(&ov2720_platform_driver,
+ ov2720_platform_probe);
+ if (!rc)
+ return rc;
return i2c_add_driver(&ov2720_i2c_driver);
}
+static void __exit msm_sensor_exit_module(void)
+{
+ if (ov2720_s_ctrl.pdev) {
+ msm_sensor_free_sensor_data(&ov2720_s_ctrl);
+ platform_driver_unregister(&ov2720_platform_driver);
+ } else
+ i2c_del_driver(&ov2720_i2c_driver);
+ return;
+}
+
static struct v4l2_subdev_core_ops ov2720_subdev_core_ops = {
.ioctl = msm_sensor_subdev_ioctl,
.s_power = msm_sensor_power,
@@ -824,6 +864,7 @@
};
module_init(msm_sensor_init_module);
+module_exit(msm_sensor_exit_module);
MODULE_DESCRIPTION("Omnivision 2MP Bayer sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/sensors/s5k3l1yx.c b/drivers/media/video/msm/sensors/s5k3l1yx.c
index c24da00..64b004e 100644
--- a/drivers/media/video/msm/sensors/s5k3l1yx.c
+++ b/drivers/media/video/msm/sensors/s5k3l1yx.c
@@ -408,7 +408,7 @@
{0x3C13, 0xC0},
{0x3C14, 0x70},
{0x3C15, 0x80},
- {0x3C20, 0x00},
+ {0x3C20, 0x04},
{0x3C23, 0x03},
{0x3C24, 0x00},
{0x3C50, 0x72},
@@ -618,11 +618,50 @@
.addr_type = MSM_CAMERA_I2C_WORD_ADDR,
};
+static const struct of_device_id s5k3l1yx_dt_match[] = {
+ {.compatible = "qcom,s5k3l1yx", .data = &s5k3l1yx_s_ctrl},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, s5k3l1yx_dt_match);
+
+static struct platform_driver s5k3l1yx_platform_driver = {
+ .driver = {
+ .name = "qcom,s5k3l1yx",
+ .owner = THIS_MODULE,
+ .of_match_table = s5k3l1yx_dt_match,
+ },
+};
+
+static int32_t s5k3l1yx_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ const struct of_device_id *match;
+ match = of_match_device(s5k3l1yx_dt_match, &pdev->dev);
+ rc = msm_sensor_platform_probe(pdev, match->data);
+ return rc;
+}
+
static int __init msm_sensor_init_module(void)
{
+ int32_t rc = 0;
+ rc = platform_driver_probe(&s5k3l1yx_platform_driver,
+ s5k3l1yx_platform_probe);
+ if (!rc)
+ return rc;
return i2c_add_driver(&s5k3l1yx_i2c_driver);
}
+static void __exit msm_sensor_exit_module(void)
+{
+ if (s5k3l1yx_s_ctrl.pdev) {
+ msm_sensor_free_sensor_data(&s5k3l1yx_s_ctrl);
+ platform_driver_unregister(&s5k3l1yx_platform_driver);
+ } else
+ i2c_del_driver(&s5k3l1yx_i2c_driver);
+ return;
+}
+
static struct v4l2_subdev_core_ops s5k3l1yx_subdev_core_ops = {
.ioctl = msm_sensor_subdev_ioctl,
.s_power = msm_sensor_power,
@@ -693,5 +732,6 @@
};
module_init(msm_sensor_init_module);
+module_exit(msm_sensor_exit_module);
MODULE_DESCRIPTION("Samsung 12MP Bayer sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/server/msm_cam_server.c b/drivers/media/video/msm/server/msm_cam_server.c
index 05f3c4a..2cc61a1 100644
--- a/drivers/media/video/msm/server/msm_cam_server.c
+++ b/drivers/media/video/msm/server/msm_cam_server.c
@@ -18,7 +18,7 @@
#include "msm_ispif.h"
#include "msm_sensor.h"
#include "msm_actuator.h"
-#include "msm_vfe32.h"
+#include "msm_csi_register.h"
#ifdef CONFIG_MSM_CAMERA_DEBUG
#define D(fmt, args...) pr_debug("msm: " fmt, ##args)
@@ -95,6 +95,71 @@
return -EINVAL;
}
+void msm_setup_v4l2_event_queue(struct v4l2_fh *eventHandle,
+ struct video_device *pvdev)
+{
+ v4l2_fh_init(eventHandle, pvdev);
+ v4l2_fh_add(eventHandle);
+}
+
+void msm_destroy_v4l2_event_queue(struct v4l2_fh *eventHandle)
+{
+ v4l2_fh_del(eventHandle);
+ v4l2_fh_exit(eventHandle);
+}
+
+int msm_cam_server_config_interface_map(u32 extendedmode, uint32_t mctl_handle)
+{
+ int i = 0;
+ int rc = 0;
+ int old_handle;
+ int interface;
+
+ switch (extendedmode) {
+ case MSM_V4L2_EXT_CAPTURE_MODE_RDI:
+ interface = RDI_0;
+ break;
+ case MSM_V4L2_EXT_CAPTURE_MODE_RDI1:
+ interface = RDI_1;
+ break;
+ case MSM_V4L2_EXT_CAPTURE_MODE_RDI2:
+ interface = RDI_2;
+ break;
+ default:
+ interface = PIX_0;
+ break;
+ }
+ for (i = 0; i < INTF_MAX; i++) {
+ if (g_server_dev.interface_map_table[i].interface ==
+ interface) {
+ old_handle =
+ g_server_dev.interface_map_table[i].mctl_handle;
+ if (old_handle == 0) {
+ g_server_dev.interface_map_table[i].mctl_handle
+ = mctl_handle;
+ } else if (old_handle != mctl_handle) {
+ pr_err("%s: interface_map[%d] was set: %d\n",
+ __func__, i, old_handle);
+ rc = -EINVAL;
+ }
+ break;
+ }
+ }
+
+ if (i == INTF_MAX)
+ rc = -EINVAL;
+ return rc;
+}
+
+void msm_cam_server_clear_interface_map(uint32_t mctl_handle)
+{
+ int i;
+ for (i = 0; i < INTF_MAX; i++)
+ if (g_server_dev.interface_map_table[i].mctl_handle ==
+ mctl_handle)
+ g_server_dev.interface_map_table[i].mctl_handle = 0;
+}
+
uint32_t msm_cam_server_get_mctl_handle(void)
{
uint32_t i;
@@ -133,13 +198,15 @@
return NULL;
}
-static void msm_cam_server_send_error_evt(int evt_type)
+
+static void msm_cam_server_send_error_evt(
+ struct msm_cam_media_controller *pmctl, int evt_type)
{
struct v4l2_event v4l2_ev;
v4l2_ev.id = 0;
v4l2_ev.type = evt_type;
ktime_get_ts(&v4l2_ev.timestamp);
- v4l2_event_queue(g_server_dev.pcam_active->pvdev, &v4l2_ev);
+ v4l2_event_queue(pmctl->pcam_ptr->pvdev, &v4l2_ev);
}
static int msm_ctrl_cmd_done(void *arg)
@@ -357,7 +424,8 @@
ctrlcmd.vnode_id = pcam->vnode_id;
ctrlcmd.queue_idx = pcam->server_queue_idx;
ctrlcmd.stream_type = pcam->dev_inst[idx]->image_mode;
- ctrlcmd.config_ident = g_server_dev.config_info.config_dev_id[0];
+ ctrlcmd.config_ident = g_server_dev.config_info.config_dev_id[
+ pcam->server_queue_idx];
/* send command to config thread in userspace, and get return value */
rc = msm_server_control(&g_server_dev, &ctrlcmd);
@@ -371,15 +439,17 @@
{
int rc = 0;
struct msm_ctrl_cmd ctrlcmd;
+ int idx = pcam->server_queue_idx;
D("%s qid %d\n", __func__, pcam->server_queue_idx);
ctrlcmd.type = MSM_V4L2_OPEN;
ctrlcmd.timeout_ms = 10000;
- ctrlcmd.length = strnlen(g_server_dev.config_info.config_dev_name[0],
- MAX_DEV_NAME_LEN)+1;
- ctrlcmd.value = (char *)g_server_dev.config_info.config_dev_name[0];
+ ctrlcmd.length = strnlen(
+ g_server_dev.config_info.config_dev_name[idx],
+ MAX_DEV_NAME_LEN)+1;
+ ctrlcmd.value = (char *)g_server_dev.config_info.config_dev_name[idx];
ctrlcmd.vnode_id = pcam->vnode_id;
ctrlcmd.queue_idx = pcam->server_queue_idx;
- ctrlcmd.config_ident = g_server_dev.config_info.config_dev_id[0];
+ ctrlcmd.config_ident = g_server_dev.config_info.config_dev_id[idx];
/* send command to config thread in usersspace, and get return value */
rc = msm_server_control(&g_server_dev, &ctrlcmd);
@@ -394,12 +464,14 @@
D("%s qid %d\n", __func__, pcam->server_queue_idx);
ctrlcmd.type = MSM_V4L2_CLOSE;
ctrlcmd.timeout_ms = 10000;
- ctrlcmd.length = strnlen(g_server_dev.config_info.config_dev_name[0],
- MAX_DEV_NAME_LEN)+1;
- ctrlcmd.value = (char *)g_server_dev.config_info.config_dev_name[0];
+ ctrlcmd.length = strnlen(g_server_dev.config_info.config_dev_name[
+ pcam->server_queue_idx], MAX_DEV_NAME_LEN)+1;
+ ctrlcmd.value = (char *)g_server_dev.config_info.config_dev_name[
+ pcam->server_queue_idx];
ctrlcmd.vnode_id = pcam->vnode_id;
ctrlcmd.queue_idx = pcam->server_queue_idx;
- ctrlcmd.config_ident = g_server_dev.config_info.config_dev_id[0];
+ ctrlcmd.config_ident = g_server_dev.config_info.config_dev_id[
+ pcam->server_queue_idx];
/* send command to config thread in usersspace, and get return value */
rc = msm_server_control(&g_server_dev, &ctrlcmd);
@@ -422,6 +494,7 @@
plane_info.buffer_type = pfmt->type;
plane_info.ext_mode = pcam->dev_inst[idx]->image_mode;
plane_info.num_planes = 1;
+ plane_info.inst_handle = pcam->dev_inst[idx]->inst_handle;
D("%s: %d, %d, 0x%x\n", __func__,
pfmt->fmt.pix.width, pfmt->fmt.pix.height,
pfmt->fmt.pix.pixelformat);
@@ -478,6 +551,8 @@
plane_info.buffer_type = pfmt->type;
plane_info.ext_mode = pcam->dev_inst[idx]->image_mode;
plane_info.num_planes = pix_mp->num_planes;
+ plane_info.inst_handle = pcam->dev_inst[idx]->inst_handle;
+
if (plane_info.num_planes <= 0 ||
plane_info.num_planes > VIDEO_MAX_PLANES) {
pr_err("%s Invalid number of planes set %d", __func__,
@@ -880,7 +955,6 @@
struct msm_cam_v4l2_device *pcam)
{
int rc = 0;
- struct msm_cam_media_controller *pmctl;
D("%s\n", __func__);
@@ -889,34 +963,24 @@
return rc;
}
- /* The number of camera instance should be controlled by the
- resource manager. Currently supporting one active instance
- until multiple instances are supported */
- if (atomic_read(&ps->number_pcam_active) > 0) {
- pr_err("%s Cannot have more than one active camera %d\n",
+ /*
+ * The number of camera instance should be controlled by the
+ * resource manager. Currently supporting two active instances
+ */
+ if (atomic_read(&ps->number_pcam_active) > 1) {
+ pr_err("%s Cannot have more than two active camera %d\n",
__func__, atomic_read(&ps->number_pcam_active));
return -EINVAL;
}
/* book keeping this camera session*/
- ps->pcam_active = pcam;
+ ps->pcam_active[pcam->server_queue_idx] = pcam;
atomic_inc(&ps->number_pcam_active);
- D("config pcam = 0x%p\n", ps->pcam_active);
+ D("config pcam = 0x%p\n", pcam);
/* initialization the media controller module*/
msm_mctl_init(pcam);
- /*for single VFE msms (8660, 8960v1), just populate the session
- with our VFE devices that registered*/
- pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
- if (pmctl == NULL) {
- pr_err("%s: cannot find mctl\n", __func__);
- msm_mctl_free(pcam);
- atomic_dec(&ps->number_pcam_active);
- return -ENODEV;
- }
- pmctl->axi_sdev = ps->axi_device[0];
- pmctl->isp_sdev = ps->isp_subdev[0];
return rc;
}
@@ -924,6 +988,7 @@
static int msm_cam_server_close_session(struct msm_cam_server_dev *ps,
struct msm_cam_v4l2_device *pcam)
{
+ int i;
int rc = 0;
D("%s\n", __func__);
@@ -932,14 +997,62 @@
return rc;
}
-
atomic_dec(&ps->number_pcam_active);
- ps->pcam_active = NULL;
+ ps->pcam_active[pcam->server_queue_idx] = NULL;
+ for (i = 0; i < INTF_MAX; i++) {
+ if (ps->interface_map_table[i].mctl_handle ==
+ pcam->mctl_handle)
+ ps->interface_map_table[i].mctl_handle = 0;
+ }
msm_mctl_free(pcam);
return rc;
}
+static int map_imem_addresses(struct msm_cam_media_controller *mctl)
+{
+ int rc = 0;
+ rc = msm_iommu_map_contig_buffer(
+ (unsigned long)IMEM_Y_PING_OFFSET, CAMERA_DOMAIN, GEN_POOL,
+ ((IMEM_Y_SIZE + IMEM_CBCR_SIZE + 4095) & (~4095)),
+ SZ_4K, IOMMU_WRITE | IOMMU_READ,
+ (unsigned long *)&mctl->ping_imem_y);
+ mctl->ping_imem_cbcr = mctl->ping_imem_y + IMEM_Y_SIZE;
+ if (rc < 0) {
+ pr_err("%s: ping iommu mapping returned error %d\n",
+ __func__, rc);
+ mctl->ping_imem_y = 0;
+ mctl->ping_imem_cbcr = 0;
+ }
+ rc = msm_iommu_map_contig_buffer(
+ (unsigned long)IMEM_Y_PONG_OFFSET, CAMERA_DOMAIN, GEN_POOL,
+ ((IMEM_Y_SIZE + IMEM_CBCR_SIZE + 4095) & (~4095)),
+ SZ_4K, IOMMU_WRITE | IOMMU_READ,
+ (unsigned long *)&mctl->pong_imem_y);
+ mctl->pong_imem_cbcr = mctl->pong_imem_y + IMEM_Y_SIZE;
+ if (rc < 0) {
+ pr_err("%s: pong iommu mapping returned error %d\n",
+ __func__, rc);
+ mctl->pong_imem_y = 0;
+ mctl->pong_imem_cbcr = 0;
+ }
+ return rc;
+}
+
+static void unmap_imem_addresses(struct msm_cam_media_controller *mctl)
+{
+ msm_iommu_unmap_contig_buffer(mctl->ping_imem_y,
+ CAMERA_DOMAIN, GEN_POOL,
+ ((IMEM_Y_SIZE + IMEM_CBCR_SIZE + 4095) & (~4095)));
+ msm_iommu_unmap_contig_buffer(mctl->pong_imem_y,
+ CAMERA_DOMAIN, GEN_POOL,
+ ((IMEM_Y_SIZE + IMEM_CBCR_SIZE + 4095) & (~4095)));
+ mctl->ping_imem_y = 0;
+ mctl->ping_imem_cbcr = 0;
+ mctl->pong_imem_y = 0;
+ mctl->pong_imem_cbcr = 0;
+}
+
static long msm_ioctl_server(struct file *file, void *fh,
bool valid_prio, int cmd, void *arg)
{
@@ -1180,22 +1293,23 @@
mutex_unlock(&g_server_dev.server_lock);
if (g_server_dev.use_count == 0) {
+ int i;
mutex_lock(&g_server_dev.server_lock);
- if (g_server_dev.pcam_active) {
- struct msm_cam_media_controller *pmctl = NULL;
- int rc;
+ for (i = 0; i < MAX_NUM_ACTIVE_CAMERA; i++) {
+ if (g_server_dev.pcam_active[i]) {
+ struct msm_cam_media_controller *pmctl = NULL;
- pmctl = msm_cam_server_get_mctl(
- g_server_dev.pcam_active->mctl_handle);
- if (pmctl && pmctl->mctl_release) {
- rc = pmctl->mctl_release(pmctl);
- if (rc < 0)
- pr_err("mctl_release fails %d\n", rc);
- /*so that it isn't closed again*/
- pmctl->mctl_release = NULL;
+ pmctl = msm_cam_server_get_mctl(
+ g_server_dev.pcam_active[i]->mctl_handle);
+ if (pmctl && pmctl->mctl_release) {
+ pmctl->mctl_release(pmctl);
+ /*so that it isn't closed again*/
+ pmctl->mctl_release = NULL;
+ }
+ msm_cam_server_send_error_evt(pmctl,
+ V4L2_EVENT_PRIVATE_START +
+ MSM_CAM_APP_NOTIFY_ERROR_EVENT);
}
- msm_cam_server_send_error_evt(V4L2_EVENT_PRIVATE_START
- + MSM_CAM_APP_NOTIFY_ERROR_EVENT);
}
sub.type = V4L2_EVENT_ALL;
msm_server_v4l2_unsubscribe_event(
@@ -1283,6 +1397,7 @@
{
int rc = -EINVAL, ges_evt;
struct msm_cam_server_queue *queue;
+ struct msm_cam_media_controller *pmctl;
if (!pcam) {
pr_err("%s pcam passed is null ", __func__);
@@ -1301,7 +1416,6 @@
msm_queue_init(&queue->ctrl_q, "control");
msm_queue_init(&queue->eventData_q, "eventdata");
queue->queue_active = 1;
-
rc = msm_cam_server_open_session(&g_server_dev, pcam);
if (rc < 0) {
pr_err("%s: cam_server_open_session failed %d\n",
@@ -1309,6 +1423,17 @@
goto error;
}
+ pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+ if (!pmctl) {
+ pr_err("%s: invalid mctl controller", __func__);
+ goto error;
+ }
+ rc = map_imem_addresses(pmctl);
+ if (rc < 0) {
+ pr_err("%sFailed to map imem addresses %d\n", __func__, rc);
+ goto error;
+ }
+
return rc;
error:
ges_evt = MSM_V4L2_GES_CAM_CLOSE;
@@ -1328,6 +1453,7 @@
{
int rc = -EINVAL, ges_evt;
struct msm_cam_server_queue *queue;
+ struct msm_cam_media_controller *pmctl;
mutex_lock(&g_server_dev.server_queue_lock);
queue = &g_server_dev.server_queue[pcam->server_queue_idx];
@@ -1338,6 +1464,13 @@
msm_drain_eventq(&queue->eventData_q);
mutex_unlock(&g_server_dev.server_queue_lock);
+ pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+ if (!pmctl) {
+ pr_err("%s: invalid mctl controller", __func__);
+ return -EINVAL;
+ }
+ unmap_imem_addresses(pmctl);
+
rc = msm_cam_server_close_session(&g_server_dev, pcam);
if (rc < 0)
pr_err("msm_cam_server_close_session fails %d\n", rc);
@@ -1366,70 +1499,128 @@
.vidioc_default = msm_ioctl_server,
};
+static uint32_t msm_camera_server_find_mctl(
+ unsigned int notification, void *arg)
+{
+ int i;
+ uint32_t interface;
+
+ switch (notification) {
+ case NOTIFY_ISP_MSG_EVT:
+ if (((struct isp_msg_event *)arg)->msg_id ==
+ MSG_ID_RDI0_UPDATE_ACK)
+ interface = RDI_0;
+ else if (((struct isp_msg_event *)arg)->msg_id ==
+ MSG_ID_RDI1_UPDATE_ACK)
+ interface = RDI_1;
+ else
+ interface = PIX_0;
+ break;
+ case NOTIFY_VFE_MSG_OUT:
+ if (((struct isp_msg_output *)arg)->output_id ==
+ MSG_ID_OUTPUT_TERTIARY1)
+ interface = RDI_0;
+ else if (((struct isp_msg_output *)arg)->output_id ==
+ MSG_ID_OUTPUT_TERTIARY2)
+ interface = RDI_1;
+ else
+ interface = PIX_0;
+ break;
+ case NOTIFY_VFE_BUF_EVT: {
+ struct msm_vfe_resp *rp;
+ struct msm_frame_info *frame_info;
+ rp = (struct msm_vfe_resp *)arg;
+ frame_info = rp->evt_msg.data;
+ if (frame_info->path == VFE_MSG_OUTPUT_TERTIARY1)
+ interface = RDI_0;
+ else if (frame_info->path == VFE_MSG_OUTPUT_TERTIARY2)
+ interface = RDI_1;
+ else
+ interface = PIX_0;
+ }
+ break;
+ case NOTIFY_VFE_MSG_STATS:
+ case NOTIFY_VFE_MSG_COMP_STATS:
+ case NOTIFY_VFE_CAMIF_ERROR:
+ case NOTIFY_VFE_IRQ:
+ default:
+ interface = PIX_0;
+ break;
+ }
+
+ for (i = 0; i < INTF_MAX; i++) {
+ if (interface == g_server_dev.interface_map_table[i].interface)
+ break;
+ }
+ if (i == INTF_MAX) {
+ pr_err("%s: Cannot find valid interface map\n", __func__);
+ return -EINVAL;
+ } else
+ return g_server_dev.interface_map_table[i].mctl_handle;
+}
+
static void msm_cam_server_subdev_notify(struct v4l2_subdev *sd,
unsigned int notification, void *arg)
{
int rc = -EINVAL;
- struct msm_sensor_ctrl_t *s_ctrl;
- struct msm_camera_sensor_info *sinfo;
- struct msm_camera_device_platform_data *camdev;
- uint8_t csid_core = 0;
+ int32_t mctl_handle = 0;
+ struct msm_cam_media_controller *p_mctl = NULL;
- if (notification == NOTIFY_PCLK_CHANGE ||
- notification == NOTIFY_CSIPHY_CFG ||
- notification == NOTIFY_CSID_CFG ||
- notification == NOTIFY_CSIC_CFG) {
- s_ctrl = get_sctrl(sd);
- sinfo = (struct msm_camera_sensor_info *) s_ctrl->sensordata;
- camdev = sinfo->pdata;
- csid_core = camdev->csid_core;
+ mctl_handle = msm_camera_server_find_mctl(notification, arg);
+ if (mctl_handle < 0) {
+ pr_err("%s: Couldn't find mctl instance!\n", __func__);
+ return;
}
-
switch (notification) {
case NOTIFY_ISP_MSG_EVT:
case NOTIFY_VFE_MSG_OUT:
+ case NOTIFY_VFE_SOF_COUNT:
case NOTIFY_VFE_MSG_STATS:
case NOTIFY_VFE_MSG_COMP_STATS:
case NOTIFY_VFE_BUF_EVT:
- case NOTIFY_VFE_BUF_FREE_EVT:
- if (g_server_dev.isp_subdev[0] &&
- g_server_dev.isp_subdev[0]->isp_notify) {
- rc = g_server_dev.isp_subdev[0]->isp_notify(
- g_server_dev.vfe_device[0], notification, arg);
- }
+ p_mctl = msm_cam_server_get_mctl(mctl_handle);
+ if (p_mctl->isp_sdev &&
+ p_mctl->isp_sdev->isp_notify
+ && p_mctl->isp_sdev->sd)
+ rc = p_mctl->isp_sdev->isp_notify(
+ p_mctl->isp_sdev->sd, notification, arg);
break;
case NOTIFY_VFE_IRQ:{
struct msm_vfe_cfg_cmd cfg_cmd;
struct msm_camvfe_params vfe_params;
+ p_mctl = msm_cam_server_get_mctl(mctl_handle);
cfg_cmd.cmd_type = CMD_VFE_PROCESS_IRQ;
vfe_params.vfe_cfg = &cfg_cmd;
vfe_params.data = arg;
- rc = v4l2_subdev_call(g_server_dev.vfe_device[0],
+ rc = v4l2_subdev_call(p_mctl->isp_sdev->sd,
core, ioctl, 0, &vfe_params);
}
break;
case NOTIFY_AXI_IRQ:
- rc = v4l2_subdev_call(g_server_dev.axi_device[0],
- core, ioctl, VIDIOC_MSM_AXI_IRQ, arg);
+ rc = v4l2_subdev_call(sd, core, ioctl, VIDIOC_MSM_AXI_IRQ, arg);
break;
case NOTIFY_PCLK_CHANGE:
- if (g_server_dev.axi_device[0])
- rc = v4l2_subdev_call(g_server_dev.axi_device[0], video,
+ p_mctl = v4l2_get_subdev_hostdata(sd);
+ if (p_mctl->axi_sdev)
+ rc = v4l2_subdev_call(p_mctl->axi_sdev, video,
s_crystal_freq, *(uint32_t *)arg, 0);
else
- rc = v4l2_subdev_call(g_server_dev.vfe_device[0], video,
+ rc = v4l2_subdev_call(p_mctl->isp_sdev->sd, video,
s_crystal_freq, *(uint32_t *)arg, 0);
break;
case NOTIFY_CSIPHY_CFG:
- rc = v4l2_subdev_call(g_server_dev.csiphy_device[csid_core],
+ p_mctl = v4l2_get_subdev_hostdata(sd);
+ rc = v4l2_subdev_call(p_mctl->csiphy_sdev,
core, ioctl, VIDIOC_MSM_CSIPHY_CFG, arg);
break;
case NOTIFY_CSID_CFG:
- rc = v4l2_subdev_call(g_server_dev.csid_device[csid_core],
+ p_mctl = v4l2_get_subdev_hostdata(sd);
+ rc = v4l2_subdev_call(p_mctl->csid_sdev,
core, ioctl, VIDIOC_MSM_CSID_CFG, arg);
break;
case NOTIFY_CSIC_CFG:
- rc = v4l2_subdev_call(g_server_dev.csic_device[csid_core],
+ p_mctl = v4l2_get_subdev_hostdata(sd);
+ rc = v4l2_subdev_call(p_mctl->csic_sdev,
core, ioctl, VIDIOC_MSM_CSIC_CFG, arg);
break;
case NOTIFY_GESTURE_EVT:
@@ -1441,7 +1632,8 @@
core, ioctl, VIDIOC_MSM_GESTURE_CAM_EVT, arg);
break;
case NOTIFY_VFE_CAMIF_ERROR: {
- msm_cam_server_send_error_evt(V4L2_EVENT_PRIVATE_START
+ p_mctl = msm_cam_server_get_mctl(mctl_handle);
+ msm_cam_server_send_error_evt(p_mctl, V4L2_EVENT_PRIVATE_START
+ MSM_CAM_APP_NOTIFY_ERROR_EVENT);
break;
}
@@ -1471,6 +1663,33 @@
return -EINVAL;
}
+static struct v4l2_subdev *msm_cam_find_subdev_node(
+ struct v4l2_subdev **sd_list, u32 revision_num)
+{
+ int i = 0;
+ for (i = 0; sd_list[i] != NULL; i++) {
+ if (sd_list[i]->entity.revision == revision_num) {
+ return sd_list[i];
+ break;
+ }
+ }
+ return NULL;
+}
+
+int msm_mctl_find_sensor_subdevs(struct msm_cam_media_controller *p_mctl,
+ int core_index)
+{
+ int rc = -ENODEV;
+
+ v4l2_set_subdev_hostdata(p_mctl->sensor_sdev, p_mctl);
+
+ rc = msm_csi_register_subdevs(p_mctl, core_index, &g_server_dev);
+ if (rc < 0)
+ pr_err("%s: Could not find sensor subdevs\n", __func__);
+
+ return rc;
+}
+
static irqreturn_t msm_camera_server_parse_irq(int irq_num, void *data)
{
unsigned long flags;
@@ -1680,7 +1899,7 @@
* the individual irq lookup table.... */
irq_req->irq_idx =
get_irq_idx_from_camhw_idx(irq_req->cam_hw_idx);
- if (irq_req->cam_hw_idx < 0) {
+ if (irq_req->irq_idx < 0) {
pr_err("%s Invalid hw index %d ", __func__,
irq_req->cam_hw_idx);
return -EINVAL;
@@ -1703,10 +1922,10 @@
sizeof(struct intr_table_entry));
D("%s Saving Entry %d %d %d %p",
__func__,
- ind_irq_tbl[irq_req->cam_hw_idx].irq_num,
- ind_irq_tbl[irq_req->cam_hw_idx].cam_hw_idx,
- ind_irq_tbl[irq_req->cam_hw_idx].is_composite,
- ind_irq_tbl[irq_req->cam_hw_idx].subdev_list[0]);
+ ind_irq_tbl[irq_req->irq_idx].irq_num,
+ ind_irq_tbl[irq_req->irq_idx].cam_hw_idx,
+ ind_irq_tbl[irq_req->irq_idx].is_composite,
+ ind_irq_tbl[irq_req->irq_idx].subdev_list[0]);
spin_unlock_irqrestore(&g_server_dev.intr_table_lock,
flags);
@@ -1825,6 +2044,15 @@
index = sd_info->sd_index;
switch (sdev_type) {
+ case SENSOR_DEV:
+ if (index >= MAX_NUM_SENSOR_DEV) {
+ pr_err("%s Invalid sensor idx %d", __func__, index);
+ err = -EINVAL;
+ break;
+ }
+ g_server_dev.sensor_device[index] = sd;
+ break;
+
case CSIPHY_DEV:
if (index >= MAX_NUM_CSIPHY_DEV) {
pr_err("%s Invalid CSIPHY idx %d", __func__, index);
@@ -1859,10 +2087,16 @@
break;
case ISPIF_DEV:
- g_server_dev.ispif_device = sd;
+ if (index >= MAX_NUM_ISPIF_DEV) {
+ pr_err("%s Invalid ISPIF idx %d", __func__, index);
+ err = -EINVAL;
+ break;
+ }
+ cam_hw_idx = MSM_CAM_HW_ISPIF + index;
+ g_server_dev.ispif_device[index] = sd;
if (g_server_dev.irqr_device) {
g_server_dev.subdev_table[cam_hw_idx] = sd;
- err = msm_cam_server_fill_sdev_irqnum(MSM_CAM_HW_ISPIF,
+ err = msm_cam_server_fill_sdev_irqnum(cam_hw_idx,
sd_info->irq_num);
}
break;
@@ -1875,6 +2109,7 @@
}
cam_hw_idx = MSM_CAM_HW_VFE0 + index;
g_server_dev.vfe_device[index] = sd;
+ g_server_dev.isp_subdev[index]->sd = sd;
if (g_server_dev.irqr_device) {
g_server_dev.subdev_table[cam_hw_idx] = sd;
err = msm_cam_server_fill_sdev_irqnum(cam_hw_idx,
@@ -1915,6 +2150,21 @@
}
g_server_dev.cpp_device[index] = sd;
break;
+ case CCI_DEV:
+ g_server_dev.cci_device = sd;
+ if (g_server_dev.irqr_device) {
+ if (index >= MAX_NUM_CCI_DEV) {
+ pr_err("%s Invalid CCI idx %d", __func__,
+ index);
+ err = -EINVAL;
+ break;
+ }
+ cam_hw_idx = MSM_CAM_HW_CCI + index;
+ g_server_dev.subdev_table[cam_hw_idx] = sd;
+ err = msm_cam_server_fill_sdev_irqnum(cam_hw_idx,
+ sd_info->irq_num);
+ }
+ break;
default:
break;
}
@@ -1954,25 +2204,30 @@
g_server_dev.video_dev->ioctl_ops = &msm_ioctl_ops_server;
g_server_dev.video_dev->release = video_device_release;
g_server_dev.video_dev->minor = 100;
- g_server_dev.video_dev->vfl_type = 1;
+ g_server_dev.video_dev->vfl_type = VFL_TYPE_GRABBER;
video_set_drvdata(g_server_dev.video_dev, &g_server_dev);
- strlcpy(g_server_dev.media_dev.model, "qcamera",
+ strlcpy(g_server_dev.media_dev.model, QCAMERA_SERVER_NAME,
sizeof(g_server_dev.media_dev.model));
g_server_dev.media_dev.dev = &pdev->dev;
rc = media_device_register(&g_server_dev.media_dev);
g_server_dev.v4l2_dev.mdev = &g_server_dev.media_dev;
+ media_entity_init(&g_server_dev.video_dev->entity, 0, NULL, 0);
+ g_server_dev.video_dev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
+ g_server_dev.video_dev->entity.group_id = QCAMERA_VNODE_GROUP_ID;
rc = video_register_device(g_server_dev.video_dev,
VFL_TYPE_GRABBER, 100);
+ g_server_dev.video_dev->entity.name =
+ video_device_node_name(g_server_dev.video_dev);
+
mutex_init(&g_server_dev.server_lock);
mutex_init(&g_server_dev.server_queue_lock);
spin_lock_init(&g_server_dev.intr_table_lock);
memset(&g_server_dev.irq_lkup_table, 0,
sizeof(struct irqmgr_intr_lkup_table));
- g_server_dev.pcam_active = NULL;
g_server_dev.camera_info.num_cameras = 0;
atomic_set(&g_server_dev.number_pcam_active, 0);
g_server_dev.server_evt_id = 0;
@@ -1980,22 +2235,22 @@
/*initialize fake video device and event queue*/
g_server_dev.server_command_queue.pvdev = g_server_dev.video_dev;
- rc = msm_setup_v4l2_event_queue(
+ msm_setup_v4l2_event_queue(
&g_server_dev.server_command_queue.eventHandle,
g_server_dev.server_command_queue.pvdev);
- if (rc < 0) {
- pr_err("%s failed to initialize event queue\n", __func__);
- video_device_release(g_server_dev.server_command_queue.pvdev);
- return rc;
- }
-
for (i = 0; i < MAX_NUM_ACTIVE_CAMERA; i++) {
struct msm_cam_server_queue *queue;
queue = &g_server_dev.server_queue[i];
queue->queue_active = 0;
msm_queue_init(&queue->ctrl_q, "control");
msm_queue_init(&queue->eventData_q, "eventdata");
+ g_server_dev.pcam_active[i] = NULL;
+ }
+
+ for (i = 0; i < INTF_MAX; i++) {
+ g_server_dev.interface_map_table[i].interface = 0x01 << i;
+ g_server_dev.interface_map_table[i].mctl_handle = 0;
}
return rc;
}
@@ -2029,9 +2284,8 @@
{
int rc = 0;
struct msm_cam_media_controller *pmctl = NULL;
- D("%s: %p", __func__, g_server_dev.pcam_active);
*p_active = 0;
- if (g_server_dev.pcam_active) {
+ if (g_server_dev.pcam_active[pcam->server_queue_idx]) {
D("%s: Active camera present return", __func__);
return 0;
}
@@ -2073,11 +2327,8 @@
return -ENODEV;
}
- if (pmctl->mctl_release) {
- rc = pmctl->mctl_release(pmctl);
- if (rc < 0)
- pr_err("mctl_release fails %d\n", rc);
- }
+ if (pmctl->mctl_release)
+ pmctl->mctl_release(pmctl);
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
kref_put(&pmctl->refcount, msm_release_ion_client);
@@ -2319,8 +2570,8 @@
config_cam->use_count++;
/* assume there is only one active camera possible*/
- config_cam->p_mctl =
- msm_cam_server_get_mctl(g_server_dev.pcam_active->mctl_handle);
+ config_cam->p_mctl = msm_cam_server_get_mctl(
+ g_server_dev.pcam_active[config_cam->dev_num]->mctl_handle);
if (!config_cam->p_mctl) {
pr_err("%s: cannot find mctl\n", __func__);
return -ENODEV;
@@ -2336,6 +2587,104 @@
return rc;
}
+static struct msm_isp_ops *find_isp_op(struct v4l2_subdev *sdev)
+{
+ int i;
+ for (i = 0; i < g_server_dev.config_info.num_config_nodes; i++) {
+ if (g_server_dev.isp_subdev[i]->sd == sdev)
+ return g_server_dev.isp_subdev[i];
+ }
+ return NULL;
+}
+
+static int msm_set_mctl_subdev(struct msm_cam_media_controller *pmctl,
+ struct msm_mctl_set_sdev_data *set_data)
+{
+ int rc = 0;
+ struct v4l2_subdev *vfe_sdev = NULL;
+ struct v4l2_subdev *temp_sdev = NULL;
+ switch (set_data->sdev_type) {
+ case CSIPHY_DEV:
+ pmctl->csiphy_sdev = msm_cam_find_subdev_node
+ (&g_server_dev.csiphy_device[0], set_data->revision);
+ temp_sdev = pmctl->csiphy_sdev;
+ break;
+ case CSID_DEV:
+ pmctl->csid_sdev = msm_cam_find_subdev_node
+ (&g_server_dev.csid_device[0], set_data->revision);
+ temp_sdev = pmctl->csid_sdev;
+ break;
+ case CSIC_DEV:
+ pmctl->csic_sdev = msm_cam_find_subdev_node
+ (&g_server_dev.csic_device[0], set_data->revision);
+ temp_sdev = pmctl->csic_sdev;
+ break;
+ case ISPIF_DEV:
+ pmctl->ispif_sdev = msm_cam_find_subdev_node
+ (&g_server_dev.ispif_device[0], set_data->revision);
+ temp_sdev = pmctl->ispif_sdev;
+ break;
+ case VFE_DEV:
+ vfe_sdev = msm_cam_find_subdev_node
+ (&g_server_dev.vfe_device[0], set_data->revision);
+ temp_sdev = vfe_sdev;
+ pmctl->isp_sdev = find_isp_op(vfe_sdev);
+ pmctl->isp_sdev->sd = vfe_sdev;
+ break;
+ case AXI_DEV:
+ pmctl->axi_sdev = msm_cam_find_subdev_node
+ (&g_server_dev.axi_device[0], set_data->revision);
+ temp_sdev = pmctl->axi_sdev;
+ break;
+ case VPE_DEV:
+ pmctl->vpe_sdev = msm_cam_find_subdev_node
+ (&g_server_dev.vpe_device[0], set_data->revision);
+ temp_sdev = pmctl->vpe_sdev;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ if (temp_sdev != NULL)
+ v4l2_set_subdev_hostdata(temp_sdev, pmctl);
+ else
+ pr_err("%s: Could not find subdev\n", __func__);
+ return rc;
+}
+
+static int msm_unset_mctl_subdev(struct msm_cam_media_controller *pmctl,
+ struct msm_mctl_set_sdev_data *set_data)
+{
+ int rc = 0;
+ switch (set_data->sdev_type) {
+ case CSIPHY_DEV:
+ pmctl->csiphy_sdev = NULL;
+ break;
+ case CSID_DEV:
+ pmctl->csid_sdev = NULL;
+ break;
+ case CSIC_DEV:
+ pmctl->csic_sdev = NULL;
+ break;
+ case ISPIF_DEV:
+ pmctl->ispif_sdev = NULL;
+ break;
+ case VFE_DEV:
+ pmctl->isp_sdev = NULL;
+ break;
+ case AXI_DEV:
+ pmctl->axi_sdev = NULL;
+ break;
+ case VPE_DEV:
+ pmctl->vpe_sdev = NULL;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
static long msm_ioctl_config(struct file *fp, unsigned int cmd,
unsigned long arg)
{
@@ -2491,6 +2840,30 @@
rc = -EINVAL;
break;
+ case MSM_CAM_IOCTL_SET_MCTL_SDEV:{
+ struct msm_mctl_set_sdev_data set_data;
+ if (copy_from_user(&set_data, (void __user *)arg,
+ sizeof(struct msm_mctl_set_sdev_data))) {
+ ERR_COPY_FROM_USER();
+ rc = -EINVAL;
+ break;
+ }
+ rc = msm_set_mctl_subdev(config_cam->p_mctl, &set_data);
+ break;
+ }
+
+ case MSM_CAM_IOCTL_UNSET_MCTL_SDEV:{
+ struct msm_mctl_set_sdev_data set_data;
+ if (copy_from_user(&set_data, (void __user *)arg,
+ sizeof(struct msm_mctl_set_sdev_data))) {
+ ERR_COPY_FROM_USER();
+ rc = -EINVAL;
+ break;
+ }
+ rc = msm_unset_mctl_subdev(config_cam->p_mctl, &set_data);
+ break;
+ }
+
default:{
/* For the rest of config command, forward to media controller*/
struct msm_cam_media_controller *p_mctl = config_cam->p_mctl;
@@ -2597,14 +2970,13 @@
goto config_setup_fail;
}
- rc = msm_setup_v4l2_event_queue(
+ /* v4l2_fh support */
+ spin_lock_init(&config_cam->config_stat_event_queue.pvdev->fh_lock);
+ INIT_LIST_HEAD(&config_cam->config_stat_event_queue.pvdev->fh_list);
+ msm_setup_v4l2_event_queue(
&config_cam->config_stat_event_queue.eventHandle,
config_cam->config_stat_event_queue.pvdev);
- if (rc < 0) {
- pr_err("%s failed to initialize event queue\n", __func__);
- video_device_release(config_cam->config_stat_event_queue.pvdev);
- goto config_setup_fail;
- }
+ config_cam->dev_num = dev_num;
return rc;
@@ -2616,9 +2988,10 @@
static int __devinit msm_camera_probe(struct platform_device *pdev)
{
int rc = 0, i;
- /*for now just create a config 0 node
+ memset(&g_server_dev, 0, sizeof(struct msm_cam_server_dev));
+ /*for now just create two config nodes
put logic here later to know how many configs to create*/
- g_server_dev.config_info.num_config_nodes = 1;
+ g_server_dev.config_info.num_config_nodes = 2;
rc = msm_isp_init_module(g_server_dev.config_info.num_config_nodes);
if (rc < 0) {
diff --git a/drivers/media/video/msm/server/msm_cam_server.h b/drivers/media/video/msm/server/msm_cam_server.h
index 8a02d32..229e9c9 100644
--- a/drivers/media/video/msm/server/msm_cam_server.h
+++ b/drivers/media/video/msm/server/msm_cam_server.h
@@ -64,4 +64,6 @@
int msm_cam_server_request_irq(void *arg);
int msm_cam_server_update_irqmap(
struct msm_cam_server_irqmap_entry *entry);
+int msm_cam_server_config_interface_map(u32 extendedmode,
+ uint32_t mctl_handle);
#endif /* _MSM_CAM_SERVER_H */
diff --git a/drivers/media/video/msm/vfe/Makefile b/drivers/media/video/msm/vfe/Makefile
new file mode 100644
index 0000000..8068e4f
--- /dev/null
+++ b/drivers/media/video/msm/vfe/Makefile
@@ -0,0 +1,19 @@
+GCC_VERSION := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/video/msm/server
+ifeq ($(GCC_VERSION),0404)
+CFLAGS_REMOVE_msm_vfe8x.o = -Wframe-larger-than=1024
+endif
+ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
+ obj-$(CONFIG_ARCH_MSM7X27A) += msm_vfe7x27a_v4l2.o
+ obj-$(CONFIG_ARCH_MSM8X60) += msm_vfe31_v4l2.o
+ obj-$(CONFIG_ARCH_MSM7X30) += msm_vfe31_v4l2.o
+else
+ obj-$(CONFIG_ARCH_MSM7X27A) += msm_vfe7x27a.o
+ obj-$(CONFIG_ARCH_MSM8X60) += msm_vfe31.o
+ obj-$(CONFIG_ARCH_MSM7X30) += msm_vfe31.o
+endif
+obj-$(CONFIG_ARCH_MSM_ARM11) += msm_vfe7x.o
+obj-$(CONFIG_ARCH_QSD8X50) += msm_vfe8x.o msm_vfe8x_proc.o
+obj-$(CONFIG_ARCH_MSM8960) += msm_vfe32.o
+obj-$(CONFIG_MSM_CAMERA_V4L2) += msm_vfe_stats_buf.o
diff --git a/drivers/media/video/msm/msm_vfe31.c b/drivers/media/video/msm/vfe/msm_vfe31.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe31.c
rename to drivers/media/video/msm/vfe/msm_vfe31.c
diff --git a/drivers/media/video/msm/msm_vfe31.h b/drivers/media/video/msm/vfe/msm_vfe31.h
similarity index 99%
rename from drivers/media/video/msm/msm_vfe31.h
rename to drivers/media/video/msm/vfe/msm_vfe31.h
index 1d66621..bec5f58 100644
--- a/drivers/media/video/msm/msm_vfe31.h
+++ b/drivers/media/video/msm/vfe/msm_vfe31.h
@@ -325,8 +325,8 @@
#define V31_OPERATION_CFG_LEN 32
#define V31_AXI_OUT_OFF 0x00000038
-#define V31_AXI_OUT_LEN 212
-#define V31_AXI_CH_INF_LEN 24
+#define V31_AXI_OUT_LEN 220
+#define V31_AXI_CH_INF_LEN 32
#define V31_AXI_CFG_LEN 47
#define V31_FRAME_SKIP_OFF 0x00000504
diff --git a/drivers/media/video/msm/msm_vfe31_v4l2.c b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.c
similarity index 93%
rename from drivers/media/video/msm/msm_vfe31_v4l2.c
rename to drivers/media/video/msm/vfe/msm_vfe31_v4l2.c
index b520a92..0bd7b94 100644
--- a/drivers/media/video/msm/msm_vfe31_v4l2.c
+++ b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.c
@@ -63,12 +63,6 @@
uint32_t vfeInterruptStatus1;
};
-/*TODO: Why is V32 reference in arch/arm/mach-msm/include/mach/camera.h?*/
-#define VFE_MSG_V31_START VFE_MSG_V32_START
-#define VFE_MSG_V31_CAPTURE VFE_MSG_V32_CAPTURE
-#define VFE_MSG_V31_JPEG_CAPTURE VFE_MSG_V32_JPEG_CAPTURE
-#define VFE_MSG_V31_START_RECORDING VFE_MSG_V32_START_RECORDING
-
static struct vfe31_cmd_type vfe31_cmd[] = {
/* 0*/ {VFE_CMD_DUMMY_0},
{VFE_CMD_SET_CLK},
@@ -417,6 +411,25 @@
return 0L;
}
+static unsigned long vfe31_stats_unregbuf(
+ struct msm_stats_reqbuf *req_buf)
+{
+ int i = 0, rc = 0;
+
+ for (i = 0; i < req_buf->num_buf; i++) {
+ rc = vfe31_ctrl->stats_ops.buf_unprepare(
+ vfe31_ctrl->stats_ops.stats_ctrl,
+ req_buf->stats_type, i,
+ vfe31_ctrl->stats_ops.client);
+ if (rc < 0) {
+ pr_err("%s: unreg stats buf (type = %d) err = %d",
+ __func__, req_buf->stats_type, rc);
+ return rc;
+ }
+ }
+ return 0L;
+}
+
static int vfe_stats_awb_buf_init(
struct vfe_cmd_stats_buf *in)
{
@@ -643,7 +656,7 @@
vfe31_ctrl->vfebase + VFE_GLOBAL_RESET);
}
-static void vfe31_subdev_notify(int id, int path, int image_mode)
+static void vfe31_subdev_notify(int id, int path, uint32_t inst_handle)
{
struct msm_vfe_resp rp;
struct msm_frame_info frame_info;
@@ -652,7 +665,7 @@
memset(&rp, 0, sizeof(struct msm_vfe_resp));
CDBG("vfe31_subdev_notify : msgId = %d\n", id);
rp.evt_msg.type = MSM_CAMERA_MSG;
- frame_info.image_mode = image_mode;
+ frame_info.inst_handle = inst_handle;
frame_info.path = path;
rp.evt_msg.data = &frame_info;
rp.type = id;
@@ -663,20 +676,24 @@
static int vfe31_config_axi(int mode, uint32_t *ao)
{
uint32_t *ch_info;
- uint32_t *axi_cfg = ao+V31_AXI_RESERVED;
+ uint32_t *axi_cfg = ao + V31_AXI_RESERVED;
+ uint32_t bus_cmd = *axi_cfg;
+ int i;
+
/* Update the corresponding write masters for each output*/
ch_info = axi_cfg + V31_AXI_CFG_LEN;
vfe31_ctrl->outpath.out0.ch0 = 0x0000FFFF & *ch_info;
vfe31_ctrl->outpath.out0.ch1 = 0x0000FFFF & (*ch_info++ >> 16);
- vfe31_ctrl->outpath.out0.ch2 = 0x0000FFFF & *ch_info;
- vfe31_ctrl->outpath.out0.image_mode = 0x0000FFFF & (*ch_info++ >> 16);
+ vfe31_ctrl->outpath.out0.ch2 = 0x0000FFFF & *ch_info++;
+ vfe31_ctrl->outpath.out0.inst_handle = *ch_info++;
vfe31_ctrl->outpath.out1.ch0 = 0x0000FFFF & *ch_info;
vfe31_ctrl->outpath.out1.ch1 = 0x0000FFFF & (*ch_info++ >> 16);
- vfe31_ctrl->outpath.out1.ch2 = 0x0000FFFF & *ch_info;
- vfe31_ctrl->outpath.out1.image_mode = 0x0000FFFF & (*ch_info++ >> 16);
+ vfe31_ctrl->outpath.out1.ch2 = 0x0000FFFF & *ch_info++;
+ vfe31_ctrl->outpath.out1.inst_handle = *ch_info++;
vfe31_ctrl->outpath.out2.ch0 = 0x0000FFFF & *ch_info;
vfe31_ctrl->outpath.out2.ch1 = 0x0000FFFF & (*ch_info++ >> 16);
vfe31_ctrl->outpath.out2.ch2 = 0x0000FFFF & *ch_info++;
+ vfe31_ctrl->outpath.out2.inst_handle = *ch_info++;
switch (mode) {
case OUTPUT_PRIM:
@@ -710,10 +727,23 @@
return -EINVAL;
}
+ axi_cfg++;
msm_camera_io_memcpy(vfe31_ctrl->vfebase +
vfe31_cmd[VFE_CMD_AXI_OUT_CFG].offset, axi_cfg,
- vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length - V31_AXI_CH_INF_LEN -
- V31_AXI_RESERVED);
+ V31_AXI_BUS_CFG_LEN);
+ axi_cfg += V31_AXI_BUS_CFG_LEN/4;
+ for (i = 0; i < ARRAY_SIZE(vfe31_AXI_WM_CFG); i++) {
+ msm_camera_io_w(*axi_cfg,
+ vfe31_ctrl->vfebase+vfe31_AXI_WM_CFG[i]);
+ axi_cfg += 3;
+ msm_camera_io_memcpy(
+ vfe31_ctrl->vfebase+vfe31_AXI_WM_CFG[i]+12,
+ axi_cfg, 12);
+ axi_cfg += 3;
+ }
+ msm_camera_io_w(bus_cmd, vfe31_ctrl->vfebase +
+ V31_AXI_BUS_CMD_OFF);
+
return 0;
}
@@ -1301,14 +1331,14 @@
{
struct vfe31_output_ch *outch = NULL;
struct msm_free_buf *b = NULL;
- uint32_t image_mode = 0;
+ uint32_t inst_handle = 0;
if (path == VFE_MSG_OUTPUT_PRIMARY)
- image_mode = vfe31_ctrl->outpath.out0.image_mode;
+ inst_handle = vfe31_ctrl->outpath.out0.inst_handle;
else
- image_mode = vfe31_ctrl->outpath.out1.image_mode;
+ inst_handle = vfe31_ctrl->outpath.out1.inst_handle;
- vfe31_subdev_notify(id, path, image_mode);
+ vfe31_subdev_notify(id, path, inst_handle);
outch = vfe31_get_ch(path);
if (outch->free_buf.ch_paddr[0])
b = &outch->free_buf;
@@ -1318,14 +1348,14 @@
{
struct vfe31_output_ch *outch = NULL;
int rc = 0;
- uint32_t image_mode = 0;
+ uint32_t inst_handle = 0;
if (path == VFE_MSG_OUTPUT_PRIMARY)
- image_mode = vfe31_ctrl->outpath.out0.image_mode;
+ inst_handle = vfe31_ctrl->outpath.out0.inst_handle;
else
- image_mode = vfe31_ctrl->outpath.out1.image_mode;
+ inst_handle = vfe31_ctrl->outpath.out1.inst_handle;
- vfe31_subdev_notify(id, path, image_mode);
+ vfe31_subdev_notify(id, path, inst_handle);
outch = vfe31_get_ch(path);
if (outch->ping.ch_paddr[0] && outch->pong.ch_paddr[0]) {
/* Configure Preview Ping Pong */
@@ -1400,11 +1430,11 @@
VFE_OUTPUTS_PREVIEW))
/* Configure primary channel */
rc = vfe31_configure_pingpong_buffers(
- VFE_MSG_V31_START, VFE_MSG_OUTPUT_PRIMARY);
+ VFE_MSG_START, VFE_MSG_OUTPUT_PRIMARY);
else
/* Configure secondary channel */
rc = vfe31_configure_pingpong_buffers(
- VFE_MSG_V31_START, VFE_MSG_OUTPUT_SECONDARY);
+ VFE_MSG_START, VFE_MSG_OUTPUT_SECONDARY);
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
" for preview", __func__);
@@ -1423,7 +1453,7 @@
rc = -EFAULT;
goto proc_general_done;
}
- rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_CAPTURE,
+ rc = vfe31_configure_pingpong_buffers(VFE_MSG_CAPTURE,
VFE_MSG_OUTPUT_PRIMARY);
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
@@ -1449,12 +1479,12 @@
}
/* Configure primary channel for JPEG */
rc = vfe31_configure_pingpong_buffers(
- VFE_MSG_V31_JPEG_CAPTURE,
+ VFE_MSG_JPEG_CAPTURE,
VFE_MSG_OUTPUT_PRIMARY);
} else {
/* Configure primary channel */
rc = vfe31_configure_pingpong_buffers(
- VFE_MSG_V31_CAPTURE,
+ VFE_MSG_CAPTURE,
VFE_MSG_OUTPUT_PRIMARY);
}
if (rc < 0) {
@@ -1464,7 +1494,7 @@
goto proc_general_done;
}
/* Configure secondary channel */
- rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_CAPTURE,
+ rc = vfe31_configure_pingpong_buffers(VFE_MSG_CAPTURE,
VFE_MSG_OUTPUT_SECONDARY);
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
@@ -1477,16 +1507,26 @@
case VFE_CMD_START_RECORDING:
pr_info("vfe31_proc_general: cmdID = %s\n",
vfe31_general_cmd[cmd->id]);
+ if (copy_from_user(&temp1, (void __user *)(cmd->value),
+ sizeof(uint32_t))) {
+ pr_err("%s Error copying inst_handle for recording\n",
+ __func__);
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
if (vfe31_ctrl->operation_mode ==
- VFE_OUTPUTS_PREVIEW_AND_VIDEO)
+ VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
+ vfe31_ctrl->outpath.out1.inst_handle = temp1;
rc = vfe31_configure_pingpong_buffers(
- VFE_MSG_V31_START_RECORDING,
+ VFE_MSG_START_RECORDING,
VFE_MSG_OUTPUT_SECONDARY);
- else if (vfe31_ctrl->operation_mode ==
- VFE_OUTPUTS_VIDEO_AND_PREVIEW)
+ } else if (vfe31_ctrl->operation_mode ==
+ VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
+ vfe31_ctrl->outpath.out0.inst_handle = temp1;
rc = vfe31_configure_pingpong_buffers(
- VFE_MSG_V31_START_RECORDING,
+ VFE_MSG_START_RECORDING,
VFE_MSG_OUTPUT_PRIMARY);
+ }
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
" for video", __func__);
@@ -1916,8 +1956,16 @@
break;
case VFE_CMD_LIVESHOT:
+ if (copy_from_user(&temp1, (void __user *)(cmd->value),
+ sizeof(uint32_t))) {
+ pr_err("%s Error copying inst_handle for liveshot ",
+ __func__);
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ vfe31_ctrl->outpath.out0.inst_handle = temp1;
/* Configure primary channel */
- rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_CAPTURE,
+ rc = vfe31_configure_pingpong_buffers(VFE_MSG_CAPTURE,
VFE_MSG_OUTPUT_PRIMARY);
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
@@ -2223,11 +2271,11 @@
break;
case VFE_CMD_ZSL:
- rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_START,
+ rc = vfe31_configure_pingpong_buffers(VFE_MSG_START,
VFE_MSG_OUTPUT_PRIMARY);
if (rc < 0)
goto proc_general_done;
- rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_START,
+ rc = vfe31_configure_pingpong_buffers(VFE_MSG_START,
VFE_MSG_OUTPUT_SECONDARY);
if (rc < 0)
goto proc_general_done;
@@ -2715,12 +2763,12 @@
}
static void vfe_send_outmsg(struct v4l2_subdev *sd, uint8_t msgid,
uint32_t ch0_paddr, uint32_t ch1_paddr,
- uint32_t ch2_paddr, uint32_t image_mode)
+ uint32_t ch2_paddr, uint32_t inst_handle)
{
struct isp_msg_output msg;
msg.output_id = msgid;
- msg.buf.image_mode = image_mode;
+ msg.buf.inst_handle = inst_handle;
msg.buf.ch_paddr[0] = ch0_paddr;
msg.buf.ch_paddr[1] = ch1_paddr;
msg.buf.ch_paddr[2] = ch2_paddr;
@@ -2803,7 +2851,7 @@
vfe_send_outmsg(&vfe31_ctrl->subdev,
MSG_ID_OUTPUT_PRIMARY, ch0_paddr,
ch1_paddr, ch2_paddr,
- vfe31_ctrl->outpath.out0.image_mode);
+ vfe31_ctrl->outpath.out0.inst_handle);
if (vfe31_ctrl->liveshot_state == VFE_STATE_STOPPED)
vfe31_ctrl->liveshot_state = VFE_STATE_IDLE;
@@ -2876,7 +2924,7 @@
vfe_send_outmsg(&vfe31_ctrl->subdev,
MSG_ID_OUTPUT_SECONDARY, ch0_paddr,
ch1_paddr, ch2_paddr,
- vfe31_ctrl->outpath.out1.image_mode);
+ vfe31_ctrl->outpath.out1.inst_handle);
} else {
vfe31_ctrl->outpath.out1.frame_drop_cnt++;
CDBG("path_irq_1 - no free buffer!\n");
@@ -3315,6 +3363,22 @@
vfe31_ctrl->stats_ops.client);
}
break;
+ case VFE_CMD_STATS_UNREGBUF:
+ {
+ struct msm_stats_reqbuf *req_buf = NULL;
+ req_buf = (struct msm_stats_reqbuf *)cmd->value;
+ if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+ /* error. the length not match */
+ pr_err("%s: stats reqbuf input size = %d,\n"
+ "struct size = %d, mismatch\n",
+ __func__, cmd->length,
+ sizeof(struct msm_stats_reqbuf));
+ rc = -EINVAL ;
+ goto end;
+ }
+ rc = vfe31_stats_unregbuf(req_buf);
+ }
+ break;
default:
rc = -1;
pr_err("%s: cmd_type %d not supported", __func__,
@@ -3541,19 +3605,30 @@
struct msm_cam_media_controller *pmctl =
(struct msm_cam_media_controller *)v4l2_get_subdev_hostdata(sd);
struct msm_isp_cmd vfecmd;
- struct msm_camvfe_params *vfe_params =
- (struct msm_camvfe_params *)arg;
- struct msm_vfe_cfg_cmd *cmd = vfe_params->vfe_cfg;
- void *data = vfe_params->data;
+ struct msm_camvfe_params *vfe_params;
+ struct msm_vfe_cfg_cmd *cmd;
+ void *data;
long rc = 0;
struct vfe_cmd_stats_buf *scfg = NULL;
struct vfe_cmd_stats_ack *sack = NULL;
+ if (subdev_cmd == VIDIOC_MSM_VFE_INIT) {
+ CDBG("%s init\n", __func__);
+ return msm_vfe_subdev_init(sd);
+ } else if (subdev_cmd == VIDIOC_MSM_VFE_RELEASE) {
+ msm_vfe_subdev_release(sd);
+ return 0;
+ }
+ vfe_params = (struct msm_camvfe_params *)arg;
+ cmd = vfe_params->vfe_cfg;
+ data = vfe_params->data;
+
switch (cmd->cmd_type) {
case VFE_CMD_STATS_REQBUF:
case VFE_CMD_STATS_ENQUEUEBUF:
case VFE_CMD_STATS_FLUSH_BUFQ:
+ case VFE_CMD_STATS_UNREGBUF:
/* for easy porting put in one envelope */
rc = vfe_stats_bufq_sub_ioctl(cmd, vfe_params->data);
return rc;
@@ -3575,186 +3650,194 @@
return -EFAULT;
}
} else {
- /* here eith stats release or frame release. */
- if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
- cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
- cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR) {
- /* then must be stats release. */
- if (!data) {
- pr_err("%s: data = NULL, cmd->cmd_type = %d",
- __func__, cmd->cmd_type);
- return -EFAULT;
- }
- sack = kmalloc(sizeof(struct vfe_cmd_stats_ack),
+ /* here either stats release or frame release. */
+ if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
+ cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
+ cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR) {
+ /* then must be stats release. */
+ if (!data) {
+ pr_err("%s: data = NULL, " \
+ "cmd->cmd_type = %d\n",
+ __func__, cmd->cmd_type);
+ return -EFAULT;
+ }
+ sack = kmalloc(sizeof(struct vfe_cmd_stats_ack),
GFP_ATOMIC);
- if (!sack) {
- pr_err("%s: no mem for cmd->cmd_type = %d",
- __func__, cmd->cmd_type);
- return -ENOMEM;
+ if (!sack) {
+ pr_err("%s: no mem for " \
+ "cmd->cmd_type = %d\n",
+ __func__, cmd->cmd_type);
+ return -ENOMEM;
+ }
+
+ sack->nextStatsBuf = *(uint32_t *)data;
+ }
+ }
+
+ CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
+
+ if ((cmd->cmd_type == CMD_STATS_AF_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_AWB_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_IHIST_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_RS_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_CS_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_AEC_ENABLE)) {
+ scfg = NULL;
+ goto vfe31_config_done;
+ }
+ switch (cmd->cmd_type) {
+ case CMD_GENERAL: {
+ rc = vfe31_proc_general(pmctl, &vfecmd);
+ }
+ break;
+ case CMD_CONFIG_PING_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe31_output_ch *outch = vfe31_get_ch(path);
+ outch->ping = *((struct msm_free_buf *)data);
+ }
+ break;
+
+ case CMD_CONFIG_PONG_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe31_output_ch *outch = vfe31_get_ch(path);
+ outch->pong = *((struct msm_free_buf *)data);
+ }
+ break;
+
+ case CMD_CONFIG_FREE_BUF_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe31_output_ch *outch = vfe31_get_ch(path);
+ outch->free_buf = *((struct msm_free_buf *)data);
+ }
+ break;
+
+ case CMD_SNAP_BUF_RELEASE:
+ break;
+
+ case CMD_AXI_CFG_PRIM: {
+ uint32_t *axio = NULL;
+ axio = kmalloc(vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length,
+ GFP_ATOMIC);
+ if (!axio) {
+ rc = -ENOMEM;
+ break;
}
- sack->nextStatsBuf = *(uint32_t *)data;
- }
- }
-
- CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
-
- if ((cmd->cmd_type == CMD_STATS_AF_ENABLE) ||
- (cmd->cmd_type == CMD_STATS_AWB_ENABLE) ||
- (cmd->cmd_type == CMD_STATS_IHIST_ENABLE) ||
- (cmd->cmd_type == CMD_STATS_RS_ENABLE) ||
- (cmd->cmd_type == CMD_STATS_CS_ENABLE) ||
- (cmd->cmd_type == CMD_STATS_AEC_ENABLE)) {
- scfg = NULL;
- goto vfe31_config_done;
- }
- switch (cmd->cmd_type) {
- case CMD_GENERAL: {
- rc = vfe31_proc_general(pmctl, &vfecmd);
- }
- break;
- case CMD_CONFIG_PING_ADDR: {
- int path = *((int *)cmd->value);
- struct vfe31_output_ch *outch = vfe31_get_ch(path);
- outch->ping = *((struct msm_free_buf *)data);
- }
- break;
-
- case CMD_CONFIG_PONG_ADDR: {
- int path = *((int *)cmd->value);
- struct vfe31_output_ch *outch = vfe31_get_ch(path);
- outch->pong = *((struct msm_free_buf *)data);
- }
- break;
-
- case CMD_CONFIG_FREE_BUF_ADDR: {
- int path = *((int *)cmd->value);
- struct vfe31_output_ch *outch = vfe31_get_ch(path);
- outch->free_buf = *((struct msm_free_buf *)data);
- }
- break;
-
- case CMD_SNAP_BUF_RELEASE:
- break;
-
- case CMD_AXI_CFG_PRIM: {
- uint32_t *axio = NULL;
- axio = kmalloc(vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length,
- GFP_ATOMIC);
- if (!axio) {
- rc = -ENOMEM;
- break;
- }
-
- if (copy_from_user(axio, (void __user *)(vfecmd.value),
+ if (copy_from_user(axio, (void __user *)(vfecmd.value),
vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+ kfree(axio);
+ rc = -EFAULT;
+ break;
+ }
+ vfe31_config_axi(OUTPUT_PRIM, axio);
kfree(axio);
- rc = -EFAULT;
+ }
break;
- }
- vfe31_config_axi(OUTPUT_PRIM, axio);
- kfree(axio);
- }
- break;
- case CMD_AXI_CFG_PRIM_ALL_CHNLS: {
- uint32_t *axio = NULL;
- axio = kmalloc(vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length,
- GFP_ATOMIC);
- if (!axio) {
- rc = -ENOMEM;
- break;
- }
+ case CMD_AXI_CFG_PRIM_ALL_CHNLS: {
+ uint32_t *axio = NULL;
+ axio = kmalloc(vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length,
+ GFP_ATOMIC);
+ if (!axio) {
+ rc = -ENOMEM;
+ break;
+ }
- if (copy_from_user(axio, (void __user *)(vfecmd.value),
+ if (copy_from_user(axio, (void __user *)(vfecmd.value),
vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+ kfree(axio);
+ rc = -EFAULT;
+ break;
+ }
+ vfe31_config_axi(OUTPUT_PRIM_ALL_CHNLS, axio);
kfree(axio);
- rc = -EFAULT;
+ }
break;
- }
- vfe31_config_axi(OUTPUT_PRIM_ALL_CHNLS, axio);
- kfree(axio);
- }
- break;
- case CMD_AXI_CFG_PRIM|CMD_AXI_CFG_SEC: {
- uint32_t *axio = NULL;
- axio = kmalloc(vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length,
- GFP_ATOMIC);
- if (!axio) {
- rc = -ENOMEM;
- break;
- }
+ case CMD_AXI_CFG_PRIM|CMD_AXI_CFG_SEC: {
+ uint32_t *axio = NULL;
+ axio = kmalloc(vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length,
+ GFP_ATOMIC);
+ if (!axio) {
+ rc = -ENOMEM;
+ break;
+ }
- if (copy_from_user(axio, (void __user *)(vfecmd.value),
+ if (copy_from_user(axio, (void __user *)(vfecmd.value),
vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+ kfree(axio);
+ rc = -EFAULT;
+ break;
+ }
+ vfe31_config_axi(OUTPUT_PRIM|OUTPUT_SEC, axio);
kfree(axio);
- rc = -EFAULT;
+ }
break;
- }
- vfe31_config_axi(OUTPUT_PRIM|OUTPUT_SEC, axio);
- kfree(axio);
- }
- break;
- case CMD_AXI_CFG_PRIM|CMD_AXI_CFG_SEC_ALL_CHNLS: {
- uint32_t *axio = NULL;
- axio = kmalloc(vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length,
- GFP_ATOMIC);
- if (!axio) {
- rc = -ENOMEM;
- break;
- }
+ case CMD_AXI_CFG_PRIM|CMD_AXI_CFG_SEC_ALL_CHNLS: {
+ uint32_t *axio = NULL;
+ axio = kmalloc(vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length,
+ GFP_ATOMIC);
+ if (!axio) {
+ rc = -ENOMEM;
+ break;
+ }
- if (copy_from_user(axio, (void __user *)(vfecmd.value),
+ if (copy_from_user(axio, (void __user *)(vfecmd.value),
vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+ kfree(axio);
+ rc = -EFAULT;
+ break;
+ }
+ vfe31_config_axi
+ (OUTPUT_PRIM|OUTPUT_SEC_ALL_CHNLS, axio);
kfree(axio);
- rc = -EFAULT;
+ }
break;
- }
- vfe31_config_axi(OUTPUT_PRIM|OUTPUT_SEC_ALL_CHNLS, axio);
- kfree(axio);
- }
- break;
- case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC: {
- uint32_t *axio = NULL;
- axio = kmalloc(vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length,
+ case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC: {
+ uint32_t *axio = NULL;
+ axio = kmalloc(vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length,
GFP_ATOMIC);
- if (!axio) {
- rc = -ENOMEM;
- break;
- }
+ if (!axio) {
+ rc = -ENOMEM;
+ break;
+ }
- if (copy_from_user(axio, (void __user *)(vfecmd.value),
+ if (copy_from_user(axio, (void __user *)(vfecmd.value),
vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+ kfree(axio);
+ rc = -EFAULT;
+ break;
+ }
+ vfe31_config_axi
+ (OUTPUT_PRIM_ALL_CHNLS|OUTPUT_SEC, axio);
kfree(axio);
- rc = -EFAULT;
+ }
break;
- }
- vfe31_config_axi(OUTPUT_PRIM_ALL_CHNLS|OUTPUT_SEC, axio);
- kfree(axio);
- }
- break;
- case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC_ALL_CHNLS: {
- pr_err("%s Invalid/Unsupported AXI configuration %x",
- __func__, cmd->cmd_type);
- }
- break;
+ case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC_ALL_CHNLS: {
+ pr_err("%s Invalid/Unsupported AXI configuration %x",
+ __func__, cmd->cmd_type);
+ }
+ break;
- case CMD_AXI_START:
- /* No need to decouple AXI/VFE for VFE3.1*/
- break;
+ case CMD_AXI_START:
+ /* No need to decouple AXI/VFE for VFE3.1*/
+ break;
- case CMD_AXI_STOP:
- /* No need to decouple AXI/VFE for VFE3.1*/
- break;
+ case CMD_AXI_STOP:
+ /* No need to decouple AXI/VFE for VFE3.1*/
+ break;
- default:
- pr_err("%s Unsupported AXI configuration %x ", __func__,
- cmd->cmd_type);
- break;
+ case CMD_AXI_RESET:
+ /* No need to decouple AXI/VFE for VFE3.1*/
+ break;
+
+ default:
+ pr_err("%s Unsupported AXI configuration %x ", __func__,
+ cmd->cmd_type);
+ break;
}
}
vfe31_config_done:
@@ -3855,11 +3938,15 @@
usleep_range(10000, 15000);
}
-int msm_vfe_subdev_init(struct v4l2_subdev *sd,
- struct msm_cam_media_controller *mctl)
+int msm_vfe_subdev_init(struct v4l2_subdev *sd)
{
int rc = 0;
- v4l2_set_subdev_hostdata(sd, mctl);
+ struct msm_cam_media_controller *mctl;
+ mctl = v4l2_get_subdev_hostdata(sd);
+ if (mctl == NULL) {
+ rc = -EINVAL;
+ goto mctl_failed;
+ }
spin_lock_init(&vfe31_ctrl->stop_flag_lock);
spin_lock_init(&vfe31_ctrl->state_lock);
@@ -3939,6 +4026,7 @@
iounmap(vfe31_ctrl->vfebase);
vfe_remap_failed:
disable_irq(vfe31_ctrl->vfeirq->start);
+mctl_failed:
return rc;
}
@@ -4064,6 +4152,12 @@
sd_info.sd_index = 0;
sd_info.irq_num = vfe31_ctrl->vfeirq->start;
msm_cam_register_subdev_node(&vfe31_ctrl->subdev, &sd_info);
+
+ media_entity_init(&vfe31_ctrl->subdev.entity, 0, NULL, 0);
+ vfe31_ctrl->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ vfe31_ctrl->subdev.entity.group_id = VFE_DEV;
+ vfe31_ctrl->subdev.entity.name = pdev->name;
+ vfe31_ctrl->subdev.entity.revision = vfe31_ctrl->subdev.devnode->num;
return 0;
vfe31_no_resource:
diff --git a/drivers/media/video/msm/msm_vfe31_v4l2.h b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.h
similarity index 98%
rename from drivers/media/video/msm/msm_vfe31_v4l2.h
rename to drivers/media/video/msm/vfe/msm_vfe31_v4l2.h
index 2cba995..60db8e5 100644
--- a/drivers/media/video/msm/msm_vfe31_v4l2.h
+++ b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.h
@@ -216,11 +216,13 @@
#define V31_OPERATION_CFG_LEN 32
-#define V31_AXI_OUT_OFF 0x00000038
-#define V31_AXI_OUT_LEN 212
-#define V31_AXI_CH_INF_LEN 24
+#define V31_AXI_BUS_CMD_OFF 0x00000038
+#define V31_AXI_OUT_OFF 0x0000003C
+#define V31_AXI_OUT_LEN 240
#define V31_AXI_CFG_LEN 47
#define V31_AXI_RESERVED 1
+#define V31_AXI_RESERVED_LEN 4
+#define V31_AXI_BUS_CFG_LEN 16
#define V31_FRAME_SKIP_OFF 0x00000504
#define V31_FRAME_SKIP_LEN 32
@@ -696,7 +698,7 @@
struct vfe31_output_ch {
struct list_head free_buf_queue;
spinlock_t free_buf_lock;
- uint16_t image_mode;
+ uint32_t inst_handle;
int8_t ch0;
int8_t ch1;
int8_t ch2;
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/vfe/msm_vfe32.c
similarity index 77%
rename from drivers/media/video/msm/msm_vfe32.c
rename to drivers/media/video/msm/vfe/msm_vfe32.c
index c6dd143..28b88dd 100644
--- a/drivers/media/video/msm/msm_vfe32.c
+++ b/drivers/media/video/msm/vfe/msm_vfe32.c
@@ -216,6 +216,27 @@
{VFE_CMD_GET_RGB_G_TABLE},
{VFE_CMD_GET_LA_TABLE},
{VFE_CMD_DEMOSAICV3_UPDATE},
+ {VFE_CMD_ACTIVE_REGION_CFG},
+/*130*/ {VFE_CMD_COLOR_PROCESSING_CONFIG},
+ {VFE_CMD_STATS_WB_AEC_CONFIG},
+ {VFE_CMD_STATS_WB_AEC_UPDATE},
+ {VFE_CMD_Y_GAMMA_CONFIG},
+ {VFE_CMD_SCALE_OUTPUT1_CONFIG},
+/*135*/ {VFE_CMD_SCALE_OUTPUT2_CONFIG},
+ {VFE_CMD_CAPTURE_RAW},
+ {VFE_CMD_STOP_LIVESHOT},
+ {VFE_CMD_RECONFIG_VFE},
+ {VFE_CMD_STATS_REQBUF},
+/*140*/ {VFE_CMD_STATS_ENQUEUEBUF},
+ {VFE_CMD_STATS_FLUSH_BUFQ},
+ {VFE_CMD_STATS_UNREGBUF},
+ {VFE_CMD_STATS_BG_START, V32_STATS_BG_LEN, V32_STATS_BG_OFF},
+ {VFE_CMD_STATS_BG_STOP},
+ {VFE_CMD_STATS_BF_START, V32_STATS_BF_LEN, V32_STATS_BF_OFF},
+/*145*/ {VFE_CMD_STATS_BF_STOP},
+ {VFE_CMD_STATS_BHIST_START, V32_STATS_BHIST_LEN,
+ V32_STATS_BHIST_OFF},
+/*147*/ {VFE_CMD_STATS_BHIST_STOP},
};
uint32_t vfe32_AXI_WM_CFG[] = {
@@ -358,42 +379,72 @@
"GET_RGB_G_TABLE",
"GET_LA_TABLE",
"DEMOSAICV3_UPDATE",
+ "DUMMY_11",
+ "DUMMY_12", /*130*/
+ "DUMMY_13",
+ "DUMMY_14",
+ "DUMMY_15",
+ "DUMMY_16",
+ "DUMMY_17", /*135*/
+ "DUMMY_18",
+ "DUMMY_19",
+ "DUMMY_20",
+ "STATS_REQBUF",
+ "STATS_ENQUEUEBUF", /*140*/
+ "STATS_FLUSH_BUFQ",
+ "STATS_UNREGBUF",
+ "STATS_BG_START",
+ "STATS_BG_STOP",
+ "STATS_BF_START", /*145*/
+ "STATS_BF_STOP",
+ "STATS_BHIST_START",
+ "STATS_BHIST_STOP",
+ "RESET_2",
};
-static void vfe32_stop(struct vfe32_ctrl_type *vfe32_ctrl)
+uint8_t vfe32_use_bayer_stats(struct vfe32_ctrl_type *vfe32_ctrl)
{
- unsigned long flags;
+ if (vfe32_ctrl->ver_num.main >= 4) {
+ /* VFE 4 or above uses bayer stats */
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
- atomic_set(&vfe32_ctrl->share_ctrl->vstate, 0);
-
- /* for reset hw modules, and send msg when reset_irq comes.*/
- spin_lock_irqsave(&vfe32_ctrl->share_ctrl->stop_flag_lock, flags);
- vfe32_ctrl->share_ctrl->stop_ack_pending = TRUE;
- spin_unlock_irqrestore(&vfe32_ctrl->share_ctrl->stop_flag_lock, flags);
+static void axi_disable_irq(struct axi_ctrl_t *axi_ctrl)
+{
/* disable all interrupts. */
msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
/* clear all pending interrupts*/
msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
/* Ensure the write order while writing
to the command register using the barrier */
msm_camera_io_w_mb(1,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
-
- /* in either continuous or snapshot mode, stop command can be issued
- * at any time. stop camif immediately. */
- msm_camera_io_w(CAMIF_COMMAND_STOP_IMMEDIATELY,
- vfe32_ctrl->share_ctrl->vfebase + VFE_CAMIF_COMMAND);
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
}
-static void vfe32_subdev_notify(int id, int path, int image_mode,
+static void vfe32_stop(struct vfe32_ctrl_type *vfe32_ctrl)
+{
+
+ atomic_set(&vfe32_ctrl->share_ctrl->vstate, 0);
+ /* in either continuous or snapshot mode, stop command can be issued
+ * at any time. stop camif immediately. */
+ msm_camera_io_w(CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY,
+ vfe32_ctrl->share_ctrl->vfebase + VFE_CAMIF_COMMAND);
+ vfe32_ctrl->share_ctrl->operation_mode &=
+ ~(vfe32_ctrl->share_ctrl->current_mode);
+}
+
+static void vfe32_subdev_notify(int id, int path, uint32_t inst_handle,
struct v4l2_subdev *sd, struct vfe_share_ctrl_t *share_ctrl)
{
struct msm_vfe_resp rp;
@@ -403,7 +454,7 @@
CDBG("vfe32_subdev_notify : msgId = %d\n", id);
memset(&rp, 0, sizeof(struct msm_vfe_resp));
rp.evt_msg.type = MSM_CAMERA_MSG;
- frame_info.image_mode = image_mode;
+ frame_info.inst_handle = inst_handle;
frame_info.path = path;
rp.evt_msg.data = &frame_info;
rp.type = id;
@@ -417,33 +468,35 @@
uint32_t *ch_info;
uint32_t *axi_cfg = ao+V32_AXI_BUS_FMT_OFF;
int vfe_mode = (mode & ~(OUTPUT_TERT1|OUTPUT_TERT2));
+ uint32_t bus_cmd = *axi_cfg;
+ int i;
/* Update the corresponding write masters for each output*/
ch_info = axi_cfg + V32_AXI_CFG_LEN;
axi_ctrl->share_ctrl->outpath.out0.ch0 = 0x0000FFFF & *ch_info;
axi_ctrl->share_ctrl->outpath.out0.ch1 =
0x0000FFFF & (*ch_info++ >> 16);
- axi_ctrl->share_ctrl->outpath.out0.ch2 = 0x0000FFFF & *ch_info;
- axi_ctrl->share_ctrl->outpath.out0.image_mode =
- 0x0000FFFF & (*ch_info++ >> 16);
+ axi_ctrl->share_ctrl->outpath.out0.ch2 = 0x0000FFFF & *ch_info++;
+ axi_ctrl->share_ctrl->outpath.out0.inst_handle = *ch_info++;
+
axi_ctrl->share_ctrl->outpath.out1.ch0 = 0x0000FFFF & *ch_info;
axi_ctrl->share_ctrl->outpath.out1.ch1 =
0x0000FFFF & (*ch_info++ >> 16);
- axi_ctrl->share_ctrl->outpath.out1.ch2 = 0x0000FFFF & *ch_info;
- axi_ctrl->share_ctrl->outpath.out1.image_mode =
- 0x0000FFFF & (*ch_info++ >> 16);
+ axi_ctrl->share_ctrl->outpath.out1.ch2 = 0x0000FFFF & *ch_info++;
+ axi_ctrl->share_ctrl->outpath.out1.inst_handle = *ch_info++;
+
axi_ctrl->share_ctrl->outpath.out2.ch0 = 0x0000FFFF & *ch_info;
axi_ctrl->share_ctrl->outpath.out2.ch1 =
0x0000FFFF & (*ch_info++ >> 16);
- axi_ctrl->share_ctrl->outpath.out2.ch2 = 0x0000FFFF & *ch_info;
- axi_ctrl->share_ctrl->outpath.out2.image_mode =
- 0x0000FFFF & (*ch_info++ >> 16);
+ axi_ctrl->share_ctrl->outpath.out2.ch2 = 0x0000FFFF & *ch_info++;
+ axi_ctrl->share_ctrl->outpath.out2.inst_handle = *ch_info++;
+
axi_ctrl->share_ctrl->outpath.out3.ch0 = 0x0000FFFF & *ch_info;
axi_ctrl->share_ctrl->outpath.out3.ch1 =
0x0000FFFF & (*ch_info++ >> 16);
- axi_ctrl->share_ctrl->outpath.out3.ch2 = 0x0000FFFF & *ch_info;
- axi_ctrl->share_ctrl->outpath.out3.image_mode =
- 0x0000FFFF & (*ch_info++ >> 16);
+ axi_ctrl->share_ctrl->outpath.out3.ch2 = 0x0000FFFF & *ch_info++;
+ axi_ctrl->share_ctrl->outpath.out3.inst_handle = *ch_info++;
+
axi_ctrl->share_ctrl->outpath.output_mode = 0;
if (mode & OUTPUT_TERT1)
@@ -491,52 +544,75 @@
bus_cfg:
msm_camera_io_w(*ao, axi_ctrl->share_ctrl->vfebase +
VFE_BUS_IO_FORMAT_CFG);
+ axi_cfg++;
msm_camera_io_memcpy(axi_ctrl->share_ctrl->vfebase +
vfe32_cmd[VFE_CMD_AXI_OUT_CFG].offset, axi_cfg,
- vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length - V32_AXI_CH_INF_LEN
- - V32_AXI_BUS_FMT_LEN);
+ V32_AXI_BUS_CFG_LEN);
+ axi_cfg += V32_AXI_BUS_CFG_LEN/4;
+ for (i = 0; i < ARRAY_SIZE(vfe32_AXI_WM_CFG); i++) {
+ msm_camera_io_w(*axi_cfg,
+ axi_ctrl->share_ctrl->vfebase+vfe32_AXI_WM_CFG[i]);
+ axi_cfg += 3;
+ msm_camera_io_memcpy(
+ axi_ctrl->share_ctrl->vfebase+vfe32_AXI_WM_CFG[i]+12,
+ axi_cfg, 12);
+ axi_cfg += 3;
+ }
+ msm_camera_io_w(bus_cmd, axi_ctrl->share_ctrl->vfebase +
+ V32_AXI_BUS_CMD_OFF);
return 0;
}
+static void axi_reset_internal_variables(
+ struct axi_ctrl_t *axi_ctrl)
+{
+ unsigned long flags;
+ /* state control variables */
+ axi_ctrl->share_ctrl->start_ack_pending = FALSE;
+ atomic_set(&irq_cnt, 0);
+
+ spin_lock_irqsave(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
+ axi_ctrl->share_ctrl->stop_ack_pending = FALSE;
+ spin_unlock_irqrestore(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
+
+ init_completion(&axi_ctrl->share_ctrl->reset_complete);
+
+ spin_lock_irqsave(&axi_ctrl->share_ctrl->update_ack_lock, flags);
+ axi_ctrl->share_ctrl->update_ack_pending = FALSE;
+ spin_unlock_irqrestore(&axi_ctrl->share_ctrl->update_ack_lock, flags);
+
+ axi_ctrl->share_ctrl->recording_state = VFE_STATE_IDLE;
+ axi_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
+
+ atomic_set(&axi_ctrl->share_ctrl->vstate, 0);
+ atomic_set(&axi_ctrl->share_ctrl->rdi0_update_ack_pending, 0);
+ atomic_set(&axi_ctrl->share_ctrl->rdi1_update_ack_pending, 0);
+ atomic_set(&axi_ctrl->share_ctrl->rdi2_update_ack_pending, 0);
+
+ /* 0 for continuous mode, 1 for snapshot mode */
+ axi_ctrl->share_ctrl->operation_mode = 0;
+ axi_ctrl->share_ctrl->current_mode = 0;
+ axi_ctrl->share_ctrl->outpath.output_mode = 0;
+ axi_ctrl->share_ctrl->vfe_capture_count = 0;
+
+ /* this is unsigned 32 bit integer. */
+ axi_ctrl->share_ctrl->vfeFrameId = 0;
+}
+
static void vfe32_reset_internal_variables(
struct vfe32_ctrl_type *vfe32_ctrl)
{
- unsigned long flags;
- vfe32_ctrl->vfeImaskCompositePacked = 0;
- /* state control variables */
- vfe32_ctrl->start_ack_pending = FALSE;
- atomic_set(&irq_cnt, 0);
-
- spin_lock_irqsave(&vfe32_ctrl->share_ctrl->stop_flag_lock, flags);
- vfe32_ctrl->share_ctrl->stop_ack_pending = FALSE;
- spin_unlock_irqrestore(&vfe32_ctrl->share_ctrl->stop_flag_lock, flags);
-
- vfe32_ctrl->reset_ack_pending = FALSE;
-
- spin_lock_irqsave(&vfe32_ctrl->update_ack_lock, flags);
- vfe32_ctrl->update_ack_pending = FALSE;
- spin_unlock_irqrestore(&vfe32_ctrl->update_ack_lock, flags);
-
- vfe32_ctrl->recording_state = VFE_STATE_IDLE;
- vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
-
- atomic_set(&vfe32_ctrl->share_ctrl->vstate, 0);
-
- /* 0 for continuous mode, 1 for snapshot mode */
- vfe32_ctrl->share_ctrl->operation_mode = 0;
- vfe32_ctrl->share_ctrl->outpath.output_mode = 0;
- vfe32_ctrl->share_ctrl->vfe_capture_count = 0;
-
- /* this is unsigned 32 bit integer. */
- vfe32_ctrl->share_ctrl->vfeFrameId = 0;
/* Stats control variables. */
- memset(&(vfe32_ctrl->afStatsControl), 0,
+ memset(&(vfe32_ctrl->afbfStatsControl), 0,
sizeof(struct vfe_stats_control));
memset(&(vfe32_ctrl->awbStatsControl), 0,
sizeof(struct vfe_stats_control));
- memset(&(vfe32_ctrl->aecStatsControl), 0,
+ memset(&(vfe32_ctrl->aecbgStatsControl), 0,
+ sizeof(struct vfe_stats_control));
+
+ memset(&(vfe32_ctrl->bhistStatsControl), 0,
sizeof(struct vfe_stats_control));
memset(&(vfe32_ctrl->ihistStatsControl), 0,
@@ -553,30 +629,86 @@
vfe32_ctrl->snapshot_frame_cnt = 0;
}
-static void vfe32_reset(struct vfe32_ctrl_type *vfe32_ctrl)
+static void vfe32_program_dmi_cfg(
+ enum VFE32_DMI_RAM_SEL bankSel,
+ struct vfe32_ctrl_type *vfe32_ctrl)
{
- vfe32_reset_internal_variables(vfe32_ctrl);
+ /* set bit 8 for auto increment. */
+ uint32_t value = VFE_DMI_CFG_DEFAULT;
+ value += (uint32_t)bankSel;
+ CDBG("%s: banksel = %d\n", __func__, bankSel);
+
+ msm_camera_io_w(value, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_CFG);
+ /* by default, always starts with offset 0.*/
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_ADDR);
+}
+
+static void vfe32_reset_dmi_tables(
+ struct vfe32_ctrl_type *vfe32_ctrl)
+{
+ int i = 0;
+
+ /* Reset Histogram LUTs */
+ CDBG("Reset Bayer histogram LUT : 0\n");
+ vfe32_program_dmi_cfg(STATS_BHIST_RAM0, vfe32_ctrl);
+ /* Loop for configuring LUT */
+ for (i = 0; i < 256; i++) {
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_HI);
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_LO);
+ }
+ vfe32_program_dmi_cfg(NO_MEM_SELECTED, vfe32_ctrl);
+
+ CDBG("Reset Bayer Histogram LUT: 1\n");
+ vfe32_program_dmi_cfg(STATS_BHIST_RAM1, vfe32_ctrl);
+ /* Loop for configuring LUT */
+ for (i = 0; i < 256; i++) {
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_HI);
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_LO);
+ }
+ vfe32_program_dmi_cfg(NO_MEM_SELECTED, vfe32_ctrl);
+
+ CDBG("Reset IHistogram LUT\n");
+ vfe32_program_dmi_cfg(STATS_IHIST_RAM, vfe32_ctrl);
+ /* Loop for configuring LUT */
+ for (i = 0; i < 256; i++) {
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_HI);
+ msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_LO);
+ }
+ vfe32_program_dmi_cfg(NO_MEM_SELECTED, vfe32_ctrl);
+}
+
+static int axi_reset(struct axi_ctrl_t *axi_ctrl)
+{
+ axi_reset_internal_variables(axi_ctrl);
/* disable all interrupts. vfeImaskLocal is also reset to 0
* to begin with. */
msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
/* clear all pending interrupts*/
msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
/* Ensure the write order while writing
to the command register using the barrier */
- msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
+ msm_camera_io_w_mb(1, axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
/* enable reset_ack interrupt. */
msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
/* Write to VFE_GLOBAL_RESET_CMD to reset the vfe hardware. Once reset
* is done, hardware interrupt will be generated. VFE ist processes
@@ -586,7 +718,10 @@
/* Ensure the write order while writing
to the command register using the barrier */
msm_camera_io_w_mb(VFE_RESET_UPON_RESET_CMD,
- vfe32_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+ axi_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+
+ return wait_for_completion_interruptible(
+ &axi_ctrl->share_ctrl->reset_complete);
}
static int vfe32_operation_config(uint32_t *cmd,
@@ -677,6 +812,26 @@
return 0L;
}
+
+static unsigned long vfe32_stats_unregbuf(
+ struct vfe32_ctrl_type *vfe32_ctrl,
+ struct msm_stats_reqbuf *req_buf)
+{
+ int i = 0, rc = 0;
+
+ for (i = 0; i < req_buf->num_buf; i++) {
+ rc = vfe32_ctrl->stats_ops.buf_unprepare(
+ vfe32_ctrl->stats_ops.stats_ctrl,
+ req_buf->stats_type, i,
+ vfe32_ctrl->stats_ops.client);
+ if (rc < 0) {
+ pr_err("%s: unreg stats buf (type = %d) err = %d",
+ __func__, req_buf->stats_type, rc);
+ return rc;
+ }
+ }
+ return 0L;
+}
static int vfe_stats_awb_buf_init(
struct vfe32_ctrl_type *vfe32_ctrl,
struct vfe_cmd_stats_buf *in)
@@ -708,14 +863,18 @@
return 0;
}
-static int vfe_stats_aec_buf_init(
- struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_buf *in)
+static uint32_t vfe_stats_aec_bg_buf_init(
+ struct vfe32_ctrl_type *vfe32_ctrl)
{
uint32_t addr;
unsigned long flags;
+ uint32_t stats_type;
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AEC
+ : MSM_STATS_TYPE_BG;
spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
- addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AEC);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
if (!addr) {
pr_err("%s: dq aec ping buf from free buf queue",
@@ -724,9 +883,9 @@
}
msm_camera_io_w(addr,
vfe32_ctrl->share_ctrl->vfebase +
- VFE_BUS_STATS_AEC_WR_PING_ADDR);
+ VFE_BUS_STATS_AEC_BG_WR_PING_ADDR);
spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
- addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AEC);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
if (!addr) {
pr_err("%s: dq aec pong buf from free buf queue",
@@ -735,26 +894,31 @@
}
msm_camera_io_w(addr,
vfe32_ctrl->share_ctrl->vfebase +
- VFE_BUS_STATS_AEC_WR_PONG_ADDR);
+ VFE_BUS_STATS_AEC_BG_WR_PONG_ADDR);
return 0;
}
-static int vfe_stats_af_buf_init(
- struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_buf *in)
+static int vfe_stats_af_bf_buf_init(
+ struct vfe32_ctrl_type *vfe32_ctrl)
{
uint32_t addr;
unsigned long flags;
int rc = 0;
+ uint32_t stats_type;
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AF
+ : MSM_STATS_TYPE_BF;
+
spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
- rc = vfe32_stats_flush_enqueue(vfe32_ctrl, MSM_STATS_TYPE_AF);
+ rc = vfe32_stats_flush_enqueue(vfe32_ctrl, stats_type);
if (rc < 0) {
pr_err("%s: dq stats buf err = %d",
__func__, rc);
spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
return -EINVAL;
}
- addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AF);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
if (!addr) {
pr_err("%s: dq af ping buf from free buf queue", __func__);
@@ -762,9 +926,9 @@
}
msm_camera_io_w(addr,
vfe32_ctrl->share_ctrl->vfebase +
- VFE_BUS_STATS_AF_WR_PING_ADDR);
+ VFE_BUS_STATS_AF_BF_WR_PING_ADDR);
spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
- addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AF);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
if (!addr) {
pr_err("%s: dq af pong buf from free buf queue", __func__);
@@ -772,14 +936,44 @@
}
msm_camera_io_w(addr,
vfe32_ctrl->share_ctrl->vfebase +
- VFE_BUS_STATS_AF_WR_PONG_ADDR);
+ VFE_BUS_STATS_AF_BF_WR_PONG_ADDR);
+ return 0;
+}
+
+static uint32_t vfe_stats_bhist_buf_init(
+ struct vfe32_ctrl_type *vfe32_ctrl)
+{
+ uint32_t addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_BHIST);
+ spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+ pr_err("%s: dq ihist ping buf from free buf queue",
+ __func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_SKIN_BHIST_WR_PING_ADDR);
+ spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_BHIST);
+ spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+ pr_err("%s: dq ihist pong buf from free buf queue",
+ __func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_SKIN_BHIST_WR_PONG_ADDR);
return 0;
}
static int vfe_stats_ihist_buf_init(
- struct vfe32_ctrl_type *vfe32_ctrl,
- struct vfe_cmd_stats_buf *in)
+ struct vfe32_ctrl_type *vfe32_ctrl)
{
uint32_t addr;
unsigned long flags;
@@ -811,8 +1005,7 @@
}
static int vfe_stats_rs_buf_init(
- struct vfe32_ctrl_type *vfe32_ctrl,
- struct vfe_cmd_stats_buf *in)
+ struct vfe32_ctrl_type *vfe32_ctrl)
{
uint32_t addr;
unsigned long flags;
@@ -841,8 +1034,7 @@
}
static int vfe_stats_cs_buf_init(
- struct vfe32_ctrl_type *vfe32_ctrl,
- struct vfe_cmd_stats_buf *in)
+ struct vfe32_ctrl_type *vfe32_ctrl)
{
uint32_t addr;
unsigned long flags;
@@ -873,11 +1065,11 @@
{
uint32_t irq_mask = 0x00E00021, irq_mask1, reg_update;
uint16_t vfe_operation_mode =
- vfe32_ctrl->share_ctrl->operation_mode & ~(VFE_OUTPUTS_RDI0|
+ vfe32_ctrl->share_ctrl->current_mode & ~(VFE_OUTPUTS_RDI0|
VFE_OUTPUTS_RDI1);
- vfe32_ctrl->start_ack_pending = TRUE;
+ vfe32_ctrl->share_ctrl->start_ack_pending = TRUE;
CDBG("VFE opertaion mode = 0x%x, output mode = 0x%x\n",
- vfe32_ctrl->share_ctrl->operation_mode,
+ vfe32_ctrl->share_ctrl->current_mode,
vfe32_ctrl->share_ctrl->outpath.output_mode);
if (vfe32_ctrl->share_ctrl->stats_comp)
irq_mask |= VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK;
@@ -898,17 +1090,29 @@
msm_camera_io_r_mb(vfe32_ctrl->share_ctrl->vfebase +
VFE_REG_UPDATE_CMD);
- if (vfe32_ctrl->share_ctrl->operation_mode & VFE_OUTPUTS_RDI0) {
+ if (vfe32_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI0) {
irq_mask1 |= VFE_IRQ_STATUS1_RDI0_REG_UPDATE_MASK;
msm_camera_io_w(irq_mask1, vfe32_ctrl->share_ctrl->vfebase +
VFE_IRQ_MASK_1);
- msm_camera_io_w_mb(reg_update|0x2, vfe32_ctrl->share_ctrl->
- vfebase + VFE_REG_UPDATE_CMD);
+ if (!atomic_cmpxchg(
+ &vfe32_ctrl->share_ctrl->rdi0_update_ack_pending,
+ 0, 1)) {
+ msm_camera_io_w_mb(reg_update|0x2,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_REG_UPDATE_CMD);
+ }
}
- if (vfe32_ctrl->share_ctrl->operation_mode & VFE_OUTPUTS_RDI1) {
+ if (vfe32_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI1) {
irq_mask1 |= VFE_IRQ_STATUS1_RDI1_REG_UPDATE_MASK;
msm_camera_io_w(irq_mask1, vfe32_ctrl->share_ctrl->vfebase +
VFE_IRQ_MASK_1);
+ if (!atomic_cmpxchg(
+ &vfe32_ctrl->share_ctrl->rdi1_update_ack_pending,
+ 0, 1)) {
+ msm_camera_io_w_mb(reg_update|0x4,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_REG_UPDATE_CMD);
+ }
msm_camera_io_w_mb(reg_update|0x4, vfe32_ctrl->share_ctrl->
vfebase + VFE_REG_UPDATE_CMD);
}
@@ -918,6 +1122,8 @@
msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
VFE_CAMIF_COMMAND);
}
+ vfe32_ctrl->share_ctrl->operation_mode |=
+ vfe32_ctrl->share_ctrl->current_mode;
/* Ensure the write order while writing
to the command register using the barrier */
atomic_set(&vfe32_ctrl->share_ctrl->vstate, 1);
@@ -929,7 +1135,7 @@
{
msm_camio_bus_scale_cfg(
pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
- vfe32_ctrl->recording_state = VFE_STATE_START_REQUESTED;
+ vfe32_ctrl->share_ctrl->recording_state = VFE_STATE_START_REQUESTED;
msm_camera_io_w_mb(1,
vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
return 0;
@@ -939,7 +1145,7 @@
struct msm_cam_media_controller *pmctl,
struct vfe32_ctrl_type *vfe32_ctrl)
{
- vfe32_ctrl->recording_state = VFE_STATE_STOP_REQUESTED;
+ vfe32_ctrl->share_ctrl->recording_state = VFE_STATE_STOP_REQUESTED;
msm_camera_io_w_mb(1,
vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
msm_camio_bus_scale_cfg(
@@ -956,6 +1162,8 @@
vfe32_ctrl->share_ctrl->vfe_capture_count =
vfe32_ctrl->share_ctrl->outpath.out0.capture_cnt;
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_LIVESHOT);
vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_START_REQUESTED;
msm_camera_io_w_mb(1, vfe32_ctrl->
share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
@@ -968,96 +1176,14 @@
vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_STOP_REQUESTED;
msm_camera_io_w_mb(1,
vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
}
static int vfe32_zsl(
struct msm_cam_media_controller *pmctl,
struct vfe32_ctrl_type *vfe32_ctrl)
{
- uint32_t irq_comp_mask = 0;
- /* capture command is valid for both idle and active state. */
- irq_comp_mask =
- msm_camera_io_r(vfe32_ctrl->
- share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
-
- CDBG("%s:op mode %d O/P Mode %d\n", __func__,
- vfe32_ctrl->share_ctrl->operation_mode,
- vfe32_ctrl->share_ctrl->outpath.output_mode);
-
- if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_PRIMARY) {
- irq_comp_mask |= (
- (0x1 << (vfe32_ctrl->share_ctrl->outpath.out0.ch0)) |
- (0x1 << (vfe32_ctrl->share_ctrl->outpath.out0.ch1)));
- } else if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
- irq_comp_mask |= (
- (0x1 << (vfe32_ctrl->share_ctrl->outpath.out0.ch0)) |
- (0x1 << (vfe32_ctrl->share_ctrl->outpath.out0.ch1)) |
- (0x1 << (vfe32_ctrl->share_ctrl->outpath.out0.ch2)));
- }
-
- if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_SECONDARY) {
- irq_comp_mask |= ((0x1 << (vfe32_ctrl->
- share_ctrl->outpath.out1.ch0 + 8)) |
- (0x1 << (vfe32_ctrl->
- share_ctrl->outpath.out1.ch1 + 8)));
- } else if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
- irq_comp_mask |= (
- (0x1 << (vfe32_ctrl->
- share_ctrl->outpath.out1.ch0 + 8)) |
- (0x1 << (vfe32_ctrl->
- share_ctrl->outpath.out1.ch1 + 8)) |
- (0x1 << (vfe32_ctrl->
- share_ctrl->outpath.out1.ch2 + 8)));
- }
-
- if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_PRIMARY) {
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out0.ch0]);
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out0.ch1]);
- } else if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out0.ch0]);
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out0.ch1]);
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out0.ch2]);
- }
-
- if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_SECONDARY) {
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out1.ch0]);
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out1.ch1]);
- } else if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out1.ch0]);
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out1.ch1]);
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out1.ch2]);
- }
-
- msm_camera_io_w(irq_comp_mask,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
vfe32_start_common(vfe32_ctrl);
msm_camio_bus_scale_cfg(
pmctl->sdata->pdata->cam_bus_scale_table, S_ZSL);
@@ -1071,26 +1197,8 @@
struct vfe32_ctrl_type *vfe32_ctrl,
uint32_t num_frames_capture)
{
- uint32_t irq_comp_mask = 0;
-
vfe32_ctrl->share_ctrl->outpath.out0.capture_cnt = num_frames_capture;
vfe32_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
-
- irq_comp_mask =
- msm_camera_io_r(
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
-
- if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_PRIMARY) {
- irq_comp_mask |=
- (0x1 << (vfe32_ctrl->share_ctrl->outpath.out0.ch0));
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out0.ch0]);
- }
-
- msm_camera_io_w(irq_comp_mask,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
msm_camio_bus_scale_cfg(
pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
vfe32_start_common(vfe32_ctrl);
@@ -1102,72 +1210,24 @@
uint32_t num_frames_capture,
struct vfe32_ctrl_type *vfe32_ctrl)
{
- uint32_t irq_comp_mask = 0;
-
/* capture command is valid for both idle and active state. */
vfe32_ctrl->share_ctrl->outpath.out1.capture_cnt = num_frames_capture;
- if (vfe32_ctrl->share_ctrl->operation_mode ==
+ if (vfe32_ctrl->share_ctrl->current_mode ==
VFE_OUTPUTS_MAIN_AND_THUMB ||
- vfe32_ctrl->share_ctrl->operation_mode ==
+ vfe32_ctrl->share_ctrl->current_mode ==
VFE_OUTPUTS_THUMB_AND_MAIN ||
- vfe32_ctrl->share_ctrl->operation_mode ==
+ vfe32_ctrl->share_ctrl->current_mode ==
VFE_OUTPUTS_JPEG_AND_THUMB ||
- vfe32_ctrl->share_ctrl->operation_mode ==
+ vfe32_ctrl->share_ctrl->current_mode ==
VFE_OUTPUTS_THUMB_AND_JPEG) {
vfe32_ctrl->share_ctrl->outpath.out0.capture_cnt =
num_frames_capture;
}
vfe32_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
- irq_comp_mask = msm_camera_io_r(
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
-
- if (vfe32_ctrl->share_ctrl->operation_mode ==
- VFE_OUTPUTS_MAIN_AND_THUMB ||
- vfe32_ctrl->share_ctrl->operation_mode ==
- VFE_OUTPUTS_JPEG_AND_THUMB ||
- vfe32_ctrl->share_ctrl->operation_mode ==
- VFE_OUTPUTS_THUMB_AND_MAIN) {
- if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_PRIMARY) {
- irq_comp_mask |= (0x1 << vfe32_ctrl->
- share_ctrl->outpath.out0.ch0 |
- 0x1 << vfe32_ctrl->
- share_ctrl->outpath.out0.ch1);
- }
- if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_SECONDARY) {
- irq_comp_mask |=
- (0x1 << (vfe32_ctrl->
- share_ctrl->outpath.out1.ch0 + 8) |
- 0x1 << (vfe32_ctrl->
- share_ctrl->outpath.out1.ch1 + 8));
- }
- if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_PRIMARY) {
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out0.ch0]);
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out0.ch1]);
- }
- if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_SECONDARY) {
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out1.ch0]);
- msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
- vfe32_AXI_WM_CFG[vfe32_ctrl->
- share_ctrl->outpath.out1.ch1]);
- }
- }
vfe32_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
- msm_camera_io_w(irq_comp_mask,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
- msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
msm_camio_bus_scale_cfg(
pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
@@ -1182,58 +1242,6 @@
struct msm_cam_media_controller *pmctl,
struct vfe32_ctrl_type *vfe32_ctrl)
{
- uint32_t irq_comp_mask = 0, irq_mask = 0;
-
- irq_comp_mask =
- msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
- VFE_IRQ_COMP_MASK);
-
- if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_PRIMARY) {
- irq_comp_mask |= (
- 0x1 << vfe32_ctrl->share_ctrl->outpath.out0.ch0 |
- 0x1 << vfe32_ctrl->share_ctrl->outpath.out0.ch1);
- } else if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
- irq_comp_mask |= (
- 0x1 << vfe32_ctrl->share_ctrl->outpath.out0.ch0 |
- 0x1 << vfe32_ctrl->share_ctrl->outpath.out0.ch1 |
- 0x1 << vfe32_ctrl->share_ctrl->outpath.out0.ch2);
- }
- if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_SECONDARY) {
- irq_comp_mask |= (
- 0x1 << (vfe32_ctrl->share_ctrl->outpath.out1.ch0 + 8) |
- 0x1 << (vfe32_ctrl->share_ctrl->outpath.out1.ch1 + 8));
- } else if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
- irq_comp_mask |= (
- 0x1 << (vfe32_ctrl->share_ctrl->outpath.out1.ch0 + 8) |
- 0x1 << (vfe32_ctrl->share_ctrl->outpath.out1.ch1 + 8) |
- 0x1 << (vfe32_ctrl->share_ctrl->outpath.out1.ch2 + 8));
- }
- if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_TERTIARY1) {
- irq_mask = msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
- VFE_IRQ_MASK_0);
- irq_mask |= (0x1 << (vfe32_ctrl->share_ctrl->outpath.out2.ch0 +
- VFE_WM_OFFSET));
- msm_camera_io_w(irq_mask, vfe32_ctrl->share_ctrl->vfebase +
- VFE_IRQ_MASK_0);
- }
- if (vfe32_ctrl->share_ctrl->outpath.output_mode &
- VFE32_OUTPUT_MODE_TERTIARY2) {
- irq_mask = msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
- VFE_IRQ_MASK_0);
- irq_mask |= (0x1 << (vfe32_ctrl->share_ctrl->outpath.out3.ch0 +
- VFE_WM_OFFSET));
- msm_camera_io_w(irq_mask, vfe32_ctrl->share_ctrl->vfebase +
- VFE_IRQ_MASK_0);
- }
-
- msm_camera_io_w(irq_comp_mask,
- vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
-
msm_camio_bus_scale_cfg(
pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
vfe32_start_common(vfe32_ctrl);
@@ -1287,9 +1295,9 @@
vfe32_ctrl->update_gamma = false;
}
- spin_lock_irqsave(&vfe32_ctrl->update_ack_lock, flags);
- vfe32_ctrl->update_ack_pending = TRUE;
- spin_unlock_irqrestore(&vfe32_ctrl->update_ack_lock, flags);
+ spin_lock_irqsave(&vfe32_ctrl->share_ctrl->update_ack_lock, flags);
+ vfe32_ctrl->share_ctrl->update_ack_pending = TRUE;
+ spin_unlock_irqrestore(&vfe32_ctrl->share_ctrl->update_ack_lock, flags);
/* Ensure the write order while writing
to the command register using the barrier */
msm_camera_io_w_mb(1,
@@ -1368,19 +1376,7 @@
vfe32_ctrl->share_ctrl->vfebase + V32_TIMER_SELECT_OFF);
}
-static void vfe32_program_dmi_cfg(
- enum VFE32_DMI_RAM_SEL bankSel,
- struct vfe32_ctrl_type *vfe32_ctrl)
-{
- /* set bit 8 for auto increment. */
- uint32_t value = VFE_DMI_CFG_DEFAULT;
- value += (uint32_t)bankSel;
- CDBG("%s: banksel = %d\n", __func__, bankSel);
- msm_camera_io_w(value, vfe32_ctrl->share_ctrl->vfebase + VFE_DMI_CFG);
- /* by default, always starts with offset 0.*/
- msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase + VFE_DMI_ADDR);
-}
static void vfe32_write_gamma_cfg(
enum VFE32_DMI_RAM_SEL channel_sel,
const uint32_t *tbl,
@@ -1464,18 +1460,18 @@
{
struct vfe32_output_ch *outch = NULL;
struct msm_free_buf *b = NULL;
- uint32_t image_mode = 0;
+ uint32_t inst_handle = 0;
if (path == VFE_MSG_OUTPUT_PRIMARY)
- image_mode = axi_ctrl->share_ctrl->outpath.out0.image_mode;
+ inst_handle = axi_ctrl->share_ctrl->outpath.out0.inst_handle;
else if (path == VFE_MSG_OUTPUT_SECONDARY)
- image_mode = axi_ctrl->share_ctrl->outpath.out1.image_mode;
+ inst_handle = axi_ctrl->share_ctrl->outpath.out1.inst_handle;
else if (path == VFE_MSG_OUTPUT_TERTIARY1)
- image_mode = axi_ctrl->share_ctrl->outpath.out2.image_mode;
+ inst_handle = axi_ctrl->share_ctrl->outpath.out2.inst_handle;
else if (path == VFE_MSG_OUTPUT_TERTIARY2)
- image_mode = axi_ctrl->share_ctrl->outpath.out3.image_mode;
+ inst_handle = axi_ctrl->share_ctrl->outpath.out3.inst_handle;
- vfe32_subdev_notify(id, path, image_mode,
+ vfe32_subdev_notify(id, path, inst_handle,
&axi_ctrl->subdev, axi_ctrl->share_ctrl);
outch = vfe32_get_ch(path, axi_ctrl->share_ctrl);
if (outch->free_buf.ch_paddr[0])
@@ -1487,17 +1483,17 @@
{
struct vfe32_output_ch *outch = NULL;
int rc = 0;
- uint32_t image_mode = 0;
+ uint32_t inst_handle = 0;
if (path == VFE_MSG_OUTPUT_PRIMARY)
- image_mode = vfe32_ctrl->share_ctrl->outpath.out0.image_mode;
+ inst_handle = vfe32_ctrl->share_ctrl->outpath.out0.inst_handle;
else if (path == VFE_MSG_OUTPUT_SECONDARY)
- image_mode = vfe32_ctrl->share_ctrl->outpath.out1.image_mode;
+ inst_handle = vfe32_ctrl->share_ctrl->outpath.out1.inst_handle;
else if (path == VFE_MSG_OUTPUT_TERTIARY1)
- image_mode = vfe32_ctrl->share_ctrl->outpath.out2.image_mode;
+ inst_handle = vfe32_ctrl->share_ctrl->outpath.out2.inst_handle;
else if (path == VFE_MSG_OUTPUT_TERTIARY2)
- image_mode = vfe32_ctrl->share_ctrl->outpath.out3.image_mode;
+ inst_handle = vfe32_ctrl->share_ctrl->outpath.out3.inst_handle;
- vfe32_subdev_notify(id, path, image_mode,
+ vfe32_subdev_notify(id, path, inst_handle,
&vfe32_ctrl->subdev, vfe32_ctrl->share_ctrl);
outch = vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
if (outch->ping.ch_paddr[0] && outch->pong.ch_paddr[0]) {
@@ -1511,7 +1507,7 @@
vfe32_ctrl->share_ctrl->vfebase, outch->ch0,
outch->pong.ch_paddr[0]);
- if ((vfe32_ctrl->share_ctrl->operation_mode !=
+ if ((vfe32_ctrl->share_ctrl->current_mode !=
VFE_OUTPUTS_RAW) && (path != VFE_MSG_OUTPUT_TERTIARY1)
&& (path != VFE_MSG_OUTPUT_TERTIARY2)) {
vfe32_put_ch_ping_addr(
@@ -1578,12 +1574,13 @@
struct vfe32_ctrl_type *vfe32_ctrl)
{
int i , rc = 0;
- uint32_t old_val = 0 , new_val = 0;
+ uint32_t old_val = 0 , new_val = 0, module_val = 0;
uint32_t *cmdp = NULL;
uint32_t *cmdp_local = NULL;
uint32_t snapshot_cnt = 0;
uint32_t temp1 = 0, temp2 = 0;
uint16_t vfe_mode = 0;
+ struct msm_camera_vfe_params_t vfe_params;
CDBG("vfe32_proc_general: cmdID = %s, length = %d\n",
vfe32_general_cmd[cmd->id], cmd->length);
@@ -1591,39 +1588,48 @@
case VFE_CMD_RESET:
pr_info("vfe32_proc_general: cmdID = %s\n",
vfe32_general_cmd[cmd->id]);
- vfe32_reset(vfe32_ctrl);
+ vfe32_reset_internal_variables(vfe32_ctrl);
break;
case VFE_CMD_START:
pr_info("vfe32_proc_general: cmdID = %s\n",
vfe32_general_cmd[cmd->id]);
- vfe_mode = vfe32_ctrl->share_ctrl->operation_mode
+ if (copy_from_user(&vfe_params,
+ (void __user *)(cmd->value),
+ sizeof(struct msm_camera_vfe_params_t))) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+
+ vfe32_ctrl->share_ctrl->current_mode =
+ vfe_params.operation_mode;
+ vfe_mode = vfe32_ctrl->share_ctrl->current_mode
& ~(VFE_OUTPUTS_RDI0|VFE_OUTPUTS_RDI1);
if (vfe_mode) {
- if ((vfe32_ctrl->share_ctrl->operation_mode &
+ if ((vfe32_ctrl->share_ctrl->current_mode &
VFE_OUTPUTS_PREVIEW_AND_VIDEO) ||
- (vfe32_ctrl->share_ctrl->operation_mode &
+ (vfe32_ctrl->share_ctrl->current_mode &
VFE_OUTPUTS_PREVIEW))
/* Configure primary channel */
rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_V32_START,
+ VFE_MSG_START,
VFE_MSG_OUTPUT_PRIMARY,
vfe32_ctrl);
else
/* Configure secondary channel */
rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_V32_START,
+ VFE_MSG_START,
VFE_MSG_OUTPUT_SECONDARY,
vfe32_ctrl);
}
- if (vfe32_ctrl->share_ctrl->operation_mode &
+ if (vfe32_ctrl->share_ctrl->current_mode &
VFE_OUTPUTS_RDI0)
rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_V32_START, VFE_MSG_OUTPUT_TERTIARY1,
+ VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY1,
vfe32_ctrl);
- if (vfe32_ctrl->share_ctrl->operation_mode &
+ if (vfe32_ctrl->share_ctrl->current_mode &
VFE_OUTPUTS_RDI1)
rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_V32_START, VFE_MSG_OUTPUT_TERTIARY2,
+ VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY2,
vfe32_ctrl);
if (rc < 0) {
@@ -1639,13 +1645,18 @@
break;
case VFE_CMD_CAPTURE_RAW:
pr_info("%s: cmdID = VFE_CMD_CAPTURE_RAW\n", __func__);
- if (copy_from_user(&snapshot_cnt, (void __user *)(cmd->value),
- sizeof(uint32_t))) {
- rc = -EFAULT;
- goto proc_general_done;
+ if (copy_from_user(&vfe_params,
+ (void __user *)(cmd->value),
+ sizeof(struct msm_camera_vfe_params_t))) {
+ rc = -EFAULT;
+ goto proc_general_done;
}
+
+ snapshot_cnt = vfe_params.capture_count;
+ vfe32_ctrl->share_ctrl->current_mode =
+ vfe_params.operation_mode;
rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_V32_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
+ VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
vfe32_ctrl);
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
@@ -1656,15 +1667,19 @@
rc = vfe32_capture_raw(pmctl, vfe32_ctrl, snapshot_cnt);
break;
case VFE_CMD_CAPTURE:
- if (copy_from_user(&snapshot_cnt, (void __user *)(cmd->value),
- sizeof(uint32_t))) {
- rc = -EFAULT;
- goto proc_general_done;
+ if (copy_from_user(&vfe_params,
+ (void __user *)(cmd->value),
+ sizeof(struct msm_camera_vfe_params_t))) {
+ rc = -EFAULT;
+ goto proc_general_done;
}
- if (vfe32_ctrl->share_ctrl->operation_mode ==
+ snapshot_cnt = vfe_params.capture_count;
+ vfe32_ctrl->share_ctrl->current_mode =
+ vfe_params.operation_mode;
+ if (vfe32_ctrl->share_ctrl->current_mode ==
VFE_OUTPUTS_JPEG_AND_THUMB ||
- vfe32_ctrl->share_ctrl->operation_mode ==
+ vfe32_ctrl->share_ctrl->current_mode ==
VFE_OUTPUTS_THUMB_AND_JPEG) {
if (snapshot_cnt != 1) {
pr_err("only support 1 inline snapshot\n");
@@ -1673,13 +1688,13 @@
}
/* Configure primary channel for JPEG */
rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_V32_JPEG_CAPTURE,
+ VFE_MSG_JPEG_CAPTURE,
VFE_MSG_OUTPUT_PRIMARY,
vfe32_ctrl);
} else {
/* Configure primary channel */
rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_V32_CAPTURE,
+ VFE_MSG_CAPTURE,
VFE_MSG_OUTPUT_PRIMARY,
vfe32_ctrl);
}
@@ -1691,7 +1706,7 @@
}
/* Configure secondary channel */
rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_V32_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
+ VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
vfe32_ctrl);
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
@@ -1704,18 +1719,30 @@
case VFE_CMD_START_RECORDING:
pr_info("vfe32_proc_general: cmdID = %s\n",
vfe32_general_cmd[cmd->id]);
- if (vfe32_ctrl->share_ctrl->operation_mode &
- VFE_OUTPUTS_PREVIEW_AND_VIDEO)
+ if (copy_from_user(&temp1, (void __user *)(cmd->value),
+ sizeof(uint32_t))) {
+ pr_err("%s Error copying inst_handle for recording\n",
+ __func__);
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ if (vfe32_ctrl->share_ctrl->current_mode &
+ VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
+ vfe32_ctrl->share_ctrl->outpath.out1.inst_handle =
+ temp1;
rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_V32_START_RECORDING,
+ VFE_MSG_START_RECORDING,
VFE_MSG_OUTPUT_SECONDARY,
vfe32_ctrl);
- else if (vfe32_ctrl->share_ctrl->operation_mode &
- VFE_OUTPUTS_VIDEO_AND_PREVIEW)
+ } else if (vfe32_ctrl->share_ctrl->current_mode &
+ VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
+ vfe32_ctrl->share_ctrl->outpath.out0.inst_handle =
+ temp1;
rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_V32_START_RECORDING,
+ VFE_MSG_START_RECORDING,
VFE_MSG_OUTPUT_PRIMARY,
vfe32_ctrl);
+ }
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
" for video", __func__);
@@ -1746,7 +1773,12 @@
break;
case VFE_CMD_STATS_AE_START: {
- rc = vfe_stats_aec_buf_init(vfe32_ctrl, NULL);
+ if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ rc = vfe_stats_aec_bg_buf_init(vfe32_ctrl);
if (rc < 0) {
pr_err("%s: cannot config ping/pong address of AEC",
__func__);
@@ -1775,7 +1807,12 @@
}
break;
case VFE_CMD_STATS_AF_START: {
- rc = vfe_stats_af_buf_init(vfe32_ctrl, NULL);
+ if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ rc = vfe_stats_af_bf_buf_init(vfe32_ctrl);
if (rc < 0) {
pr_err("%s: cannot config ping/pong address of AF",
__func__);
@@ -1804,6 +1841,11 @@
}
break;
case VFE_CMD_STATS_AWB_START: {
+ if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
rc = vfe_stats_awb_buf_init(vfe32_ctrl, NULL);
if (rc < 0) {
pr_err("%s: cannot config ping/pong address of AWB",
@@ -1834,7 +1876,7 @@
break;
case VFE_CMD_STATS_IHIST_START: {
- rc = vfe_stats_ihist_buf_init(vfe32_ctrl, NULL);
+ rc = vfe_stats_ihist_buf_init(vfe32_ctrl);
if (rc < 0) {
pr_err("%s: cannot config ping/pong address of IHIST",
__func__);
@@ -1865,7 +1907,7 @@
case VFE_CMD_STATS_RS_START: {
- rc = vfe_stats_rs_buf_init(vfe32_ctrl, NULL);
+ rc = vfe_stats_rs_buf_init(vfe32_ctrl);
if (rc < 0) {
pr_err("%s: cannot config ping/pong address of RS",
__func__);
@@ -1890,7 +1932,7 @@
break;
case VFE_CMD_STATS_CS_START: {
- rc = vfe_stats_cs_buf_init(vfe32_ctrl, NULL);
+ rc = vfe_stats_cs_buf_init(vfe32_ctrl);
if (rc < 0) {
pr_err("%s: cannot config ping/pong address of CS",
__func__);
@@ -1914,6 +1956,67 @@
}
break;
+ case VFE_CMD_STATS_BG_START:
+ case VFE_CMD_STATS_BF_START:
+ case VFE_CMD_STATS_BHIST_START: {
+ if (!vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ old_val = msm_camera_io_r(
+ vfe32_ctrl->share_ctrl->vfebase + VFE_STATS_CFG);
+ module_val = msm_camera_io_r(
+ vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ if (VFE_CMD_STATS_BG_START == cmd->id) {
+ module_val |= AE_BG_ENABLE_MASK;
+ old_val |= STATS_BG_ENABLE_MASK;
+ rc = vfe_stats_aec_bg_buf_init(vfe32_ctrl);
+ if (rc < 0) {
+ pr_err("%s: cannot config ping/pong address of BG",
+ __func__);
+ goto proc_general_done;
+ }
+ } else if (VFE_CMD_STATS_BF_START == cmd->id) {
+ module_val |= AF_BF_ENABLE_MASK;
+ old_val |= STATS_BF_ENABLE_MASK;
+ rc = vfe_stats_af_bf_buf_init(vfe32_ctrl);
+ if (rc < 0) {
+ pr_err("%s: cannot config ping/pong address of BF",
+ __func__);
+ goto proc_general_done;
+ }
+ } else {
+ module_val |= SKIN_BHIST_ENABLE_MASK;
+ old_val |= STATS_BHIST_ENABLE_MASK;
+ rc = vfe_stats_bhist_buf_init(vfe32_ctrl);
+ if (rc < 0) {
+ pr_err("%s: cannot config ping/pong address of BHIST",
+ __func__);
+ goto proc_general_done;
+ }
+ }
+ msm_camera_io_w(old_val, vfe32_ctrl->share_ctrl->vfebase +
+ VFE_STATS_CFG);
+ msm_camera_io_w(module_val,
+ vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ msm_camera_io_memcpy(
+ vfe32_ctrl->share_ctrl->vfebase +
+ vfe32_cmd[cmd->id].offset,
+ cmdp, (vfe32_cmd[cmd->id].length));
+ }
+ break;
case VFE_CMD_MCE_UPDATE:
case VFE_CMD_MCE_CFG:{
cmdp = kmalloc(cmd->length, GFP_ATOMIC);
@@ -2187,8 +2290,16 @@
break;
case VFE_CMD_LIVESHOT:
+ if (copy_from_user(&temp1, (void __user *)(cmd->value),
+ sizeof(uint32_t))) {
+ pr_err("%s Error copying inst_handle for liveshot ",
+ __func__);
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ vfe32_ctrl->share_ctrl->outpath.out0.inst_handle = temp1;
/* Configure primary channel */
- rc = vfe32_configure_pingpong_buffers(VFE_MSG_V32_CAPTURE,
+ rc = vfe32_configure_pingpong_buffers(VFE_MSG_CAPTURE,
VFE_MSG_OUTPUT_PRIMARY, vfe32_ctrl);
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
@@ -2577,6 +2688,11 @@
break;
case VFE_CMD_STATS_AWB_STOP: {
+ if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
old_val = msm_camera_io_r(
vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
old_val &= ~AWB_ENABLE_MASK;
@@ -2585,6 +2701,11 @@
}
break;
case VFE_CMD_STATS_AE_STOP: {
+ if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
old_val = msm_camera_io_r(
vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
old_val &= ~AE_BG_ENABLE_MASK;
@@ -2593,17 +2714,16 @@
}
break;
case VFE_CMD_STATS_AF_STOP: {
+ if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
old_val = msm_camera_io_r(
vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
old_val &= ~AF_BF_ENABLE_MASK;
msm_camera_io_w(old_val,
vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
- rc = vfe32_stats_flush_enqueue(vfe32_ctrl, MSM_STATS_TYPE_AF);
- if (rc < 0) {
- pr_err("%s: dq stats buf err = %d",
- __func__, rc);
- return -EINVAL;
- }
}
break;
@@ -2633,9 +2753,49 @@
vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
}
break;
+
+ case VFE_CMD_STATS_BG_STOP:
+ case VFE_CMD_STATS_BF_STOP:
+ case VFE_CMD_STATS_BHIST_STOP: {
+ if (!vfe32_use_bayer_stats(vfe32_ctrl)) {
+ /* Error */
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ old_val = msm_camera_io_r(
+ vfe32_ctrl->share_ctrl->vfebase + VFE_STATS_CFG);
+ if (VFE_CMD_STATS_BG_STOP == cmd->id)
+ old_val &= ~STATS_BG_ENABLE_MASK;
+ else if (VFE_CMD_STATS_BF_STOP == cmd->id)
+ old_val &= ~STATS_BF_ENABLE_MASK;
+ else
+ old_val &= ~STATS_BHIST_ENABLE_MASK;
+ msm_camera_io_w(old_val,
+ vfe32_ctrl->share_ctrl->vfebase + VFE_STATS_CFG);
+ if (VFE_CMD_STATS_BF_STOP == cmd->id) {
+ rc = vfe32_stats_flush_enqueue(vfe32_ctrl,
+ MSM_STATS_TYPE_BF);
+ if (rc < 0) {
+ pr_err("%s: dq stats buf err = %d",
+ __func__, rc);
+ return -EINVAL;
+ }
+ }
+ }
+ break;
+
case VFE_CMD_STOP:
pr_info("vfe32_proc_general: cmdID = %s\n",
vfe32_general_cmd[cmd->id]);
+ if (copy_from_user(&vfe_params,
+ (void __user *)(cmd->value),
+ sizeof(struct msm_camera_vfe_params_t))) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+
+ vfe32_ctrl->share_ctrl->current_mode =
+ vfe_params.operation_mode;
vfe32_stop(vfe32_ctrl);
break;
@@ -2679,11 +2839,20 @@
break;
case VFE_CMD_ZSL:
- rc = vfe32_configure_pingpong_buffers(VFE_MSG_V32_START,
+ if (copy_from_user(&vfe_params,
+ (void __user *)(cmd->value),
+ sizeof(struct msm_camera_vfe_params_t))) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+
+ vfe32_ctrl->share_ctrl->current_mode =
+ vfe_params.operation_mode;
+ rc = vfe32_configure_pingpong_buffers(VFE_MSG_START,
VFE_MSG_OUTPUT_PRIMARY, vfe32_ctrl);
if (rc < 0)
goto proc_general_done;
- rc = vfe32_configure_pingpong_buffers(VFE_MSG_V32_START,
+ rc = vfe32_configure_pingpong_buffers(VFE_MSG_START,
VFE_MSG_OUTPUT_SECONDARY, vfe32_ctrl);
if (rc < 0)
goto proc_general_done;
@@ -3014,7 +3183,8 @@
{
unsigned long flags;
- if (vfe32_ctrl->recording_state == VFE_STATE_START_REQUESTED) {
+ if (vfe32_ctrl->share_ctrl->recording_state ==
+ VFE_STATE_START_REQUESTED) {
if (vfe32_ctrl->share_ctrl->operation_mode &
VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
@@ -3032,11 +3202,11 @@
vfe32_AXI_WM_CFG[vfe32_ctrl->
share_ctrl->outpath.out1.ch1]);
}
- vfe32_ctrl->recording_state = VFE_STATE_STARTED;
+ vfe32_ctrl->share_ctrl->recording_state = VFE_STATE_STARTED;
msm_camera_io_w_mb(1,
vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
CDBG("start video triggered .\n");
- } else if (vfe32_ctrl->recording_state ==
+ } else if (vfe32_ctrl->share_ctrl->recording_state ==
VFE_STATE_STOP_REQUESTED) {
if (vfe32_ctrl->share_ctrl->operation_mode &
VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
@@ -3058,40 +3228,47 @@
CDBG("stop video triggered .\n");
}
- spin_lock_irqsave(&vfe32_ctrl->start_ack_lock, flags);
- if (vfe32_ctrl->start_ack_pending == TRUE) {
- vfe32_ctrl->start_ack_pending = FALSE;
- spin_unlock_irqrestore(&vfe32_ctrl->start_ack_lock, flags);
+ spin_lock_irqsave(&vfe32_ctrl->share_ctrl->start_ack_lock, flags);
+ if (vfe32_ctrl->share_ctrl->start_ack_pending == TRUE) {
+ vfe32_ctrl->share_ctrl->start_ack_pending = FALSE;
+ spin_unlock_irqrestore(
+ &vfe32_ctrl->share_ctrl->start_ack_lock, flags);
vfe32_send_isp_msg(&vfe32_ctrl->subdev,
vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_START_ACK);
} else {
- spin_unlock_irqrestore(&vfe32_ctrl->start_ack_lock, flags);
- if (vfe32_ctrl->recording_state ==
+ spin_unlock_irqrestore(
+ &vfe32_ctrl->share_ctrl->start_ack_lock, flags);
+ if (vfe32_ctrl->share_ctrl->recording_state ==
VFE_STATE_STOP_REQUESTED) {
- vfe32_ctrl->recording_state = VFE_STATE_STOPPED;
+ vfe32_ctrl->share_ctrl->recording_state =
+ VFE_STATE_STOPPED;
/* request a reg update and send STOP_REC_ACK
* when we process the next reg update irq.
*/
msm_camera_io_w_mb(1,
vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
- } else if (vfe32_ctrl->recording_state ==
+ } else if (vfe32_ctrl->share_ctrl->recording_state ==
VFE_STATE_STOPPED) {
vfe32_send_isp_msg(&vfe32_ctrl->subdev,
vfe32_ctrl->share_ctrl->vfeFrameId,
MSG_ID_STOP_REC_ACK);
- vfe32_ctrl->recording_state = VFE_STATE_IDLE;
+ vfe32_ctrl->share_ctrl->recording_state =
+ VFE_STATE_IDLE;
}
- spin_lock_irqsave(&vfe32_ctrl->update_ack_lock, flags);
- if (vfe32_ctrl->update_ack_pending == TRUE) {
- vfe32_ctrl->update_ack_pending = FALSE;
+ spin_lock_irqsave(
+ &vfe32_ctrl->share_ctrl->update_ack_lock, flags);
+ if (vfe32_ctrl->share_ctrl->update_ack_pending == TRUE) {
+ vfe32_ctrl->share_ctrl->update_ack_pending = FALSE;
spin_unlock_irqrestore(
- &vfe32_ctrl->update_ack_lock, flags);
+ &vfe32_ctrl->share_ctrl->update_ack_lock,
+ flags);
vfe32_send_isp_msg(&vfe32_ctrl->subdev,
vfe32_ctrl->share_ctrl->vfeFrameId,
MSG_ID_UPDATE_ACK);
} else {
spin_unlock_irqrestore(
- &vfe32_ctrl->update_ack_lock, flags);
+ &vfe32_ctrl->share_ctrl->update_ack_lock,
+ flags);
}
}
@@ -3153,13 +3330,13 @@
break;
}
- if ((vfe32_ctrl->share_ctrl->operation_mode ==
+ if ((vfe32_ctrl->share_ctrl->operation_mode &
VFE_OUTPUTS_THUMB_AND_MAIN) ||
- (vfe32_ctrl->share_ctrl->operation_mode ==
+ (vfe32_ctrl->share_ctrl->operation_mode &
VFE_OUTPUTS_MAIN_AND_THUMB) ||
- (vfe32_ctrl->share_ctrl->operation_mode ==
+ (vfe32_ctrl->share_ctrl->operation_mode &
VFE_OUTPUTS_THUMB_AND_JPEG) ||
- (vfe32_ctrl->share_ctrl->operation_mode ==
+ (vfe32_ctrl->share_ctrl->operation_mode &
VFE_OUTPUTS_JPEG_AND_THUMB)) {
/* in snapshot mode */
/* later we need to add check for live snapshot mode. */
@@ -3211,34 +3388,22 @@
static void vfe32_process_rdi0_reg_update_irq(
struct vfe32_ctrl_type *vfe32_ctrl)
{
- unsigned long flags;
- spin_lock_irqsave(&vfe32_ctrl->start_ack_lock, flags);
- if (vfe32_ctrl->start_ack_pending == TRUE) {
- vfe32_ctrl->start_ack_pending = FALSE;
- spin_unlock_irqrestore(
- &vfe32_ctrl->start_ack_lock, flags);
+ if (atomic_cmpxchg(
+ &vfe32_ctrl->share_ctrl->rdi0_update_ack_pending, 1, 0)) {
vfe32_send_isp_msg(&vfe32_ctrl->subdev,
- vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_START_ACK);
- } else {
- spin_unlock_irqrestore(
- &vfe32_ctrl->start_ack_lock, flags);
+ vfe32_ctrl->share_ctrl->vfeFrameId,
+ MSG_ID_RDI0_UPDATE_ACK);
}
}
static void vfe32_process_rdi1_reg_update_irq(
struct vfe32_ctrl_type *vfe32_ctrl)
{
- unsigned long flags;
- spin_lock_irqsave(&vfe32_ctrl->start_ack_lock, flags);
- if (vfe32_ctrl->start_ack_pending == TRUE) {
- vfe32_ctrl->start_ack_pending = FALSE;
- spin_unlock_irqrestore(
- &vfe32_ctrl->start_ack_lock, flags);
+ if (atomic_cmpxchg(
+ &vfe32_ctrl->share_ctrl->rdi1_update_ack_pending, 1, 0)) {
vfe32_send_isp_msg(&vfe32_ctrl->subdev,
- vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_START_ACK);
- } else {
- spin_unlock_irqrestore(
- &vfe32_ctrl->start_ack_lock, flags);
+ vfe32_ctrl->share_ctrl->vfeFrameId,
+ MSG_ID_RDI1_UPDATE_ACK);
}
}
@@ -3277,18 +3442,48 @@
vfe32_ctrl->share_ctrl->vfebase + VFE_CLAMP_MAX);
/* stats UB config */
- msm_camera_io_w(0x3980007,
- vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AEC_UB_CFG);
- msm_camera_io_w(0x3A00007,
- vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AF_UB_CFG);
- msm_camera_io_w(0x3A8000F,
- vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AWB_UB_CFG);
- msm_camera_io_w(0x3B80007,
- vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_RS_UB_CFG);
- msm_camera_io_w(0x3C0001F,
- vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_CS_UB_CFG);
- msm_camera_io_w(0x3E0001F,
- vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_HIST_UB_CFG);
+ CDBG("%s: Use bayer stats = %d\n", __func__,
+ vfe32_use_bayer_stats(vfe32_ctrl));
+ if (!vfe32_use_bayer_stats(vfe32_ctrl)) {
+ msm_camera_io_w(0x3980007,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AEC_BG_UB_CFG);
+ msm_camera_io_w(0x3A00007,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AF_BF_UB_CFG);
+ msm_camera_io_w(0x3A8000F,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AWB_UB_CFG);
+ msm_camera_io_w(0x3B80007,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_RS_UB_CFG);
+ msm_camera_io_w(0x3C0001F,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_CS_UB_CFG);
+ msm_camera_io_w(0x3E0001F,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_HIST_UB_CFG);
+ } else {
+ msm_camera_io_w(0x350001F,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_HIST_UB_CFG);
+ msm_camera_io_w(0x370002F,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AEC_BG_UB_CFG);
+ msm_camera_io_w(0x3A0002F,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AF_BF_UB_CFG);
+ msm_camera_io_w(0x3D00007,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_RS_UB_CFG);
+ msm_camera_io_w(0x3D8001F,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_CS_UB_CFG);
+ msm_camera_io_w(0x3F80007,
+ vfe32_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_SKIN_BHIST_UB_CFG);
+ }
+ vfe32_reset_dmi_tables(vfe32_ctrl);
}
static void vfe32_process_reset_irq(
@@ -3314,21 +3509,20 @@
/* reload all write masters. (frame & line)*/
msm_camera_io_w(0x7FFF,
vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_CMD);
- vfe32_send_isp_msg(&vfe32_ctrl->subdev,
- vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_RESET_ACK);
+ complete(&vfe32_ctrl->share_ctrl->reset_complete);
}
}
static void vfe32_process_camif_sof_irq(
struct vfe32_ctrl_type *vfe32_ctrl)
{
- if (vfe32_ctrl->share_ctrl->operation_mode ==
+ if (vfe32_ctrl->share_ctrl->operation_mode &
VFE_OUTPUTS_RAW) {
- if (vfe32_ctrl->start_ack_pending) {
+ if (vfe32_ctrl->share_ctrl->start_ack_pending) {
vfe32_send_isp_msg(&vfe32_ctrl->subdev,
vfe32_ctrl->share_ctrl->vfeFrameId,
MSG_ID_START_ACK);
- vfe32_ctrl->start_ack_pending = FALSE;
+ vfe32_ctrl->share_ctrl->start_ack_pending = FALSE;
}
vfe32_ctrl->share_ctrl->vfe_capture_count--;
/* if last frame to be captured: */
@@ -3344,11 +3538,14 @@
VFE_MODE_OF_OPERATION_VIDEO) &&
(vfe32_ctrl->share_ctrl->vfeFrameId %
vfe32_ctrl->hfr_mode != 0)) {
- vfe32_ctrl->share_ctrl->vfeFrameId++;
+ if (vfe32_ctrl->vfe_sof_count_enable)
+ vfe32_ctrl->share_ctrl->vfeFrameId++;
CDBG("Skip the SOF notification when HFR enabled\n");
return;
}
- vfe32_ctrl->share_ctrl->vfeFrameId++;
+ if (vfe32_ctrl->vfe_sof_count_enable)
+ vfe32_ctrl->share_ctrl->vfeFrameId++;
+
vfe32_send_isp_msg(&vfe32_ctrl->subdev,
vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_SOF_ACK);
CDBG("camif_sof_irq, frameId = %d\n",
@@ -3452,12 +3649,12 @@
static void vfe_send_outmsg(
struct axi_ctrl_t *axi_ctrl, uint8_t msgid,
uint32_t ch0_paddr, uint32_t ch1_paddr,
- uint32_t ch2_paddr, uint32_t image_mode)
+ uint32_t ch2_paddr, uint32_t inst_handle)
{
struct isp_msg_output msg;
msg.output_id = msgid;
- msg.buf.image_mode = image_mode;
+ msg.buf.inst_handle = inst_handle;
msg.buf.ch_paddr[0] = ch0_paddr;
msg.buf.ch_paddr[1] = ch1_paddr;
msg.buf.ch_paddr[2] = ch2_paddr;
@@ -3559,7 +3756,7 @@
vfe_send_outmsg(axi_ctrl,
MSG_ID_OUTPUT_PRIMARY, ch0_paddr,
ch1_paddr, ch2_paddr,
- axi_ctrl->share_ctrl->outpath.out0.image_mode);
+ axi_ctrl->share_ctrl->outpath.out0.inst_handle);
} else {
axi_ctrl->share_ctrl->outpath.out0.frame_drop_cnt++;
@@ -3637,7 +3834,7 @@
vfe_send_outmsg(axi_ctrl,
MSG_ID_OUTPUT_SECONDARY, ch0_paddr,
ch1_paddr, ch2_paddr,
- axi_ctrl->share_ctrl->outpath.out1.image_mode);
+ axi_ctrl->share_ctrl->outpath.out1.inst_handle);
} else {
axi_ctrl->share_ctrl->outpath.out1.frame_drop_cnt++;
@@ -3678,7 +3875,7 @@
vfe_send_outmsg(axi_ctrl,
MSG_ID_OUTPUT_TERTIARY1, ch0_paddr,
0, 0,
- axi_ctrl->share_ctrl->outpath.out2.image_mode);
+ axi_ctrl->share_ctrl->outpath.out2.inst_handle);
} else {
axi_ctrl->share_ctrl->outpath.out2.frame_drop_cnt++;
@@ -3719,7 +3916,7 @@
vfe_send_outmsg(axi_ctrl,
MSG_ID_OUTPUT_TERTIARY2, ch0_paddr,
0, 0,
- axi_ctrl->share_ctrl->outpath.out3.image_mode);
+ axi_ctrl->share_ctrl->outpath.out3.inst_handle);
} else {
axi_ctrl->share_ctrl->outpath.out3.frame_drop_cnt++;
pr_err("path_irq irq - no free buffer for rdi1!\n");
@@ -3761,23 +3958,36 @@
/* @todo This is causing issues, need further investigate */
/* spin_lock_irqsave(&ctrl->state_lock, flags); */
struct isp_msg_stats msgStats;
+ uint32_t stats_type;
msgStats.frameCounter = vfe32_ctrl->share_ctrl->vfeFrameId;
+ if (vfe32_ctrl->simultaneous_sof_stat)
+ msgStats.frameCounter--;
msgStats.buffer = bufAddress;
switch (statsNum) {
case statsAeNum:{
- msgStats.id = MSG_ID_STATS_AEC;
+ msgStats.id =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSG_ID_STATS_AEC
+ : MSG_ID_STATS_BG;
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ?
+ MSM_STATS_TYPE_AEC : MSM_STATS_TYPE_BG;
rc = vfe32_ctrl->stats_ops.dispatch(
vfe32_ctrl->stats_ops.stats_ctrl,
- MSM_STATS_TYPE_AEC, bufAddress,
+ stats_type, bufAddress,
&msgStats.buf_idx, &vaddr, &msgStats.fd,
vfe32_ctrl->stats_ops.client);
}
break;
case statsAfNum:{
- msgStats.id = MSG_ID_STATS_AF;
+ msgStats.id =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSG_ID_STATS_AF
+ : MSG_ID_STATS_BF;
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AF
+ : MSM_STATS_TYPE_BF;
rc = vfe32_ctrl->stats_ops.dispatch(
vfe32_ctrl->stats_ops.stats_ctrl,
- MSM_STATS_TYPE_AF, bufAddress,
+ stats_type, bufAddress,
&msgStats.buf_idx, &vaddr, &msgStats.fd,
vfe32_ctrl->stats_ops.client);
}
@@ -3819,6 +4029,15 @@
vfe32_ctrl->stats_ops.client);
}
break;
+ case statsSkinNum: {
+ msgStats.id = MSG_ID_STATS_BHIST;
+ rc = vfe32_ctrl->stats_ops.dispatch(
+ vfe32_ctrl->stats_ops.stats_ctrl,
+ MSM_STATS_TYPE_BHIST, bufAddress,
+ &msgStats.buf_idx, &vaddr, &msgStats.fd,
+ vfe32_ctrl->stats_ops.client);
+ }
+ break;
default:
goto stats_done;
@@ -3844,11 +4063,14 @@
uint32_t temp;
msgStats.frame_id = vfe32_ctrl->share_ctrl->vfeFrameId;
+ if (vfe32_ctrl->simultaneous_sof_stat)
+ msgStats.frame_id--;
+
msgStats.status_bits = status_bits;
- msgStats.aec.buff = vfe32_ctrl->aecStatsControl.bufToRender;
+ msgStats.aec.buff = vfe32_ctrl->aecbgStatsControl.bufToRender;
msgStats.awb.buff = vfe32_ctrl->awbStatsControl.bufToRender;
- msgStats.af.buff = vfe32_ctrl->afStatsControl.bufToRender;
+ msgStats.af.buff = vfe32_ctrl->afbfStatsControl.bufToRender;
msgStats.ihist.buff = vfe32_ctrl->ihistStatsControl.bufToRender;
msgStats.rs.buff = vfe32_ctrl->rsStatsControl.bufToRender;
@@ -3863,24 +4085,28 @@
&msgStats);
}
-static void vfe32_process_stats_ae_irq(struct vfe32_ctrl_type *vfe32_ctrl)
+static void vfe32_process_stats_ae_bg_irq(struct vfe32_ctrl_type *vfe32_ctrl)
{
unsigned long flags;
uint32_t addr;
+ uint32_t stats_type;
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AEC
+ : MSM_STATS_TYPE_BG;
spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
- addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AEC);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
if (addr) {
- vfe32_ctrl->aecStatsControl.bufToRender =
+ vfe32_ctrl->aecbgStatsControl.bufToRender =
vfe32_process_stats_irq_common(vfe32_ctrl, statsAeNum,
addr);
vfe_send_stats_msg(vfe32_ctrl,
- vfe32_ctrl->aecStatsControl.bufToRender, statsAeNum);
+ vfe32_ctrl->aecbgStatsControl.bufToRender, statsAeNum);
} else{
- vfe32_ctrl->aecStatsControl.droppedStatsFrameCount++;
+ vfe32_ctrl->aecbgStatsControl.droppedStatsFrameCount++;
CDBG("%s: droppedStatsFrameCount = %d", __func__,
- vfe32_ctrl->aecStatsControl.droppedStatsFrameCount);
+ vfe32_ctrl->aecbgStatsControl.droppedStatsFrameCount);
}
}
@@ -3905,24 +4131,50 @@
}
}
-static void vfe32_process_stats_af_irq(struct vfe32_ctrl_type *vfe32_ctrl)
+static void vfe32_process_stats_af_bf_irq(struct vfe32_ctrl_type *vfe32_ctrl)
{
unsigned long flags;
uint32_t addr;
+ uint32_t stats_type;
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AF
+ : MSM_STATS_TYPE_BF;
spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
- addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AF);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
if (addr) {
- vfe32_ctrl->afStatsControl.bufToRender =
+ vfe32_ctrl->afbfStatsControl.bufToRender =
vfe32_process_stats_irq_common(vfe32_ctrl, statsAfNum,
addr);
vfe_send_stats_msg(vfe32_ctrl,
- vfe32_ctrl->afStatsControl.bufToRender, statsAfNum);
+ vfe32_ctrl->afbfStatsControl.bufToRender, statsAfNum);
} else{
- vfe32_ctrl->afStatsControl.droppedStatsFrameCount++;
+ vfe32_ctrl->afbfStatsControl.droppedStatsFrameCount++;
CDBG("%s: droppedStatsFrameCount = %d", __func__,
- vfe32_ctrl->afStatsControl.droppedStatsFrameCount);
+ vfe32_ctrl->afbfStatsControl.droppedStatsFrameCount);
+ }
+}
+
+static void vfe32_process_stats_bhist_irq(struct vfe32_ctrl_type *vfe32_ctrl)
+{
+ unsigned long flags;
+ uint32_t addr;
+ spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_BHIST);
+ spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+ if (addr) {
+ vfe32_ctrl->bhistStatsControl.bufToRender =
+ vfe32_process_stats_irq_common(vfe32_ctrl,
+ statsSkinNum, addr);
+
+ vfe_send_stats_msg(vfe32_ctrl,
+ vfe32_ctrl->bhistStatsControl.bufToRender,
+ statsSkinNum);
+ } else{
+ vfe32_ctrl->bhistStatsControl.droppedStatsFrameCount++;
+ CDBG("%s: droppedStatsFrameCount = %d", __func__,
+ vfe32_ctrl->bhistStatsControl.droppedStatsFrameCount);
}
}
@@ -3997,23 +4249,28 @@
unsigned long flags;
int32_t process_stats = false;
uint32_t addr;
+ uint32_t stats_type;
CDBG("%s, stats = 0x%x\n", __func__, status_bits);
spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
- if (status_bits & VFE_IRQ_STATUS0_STATS_AEC) {
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AEC
+ : MSM_STATS_TYPE_BG;
+
+ if (status_bits & VFE_IRQ_STATUS0_STATS_AEC_BG) {
addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl,
- MSM_STATS_TYPE_AEC);
+ stats_type);
if (addr) {
- vfe32_ctrl->aecStatsControl.bufToRender =
+ vfe32_ctrl->aecbgStatsControl.bufToRender =
vfe32_process_stats_irq_common(
vfe32_ctrl, statsAeNum, addr);
process_stats = true;
} else{
- vfe32_ctrl->aecStatsControl.bufToRender = 0;
- vfe32_ctrl->aecStatsControl.droppedStatsFrameCount++;
+ vfe32_ctrl->aecbgStatsControl.bufToRender = 0;
+ vfe32_ctrl->aecbgStatsControl.droppedStatsFrameCount++;
}
} else {
- vfe32_ctrl->aecStatsControl.bufToRender = 0;
+ vfe32_ctrl->aecbgStatsControl.bufToRender = 0;
}
if (status_bits & VFE_IRQ_STATUS0_STATS_AWB) {
@@ -4033,21 +4290,24 @@
vfe32_ctrl->awbStatsControl.bufToRender = 0;
}
- if (status_bits & VFE_IRQ_STATUS0_STATS_AF) {
+ stats_type =
+ (!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AF
+ : MSM_STATS_TYPE_BF;
+ if (status_bits & VFE_IRQ_STATUS0_STATS_AF_BF) {
addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl,
- MSM_STATS_TYPE_AF);
+ stats_type);
if (addr) {
- vfe32_ctrl->afStatsControl.bufToRender =
+ vfe32_ctrl->afbfStatsControl.bufToRender =
vfe32_process_stats_irq_common(
vfe32_ctrl, statsAfNum,
addr);
process_stats = true;
} else {
- vfe32_ctrl->afStatsControl.bufToRender = 0;
- vfe32_ctrl->afStatsControl.droppedStatsFrameCount++;
+ vfe32_ctrl->afbfStatsControl.bufToRender = 0;
+ vfe32_ctrl->afbfStatsControl.droppedStatsFrameCount++;
}
} else {
- vfe32_ctrl->afStatsControl.bufToRender = 0;
+ vfe32_ctrl->afbfStatsControl.bufToRender = 0;
}
if (status_bits & VFE_IRQ_STATUS0_STATS_IHIST) {
@@ -4111,7 +4371,6 @@
struct vfe32_ctrl_type *vfe32_ctrl, uint32_t irqstatus)
{
uint32_t status_bits = VFE_COM_STATUS & irqstatus;
-
if ((vfe32_ctrl->hfr_mode != HFR_MODE_OFF) &&
(vfe32_ctrl->share_ctrl->vfeFrameId %
vfe32_ctrl->hfr_mode != 0)) {
@@ -4153,17 +4412,21 @@
CDBG("irq resetAckIrq\n");
vfe32_process_reset_irq(vfe32_ctrl);
break;
- case VFE_IRQ_STATUS0_STATS_AEC:
+ case VFE_IRQ_STATUS0_STATS_AEC_BG:
CDBG("Stats AEC irq occured.\n");
- vfe32_process_stats_ae_irq(vfe32_ctrl);
+ vfe32_process_stats_ae_bg_irq(vfe32_ctrl);
break;
case VFE_IRQ_STATUS0_STATS_AWB:
CDBG("Stats AWB irq occured.\n");
vfe32_process_stats_awb_irq(vfe32_ctrl);
break;
- case VFE_IRQ_STATUS0_STATS_AF:
+ case VFE_IRQ_STATUS0_STATS_AF_BF:
CDBG("Stats AF irq occured.\n");
- vfe32_process_stats_af_irq(vfe32_ctrl);
+ vfe32_process_stats_af_bf_irq(vfe32_ctrl);
+ break;
+ case VFE_IRQ_STATUS0_STATS_SK_BHIST:
+ CDBG("Stats BHIST irq occured.\n");
+ vfe32_process_stats_bhist_irq(vfe32_ctrl);
break;
case VFE_IRQ_STATUS0_STATS_IHIST:
CDBG("Stats IHIST irq occured.\n");
@@ -4204,7 +4467,9 @@
{
unsigned long flags;
struct axi_ctrl_t *axi_ctrl = (struct axi_ctrl_t *)data;
+ struct vfe32_ctrl_type *vfe32_ctrl = axi_ctrl->share_ctrl->vfe32_ctrl;
struct vfe32_isr_queue_cmd *qcmd = NULL;
+ int stat_interrupt;
CDBG("=== axi32_do_tasklet start ===\n");
@@ -4224,11 +4489,32 @@
spin_unlock_irqrestore(&axi_ctrl->tasklet_lock,
flags);
+ if (axi_ctrl->share_ctrl->stats_comp) {
+ stat_interrupt = (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK);
+ } else {
+ stat_interrupt =
+ (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_AEC_BG) |
+ (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_AWB) |
+ (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_AF_BF) |
+ (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_IHIST) |
+ (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_RS) |
+ (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_CS);
+ }
if (qcmd->vfeInterruptStatus0 &
- VFE_IRQ_STATUS0_CAMIF_SOF_MASK)
+ VFE_IRQ_STATUS0_CAMIF_SOF_MASK) {
+ if (stat_interrupt)
+ vfe32_ctrl->simultaneous_sof_stat = 1;
v4l2_subdev_notify(&axi_ctrl->subdev,
NOTIFY_VFE_IRQ,
(void *)VFE_IRQ_STATUS0_CAMIF_SOF_MASK);
+ }
/* interrupt to be processed, *qcmd has the payload. */
if (qcmd->vfeInterruptStatus0 &
@@ -4255,6 +4541,11 @@
NOTIFY_VFE_IRQ,
(void *)VFE_IMASK_WHILE_STOPPING_1);
+ if (atomic_read(&axi_ctrl->share_ctrl->handle_axi_irq))
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_AXI_IRQ,
+ (void *)qcmd->vfeInterruptStatus0);
+
if (atomic_read(&axi_ctrl->share_ctrl->vstate)) {
if (qcmd->vfeInterruptStatus1 &
VFE32_IMASK_ERROR_ONLY_1) {
@@ -4264,9 +4555,6 @@
qcmd->vfeInterruptStatus1 &
VFE32_IMASK_ERROR_ONLY_1);
}
- v4l2_subdev_notify(&axi_ctrl->subdev,
- NOTIFY_AXI_IRQ,
- (void *)qcmd->vfeInterruptStatus0);
/* then process stats irq. */
if (axi_ctrl->share_ctrl->stats_comp) {
@@ -4281,10 +4569,10 @@
} else {
/* process individual stats interrupt. */
if (qcmd->vfeInterruptStatus0 &
- VFE_IRQ_STATUS0_STATS_AEC)
+ VFE_IRQ_STATUS0_STATS_AEC_BG)
v4l2_subdev_notify(&axi_ctrl->subdev,
NOTIFY_VFE_IRQ,
- (void *)VFE_IRQ_STATUS0_STATS_AEC);
+ (void *)VFE_IRQ_STATUS0_STATS_AEC_BG);
if (qcmd->vfeInterruptStatus0 &
VFE_IRQ_STATUS0_STATS_AWB)
@@ -4293,10 +4581,15 @@
(void *)VFE_IRQ_STATUS0_STATS_AWB);
if (qcmd->vfeInterruptStatus0 &
- VFE_IRQ_STATUS0_STATS_AF)
+ VFE_IRQ_STATUS0_STATS_AF_BF)
v4l2_subdev_notify(&axi_ctrl->subdev,
NOTIFY_VFE_IRQ,
- (void *)VFE_IRQ_STATUS0_STATS_AF);
+ (void *)VFE_IRQ_STATUS0_STATS_AF_BF);
+ if (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_SK_BHIST)
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_VFE_IRQ,
+ (void *)VFE_IRQ_STATUS0_STATS_SK_BHIST);
if (qcmd->vfeInterruptStatus0 &
VFE_IRQ_STATUS0_STATS_IHIST)
@@ -4335,6 +4628,7 @@
(void *)VFE_IRQ_STATUS0_SYNC_TIMER2);
}
}
+ vfe32_ctrl->simultaneous_sof_stat = 0;
kfree(qcmd);
}
CDBG("=== axi32_do_tasklet end ===\n");
@@ -4468,6 +4762,22 @@
vfe_ctrl->stats_ops.client);
}
break;
+ case VFE_CMD_STATS_UNREGBUF:
+ {
+ struct msm_stats_reqbuf *req_buf = NULL;
+ req_buf = (struct msm_stats_reqbuf *)cmd->value;
+ if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+ /* error. the length not match */
+ pr_err("%s: stats reqbuf input size = %d,\n"
+ "struct size = %zu, mismatch\n",
+ __func__, cmd->length,
+ sizeof(struct msm_stats_reqbuf));
+ rc = -EINVAL ;
+ goto end;
+ }
+ rc = vfe32_stats_unregbuf(vfe_ctrl, req_buf);
+ }
+ break;
default:
rc = -1;
pr_err("%s: cmd_type %d not supported", __func__,
@@ -4486,10 +4796,9 @@
struct vfe32_ctrl_type *vfe32_ctrl =
(struct vfe32_ctrl_type *)v4l2_get_subdevdata(sd);
struct msm_isp_cmd vfecmd;
- struct msm_camvfe_params *vfe_params =
- (struct msm_camvfe_params *)arg;
- struct msm_vfe_cfg_cmd *cmd = vfe_params->vfe_cfg;
- void *data = vfe_params->data;
+ struct msm_camvfe_params *vfe_params;
+ struct msm_vfe_cfg_cmd *cmd;
+ void *data;
long rc = 0;
struct vfe_cmd_stats_buf *scfg = NULL;
@@ -4500,6 +4809,17 @@
return -EFAULT;
}
+ CDBG("%s\n", __func__);
+ if (subdev_cmd == VIDIOC_MSM_VFE_INIT) {
+ CDBG("%s init\n", __func__);
+ return msm_vfe_subdev_init(sd);
+ } else if (subdev_cmd == VIDIOC_MSM_VFE_RELEASE) {
+ msm_vfe_subdev_release(sd);
+ return 0;
+ }
+ vfe_params = (struct msm_camvfe_params *)arg;
+ cmd = vfe_params->vfe_cfg;
+ data = vfe_params->data;
switch (cmd->cmd_type) {
case CMD_VFE_PROCESS_IRQ:
vfe32_process_irq(vfe32_ctrl, (uint32_t) data);
@@ -4507,27 +4827,33 @@
case VFE_CMD_STATS_REQBUF:
case VFE_CMD_STATS_ENQUEUEBUF:
case VFE_CMD_STATS_FLUSH_BUFQ:
+ case VFE_CMD_STATS_UNREGBUF:
/* for easy porting put in one envelope */
rc = vfe_stats_bufq_sub_ioctl(vfe32_ctrl,
cmd, vfe_params->data);
return rc;
default:
if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
- cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
- cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR &&
- cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
- cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
- cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
- cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
- cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
- cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
- if (copy_from_user(&vfecmd,
+ cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
+ cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR &&
+ cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_BG_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_BF_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_BHIST_BUF_RELEASE &&
+ cmd->cmd_type != CMD_VFE_SOF_COUNT_UPDATE &&
+ cmd->cmd_type != CMD_VFE_COUNT_SOF_ENABLE) {
+ if (copy_from_user(&vfecmd,
(void __user *)(cmd->value),
sizeof(vfecmd))) {
- pr_err("%s %d: copy_from_user failed\n",
- __func__, __LINE__);
- return -EFAULT;
- }
+ pr_err("%s %d: copy_from_user failed\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
} else {
/* here eith stats release or frame release. */
if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
@@ -4549,6 +4875,25 @@
sack->nextStatsBuf = *(uint32_t *)data;
}
}
+ }
+
+ CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
+
+ if ((cmd->cmd_type == CMD_STATS_AF_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_AWB_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_IHIST_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_RS_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_CS_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_AEC_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_BG_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_BF_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_BHIST_ENABLE)) {
+ struct axidata *axid;
+ axid = data;
+ if (!axid) {
+ rc = -EFAULT;
+ goto vfe32_config_done;
+ }
CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
if ((cmd->cmd_type == CMD_STATS_AF_ENABLE) ||
@@ -4562,38 +4907,69 @@
goto vfe32_config_done;
}
switch (cmd->cmd_type) {
- case CMD_GENERAL:
- rc = vfe32_proc_general(pmctl, &vfecmd, vfe32_ctrl);
- break;
- case CMD_CONFIG_PING_ADDR: {
- int path = *((int *)cmd->value);
- struct vfe32_output_ch *outch =
- vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
- outch->ping = *((struct msm_free_buf *)data);
+ case CMD_STATS_AEC_ENABLE:
+ case CMD_STATS_BG_ENABLE:
+ case CMD_STATS_BF_ENABLE:
+ case CMD_STATS_BHIST_ENABLE:
+ case CMD_STATS_AWB_ENABLE:
+ case CMD_STATS_IHIST_ENABLE:
+ case CMD_STATS_RS_ENABLE:
+ case CMD_STATS_CS_ENABLE:
+ default:
+ pr_err("%s Unsupported cmd type %d",
+ __func__, cmd->cmd_type);
+ break;
}
- break;
- case CMD_CONFIG_PONG_ADDR: {
- int path = *((int *)cmd->value);
- struct vfe32_output_ch *outch =
- vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
- outch->pong = *((struct msm_free_buf *)data);
- }
+ goto vfe32_config_done;
+ }
+ switch (cmd->cmd_type) {
+ case CMD_GENERAL:
+ rc = vfe32_proc_general(pmctl, &vfecmd, vfe32_ctrl);
+ break;
+ case CMD_VFE_COUNT_SOF_ENABLE: {
+ int enable = *((int *)cmd->value);
+ if (enable)
+ vfe32_ctrl->vfe_sof_count_enable = TRUE;
+ else
+ vfe32_ctrl->vfe_sof_count_enable = false;
+ }
+ break;
+ case CMD_VFE_SOF_COUNT_UPDATE:
+ if (!vfe32_ctrl->vfe_sof_count_enable)
+ vfe32_ctrl->share_ctrl->vfeFrameId =
+ *((uint32_t *)vfe_params->data);
+ break;
+ case CMD_CONFIG_PING_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe32_output_ch *outch =
+ vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+ outch->ping = *((struct msm_free_buf *)data);
+ }
+ break;
+
+ case CMD_CONFIG_PONG_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe32_output_ch *outch =
+ vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+ outch->pong = *((struct msm_free_buf *)data);
+ }
+ break;
+
+ case CMD_CONFIG_FREE_BUF_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe32_output_ch *outch =
+ vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+ outch->free_buf = *((struct msm_free_buf *)data);
+ }
+ break;
+
+ case CMD_SNAP_BUF_RELEASE:
break;
- case CMD_CONFIG_FREE_BUF_ADDR: {
- int path = *((int *)cmd->value);
- struct vfe32_output_ch *outch =
- vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
- outch->free_buf = *((struct msm_free_buf *)data);
- }
- break;
- case CMD_SNAP_BUF_RELEASE:
- break;
- default:
- pr_err("%s Unsupported AXI configuration %x ", __func__,
- cmd->cmd_type);
- break;
- }
+ default:
+ pr_err("%s Unsupported AXI configuration %x ", __func__,
+ cmd->cmd_type);
+ break;
}
vfe32_config_done:
kfree(scfg);
@@ -4639,12 +5015,17 @@
.core = &msm_vfe_subdev_core_ops,
};
-int msm_axi_subdev_init(struct v4l2_subdev *sd,
- struct msm_cam_media_controller *mctl)
+int msm_axi_subdev_init(struct v4l2_subdev *sd)
{
int rc = 0;
struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
- v4l2_set_subdev_hostdata(sd, mctl);
+ struct msm_cam_media_controller *mctl;
+ mctl = v4l2_get_subdev_hostdata(sd);
+ if (mctl == NULL) {
+ pr_err("%s: mctl is NULL\n", __func__);
+ rc = -EINVAL;
+ goto mctl_failed;
+ }
spin_lock_init(&axi_ctrl->tasklet_lock);
INIT_LIST_HEAD(&axi_ctrl->tasklet_q);
spin_lock_init(&axi_ctrl->share_ctrl->sd_notify_lock);
@@ -4686,34 +5067,35 @@
return rc;
clk_enable_failed:
- regulator_disable(axi_ctrl->fs_vfe);
+ if (axi_ctrl->fs_vfe)
+ regulator_disable(axi_ctrl->fs_vfe);
fs_failed:
iounmap(axi_ctrl->share_ctrl->vfebase);
axi_ctrl->share_ctrl->vfebase = NULL;
remap_failed:
disable_irq(axi_ctrl->vfeirq->start);
+mctl_failed:
return rc;
}
-int msm_vfe_subdev_init(struct v4l2_subdev *sd,
- struct msm_cam_media_controller *mctl)
+int msm_vfe_subdev_init(struct v4l2_subdev *sd)
{
int rc = 0;
struct vfe32_ctrl_type *vfe32_ctrl =
(struct vfe32_ctrl_type *)v4l2_get_subdevdata(sd);
- v4l2_set_subdev_hostdata(sd, mctl);
spin_lock_init(&vfe32_ctrl->share_ctrl->stop_flag_lock);
+ spin_lock_init(&vfe32_ctrl->share_ctrl->abort_lock);
spin_lock_init(&vfe32_ctrl->state_lock);
- spin_lock_init(&vfe32_ctrl->io_lock);
- spin_lock_init(&vfe32_ctrl->update_ack_lock);
- spin_lock_init(&vfe32_ctrl->start_ack_lock);
+ spin_lock_init(&vfe32_ctrl->share_ctrl->update_ack_lock);
+ spin_lock_init(&vfe32_ctrl->share_ctrl->start_ack_lock);
spin_lock_init(&vfe32_ctrl->stats_bufq_lock);
vfe32_ctrl->update_linear = false;
vfe32_ctrl->update_rolloff = false;
vfe32_ctrl->update_la = false;
vfe32_ctrl->update_gamma = false;
+ vfe32_ctrl->vfe_sof_count_enable = false;
vfe32_ctrl->hfr_mode = HFR_MODE_OFF;
memset(&vfe32_ctrl->stats_ctrl, 0, sizeof(struct msm_stats_bufq_ctrl));
@@ -4758,15 +5140,147 @@
vfe32_ctrl->share_ctrl->vfebase = NULL;
}
-void axi_start(struct axi_ctrl_t *axi_ctrl)
+void axi_abort(struct axi_ctrl_t *axi_ctrl)
{
- uint16_t operation_mode =
+ uint8_t axi_busy_flag = true;
+ /* axi halt command. */
+ msm_camera_io_w(AXI_HALT,
+ axi_ctrl->share_ctrl->vfebase + VFE_AXI_CMD);
+ wmb();
+ while (axi_busy_flag) {
+ if (msm_camera_io_r(
+ axi_ctrl->share_ctrl->vfebase + VFE_AXI_STATUS) & 0x1)
+ axi_busy_flag = false;
+ }
+ /* Ensure the write order while writing
+ * to the command register using the barrier */
+ msm_camera_io_w_mb(AXI_HALT_CLEAR,
+ axi_ctrl->share_ctrl->vfebase + VFE_AXI_CMD);
+
+ /* after axi halt, then ok to apply global reset.
+ * enable reset_ack and async timer interrupt only while
+ * stopping the pipeline.*/
+ msm_camera_io_w(0xf0000000,
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+ msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+ /* Ensure the write order while writing
+ * to the command register using the barrier */
+ msm_camera_io_w_mb(VFE_RESET_UPON_STOP_CMD,
+ axi_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+}
+
+void axi_start(struct axi_ctrl_t *axi_ctrl, uint16_t cmd_type)
+{
+ uint32_t irq_comp_mask = 0, irq_mask = 0;
+
+ irq_comp_mask =
+ msm_camera_io_r(axi_ctrl->share_ctrl->vfebase +
+ VFE_IRQ_COMP_MASK);
+
+ if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_PRIMARY) {
+ irq_comp_mask |= (
+ 0x1 << axi_ctrl->share_ctrl->outpath.out0.ch0 |
+ 0x1 << axi_ctrl->share_ctrl->outpath.out0.ch1);
+ } else if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+ irq_comp_mask |= (
+ 0x1 << axi_ctrl->share_ctrl->outpath.out0.ch0 |
+ 0x1 << axi_ctrl->share_ctrl->outpath.out0.ch1 |
+ 0x1 << axi_ctrl->share_ctrl->outpath.out0.ch2);
+ }
+ if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_SECONDARY) {
+ irq_comp_mask |= (
+ 0x1 << (axi_ctrl->share_ctrl->outpath.out1.ch0 + 8) |
+ 0x1 << (axi_ctrl->share_ctrl->outpath.out1.ch1 + 8));
+ } else if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+ irq_comp_mask |= (
+ 0x1 << (axi_ctrl->share_ctrl->outpath.out1.ch0 + 8) |
+ 0x1 << (axi_ctrl->share_ctrl->outpath.out1.ch1 + 8) |
+ 0x1 << (axi_ctrl->share_ctrl->outpath.out1.ch2 + 8));
+ }
+ if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_TERTIARY1) {
+ irq_mask = msm_camera_io_r(axi_ctrl->share_ctrl->vfebase +
+ VFE_IRQ_MASK_0);
+ irq_mask |= (0x1 << (axi_ctrl->share_ctrl->outpath.out2.ch0 +
+ VFE_WM_OFFSET));
+ msm_camera_io_w(irq_mask, axi_ctrl->share_ctrl->vfebase +
+ VFE_IRQ_MASK_0);
+ }
+ if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_TERTIARY2) {
+ irq_mask = msm_camera_io_r(axi_ctrl->share_ctrl->vfebase +
+ VFE_IRQ_MASK_0);
+ irq_mask |= (0x1 << (axi_ctrl->share_ctrl->outpath.out3.ch0 +
+ VFE_WM_OFFSET));
+ msm_camera_io_w(irq_mask, axi_ctrl->share_ctrl->vfebase +
+ VFE_IRQ_MASK_0);
+ }
+
+ msm_camera_io_w(irq_comp_mask,
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+ switch (cmd_type) {
+ case AXI_CMD_PREVIEW: {
+ uint16_t operation_mode =
(axi_ctrl->share_ctrl->operation_mode &
~(VFE_OUTPUTS_RDI0|VFE_OUTPUTS_RDI1));
- switch (operation_mode) {
- case VFE_OUTPUTS_PREVIEW:
- case VFE_OUTPUTS_PREVIEW_AND_VIDEO:
+ switch (operation_mode) {
+ case VFE_OUTPUTS_PREVIEW:
+ case VFE_OUTPUTS_PREVIEW_AND_VIDEO:
+ if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_PRIMARY) {
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ } else if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch2]);
+ }
+ break;
+ default:
+ if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_SECONDARY) {
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch0]);
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch1]);
+ } else if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch0]);
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch1]);
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch2]);
+ }
+ break;
+ }
+ }
+ break;
+ default:
if (axi_ctrl->share_ctrl->outpath.output_mode &
VFE32_OUTPUT_MODE_PRIMARY) {
msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
@@ -4787,8 +5301,7 @@
vfe32_AXI_WM_CFG[axi_ctrl->
share_ctrl->outpath.out0.ch2]);
}
- break;
- default:
+
if (axi_ctrl->share_ctrl->outpath.output_mode &
VFE32_OUTPUT_MODE_SECONDARY) {
msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
@@ -4811,47 +5324,149 @@
}
break;
}
-
- if (axi_ctrl->share_ctrl->operation_mode & VFE_OUTPUTS_RDI0)
+ if (axi_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI0)
msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
vfe32_AXI_WM_CFG[axi_ctrl->share_ctrl->
outpath.out2.ch0]);
- if (axi_ctrl->share_ctrl->operation_mode & VFE_OUTPUTS_RDI1)
+ if (axi_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI1)
msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
vfe32_AXI_WM_CFG[axi_ctrl->share_ctrl->
outpath.out3.ch0]);
-
+ atomic_set(&axi_ctrl->share_ctrl->handle_axi_irq, 1);
}
-void axi_stop(struct axi_ctrl_t *axi_ctrl)
+void axi_stop(struct axi_ctrl_t *axi_ctrl, uint16_t cmd_type)
{
- uint8_t axiBusyFlag = true;
- /* axi halt command. */
- msm_camera_io_w(AXI_HALT,
- axi_ctrl->share_ctrl->vfebase + VFE_AXI_CMD);
- wmb();
- while (axiBusyFlag) {
- if (msm_camera_io_r(
- axi_ctrl->share_ctrl->vfebase + VFE_AXI_STATUS) & 0x1)
- axiBusyFlag = false;
+ uint32_t reg_update = 0;
+ unsigned long flags;
+ uint32_t operation_mode =
+ axi_ctrl->share_ctrl->current_mode & ~(VFE_OUTPUTS_RDI0|
+ VFE_OUTPUTS_RDI1);
+
+ if (!axi_ctrl->share_ctrl->skip_abort) {
+ atomic_set(&axi_ctrl->share_ctrl->handle_axi_irq, 0);
+ axi_disable_irq(axi_ctrl);
}
- /* Ensure the write order while writing
- to the command register using the barrier */
- msm_camera_io_w_mb(AXI_HALT_CLEAR,
- axi_ctrl->share_ctrl->vfebase + VFE_AXI_CMD);
- /* after axi halt, then ok to apply global reset. */
- /* enable reset_ack and async timer interrupt only while
- stopping the pipeline.*/
- msm_camera_io_w(0xf0000000,
- axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
- msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
- axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+ spin_lock_irqsave(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
+ axi_ctrl->share_ctrl->stop_ack_pending = TRUE;
+ spin_unlock_irqrestore(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
+ switch (cmd_type) {
+ case AXI_CMD_PREVIEW: {
+ switch (operation_mode) {
+ case VFE_OUTPUTS_PREVIEW:
+ case VFE_OUTPUTS_PREVIEW_AND_VIDEO:
+ if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_PRIMARY) {
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ } else if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch2]);
+ }
+ break;
+ default:
+ if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_SECONDARY) {
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch0]);
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch1]);
+ } else if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch0]);
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch1]);
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+ + vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch2]);
+ }
+ break;
+ }
+ }
+ break;
+ default:
+ if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_PRIMARY) {
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+ vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+ vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ } else if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+ vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+ vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+ vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch2]);
+ }
- /* Ensure the write order while writing
- to the command register using the barrier */
- msm_camera_io_w_mb(VFE_RESET_UPON_STOP_CMD,
- axi_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+ if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_SECONDARY) {
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+ vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch0]);
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+ vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch1]);
+ } else if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+ vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch0]);
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+ vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch1]);
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+ vfe32_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch2]);
+ }
+ break;
+ }
+ if (axi_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI0)
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+ vfe32_AXI_WM_CFG[axi_ctrl->share_ctrl->
+ outpath.out2.ch0]);
+ if (axi_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI1)
+ msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+ vfe32_AXI_WM_CFG[axi_ctrl->share_ctrl->
+ outpath.out3.ch0]);
+
+ if (axi_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI0)
+ reg_update |= 0x2;
+ if (axi_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI1)
+ reg_update |= 0x4;
+
+ if (operation_mode)
+ reg_update |= 0x1;
+ msm_camera_io_w_mb(reg_update,
+ axi_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+ if (!axi_ctrl->share_ctrl->skip_abort)
+ axi_abort(axi_ctrl);
+
}
static int msm_axi_config(struct v4l2_subdev *sd, void __user *arg)
@@ -4860,17 +5475,20 @@
struct msm_isp_cmd vfecmd;
struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
int rc = 0, vfe_cmd_type = 0, rdi_mode = 0;
+ unsigned long flags;
if (!axi_ctrl->share_ctrl->vfebase) {
pr_err("%s: base address unmapped\n", __func__);
return -EFAULT;
}
+ memset(&cfgcmd, 0, sizeof(struct msm_vfe_cfg_cmd));
if (NULL != arg) {
if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) {
ERR_COPY_FROM_USER();
return -EFAULT;
}
}
+ memset(&vfecmd, 0, sizeof(struct msm_isp_cmd));
if (NULL != cfgcmd.value) {
if (copy_from_user(&vfecmd,
(void __user *)(cfgcmd.value),
@@ -5044,11 +5662,42 @@
pr_err("%s Invalid/Unsupported AXI configuration %x",
__func__, cfgcmd.cmd_type);
break;
- case CMD_AXI_START:
- axi_start(axi_ctrl);
+ case CMD_AXI_START: {
+ struct msm_camera_vfe_params_t vfe_params;
+ if (copy_from_user(&vfe_params,
+ (void __user *)(vfecmd.value),
+ sizeof(struct msm_camera_vfe_params_t))) {
+ return -EFAULT;
+ }
+ axi_ctrl->share_ctrl->current_mode =
+ vfe_params.operation_mode;
+ spin_lock_irqsave(&axi_ctrl->share_ctrl->abort_lock, flags);
+ axi_ctrl->share_ctrl->skip_abort =
+ vfe_params.skip_abort;
+ spin_unlock_irqrestore(&axi_ctrl->share_ctrl->abort_lock,
+ flags);
+ axi_start(axi_ctrl, vfe_params.cmd_type);
+ }
break;
- case CMD_AXI_STOP:
- axi_stop(axi_ctrl);
+ case CMD_AXI_STOP: {
+ struct msm_camera_vfe_params_t vfe_params;
+ if (copy_from_user(&vfe_params,
+ (void __user *)(vfecmd.value),
+ sizeof(struct msm_camera_vfe_params_t))) {
+ return -EFAULT;
+ }
+ axi_ctrl->share_ctrl->current_mode =
+ vfe_params.operation_mode;
+ spin_lock_irqsave(&axi_ctrl->share_ctrl->abort_lock, flags);
+ axi_ctrl->share_ctrl->skip_abort =
+ vfe_params.skip_abort;
+ spin_unlock_irqrestore(&axi_ctrl->share_ctrl->abort_lock,
+ flags);
+ axi_stop(axi_ctrl, vfe_params.cmd_type);
+ }
+ break;
+ case CMD_AXI_RESET:
+ axi_reset(axi_ctrl);
break;
default:
pr_err("%s Unsupported AXI configuration %x ", __func__,
@@ -5062,6 +5711,7 @@
{
struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
uint32_t irqstatus = (uint32_t) arg;
+ unsigned long flags;
if (!axi_ctrl->share_ctrl->vfebase) {
pr_err("%s: base address unmapped\n", __func__);
@@ -5092,7 +5742,8 @@
/* in snapshot mode if done then send
snapshot done message */
- if (axi_ctrl->share_ctrl->operation_mode ==
+ if (
+ axi_ctrl->share_ctrl->operation_mode ==
VFE_OUTPUTS_THUMB_AND_MAIN ||
axi_ctrl->share_ctrl->operation_mode ==
VFE_OUTPUTS_MAIN_AND_THUMB ||
@@ -5109,6 +5760,17 @@
CAMIF_COMMAND_STOP_IMMEDIATELY,
axi_ctrl->share_ctrl->vfebase +
VFE_CAMIF_COMMAND);
+ spin_lock_irqsave(&axi_ctrl->share_ctrl->abort_lock,
+ flags);
+ if (axi_ctrl->share_ctrl->skip_abort) {
+ spin_unlock_irqrestore(&axi_ctrl->share_ctrl->
+ abort_lock, flags);
+ atomic_set(&axi_ctrl->share_ctrl->
+ handle_axi_irq, 0);
+ axi_disable_irq(axi_ctrl);
+ } else
+ spin_unlock_irqrestore(&axi_ctrl->share_ctrl->
+ abort_lock, flags);
vfe32_send_isp_msg(&axi_ctrl->subdev,
axi_ctrl->share_ctrl->vfeFrameId,
MSG_ID_SNAPSHOT_DONE);
@@ -5169,8 +5831,7 @@
int rc = -ENOIOCTLCMD;
switch (cmd) {
case VIDIOC_MSM_AXI_INIT:
- rc = msm_axi_subdev_init(sd,
- (struct msm_cam_media_controller *)arg);
+ rc = msm_axi_subdev_init(sd);
break;
case VIDIOC_MSM_AXI_CFG:
rc = msm_axi_config(sd, arg);
@@ -5188,7 +5849,9 @@
rc = 0;
break;
default:
- pr_err("%s: command not found\n", __func__);
+ pr_err("%s: command %d not found\n", __func__,
+ _IOC_NR(cmd));
+ break;
}
return rc;
}
@@ -5259,6 +5922,12 @@
sd_info.irq_num = 0;
msm_cam_register_subdev_node(&axi_ctrl->subdev, &sd_info);
+ media_entity_init(&axi_ctrl->subdev.entity, 0, NULL, 0);
+ axi_ctrl->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ axi_ctrl->subdev.entity.group_id = AXI_DEV;
+ axi_ctrl->subdev.entity.name = pdev->name;
+ axi_ctrl->subdev.entity.revision = axi_ctrl->subdev.devnode->num;
+
v4l2_subdev_init(&vfe32_ctrl->subdev, &msm_vfe_subdev_ops);
vfe32_ctrl->subdev.internal_ops = &msm_vfe_internal_ops;
vfe32_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
@@ -5297,6 +5966,19 @@
axi_ctrl->fs_vfe = NULL;
}
+ /* Register subdev node before requesting irq since
+ * irq_num is needed by msm_cam_server */
+ sd_info.sdev_type = VFE_DEV;
+ sd_info.sd_index = 0;
+ sd_info.irq_num = axi_ctrl->vfeirq->start;
+ msm_cam_register_subdev_node(&vfe32_ctrl->subdev, &sd_info);
+
+ media_entity_init(&vfe32_ctrl->subdev.entity, 0, NULL, 0);
+ vfe32_ctrl->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ vfe32_ctrl->subdev.entity.group_id = VFE_DEV;
+ vfe32_ctrl->subdev.entity.name = pdev->name;
+ vfe32_ctrl->subdev.entity.revision = vfe32_ctrl->subdev.devnode->num;
+
/* Request for this device irq from the camera server. If the
* IRQ Router is present on this target, the interrupt will be
* handled by the camera server and the interrupt service
@@ -5336,10 +6018,8 @@
axi32_do_tasklet, (unsigned long)axi_ctrl);
vfe32_ctrl->pdev = pdev;
- sd_info.sdev_type = VFE_DEV;
- sd_info.sd_index = 0;
- sd_info.irq_num = axi_ctrl->vfeirq->start;
- msm_cam_register_subdev_node(&vfe32_ctrl->subdev, &sd_info);
+ /*disable bayer stats by default*/
+ vfe32_ctrl->ver_num.main = 0;
return 0;
vfe32_no_resource:
diff --git a/drivers/media/video/msm/msm_vfe32.h b/drivers/media/video/msm/vfe/msm_vfe32.h
similarity index 92%
rename from drivers/media/video/msm/msm_vfe32.h
rename to drivers/media/video/msm/vfe/msm_vfe32.h
index 542bbf8..f4b7edb 100644
--- a/drivers/media/video/msm/msm_vfe32.h
+++ b/drivers/media/video/msm/vfe/msm_vfe32.h
@@ -104,12 +104,13 @@
#define VFE_IRQ_STATUS1_RESET_AXI_HALT_ACK_MASK 0x00800000
#define VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK 0x01000000
-#define VFE_IRQ_STATUS0_STATS_AEC 0x2000 /* bit 13 */
-#define VFE_IRQ_STATUS0_STATS_AF 0x4000 /* bit 14 */
-#define VFE_IRQ_STATUS0_STATS_AWB 0x8000 /* bit 15 */
-#define VFE_IRQ_STATUS0_STATS_RS 0x10000 /* bit 16 */
-#define VFE_IRQ_STATUS0_STATS_CS 0x20000 /* bit 17 */
-#define VFE_IRQ_STATUS0_STATS_IHIST 0x40000 /* bit 18 */
+#define VFE_IRQ_STATUS0_STATS_AEC_BG 0x2000 /* bit 13 */
+#define VFE_IRQ_STATUS0_STATS_AF_BF 0x4000 /* bit 14 */
+#define VFE_IRQ_STATUS0_STATS_AWB 0x8000 /* bit 15 */
+#define VFE_IRQ_STATUS0_STATS_RS 0x10000 /* bit 16 */
+#define VFE_IRQ_STATUS0_STATS_CS 0x20000 /* bit 17 */
+#define VFE_IRQ_STATUS0_STATS_IHIST 0x40000 /* bit 18 */
+#define VFE_IRQ_STATUS0_STATS_SK_BHIST 0x80000 /* bit 19 */
#define VFE_IRQ_STATUS0_SYNC_TIMER0 0x2000000 /* bit 25 */
#define VFE_IRQ_STATUS0_SYNC_TIMER1 0x4000000 /* bit 26 */
@@ -174,8 +175,13 @@
#define RS_CS_ENABLE_MASK 0x00000300 /* bit 8,9 */
#define CLF_ENABLE_MASK 0x00002000 /* bit 13 */
#define IHIST_ENABLE_MASK 0x00010000 /* bit 16 */
+#define SKIN_BHIST_ENABLE_MASK 0x00080000 /* bit 19 */
#define STATS_ENABLE_MASK 0x000903E0 /* bit 19,16,9,8,7,6,5*/
+#define STATS_BG_ENABLE_MASK 0x00000002 /* bit 1 */
+#define STATS_BF_ENABLE_MASK 0x00000004 /* bit 2 */
+#define STATS_BHIST_ENABLE_MASK 0x00000008 /* bit 3 */
+
#define VFE_REG_UPDATE_TRIGGER 1
#define VFE_PM_BUF_MAX_CNT_MASK 0xFF
#define VFE_DMI_CFG_DEFAULT 0x00000100
@@ -240,12 +246,13 @@
#define V32_OPERATION_CFG_LEN 44
-#define V32_AXI_OUT_OFF 0x00000038
-#define V32_AXI_OUT_LEN 224
-#define V32_AXI_CH_INF_LEN 32
+#define V32_AXI_BUS_CMD_OFF 0x00000038
+#define V32_AXI_OUT_OFF 0x0000003C
+#define V32_AXI_OUT_LEN 240
#define V32_AXI_CFG_LEN 47
-#define V32_AXI_BUS_FMT_OFF 1
-#define V32_AXI_BUS_FMT_LEN 4
+#define V32_AXI_BUS_FMT_OFF 1
+#define V32_AXI_BUS_FMT_LEN 4
+#define V32_AXI_BUS_CFG_LEN 16
#define V32_FRAME_SKIP_OFF 0x00000504
#define V32_FRAME_SKIP_LEN 32
@@ -378,6 +385,15 @@
#define V32_CLF_CHROMA_UPDATE_OFF 0x000006F0
#define V32_CLF_CHROMA_UPDATE_LEN 8
+#define V32_STATS_BG_OFF 0x00000700
+#define V32_STATS_BG_LEN 12
+
+#define V32_STATS_BF_OFF 0x0000070c
+#define V32_STATS_BF_LEN 24
+
+#define V32_STATS_BHIST_OFF 0x00000724
+#define V32_STATS_BHIST_LEN 8
+
struct vfe_cmd_hw_version {
uint32_t minorVersion;
uint32_t majorVersion;
@@ -756,7 +772,7 @@
struct vfe32_output_ch {
struct list_head free_buf_queue;
spinlock_t free_buf_lock;
- uint16_t image_mode;
+ uint32_t inst_handle;
int8_t ch0;
int8_t ch1;
int8_t ch2;
@@ -845,12 +861,12 @@
#define VFE_AXI_STATUS 0x000001DC
#define VFE_BUS_STATS_PING_PONG_BASE 0x000000F4
-#define VFE_BUS_STATS_AEC_WR_PING_ADDR 0x000000F4
-#define VFE_BUS_STATS_AEC_WR_PONG_ADDR 0x000000F8
-#define VFE_BUS_STATS_AEC_UB_CFG 0x000000FC
-#define VFE_BUS_STATS_AF_WR_PING_ADDR 0x00000100
-#define VFE_BUS_STATS_AF_WR_PONG_ADDR 0x00000104
-#define VFE_BUS_STATS_AF_UB_CFG 0x00000108
+#define VFE_BUS_STATS_AEC_BG_WR_PING_ADDR 0x000000F4
+#define VFE_BUS_STATS_AEC_BG_WR_PONG_ADDR 0x000000F8
+#define VFE_BUS_STATS_AEC_BG_UB_CFG 0x000000FC
+#define VFE_BUS_STATS_AF_BF_WR_PING_ADDR 0x00000100
+#define VFE_BUS_STATS_AF_BF_WR_PONG_ADDR 0x00000104
+#define VFE_BUS_STATS_AF_BF_UB_CFG 0x00000108
#define VFE_BUS_STATS_AWB_WR_PING_ADDR 0x0000010C
#define VFE_BUS_STATS_AWB_WR_PONG_ADDR 0x00000110
#define VFE_BUS_STATS_AWB_UB_CFG 0x00000114
@@ -864,9 +880,9 @@
#define VFE_BUS_STATS_HIST_WR_PING_ADDR 0x00000130
#define VFE_BUS_STATS_HIST_WR_PONG_ADDR 0x00000134
#define VFE_BUS_STATS_HIST_UB_CFG 0x00000138
-#define VFE_BUS_STATS_SKIN_WR_PING_ADDR 0x0000013C
-#define VFE_BUS_STATS_SKIN_WR_PONG_ADDR 0x00000140
-#define VFE_BUS_STATS_SKIN_UB_CFG 0x00000144
+#define VFE_BUS_STATS_SKIN_BHIST_WR_PING_ADDR 0x0000013C
+#define VFE_BUS_STATS_SKIN_BHIST_WR_PONG_ADDR 0x00000140
+#define VFE_BUS_STATS_SKIN_BHIST_UB_CFG 0x00000144
#define VFE_CAMIF_COMMAND 0x000001E0
#define VFE_CAMIF_STATUS 0x00000204
#define VFE_REG_UPDATE_CMD 0x00000260
@@ -888,6 +904,7 @@
#define VFE_STATS_AWB_SGW_CFG 0x00000554
#define VFE_DMI_CFG 0x00000598
#define VFE_DMI_ADDR 0x0000059C
+#define VFE_DMI_DATA_HI 0x000005A0
#define VFE_DMI_DATA_LO 0x000005A4
#define VFE_BUS_IO_FORMAT_CFG 0x000006F8
#define VFE_PIXEL_IF_CFG 0x000006FC
@@ -924,21 +941,37 @@
uint32_t register_total;
atomic_t vstate;
+ atomic_t handle_axi_irq;
uint32_t vfeFrameId;
uint32_t stats_comp;
spinlock_t stop_flag_lock;
+ spinlock_t abort_lock;
int8_t stop_ack_pending;
enum vfe_output_state liveshot_state;
uint32_t vfe_capture_count;
- uint16_t operation_mode; /* streaming or snapshot */
+ uint32_t operation_mode; /* streaming or snapshot */
+ uint32_t current_mode;
struct vfe32_output_path outpath;
- uint32_t ref_count;
+ uint16_t port_info;
+ uint32_t skip_abort;
spinlock_t sd_notify_lock;
+ struct completion reset_complete;
+
+ spinlock_t update_ack_lock;
+ spinlock_t start_ack_lock;
+
struct axi_ctrl_t *axi_ctrl;
struct vfe32_ctrl_type *vfe32_ctrl;
+ int8_t start_ack_pending;
+ int8_t update_ack_pending;
+ enum vfe_output_state recording_state;
+
+ atomic_t rdi0_update_ack_pending;
+ atomic_t rdi1_update_ack_pending;
+ atomic_t rdi2_update_ack_pending;
};
struct axi_ctrl_t {
@@ -959,20 +992,12 @@
};
struct vfe32_ctrl_type {
- uint32_t vfeImaskCompositePacked;
-
- spinlock_t update_ack_lock;
- spinlock_t start_ack_lock;
spinlock_t state_lock;
- spinlock_t io_lock;
spinlock_t stats_bufq_lock;
uint32_t extlen;
void *extdata;
- int8_t start_ack_pending;
- int8_t reset_ack_pending;
- int8_t update_ack_pending;
- enum vfe_output_state recording_state;
+ int8_t vfe_sof_count_enable;
int8_t update_linear;
int8_t update_rolloff;
int8_t update_la;
@@ -984,18 +1009,14 @@
uint32_t sync_timer_state;
uint32_t sync_timer_number;
- uint32_t output1Pattern;
- uint32_t output1Period;
- uint32_t output2Pattern;
- uint32_t output2Period;
- uint32_t vfeFrameSkipCount;
- uint32_t vfeFrameSkipPeriod;
- struct vfe_stats_control afStatsControl;
+ struct msm_ver_num_info ver_num;
+ struct vfe_stats_control afbfStatsControl;
struct vfe_stats_control awbStatsControl;
- struct vfe_stats_control aecStatsControl;
+ struct vfe_stats_control aecbgStatsControl;
struct vfe_stats_control ihistStatsControl;
struct vfe_stats_control rsStatsControl;
struct vfe_stats_control csStatsControl;
+ struct vfe_stats_control bhistStatsControl;
/* v4l2 subdev */
struct v4l2_subdev subdev;
@@ -1006,6 +1027,8 @@
uint32_t snapshot_frame_cnt;
struct msm_stats_bufq_ctrl stats_ctrl;
struct msm_stats_ops stats_ops;
+
+ uint32_t simultaneous_sof_stat;
};
#define statsAeNum 0
@@ -1026,19 +1049,4 @@
uint32_t statsBuf[VFE_STATS_BUFFER_COUNT];
};
-#define VIDIOC_MSM_AXI_INIT \
- _IOWR('V', BASE_VIDIOC_PRIVATE + 18, struct msm_cam_media_controller *)
-
-#define VIDIOC_MSM_AXI_RELEASE \
- _IOWR('V', BASE_VIDIOC_PRIVATE + 19, struct msm_cam_media_controller *)
-
-#define VIDIOC_MSM_AXI_CFG \
- _IOWR('V', BASE_VIDIOC_PRIVATE + 20, void *)
-
-#define VIDIOC_MSM_AXI_IRQ \
- _IOWR('V', BASE_VIDIOC_PRIVATE + 21, void *)
-
-#define VIDIOC_MSM_AXI_BUF_CFG \
- _IOWR('V', BASE_VIDIOC_PRIVATE + 22, void *)
-
#endif /* __MSM_VFE32_H__ */
diff --git a/drivers/media/video/msm/msm_vfe7x.c b/drivers/media/video/msm/vfe/msm_vfe7x.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe7x.c
rename to drivers/media/video/msm/vfe/msm_vfe7x.c
diff --git a/drivers/media/video/msm/msm_vfe7x.h b/drivers/media/video/msm/vfe/msm_vfe7x.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe7x.h
rename to drivers/media/video/msm/vfe/msm_vfe7x.h
diff --git a/drivers/media/video/msm/msm_vfe7x27a.c b/drivers/media/video/msm/vfe/msm_vfe7x27a.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe7x27a.c
rename to drivers/media/video/msm/vfe/msm_vfe7x27a.c
diff --git a/drivers/media/video/msm/msm_vfe7x27a.h b/drivers/media/video/msm/vfe/msm_vfe7x27a.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe7x27a.h
rename to drivers/media/video/msm/vfe/msm_vfe7x27a.h
diff --git a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.c
similarity index 82%
rename from drivers/media/video/msm/msm_vfe7x27a_v4l2.c
rename to drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.c
index 96047d5..e1d8b48 100644
--- a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
+++ b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.c
@@ -333,6 +333,7 @@
"VFE_SCALE_OUTPUT2_CONFIG"},
{VFE_CMD_CAPTURE_RAW, VFE_START, QDSP_CMDQUEUE,
"VFE_CMD_CAPTURE_RAW", "VFE_START"},
+ {VFE_CMD_STOP_LIVESHOT, VFE_MAX, VFE_MAX},
{VFE_CMD_RECONFIG_VFE, VFE_MAX, VFE_MAX},
};
@@ -404,6 +405,25 @@
return 0L;
}
+static unsigned long vfe2x_stats_unregbuf(
+ struct msm_stats_reqbuf *req_buf)
+{
+ int i = 0, rc = 0;
+
+ for (i = 0; i < req_buf->num_buf; i++) {
+ rc = vfe2x_ctrl->stats_ops.buf_unprepare(
+ vfe2x_ctrl->stats_ops.stats_ctrl,
+ req_buf->stats_type, i,
+ vfe2x_ctrl->stats_ops.client);
+ if (rc < 0) {
+ pr_err("%s: unreg stats buf (type = %d) err = %d",
+ __func__, req_buf->stats_type, rc);
+ return rc;
+ }
+ }
+ return 0L;
+}
+
static int vfe2x_stats_buf_init(enum msm_stats_enum_type type)
{
unsigned long flags;
@@ -556,6 +576,22 @@
vfe2x_ctrl->stats_ops.client);
}
break;
+ case VFE_CMD_STATS_UNREGBUF:
+ {
+ struct msm_stats_reqbuf *req_buf = NULL;
+ req_buf = (struct msm_stats_reqbuf *)cmd->value;
+ if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+			/* error: the length does not match */
+ pr_err("%s: stats reqbuf input size = %d,\n"
+ "struct size = %d, mitch match\n",
+ __func__, cmd->length,
+ sizeof(struct msm_stats_reqbuf));
+ rc = -EINVAL ;
+ goto end;
+ }
+ rc = vfe2x_stats_unregbuf(req_buf);
+ }
+ break;
default:
rc = -1;
pr_err("%s: cmd_type %d not supported",
@@ -585,7 +621,7 @@
struct isp_msg_output msg;
msg.output_id = msgid;
- msg.buf.image_mode = -1;
+ msg.buf.inst_handle = 0;
msg.buf.ch_paddr[0] = ch0_paddr;
msg.buf.ch_paddr[1] = ch1_paddr;
msg.frameCounter = vfe2x_ctrl->vfeFrameId;
@@ -631,6 +667,8 @@
void *data = NULL;
struct buf_info *outch = NULL;
uint32_t y_phy, cbcr_phy;
+ static uint32_t liveshot_y_phy;
+ static struct vfe_endframe liveshot_swap;
struct table_cmd *table_pending = NULL;
unsigned long flags;
void *cmd_data = NULL;
@@ -659,29 +697,63 @@
switch (id) {
case MSG_SNAPSHOT:
msm_camio_set_perf_lvl(S_PREVIEW);
- vfe_7x_ops(driver_data, MSG_OUTPUT_S, len, getevent);
- if (!raw_mode)
- vfe_7x_ops(driver_data, MSG_OUTPUT_T,
+ while (vfe2x_ctrl->snap.frame_cnt <
+ vfe2x_ctrl->num_snap) {
+ vfe_7x_ops(driver_data, MSG_OUTPUT_S, len,
+ getevent);
+ if (!raw_mode)
+ vfe_7x_ops(driver_data, MSG_OUTPUT_T,
len, getevent);
+ }
vfe2x_send_isp_msg(vfe2x_ctrl, MSG_ID_SNAPSHOT_DONE);
kfree(data);
return;
case MSG_OUTPUT_S:
outch = &vfe2x_ctrl->snap;
- y_phy = outch->ping.ch_paddr[0];
- cbcr_phy = outch->ping.ch_paddr[1];
- CDBG("MSG_OUTPUT_S: %x %x\n",
- (unsigned int)y_phy, (unsigned int)cbcr_phy);
+ if (outch->frame_cnt == 0) {
+ y_phy = outch->ping.ch_paddr[0];
+ cbcr_phy = outch->ping.ch_paddr[1];
+ } else if (outch->frame_cnt == 1) {
+ y_phy = outch->pong.ch_paddr[0];
+ cbcr_phy = outch->pong.ch_paddr[1];
+ } else if (outch->frame_cnt == 2) {
+ y_phy = outch->free_buf.ch_paddr[0];
+ cbcr_phy = outch->free_buf.ch_paddr[1];
+ } else {
+ y_phy = outch->free_buf_arr[outch->frame_cnt
+ - 3].ch_paddr[0];
+ cbcr_phy = outch->free_buf_arr[outch->frame_cnt
+ - 3].ch_paddr[1];
+ }
+ outch->frame_cnt++;
+ CDBG("MSG_OUTPUT_S: %x %x %d\n",
+ (unsigned int)y_phy, (unsigned int)cbcr_phy,
+ outch->frame_cnt);
vfe_send_outmsg(&vfe2x_ctrl->subdev,
MSG_ID_OUTPUT_PRIMARY,
y_phy, cbcr_phy);
break;
case MSG_OUTPUT_T:
outch = &vfe2x_ctrl->thumb;
- y_phy = outch->ping.ch_paddr[0];
- cbcr_phy = outch->ping.ch_paddr[1];
- CDBG("MSG_OUTPUT_T: %x %x\n",
- (unsigned int)y_phy, (unsigned int)cbcr_phy);
+ if (outch->frame_cnt == 0) {
+ y_phy = outch->ping.ch_paddr[0];
+ cbcr_phy = outch->ping.ch_paddr[1];
+ } else if (outch->frame_cnt == 1) {
+ y_phy = outch->pong.ch_paddr[0];
+ cbcr_phy = outch->pong.ch_paddr[1];
+ } else if (outch->frame_cnt == 2) {
+ y_phy = outch->free_buf.ch_paddr[0];
+ cbcr_phy = outch->free_buf.ch_paddr[1];
+ } else {
+ y_phy = outch->free_buf_arr[outch->frame_cnt
+ - 3].ch_paddr[0];
+ cbcr_phy = outch->free_buf_arr[outch->frame_cnt
+ - 3].ch_paddr[1];
+ }
+ outch->frame_cnt++;
+ CDBG("MSG_OUTPUT_T: %x %x %d\n",
+ (unsigned int)y_phy, (unsigned int)cbcr_phy,
+ outch->frame_cnt);
vfe_send_outmsg(&vfe2x_ctrl->subdev,
MSG_ID_OUTPUT_SECONDARY,
y_phy, cbcr_phy);
@@ -764,48 +836,139 @@
if (op_mode & SNAPSHOT_MASK_MODE) {
kfree(data);
return;
- } else {
- free_buf = vfe2x_check_free_buffer(
+ }
+ free_buf = vfe2x_check_free_buffer(
VFE_MSG_OUTPUT_IRQ,
VFE_MSG_OUTPUT_PRIMARY);
- CDBG("free_buf = %x\n", (unsigned int) free_buf);
- if (free_buf) {
+ CDBG("free_buf = %x\n",
+ (unsigned int) free_buf);
+ spin_lock_irqsave(
+ &vfe2x_ctrl->liveshot_enabled_lock,
+ flags);
+ if (!vfe2x_ctrl->liveshot_enabled) {
+ spin_unlock_irqrestore(
+ &vfe2x_ctrl->
+ liveshot_enabled_lock,
+ flags);
+ if (free_buf) {
fack.header = VFE_OUTPUT2_ACK;
fack.output2newybufferaddress =
- (void *)(free_buf->ch_paddr[0]);
+ (void *)
+ (free_buf->ch_paddr[0]);
fack.output2newcbcrbufferaddress =
- (void *)(free_buf->ch_paddr[1]);
+ (void *)
+ (free_buf->ch_paddr[1]);
cmd_data = &fack;
len = sizeof(fack);
- msm_adsp_write(vfe_mod, QDSP_CMDQUEUE,
+ msm_adsp_write(vfe_mod,
+ QDSP_CMDQUEUE,
cmd_data, len);
- } else {
+ } else {
fack.header = VFE_OUTPUT2_ACK;
fack.output2newybufferaddress =
- (void *)
- ((struct vfe_endframe *)data)->y_address;
+ (void *)
+ ((struct vfe_endframe *)
+ data)->y_address;
fack.output2newcbcrbufferaddress =
- (void *)
- ((struct vfe_endframe *)data)->cbcr_address;
+ (void *)
+ ((struct vfe_endframe *)
+ data)->cbcr_address;
cmd_data = &fack;
len = sizeof(fack);
- msm_adsp_write(vfe_mod, QDSP_CMDQUEUE,
- cmd_data, len);
+ msm_adsp_write(vfe_mod,
+ QDSP_CMDQUEUE,
+ cmd_data, len);
if (!vfe2x_ctrl->zsl_mode) {
kfree(data);
return;
}
}
+ } else { /* Live snapshot */
+ spin_unlock_irqrestore(
+ &vfe2x_ctrl->
+ liveshot_enabled_lock,
+ flags);
+ if (free_buf) {
+					/* liveshot_swap to enqueue
+					when liveshot snapshot buffer
+					is obtained from adsp */
+ liveshot_swap.y_address =
+ ((struct vfe_endframe *)
+ data)->y_address;
+ liveshot_swap.cbcr_address =
+ ((struct vfe_endframe *)
+ data)->cbcr_address;
+
+ fack.header = VFE_OUTPUT2_ACK;
+
+ fack.output2newybufferaddress =
+ (void *)
+ (free_buf->ch_paddr[0]);
+
+ fack.output2newcbcrbufferaddress =
+ (void *)
+ (free_buf->ch_paddr[1]);
+
+ liveshot_y_phy =
+ (uint32_t)
+ fack.output2newybufferaddress;
+
+ cmd_data = &fack;
+ len = sizeof(fack);
+ msm_adsp_write(vfe_mod,
+ QDSP_CMDQUEUE,
+ cmd_data, len);
+ } else if (liveshot_y_phy !=
+ ((struct vfe_endframe *)
+ data)->y_address) {
+
+ fack.header = VFE_OUTPUT2_ACK;
+ fack.output2newybufferaddress =
+ (void *)
+ ((struct vfe_endframe *)
+ data)->y_address;
+
+ fack.output2newcbcrbufferaddress =
+ (void *)
+ ((struct vfe_endframe *)
+ data)->cbcr_address;
+
+ cmd_data = &fack;
+ len = sizeof(fack);
+ msm_adsp_write(vfe_mod,
+ QDSP_CMDQUEUE,
+ cmd_data, len);
+ kfree(data);
+ return;
+ } else {
+					/* Enqueue data got
+					 * during freebuf */
+ fack.header = VFE_OUTPUT2_ACK;
+ fack.output2newybufferaddress =
+ (void *)
+ (liveshot_swap.y_address);
+
+ fack.output2newcbcrbufferaddress =
+ (void *)
+ (liveshot_swap.cbcr_address);
+ cmd_data = &fack;
+ len = sizeof(fack);
+ msm_adsp_write(vfe_mod,
+ QDSP_CMDQUEUE,
+ cmd_data, len);
+ }
}
- y_phy = ((struct vfe_endframe *)data)->y_address;
- cbcr_phy = ((struct vfe_endframe *)data)->cbcr_address;
+ y_phy = ((struct vfe_endframe *)data)->
+ y_address;
+ cbcr_phy = ((struct vfe_endframe *)data)->
+ cbcr_address;
- CDBG("vfe_7x_convert, y_phy = 0x%x, cbcr_phy = 0x%x\n",
- y_phy, cbcr_phy);
+ CDBG("MSG_OUT2:y_phy= 0x%x, cbcr_phy= 0x%x\n",
+ y_phy, cbcr_phy);
if (free_buf) {
for (i = 0; i < 3; i++) {
if (vfe2x_ctrl->free_buf.buf[i].
@@ -823,14 +986,23 @@
CDBG("Address doesnt match\n");
}
memcpy(((struct vfe_frame_extra *)extdata),
- &((struct vfe_endframe *)data)->extra,
- sizeof(struct vfe_frame_extra));
+ &((struct vfe_endframe *)data)->extra,
+ sizeof(struct vfe_frame_extra));
vfe2x_ctrl->vfeFrameId =
- ((struct vfe_frame_extra *)extdata)->frame_id;
- vfe_send_outmsg(&vfe2x_ctrl->subdev,
+ ((struct vfe_frame_extra *)extdata)->
+ frame_id;
+
+ if (!vfe2x_ctrl->liveshot_enabled) {
+				/* Liveshot not enabled */
+ vfe_send_outmsg(&vfe2x_ctrl->subdev,
MSG_ID_OUTPUT_PRIMARY,
y_phy, cbcr_phy);
+ } else if (liveshot_y_phy == y_phy) {
+ vfe_send_outmsg(&vfe2x_ctrl->subdev,
+ MSG_ID_OUTPUT_PRIMARY,
+ y_phy, cbcr_phy);
+ }
break;
case MSG_RESET_ACK:
case MSG_START_ACK:
@@ -885,8 +1057,9 @@
vfe2x_ctrl->vfeFrameId++;
if (vfe2x_ctrl->vfeFrameId == 0)
vfe2x_ctrl->vfeFrameId = 1; /* wrapped back */
- if ((op_mode & SNAPSHOT_MASK_MODE) && !raw_mode) {
- pr_err("Ignore SOF for snapshot\n");
+ if ((op_mode & SNAPSHOT_MASK_MODE) && !raw_mode
+ && (vfe2x_ctrl->num_snap <= 1)) {
+ CDBG("Ignore SOF for snapshot\n");
kfree(data);
return;
}
@@ -986,11 +1159,81 @@
int cnt;
int rc = 0;
int o_mode = 0;
+ unsigned long flags;
if (op_mode & SNAPSHOT_MASK_MODE)
o_mode = SNAPSHOT_MASK_MODE;
- if (mode == OUTPUT_SEC) {
+ if ((o_mode == SNAPSHOT_MASK_MODE) && (vfe2x_ctrl->num_snap > 1)) {
+ CDBG("%s: BURST mode freebuf cnt %d", __func__,
+ ad->free_buf_cnt);
+ /* Burst */
+ if (mode == OUTPUT_SEC) {
+ ao->output1buffer1_y_phy = ad->ping.ch_paddr[0];
+ ao->output1buffer1_cbcr_phy = ad->ping.ch_paddr[1];
+ ao->output1buffer2_y_phy = ad->pong.ch_paddr[0];
+ ao->output1buffer2_cbcr_phy = ad->pong.ch_paddr[1];
+ ao->output1buffer3_y_phy = ad->free_buf.ch_paddr[0];
+ ao->output1buffer3_cbcr_phy = ad->free_buf.ch_paddr[1];
+ bptr = &ao->output1buffer4_y_phy;
+ for (cnt = 0; cnt < 5; cnt++) {
+ *bptr = (cnt < ad->free_buf_cnt-3) ?
+ ad->free_buf_arr[cnt].ch_paddr[0] :
+ ad->pong.ch_paddr[0];
+ bptr++;
+ *bptr = (cnt < ad->free_buf_cnt-3) ?
+ ad->free_buf_arr[cnt].ch_paddr[1] :
+ ad->pong.ch_paddr[1];
+ bptr++;
+ }
+ CDBG("%x %x\n", (unsigned int)ao->output1buffer1_y_phy,
+ (unsigned int)ao->output1buffer1_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output1buffer2_y_phy,
+ (unsigned int)ao->output1buffer2_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output1buffer3_y_phy,
+ (unsigned int)ao->output1buffer3_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output1buffer4_y_phy,
+ (unsigned int)ao->output1buffer4_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output1buffer5_y_phy,
+ (unsigned int)ao->output1buffer5_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output1buffer6_y_phy,
+ (unsigned int)ao->output1buffer6_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output1buffer7_y_phy,
+ (unsigned int)ao->output1buffer7_cbcr_phy);
+ } else { /*Primary*/
+ ao->output2buffer1_y_phy = ad->ping.ch_paddr[0];
+ ao->output2buffer1_cbcr_phy = ad->ping.ch_paddr[1];
+ ao->output2buffer2_y_phy = ad->pong.ch_paddr[0];
+ ao->output2buffer2_cbcr_phy = ad->pong.ch_paddr[1];
+ ao->output2buffer3_y_phy = ad->free_buf.ch_paddr[0];
+ ao->output2buffer3_cbcr_phy = ad->free_buf.ch_paddr[1];
+ bptr = &ao->output2buffer4_y_phy;
+ for (cnt = 0; cnt < 5; cnt++) {
+ *bptr = (cnt < ad->free_buf_cnt-3) ?
+ ad->free_buf_arr[cnt].ch_paddr[0] :
+ ad->pong.ch_paddr[0];
+ bptr++;
+ *bptr = (cnt < ad->free_buf_cnt-3) ?
+ ad->free_buf_arr[cnt].ch_paddr[1] :
+ ad->pong.ch_paddr[1];
+ bptr++;
+ }
+ CDBG("%x %x\n", (unsigned int)ao->output2buffer1_y_phy,
+ (unsigned int)ao->output2buffer1_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output2buffer2_y_phy,
+ (unsigned int)ao->output2buffer2_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output2buffer3_y_phy,
+ (unsigned int)ao->output2buffer3_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output2buffer4_y_phy,
+ (unsigned int)ao->output2buffer4_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output2buffer5_y_phy,
+ (unsigned int)ao->output2buffer5_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output2buffer6_y_phy,
+ (unsigned int)ao->output2buffer6_cbcr_phy);
+ CDBG("%x %x\n", (unsigned int)ao->output2buffer7_y_phy,
+ (unsigned int)ao->output2buffer7_cbcr_phy);
+ }
+ } else if (mode == OUTPUT_SEC) {
/* Thumbnail */
if (vfe2x_ctrl->zsl_mode) {
ao->output1buffer1_y_phy = ad->ping.ch_paddr[0];
@@ -1025,8 +1268,17 @@
ao->output2buffer1_cbcr_phy = ad->ping.ch_paddr[1];
ao->output2buffer2_y_phy = ad->pong.ch_paddr[0];
ao->output2buffer2_cbcr_phy = ad->pong.ch_paddr[1];
- ao->output2buffer3_y_phy = ad->free_buf.ch_paddr[0];
- ao->output2buffer3_cbcr_phy = ad->free_buf.ch_paddr[1];
+ spin_lock_irqsave(&vfe2x_ctrl->liveshot_enabled_lock,
+ flags);
+ if (vfe2x_ctrl->liveshot_enabled) { /* Live shot */
+ ao->output2buffer3_y_phy = ad->pong.ch_paddr[0];
+ ao->output2buffer3_cbcr_phy = ad->pong.ch_paddr[1];
+ } else {
+ ao->output2buffer3_y_phy = ad->free_buf.ch_paddr[0];
+ ao->output2buffer3_cbcr_phy = ad->free_buf.ch_paddr[1];
+ }
+ spin_unlock_irqrestore(&vfe2x_ctrl->liveshot_enabled_lock,
+ flags);
bptr = &ao->output2buffer4_y_phy;
for (cnt = 0; cnt < 5; cnt++) {
*bptr = ad->pong.ch_paddr[0];
@@ -1182,12 +1434,11 @@
unsigned int subdev_cmd, void *arg)
{
struct msm_isp_cmd vfecmd;
- struct msm_camvfe_params *vfe_params =
- (struct msm_camvfe_params *)arg;
- struct msm_vfe_cfg_cmd *cmd = vfe_params->vfe_cfg;
+ struct msm_camvfe_params *vfe_params;
+ struct msm_vfe_cfg_cmd *cmd;
struct table_cmd *table_pending;
long rc = 0;
- void *data = vfe_params->data;
+ void *data;
struct msm_pmem_region *regptr;
unsigned char buf[256];
@@ -1208,6 +1459,18 @@
struct vfe_outputack fack;
CDBG("msm_vfe_subdev_ioctl is called\n");
+ if (subdev_cmd == VIDIOC_MSM_VFE_INIT) {
+ CDBG("%s init\n", __func__);
+ return msm_vfe_subdev_init(sd);
+ } else if (subdev_cmd == VIDIOC_MSM_VFE_RELEASE) {
+ msm_vfe_subdev_release(sd);
+ return 0;
+ }
+
+ vfe_params = (struct msm_camvfe_params *)arg;
+ cmd = vfe_params->vfe_cfg;
+ data = vfe_params->data;
+
if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE &&
cmd->cmd_type != CMD_STATS_BUF_RELEASE &&
cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE &&
@@ -1217,6 +1480,7 @@
cmd->cmd_type != CMD_VFE_BUFFER_RELEASE &&
cmd->cmd_type != VFE_CMD_STATS_REQBUF &&
cmd->cmd_type != VFE_CMD_STATS_FLUSH_BUFQ &&
+ cmd->cmd_type != VFE_CMD_STATS_UNREGBUF &&
cmd->cmd_type != VFE_CMD_STATS_ENQUEUEBUF) {
if (copy_from_user(&vfecmd,
(void __user *)(cmd->value),
@@ -1228,6 +1492,7 @@
switch (cmd->cmd_type) {
case VFE_CMD_STATS_REQBUF:
case VFE_CMD_STATS_FLUSH_BUFQ:
+ case VFE_CMD_STATS_UNREGBUF:
/* for easy porting put in one envelope */
rc = vfe2x_stats_bufq_sub_ioctl(cmd, vfe_params->data);
return rc;
@@ -1300,7 +1565,20 @@
case CMD_CONFIG_FREE_BUF_ADDR: {
int path = *((int *)cmd->value);
struct buf_info *outch = vfe2x_get_ch(path);
- outch->free_buf = *((struct msm_free_buf *)data);
+ if ((op_mode & SNAPSHOT_MASK_MODE) &&
+ (vfe2x_ctrl->num_snap > 1)) {
+ CDBG("%s: CMD_CONFIG_FREE_BUF_ADDR Burst mode %d",
+ __func__, outch->free_buf_cnt);
+ if (outch->free_buf_cnt <= 0)
+ outch->free_buf =
+ *((struct msm_free_buf *)data);
+ else
+ outch->free_buf_arr[outch->free_buf_cnt-1] =
+ *((struct msm_free_buf *)data);
+ ++outch->free_buf_cnt;
+ } else {
+ outch->free_buf = *((struct msm_free_buf *)data);
+ }
}
return 0;
@@ -1478,6 +1756,12 @@
vfecmd.length))
rc = -EFAULT;
op_mode = vfe2x_ctrl->start_cmd.mode_of_operation;
+ vfe2x_ctrl->snap.free_buf_cnt = 0;
+ vfe2x_ctrl->thumb.free_buf_cnt = 0;
+ vfe2x_ctrl->snap.frame_cnt = 0;
+ vfe2x_ctrl->thumb.frame_cnt = 0;
+ vfe2x_ctrl->num_snap =
+ vfe2x_ctrl->start_cmd.snap_number;
return rc;
}
if (vfecmd.id == VFE_CMD_RECONFIG_VFE) {
@@ -1485,6 +1769,26 @@
vfe2x_ctrl->reconfig_vfe = 1;
return 0;
}
+ if (vfecmd.id == VFE_CMD_LIVESHOT) {
+ CDBG("live shot enabled\n");
+ spin_lock_irqsave(&vfe2x_ctrl->liveshot_enabled_lock,
+ flags);
+ vfe2x_ctrl->liveshot_enabled = 1;
+ spin_unlock_irqrestore(&vfe2x_ctrl->
+ liveshot_enabled_lock,
+ flags);
+ return 0;
+ }
+ if (vfecmd.id == VFE_CMD_STOP_LIVESHOT) {
+ CDBG("live shot disabled\n");
+ spin_lock_irqsave(&vfe2x_ctrl->liveshot_enabled_lock,
+ flags);
+ vfe2x_ctrl->liveshot_enabled = 0;
+ spin_unlock_irqrestore(
+ &vfe2x_ctrl->liveshot_enabled_lock,
+ flags);
+ return 0;
+ }
if (vfecmd.length > 256 - 4) {
cmd_data_alloc =
cmd_data = kmalloc(vfecmd.length + 4, GFP_ATOMIC);
@@ -1615,11 +1919,11 @@
}
if (op_mode & SNAPSHOT_MASK_MODE)
rc = vfe2x_configure_pingpong_buffers(
- VFE_MSG_V2X_CAPTURE,
+ VFE_MSG_CAPTURE,
VFE_MSG_OUTPUT_SECONDARY);
else
rc = vfe2x_configure_pingpong_buffers(
- VFE_MSG_V2X_PREVIEW,
+ VFE_MSG_PREVIEW,
VFE_MSG_OUTPUT_SECONDARY);
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
@@ -1669,11 +1973,11 @@
if (!vfe2x_ctrl->reconfig_vfe) {
if (op_mode & SNAPSHOT_MASK_MODE)
rc = vfe2x_configure_pingpong_buffers(
- VFE_MSG_V2X_CAPTURE,
+ VFE_MSG_CAPTURE,
VFE_MSG_OUTPUT_PRIMARY);
else
rc = vfe2x_configure_pingpong_buffers(
- VFE_MSG_V2X_PREVIEW,
+ VFE_MSG_PREVIEW,
VFE_MSG_OUTPUT_PRIMARY);
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
@@ -1734,10 +2038,10 @@
}
if (!vfe2x_ctrl->reconfig_vfe) {
rc = vfe2x_configure_pingpong_buffers(
- VFE_MSG_V2X_PREVIEW,
+ VFE_MSG_PREVIEW,
VFE_MSG_OUTPUT_PRIMARY);
rc = vfe2x_configure_pingpong_buffers(
- VFE_MSG_V2X_PREVIEW,
+ VFE_MSG_PREVIEW,
VFE_MSG_OUTPUT_SECONDARY);
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
@@ -1798,11 +2102,11 @@
}
if (op_mode & SNAPSHOT_MASK_MODE)
rc = vfe2x_configure_pingpong_buffers(
- VFE_MSG_V2X_CAPTURE,
+ VFE_MSG_CAPTURE,
VFE_MSG_OUTPUT_SECONDARY);
else
rc = vfe2x_configure_pingpong_buffers(
- VFE_MSG_V2X_PREVIEW,
+ VFE_MSG_PREVIEW,
VFE_MSG_OUTPUT_SECONDARY);
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
@@ -1811,10 +2115,21 @@
goto config_done;
}
- if (!(op_mode & SNAPSHOT_MASK_MODE))
+ if (!(op_mode & SNAPSHOT_MASK_MODE)) {
free_buf = vfe2x_check_free_buffer(
VFE_MSG_OUTPUT_IRQ,
VFE_MSG_OUTPUT_SECONDARY);
+ } else if ((op_mode & SNAPSHOT_MASK_MODE) &&
+ (vfe2x_ctrl->num_snap > 1)) {
+ int i = 0;
+ CDBG("Burst mode AXI config SEC snap cnt %d\n",
+ vfe2x_ctrl->num_snap);
+ for (i = 0; i < vfe2x_ctrl->num_snap - 2; i++) {
+ free_buf = vfe2x_check_free_buffer(
+ VFE_MSG_OUTPUT_IRQ,
+ VFE_MSG_OUTPUT_SECONDARY);
+ }
+ }
header = cmds_map[vfecmd.id].vfe_id;
queue = cmds_map[vfecmd.id].queue;
if (header == -1 && queue == -1) {
@@ -1829,11 +2144,11 @@
if (op_mode & SNAPSHOT_MASK_MODE)
rc = vfe2x_configure_pingpong_buffers(
- VFE_MSG_V2X_CAPTURE,
+ VFE_MSG_CAPTURE,
VFE_MSG_OUTPUT_PRIMARY);
else
rc = vfe2x_configure_pingpong_buffers(
- VFE_MSG_V2X_PREVIEW,
+ VFE_MSG_PREVIEW,
VFE_MSG_OUTPUT_PRIMARY);
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
@@ -1842,10 +2157,22 @@
goto config_done;
}
- if (!(op_mode & SNAPSHOT_MASK_MODE))
+ if (!(op_mode & SNAPSHOT_MASK_MODE)) {
free_buf = vfe2x_check_free_buffer(
VFE_MSG_OUTPUT_IRQ,
VFE_MSG_OUTPUT_PRIMARY);
+ } else if ((op_mode & SNAPSHOT_MASK_MODE) &&
+ (vfe2x_ctrl->num_snap > 1)) {
+ int i = 0;
+ CDBG("Burst mode AXI config PRIM snap cnt %d\n",
+ vfe2x_ctrl->num_snap);
+ for (i = 0; i < vfe2x_ctrl->num_snap - 2; i++) {
+ free_buf = vfe2x_check_free_buffer(
+ VFE_MSG_OUTPUT_IRQ,
+ VFE_MSG_OUTPUT_PRIMARY);
+ }
+ }
+
if (op_mode & SNAPSHOT_MASK_MODE)
vfe_7x_config_axi(OUTPUT_PRIM,
&vfe2x_ctrl->snap, axio);
@@ -1872,7 +2199,7 @@
}
header = cmds_map[vfecmd.id].vfe_id;
queue = cmds_map[vfecmd.id].queue;
- rc = vfe2x_configure_pingpong_buffers(VFE_MSG_V2X_CAPTURE,
+ rc = vfe2x_configure_pingpong_buffers(VFE_MSG_CAPTURE,
VFE_MSG_OUTPUT_PRIMARY);
if (rc < 0) {
pr_err("%s error configuring pingpong buffers"
@@ -1954,15 +2281,21 @@
{"vfe_clk", 192000000},
};
-int msm_vfe_subdev_init(struct v4l2_subdev *sd,
- struct msm_cam_media_controller *mctl)
+int msm_vfe_subdev_init(struct v4l2_subdev *sd)
{
int rc = 0;
- v4l2_set_subdev_hostdata(sd, mctl);
+ struct msm_cam_media_controller *mctl;
+ mctl = v4l2_get_subdev_hostdata(sd);
+ if (mctl == NULL) {
+ pr_err("%s: mctl is NULL\n", __func__);
+ rc = -EINVAL;
+ goto mctl_failed;
+ }
spin_lock_init(&vfe2x_ctrl->sd_notify_lock);
spin_lock_init(&vfe2x_ctrl->table_lock);
spin_lock_init(&vfe2x_ctrl->vfe_msg_lock);
+ spin_lock_init(&vfe2x_ctrl->liveshot_enabled_lock);
init_waitqueue_head(&stopevent.wait);
INIT_LIST_HEAD(&vfe2x_ctrl->table_q);
INIT_LIST_HEAD(&vfe2x_ctrl->vfe_msg_q);
@@ -2011,11 +2344,11 @@
kfree(extdata);
init_fail:
extlen = 0;
+mctl_failed:
return rc;
}
-int msm_vpe_subdev_init(struct v4l2_subdev *sd,
- struct msm_cam_media_controller *mctl)
+int msm_vpe_subdev_init(struct v4l2_subdev *sd)
{
return 0;
}
@@ -2101,6 +2434,12 @@
sd_info.sd_index = 0;
sd_info.irq_num = 0;
msm_cam_register_subdev_node(&vfe2x_ctrl->subdev, &sd_info);
+
+ media_entity_init(&vfe2x_ctrl->subdev.entity, 0, NULL, 0);
+ vfe2x_ctrl->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ vfe2x_ctrl->subdev.entity.group_id = VFE_DEV;
+ vfe2x_ctrl->subdev.entity.name = pdev->name;
+ vfe2x_ctrl->subdev.entity.revision = vfe2x_ctrl->subdev.devnode->num;
return 0;
}
diff --git a/drivers/media/video/msm/msm_vfe7x27a_v4l2.h b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.h
similarity index 97%
rename from drivers/media/video/msm/msm_vfe7x27a_v4l2.h
rename to drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.h
index b7d6806..7693235 100644
--- a/drivers/media/video/msm/msm_vfe7x27a_v4l2.h
+++ b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.h
@@ -18,6 +18,9 @@
#include "msm.h"
#include "msm_vfe_stats_buf.h"
+/*8 DSP buffers, 3 - ping, pong, free*/
+#define FREE_BUF_ARR_SIZE 5
+
struct cmd_id_map {
uint32_t isp_id;
uint32_t vfe_id;
@@ -50,6 +53,10 @@
struct msm_free_buf ping;
struct msm_free_buf pong;
struct msm_free_buf free_buf;
+ /*Array for holding the free buffer if more than one*/
+ struct msm_free_buf free_buf_arr[FREE_BUF_ARR_SIZE];
+ int free_buf_cnt;
+ int frame_cnt;
} __packed;
struct prev_free_buf_info {
@@ -105,6 +112,9 @@
uint32_t stop_pending;
uint32_t update_pending;
+ spinlock_t liveshot_enabled_lock;
+ uint32_t liveshot_enabled;
+
/* v4l2 subdev */
struct v4l2_subdev subdev;
struct platform_device *pdev;
@@ -117,6 +127,7 @@
struct msm_stats_ops stats_ops;
unsigned long stats_we_buf_ptr[3];
unsigned long stats_af_buf_ptr[3];
+ int num_snap;
} __packed;
struct vfe_frame_extra {
diff --git a/drivers/media/video/msm/msm_vfe8x.c b/drivers/media/video/msm/vfe/msm_vfe8x.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe8x.c
rename to drivers/media/video/msm/vfe/msm_vfe8x.c
diff --git a/drivers/media/video/msm/msm_vfe8x.h b/drivers/media/video/msm/vfe/msm_vfe8x.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe8x.h
rename to drivers/media/video/msm/vfe/msm_vfe8x.h
diff --git a/drivers/media/video/msm/msm_vfe8x_proc.c b/drivers/media/video/msm/vfe/msm_vfe8x_proc.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe8x_proc.c
rename to drivers/media/video/msm/vfe/msm_vfe8x_proc.c
diff --git a/drivers/media/video/msm/msm_vfe8x_proc.h b/drivers/media/video/msm/vfe/msm_vfe8x_proc.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe8x_proc.h
rename to drivers/media/video/msm/vfe/msm_vfe8x_proc.h
diff --git a/drivers/media/video/msm/msm_vfe_stats_buf.c b/drivers/media/video/msm/vfe/msm_vfe_stats_buf.c
similarity index 99%
rename from drivers/media/video/msm/msm_vfe_stats_buf.c
rename to drivers/media/video/msm/vfe/msm_vfe_stats_buf.c
index 9e8f285..5fbcdb1 100644
--- a/drivers/media/video/msm/msm_vfe_stats_buf.c
+++ b/drivers/media/video/msm/vfe/msm_vfe_stats_buf.c
@@ -475,6 +475,8 @@
struct msm_stats_buf_info *info, struct ion_client *client)
{
int rc = 0;
+ D("%s: stats type : %d, idx : %d\n", __func__,
+ info->type, info->buf_idx);
rc = msm_stats_buf_prepare(stats_ctrl, info, client);
if (rc < 0) {
pr_err("%s: buf_prepare failed, rc = %d", __func__, rc);
diff --git a/drivers/media/video/msm/msm_vfe_stats_buf.h b/drivers/media/video/msm/vfe/msm_vfe_stats_buf.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe_stats_buf.h
rename to drivers/media/video/msm/vfe/msm_vfe_stats_buf.h
diff --git a/drivers/media/video/msm/wfd/enc-subdev.c b/drivers/media/video/msm/wfd/enc-subdev.c
index a5f2634..8b83a98 100644
--- a/drivers/media/video/msm/wfd/enc-subdev.c
+++ b/drivers/media/video/msm/wfd/enc-subdev.c
@@ -24,6 +24,8 @@
#define VID_ENC_MAX_ENCODER_CLIENTS 1
#define MAX_NUM_CTRLS 20
+static long venc_fill_outbuf(struct v4l2_subdev *sd, void *arg);
+
struct venc_inst {
struct video_client_ctx venc_client;
void *cbdata;
@@ -34,6 +36,8 @@
u32 width;
u32 height;
int secure;
+ struct mem_region unqueued_op_bufs;
+ bool streaming;
};
struct venc {
@@ -287,8 +291,10 @@
init_waitqueue_head(&client_ctx->msg_wait);
inst->op_buffer_done = vmops->op_buffer_done;
inst->ip_buffer_done = vmops->ip_buffer_done;
+ INIT_LIST_HEAD(&inst->unqueued_op_bufs.list);
inst->cbdata = vmops->cbdata;
inst->secure = vmops->secure;
+ inst->streaming = false;
if (vmops->secure) {
WFD_MSG_ERR("OPENING SECURE SESSION\n");
flags |= VCD_CP_SESSION;
@@ -423,11 +429,13 @@
{
struct venc_inst *inst = sd->dev_priv;
struct video_client_ctx *client_ctx = &inst->venc_client;
+ struct mem_region *curr = NULL, *temp = NULL;
int rc;
if (!client_ctx) {
WFD_MSG_ERR("Client context is NULL");
return -EINVAL;
}
+
rc = vcd_encode_start(client_ctx->vcd_handle);
if (rc) {
WFD_MSG_ERR("vcd_encode_start failed, rc = %d\n", rc);
@@ -437,6 +445,15 @@
if (client_ctx->event_status)
WFD_MSG_ERR("callback for vcd_encode_start returned error: %u",
client_ctx->event_status);
+
+ inst->streaming = true;
+ /* Push any buffers that we have held back */
+ list_for_each_entry_safe(curr, temp,
+ &inst->unqueued_op_bufs.list, list) {
+ venc_fill_outbuf(sd, curr);
+ list_del(&curr->list);
+ kfree(curr);
+ }
err:
return rc;
}
@@ -445,6 +462,7 @@
{
struct venc_inst *inst = sd->dev_priv;
struct video_client_ctx *client_ctx = &inst->venc_client;
+ struct mem_region *curr = NULL, *temp = NULL;
int rc;
if (!client_ctx) {
WFD_MSG_ERR("Client context is NULL");
@@ -452,6 +470,15 @@
}
rc = vcd_stop(client_ctx->vcd_handle);
wait_for_completion(&client_ctx->event);
+ inst->streaming = false;
+ /* Drop whatever frames we haven't queued */
+ list_for_each_entry_safe(curr, temp,
+ &inst->unqueued_op_bufs.list, list) {
+ inst->op_buffer_done(inst->cbdata, 0,
+ (struct vb2_buffer *)curr->cookie);
+ list_del(&curr->list);
+ kfree(curr);
+ }
return rc;
}
@@ -1841,22 +1868,29 @@
struct file *file;
s32 buffer_index = -1;
- user_vaddr = mregion->cookie;
- rc = vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT,
- true, &user_vaddr,
- &kernel_vaddr, &phy_addr, &pmem_fd, &file,
- &buffer_index);
- if (!rc) {
- WFD_MSG_ERR("Address lookup failed\n");
- goto err;
- }
- vcd_frame.virtual = (u8 *) kernel_vaddr;
- vcd_frame.frm_clnt_data = mregion->cookie;
- vcd_frame.alloc_len = mregion->size;
+ if (inst->streaming) {
+ user_vaddr = mregion->cookie;
+ rc = vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT,
+ true, &user_vaddr,
+ &kernel_vaddr, &phy_addr, &pmem_fd, &file,
+ &buffer_index);
+ if (!rc) {
+ WFD_MSG_ERR("Address lookup failed\n");
+ goto err;
+ }
+ vcd_frame.virtual = (u8 *) kernel_vaddr;
+ vcd_frame.frm_clnt_data = mregion->cookie;
+ vcd_frame.alloc_len = mregion->size;
- rc = vcd_fill_output_buffer(client_ctx->vcd_handle, &vcd_frame);
- if (rc)
- WFD_MSG_ERR("Failed to fill output buffer on encoder");
+ rc = vcd_fill_output_buffer(client_ctx->vcd_handle, &vcd_frame);
+ if (rc)
+ WFD_MSG_ERR("Failed to fill output buffer on encoder");
+ } else {
+ struct mem_region *temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+ *temp = *mregion;
+ INIT_LIST_HEAD(&temp->list);
+ list_add_tail(&temp->list, &inst->unqueued_op_bufs.list);
+ }
err:
return rc;
}
diff --git a/drivers/media/video/msm/wfd/mdp-subdev.c b/drivers/media/video/msm/wfd/mdp-subdev.c
index a6d244f..886b0ba 100644
--- a/drivers/media/video/msm/wfd/mdp-subdev.c
+++ b/drivers/media/video/msm/wfd/mdp-subdev.c
@@ -45,6 +45,13 @@
goto exit;
}
+ /*Tell HDMI daemon to open fb2*/
+ rc = kobject_uevent(&fbi->dev->kobj, KOBJ_ADD);
+ if (rc) {
+ WFD_MSG_ERR("Failed add to kobj");
+ goto exit;
+ }
+
msm_fb_writeback_init(fbi);
inst->mdp = fbi;
*cookie = inst;
@@ -71,6 +78,9 @@
rc = -ENODEV;
goto exit;
}
+ rc = kobject_uevent(&fbi->dev->kobj, KOBJ_ONLINE);
+ if (rc)
+ WFD_MSG_ERR("Failed to send ONLINE event\n");
}
exit:
return rc;
@@ -79,12 +89,19 @@
{
struct mdp_instance *inst = arg;
int rc = 0;
+ struct fb_info *fbi = NULL;
if (inst) {
rc = msm_fb_writeback_stop(inst->mdp);
if (rc) {
WFD_MSG_ERR("Failed to stop writeback mode\n");
return rc;
}
+ fbi = (struct fb_info *)inst->mdp;
+ rc = kobject_uevent(&fbi->dev->kobj, KOBJ_OFFLINE);
+ if (rc) {
+ WFD_MSG_ERR("Failed to send offline event\n");
+ return -EIO;
+ }
}
return 0;
}
diff --git a/drivers/media/video/msm_vidc/Kconfig b/drivers/media/video/msm_vidc/Kconfig
index 2957d45..6428180 100644
--- a/drivers/media/video/msm_vidc/Kconfig
+++ b/drivers/media/video/msm_vidc/Kconfig
@@ -2,7 +2,7 @@
# VIDEO CORE
#
-menuconfig MSM_VIDC
+menuconfig MSM_VIDC_V4L2
bool "Qualcomm MSM Video Core Driver"
depends on ARCH_MSM8974 && VIDEO_V4L2
default y
diff --git a/drivers/media/video/msm_vidc/Makefile b/drivers/media/video/msm_vidc/Makefile
index 12c61c9..e145229 100644
--- a/drivers/media/video/msm_vidc/Makefile
+++ b/drivers/media/video/msm_vidc/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_MSM_VIDC) := msm_v4l2_vidc.o \
+obj-$(CONFIG_MSM_VIDC_V4L2) := msm_v4l2_vidc.o \
msm_vidc_common.o \
msm_vidc.o \
msm_vdec.o \
diff --git a/drivers/media/video/msm_vidc/msm_smem.c b/drivers/media/video/msm_vidc/msm_smem.c
index 2913d74..76e3592 100644
--- a/drivers/media/video/msm_vidc/msm_smem.c
+++ b/drivers/media/video/msm_vidc/msm_smem.c
@@ -12,6 +12,7 @@
*/
#include <linux/slab.h>
+#include <mach/iommu_domains.h>
#include "msm_smem.h"
struct smem_client {
@@ -19,12 +20,44 @@
void *clnt;
};
+static int get_device_address(struct ion_client *clnt,
+ struct ion_handle *hndl, int domain_num, int partition_num,
+ unsigned long align, unsigned long *iova,
+ unsigned long *buffer_size, unsigned long flags)
+{
+ int rc;
+
+ if (!iova || !buffer_size || !hndl || !clnt) {
+ pr_err("Invalid params: %p, %p, %p, %p\n",
+ clnt, hndl, iova, buffer_size);
+ return -EINVAL;
+ }
+ if (align < 4096)
+ align = 4096;
+ flags |= UNCACHED;
+ rc = ion_map_iommu(clnt, hndl, domain_num, partition_num, align,
+ 0, iova, buffer_size, flags, 0);
+ if (rc)
+ pr_err("ion_map_iommu failed(%d).domain: %d,partition: %d\n",
+ rc, domain_num, partition_num);
+
+ return rc;
+}
+
+static void put_device_address(struct ion_client *clnt,
+ struct ion_handle *hndl, int domain_num)
+{
+ ion_unmap_iommu(clnt, hndl, domain_num, 0);
+}
+
static int ion_user_to_kernel(struct smem_client *client,
- int fd, u32 offset, struct msm_smem *mem)
+ int fd, u32 offset, int domain, int partition,
+ struct msm_smem *mem)
{
struct ion_handle *hndl;
unsigned long ionflag;
- size_t len;
+ unsigned long iova = 0;
+ unsigned long buffer_size = 0;
int rc = 0;
hndl = ion_import_dma_buf(client->clnt, fd);
if (IS_ERR_OR_NULL(hndl)) {
@@ -38,25 +71,29 @@
pr_err("Failed to get ion flags: %d", rc);
goto fail_map;
}
- rc = ion_phys(client->clnt, hndl, &mem->paddr, &len);
- if (rc) {
- pr_err("Failed to get physical address\n");
- goto fail_map;
- }
mem->kvaddr = ion_map_kernel(client->clnt, hndl, ionflag);
if (!mem->kvaddr) {
pr_err("Failed to map shared mem in kernel\n");
rc = -EIO;
goto fail_map;
}
+ mem->domain = domain;
+ mem->partition_num = partition;
+ rc = get_device_address(client->clnt, hndl, mem->domain,
+ mem->partition_num, 4096, &iova, &buffer_size, UNCACHED);
+ if (rc) {
+ pr_err("Failed to get device address: %d\n", rc);
+ goto fail_device_address;
+ }
mem->kvaddr += offset;
- mem->paddr += offset;
mem->mem_type = client->mem_type;
mem->smem_priv = hndl;
- mem->device_addr = mem->paddr;
- mem->size = len;
+ mem->device_addr = iova + offset;
+ mem->size = buffer_size;
return rc;
+fail_device_address:
+ ion_unmap_kernel(client->clnt, hndl);
fail_map:
ion_free(client->clnt, hndl);
fail_import_fd:
@@ -64,14 +101,19 @@
}
static int alloc_ion_mem(struct smem_client *client, size_t size,
- u32 align, u32 flags, struct msm_smem *mem)
+ u32 align, u32 flags, int domain, int partition,
+ struct msm_smem *mem)
{
struct ion_handle *hndl;
- size_t len;
+ unsigned long iova = 0;
+ unsigned long buffer_size = 0;
int rc = 0;
if (size == 0)
goto skip_mem_alloc;
flags = flags | ION_HEAP(ION_CP_MM_HEAP_ID);
+ if (align < 4096)
+ align = 4096;
+ size = (size + 4095) & (~4095);
hndl = ion_alloc(client->clnt, size, align, flags);
if (IS_ERR_OR_NULL(hndl)) {
pr_err("Failed to allocate shared memory = %p, %d, %d, 0x%x\n",
@@ -81,20 +123,27 @@
}
mem->mem_type = client->mem_type;
mem->smem_priv = hndl;
- if (ion_phys(client->clnt, hndl, &mem->paddr, &len)) {
- pr_err("Failed to get physical address\n");
- rc = -EIO;
- goto fail_map;
- }
- mem->device_addr = mem->paddr;
- mem->size = size;
+ mem->domain = domain;
+ mem->partition_num = partition;
mem->kvaddr = ion_map_kernel(client->clnt, hndl, 0);
if (!mem->kvaddr) {
pr_err("Failed to map shared mem in kernel\n");
rc = -EIO;
goto fail_map;
}
+ rc = get_device_address(client->clnt, hndl, mem->domain,
+ mem->partition_num, align, &iova, &buffer_size, UNCACHED);
+ if (rc) {
+ pr_err("Failed to get device address: %d\n", rc);
+ goto fail_device_address;
+ }
+ mem->device_addr = iova;
+ pr_err("device_address = 0x%lx, kvaddr = 0x%p\n",
+ mem->device_addr, mem->kvaddr);
+ mem->size = size;
return rc;
+fail_device_address:
+ ion_unmap_kernel(client->clnt, hndl);
fail_map:
ion_free(client->clnt, hndl);
fail_shared_mem_alloc:
@@ -104,6 +153,8 @@
static void free_ion_mem(struct smem_client *client, struct msm_smem *mem)
{
+ put_device_address(client->clnt,
+ mem->smem_priv, mem->domain);
ion_unmap_kernel(client->clnt, mem->smem_priv);
ion_free(client->clnt, mem->smem_priv);
}
@@ -122,7 +173,8 @@
ion_client_destroy(client->clnt);
}
-struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset)
+struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset,
+ int domain, int partition)
{
struct smem_client *client = clt;
int rc = 0;
@@ -138,7 +190,8 @@
}
switch (client->mem_type) {
case SMEM_ION:
- rc = ion_user_to_kernel(clt, fd, offset, mem);
+ rc = ion_user_to_kernel(clt, fd, offset,
+ domain, partition, mem);
break;
default:
pr_err("Mem type not supported\n");
@@ -177,7 +230,8 @@
return client;
};
-struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags)
+struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags,
+ int domain, int partition)
{
struct smem_client *client;
int rc = 0;
@@ -195,7 +249,8 @@
}
switch (client->mem_type) {
case SMEM_ION:
- rc = alloc_ion_mem(client, size, align, flags, mem);
+ rc = alloc_ion_mem(client, size, align, flags,
+ domain, partition, mem);
break;
default:
pr_err("Mem type not supported\n");
diff --git a/drivers/media/video/msm_vidc/msm_smem.h b/drivers/media/video/msm_vidc/msm_smem.h
index 84d12cc..b742c79 100644
--- a/drivers/media/video/msm_vidc/msm_smem.h
+++ b/drivers/media/video/msm_vidc/msm_smem.h
@@ -24,15 +24,17 @@
int mem_type;
size_t size;
void *kvaddr;
- unsigned long paddr;
unsigned long device_addr;
- /*Device address and others to follow*/
+ int domain;
+ int partition_num;
void *smem_priv;
};
void *msm_smem_new_client(enum smem_type mtype);
-struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags);
+struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags,
+ int domain, int partition);
void msm_smem_free(void *clt, struct msm_smem *mem);
void msm_smem_delete_client(void *clt);
-struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset);
+struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset, int
+ domain, int partition);
#endif
diff --git a/drivers/media/video/msm_vidc/msm_v4l2_vidc.c b/drivers/media/video/msm_vidc/msm_v4l2_vidc.c
index 6cd9e6b..cf1ebbb 100644
--- a/drivers/media/video/msm_vidc/msm_v4l2_vidc.c
+++ b/drivers/media/video/msm_vidc/msm_v4l2_vidc.c
@@ -21,7 +21,9 @@
#include <linux/debugfs.h>
#include <linux/version.h>
#include <linux/slab.h>
-
+#include <linux/of.h>
+#include <mach/iommu.h>
+#include <mach/iommu_domains.h>
#include <media/msm_vidc.h>
#include "msm_vidc_internal.h"
#include "vidc_hal_api.h"
@@ -30,6 +32,129 @@
#define BASE_DEVICE_NUMBER 32
#define MAX_EVENTS 30
+
+static struct msm_bus_vectors ocmem_init_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VIDEO_P0_OCMEM,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+static struct msm_bus_vectors ocmem_perf0_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VIDEO_P0_OCMEM,
+ .dst = MSM_BUS_SLAVE_OCMEM,
+ .ab = 176900000,
+ .ib = 221125000,
+ },
+};
+
+static struct msm_bus_vectors ocmem_perf1_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VIDEO_P0_OCMEM,
+ .dst = MSM_BUS_SLAVE_OCMEM,
+ .ab = 456200000,
+ .ib = 570250000,
+ },
+};
+
+static struct msm_bus_vectors ocmem_perf2_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VIDEO_P0_OCMEM,
+ .dst = MSM_BUS_SLAVE_OCMEM,
+ .ab = 864800000,
+ .ib = 1081000000,
+ },
+};
+
+static struct msm_bus_paths ocmem_perf_vectors[] = {
+ {
+ ARRAY_SIZE(ocmem_init_vectors),
+ ocmem_init_vectors,
+ },
+ {
+ ARRAY_SIZE(ocmem_perf0_vectors),
+ ocmem_perf0_vectors,
+ },
+ {
+ ARRAY_SIZE(ocmem_perf1_vectors),
+ ocmem_perf1_vectors,
+ },
+ {
+ ARRAY_SIZE(ocmem_perf2_vectors),
+ ocmem_perf2_vectors,
+ },
+};
+
+static struct msm_bus_scale_pdata ocmem_bus_data = {
+ .usecase = ocmem_perf_vectors,
+ .num_usecases = ARRAY_SIZE(ocmem_perf_vectors),
+ .name = "msm_vidc_ocmem",
+};
+
+static struct msm_bus_vectors vcodec_init_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VIDEO_P0,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+static struct msm_bus_vectors vcodec_perf0_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VIDEO_P0,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 110000000,
+ .ib = 137500000,
+ },
+};
+
+static struct msm_bus_vectors vcodec_perf1_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VIDEO_P0,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 268000000,
+ .ib = 335000000,
+ },
+};
+
+static struct msm_bus_vectors vcodec_perf2_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VIDEO_P0,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 505000000,
+ .ib = 631250000,
+ },
+};
+
+static struct msm_bus_paths vcodec_perf_vectors[] = {
+ {
+ ARRAY_SIZE(vcodec_init_vectors),
+ vcodec_init_vectors,
+ },
+ {
+ ARRAY_SIZE(vcodec_perf0_vectors),
+ vcodec_perf0_vectors,
+ },
+ {
+ ARRAY_SIZE(vcodec_perf1_vectors),
+ vcodec_perf1_vectors,
+ },
+ {
+ ARRAY_SIZE(vcodec_perf2_vectors),
+ vcodec_perf2_vectors,
+ },
+};
+
+static struct msm_bus_scale_pdata vcodec_bus_data = {
+ .usecase = vcodec_perf_vectors,
+ .num_usecases = ARRAY_SIZE(vcodec_perf_vectors),
+ .name = "msm_vidc_vcodec",
+};
+
struct msm_vidc_drv *vidc_driver;
struct buffer_info {
@@ -277,8 +402,10 @@
goto exit;
}
handle = msm_smem_user_to_kernel(v4l2_inst->mem_client,
- b->m.planes[i].reserved[0],
- b->m.planes[i].reserved[1]);
+ b->m.planes[i].reserved[0],
+ b->m.planes[i].reserved[1],
+ vidc_inst->core->resources.io_map[NS_MAP].domain,
+ 0);
if (!handle) {
pr_err("Failed to get device buffer address\n");
kfree(binfo);
@@ -377,6 +504,7 @@
struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
return msm_vidc_decoder_cmd((void *)vidc_inst, dec);
}
+
static const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = {
.vidioc_querycap = msm_v4l2_querycap,
.vidioc_enum_fmt_vid_cap_mplane = msm_v4l2_enum_fmt,
@@ -420,36 +548,231 @@
{
}
+static size_t read_u32_array(struct platform_device *pdev,
+ char *name, u32 *arr, size_t size)
+{
+ int len;
+ size_t sz = 0;
+ struct device_node *np = pdev->dev.of_node;
+ if (!of_get_property(np, name, &len)) {
+ pr_err("Failed to read %s from device tree\n",
+ name);
+ goto fail_read;
+ }
+ sz = len / sizeof(u32);
+ if (sz <= 0) {
+ pr_err("%s not specified in device tree\n", name);
+ goto fail_read;
+ }
+ if (sz > size) {
+ pr_err("Not enough memory to store %s values\n", name);
+ goto fail_read;
+ }
+ if (of_property_read_u32_array(np, name, arr, sz)) {
+ pr_err("error while reading %s from device tree\n",
+ name);
+ goto fail_read;
+ }
+ return sz;
+fail_read:
+ sz = 0;
+ return sz;
+}
+
+static int register_iommu_domains(struct platform_device *pdev,
+ struct msm_vidc_core *core)
+{
+ size_t len;
+ struct msm_iova_partition partition;
+ struct msm_iova_layout layout;
+ int rc = 0;
+ int i;
+ struct iommu_info *io_map = core->resources.io_map;
+ strlcpy(io_map[CP_MAP].name, "vidc-cp-map",
+ sizeof(io_map[CP_MAP].name));
+ strlcpy(io_map[CP_MAP].ctx, "venus_cp",
+ sizeof(io_map[CP_MAP].ctx));
+ strlcpy(io_map[NS_MAP].name, "vidc-ns-map",
+ sizeof(io_map[NS_MAP].name));
+ strlcpy(io_map[NS_MAP].ctx, "venus_ns",
+ sizeof(io_map[NS_MAP].ctx));
+
+ for (i = 0; i < MAX_MAP; i++) {
+ len = read_u32_array(pdev, io_map[i].name,
+ io_map[i].addr_range,
+ (sizeof(io_map[i].addr_range)/sizeof(u32)));
+ if (!len) {
+ pr_err("Error in reading cp address range\n");
+ rc = -EINVAL;
+ break;
+ }
+ partition.start = io_map[i].addr_range[0];
+ partition.size = io_map[i].addr_range[1];
+ layout.partitions = &partition;
+ layout.npartitions = 1;
+ layout.client_name = io_map[i].name;
+ layout.domain_flags = 0;
+ pr_debug("Registering domain with: %lx, %lx, %s\n",
+ partition.start, partition.size, layout.client_name);
+ io_map[i].domain = msm_register_domain(&layout);
+ if (io_map[i].domain < 0) {
+ pr_err("Failed to register cp domain\n");
+ rc = -EINVAL;
+ break;
+ }
+ }
+ /* There is no api provided as msm_unregister_domain, so
+ * we are not able to unregister the previously
+ * registered domains if any domain registration fails.*/
+ BUG_ON(i < MAX_MAP);
+ return rc;
+}
+
+static inline int msm_vidc_init_clocks(struct platform_device *pdev,
+ struct msm_vidc_core *core)
+{
+ struct core_clock *cl;
+ int i;
+ int rc = 0;
+ struct core_clock *clock;
+ if (!core) {
+ pr_err("Invalid params: %p\n", core);
+ return -EINVAL;
+ }
+ clock = core->resources.clock;
+ strlcpy(clock[VCODEC_CLK].name, "core_clk",
+ sizeof(clock[VCODEC_CLK].name));
+ strlcpy(clock[VCODEC_AHB_CLK].name, "iface_clk",
+ sizeof(clock[VCODEC_AHB_CLK].name));
+ strlcpy(clock[VCODEC_AXI_CLK].name, "bus_clk",
+ sizeof(clock[VCODEC_AXI_CLK].name));
+ strlcpy(clock[VCODEC_OCMEM_CLK].name, "mem_clk",
+ sizeof(clock[VCODEC_OCMEM_CLK].name));
+
+ clock[VCODEC_CLK].count = read_u32_array(pdev,
+ "load-freq-tbl", (u32 *)clock[VCODEC_CLK].load_freq_tbl,
+ (sizeof(clock[VCODEC_CLK].load_freq_tbl)/sizeof(u32)));
+ clock[VCODEC_CLK].count /= 2;
+ pr_err("NOTE: Count = %d\n", clock[VCODEC_CLK].count);
+ if (!clock[VCODEC_CLK].count) {
+ pr_err("Failed to read clock frequency\n");
+ goto fail_init_clocks;
+ }
+ for (i = 0; i < clock[VCODEC_CLK].count; i++) {
+ pr_err("NOTE: load = %d, freq = %d\n",
+ clock[VCODEC_CLK].load_freq_tbl[i].load,
+ clock[VCODEC_CLK].load_freq_tbl[i].freq
+ );
+ }
+
+ for (i = 0; i < VCODEC_MAX_CLKS; i++) {
+ cl = &core->resources.clock[i];
+ if (!cl->clk) {
+ cl->clk = devm_clk_get(&pdev->dev, cl->name);
+ if (IS_ERR_OR_NULL(cl->clk)) {
+ pr_err("Failed to get clock: %s\n", cl->name);
+ rc = PTR_ERR(cl->clk);
+ break;
+ }
+ }
+ }
+
+ if (i < VCODEC_MAX_CLKS) {
+ for (--i; i >= 0; i--) {
+ cl = &core->resources.clock[i];
+ clk_put(cl->clk);
+ }
+ }
+fail_init_clocks:
+ return rc;
+}
+
+static inline void msm_vidc_deinit_clocks(struct msm_vidc_core *core)
+{
+ int i;
+ if (!core) {
+ pr_err("Invalid args\n");
+ return;
+ }
+ for (i = 0; i < VCODEC_MAX_CLKS; i++)
+ clk_put(core->resources.clock[i].clk);
+}
+
static int msm_vidc_initialize_core(struct platform_device *pdev,
struct msm_vidc_core *core)
{
struct resource *res;
int i = 0;
+ int rc = 0;
+ struct on_chip_mem *ocmem;
if (!core)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
pr_err("Failed to get IORESOURCE_MEM\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto core_init_failed;
}
core->register_base = res->start;
core->register_size = resource_size(res);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
pr_err("Failed to get IORESOURCE_IRQ\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto core_init_failed;
}
core->irq = res->start;
INIT_LIST_HEAD(&core->instances);
mutex_init(&core->sync_lock);
spin_lock_init(&core->lock);
- core->base_addr = 0x14f00000;
+ core->base_addr = 0x0;
core->state = VIDC_CORE_UNINIT;
for (i = SYS_MSG_INDEX(SYS_MSG_START);
i <= SYS_MSG_INDEX(SYS_MSG_END); i++) {
init_completion(&core->completions[i]);
}
- return 0;
+ rc = msm_vidc_init_clocks(pdev, core);
+ if (rc) {
+ pr_err("Failed to init clocks\n");
+ rc = -ENODEV;
+ goto core_init_failed;
+ }
+ core->resources.bus_info.vcodec_handle =
+ msm_bus_scale_register_client(&vcodec_bus_data);
+ if (!core->resources.bus_info.vcodec_handle) {
+ pr_err("Failed to register bus scale client\n");
+ goto fail_register_vcodec_bus;
+ }
+ core->resources.bus_info.ocmem_handle =
+ msm_bus_scale_register_client(&ocmem_bus_data);
+ if (!core->resources.bus_info.ocmem_handle) {
+ pr_err("Failed to register bus scale client\n");
+ goto fail_register_ocmem;
+ }
+ rc = register_iommu_domains(pdev, core);
+ if (rc) {
+ pr_err("Failed to register iommu domains: %d\n", rc);
+ goto fail_register_domains;
+ }
+ ocmem = &core->resources.ocmem;
+ ocmem->vidc_ocmem_nb.notifier_call = msm_vidc_ocmem_notify_handler;
+ ocmem->handle =
+ ocmem_notifier_register(OCMEM_VIDEO, &ocmem->vidc_ocmem_nb);
+ if (!ocmem->handle) {
+ pr_warn("Failed to register OCMEM notifier.");
+ pr_warn(" Performance will be impacted\n");
+ }
+ return rc;
+fail_register_domains:
+ msm_bus_scale_unregister_client(
+ core->resources.bus_info.ocmem_handle);
+fail_register_ocmem:
+ msm_bus_scale_unregister_client(
+ core->resources.bus_info.vcodec_handle);
+fail_register_vcodec_bus:
+ msm_vidc_deinit_clocks(core);
+core_init_failed:
+ return rc;
}
static int __devinit msm_vidc_probe(struct platform_device *pdev)
@@ -541,10 +864,15 @@
{
int rc = 0;
struct msm_vidc_core *core = pdev->dev.platform_data;
+ msm_bus_scale_unregister_client(core->resources.bus_info.vcodec_handle);
+ msm_bus_scale_unregister_client(core->resources.bus_info.ocmem_handle);
vidc_hal_delete_device(core->device);
video_unregister_device(&core->vdev[MSM_VIDC_ENCODER].vdev);
video_unregister_device(&core->vdev[MSM_VIDC_DECODER].vdev);
v4l2_device_unregister(&core->v4l2_dev);
+ if (core->resources.ocmem.handle)
+ ocmem_notifier_unregister(core->resources.ocmem.handle,
+ &core->resources.ocmem.vidc_ocmem_nb);
kfree(core);
return rc;
}
diff --git a/drivers/media/video/msm_vidc/msm_vdec.c b/drivers/media/video/msm_vidc/msm_vdec.c
index dca9f1d..d2b1acd 100644
--- a/drivers/media/video/msm_vidc/msm_vdec.c
+++ b/drivers/media/video/msm_vidc/msm_vdec.c
@@ -22,6 +22,8 @@
#define MAX_PLANES 1
#define DEFAULT_HEIGHT 720
#define DEFAULT_WIDTH 1280
+#define MAX_SUPPORTED_WIDTH 4096
+#define MAX_SUPPORTED_HEIGHT 2160
#define MIN_NUM_OUTPUT_BUFFERS 2
#define MAX_NUM_OUTPUT_BUFFERS 6
@@ -169,7 +171,7 @@
static u32 get_frame_size_compressed(int plane,
u32 height, u32 width)
{
- return height * width * 3/2;
+ return (MAX_SUPPORTED_WIDTH * MAX_SUPPORTED_HEIGHT * 3/2)/2;
}
static const struct msm_vidc_format vdec_formats[] = {
@@ -222,6 +224,14 @@
.type = OUTPUT_PORT,
},
{
+ .name = "VP8",
+ .description = "VP8 compressed format",
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .num_planes = 1,
+ .get_frame_size = get_frame_size_compressed,
+ .type = OUTPUT_PORT,
+ },
+ {
.name = "YCrCb Semiplanar 4:2:0",
.description = "Y/CrCb 4:2:0",
.fourcc = V4L2_PIX_FMT_NV21,
@@ -299,8 +309,21 @@
buffer_info.num_buffers = 1;
buffer_info.align_device_addr =
b->m.planes[i].m.userptr;
- buffer_info.extradata_size = 0;
- buffer_info.extradata_addr = 0;
+ if (!inst->extradata_handle) {
+ inst->extradata_handle =
+ msm_smem_alloc(inst->mem_client,
+ 4096 * 1024, 1, 0,
+ inst->core->resources.io_map[NS_MAP].domain,
+ 0);
+ if (!inst->extradata_handle) {
+ pr_err("Failed to allocate extradta memory\n");
+ rc = -ENOMEM;
+ break;
+ }
+ }
+ buffer_info.extradata_addr =
+ inst->extradata_handle->device_addr;
+ buffer_info.extradata_size = 4096 * 1024;
rc = vidc_hal_session_set_buffers((void *)inst->session,
&buffer_info);
if (rc) {
@@ -336,7 +359,8 @@
buffer_info.num_buffers = 1;
buffer_info.align_device_addr =
b->m.planes[i].m.userptr;
- buffer_info.extradata_addr = 0;
+ buffer_info.extradata_addr =
+ inst->extradata_handle->device_addr;
rc = vidc_hal_session_release_buffers(
(void *)inst->session, &buffer_info);
if (rc)
@@ -443,8 +467,8 @@
return -EINVAL;
}
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- inst->width = f->fmt.pix_mp.width;
- inst->height = f->fmt.pix_mp.height;
+ inst->prop.width = f->fmt.pix_mp.width;
+ inst->prop.height = f->fmt.pix_mp.height;
fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
ARRAY_SIZE(vdec_formats), f->fmt.pix_mp.pixelformat,
CAPTURE_PORT);
@@ -569,8 +593,8 @@
break;
}
frame_sz.buffer_type = HAL_BUFFER_OUTPUT;
- frame_sz.width = inst->width;
- frame_sz.height = inst->height;
+ frame_sz.width = inst->prop.width;
+ frame_sz.height = inst->prop.height;
pr_debug("width = %d, height = %d\n",
frame_sz.width, frame_sz.height);
rc = vidc_hal_session_set_property((void *)inst->session,
@@ -593,7 +617,7 @@
inst->buff_req.buffer[1].buffer_alignment);
for (i = 0; i < *num_planes; i++) {
sizes[i] = inst->fmts[CAPTURE_PORT]->get_frame_size(
- i, inst->height, inst->width);
+ i, inst->prop.height, inst->prop.width);
}
break;
@@ -611,96 +635,20 @@
unsigned long flags;
struct vb2_buf_entry *temp;
struct list_head *ptr, *next;
- struct v4l2_control control;
- struct hal_nal_stream_format_supported stream_format;
- struct hal_enable_picture enable_picture;
- struct hal_enable hal_property;
- u32 control_idx = 0;
- enum hal_property property_id = 0;
- u32 property_val = 0;
- void *pdata;
+ rc = msm_comm_try_get_bufreqs(inst);
+ if (rc) {
+ pr_err("Failed to get buffer requirements : %d\n", rc);
+ goto fail_start;
+ }
rc = msm_comm_set_scratch_buffers(inst);
if (rc) {
pr_err("Failed to set scratch buffers: %d\n", rc);
goto fail_start;
}
- for (; control_idx < NUM_CTRLS; control_idx++) {
- control.id = msm_vdec_ctrls[control_idx].id;
- rc = v4l2_g_ctrl(&inst->ctrl_handler, &control);
- if (rc) {
- pr_err("Failed to get control value for ID=%d\n",
- msm_vdec_ctrls[control_idx].id);
- } else {
- property_id = 0;
- switch (control.id) {
- case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT:
- property_id =
- HAL_PARAM_NAL_STREAM_FORMAT_SELECT;
- stream_format.nal_stream_format_supported =
- (0x00000001 << control.value);
- pdata = &stream_format;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_OUTPUT_ORDER:
- property_id = HAL_PARAM_VDEC_OUTPUT_ORDER;
- property_val = control.value;
- pdata = &property_val;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_PICTURE_TYPE:
- property_id =
- HAL_PARAM_VDEC_PICTURE_TYPE_DECODE;
- enable_picture.picture_type = control.value;
- pdata = &enable_picture;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_KEEP_ASPECT_RATIO:
- property_id =
- HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO;
- hal_property.enable = control.value;
- pdata = &hal_property;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_POST_LOOP_DEBLOCKER_MODE:
- property_id =
- HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
- hal_property.enable = control.value;
- pdata = &hal_property;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_DIVX_FORMAT:
- property_id = HAL_PARAM_DIVX_FORMAT;
- property_val = control.value;
- pdata = &property_val;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_MB_ERROR_MAP_REPORTING:
- property_id =
- HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING;
- hal_property.enable = control.value;
- pdata = &hal_property;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER:
- property_id =
- HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
- hal_property.enable = control.value;
- pdata = &hal_property;
- break;
- default:
- break;
- }
- if (property_id) {
- pr_err("Control: HAL property=%x,ctrl_id=%x,ctrl_value=%d\n",
- property_id,
- msm_vdec_ctrls[control_idx].id,
- control.value);
- rc = vidc_hal_session_set_property((void *)
- inst->session, property_id,
- pdata);
- }
- if (rc)
- pr_err("Failed to set hal property for framesize\n");
- }
- }
-
rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
if (rc) {
pr_err("Failed to move inst: %p to start done state\n",
- inst);
+ inst);
goto fail_start;
}
spin_lock_irqsave(&inst->lock, flags);
@@ -734,11 +682,14 @@
pr_debug("Streamon called on: %d capability\n", q->type);
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (msm_comm_scale_clocks(inst->core))
+ pr_err("Failed to scale clocks. Performance/power might be impacted\n");
if (inst->vb2_bufq[CAPTURE_PORT].streaming)
rc = start_streaming(inst);
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- inst->in_reconfig = false;
+ if (msm_comm_scale_clocks(inst->core))
+ pr_err("Failed to scale clocks. Performance/power might be impacted\n");
if (inst->vb2_bufq[OUTPUT_PORT].streaming)
rc = start_streaming(inst);
break;
@@ -809,14 +760,102 @@
}
inst->fmts[OUTPUT_PORT] = &vdec_formats[1];
inst->fmts[CAPTURE_PORT] = &vdec_formats[0];
- inst->height = DEFAULT_HEIGHT;
- inst->width = DEFAULT_WIDTH;
+ inst->prop.height = DEFAULT_HEIGHT;
+ inst->prop.width = DEFAULT_WIDTH;
+ inst->prop.fps = 30;
return rc;
}
static int msm_vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
{
- return 0;
+ int rc = 0;
+ struct v4l2_control control;
+ struct hal_nal_stream_format_supported stream_format;
+ struct hal_enable_picture enable_picture;
+ struct hal_enable hal_property;/*, prop;*/
+ u32 control_idx = 0;
+ enum hal_property property_id = 0;
+ u32 property_val = 0;
+ void *pdata;
+ struct msm_vidc_inst *inst = container_of(ctrl->handler,
+ struct msm_vidc_inst, ctrl_handler);
+ rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
+
+ if (rc) {
+ pr_err("Failed to move inst: %p to start done state\n",
+ inst);
+ goto failed_open_done;
+ }
+
+ control.id = ctrl->id;
+ control.value = ctrl->val;
+
+ switch (control.id) {
+ case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT:
+ property_id =
+ HAL_PARAM_NAL_STREAM_FORMAT_SELECT;
+ stream_format.nal_stream_format_supported =
+ (0x00000001 << control.value);
+ pdata = &stream_format;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_OUTPUT_ORDER:
+ property_id = HAL_PARAM_VDEC_OUTPUT_ORDER;
+ property_val = control.value;
+ pdata = &property_val;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_PICTURE_TYPE:
+ property_id =
+ HAL_PARAM_VDEC_PICTURE_TYPE_DECODE;
+ enable_picture.picture_type = control.value;
+ pdata = &enable_picture;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_KEEP_ASPECT_RATIO:
+ property_id =
+ HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO;
+ hal_property.enable = control.value;
+ pdata = &hal_property;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_POST_LOOP_DEBLOCKER_MODE:
+ property_id =
+ HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
+ hal_property.enable = control.value;
+ pdata = &hal_property;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_DIVX_FORMAT:
+ property_id = HAL_PARAM_DIVX_FORMAT;
+ property_val = control.value;
+ pdata = &property_val;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_MB_ERROR_MAP_REPORTING:
+ property_id =
+ HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING;
+ hal_property.enable = control.value;
+ pdata = &hal_property;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER:
+ property_id =
+ HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
+ hal_property.enable = control.value;
+ pdata = &hal_property;
+ break;
+ default:
+ break;
+ }
+ if (property_id) {
+ pr_debug("Control: HAL property=%d,ctrl_id=%d,ctrl_value=%d\n",
+ property_id,
+ msm_vdec_ctrls[control_idx].id,
+ control.value);
+ rc = vidc_hal_session_set_property((void *)
+ inst->session, property_id,
+ pdata);
+ }
+ if (rc)
+ pr_err("Failed to set hal property for framesize\n");
+
+failed_open_done:
+
+ return rc;
}
static int msm_vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
diff --git a/drivers/media/video/msm_vidc/msm_venc.c b/drivers/media/video/msm_vidc/msm_venc.c
index e835aaa..fbd3378 100644
--- a/drivers/media/video/msm_vidc/msm_venc.c
+++ b/drivers/media/video/msm_vidc/msm_venc.c
@@ -23,12 +23,13 @@
#define MIN_NUM_OUTPUT_BUFFERS 2
#define MAX_NUM_OUTPUT_BUFFERS 8
#define MIN_BIT_RATE 64
-#define MAX_BIT_RATE 20000
+#define MAX_BIT_RATE 160000
#define DEFAULT_BIT_RATE 64
#define BIT_RATE_STEP 1
#define MIN_FRAME_RATE 1
-#define MAX_FRAME_RATE 120
+#define MAX_FRAME_RATE 240
#define DEFAULT_FRAME_RATE 30
+#define DEFAULT_IR_MBS 30
#define MAX_SLICE_BYTE_SIZE 1024
#define MIN_SLICE_BYTE_SIZE 1024
#define MAX_SLICE_MB_SIZE 300
@@ -62,6 +63,30 @@
"Model 2",
NULL
};
+
+static const char *const h263_level[] = {
+ "1.0",
+ "2.0",
+ "3.0",
+ "4.0",
+ "4.5",
+ "5.0",
+ "6.0",
+ "7.0",
+};
+
+static const char *const h263_profile[] = {
+ "Baseline",
+ "H320 Coding",
+ "Backward Compatible",
+ "ISWV2",
+ "ISWV3",
+ "High Compression",
+ "Internet",
+ "Interlace",
+ "High Latency",
+};
+
static const struct msm_vidc_ctrl msm_venc_ctrls[] = {
{
.id = V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE,
@@ -211,10 +236,48 @@
.minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
.maximum = V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
.default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
- .step = 1,
+ .step = 0,
.menu_skip_mask = 0,
},
{
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE,
+ .name = "H263 Profile",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE,
+ .maximum = V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHLATENCY,
+ .default_value = V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE,
+ .menu_skip_mask = ~(
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_H320CODING) |
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BACKWARDCOMPATIBLE) |
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV2) |
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV3) |
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHCOMPRESSION) |
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERNET) |
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERLACE) |
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHLATENCY)
+ ),
+ .qmenu = h263_profile,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_H263_LEVEL,
+ .name = "H263 Level",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0,
+ .maximum = V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_7_0,
+ .default_value = V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0,
+ .menu_skip_mask = ~(
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0) |
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_2_0) |
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_3_0) |
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_0) |
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_5_0) |
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_6_0) |
+ (1 << V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_7_0)
+ ),
+ .qmenu = h263_level,
+ },
+ {
.id = V4L2_CID_MPEG_VIDC_VIDEO_ROTATION,
.name = "Rotation",
.type = V4L2_CTRL_TYPE_MENU,
@@ -409,14 +472,20 @@
venc_h264_profile_level = {HAL_H264_PROFILE_BASELINE,
HAL_H264_LEVEL_1};
static struct hal_profile_level
- venc_mpeg4_profile_level = {HAL_H264_PROFILE_BASELINE,
- HAL_H264_LEVEL_1};
+ venc_mpeg4_profile_level = {HAL_MPEG4_PROFILE_SIMPLE,
+ HAL_MPEG4_LEVEL_0};
+static struct hal_profile_level
+ venc_h263_profile_level = {HAL_H263_PROFILE_BASELINE,
+ HAL_H263_LEVEL_10};
static struct hal_h264_entropy_control
venc_h264_entropy_control = {HAL_H264_ENTROPY_CAVLC,
HAL_H264_CABAC_MODEL_0};
static struct hal_multi_slice_control
venc_multi_slice_control = {HAL_MULTI_SLICE_OFF ,
0};
+static struct hal_intra_refresh
+ venc_intra_refresh = {HAL_INTRA_REFRESH_NONE ,
+ DEFAULT_IR_MBS, DEFAULT_IR_MBS, DEFAULT_IR_MBS};
static const struct msm_vidc_format venc_formats[] = {
{
@@ -452,6 +521,14 @@
.type = CAPTURE_PORT,
},
{
+ .name = "VP8",
+ .description = "VP8 compressed format",
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .num_planes = 1,
+ .get_frame_size = get_frame_size_compressed,
+ .type = CAPTURE_PORT,
+ },
+ {
.name = "YCrCb Semiplanar 4:2:0",
.description = "Y/CrCb 4:2:0",
.fourcc = V4L2_PIX_FMT_NV21,
@@ -484,7 +561,7 @@
*num_buffers = MIN_NUM_OUTPUT_BUFFERS;
for (i = 0; i < *num_planes; i++) {
sizes[i] = inst->fmts[OUTPUT_PORT]->get_frame_size(
- i, inst->height, inst->width);
+ i, inst->prop.height, inst->prop.width);
}
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
@@ -494,8 +571,8 @@
break;
}
frame_sz.buffer_type = HAL_BUFFER_INPUT;
- frame_sz.width = inst->width;
- frame_sz.height = inst->height;
+ frame_sz.width = inst->prop.width;
+ frame_sz.height = inst->prop.height;
pr_debug("width = %d, height = %d\n",
frame_sz.width, frame_sz.height);
rc = vidc_hal_session_set_property((void *)inst->session,
@@ -526,7 +603,7 @@
inst->buff_req.buffer[0].buffer_count_actual);
for (i = 0; i < *num_planes; i++) {
sizes[i] = inst->fmts[CAPTURE_PORT]->get_frame_size(
- i, inst->height, inst->width);
+ i, inst->prop.height, inst->prop.width);
}
break;
@@ -586,10 +663,14 @@
pr_debug("Streamon called on: %d capability\n", q->type);
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (msm_comm_scale_clocks(inst->core))
+ pr_err("Failed to scale clocks. Performance/power might be impacted\n");
if (inst->vb2_bufq[CAPTURE_PORT].streaming)
rc = start_streaming(inst);
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (msm_comm_scale_clocks(inst->core))
+ pr_err("Failed to scale clocks. Performance/power might be impacted\n");
if (inst->vb2_bufq[OUTPUT_PORT].streaming)
rc = start_streaming(inst);
break;
@@ -892,6 +973,84 @@
pr_debug("\nLevel: %d\n",
profile_level.level);
break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE:
+ property_id =
+ HAL_PARAM_PROFILE_LEVEL_CURRENT;
+
+ switch (control.value) {
+ case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE:
+ control.value = HAL_H263_PROFILE_BASELINE;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_H320CODING:
+ control.value = HAL_H263_PROFILE_H320CODING;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BACKWARDCOMPATIBLE:
+ control.value = HAL_H263_PROFILE_BACKWARDCOMPATIBLE;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV2:
+ control.value = HAL_H263_PROFILE_ISWV2;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV3:
+ control.value = HAL_H263_PROFILE_ISWV3;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHCOMPRESSION:
+ control.value = HAL_H263_PROFILE_HIGHCOMPRESSION;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERNET:
+ control.value = HAL_H263_PROFILE_INTERNET;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERLACE:
+ control.value = HAL_H263_PROFILE_INTERLACE;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHLATENCY:
+ control.value = HAL_H263_PROFILE_HIGHLATENCY;
+ break;
+ default:
+ break;
+ }
+ profile_level.profile = control.value;
+ venc_h263_profile_level.profile = control.value;
+ profile_level.level = venc_h263_profile_level.level;
+ pdata = &profile_level;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_H263_LEVEL:
+ property_id =
+ HAL_PARAM_PROFILE_LEVEL_CURRENT;
+
+ switch (control.value) {
+ case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0:
+ control.value = HAL_H263_LEVEL_10;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_2_0:
+ control.value = HAL_H263_LEVEL_20;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_3_0:
+ control.value = HAL_H263_LEVEL_30;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_0:
+ control.value = HAL_H263_LEVEL_40;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_5:
+ control.value = HAL_H263_LEVEL_45;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_5_0:
+ control.value = HAL_H263_LEVEL_50;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_6_0:
+ control.value = HAL_H263_LEVEL_60;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_7_0:
+ control.value = HAL_H263_LEVEL_70;
+ break;
+ default:
+ break;
+ }
+
+ profile_level.level = control.value;
+ venc_h263_profile_level.level = control.value;
+ profile_level.profile = venc_h263_profile_level.profile;
+ pdata = &profile_level;
+ break;
case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION:
property_id =
HAL_CONFIG_VPE_OPERATIONS;
@@ -948,24 +1107,40 @@
property_id =
HAL_PARAM_VENC_INTRA_REFRESH;
intra_refresh.mode = control.value;
+ intra_refresh.air_mbs = venc_intra_refresh.air_mbs;
+ intra_refresh.air_ref = venc_intra_refresh.air_ref;
+ intra_refresh.cir_mbs = venc_intra_refresh.cir_mbs;
+ venc_intra_refresh.mode = intra_refresh.mode;
pdata = &intra_refresh;
break;
case V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS:
property_id =
HAL_PARAM_VENC_INTRA_REFRESH;
intra_refresh.air_mbs = control.value;
+ intra_refresh.mode = venc_intra_refresh.mode;
+ intra_refresh.air_ref = venc_intra_refresh.air_ref;
+ intra_refresh.cir_mbs = venc_intra_refresh.cir_mbs;
+ venc_intra_refresh.air_mbs = control.value;
pdata = &intra_refresh;
break;
case V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF:
property_id =
HAL_PARAM_VENC_INTRA_REFRESH;
intra_refresh.air_ref = control.value;
+ intra_refresh.air_mbs = venc_intra_refresh.air_mbs;
+ intra_refresh.mode = venc_intra_refresh.mode;
+ intra_refresh.cir_mbs = venc_intra_refresh.cir_mbs;
+ venc_intra_refresh.air_ref = control.value;
pdata = &intra_refresh;
break;
case V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS:
property_id =
HAL_PARAM_VENC_INTRA_REFRESH;
intra_refresh.cir_mbs = control.value;
+ intra_refresh.air_mbs = venc_intra_refresh.air_mbs;
+ intra_refresh.air_ref = venc_intra_refresh.air_ref;
+ intra_refresh.mode = venc_intra_refresh.mode;
+ venc_intra_refresh.cir_mbs = control.value;
pdata = &intra_refresh;
break;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
@@ -1024,8 +1199,9 @@
}
inst->fmts[CAPTURE_PORT] = &venc_formats[1];
inst->fmts[OUTPUT_PORT] = &venc_formats[0];
- inst->height = DEFAULT_HEIGHT;
- inst->width = DEFAULT_WIDTH;
+ inst->prop.height = DEFAULT_HEIGHT;
+ inst->prop.width = DEFAULT_WIDTH;
+ inst->prop.height = 30;
return rc;
}
@@ -1104,8 +1280,8 @@
goto exit;
}
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- inst->width = f->fmt.pix_mp.width;
- inst->height = f->fmt.pix_mp.height;
+ inst->prop.width = f->fmt.pix_mp.width;
+ inst->prop.height = f->fmt.pix_mp.height;
fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
ARRAY_SIZE(venc_formats), f->fmt.pix_mp.pixelformat,
OUTPUT_PORT);
@@ -1156,11 +1332,12 @@
if (fmt) {
f->fmt.pix_mp.pixelformat = fmt->fourcc;
- f->fmt.pix_mp.height = inst->height;
- f->fmt.pix_mp.width = inst->width;
+ f->fmt.pix_mp.height = inst->prop.height;
+ f->fmt.pix_mp.width = inst->prop.width;
for (i = 0; i < fmt->num_planes; ++i) {
f->fmt.pix_mp.plane_fmt[i].sizeimage =
- fmt->get_frame_size(i, inst->height, inst->width);
+ fmt->get_frame_size(i, inst->prop.height,
+ inst->prop.width);
}
} else {
pr_err("Buf type not recognized, type = %d\n",
diff --git a/drivers/media/video/msm_vidc/msm_vidc.c b/drivers/media/video/msm_vidc/msm_vidc.c
index 28fe2b8..4d4cec5 100644
--- a/drivers/media/video/msm_vidc/msm_vidc.c
+++ b/drivers/media/video/msm_vidc/msm_vidc.c
@@ -316,6 +316,8 @@
kfree(buf);
}
}
+ if (inst->extradata_handle)
+ msm_smem_free(inst->mem_client, inst->extradata_handle);
spin_unlock_irqrestore(&inst->lock, flags);
msm_smem_delete_client(inst->mem_client);
}
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index 4a86cf5..6835467 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -13,6 +13,10 @@
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <mach/iommu.h>
+#include <mach/iommu_domains.h>
+#include <mach/peripheral-loader.h>
#include "msm_vidc_common.h"
#include "vidc_hal_api.h"
@@ -30,6 +34,128 @@
#define V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT \
V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_INSUFFICIENT
+#define NUM_MBS_PER_SEC(__height, __width, __fps) ({\
+ (__height >> 4) * (__width >> 4) * __fps; \
+})
+
+#define VIDC_BUS_LOAD(__height, __width, __fps, __br) ({\
+ __height * __width * __fps; \
+})
+
+#define GET_NUM_MBS(__h, __w) ({\
+ u32 __mbs = (__h >> 4) * (__w >> 4);\
+ __mbs;\
+})
+
+/* While adding entries to this array, make sure
+ * they are in descending order of load;
+ * msm_comm_get_load() relies on this ordering. */
+static const u32 clocks_table[][2] = {
+ {979200, 410000000},
+ {560145, 266670000},
+ {421161, 200000000},
+ {243000, 133330000},
+ {108000, 100000000},
+ {36000, 50000000},
+};
+
+static const u32 bus_table[] = {
+ 0,
+ 9216000,
+ 27648000,
+ 62208000,
+};
+
+static int msm_comm_get_bus_load(struct msm_vidc_core *core)
+{
+ struct msm_vidc_inst *inst = NULL;
+ int load = 0;
+ if (!core) {
+ pr_err("Invalid args: %p\n", core);
+ return -EINVAL;
+ }
+ list_for_each_entry(inst, &core->instances, list) {
+ load += VIDC_BUS_LOAD(inst->prop.height,
+ inst->prop.width, inst->prop.fps,
+ 2000000);
+ }
+ return load;
+}
+
+static int get_bus_vector(int load)
+{
+ int num_rows = sizeof(bus_table)/(sizeof(u32));
+ int i;
+ for (i = num_rows - 1; i > 0; i--) {
+ if ((load >= bus_table[i]) || (i == 1))
+ break;
+ }
+ pr_err("Required bus = %d\n", i);
+ return i;
+}
+
+int msm_comm_scale_bus(struct msm_vidc_core *core)
+{
+ int load;
+ int rc = 0;
+ if (!core) {
+ pr_err("Invalid args: %p\n", core);
+ return -EINVAL;
+ }
+ load = msm_comm_get_bus_load(core);
+ if (load <= 0) {
+ pr_err("Failed to scale bus for %d load\n",
+ load);
+ goto fail_scale_bus;
+ }
+ rc = msm_bus_scale_client_update_request(
+ core->resources.bus_info.vcodec_handle,
+ get_bus_vector(load));
+ if (rc) {
+ pr_err("Failed to scale bus: %d\n", rc);
+ goto fail_scale_bus;
+ }
+ rc = msm_bus_scale_client_update_request(
+ core->resources.bus_info.ocmem_handle,
+ get_bus_vector(load));
+ if (rc) {
+ pr_err("Failed to scale bus: %d\n", rc);
+ goto fail_scale_bus;
+ }
+fail_scale_bus:
+ return rc;
+}
+
+static int msm_comm_get_load(struct msm_vidc_core *core)
+{
+ struct msm_vidc_inst *inst = NULL;
+ int num_mbs_per_sec = 0;
+ if (!core) {
+ pr_err("Invalid args: %p\n", core);
+ return -EINVAL;
+ }
+ list_for_each_entry(inst, &core->instances, list)
+ num_mbs_per_sec += NUM_MBS_PER_SEC(inst->prop.height,
+ inst->prop.width, inst->prop.fps);
+ return num_mbs_per_sec;
+}
+
+static unsigned long get_clock_rate(struct core_clock *clock,
+ int num_mbs_per_sec)
+{
+ int num_rows = clock->count;
+ struct load_freq_table *table = clock->load_freq_tbl;
+ unsigned long ret = table[num_rows-1].freq;
+ int i;
+ for (i = 0; i < num_rows; i++) {
+ if (num_mbs_per_sec > table[i].load)
+ break;
+ ret = table[i].freq;
+ }
+ pr_err("Required clock rate = %lu\n", ret);
+ return ret;
+}
+
struct msm_vidc_core *get_vidc_core(int core_id)
{
struct msm_vidc_core *core;
@@ -52,6 +178,60 @@
return NULL;
}
+static int msm_comm_iommu_attach(struct msm_vidc_core *core)
+{
+ int rc;
+ struct iommu_domain *domain;
+ int i;
+ struct iommu_info *io_map;
+ struct device *dev;
+ for (i = 0; i < MAX_MAP; i++) {
+ io_map = &core->resources.io_map[i];
+ dev = msm_iommu_get_ctx(io_map->ctx);
+ domain = msm_get_iommu_domain(io_map->domain);
+ if (IS_ERR_OR_NULL(domain)) {
+ pr_err("Failed to get domain: %s\n", io_map->name);
+ rc = PTR_ERR(domain);
+ break;
+ }
+ rc = iommu_attach_device(domain, dev);
+ if (rc) {
+ pr_err("IOMMU attach failed: %s\n", io_map->name);
+ break;
+ }
+ }
+ if (i < MAX_MAP) {
+ i--;
+ for (; i >= 0; i--) {
+ io_map = &core->resources.io_map[i];
+ dev = msm_iommu_get_ctx(io_map->ctx);
+ domain = msm_get_iommu_domain(io_map->domain);
+ if (dev && domain)
+ iommu_detach_device(domain, dev);
+ }
+ }
+ return rc;
+}
+
+static void msm_comm_iommu_detach(struct msm_vidc_core *core)
+{
+ struct device *dev;
+ struct iommu_domain *domain;
+ struct iommu_info *io_map;
+ int i;
+ if (!core) {
+ pr_err("Invalid paramter: %p\n", core);
+ return;
+ }
+ for (i = 0; i < MAX_MAP; i++) {
+ io_map = &core->resources.io_map[i];
+ dev = msm_iommu_get_ctx(io_map->ctx);
+ domain = msm_get_iommu_domain(io_map->domain);
+ if (dev && domain)
+ iommu_detach_device(domain, dev);
+ }
+}
+
const struct msm_vidc_format *msm_comm_get_pixel_fmt_index(
const struct msm_vidc_format fmt[], int size, int index, int fmt_type)
{
@@ -128,6 +308,23 @@
}
}
+static void handle_sys_release_res_done(
+ enum command_response cmd, void *data)
+{
+ struct msm_vidc_cb_cmd_done *response = data;
+ struct msm_vidc_core *core;
+ if (!response) {
+ pr_err("Failed to get valid response for sys init\n");
+ return;
+ }
+ core = get_vidc_core(response->device_id);
+ if (!core) {
+ pr_err("Wrong device_id received\n");
+ return;
+ }
+ complete(&core->completions[SYS_MSG_INDEX(cmd)]);
+}
+
static inline void change_inst_state(struct msm_vidc_inst *inst,
enum instance_state state)
{
@@ -416,6 +613,9 @@
case SYS_INIT_DONE:
handle_sys_init_done(cmd, data);
break;
+ case RELEASE_RESOURCE_DONE:
+ handle_sys_release_res_done(cmd, data);
+ break;
case SESSION_INIT_DONE:
handle_session_init_done(cmd, data);
break;
@@ -455,6 +655,271 @@
}
}
+int msm_comm_scale_clocks(struct msm_vidc_core *core)
+{
+ int num_mbs_per_sec;
+ int rc = 0;
+ if (!core) {
+ pr_err("Invalid args: %p\n", core);
+ return -EINVAL;
+ }
+ num_mbs_per_sec = msm_comm_get_load(core);
+ pr_err("num_mbs_per_sec = %d\n", num_mbs_per_sec);
+ rc = clk_set_rate(core->resources.clock[VCODEC_CLK].clk,
+ get_clock_rate(&core->resources.clock[VCODEC_CLK],
+ num_mbs_per_sec));
+ if (rc) {
+ pr_err("Failed to set clock rate: %d\n", rc);
+ goto fail_clk_set_rate;
+ }
+ rc = msm_comm_scale_bus(core);
+ if (rc)
+ pr_err("Failed to scale bus bandwidth\n");
+fail_clk_set_rate:
+ return rc;
+}
+
+static inline int msm_comm_enable_clks(struct msm_vidc_core *core)
+{
+ int i;
+ struct core_clock *cl;
+ int rc = 0;
+ if (!core) {
+ pr_err("Invalid params: %p\n", core);
+ return -EINVAL;
+ }
+ for (i = 0; i < VCODEC_MAX_CLKS; i++) {
+ cl = &core->resources.clock[i];
+ rc = clk_prepare_enable(cl->clk);
+ if (rc) {
+ pr_err("Failed to enable clocks\n");
+ goto fail_clk_enable;
+ } else {
+ pr_err("Clock: %s enabled\n", cl->name);
+ }
+ }
+ return rc;
+fail_clk_enable:
+ for (; i >= 0; i--) {
+ cl = &core->resources.clock[i];
+ clk_disable_unprepare(cl->clk);
+ }
+ return rc;
+}
+
+static inline void msm_comm_disable_clks(struct msm_vidc_core *core)
+{
+ int i;
+ struct core_clock *cl;
+ if (!core) {
+ pr_err("Invalid params: %p\n", core);
+ return;
+ }
+ for (i = 0; i < VCODEC_MAX_CLKS; i++) {
+ cl = &core->resources.clock[i];
+ clk_disable_unprepare(cl->clk);
+ }
+}
+
+static int msm_comm_load_fw(struct msm_vidc_core *core)
+{
+ int rc = 0;
+ if (!core) {
+ pr_err("Invalid paramter: %p\n", core);
+ return -EINVAL;
+ }
+ rc = msm_comm_scale_clocks(core);
+ if (rc) {
+ pr_err("Failed to set clock rate: %d\n", rc);
+ goto fail_pil_get;
+ }
+
+ if (!core->resources.fw.cookie)
+ core->resources.fw.cookie = pil_get("venus");
+
+ if (IS_ERR_OR_NULL(core->resources.fw.cookie)) {
+ pr_err("Failed to download firmware\n");
+ rc = -ENOMEM;
+ goto fail_pil_get;
+ }
+
+ rc = msm_comm_enable_clks(core);
+ if (rc) {
+ pr_err("Failed to enable clocks: %d\n", rc);
+ goto fail_enable_clks;
+ }
+
+ rc = msm_comm_iommu_attach(core);
+ if (rc) {
+ pr_err("Failed to attach iommu");
+ goto fail_iommu_attach;
+ }
+ return rc;
+fail_iommu_attach:
+ msm_comm_disable_clks(core);
+fail_enable_clks:
+ pil_put(core->resources.fw.cookie);
+ core->resources.fw.cookie = NULL;
+fail_pil_get:
+ return rc;
+}
+
+static void msm_comm_unload_fw(struct msm_vidc_core *core)
+{
+ if (!core) {
+ pr_err("Invalid paramter: %p\n", core);
+ return;
+ }
+ if (core->resources.fw.cookie) {
+ pil_put(core->resources.fw.cookie);
+ core->resources.fw.cookie = NULL;
+ msm_comm_iommu_detach(core);
+ msm_comm_disable_clks(core);
+ }
+}
+
+static inline unsigned long get_ocmem_requirement(u32 height, u32 width)
+{
+ int num_mbs = 0;
+ num_mbs = GET_NUM_MBS(height, width);
+ /*TODO: This should be changes once the numbers are
+ * available from firmware*/
+ return 512 * 1024;
+}
+
+static int msm_comm_set_ocmem(struct msm_vidc_core *core,
+ struct ocmem_buf *ocmem)
+{
+ struct vidc_resource_hdr rhdr;
+ int rc = 0;
+ if (!core || !ocmem) {
+ pr_err("Invalid params, core:%p, ocmem: %p\n",
+ core, ocmem);
+ return -EINVAL;
+ }
+ rhdr.resource_id = VIDC_RESOURCE_OCMEM;
+ rhdr.resource_handle = (u32) &core->resources.ocmem;
+ rhdr.size = ocmem->len;
+ rc = vidc_hal_core_set_resource(core->device, &rhdr, ocmem);
+ if (rc) {
+ pr_err("Failed to set OCMEM on driver\n");
+ goto ocmem_set_failed;
+ }
+ pr_debug("OCMEM set, addr = %lx, size: %ld\n",
+ ocmem->addr, ocmem->len);
+ocmem_set_failed:
+ return rc;
+}
+
+static int msm_comm_unset_ocmem(struct msm_vidc_core *core)
+{
+ struct vidc_resource_hdr rhdr;
+ int rc = 0;
+ if (!core || !core->resources.ocmem.buf) {
+ pr_err("Invalid params, core:%p\n", core);
+ return -EINVAL;
+ }
+ rhdr.resource_id = VIDC_RESOURCE_OCMEM;
+ rhdr.resource_handle = (u32) &core->resources.ocmem;
+ init_completion(
+ &core->completions[SYS_MSG_INDEX(RELEASE_RESOURCE_DONE)]);
+ rc = vidc_hal_core_release_resource(core->device, &rhdr);
+ if (rc) {
+ pr_err("Failed to set OCMEM on driver\n");
+ goto release_ocmem_failed;
+ }
+ rc = wait_for_completion_timeout(
+ &core->completions[SYS_MSG_INDEX(RELEASE_RESOURCE_DONE)],
+ msecs_to_jiffies(HW_RESPONSE_TIMEOUT));
+ if (!rc) {
+ pr_err("Wait interrupted or timeout: %d\n", rc);
+ rc = -EIO;
+ goto release_ocmem_failed;
+ }
+release_ocmem_failed:
+ return rc;
+}
+
+static int msm_comm_alloc_ocmem(struct msm_vidc_core *core,
+ unsigned long size)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct ocmem_buf *ocmem_buffer;
+ if (!core || !size) {
+ pr_err("Invalid param, core: %p, size: %lu\n", core, size);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&core->lock, flags);
+ ocmem_buffer = core->resources.ocmem.buf;
+ if (!ocmem_buffer ||
+ ocmem_buffer->len < size) {
+ ocmem_buffer = ocmem_allocate_nb(OCMEM_VIDEO, size);
+ if (IS_ERR_OR_NULL(ocmem_buffer)) {
+ pr_err("ocmem_allocate_nb failed: %d\n",
+ (u32) ocmem_buffer);
+ rc = -ENOMEM;
+ }
+ core->resources.ocmem.buf = ocmem_buffer;
+ rc = msm_comm_set_ocmem(core, ocmem_buffer);
+ if (rc) {
+ pr_err("Failed to set ocmem: %d\n", rc);
+ goto ocmem_set_failed;
+ }
+ } else
+ pr_debug("OCMEM is enough. reqd: %lu, available: %lu\n",
+ size, ocmem_buffer->len);
+
+ocmem_set_failed:
+ spin_unlock_irqrestore(&core->lock, flags);
+ return rc;
+}
+
+static int msm_comm_free_ocmem(struct msm_vidc_core *core)
+{
+ int rc = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&core->lock, flags);
+ if (core->resources.ocmem.buf) {
+ rc = ocmem_free(OCMEM_VIDEO, core->resources.ocmem.buf);
+ if (rc)
+ pr_err("Failed to free ocmem\n");
+ }
+ core->resources.ocmem.buf = NULL;
+ spin_unlock_irqrestore(&core->lock, flags);
+ return rc;
+}
+
+int msm_vidc_ocmem_notify_handler(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ struct ocmem_buf *buff = data;
+ struct msm_vidc_core *core;
+ struct msm_vidc_resources *resources;
+ struct on_chip_mem *ocmem;
+ int rc = NOTIFY_DONE;
+ if (event == OCMEM_ALLOC_GROW) {
+ ocmem = container_of(this, struct on_chip_mem, vidc_ocmem_nb);
+ if (!ocmem) {
+ pr_err("Wrong handler passed\n");
+ rc = NOTIFY_BAD;
+ goto bad_notfier;
+ }
+ resources = container_of(ocmem,
+ struct msm_vidc_resources, ocmem);
+ core = container_of(resources,
+ struct msm_vidc_core, resources);
+ if (msm_comm_set_ocmem(core, buff)) {
+ pr_err("Failed to set ocmem: %d\n", rc);
+ goto ocmem_set_failed;
+ }
+ rc = NOTIFY_OK;
+ }
+ocmem_set_failed:
+bad_notfier:
+ return rc;
+}
+
static int msm_comm_init_core_done(struct msm_vidc_inst *inst)
{
struct msm_vidc_core *core = inst->core;
@@ -499,18 +964,28 @@
core->id, core->state);
goto core_already_inited;
}
+ rc = msm_comm_load_fw(core);
+ if (rc) {
+ pr_err("Failed to load video firmware\n");
+ goto fail_load_fw;
+ }
init_completion(&core->completions[SYS_MSG_INDEX(SYS_INIT_DONE)]);
- rc = vidc_hal_core_init(core->device);
+ rc = vidc_hal_core_init(core->device,
+ core->resources.io_map[NS_MAP].domain);
if (rc) {
pr_err("Failed to init core, id = %d\n", core->id);
- goto exit;
+ goto fail_core_init;
}
spin_lock_irqsave(&core->lock, flags);
core->state = VIDC_CORE_INIT;
spin_unlock_irqrestore(&core->lock, flags);
core_already_inited:
change_inst_state(inst, MSM_VIDC_CORE_INIT);
-exit:
+ mutex_unlock(&core->sync_lock);
+ return rc;
+fail_core_init:
+ msm_comm_unload_fw(core);
+fail_load_fw:
mutex_unlock(&core->sync_lock);
return rc;
}
@@ -527,7 +1002,8 @@
goto core_already_uninited;
}
if (list_empty(&core->instances)) {
- pr_debug("Calling vidc_hal_core_release\n");
+ msm_comm_unset_ocmem(core);
+ msm_comm_free_ocmem(core);
rc = vidc_hal_core_release(core->device);
if (rc) {
pr_err("Failed to release core, id = %d\n", core->id);
@@ -536,6 +1012,7 @@
spin_lock_irqsave(&core->lock, flags);
core->state = VIDC_CORE_UNINIT;
spin_unlock_irqrestore(&core->lock, flags);
+ msm_comm_unload_fw(core);
}
core_already_uninited:
change_inst_state(inst, MSM_VIDC_CORE_UNINIT);
@@ -587,6 +1064,9 @@
case V4L2_PIX_FMT_VC1_ANNEX_L:
codec = HAL_VIDEO_CODEC_VC1;
break;
+ case V4L2_PIX_FMT_VP8:
+ codec = HAL_VIDEO_CODEC_VP8;
+ break;
case V4L2_PIX_FMT_DIVX_311:
codec = HAL_VIDEO_CODEC_DIVX_311;
break;
@@ -596,8 +1076,7 @@
/*HAL_VIDEO_CODEC_MVC
HAL_VIDEO_CODEC_SPARK
HAL_VIDEO_CODEC_VP6
- HAL_VIDEO_CODEC_VP7
- HAL_VIDEO_CODEC_VP8*/
+ HAL_VIDEO_CODEC_VP7*/
default:
pr_err("Wrong codec: %d\n", fourcc);
codec = HAL_UNUSED_CODEC;
@@ -642,10 +1121,16 @@
struct msm_vidc_inst *inst)
{
int rc = 0;
+ u32 ocmem_sz = 0;
if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_LOAD_RESOURCES)) {
pr_err("inst: %p is already in state: %d\n", inst, inst->state);
goto exit;
}
+ ocmem_sz = get_ocmem_requirement(inst->height, inst->width);
+ rc = msm_comm_alloc_ocmem(inst->core, ocmem_sz);
+ if (rc)
+ pr_warn("Failed to allocate OCMEM. Performance will be impacted\n");
+
rc = vidc_hal_session_load_res((void *) inst->session);
if (rc) {
pr_err("Failed to send load resources\n");
@@ -887,9 +1372,16 @@
} else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
frame_data.filled_len = 0;
frame_data.buffer_type = HAL_BUFFER_OUTPUT;
- frame_data.extradata_addr = 0;
- pr_debug("Sending ftb to hal..: Alloc: %d :filled: %d\n",
+ if (inst->extradata_handle) {
+ frame_data.extradata_addr =
+ inst->extradata_handle->device_addr;
+ } else {
+ frame_data.extradata_addr = 0;
+ }
+ pr_debug("Sending ftb to hal..: Alloc: %d :filled: %d",
frame_data.alloc_len, frame_data.filled_len);
+ pr_debug(" extradata_addr: %d\n",
+ frame_data.extradata_addr);
rc = vidc_hal_session_ftb((void *) inst->session,
&frame_data);
} else {
@@ -1018,7 +1510,8 @@
for (i = 0; i < inst->buff_req.buffer[6].buffer_count_actual;
i++) {
handle = msm_smem_alloc(inst->mem_client,
- inst->buff_req.buffer[6].buffer_size, 1, 0);
+ inst->buff_req.buffer[6].buffer_size, 1, 0,
+ inst->core->resources.io_map[NS_MAP].domain, 0);
if (!handle) {
pr_err("Failed to allocate scratch memory\n");
rc = -ENOMEM;
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.h b/drivers/media/video/msm_vidc/msm_vidc_common.h
index 2fafa79..2009ca6 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.h
@@ -29,6 +29,7 @@
int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst);
int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst);
int msm_comm_qbuf(struct vb2_buffer *vb);
+int msm_comm_scale_clocks(struct msm_vidc_core *core);
#define IS_PRIV_CTRL(idx) (\
(V4L2_CTRL_ID2CLASS(idx) == V4L2_CTRL_CLASS_MPEG) && \
V4L2_CTRL_DRIVER_PRIV(idx))
diff --git a/drivers/media/video/msm_vidc/msm_vidc_internal.h b/drivers/media/video/msm_vidc/msm_vidc_internal.h
index fb8fbc4..992f39c 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_internal.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_internal.h
@@ -18,6 +18,10 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/completion.h>
+#include <linux/clk.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/ocmem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -42,6 +46,7 @@
#define SYS_MSG_INDEX(__msg) (__msg - SYS_MSG_START)
#define SESSION_MSG_INDEX(__msg) (__msg - SESSION_MSG_START)
+#define MAX_NAME_LENGTH 64
enum vidc_ports {
OUTPUT_PORT,
CAPTURE_PORT,
@@ -86,7 +91,7 @@
};
struct msm_vidc_format {
- char name[64];
+ char name[MAX_NAME_LENGTH];
u8 description[32];
u32 fourcc;
int num_planes;
@@ -106,6 +111,70 @@
struct video_device vdev;
};
+struct msm_vidc_fw {
+ void *cookie;
+};
+
+struct iommu_info {
+ u32 addr_range[2];
+ char name[MAX_NAME_LENGTH];
+ char ctx[MAX_NAME_LENGTH];
+ int domain;
+ int partition;
+};
+
+enum io_maps {
+ CP_MAP,
+ NS_MAP,
+ MAX_MAP
+};
+
+enum vidc_clocks {
+ VCODEC_CLK,
+ VCODEC_AHB_CLK,
+ VCODEC_AXI_CLK,
+ VCODEC_OCMEM_CLK,
+ VCODEC_MAX_CLKS
+};
+
+struct load_freq_table {
+ u32 load;
+ u32 freq;
+};
+
+struct core_clock {
+ char name[MAX_NAME_LENGTH];
+ struct clk *clk;
+ u32 count;
+ struct load_freq_table load_freq_tbl[8];
+};
+
+struct vidc_bus_info {
+ u32 vcodec_handle;
+ u32 ocmem_handle;
+};
+
+struct on_chip_mem {
+ struct ocmem_buf *buf;
+ struct notifier_block vidc_ocmem_nb;
+ void *handle;
+};
+
+struct msm_vidc_resources {
+ struct msm_vidc_fw fw;
+ struct iommu_info io_map[MAX_MAP];
+ struct core_clock clock[VCODEC_MAX_CLKS];
+ struct vidc_bus_info bus_info;
+ struct on_chip_mem ocmem;
+};
+
+struct session_prop {
+ u32 width;
+ u32 height;
+ u32 fps;
+ u32 bitrate;
+};
+
struct msm_vidc_core {
struct list_head list;
struct mutex sync_lock;
@@ -121,6 +190,7 @@
u32 register_size;
u32 irq;
enum vidc_core_state state;
+ struct msm_vidc_resources resources;
struct completion completions[SYS_MSG_END - SYS_MSG_START + 1];
};
@@ -130,6 +200,7 @@
struct msm_vidc_core *core;
int session_type;
void *session;
+ struct session_prop prop;
u32 width;
u32 height;
int state;
@@ -143,6 +214,7 @@
struct v4l2_ctrl_handler ctrl_handler;
struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1];
struct v4l2_fh event_handler;
+ struct msm_smem *extradata_handle;
bool in_reconfig;
u32 reconfig_width;
u32 reconfig_height;
@@ -152,7 +224,7 @@
struct msm_vidc_ctrl {
u32 id;
- char name[64];
+ char name[MAX_NAME_LENGTH];
enum v4l2_ctrl_type type;
s32 minimum;
s32 maximum;
@@ -163,4 +235,7 @@
};
void handle_cmd_response(enum command_response cmd, void *data);
+int msm_vidc_ocmem_notify_handler(struct notifier_block *this,
+ unsigned long event, void *data);
+
#endif
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index 85e984d..16a3ecd 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -15,6 +15,9 @@
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/delay.h>
+#include <mach/ocmem.h>
+
#include <asm/memory.h>
#include "vidc_hal.h"
#include "vidc_hal_io.h"
@@ -23,7 +26,7 @@
#define REG_ADDR_OFFSET_BITMASK 0x000FFFFF
/*Workaround for virtio */
-#define HFI_VIRTIO_FW_BIAS 0x14f00000
+#define HFI_VIRTIO_FW_BIAS 0x0
struct hal_device_data hal_ctxt;
@@ -300,7 +303,8 @@
return 0;
}
-static int vidc_hal_alloc(void *mem, void *clnt, u32 size, u32 align, u32 flags)
+static int vidc_hal_alloc(void *mem, void *clnt, u32 size, u32 align, u32 flags,
+ int domain)
{
struct vidc_mem_addr *vmem;
struct msm_smem *alloc;
@@ -312,7 +316,7 @@
vmem = (struct vidc_mem_addr *)mem;
HAL_MSG_HIGH("start to alloc: size:%d, Flags: %d", size, flags);
- alloc = msm_smem_alloc(clnt, size, align, flags);
+ alloc = msm_smem_alloc(clnt, size, align, flags, domain, 0);
HAL_MSG_LOW("Alloc done");
if (!alloc) {
HAL_MSG_HIGH("Alloc fail in %s", __func__);
@@ -503,7 +507,7 @@
device->hal_client = NULL;
}
-static int vidc_hal_interface_queues_init(struct hal_device *dev)
+static int vidc_hal_interface_queues_init(struct hal_device *dev, int domain)
{
struct hfi_queue_table_header *q_tbl_hdr;
struct hfi_queue_header *q_hdr;
@@ -513,7 +517,7 @@
rc = vidc_hal_alloc((void *) &dev->iface_q_table,
dev->hal_client,
- VIDC_IFACEQ_TABLE_SIZE, 1, 0);
+ VIDC_IFACEQ_TABLE_SIZE, 1, 0, domain);
if (rc) {
HAL_MSG_ERROR("%s:iface_q_table_alloc_fail", __func__);
return -ENOMEM;
@@ -533,7 +537,7 @@
iface_q = &dev->iface_queues[i];
rc = vidc_hal_alloc((void *) &iface_q->q_array,
dev->hal_client, VIDC_IFACEQ_QUEUE_SIZE,
- 1, 0);
+ 1, 0, domain);
if (rc) {
HAL_MSG_ERROR("%s:iface_q_table_alloc[%d]_fail",
__func__, i);
@@ -585,6 +589,7 @@
ctrl_status = read_register(
device->hal_data->register_base_addr,
VIDC_CPU_CS_SCIACMDARG0);
+ usleep_range(500, 1000);
count++;
}
if (count >= 25)
@@ -592,7 +597,7 @@
return rc;
}
-int vidc_hal_core_init(void *device)
+int vidc_hal_core_init(void *device, int domain)
{
struct hfi_cmd_sys_init_packet pkt;
int rc = 0;
@@ -619,10 +624,10 @@
HAL_MSG_ERROR("Device_Virt_Address : 0x%x,"
"Register_Virt_Addr: 0x%x",
- (u32) dev->hal_data->device_base_addr,
+ dev->hal_data->device_base_addr,
(u32) dev->hal_data->register_base_addr);
- rc = vidc_hal_interface_queues_init(dev);
+ rc = vidc_hal_interface_queues_init(dev, domain);
if (rc) {
HAL_MSG_ERROR("failed to init queues");
rc = -ENOMEM;
@@ -665,6 +670,8 @@
}
write_register(dev->hal_data->register_base_addr,
VIDC_CPU_CS_SCIACMDARG3, 0, 0);
+ disable_irq_nosync(dev->hal_data->irq);
+ vidc_hal_interface_queues_release(dev);
HAL_MSG_INFO("\nHAL exited\n");
return 0;
}
@@ -744,12 +751,12 @@
struct hfi_resource_ocmem *hfioc_mem =
(struct hfi_resource_ocmem *)
&pkt->rg_resource_data[0];
- struct vidc_mem_addr *vidc_oc_mem =
- (struct vidc_mem_addr *) resource_value;
+ struct ocmem_buf *ocmem =
+ (struct ocmem_buf *) resource_value;
pkt->resource_type = HFI_RESOURCE_OCMEM;
- hfioc_mem->size = (u32) vidc_oc_mem->mem_size;
- hfioc_mem->mem = (u8 *) vidc_oc_mem->align_device_addr;
+ hfioc_mem->size = (u32) ocmem->len;
+ hfioc_mem->mem = (u8 *) ocmem->addr;
pkt->size += sizeof(struct hfi_resource_ocmem);
if (vidc_hal_iface_cmdq_write(dev, pkt))
rc = -ENOTEMPTY;
@@ -1118,6 +1125,7 @@
case HAL_CONFIG_VENC_REQUEST_IFRAME:
pkt->rg_property_data[0] =
HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME;
+ pkt->size += sizeof(u32);
break;
case HAL_PARAM_VENC_MPEG4_SHORT_HEADER:
break;
@@ -1272,18 +1280,6 @@
sizeof(struct hfi_h264_db_control);
break;
}
- case HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF:
- {
- struct hfi_temporal_spatial_tradeoff *hfi;
- pkt->rg_property_data[0] =
- HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF;
- hfi = (struct hfi_temporal_spatial_tradeoff *)
- &pkt->rg_property_data[1];
- hfi->ts_factor = ((struct hfi_temporal_spatial_tradeoff *)
- pdata)->ts_factor;
- pkt->size += sizeof(u32) * 2;
- break;
- }
case HAL_PARAM_VENC_SESSION_QP:
{
struct hfi_quantization *hfi;
@@ -1505,8 +1501,6 @@
break;
case HAL_PARAM_VENC_H264_DEBLOCK_CONTROL:
break;
- case HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF:
- break;
case HAL_PARAM_VENC_SESSION_QP:
break;
case HAL_CONFIG_VENC_INTRA_PERIOD:
@@ -2000,11 +1994,11 @@
list_for_each_safe(curr, next, &core.dev_head) {
device = list_entry(curr, struct hal_device, list);
if (device && device->hal_data->irq == irq &&
- (CONTAINS((u32)device->hal_data->
+ (CONTAINS(device->hal_data->
device_base_addr,
FIRMWARE_SIZE, fw_addr) ||
CONTAINS(fw_addr, FIRMWARE_SIZE,
- (u32)device->hal_data->
+ device->hal_data->
device_base_addr) ||
CONTAINS((u32)device->hal_data->
register_base_addr,
@@ -2018,12 +2012,12 @@
OVERLAPS(reg_addr, reg_size,
(u32)device->hal_data->
register_base_addr, reg_size) ||
- OVERLAPS((u32)device->hal_data->
+ OVERLAPS(device->hal_data->
device_base_addr,
FIRMWARE_SIZE, fw_addr,
FIRMWARE_SIZE) ||
OVERLAPS(fw_addr, FIRMWARE_SIZE,
- (u32)device->hal_data->
+ device->hal_data->
device_base_addr,
FIRMWARE_SIZE))) {
return 0;
@@ -2073,7 +2067,7 @@
struct hal_data *hal = NULL;
int rc = 0;
- if (device_id || !fw_base_addr || !reg_base || !reg_size ||
+ if (device_id || !reg_base || !reg_size ||
!irq || !callback) {
HAL_MSG_ERROR("Invalid Paramters");
return NULL;
@@ -2091,13 +2085,7 @@
return NULL;
}
hal->irq = irq;
- hal->device_base_addr =
- ioremap_nocache(fw_base_addr, FIRMWARE_SIZE);
- if (!hal->device_base_addr) {
- HAL_MSG_ERROR("could not map fw addr %d of size %d",
- fw_base_addr, FIRMWARE_SIZE);
- goto err_map;
- }
+ hal->device_base_addr = fw_base_addr;
hal->register_base_addr =
ioremap_nocache(reg_base, reg_size);
if (!hal->register_base_addr) {
diff --git a/drivers/media/video/msm_vidc/vidc_hal.h b/drivers/media/video/msm_vidc/vidc_hal.h
index a36d7f3..8e7c3d3 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.h
+++ b/drivers/media/video/msm_vidc/vidc_hal.h
@@ -155,6 +155,7 @@
#define HFI_EXTRADATA_VC1_SEQDISP 0x00000004
#define HFI_EXTRADATA_TIMESTAMP 0x00000005
#define HFI_EXTRADATA_S3D_FRAME_PACKING 0x00000006
+#define HFI_EXTRADATA_EOSNAL_DETECTED 0x00000007
#define HFI_EXTRADATA_MULTISLICE_INFO 0x7F100000
#define HFI_EXTRADATA_NUM_CONCEALED_MB 0x7F100001
#define HFI_EXTRADATA_INDEX 0x7F100002
@@ -164,6 +165,11 @@
#define HFI_INDEX_EXTRADATA_DIGITAL_ZOOM 0x07000010
#define HFI_INDEX_EXTRADATA_ASPECT_RATIO 0x7F100003
+struct HFI_INDEX_EXTRADATA_CONFIG_TYPE {
+ int enable;
+ u32 index_extra_data_id;
+};
+
struct hfi_extradata_header {
u32 size;
u32 version;
@@ -196,7 +202,7 @@
(HFI_PROPERTY_PARAM_OX_START + 0x004)
#define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG \
(HFI_PROPERTY_PARAM_OX_START + 0x005)
-#define HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE \
+#define HFI_PROPERTY_PARAM_INDEX_EXTRADATA \
(HFI_PROPERTY_PARAM_OX_START + 0x006)
#define HFI_PROPERTY_PARAM_DIVX_FORMAT \
(HFI_PROPERTY_PARAM_OX_START + 0x007)
@@ -244,6 +250,10 @@
#define HFI_PROPERTY_PARAM_VENC_OX_START \
(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x5000)
+#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO \
+ (HFI_PROPERTY_PARAM_VENC_OX_START + 0x001)
+#define HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL \
+ (HFI_PROPERTY_PARAM_VENC_OX_START + 0x002)
#define HFI_PROPERTY_CONFIG_VENC_OX_START \
(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000)
@@ -285,10 +295,6 @@
u8 rg_data[1];
};
-struct hfi_seq_header_info {
- u32 max_header_len;
-};
-
struct hfi_enable_picture {
u32 picture_type;
};
@@ -828,7 +834,7 @@
struct hal_data {
u32 irq;
- u8 *device_base_addr;
+ u32 device_base_addr;
u8 *register_base_addr;
};
@@ -861,6 +867,14 @@
int dev_count;
};
+struct hfi_index_extradata_aspect_ratio_payload {
+ u32 size;
+ u32 version;
+ u32 port_index;
+ u32 saspect_width;
+ u32 saspect_height;
+};
+
extern struct hal_device_data hal_ctxt;
int vidc_hal_iface_msgq_read(struct hal_device *device, void *pkt);
diff --git a/drivers/media/video/msm_vidc/vidc_hal_api.h b/drivers/media/video/msm_vidc/vidc_hal_api.h
index 332bbac..c77ae12 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_api.h
+++ b/drivers/media/video/msm_vidc/vidc_hal_api.h
@@ -943,7 +943,7 @@
};
/* VIDC_HAL CORE API's */
-int vidc_hal_core_init(void *device);
+int vidc_hal_core_init(void *device, int domain);
int vidc_hal_core_release(void *device);
int vidc_hal_core_pc_prep(void *device);
int vidc_hal_core_set_resource(void *device,
diff --git a/drivers/media/video/msm_vidc/vidc_hal_helper.h b/drivers/media/video/msm_vidc/vidc_hal_helper.h
index d4e2619..43995eb 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_helper.h
+++ b/drivers/media/video/msm_vidc/vidc_hal_helper.h
@@ -68,8 +68,8 @@
#define HFI_VIDEO_DOMAIN_ENCODER (HFI_COMMON_BASE + 0x1)
#define HFI_VIDEO_DOMAIN_DECODER (HFI_COMMON_BASE + 0x2)
-#define HFI_VIDEO_DOMAIN_VPE (HFI_COMMON_BASE + 0x3)
-#define HFI_VIDEO_DOMAIN_MBI (HFI_COMMON_BASE + 0x4)
+#define HFI_VIDEO_DOMAIN_VPE (HFI_COMMON_BASE + 0x4)
+#define HFI_VIDEO_DOMAIN_MBI (HFI_COMMON_BASE + 0x8)
#define HFI_DOMAIN_BASE_COMMON (HFI_COMMON_BASE + 0)
#define HFI_DOMAIN_BASE_VDEC (HFI_COMMON_BASE + 0x01000000)
@@ -131,6 +131,7 @@
#define HFI_H264_PROFILE_STEREO_HIGH 0x00000008
#define HFI_H264_PROFILE_MULTIVIEW_HIGH 0x00000010
#define HFI_H264_PROFILE_CONSTRAINED_HIGH 0x00000020
+#define HFI_H264_PROFILE_CONSTRAINED_BASE 0x00000040
#define HFI_H264_LEVEL_1 0x00000001
#define HFI_H264_LEVEL_1b 0x00000002
@@ -261,6 +262,10 @@
(HFI_PROPERTY_PARAM_COMMON_START + 0x00B)
#define HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT \
(HFI_PROPERTY_PARAM_COMMON_START + 0x00C)
+#define HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x00D)
+#define HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x00E)
#define HFI_PROPERTY_CONFIG_COMMON_START \
(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000)
@@ -271,6 +276,8 @@
(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000)
#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM \
(HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x001)
+#define HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR \
+ (HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x002)
#define HFI_PROPERTY_CONFIG_VDEC_COMMON_START \
(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x4000)
@@ -285,15 +292,13 @@
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x003)
#define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x004)
-#define HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF \
+#define HFI_PROPERTY_PARAM_VENC_H264_PICORDER_CNT_TYPE \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x005)
-#define HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED \
- (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x010)
#define HFI_PROPERTY_PARAM_VENC_SESSION_QP \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x006)
#define HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x007)
-#define HFI_PROPERTY_PARAM_VENC_MPEG4_DATA_PARTITIONING \
+#define HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x008)
#define HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x009)
@@ -301,22 +306,26 @@
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00A)
#define HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00B)
-#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO \
+#define HFI_PROPERTY_PARAM_VENC_OPEN_GOP \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00C)
#define HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00D)
#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00E)
-#define HFI_PROPERTY_PARAM_VENC_VBVBUFFER_SIZE \
+#define HFI_PROPERTY_PARAM_VENC_VBV_HRD_BUF_SIZE \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00F)
+#define HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x010)
#define HFI_PROPERTY_PARAM_VENC_MPEG4_QPEL \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x011)
#define HFI_PROPERTY_PARAM_VENC_ADVANCED \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x012)
#define HFI_PROPERTY_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x013)
-#define HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL \
+#define HFI_PROPERTY_PARAM_VENC_H264_SPS_ID \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x014)
+#define HFI_PROPERTY_PARAM_VENC_H264_PPS_ID \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x015)
#define HFI_PROPERTY_CONFIG_VENC_COMMON_START \
(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
@@ -328,7 +337,7 @@
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x003)
#define HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME \
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x004)
-#define HFI_PROPERTY_CONFIG_VENC_TIMESTAMP_SCALE \
+#define HFI_PROPERTY_CONFIG_VENC_SLICE_SIZE \
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x005)
#define HFI_PROPERTY_CONFIG_VENC_FRAME_QP \
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x006)
@@ -357,6 +366,8 @@
#define HFI_CAPABILITY_SCALE_X (HFI_COMMON_BASE + 0x6)
#define HFI_CAPABILITY_SCALE_Y (HFI_COMMON_BASE + 0x7)
#define HFI_CAPABILITY_BITRATE (HFI_COMMON_BASE + 0x8)
+#define HFI_CAPABILITY_BFRAME (HFI_COMMON_BASE + 0x9)
+#define HFI_CAPABILITY_HIERARCHICAL_P_LAYERS (HFI_COMMON_BASE + 0x10)
struct hfi_capability_supported {
u32 capability_type;
@@ -433,10 +444,6 @@
u32 bframes;
};
-struct hfi_timestamp_scale {
- u32 time_stamp_scale;
-};
-
struct hfi_mpeg4_header_extension {
u32 header_extension;
};
@@ -492,6 +499,10 @@
struct hfi_profile_level rg_profile_level[1];
};
+struct hfi_quality_vs_speed {
+ u32 quality_vs_speed;
+};
+
struct hfi_quantization {
u32 qp_i;
u32 qp_p;
@@ -499,8 +510,10 @@
u32 layer_id;
};
-struct hfi_temporal_spatial_tradeoff {
- u32 ts_factor;
+struct hfi_quantization_range {
+ u32 min_qp;
+ u32 max_qp;
+ u32 layer_id;
};
struct hfi_frame_size {
@@ -605,6 +618,8 @@
u8 pipe2d;
u8 hw_mode;
u8 low_delay_enforce;
+ u8 worker_vppsg_delay;
+ int close_gop;
int h264_constrain_intra_pred;
int h264_transform_8x8_flag;
int mpeg4_qpel_enable;
@@ -613,6 +628,9 @@
u8 vpp_info_packet_mode;
u8 ref_tile_mode;
u8 bitstream_flush_mode;
+ u32 vppsg_vspap_fb_sync_delay;
+ u32 rc_initial_delay;
+ u32 peak_bitrate_constraint;
u32 ds_display_frame_width;
u32 ds_display_frame_height;
u32 perf_tune_param_ptr;
@@ -624,6 +642,19 @@
u32 h264_num_ref_frames;
};
+struct hfi_vbv_hrd_bufsize {
+ u32 buffer_size;
+};
+
+struct hfi_codec_mask_supported {
+ u32 codecs;
+ u32 video_domains;
+};
+
+struct hfi_seq_header_info {
+ u32 max_hader_len;
+};
+
#define HFI_CMD_SYS_COMMON_START \
(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
#define HFI_CMD_SYS_INIT (HFI_CMD_SYS_COMMON_START + 0x001)
diff --git a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
index ded9f11..364faa9 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
+++ b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
@@ -252,6 +252,29 @@
device->callback(SYS_INIT_DONE, &cmd_done);
}
+static void hal_process_sys_rel_resource_done(struct hal_device *device,
+ struct hfi_msg_sys_release_resource_done_packet *pkt)
+{
+ struct msm_vidc_cb_cmd_done cmd_done;
+ enum vidc_status status = VIDC_ERR_NONE;
+ u32 pkt_size;
+ memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
+ HAL_MSG_ERROR("RECEIVED:SYS_RELEASE_RESOURCE_DONE");
+ pkt_size = sizeof(struct hfi_msg_sys_release_resource_done_packet);
+ if (pkt_size > pkt->size) {
+ HAL_MSG_ERROR("hal_process_sys_rel_resource_done:bad size:%d",
+ pkt->size);
+ return;
+ }
+ status = vidc_map_hal_err_status((u32)pkt->error_type);
+ cmd_done.device_id = device->device_id;
+ cmd_done.session_id = 0;
+ cmd_done.status = (u32) status;
+ cmd_done.size = 0;
+ cmd_done.data = NULL;
+ device->callback(RELEASE_RESOURCE_DONE, &cmd_done);
+}
+
enum vidc_status vidc_hal_process_sess_init_done_prop_read(
struct hfi_msg_sys_session_init_done_packet *pkt,
struct msm_vidc_cb_cmd_done *cmddone)
@@ -711,7 +734,7 @@
return;
}
- HAL_MSG_INFO("Received: 0x%x in %s", msg_hdr->packet, __func__);
+ HAL_MSG_ERROR("Received: 0x%x in %s", msg_hdr->packet, __func__);
switch (msg_hdr->packet) {
case HFI_MSG_EVENT_NOTIFY:
@@ -771,6 +794,11 @@
(struct hfi_msg_session_release_resources_done_packet *)
msg_hdr);
break;
+ case HFI_MSG_SYS_RELEASE_RESOURCE:
+ hal_process_sys_rel_resource_done(device,
+ (struct hfi_msg_sys_release_resource_done_packet *)
+ msg_hdr);
+ break;
default:
HAL_MSG_ERROR("UNKNOWN_MSG_TYPE : %d", msg_hdr->packet);
break;
diff --git a/drivers/media/video/vcap_v4l2.c b/drivers/media/video/vcap_v4l2.c
index 4f7c585..e8d9e04 100644
--- a/drivers/media/video/vcap_v4l2.c
+++ b/drivers/media/video/vcap_v4l2.c
@@ -852,9 +852,6 @@
c_data->vp_in_fmt.height = priv_fmt->u.pix.height;
c_data->vp_in_fmt.pixfmt = priv_fmt->u.pix.pixelformat;
- if (priv_fmt->u.pix.priv)
- c_data->vid_vp_action.nr_enabled = 1;
-
size = c_data->vp_in_fmt.width * c_data->vp_in_fmt.height;
if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
size = size * 2;
@@ -868,9 +865,6 @@
c_data->vp_out_fmt.height = priv_fmt->u.pix.height;
c_data->vp_out_fmt.pixfmt = priv_fmt->u.pix.pixelformat;
- if (priv_fmt->u.pix.priv)
- c_data->vid_vp_action.nr_enabled = 1;
-
size = c_data->vp_out_fmt.width * c_data->vp_out_fmt.height;
if (c_data->vp_out_fmt.pixfmt == V4L2_PIX_FMT_NV16)
size = size * 2;
@@ -1113,9 +1107,9 @@
{
struct vcap_client_data *c_data = to_client_data(file->private_data);
struct vcap_dev *dev = c_data->dev;
- unsigned long flags;
int rc;
unsigned long rate;
+ long rate_rc;
dprintk(3, "In Stream ON\n");
if (determine_mode(c_data) != c_data->op_mode) {
@@ -1125,14 +1119,14 @@
switch (c_data->op_mode) {
case VC_VCAP_OP:
- spin_lock_irqsave(&dev->dev_slock, flags);
+ mutex_lock(&dev->dev_mutex);
if (dev->vc_resource) {
pr_err("VCAP Err: %s: VC resource taken", __func__);
- spin_unlock_irqrestore(&dev->dev_slock, flags);
+ mutex_unlock(&dev->dev_mutex);
return -EBUSY;
}
dev->vc_resource = 1;
- spin_unlock_irqrestore(&dev->dev_slock, flags);
+ mutex_unlock(&dev->dev_mutex);
c_data->dev->vc_client = c_data;
@@ -1141,13 +1135,14 @@
goto free_res;
}
- rate = c_data->vc_format.clk_freq;
- rate = clk_round_rate(dev->vcap_clk, rate);
- if (rate <= 0) {
+ rate = c_data->vc_format.clk_freq / 100 * 102;
+ rate_rc = clk_round_rate(dev->vcap_clk, rate);
+ if (rate_rc <= 0) {
pr_err("%s: Failed core rnd_rate\n", __func__);
rc = -EINVAL;
goto free_res;
}
+ rate = (unsigned long)rate_rc;
rc = clk_set_rate(dev->vcap_clk, rate);
if (rc < 0)
goto free_res;
@@ -1171,28 +1166,30 @@
goto free_res;
config_vc_format(c_data);
+ c_data->streaming = 1;
rc = vb2_streamon(&c_data->vc_vidq, i);
if (rc < 0)
goto free_res;
break;
case VP_VCAP_OP:
- spin_lock_irqsave(&dev->dev_slock, flags);
+ mutex_lock(&dev->dev_mutex);
if (dev->vp_resource) {
pr_err("VCAP Err: %s: VP resource taken", __func__);
- spin_unlock_irqrestore(&dev->dev_slock, flags);
+ mutex_unlock(&dev->dev_mutex);
return -EBUSY;
}
dev->vp_resource = 1;
- spin_unlock_irqrestore(&dev->dev_slock, flags);
+ mutex_unlock(&dev->dev_mutex);
c_data->dev->vp_client = c_data;
rate = 160000000;
- rate = clk_round_rate(dev->vcap_clk, rate);
- if (rate <= 0) {
+ rate_rc = clk_round_rate(dev->vcap_clk, rate);
+ if (rate_rc <= 0) {
pr_err("%s: Failed core rnd_rate\n", __func__);
rc = -EINVAL;
goto free_res;
}
+ rate = (unsigned long)rate_rc;
rc = clk_set_rate(dev->vcap_clk, rate);
if (rc < 0)
goto free_res;
@@ -1216,7 +1213,7 @@
rc = init_motion_buf(c_data);
if (rc < 0)
goto free_res;
- if (c_data->vid_vp_action.nr_enabled) {
+ if (c_data->vid_vp_action.nr_param.mode) {
rc = init_nr_buf(c_data);
if (rc < 0)
goto s_on_deinit_m_buf;
@@ -1236,16 +1233,16 @@
goto s_on_deinit_nr_buf;
break;
case VC_AND_VP_VCAP_OP:
- spin_lock_irqsave(&dev->dev_slock, flags);
+ mutex_lock(&dev->dev_mutex);
if (dev->vc_resource || dev->vp_resource) {
pr_err("VCAP Err: %s: VC/VP resource taken",
__func__);
- spin_unlock_irqrestore(&dev->dev_slock, flags);
+ mutex_unlock(&dev->dev_mutex);
return -EBUSY;
}
dev->vc_resource = 1;
dev->vp_resource = 1;
- spin_unlock_irqrestore(&dev->dev_slock, flags);
+ mutex_unlock(&dev->dev_mutex);
c_data->dev->vc_client = c_data;
c_data->dev->vp_client = c_data;
@@ -1254,13 +1251,14 @@
goto free_res;
}
- rate = c_data->vc_format.clk_freq;
- rate = clk_round_rate(dev->vcap_clk, rate);
- if (rate <= 0) {
+ rate = c_data->vc_format.clk_freq / 100 * 102;
+ rate_rc = clk_round_rate(dev->vcap_clk, rate);
+ if (rate_rc <= 0) {
pr_err("%s: Failed core rnd_rate\n", __func__);
rc = -EINVAL;
goto free_res;
}
+ rate = (unsigned long)rate_rc;
rc = clk_set_rate(dev->vcap_clk, rate);
if (rc < 0)
goto free_res;
@@ -1304,7 +1302,7 @@
if (rc < 0)
goto free_res;
- if (c_data->vid_vp_action.nr_enabled) {
+ if (c_data->vid_vp_action.nr_param.mode) {
rc = init_nr_buf(c_data);
if (rc < 0)
goto s_on_deinit_m_buf;
@@ -1337,12 +1335,12 @@
return 0;
s_on_deinit_nr_buf:
- if (c_data->vid_vp_action.nr_enabled)
+ if (c_data->vid_vp_action.nr_param.mode)
deinit_nr_buf(c_data);
s_on_deinit_m_buf:
deinit_motion_buf(c_data);
free_res:
- spin_lock_irqsave(&dev->dev_slock, flags);
+ mutex_lock(&dev->dev_mutex);
if (c_data->op_mode == VC_VCAP_OP) {
dev->vc_resource = 0;
c_data->dev->vc_client = NULL;
@@ -1355,7 +1353,7 @@
dev->vc_resource = 0;
dev->vp_resource = 0;
}
- spin_unlock_irqrestore(&dev->dev_slock, flags);
+ mutex_unlock(&dev->dev_mutex);
return rc;
}
@@ -1373,13 +1371,10 @@
return 0;
}
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+int streamoff_work(struct vcap_client_data *c_data)
{
- struct vcap_client_data *c_data = to_client_data(file->private_data);
struct vcap_dev *dev = c_data->dev;
- unsigned long flags;
int rc;
-
switch (c_data->op_mode) {
case VC_VCAP_OP:
if (c_data != dev->vc_client) {
@@ -1387,32 +1382,40 @@
__func__);
return -EBUSY;
}
- spin_lock_irqsave(&dev->dev_slock, flags);
+ mutex_lock(&dev->dev_mutex);
if (!dev->vc_resource) {
pr_err("VCAP Err: %s: VC res not acquired", __func__);
- spin_unlock_irqrestore(&dev->dev_slock, flags);
+ mutex_unlock(&dev->dev_mutex);
return -EBUSY;
}
dev->vc_resource = 0;
- spin_unlock_irqrestore(&dev->dev_slock, flags);
- rc = vb2_streamoff(&c_data->vc_vidq, i);
- if (rc >= 0)
+ mutex_unlock(&dev->dev_mutex);
+ rc = vb2_streamoff(&c_data->vc_vidq,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (rc >= 0) {
+ c_data->streaming = 0;
atomic_set(&c_data->dev->vc_enabled, 0);
+ }
return rc;
case VP_VCAP_OP:
+ if (!dev->vp_dummy_complete) {
+ pr_err("VCAP Err: %s: VP dummy read not complete",
+ __func__);
+ return -EINVAL;
+ }
if (c_data != dev->vp_client) {
pr_err("VCAP Err: %s: VP held by other client",
__func__);
return -EBUSY;
}
- spin_lock_irqsave(&dev->dev_slock, flags);
+ mutex_lock(&dev->dev_mutex);
if (!dev->vp_resource) {
pr_err("VCAP Err: %s: VP res not acquired", __func__);
- spin_unlock_irqrestore(&dev->dev_slock, flags);
+ mutex_unlock(&dev->dev_mutex);
return -EBUSY;
}
dev->vp_resource = 0;
- spin_unlock_irqrestore(&dev->dev_slock, flags);
+ mutex_unlock(&dev->dev_mutex);
rc = streamoff_validate_q(&c_data->vp_in_vidq);
if (rc < 0)
return rc;
@@ -1433,26 +1436,31 @@
return rc;
deinit_motion_buf(c_data);
- if (c_data->vid_vp_action.nr_enabled)
+ if (c_data->vid_vp_action.nr_param.mode)
deinit_nr_buf(c_data);
atomic_set(&c_data->dev->vp_enabled, 0);
return rc;
case VC_AND_VP_VCAP_OP:
+ if (!dev->vp_dummy_complete) {
+ pr_err("VCAP Err: %s: VP dummy read not complete",
+ __func__);
+ return -EINVAL;
+ }
if (c_data != dev->vp_client || c_data != dev->vc_client) {
pr_err("VCAP Err: %s: VC/VP held by other client",
__func__);
return -EBUSY;
}
- spin_lock_irqsave(&dev->dev_slock, flags);
+ mutex_lock(&dev->dev_mutex);
if (!(dev->vc_resource || dev->vp_resource)) {
pr_err("VCAP Err: %s: VC or VP res not acquired",
__func__);
- spin_unlock_irqrestore(&dev->dev_slock, flags);
+ mutex_unlock(&dev->dev_mutex);
return -EBUSY;
}
dev->vc_resource = 0;
dev->vp_resource = 0;
- spin_unlock_irqrestore(&dev->dev_slock, flags);
+ mutex_unlock(&dev->dev_mutex);
rc = streamoff_validate_q(&c_data->vc_vidq);
if (rc < 0)
return rc;
@@ -1481,7 +1489,7 @@
return rc;
deinit_motion_buf(c_data);
- if (c_data->vid_vp_action.nr_enabled)
+ if (c_data->vid_vp_action.nr_param.mode)
deinit_nr_buf(c_data);
atomic_set(&c_data->dev->vc_enabled, 0);
atomic_set(&c_data->dev->vp_enabled, 0);
@@ -1490,7 +1498,12 @@
pr_err("VCAP Error: %s: Unknown Operation mode", __func__);
return -ENOTRECOVERABLE;
}
- return 0;
+}
+
+static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct vcap_client_data *c_data = to_client_data(file->private_data);
+ return streamoff_work(c_data);
}
static int vidioc_subscribe_event(struct v4l2_fh *fh,
@@ -1523,6 +1536,54 @@
return v4l2_event_unsubscribe(fh, sub);
}
+static long vidioc_default(struct file *file, void *fh, bool valid_prio,
+ int cmd, void *arg)
+{
+ struct vcap_client_data *c_data = to_client_data(file->private_data);
+ struct nr_param *param;
+ unsigned long flags = 0;
+ int ret;
+
+ switch (cmd) {
+ case VCAPIOC_NR_S_PARAMS:
+
+ if (c_data->streaming != 0 &&
+ (!(!((struct nr_param *) arg)->mode) !=
+ !(!(c_data->vid_vp_action.nr_param.mode)))) {
+ pr_err("ERR: Trying to toggle on/off while VP is already running");
+ return -EBUSY;
+ }
+
+
+ spin_lock_irqsave(&c_data->cap_slock, flags);
+ ret = nr_s_param(c_data, (struct nr_param *) arg);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&c_data->cap_slock, flags);
+ return ret;
+ }
+ param = (struct nr_param *) arg;
+ c_data->vid_vp_action.nr_param = *param;
+ if (param->mode == NR_AUTO)
+ s_default_nr_val(&c_data->vid_vp_action.nr_param);
+ c_data->vid_vp_action.nr_update = true;
+ spin_unlock_irqrestore(&c_data->cap_slock, flags);
+ break;
+ case VCAPIOC_NR_G_PARAMS:
+ *((struct nr_param *)arg) = c_data->vid_vp_action.nr_param;
+ if (c_data->vid_vp_action.nr_param.mode != NR_DISABLE) {
+ if (c_data->streaming)
+ nr_g_param(c_data, (struct nr_param *) arg);
+ else
+ (*(struct nr_param *) arg) =
+ c_data->vid_vp_action.nr_param;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/* VCAP fops */
static void *vcap_ops_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write)
@@ -1551,10 +1612,11 @@
struct vcap_dev *dev = video_drvdata(file);
struct vcap_client_data *c_data;
struct vb2_queue *q;
- unsigned long flags;
int ret;
- c_data = kzalloc(sizeof(*c_data), GFP_KERNEL);
if (!dev)
+ return -EINVAL;
+ c_data = kzalloc(sizeof(*c_data), GFP_KERNEL);
+ if (!c_data)
return -ENOMEM;
c_data->dev = dev;
@@ -1608,22 +1670,33 @@
v4l2_fh_init(&c_data->vfh, dev->vfd);
v4l2_fh_add(&c_data->vfh);
- spin_lock_irqsave(&dev->dev_slock, flags);
+ mutex_lock(&dev->dev_mutex);
atomic_inc(&dev->open_clients);
ret = atomic_read(&dev->open_clients);
- spin_unlock_irqrestore(&dev->dev_slock, flags);
if (ret == 1) {
ret = vcap_enable(dev, dev->ddev, 54860000);
if (ret < 0) {
pr_err("Err: %s: Power on vcap failed", __func__);
+ mutex_unlock(&dev->dev_mutex);
+ goto vcap_power_failed;
+ }
+
+ ret = vp_dummy_event(c_data);
+ if (ret < 0) {
+ pr_err("Err: %s: Dummy Event failed", __func__);
+ mutex_unlock(&dev->dev_mutex);
+ vcap_disable(dev);
goto vcap_power_failed;
}
}
+ mutex_unlock(&dev->dev_mutex);
file->private_data = &c_data->vfh;
return 0;
vcap_power_failed:
+ atomic_dec(&dev->open_clients);
+
v4l2_fh_del(&c_data->vfh);
v4l2_fh_exit(&c_data->vfh);
vb2_queue_release(&c_data->vp_out_vidq);
@@ -1640,18 +1713,22 @@
{
struct vcap_dev *dev = video_drvdata(file);
struct vcap_client_data *c_data = to_client_data(file->private_data);
- unsigned long flags;
int ret;
if (c_data == NULL)
return 0;
- spin_lock_irqsave(&dev->dev_slock, flags);
+ if (c_data->streaming)
+ streamoff_work(c_data);
+
+ mutex_lock(&dev->dev_mutex);
atomic_dec(&dev->open_clients);
ret = atomic_read(&dev->open_clients);
- spin_unlock_irqrestore(&dev->dev_slock, flags);
- if (ret == 0)
+ mutex_unlock(&dev->dev_mutex);
+ if (ret == 0) {
vcap_disable(dev);
+ dev->vp_dummy_complete = false;
+ }
v4l2_fh_del(&c_data->vfh);
v4l2_fh_exit(&c_data->vfh);
vb2_queue_release(&c_data->vp_out_vidq);
@@ -1755,6 +1832,7 @@
.vidioc_subscribe_event = vidioc_subscribe_event,
.vidioc_unsubscribe_event = vidioc_unsubscribe_event,
+ .vidioc_default = vidioc_default,
};
static struct video_device vcap_template = {
@@ -1894,7 +1972,8 @@
atomic_set(&dev->vp_enabled, 0);
atomic_set(&dev->open_clients, 0);
dev->ddev = &pdev->dev;
- spin_lock_init(&dev->dev_slock);
+ mutex_init(&dev->dev_mutex);
+ init_waitqueue_head(&dev->vp_dummy_waitq);
vcap_disable(dev);
dprintk(1, "Exit probe succesfully");
diff --git a/drivers/media/video/vcap_vc.h b/drivers/media/video/vcap_vc.h
index 57d13cd..792fb14 100644
--- a/drivers/media/video/vcap_vc.h
+++ b/drivers/media/video/vcap_vc.h
@@ -69,11 +69,6 @@
#define VC_BUFFER_WRITTEN (0x3 << 1)
-struct vc_reg_data {
- unsigned data;
- unsigned addr;
-};
-
int vc_start_capture(struct vcap_client_data *c_data);
int vc_hw_kick_off(struct vcap_client_data *c_data);
void vc_stop_capture(struct vcap_client_data *c_data);
diff --git a/drivers/media/video/vcap_vp.c b/drivers/media/video/vcap_vp.c
index db38902..f1f1c69 100644
--- a/drivers/media/video/vcap_vp.c
+++ b/drivers/media/video/vcap_vp.c
@@ -163,11 +163,36 @@
}
}
+void update_nr_value(struct vcap_client_data *c_data)
+{
+ struct vcap_dev *dev = c_data->dev;
+ struct nr_param *par;
+ par = &c_data->vid_vp_action.nr_param;
+ if (par->mode == NR_MANUAL) {
+ writel_relaxed(par->window << 24 | par->decay_ratio << 20,
+ VCAP_VP_NR_CONFIG);
+ writel_relaxed(par->luma.max_blend_ratio << 24 |
+ par->luma.scale_diff_ratio << 12 |
+ par->luma.diff_limit_ratio << 8 |
+ par->luma.scale_motion_ratio << 4 |
+ par->luma.blend_limit_ratio << 0,
+ VCAP_VP_NR_LUMA_CONFIG);
+ writel_relaxed(par->chroma.max_blend_ratio << 24 |
+ par->chroma.scale_diff_ratio << 12 |
+ par->chroma.diff_limit_ratio << 8 |
+ par->chroma.scale_motion_ratio << 4 |
+ par->chroma.blend_limit_ratio << 0,
+ VCAP_VP_NR_CHROMA_CONFIG);
+ }
+ c_data->vid_vp_action.nr_update = false;
+}
+
static void vp_wq_fnc(struct work_struct *work)
{
struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
struct vcap_dev *dev;
struct vp_action *vp_act;
+ unsigned long flags = 0;
uint32_t irq;
int rc;
#ifndef TOP_FIELD_FIX
@@ -180,15 +205,21 @@
return;
vp_act = &dev->vp_client->vid_vp_action;
- irq = vp_work->irq;
rc = readl_relaxed(VCAP_OFFSET(0x048));
while (!(rc & 0x00000100))
rc = readl_relaxed(VCAP_OFFSET(0x048));
+ irq = readl_relaxed(VCAP_VP_INT_STATUS);
+
writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
+ spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
+ if (vp_act->nr_update == true)
+ update_nr_value(dev->vp_client);
+ spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+
/* Queue the done buffers */
if (vp_act->vp_state == VP_NORMAL &&
vp_act->bufNR.nr_pos != TM1_BUF) {
@@ -207,7 +238,7 @@
#endif
/* Cycle Buffers*/
- if (vp_work->cd->vid_vp_action.nr_enabled) {
+ if (vp_work->cd->vid_vp_action.nr_param.mode) {
if (vp_act->bufNR.nr_pos == TM1_BUF)
vp_act->bufNR.nr_pos = BUF_NOT_IN_USE;
@@ -260,6 +291,12 @@
int rc;
irq = readl_relaxed(VCAP_VP_INT_STATUS);
+ if (dev->vp_dummy_event == true) {
+ writel_relaxed(irq, VCAP_VP_INT_CLEAR);
+ dev->vp_dummy_complete = true;
+ wake_up(&dev->vp_dummy_waitq);
+ return IRQ_HANDLED;
+ }
if (irq & 0x02000000) {
v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
@@ -283,7 +320,7 @@
}
dprintk(1, "%s: irq=0x%08x\n", __func__, irq);
- if (!(irq & VP_PIC_DONE)) {
+ if (!(irq & (VP_PIC_DONE | VP_MODE_CHANGE))) {
writel_relaxed(irq, VCAP_VP_INT_CLEAR);
pr_err("VP IRQ shows some error\n");
return IRQ_HANDLED;
@@ -307,7 +344,6 @@
INIT_WORK(&dev->vp_work.work, vp_wq_fnc);
dev->vp_work.cd = c_data;
- dev->vp_work.irq = irq;
rc = queue_work(dev->vcap_wq, &dev->vp_work.work);
disable_irq_nosync(dev->vpirq->start);
@@ -411,7 +447,7 @@
void *buf;
if (!c_data->vid_vp_action.bufMotion) {
- dprintk(1, "Motion buffer has not been created");
+ pr_err("Motion buffer has not been created");
return;
}
@@ -434,6 +470,8 @@
return -ENOEXEC;
}
buf = &c_data->vid_vp_action.bufNR;
+ if (!buf)
+ return -ENOMEM;
frame_size = c_data->vp_in_fmt.width * c_data->vp_in_fmt.height;
if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
@@ -442,9 +480,11 @@
tot_size = frame_size / 2 * 3;
buf->vaddr = kzalloc(tot_size, GFP_KERNEL);
- if (!buf)
+ if (!buf->vaddr)
return -ENOMEM;
+ update_nr_value(c_data);
+
buf->paddr = virt_to_phys(buf->vaddr);
rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
rc |= 0x02D00001;
@@ -478,6 +518,144 @@
return;
}
+int nr_s_param(struct vcap_client_data *c_data, struct nr_param *param)
+{
+ if (param->mode != NR_MANUAL)
+ return 0;
+
+ /* Verify values in range */
+ if (param->window > VP_NR_MAX_WINDOW)
+ return -EINVAL;
+ if (param->luma.max_blend_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->luma.scale_diff_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->luma.diff_limit_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->luma.scale_motion_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->luma.blend_limit_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->chroma.max_blend_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->chroma.scale_diff_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->chroma.diff_limit_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->chroma.scale_motion_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ if (param->chroma.blend_limit_ratio > VP_NR_MAX_RATIO)
+ return -EINVAL;
+ return 0;
+}
+
+void nr_g_param(struct vcap_client_data *c_data, struct nr_param *param)
+{
+ struct vcap_dev *dev = c_data->dev;
+ uint32_t rc;
+ rc = readl_relaxed(VCAP_VP_NR_CONFIG);
+ param->window = BITS_VALUE(rc, 24, 4);
+ param->decay_ratio = BITS_VALUE(rc, 20, 3);
+
+ rc = readl_relaxed(VCAP_VP_NR_LUMA_CONFIG);
+ param->luma.max_blend_ratio = BITS_VALUE(rc, 24, 4);
+ param->luma.scale_diff_ratio = BITS_VALUE(rc, 12, 4);
+ param->luma.diff_limit_ratio = BITS_VALUE(rc, 8, 4);
+ param->luma.scale_motion_ratio = BITS_VALUE(rc, 4, 4);
+ param->luma.blend_limit_ratio = BITS_VALUE(rc, 0, 4);
+
+ rc = readl_relaxed(VCAP_VP_NR_CHROMA_CONFIG);
+ param->chroma.max_blend_ratio = BITS_VALUE(rc, 24, 4);
+ param->chroma.scale_diff_ratio = BITS_VALUE(rc, 12, 4);
+ param->chroma.diff_limit_ratio = BITS_VALUE(rc, 8, 4);
+ param->chroma.scale_motion_ratio = BITS_VALUE(rc, 4, 4);
+ param->chroma.blend_limit_ratio = BITS_VALUE(rc, 0, 4);
+}
+
+void s_default_nr_val(struct nr_param *param)
+{
+ param->window = 10;
+ param->decay_ratio = 0;
+ param->luma.max_blend_ratio = 0;
+ param->luma.scale_diff_ratio = 4;
+ param->luma.diff_limit_ratio = 1;
+ param->luma.scale_motion_ratio = 4;
+ param->luma.blend_limit_ratio = 9;
+ param->chroma.max_blend_ratio = 0;
+ param->chroma.scale_diff_ratio = 4;
+ param->chroma.diff_limit_ratio = 1;
+ param->chroma.scale_motion_ratio = 4;
+ param->chroma.blend_limit_ratio = 9;
+}
+
+int vp_dummy_event(struct vcap_client_data *c_data)
+{
+ struct vcap_dev *dev = c_data->dev;
+ unsigned int width, height;
+ unsigned long paddr;
+ void *temp;
+ uint32_t reg;
+ int rc = 0;
+
+ dprintk(2, "%s: Start VP dummy event\n", __func__);
+ temp = kzalloc(0x1200, GFP_KERNEL);
+ if (!temp) {
+ pr_err("%s: Failed to alloc mem", __func__);
+ return -ENOMEM;
+ }
+ paddr = virt_to_phys(temp);
+
+ width = c_data->vp_out_fmt.width;
+ height = c_data->vp_out_fmt.height;
+
+ c_data->vp_out_fmt.width = 0x3F;
+ c_data->vp_out_fmt.height = 0x16;
+
+ config_vp_format(c_data);
+ writel_relaxed(paddr, VCAP_VP_T1_Y_BASE_ADDR);
+ writel_relaxed(paddr + 0x2C0, VCAP_VP_T1_C_BASE_ADDR);
+ writel_relaxed(paddr + 0x440, VCAP_VP_T2_Y_BASE_ADDR);
+ writel_relaxed(paddr + 0x700, VCAP_VP_T2_C_BASE_ADDR);
+ writel_relaxed(paddr + 0x880, VCAP_VP_OUT_Y_BASE_ADDR);
+ writel_relaxed(paddr + 0xB40, VCAP_VP_OUT_C_BASE_ADDR);
+ writel_iowmb(paddr + 0x1100, VCAP_VP_MOTION_EST_ADDR);
+ writel_relaxed(4 << 20 | 0x2 << 4, VCAP_VP_IN_CONFIG);
+ writel_relaxed(4 << 20 | 0x1 << 4, VCAP_VP_OUT_CONFIG);
+
+ dev->vp_dummy_event = true;
+
+ writel_relaxed(0x01100101, VCAP_VP_INTERRUPT_ENABLE);
+ writel_iowmb(0x00000000, VCAP_VP_CTRL);
+ writel_iowmb(0x00030000, VCAP_VP_CTRL);
+
+ enable_irq(dev->vpirq->start);
+ rc = wait_event_interruptible_timeout(dev->vp_dummy_waitq,
+ dev->vp_dummy_complete, msecs_to_jiffies(50));
+ if (!rc && !dev->vp_dummy_complete) {
+ pr_err("%s: VP dummy event timeout", __func__);
+ rc = -ETIME;
+ writel_iowmb(0x00000000, VCAP_VP_CTRL);
+
+ writel_iowmb(0x00000001, VCAP_VP_SW_RESET);
+ writel_iowmb(0x00000000, VCAP_VP_SW_RESET);
+ dev->vp_dummy_complete = false;
+ }
+
+ writel_relaxed(0x00000000, VCAP_VP_INTERRUPT_ENABLE);
+ disable_irq(dev->vpirq->start);
+ dev->vp_dummy_event = false;
+
+ reg = readl_relaxed(VCAP_OFFSET(0x0D94));
+ writel_relaxed(reg, VCAP_OFFSET(0x0D9C));
+
+ c_data->vp_out_fmt.width = width;
+ c_data->vp_out_fmt.height = height;
+ kfree(temp);
+
+ dprintk(2, "%s: Exit VP dummy event\n", __func__);
+ return rc;
+}
+
int kickoff_vp(struct vcap_client_data *c_data)
{
struct vcap_dev *dev;
@@ -556,7 +734,7 @@
if (c_data->vp_out_fmt.pixfmt == V4L2_PIX_FMT_NV16)
chroma_fmt = 1;
- writel_relaxed((c_data->vp_in_fmt.width / 16) << 20 |
+ writel_relaxed((c_data->vp_out_fmt.width / 16) << 20 |
chroma_fmt << 11 | 0x1 << 4, VCAP_VP_OUT_CONFIG);
/* Enable Interrupt */
diff --git a/drivers/media/video/vcap_vp.h b/drivers/media/video/vcap_vp.h
index 47ad8d4..b2b00e9 100644
--- a/drivers/media/video/vcap_vp.h
+++ b/drivers/media/video/vcap_vp.h
@@ -89,6 +89,16 @@
#define VCAP_VP_NR_T2_C_BASE_ADDR (VCAP_BASE + 0x4B8)
#define VP_PIC_DONE (0x1 << 0)
+#define VP_MODE_CHANGE (0x1 << 8)
+
+#define VP_NR_MAX_WINDOW 120
+#define VP_NR_MAX_RATIO 16
+
+#define BITS_MASK(start, num_of_bits) \
+ (((1 << (num_of_bits)) - 1) << (start))
+
+#define BITS_VALUE(x, start, num_of_bits) \
+ (((x) & BITS_MASK(start, num_of_bits)) >> (start))
irqreturn_t vp_handler(struct vcap_dev *dev);
int config_vp_format(struct vcap_client_data *c_data);
@@ -97,7 +107,11 @@
void deinit_motion_buf(struct vcap_client_data *c_data);
int init_nr_buf(struct vcap_client_data *c_data);
void deinit_nr_buf(struct vcap_client_data *c_data);
+int nr_s_param(struct vcap_client_data *c_data, struct nr_param *param);
+void nr_g_param(struct vcap_client_data *c_data, struct nr_param *param);
+void s_default_nr_val(struct nr_param *param);
int kickoff_vp(struct vcap_client_data *c_data);
int continue_vp(struct vcap_client_data *c_data);
+int vp_dummy_event(struct vcap_client_data *c_data);
#endif
diff --git a/drivers/mfd/pm8821-irq.c b/drivers/mfd/pm8821-irq.c
index 2dcc792..ff68c08 100644
--- a/drivers/mfd/pm8821-irq.c
+++ b/drivers/mfd/pm8821-irq.c
@@ -22,6 +22,7 @@
#include <linux/mfd/pm8xxx/pm8821-irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <mach/mpm.h>
#define PM8821_TOTAL_IRQ_MASTERS 2
#define PM8821_BLOCKS_PER_MASTER 7
@@ -31,7 +32,7 @@
#define PM8821_IRQ_MASK_REG_OFFSET 0x08
#define SSBI_REG_ADDR_IRQ_MASTER0 0x30
#define SSBI_REG_ADDR_IRQ_MASTER1 0xB0
-
+#define MPM_PIN_FOR_8821_IRQ 7
#define SSBI_REG_ADDR_IRQ_IT_STATUS(master_base, block) (master_base + block)
/*
@@ -410,8 +411,12 @@
devirq, rc);
kfree(chip);
return ERR_PTR(rc);
- } else
+ } else{
irq_set_irq_wake(devirq, 1);
+ msm_mpm_set_pin_wake(MPM_PIN_FOR_8821_IRQ, 1);
+ msm_mpm_set_pin_type(MPM_PIN_FOR_8821_IRQ,
+ pdata->irq_trigger_flag);
+ }
}
return chip;
diff --git a/drivers/mfd/pm8xxx-spk.c b/drivers/mfd/pm8xxx-spk.c
index 297ddfa..2de70f4 100644
--- a/drivers/mfd/pm8xxx-spk.c
+++ b/drivers/mfd/pm8xxx-spk.c
@@ -162,7 +162,10 @@
val = pm8xxx_spk_read(PM8XXX_SPK_CTL1_REG_OFF);
if (val < 0)
return val;
- val |= (enable << 3);
+ if (enable)
+ val |= (1 << 3);
+ else
+ val &= ~(1 << 3);
ret = pm8xxx_spk_write(PM8XXX_SPK_CTL1_REG_OFF, val);
if (!ret)
ret = pm8xxx_spk_bank_write(addr, 6, PWM_EN_MASK);
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 2256f67..90673fc 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -686,7 +686,7 @@
const struct i2c_device_id *id)
{
struct wcd9xxx *wcd9xxx;
- struct wcd9xxx_pdata *pdata = client->dev.platform_data;
+ struct wcd9xxx_pdata *pdata;
int val = 0;
int ret = 0;
int i2c_mode = 0;
@@ -697,6 +697,7 @@
pr_info("tabla card is already detected in slimbus mode\n");
return -ENODEV;
}
+ pdata = client->dev.platform_data;
if (device_id > 0) {
wcd9xxx_modules[device_id++].client = client;
pr_info("probe for other slaves devices of tabla\n");
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index d4eb6e0..8354aa8 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -609,10 +609,24 @@
return -EINVAL;
}
- if (resp.result == QSEOS_RESULT_FAILURE)
- return 0;
- else
- return resp.data;
+ if (resp.result == QSEOS_RESULT_FAILURE) {
+ return 0;
+ } else {
+ switch (resp.resp_type) {
+ /*qsee returned listener type response */
+ case QSEOS_LISTENER_ID:
+ pr_err("resp type is of listener type instead of app");
+ return -EINVAL;
+ break;
+ case QSEOS_APP_ID:
+ return resp.data;
+ default:
+ pr_err("invalid resp type (%d) from qsee",
+ resp.resp_type);
+ return -ENODEV;
+ break;
+ }
+ }
}
static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
@@ -627,6 +641,8 @@
uint32_t len;
struct qseecom_command_scm_resp resp;
struct qseecom_check_app_ireq req;
+ struct qseecom_load_app_ireq load_req;
+
/* Copy the relevant information needed for loading the image */
if (__copy_from_user(&load_img_req,
(void __user *)argp,
@@ -642,108 +658,86 @@
req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
memcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
- ret = __qseecom_check_app_exists(req);
- if (ret < 0)
- return ret;
- else
- app_id = ret;
-
- if (app_id) {
- pr_warn("App id %d (%s) already exists\n", app_id,
+ pr_warn("App (%s) does not exist, loading apps for first time\n",
(char *)(req.app_name));
- spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
- list_for_each_entry(entry,
- &qseecom.registered_app_list_head, list){
- if (entry->app_id == app_id) {
- entry->ref_cnt++;
- break;
- }
- }
- spin_unlock_irqrestore(
- &qseecom.registered_app_list_lock, flags);
- } else {
- struct qseecom_load_app_ireq load_req;
-
- pr_warn("App (%s) does not exist, loading apps for first time\n",
- (char *)(req.app_name));
- /* Get the handle of the shared fd */
- ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+ /* Get the handle of the shared fd */
+ ihandle = ion_import_dma_buf(qseecom.ion_clnt,
load_img_req.ifd_data_fd);
- if (IS_ERR_OR_NULL(ihandle)) {
- pr_err("Ion client could not retrieve the handle\n");
- qsee_disable_clock_vote(CLK_SFPB);
- return -ENOMEM;
- }
+ if (IS_ERR_OR_NULL(ihandle)) {
+ pr_err("Ion client could not retrieve the handle\n");
+ qsee_disable_clock_vote(CLK_SFPB);
+ return -ENOMEM;
+ }
- /* Get the physical address of the ION BUF */
- ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+ /* Get the physical address of the ION BUF */
+ ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
- /* Populate the structure for sending scm call to load image */
- load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
- load_req.mdt_len = load_img_req.mdt_len;
- load_req.img_len = load_img_req.img_len;
- load_req.phy_addr = pa;
+ /* Populate the structure for sending scm call to load image */
+ load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+ load_req.mdt_len = load_img_req.mdt_len;
+ load_req.img_len = load_img_req.img_len;
+ load_req.phy_addr = pa;
- /* SCM_CALL to load the app and get the app_id back */
- ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &load_req,
- sizeof(struct qseecom_load_app_ireq),
- &resp, sizeof(resp));
- if (ret) {
- pr_err("scm_call to load app failed\n");
- return -EINVAL;
- }
+ /* SCM_CALL to load the app and get the app_id back */
+ ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &load_req,
+ sizeof(struct qseecom_load_app_ireq),
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call to load app failed\n");
+ return -EINVAL;
+ }
- if (resp.result == QSEOS_RESULT_FAILURE) {
- pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
- if (!IS_ERR_OR_NULL(ihandle))
- ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(CLK_SFPB);
- return -EFAULT;
- }
-
- if (resp.result == QSEOS_RESULT_INCOMPLETE) {
- ret = __qseecom_process_incomplete_cmd(data, &resp);
- if (ret) {
- pr_err("process_incomplete_cmd failed err: %d\n",
- ret);
- if (!IS_ERR_OR_NULL(ihandle))
- ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(CLK_SFPB);
- return ret;
- }
- }
- if (resp.result != QSEOS_RESULT_SUCCESS) {
- pr_err("scm_call failed resp.result unknown, %d\n",
- resp.result);
- if (!IS_ERR_OR_NULL(ihandle))
- ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(CLK_SFPB);
- return -EFAULT;
- }
-
- app_id = resp.data;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- pr_err("kmalloc failed\n");
- qsee_disable_clock_vote(CLK_SFPB);
- return -ENOMEM;
- }
- entry->app_id = app_id;
- entry->ref_cnt = 1;
-
- /* Deallocate the handle */
+ if (resp.result == QSEOS_RESULT_FAILURE) {
+ pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
-
- spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
- list_add_tail(&entry->list, &qseecom.registered_app_list_head);
- spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
- flags);
-
- pr_warn("App with id %d (%s) now loaded\n", app_id,
- (char *)(req.app_name));
+ qsee_disable_clock_vote(CLK_SFPB);
+ return -EFAULT;
}
+
+ if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ pr_err("process_incomplete_cmd failed err: %d\n",
+ ret);
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ qsee_disable_clock_vote(CLK_SFPB);
+ return ret;
+ }
+ }
+
+ if (resp.result != QSEOS_RESULT_SUCCESS) {
+ pr_err("scm_call failed resp.result unknown, %d\n",
+ resp.result);
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ qsee_disable_clock_vote(CLK_SFPB);
+ return -EFAULT;
+ }
+
+ app_id = resp.data;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ pr_err("kmalloc failed\n");
+ qsee_disable_clock_vote(CLK_SFPB);
+ return -ENOMEM;
+ }
+ entry->app_id = app_id;
+ entry->ref_cnt = 1;
+
+ /* Deallocate the handle */
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+ spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+
+ pr_warn("App with id %d (%s) now loaded\n", app_id,
+ (char *)(req.app_name));
+
data->client.app_id = app_id;
load_img_req.app_id = app_id;
if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
@@ -1432,6 +1426,8 @@
int32_t ret;
struct qseecom_qseos_app_load_query query_req;
struct qseecom_check_app_ireq req;
+ struct qseecom_registered_app_list *entry = NULL;
+ unsigned long flags = 0;
/* Copy the relevant information needed for loading the image */
if (__copy_from_user(&query_req,
@@ -1445,11 +1441,30 @@
memcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
ret = __qseecom_check_app_exists(req);
- if (ret == -EINVAL) {
+
+ if ((ret == -EINVAL) || (ret == -ENODEV)) {
pr_err(" scm call to check if app is loaded failed");
return ret; /* scm call failed */
} else if (ret > 0) {
- pr_err("app is already loaded in QSEE");
+ pr_warn("App id %d (%s) already exists\n", ret,
+ (char *)(req.app_name));
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_for_each_entry(entry,
+ &qseecom.registered_app_list_head, list){
+ if (entry->app_id == ret) {
+ entry->ref_cnt++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(
+ &qseecom.registered_app_list_lock, flags);
+ data->client.app_id = ret;
+ query_req.app_id = ret;
+
+ if (copy_to_user(argp, &query_req, sizeof(query_req))) {
+ pr_err("copy_to_user failed\n");
+ return -EFAULT;
+ }
return -EEXIST; /* app already loaded */
} else {
return 0; /* app not loaded */
diff --git a/drivers/misc/tsif.c b/drivers/misc/tsif.c
index aeda38c..7e59c98 100644
--- a/drivers/misc/tsif.c
+++ b/drivers/misc/tsif.c
@@ -169,6 +169,7 @@
dma_addr_t dmov_cmd_dma[2];
struct tsif_xfer xfer[2];
struct tasklet_struct dma_refill;
+ struct tasklet_struct clocks_off;
/* statistics */
u32 stat_rx;
u32 stat_overflow;
@@ -251,18 +252,24 @@
{
if (on) {
if (tsif_device->tsif_clk)
- clk_enable(tsif_device->tsif_clk);
+ clk_prepare_enable(tsif_device->tsif_clk);
if (tsif_device->tsif_pclk)
- clk_enable(tsif_device->tsif_pclk);
- clk_enable(tsif_device->tsif_ref_clk);
+ clk_prepare_enable(tsif_device->tsif_pclk);
+ clk_prepare_enable(tsif_device->tsif_ref_clk);
} else {
if (tsif_device->tsif_clk)
- clk_disable(tsif_device->tsif_clk);
+ clk_disable_unprepare(tsif_device->tsif_clk);
if (tsif_device->tsif_pclk)
- clk_disable(tsif_device->tsif_pclk);
- clk_disable(tsif_device->tsif_ref_clk);
+ clk_disable_unprepare(tsif_device->tsif_pclk);
+ clk_disable_unprepare(tsif_device->tsif_ref_clk);
}
}
+
+static void tsif_clocks_off(unsigned long data)
+{
+ struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data;
+ tsif_clock(tsif_device, 0);
+}
/* ===clocks end=== */
/* ===gpio begin=== */
@@ -605,17 +612,15 @@
if (tsif_device->state == tsif_state_running) {
tsif_stop_hw(tsif_device);
/*
- * Clocks _may_ be stopped right from IRQ
- * context. This is far from optimal w.r.t
- * latency.
- *
- * But, this branch taken only in case of
+ * This branch is taken only in case of
* severe hardware problem (I don't even know
- * what should happens for DMOV_RSLT_ERROR);
+ * what should happen for DMOV_RSLT_ERROR);
* thus I prefer code simplicity over
* performance.
+ * Clocks are turned off from outside the
+ * interrupt context.
*/
- tsif_clock(tsif_device, 0);
+ tasklet_schedule(&tsif_device->clocks_off);
tsif_device->state = tsif_state_flushing;
}
}
@@ -1313,6 +1318,8 @@
tsif_device->chunks_per_buf = TSIF_CHUNKS_IN_BUF_DEFAULT;
tasklet_init(&tsif_device->dma_refill, tsif_dma_refill,
(unsigned long)tsif_device);
+ tasklet_init(&tsif_device->clocks_off, tsif_clocks_off,
+ (unsigned long)tsif_device);
if (tsif_get_clocks(tsif_device))
goto err_clocks;
/* map I/O memory */
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a496df0..c785a7e 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1231,6 +1231,7 @@
brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
brq->data.blocks = blk_rq_sectors(req);
+ brq->data.fault_injected = false;
/*
* The block layer doesn't support all sector count
* restrictions, so we need to be prepared for too big
@@ -1664,6 +1665,7 @@
brq->data.blksz = 512;
brq->data.blocks = mqrq->packed_blocks + 1;
brq->data.flags |= MMC_DATA_WRITE;
+ brq->data.fault_injected = false;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
@@ -1708,11 +1710,12 @@
*/
if (mmc_card_sd(card)) {
u32 blocks;
-
- blocks = mmc_sd_num_wr_blocks(card);
- if (blocks != (u32)-1) {
- ret = blk_end_request(req, 0, blocks << 9);
- }
+ if (!brq->data.fault_injected) {
+ blocks = mmc_sd_num_wr_blocks(card);
+ if (blocks != (u32)-1)
+ ret = blk_end_request(req, 0, blocks << 9);
+ } else
+ ret = blk_end_request(req, 0, brq->data.bytes_xfered);
} else {
if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
ret = blk_end_request(req, 0, brq->data.bytes_xfered);
diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c
index 3d4b477..871675c 100644
--- a/drivers/mmc/card/mmc_block_test.c
+++ b/drivers/mmc/card/mmc_block_test.c
@@ -33,11 +33,16 @@
#define PACKED_HDR_RW_MASK 0x0000FF00
#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
+#define SECTOR_SIZE 512
+#define NUM_OF_SECTORS_PER_BIO ((BIO_U32_SIZE * 4) / SECTOR_SIZE)
+#define BIO_TO_SECTOR(x) (x * NUM_OF_SECTORS_PER_BIO)
#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
+#define SANITIZE_TEST_TIMEOUT 240000
+
enum is_random {
NON_RANDOM_TEST,
RANDOM_TEST,
@@ -102,6 +107,8 @@
TEST_PACK_MIX_PACKED_NO_PACKED_PACKED,
TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
+
+ TEST_WRITE_DISCARD_SANITIZE_READ,
};
enum mmc_block_test_group {
@@ -119,6 +126,7 @@
struct dentry *send_invalid_packed_test;
struct dentry *random_test_seed;
struct dentry *packing_control_test;
+ struct dentry *discard_sanitize_test;
};
struct mmc_block_test_data {
@@ -497,6 +505,8 @@
return "\nTest packing control - mix: pack -> no pack -> pack";
case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
return "\nTest packing control - mix: no pack->pack->no pack";
+ case TEST_WRITE_DISCARD_SANITIZE_READ:
+ return "\nTest write, discard, sanitize";
default:
return "Unknown testcase";
}
@@ -1383,6 +1393,63 @@
return 0;
}
+static void pseudo_rnd_sector_and_size(unsigned int *seed,
+ unsigned int min_start_sector,
+ unsigned int *start_sector,
+ unsigned int *num_of_bios)
+{
+ unsigned int max_sec = min_start_sector + TEST_MAX_SECTOR_RANGE;
+ do {
+ *start_sector = pseudo_random_seed(seed,
+ 1, max_sec);
+ *num_of_bios = pseudo_random_seed(seed,
+ 1, TEST_MAX_BIOS_PER_REQ);
+ if (!(*num_of_bios))
+ *num_of_bios = 1;
+ } while ((*start_sector < min_start_sector) ||
+ (*start_sector + (*num_of_bios * BIO_U32_SIZE * 4)) > max_sec);
+}
+
+/* sanitize test functions */
+static int prepare_write_discard_sanitize_read(struct test_data *td)
+{
+ unsigned int start_sector;
+ unsigned int num_of_bios = 0;
+ static unsigned int total_bios;
+ unsigned int *num_bios_seed;
+ int i = 0;
+
+ if (mbtd->random_test_seed == 0) {
+ mbtd->random_test_seed =
+ (unsigned int)(get_jiffies_64() & 0xFFFF);
+ test_pr_info("%s: got seed from jiffies %d",
+ __func__, mbtd->random_test_seed);
+ }
+ num_bios_seed = &mbtd->random_test_seed;
+
+ do {
+ pseudo_rnd_sector_and_size(num_bios_seed, td->start_sector,
+ &start_sector, &num_of_bios);
+
+ /* DISCARD */
+ total_bios += num_of_bios;
+ test_pr_info("%s: discard req: id=%d, startSec=%d, NumBios=%d",
+ __func__, td->unique_next_req_id, start_sector,
+ num_of_bios);
+ test_iosched_add_unique_test_req(0, REQ_UNIQUE_DISCARD,
+ start_sector, BIO_TO_SECTOR(num_of_bios),
+ NULL);
+
+ } while (++i < (BLKDEV_MAX_RQ-10));
+
+ test_pr_info("%s: total discard bios = %d", __func__, total_bios);
+
+ test_pr_info("%s: add sanitize req", __func__);
+ test_iosched_add_unique_test_req(0, REQ_UNIQUE_SANITIZE, 0, 0, NULL);
+
+ return 0;
+}
+
static bool message_repeat;
static int test_open(struct inode *inode, struct file *file)
{
@@ -1810,6 +1877,49 @@
.read = write_packing_control_test_read,
};
+static ssize_t write_discard_sanitize_test_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret = 0;
+ int i = 0;
+ int number = -1;
+
+ sscanf(buf, "%d", &number);
+ if (number <= 0)
+ number = 1;
+
+ test_pr_info("%s: -- write_discard_sanitize TEST --\n", __func__);
+
+ memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+ mbtd->test_group = TEST_GENERAL_GROUP;
+
+ mbtd->test_info.data = mbtd;
+ mbtd->test_info.prepare_test_fn = prepare_write_discard_sanitize_read;
+ mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+ mbtd->test_info.timeout_msec = SANITIZE_TEST_TIMEOUT;
+
+ for (i = 0 ; i < number ; ++i) {
+ test_pr_info("%s: Cycle # %d / %d\n", __func__, i+1, number);
+ test_pr_info("%s: ===================", __func__);
+
+ mbtd->test_info.testcase = TEST_WRITE_DISCARD_SANITIZE_READ;
+ ret = test_iosched_start_test(&mbtd->test_info);
+
+ if (ret)
+ break;
+ }
+
+ return count;
+}
+
+const struct file_operations write_discard_sanitize_test_ops = {
+ .open = test_open,
+ .write = write_discard_sanitize_test_write,
+};
+
static void mmc_block_test_debugfs_cleanup(void)
{
debugfs_remove(mbtd->debug.random_test_seed);
@@ -1817,6 +1927,7 @@
debugfs_remove(mbtd->debug.err_check_test);
debugfs_remove(mbtd->debug.send_invalid_packed_test);
debugfs_remove(mbtd->debug.packing_control_test);
+ debugfs_remove(mbtd->debug.discard_sanitize_test);
}
static int mmc_block_test_debugfs_init(void)
@@ -1878,6 +1989,17 @@
if (!mbtd->debug.packing_control_test)
goto err_nomem;
+ mbtd->debug.discard_sanitize_test =
+ debugfs_create_file("write_discard_sanitize_test",
+ S_IRUGO | S_IWUGO,
+ tests_root,
+ NULL,
+ &write_discard_sanitize_test_ops);
+ if (!mbtd->debug.discard_sanitize_test) {
+ mmc_block_test_debugfs_cleanup();
+ return -ENOMEM;
+ }
+
return 0;
err_nomem:
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 2479fcf..d13b914 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -119,6 +119,7 @@
data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
+ data->fault_injected = true;
}
#else /* CONFIG_FAIL_MMC_REQUEST */
@@ -369,7 +370,7 @@
struct mmc_command *cmd;
while (1) {
- wait_for_completion(&mrq->completion);
+ wait_for_completion_io(&mrq->completion);
cmd = mrq->cmd;
if (!cmd->error || !cmd->retries ||
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index d1d8358..b22e2f0 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -399,6 +400,54 @@
host->dummy_52_needed = 0;
}
+static void msmsdcc_reset_dpsm(struct msmsdcc_host *host)
+{
+ struct mmc_request *mrq = host->curr.mrq;
+
+ if (!mrq || !mrq->cmd || (!mrq->data && !host->pending_dpsm_reset))
+ goto out;
+
+ /*
+ * For CMD24, if auto prog done is not supported defer
+ * dpsm reset until prog done is received. Otherwise,
+ * we poll here unnecessarily as TXACTIVE will not be
+ * deasserted until DAT0 goes high.
+ */
+ if ((mrq->cmd->opcode == MMC_WRITE_BLOCK) && !is_auto_prog_done(host)) {
+ host->pending_dpsm_reset = true;
+ goto out;
+ }
+
+ /* Make sure h/w (TX/RX) is inactive before resetting DPSM */
+ if (is_wait_for_tx_rx_active(host)) {
+ ktime_t start = ktime_get();
+
+ while (readl_relaxed(host->base + MMCISTATUS) &
+ (MCI_TXACTIVE | MCI_RXACTIVE)) {
+ /*
+ * TX/RX active bits may be asserted for 4HCLK + 4MCLK
+ * cycles (~11us) after data transfer due to clock mux
+ * switching delays. Let's poll for 1ms and panic if
+ * still active.
+ */
+ if (ktime_to_us(ktime_sub(ktime_get(), start)) > 1000) {
+ pr_err("%s: %s still active\n",
+ mmc_hostname(host->mmc),
+ readl_relaxed(host->base + MMCISTATUS)
+ & MCI_TXACTIVE ? "TX" : "RX");
+ msmsdcc_dump_sdcc_state(host);
+ BUG();
+ }
+ }
+ }
+
+ writel_relaxed(0, host->base + MMCIDATACTRL);
+ msmsdcc_sync_reg_wr(host); /* Allow the DPSM to be reset */
+ host->pending_dpsm_reset = false;
+out:
+ return;
+}
+
static int
msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
{
@@ -413,6 +462,8 @@
if (mrq->cmd->error == -ETIMEDOUT)
mdelay(5);
+ msmsdcc_reset_dpsm(host);
+
/* Clear current request information as current request has ended */
memset(&host->curr, 0, sizeof(struct msmsdcc_curr_req));
@@ -434,8 +485,6 @@
host->curr.got_dataend = 0;
host->curr.wait_for_auto_prog_done = false;
host->curr.got_auto_prog_done = false;
- writel_relaxed(0, host->base + MMCIDATACTRL);
- msmsdcc_sync_reg_wr(host); /* Allow the DPSM to be reset */
}
static inline uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
@@ -581,6 +630,7 @@
if (!mrq->data->stop || mrq->cmd->error ||
(mrq->sbc && !mrq->data->error)) {
mrq->data->bytes_xfered = host->curr.data_xfered;
+ msmsdcc_reset_dpsm(host);
del_timer(&host->req_tout_timer);
/*
* Clear current request information as current
@@ -741,6 +791,7 @@
if (!mrq->data->stop || mrq->cmd->error ||
(mrq->sbc && !mrq->data->error)) {
mrq->data->bytes_xfered = host->curr.data_xfered;
+ msmsdcc_reset_dpsm(host);
del_timer(&host->req_tout_timer);
/*
* Clear current request information as current
@@ -1135,13 +1186,19 @@
}
}
- /* Clear CDR_EN bit for write operations */
- if (host->tuning_needed && cmd->mrq->data &&
- (cmd->mrq->data->flags & MMC_DATA_WRITE))
- writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) &
- ~MCI_CDR_EN), host->base + MCI_DLL_CONFIG);
+ if (cmd->mrq->data && (cmd->mrq->data->flags & MMC_DATA_READ))
+ writel_relaxed((readl_relaxed(host->base +
+ MCI_DLL_CONFIG) | MCI_CDR_EN),
+ host->base + MCI_DLL_CONFIG);
+ else
+ /* Clear CDR_EN bit for non read operations */
+ writel_relaxed((readl_relaxed(host->base +
+ MCI_DLL_CONFIG) & ~MCI_CDR_EN),
+ host->base + MCI_DLL_CONFIG);
- if ((cmd->flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+ if (((cmd->flags & MMC_RSP_R1B) == MMC_RSP_R1B) ||
+ (cmd->opcode == MMC_SEND_STATUS &&
+ !(cmd->flags & MMC_CMD_ADTC))) {
*c |= MCI_CPSM_PROGENA;
host->prog_enable = 1;
}
@@ -2092,7 +2149,14 @@
if (mrq->data && (mrq->data->flags & MMC_DATA_WRITE)) {
if (is_auto_prog_done(host)) {
- if (!mrq->stop)
+ /*
+ * Auto-prog done will be enabled for following cases:
+ * mrq->sbc | mrq->stop
+ * _____________|________________
+ * True | Don't care
+ * False | False (CMD24, ACMD25 use case)
+ */
+ if (mrq->sbc || !mrq->stop)
host->curr.wait_for_auto_prog_done = true;
} else {
if ((mrq->cmd->opcode == SD_IO_RW_EXTENDED) ||
@@ -2321,8 +2385,10 @@
struct msm_mmc_reg_data *vreg_table[2];
curr_slot = host->plat->vreg_data;
- if (!curr_slot)
+ if (!curr_slot) {
+ rc = -EINVAL;
goto out;
+ }
vreg_table[0] = curr_slot->vdd_data;
vreg_table[1] = curr_slot->vdd_io_data;
@@ -2529,6 +2595,12 @@
curr = host->plat->pin_data->gpio_data;
for (i = 0; i < curr->size; i++) {
+ if (!gpio_is_valid(curr->gpio[i].no)) {
+ rc = -EINVAL;
+ pr_err("%s: Invalid gpio = %d\n",
+ mmc_hostname(host->mmc), curr->gpio[i].no);
+ goto free_gpios;
+ }
if (enable) {
if (curr->gpio[i].is_always_on &&
curr->gpio[i].is_enabled)
@@ -2553,7 +2625,7 @@
goto out;
free_gpios:
- for (; i >= 0; i--) {
+ for (i--; i >= 0; i--) {
gpio_free(curr->gpio[i].no);
curr->gpio[i].is_enabled = false;
}
@@ -3069,8 +3141,10 @@
* Select the controller timing mode according
* to current bus speed mode
*/
- if ((ios->timing == MMC_TIMING_UHS_SDR104) ||
- (ios->timing == MMC_TIMING_MMC_HS200)) {
+ if (host->clk_rate > (100 * 1000 * 1000) &&
+ (ios->timing == MMC_TIMING_UHS_SDR104 ||
+ ios->timing == MMC_TIMING_MMC_HS200)) {
+ /* Card clock frequency must be > 100MHz to enable tuning */
clk |= (4 << 14);
host->tuning_needed = 1;
} else if (ios->timing == MMC_TIMING_UHS_DDR50) {
@@ -3151,7 +3225,7 @@
if (host->plat->wpswitch) {
status = host->plat->wpswitch(mmc_dev(mmc));
- } else if (host->plat->wpswitch_gpio) {
+ } else if (gpio_is_valid(host->plat->wpswitch_gpio)) {
status = gpio_request(host->plat->wpswitch_gpio,
"SD_WP_Switch");
if (status) {
@@ -3916,6 +3990,51 @@
return rc;
}
+/*
+ * Work around the lack of a power_reset capability in SD cards by
+ * turning OFF and back ON the regulators supplying the SD card.
+ */
+void msmsdcc_hw_reset(struct mmc_host *mmc)
+{
+ struct mmc_card *card = mmc->card;
+ struct msmsdcc_host *host = mmc_priv(mmc);
+ int rc;
+
+ /* Write-protection bits would be lost on a hardware reset in emmc */
+ if (!card || !mmc_card_sd(card))
+ return;
+
+ /*
+ * Continuing on failing to disable regulator would lead to a panic
+ * anyway, since the commands would fail and console would be flooded
+ * with prints, eventually leading to a watchdog bark
+ */
+ rc = msmsdcc_setup_vreg(host, false, false);
+ if (rc) {
+ pr_err("%s: %s disable regulator: failed: %d\n",
+ mmc_hostname(mmc), __func__, rc);
+ BUG_ON(rc);
+ }
+
+ /* 10ms delay for the supply to reach the desired voltage level */
+ usleep_range(10000, 12000);
+
+ /*
+ * Continuing on failing to enable regulator would lead to a panic
+ * anyway, since the commands would fail and console would be flooded
+ * with prints, eventually leading to a watchdog bark
+ */
+ rc = msmsdcc_setup_vreg(host, true, false);
+ if (rc) {
+ pr_err("%s: %s enable regulator: failed: %d\n",
+ mmc_hostname(mmc), __func__, rc);
+ BUG_ON(rc);
+ }
+
+ /* 10ms delay for the supply to reach the desired voltage level */
+ usleep_range(10000, 12000);
+}
+
static const struct mmc_host_ops msmsdcc_ops = {
.enable = msmsdcc_enable,
.disable = msmsdcc_disable,
@@ -3926,7 +4045,8 @@
.get_ro = msmsdcc_get_ro,
.enable_sdio_irq = msmsdcc_enable_sdio_irq,
.start_signal_voltage_switch = msmsdcc_switch_io_voltage,
- .execute_tuning = msmsdcc_execute_tuning
+ .execute_tuning = msmsdcc_execute_tuning,
+ .hw_reset = msmsdcc_hw_reset,
};
static unsigned int
@@ -3957,7 +4077,7 @@
struct msmsdcc_host *host = (struct msmsdcc_host *)data;
unsigned int status;
- if (host->plat->status || host->plat->status_gpio) {
+ if (host->plat->status || gpio_is_valid(host->plat->status_gpio)) {
if (host->plat->status)
status = host->plat->status(mmc_dev(host->mmc));
else
@@ -4605,6 +4725,35 @@
return count;
}
+static ssize_t
+show_idle_timeout(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msmsdcc_host *host = mmc_priv(mmc);
+
+ return snprintf(buf, PAGE_SIZE, "%u (Min 5 sec)\n",
+ host->idle_tout_ms / 1000);
+}
+
+static ssize_t
+store_idle_timeout(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msmsdcc_host *host = mmc_priv(mmc);
+ unsigned int long flags;
+ int timeout; /* in secs */
+
+ if (!kstrtou32(buf, 0, &timeout)
+ && (timeout > MSM_MMC_DEFAULT_IDLE_TIMEOUT / 1000)) {
+ spin_lock_irqsave(&host->lock, flags);
+ host->idle_tout_ms = timeout * 1000;
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+ return count;
+}
+
#ifdef CONFIG_HAS_EARLYSUSPEND
static void msmsdcc_early_suspend(struct early_suspend *h)
{
@@ -4749,6 +4898,319 @@
spin_unlock_irqrestore(&host->lock, flags);
}
+/*
+ * msmsdcc_dt_get_array - Wrapper fn to read an array of 32 bit integers
+ *
+ * @dev: device node from which the property value is to be read.
+ * @prop_name: name of the property to be searched.
+ * @out_array: filled array returned to caller
+ * @len: filled array size returned to caller
+ * @size: expected size of the array
+ *
+ * If the expected "size" doesn't match "len", an error is returned. If the
+ * expected size is zero, the length of the actual array is returned provided
+ * return value is zero.
+ *
+ * RETURNS:
+ * zero on success, negative error if failed.
+ */
+static int msmsdcc_dt_get_array(struct device *dev, const char *prop_name,
+ u32 **out_array, int *len, int size)
+{
+ int ret = 0;
+ u32 *array = NULL;
+ struct device_node *np = dev->of_node;
+
+ if (of_get_property(np, prop_name, len)) {
+ size_t sz;
+ sz = *len = *len / sizeof(*array);
+
+ if (sz > 0 && !(size > 0 && (sz != size))) {
+ array = devm_kzalloc(dev, sz * sizeof(*array),
+ GFP_KERNEL);
+ if (!array) {
+ dev_err(dev, "%s: no memory\n", prop_name);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = of_property_read_u32_array(np, prop_name,
+ array, sz);
+ if (ret < 0) {
+ dev_err(dev, "%s: error reading array %d\n",
+ prop_name, ret);
+ goto out;
+ }
+ } else {
+ dev_err(dev, "%s invalid size\n", prop_name);
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ dev_err(dev, "%s not specified\n", prop_name);
+ ret = -EINVAL;
+ goto out;
+ }
+ *out_array = array;
+out:
+ if (ret)
+ *len = 0;
+ return ret;
+}
+
+static int msmsdcc_dt_get_pad_pull_info(struct device *dev, int id,
+ struct msm_mmc_pad_pull_data **pad_pull_data)
+{
+ int ret = 0, base = 0, len, i;
+ u32 *tmp;
+ struct msm_mmc_pad_pull_data *pull_data;
+ struct msm_mmc_pad_pull *pull;
+
+ switch (id) {
+ case 1:
+ base = TLMM_PULL_SDC1_CLK;
+ break;
+ case 2:
+ base = TLMM_PULL_SDC2_CLK;
+ break;
+ case 3:
+ base = TLMM_PULL_SDC3_CLK;
+ break;
+ case 4:
+ base = TLMM_PULL_SDC4_CLK;
+ break;
+ default:
+ dev_err(dev, "%s: Invalid slot id\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ pull_data = devm_kzalloc(dev, sizeof(struct msm_mmc_pad_pull_data),
+ GFP_KERNEL);
+ if (!pull_data) {
+ dev_err(dev, "No memory msm_mmc_pad_pull_data\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ pull_data->size = 3; /* array size for clk, cmd, data */
+
+ /* Allocate on, off configs for clk, cmd, data */
+ pull = devm_kzalloc(dev, 2 * pull_data->size *\
+ sizeof(struct msm_mmc_pad_pull), GFP_KERNEL);
+ if (!pull) {
+ dev_err(dev, "No memory for msm_mmc_pad_pull\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ pull_data->on = pull;
+ pull_data->off = pull + pull_data->size;
+
+ ret = msmsdcc_dt_get_array(dev, "qcom,sdcc-pad-pull-on",
+ &tmp, &len, pull_data->size);
+ if (!ret) {
+ for (i = 0; i < len; i++) {
+ pull_data->on[i].no = base + i;
+ pull_data->on[i].val = tmp[i];
+ dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
+ i, pull_data->on[i].val);
+ }
+ } else {
+ goto err;
+ }
+
+ ret = msmsdcc_dt_get_array(dev, "qcom,sdcc-pad-pull-off",
+ &tmp, &len, pull_data->size);
+ if (!ret) {
+ for (i = 0; i < len; i++) {
+ pull_data->off[i].no = base + i;
+ pull_data->off[i].val = tmp[i];
+ dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
+ i, pull_data->off[i].val);
+ }
+ } else {
+ goto err;
+ }
+
+ *pad_pull_data = pull_data;
+err:
+ return ret;
+}
+
+static int msmsdcc_dt_get_pad_drv_info(struct device *dev, int id,
+ struct msm_mmc_pad_drv_data **pad_drv_data)
+{
+ int ret = 0, base = 0, len, i;
+ u32 *tmp;
+ struct msm_mmc_pad_drv_data *drv_data;
+ struct msm_mmc_pad_drv *drv;
+
+ switch (id) {
+ case 1:
+ base = TLMM_HDRV_SDC1_CLK;
+ break;
+ case 2:
+ base = TLMM_HDRV_SDC2_CLK;
+ break;
+ case 3:
+ base = TLMM_HDRV_SDC3_CLK;
+ break;
+ case 4:
+ base = TLMM_HDRV_SDC4_CLK;
+ break;
+ default:
+ dev_err(dev, "%s: Invalid slot id\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ drv_data = devm_kzalloc(dev, sizeof(struct msm_mmc_pad_drv_data),
+ GFP_KERNEL);
+ if (!drv_data) {
+ dev_err(dev, "No memory for msm_mmc_pad_drv_data\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ drv_data->size = 3; /* array size for clk, cmd, data */
+
+ /* Allocate on, off configs for clk, cmd, data */
+ drv = devm_kzalloc(dev, 2 * drv_data->size *\
+ sizeof(struct msm_mmc_pad_drv), GFP_KERNEL);
+ if (!drv) {
+ dev_err(dev, "No memory msm_mmc_pad_drv\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ drv_data->on = drv;
+ drv_data->off = drv + drv_data->size;
+
+ ret = msmsdcc_dt_get_array(dev, "qcom,sdcc-pad-drv-on",
+ &tmp, &len, drv_data->size);
+ if (!ret) {
+ for (i = 0; i < len; i++) {
+ drv_data->on[i].no = base + i;
+ drv_data->on[i].val = tmp[i];
+ dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
+ i, drv_data->on[i].val);
+ }
+ } else {
+ goto err;
+ }
+
+ ret = msmsdcc_dt_get_array(dev, "qcom,sdcc-pad-drv-off",
+ &tmp, &len, drv_data->size);
+ if (!ret) {
+ for (i = 0; i < len; i++) {
+ drv_data->off[i].no = base + i;
+ drv_data->off[i].val = tmp[i];
+ dev_dbg(dev, "%s: val[%d]=0x%x\n", __func__,
+ i, drv_data->off[i].val);
+ }
+ } else {
+ goto err;
+ }
+
+ *pad_drv_data = drv_data;
+err:
+ return ret;
+}
+
+static void msmsdcc_dt_get_cd_wp_gpio(struct device *dev,
+ struct mmc_platform_data *pdata)
+{
+ enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
+ struct device_node *np = dev->of_node;
+
+ pdata->status_gpio = of_get_named_gpio_flags(np,
+ "cd-gpios", 0, &flags);
+ if (gpio_is_valid(pdata->status_gpio)) {
+ pdata->status_irq = gpio_to_irq(pdata->status_gpio);
+ pdata->is_status_gpio_active_low = flags & OF_GPIO_ACTIVE_LOW;
+ }
+
+ pdata->wpswitch_gpio = of_get_named_gpio_flags(np,
+ "wp-gpios", 0, &flags);
+ if (gpio_is_valid(pdata->wpswitch_gpio))
+ pdata->is_wpswitch_active_low = flags & OF_GPIO_ACTIVE_LOW;
+}
+
+static int msmsdcc_dt_parse_gpio_info(struct device *dev,
+ struct mmc_platform_data *pdata)
+{
+ int ret = 0, id = 0, cnt, i;
+ struct msm_mmc_pin_data *pin_data;
+ struct device_node *np = dev->of_node;
+
+ msmsdcc_dt_get_cd_wp_gpio(dev, pdata);
+
+ pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
+ if (!pin_data) {
+ dev_err(dev, "No memory for pin_data\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ cnt = of_gpio_count(np);
+ if (cnt > 0) {
+ pin_data->is_gpio = true;
+
+ pin_data->gpio_data = devm_kzalloc(dev,
+ sizeof(struct msm_mmc_gpio_data), GFP_KERNEL);
+ if (!pin_data->gpio_data) {
+ dev_err(dev, "No memory for gpio_data\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ pin_data->gpio_data->size = cnt;
+ pin_data->gpio_data->gpio = devm_kzalloc(dev,
+ cnt * sizeof(struct msm_mmc_gpio), GFP_KERNEL);
+ if (!pin_data->gpio_data->gpio) {
+ dev_err(dev, "No memory for gpio\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ const char *name = NULL;
+ char result[32];
+ pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
+ of_property_read_string_index(np,
+ "qcom,sdcc-gpio-names", i, &name);
+
+ snprintf(result, 32, "%s-%s",
+ dev_name(dev), name ? name : "?");
+ pin_data->gpio_data->gpio[i].name = result;
+ dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
+ pin_data->gpio_data->gpio[i].name,
+ pin_data->gpio_data->gpio[i].no);
+ }
+ } else {
+ pin_data->pad_data = devm_kzalloc(dev,
+ sizeof(struct msm_mmc_pad_data), GFP_KERNEL);
+ if (!pin_data->pad_data) {
+ dev_err(dev, "No memory for pin_data->pad_data\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ of_property_read_u32(np, "cell-index", &id);
+
+ ret = msmsdcc_dt_get_pad_pull_info(dev, id,
+ &pin_data->pad_data->pull);
+ if (ret)
+ goto err;
+ ret = msmsdcc_dt_get_pad_drv_info(dev, id,
+ &pin_data->pad_data->drv);
+ if (ret)
+ goto err;
+ }
+
+ pdata->pin_data = pin_data;
+err:
+ if (ret)
+ dev_err(dev, "%s failed with err %d\n", __func__, ret);
+ return ret;
+}
+
#define MAX_PROP_SIZE 32
static int msmsdcc_dt_parse_vreg_info(struct device *dev,
struct msm_mmc_reg_data **vreg_data, const char *vreg_name)
@@ -4838,29 +5300,10 @@
pdata->mmc_bus_width = 0;
}
- if (of_get_property(np, "qcom,sdcc-sup-voltages", &sup_volt_len)) {
- size_t sz;
- sz = sup_volt_len / sizeof(*sup_voltages);
- if (sz > 0) {
- sup_voltages = devm_kzalloc(dev,
- sz * sizeof(*sup_voltages), GFP_KERNEL);
- if (!sup_voltages) {
- dev_err(dev, "No memory for supported voltage\n");
- goto err;
- }
-
- ret = of_property_read_u32_array(np,
- "qcom,sdcc-sup-voltages", sup_voltages, sz);
- if (ret < 0) {
- dev_err(dev, "error while reading voltage"
- "ranges %d\n", ret);
- goto err;
- }
- } else {
- dev_err(dev, "No supported voltages\n");
- goto err;
- }
- for (i = 0; i < sz; i += 2) {
+ ret = msmsdcc_dt_get_array(dev, "qcom,sdcc-sup-voltages",
+ &sup_voltages, &sup_volt_len, 0);
+ if (!ret) {
+ for (i = 0; i < sup_volt_len; i += 2) {
u32 mask;
mask = mmc_vddrange_to_ocrmask(sup_voltages[i],
@@ -4870,37 +5313,13 @@
pdata->ocr_mask |= mask;
}
dev_dbg(dev, "OCR mask=0x%x\n", pdata->ocr_mask);
- } else {
- dev_err(dev, "Supported voltage range not specified\n");
}
- if (of_get_property(np, "qcom,sdcc-clk-rates", &clk_table_len)) {
- size_t sz;
- sz = clk_table_len / sizeof(*clk_table);
-
- if (sz > 0) {
- clk_table = devm_kzalloc(dev, sz * sizeof(*clk_table),
- GFP_KERNEL);
- if (!clk_table) {
- dev_err(dev, "No memory for clock table\n");
- goto err;
- }
-
- ret = of_property_read_u32_array(np,
- "qcom,sdcc-clk-rates", clk_table, sz);
- if (ret < 0) {
- dev_err(dev, "error while reading clk"
- "table %d\n", ret);
- goto err;
- }
- } else {
- dev_err(dev, "clk_table not specified\n");
- goto err;
- }
+ ret = msmsdcc_dt_get_array(dev, "qcom,sdcc-clk-rates",
+ &clk_table, &clk_table_len, 0);
+ if (!ret) {
pdata->sup_clk_table = clk_table;
- pdata->sup_clk_cnt = sz;
- } else {
- dev_err(dev, "Supported clock rates not specified\n");
+ pdata->sup_clk_cnt = clk_table_len;
}
pdata->vreg_data = devm_kzalloc(dev,
@@ -4918,6 +5337,9 @@
&pdata->vreg_data->vdd_io_data, "vdd-io"))
goto err;
+ if (msmsdcc_dt_parse_gpio_info(dev, pdata))
+ goto err;
+
len = of_property_count_strings(np, "qcom,sdcc-bus-speed-mode");
for (i = 0; i < len; i++) {
@@ -4987,7 +5409,6 @@
struct resource *dmares = NULL;
struct resource *dma_crci_res = NULL;
int ret = 0;
- int i;
if (pdev->dev.of_node) {
plat = msmsdcc_populate_pdata(&pdev->dev);
@@ -5016,56 +5437,21 @@
pr_err("%s: Invalid resource\n", __func__);
return -ENXIO;
}
- if (pdev->dev.of_node) {
- /*
- * Device tree iomem resources are only accessible by index.
- * index = 0 -> SDCC register interface
- * index = 1 -> DML register interface
- * index = 2 -> BAM register interface
- * IRQ resources:
- * index = 0 -> SDCC IRQ
- * index = 1 -> BAM IRQ
- */
- core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dml_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- bam_memres = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- core_irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- bam_irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- } else {
- for (i = 0; i < pdev->num_resources; i++) {
- if (pdev->resource[i].flags & IORESOURCE_MEM) {
- if (!strncmp(pdev->resource[i].name,
- "sdcc_dml_addr",
- sizeof("sdcc_dml_addr")))
- dml_memres = &pdev->resource[i];
- else if (!strncmp(pdev->resource[i].name,
- "sdcc_bam_addr",
- sizeof("sdcc_bam_addr")))
- bam_memres = &pdev->resource[i];
- else
- core_memres = &pdev->resource[i];
- }
- if (pdev->resource[i].flags & IORESOURCE_IRQ) {
- if (!strncmp(pdev->resource[i].name,
- "sdcc_bam_irq",
- sizeof("sdcc_bam_irq")))
- bam_irqres = &pdev->resource[i];
- else
- core_irqres = &pdev->resource[i];
- }
- if (pdev->resource[i].flags & IORESOURCE_DMA) {
- if (!strncmp(pdev->resource[i].name,
- "sdcc_dma_chnl",
- sizeof("sdcc_dma_chnl")))
- dmares = &pdev->resource[i];
- else if (!strncmp(pdev->resource[i].name,
- "sdcc_dma_crci",
- sizeof("sdcc_dma_crci")))
- dma_crci_res = &pdev->resource[i];
- }
- }
- }
+ core_memres = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "core_mem");
+ bam_memres = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "bam_mem");
+ dml_memres = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "dml_mem");
+ core_irqres = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "core_irq");
+ bam_irqres = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "bam_irq");
+ dmares = platform_get_resource_byname(pdev,
+ IORESOURCE_DMA, "dma_chnl");
+ dma_crci_res = platform_get_resource_byname(pdev,
+ IORESOURCE_DMA, "dma_crci");
if (!core_irqres || !core_memres) {
pr_err("%s: Invalid sdcc core resource\n", __func__);
@@ -5257,7 +5643,7 @@
mmc->caps |= plat->mmc_bus_width;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
-
+ mmc->caps |= MMC_CAP_HW_RESET;
/*
* If we send the CMD23 before multi block write/read command
* then we need not to send CMD12 at the end of the transfer.
@@ -5361,7 +5747,12 @@
* Setup card detect change
*/
- if (plat->status || plat->status_gpio) {
+ if (!plat->status_gpio)
+ plat->status_gpio = -ENOENT;
+ if (!plat->wpswitch_gpio)
+ plat->wpswitch_gpio = -ENOENT;
+
+ if (plat->status || gpio_is_valid(plat->status_gpio)) {
if (plat->status)
host->oldstat = plat->status(mmc_dev(host->mmc));
else
@@ -5421,6 +5812,7 @@
pm_runtime_enable(&(pdev)->dev);
}
#endif
+ host->idle_tout_ms = MSM_MMC_DEFAULT_IDLE_TIMEOUT;
setup_timer(&host->req_tout_timer, msmsdcc_req_tout_timer_hdlr,
(unsigned long)host);
@@ -5492,8 +5884,19 @@
if (ret)
goto remove_max_bus_bw_file;
}
+ host->idle_timeout.show = show_idle_timeout;
+ host->idle_timeout.store = store_idle_timeout;
+ sysfs_attr_init(&host->idle_timeout.attr);
+ host->idle_timeout.attr.name = "idle_timeout";
+ host->idle_timeout.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(&pdev->dev, &host->idle_timeout);
+ if (ret)
+ goto remove_polling_file;
return 0;
+ remove_polling_file:
+ if (!plat->status_irq)
+ device_remove_file(&pdev->dev, &host->polling);
remove_max_bus_bw_file:
device_remove_file(&pdev->dev, &host->max_bus_bw);
platform_irq_free:
@@ -5574,6 +5977,7 @@
device_remove_file(&pdev->dev, &host->max_bus_bw);
if (!plat->status_irq)
device_remove_file(&pdev->dev, &host->polling);
+ device_remove_file(&pdev->dev, &host->idle_timeout);
del_timer_sync(&host->req_tout_timer);
tasklet_kill(&host->dma_tlet);
@@ -5871,7 +6275,7 @@
return 0;
/* Idle timeout is not configurable for now */
- pm_schedule_suspend(dev, MSM_MMC_IDLE_TIMEOUT);
+ pm_schedule_suspend(dev, host->idle_tout_ms);
return -EAGAIN;
}
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index 655f2b9..3b1dbc7 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -213,7 +213,7 @@
#define NR_SG 128
-#define MSM_MMC_IDLE_TIMEOUT 5000 /* msecs */
+#define MSM_MMC_DEFAULT_IDLE_TIMEOUT 5000 /* msecs */
#define MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
/* Set the request timeout to 10secs */
@@ -411,9 +411,12 @@
bool sdio_wakeupirq_disabled;
struct mutex clk_mutex;
bool pending_resume;
+ unsigned int idle_tout_ms; /* Timeout in msecs */
+ bool pending_dpsm_reset;
struct msmsdcc_msm_bus_vote msm_bus_vote;
struct device_attribute max_bus_bw;
struct device_attribute polling;
+ struct device_attribute idle_timeout;
};
#define MSMSDCC_VERSION_MASK 0xFFFF
@@ -424,6 +427,7 @@
#define MSMSDCC_REG_WR_ACTIVE (1 << 4)
#define MSMSDCC_SW_RST (1 << 5)
#define MSMSDCC_SW_RST_CFG (1 << 6)
+#define MSMSDCC_WAIT_FOR_TX_RX (1 << 7)
#define set_hw_caps(h, val) ((h)->hw_caps |= val)
#define is_sps_mode(h) ((h)->hw_caps & MSMSDCC_SPS_BAM_SUP)
@@ -433,6 +437,7 @@
#define is_wait_for_reg_write(h) ((h)->hw_caps & MSMSDCC_REG_WR_ACTIVE)
#define is_sw_hard_reset(h) ((h)->hw_caps & MSMSDCC_SW_RST)
#define is_sw_reset_save_config(h) ((h)->hw_caps & MSMSDCC_SW_RST_CFG)
+#define is_wait_for_tx_rx_active(h) ((h)->hw_caps & MSMSDCC_WAIT_FOR_TX_RX)
/* Set controller capabilities based on version */
static inline void set_default_hw_caps(struct msmsdcc_host *host)
@@ -451,7 +456,8 @@
version &= MSMSDCC_VERSION_MASK;
if (version) /* SDCC v4 and greater */
host->hw_caps |= MSMSDCC_AUTO_PROG_DONE |
- MSMSDCC_SOFT_RESET | MSMSDCC_REG_WR_ACTIVE;
+ MSMSDCC_SOFT_RESET | MSMSDCC_REG_WR_ACTIVE
+ | MSMSDCC_WAIT_FOR_TX_RX;
if (version >= 0x2D) /* SDCC v4 2.1.0 and greater */
host->hw_caps |= MSMSDCC_SW_RST | MSMSDCC_SW_RST_CFG;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 21f146f..bc05764 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -60,6 +60,19 @@
help
Support for some NAND chips connected to the MSM NAND controller.
+config MTD_MSM_QPIC_NAND
+ tristate "MSM QPIC NAND Device Support"
+ depends on MTD && ARCH_MSM && !MTD_MSM_NAND
+ select CRC16
+ select BITREVERSE
+ select MTD_NAND_IDS
+ default n
+ help
+ Support for NAND controller in Qualcomm Parallel Interface
+ controller (QPIC). This new controller supports BAM mode
+ and BCH error correction mechanism. Based on the device
+ capabilities either 4 bit or 8 bit BCH ECC will be used.
+
config MTD_DATAFLASH
tristate "Support for AT45xxx DataFlash"
depends on SPI_MASTER && EXPERIMENTAL
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 8497c5f..9fdd004 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -13,6 +13,7 @@
obj-$(CONFIG_MTD_PMC551) += pmc551.o
obj-$(CONFIG_MTD_MS02NV) += ms02-nv.o
obj-$(CONFIG_MTD_MSM_NAND) += msm_nand.o
+obj-$(CONFIG_MTD_MSM_QPIC_NAND) += msm_qpic_nand.o
obj-$(CONFIG_MTD_MTDRAM) += mtdram.o
obj-$(CONFIG_MTD_LART) += lart.o
obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c
new file mode 100644
index 0000000..d709e17
--- /dev/null
+++ b/drivers/mtd/devices/msm_qpic_nand.c
@@ -0,0 +1,2500 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/crc16.h>
+#include <linux/bitrev.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <mach/sps.h>
+
+#define PAGE_SIZE_2K 2048
+#define PAGE_SIZE_4K 4096
+#define WRITE 1
+#define READ 0
+/*
+ * The maximum no of descriptors per transfer (page read/write) won't be more
+ * than 64. For more details on what those commands are, please refer to the
+ * page read and page write functions in the driver.
+ */
+#define SPS_MAX_DESC_NUM 64
+#define SPS_DATA_CONS_PIPE_INDEX 0
+#define SPS_DATA_PROD_PIPE_INDEX 1
+#define SPS_CMD_CONS_PIPE_INDEX 2
+
+#define msm_virt_to_dma(chip, vaddr) \
+ ((chip)->dma_phys_addr + \
+ ((uint8_t *)(vaddr) - (chip)->dma_virt_addr))
+
+/*
+ * A single page read/write request would typically need DMA memory of about
+ * 1K memory approximately. So for a single request this memory is more than
+ * enough.
+ *
+ * But to accommodate multiple clients we allocate 8K of memory. Though only
+ * one client request can be submitted to NANDc at any time, other clients can
+ * still prepare the descriptors while waiting for current client request to
+ * be done. Thus for a total memory of 8K, the driver can currently support
+ * maximum clients up to 7 or 8 at a time. The client for which there is no
+ * free DMA memory shall wait on the wait queue until other clients free up
+ * the required memory.
+ */
+#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
+/*
+ * This defines the granularity at which the buffer management is done. The
+ * total number of slots is based on the size of the atomic_t variable
+ * dma_buffer_busy(number of bits) within the structure msm_nand_chip.
+ */
+#define MSM_NAND_DMA_BUFFER_SLOT_SZ \
+ (MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))
+
+/* ONFI(Open NAND Flash Interface) parameters */
+#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
+#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
+#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
+#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d
+#define ONFI_PARAM_INFO_LENGTH 0x0200
+#define ONFI_PARAM_PAGE_LENGTH 0x0100
+#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F
+#define FLASH_READ_ONFI_SIGNATURE_ADDRESS 0x20
+#define FLASH_READ_ONFI_PARAMETERS_COMMAND 0xEC
+#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00
+#define FLASH_READ_DEVICE_ID_ADDRESS 0x00
+
+#define MSM_NAND_RESET_FLASH_STS 0x00000020
+#define MSM_NAND_RESET_READ_STS 0x000000C0
+
+/* QPIC NANDc (NAND Controller) Register Set */
+#define MSM_NAND_REG(info, off) (info->nand_phys + off)
+#define MSM_NAND_FLASH_CMD(info) MSM_NAND_REG(info, 0x30000)
+#define MSM_NAND_ADDR0(info) MSM_NAND_REG(info, 0x30004)
+#define MSM_NAND_ADDR1(info) MSM_NAND_REG(info, 0x30008)
+#define MSM_NAND_EXEC_CMD(info) MSM_NAND_REG(info, 0x30010)
+#define MSM_NAND_FLASH_STATUS(info) MSM_NAND_REG(info, 0x30014)
+#define FS_OP_ERR (1 << 4)
+#define FS_MPU_ERR (1 << 8)
+#define FS_DEVICE_STS_ERR (1 << 16)
+#define FS_DEVICE_WP (1 << 23)
+
+#define MSM_NAND_BUFFER_STATUS(info) MSM_NAND_REG(info, 0x30018)
+#define BS_UNCORRECTABLE_BIT (1 << 8)
+#define BS_CORRECTABLE_ERR_MSK 0x1F
+
+#define MSM_NAND_DEV0_CFG0(info) MSM_NAND_REG(info, 0x30020)
+#define DISABLE_STATUS_AFTER_WRITE 4
+#define CW_PER_PAGE 6
+#define UD_SIZE_BYTES 9
+#define SPARE_SIZE_BYTES 23
+#define NUM_ADDR_CYCLES 27
+
+#define MSM_NAND_DEV0_CFG1(info) MSM_NAND_REG(info, 0x30024)
+#define DEV0_CFG1_ECC_DISABLE 0
+#define WIDE_FLASH 1
+#define NAND_RECOVERY_CYCLES 2
+#define CS_ACTIVE_BSY 5
+#define BAD_BLOCK_BYTE_NUM 6
+#define BAD_BLOCK_IN_SPARE_AREA 16
+#define WR_RD_BSY_GAP 17
+#define ENABLE_BCH_ECC 27
+
+#define MSM_NAND_DEV0_ECC_CFG(info) MSM_NAND_REG(info, 0x30028)
+#define ECC_CFG_ECC_DISABLE 0
+#define ECC_SW_RESET 1
+#define ECC_MODE 4
+#define ECC_PARITY_SIZE_BYTES 8
+#define ECC_NUM_DATA_BYTES 16
+#define ECC_FORCE_CLK_OPEN 30
+
+#define MSM_NAND_READ_ID(info) MSM_NAND_REG(info, 0x30040)
+#define MSM_NAND_READ_STATUS(info) MSM_NAND_REG(info, 0x30044)
+#define MSM_NAND_DEV_CMD1(info) MSM_NAND_REG(info, 0x300A4)
+#define MSM_NAND_DEV_CMD_VLD(info) MSM_NAND_REG(info, 0x300AC)
+#define MSM_NAND_EBI2_ECC_BUF_CFG(info) MSM_NAND_REG(info, 0x300F0)
+#define MSM_NAND_ERASED_CW_DETECT_CFG(info) MSM_NAND_REG(info, 0x300E8)
+#define MSM_NAND_ERASED_CW_DETECT_STATUS(info) MSM_NAND_REG(info, 0x300EC)
+
+#define MSM_NAND_CTRL(info) MSM_NAND_REG(info, 0x30F00)
+#define BAM_MODE_EN 0
+
+#define MSM_NAND_READ_LOCATION_0(info) MSM_NAND_REG(info, 0x30F20)
+#define MSM_NAND_READ_LOCATION_1(info) MSM_NAND_REG(info, 0x30F24)
+
+/* device commands */
+#define MSM_NAND_CMD_PAGE_READ 0x32
+#define MSM_NAND_CMD_PAGE_READ_ECC 0x33
+#define MSM_NAND_CMD_PAGE_READ_ALL 0x34
+#define MSM_NAND_CMD_PRG_PAGE 0x36
+#define MSM_NAND_CMD_PRG_PAGE_ECC 0x37
+#define MSM_NAND_CMD_PRG_PAGE_ALL 0x39
+#define MSM_NAND_CMD_BLOCK_ERASE 0x3A
+#define MSM_NAND_CMD_FETCH_ID 0x0B
+
+/* Structure that defines a NAND SPS command element */
+struct msm_nand_sps_cmd {
+ struct sps_command_element ce;
+ uint32_t flags;
+};
+
+/*
+ * Structure that defines the NAND controller properties as per the
+ * NAND flash device/chip that is attached.
+ */
+struct msm_nand_chip {
+ struct device *dev;
+ /*
+ * DMA memory will be allocated only once during probe and this memory
+ * will be used by all NAND clients. This wait queue is needed to
+ * make the applications wait for DMA memory to be free'd when the
+ * complete memory is exhausted.
+ */
+ wait_queue_head_t dma_wait_queue;
+ atomic_t dma_buffer_busy;
+ uint8_t *dma_virt_addr;
+ dma_addr_t dma_phys_addr;
+ uint32_t ecc_parity_bytes;
+ uint32_t bch_caps; /* Controller BCH ECC capabilities */
+#define MSM_NAND_CAP_4_BIT_BCH (1 << 0)
+#define MSM_NAND_CAP_8_BIT_BCH (1 << 1)
+ uint32_t cw_size;
+ /* NANDc register configurations */
+ uint32_t cfg0, cfg1, cfg0_raw, cfg1_raw;
+ uint32_t ecc_buf_cfg;
+ uint32_t ecc_bch_cfg;
+};
+
+/* Structure that defines an SPS end point for a NANDc BAM pipe. */
+struct msm_nand_sps_endpt {
+ struct sps_pipe *handle;
+ struct sps_connect config;
+ struct sps_register_event event;
+ struct completion completion;
+};
+
+/*
+ * Structure that defines NANDc SPS data - BAM handle and an end point
+ * for each BAM pipe.
+ */
+struct msm_nand_sps_info {
+ uint32_t bam_handle;
+ struct msm_nand_sps_endpt data_prod;
+ struct msm_nand_sps_endpt data_cons;
+ struct msm_nand_sps_endpt cmd_pipe;
+};
+
+/*
+ * Structure that contains flash device information. This gets updated after
+ * the NAND flash device detection.
+ */
+struct flash_identification {
+ uint32_t flash_id;
+ uint32_t density;
+ uint32_t widebus;
+ uint32_t pagesize;
+ uint32_t blksize;
+ uint32_t oobsize;
+ uint32_t ecc_correctability;
+};
+
+/* Structure that defines NANDc private data. */
+struct msm_nand_info {
+ struct mtd_info mtd;
+ struct msm_nand_chip nand_chip;
+ struct msm_nand_sps_info sps;
+ unsigned long bam_phys;
+ unsigned long nand_phys;
+ void __iomem *bam_base;
+ int bam_irq;
+ /*
+ * This lock must be acquired before submitting any command or data
+ * descriptors to BAM pipes and must be held until all the submitted
+ * descriptors are processed.
+ *
+ * This is required to ensure that both command and descriptors are
+ * submitted atomically without interruption from other clients,
+ * when there are requests from more than one client at any time.
+ * Otherwise, data and command descriptors can be submitted out of
+ * order for a request which can cause data corruption.
+ */
+ struct mutex bam_lock;
+ struct flash_identification flash_dev;
+};
+
+/* Structure that defines an ONFI parameter page (512B) */
+struct onfi_param_page {
+ uint32_t parameter_page_signature;
+ uint16_t revision_number;
+ uint16_t features_supported;
+ uint16_t optional_commands_supported;
+ uint8_t reserved0[22];
+ uint8_t device_manufacturer[12];
+ uint8_t device_model[20];
+ uint8_t jedec_manufacturer_id;
+ uint16_t date_code;
+ uint8_t reserved1[13];
+ uint32_t number_of_data_bytes_per_page;
+ uint16_t number_of_spare_bytes_per_page;
+ uint32_t number_of_data_bytes_per_partial_page;
+ uint16_t number_of_spare_bytes_per_partial_page;
+ uint32_t number_of_pages_per_block;
+ uint32_t number_of_blocks_per_logical_unit;
+ uint8_t number_of_logical_units;
+ uint8_t number_of_address_cycles;
+ uint8_t number_of_bits_per_cell;
+ uint16_t maximum_bad_blocks_per_logical_unit;
+ uint16_t block_endurance;
+ uint8_t guaranteed_valid_begin_blocks;
+ uint16_t guaranteed_valid_begin_blocks_endurance;
+ uint8_t number_of_programs_per_page;
+ uint8_t partial_program_attributes;
+ uint8_t number_of_bits_ecc_correctability;
+ uint8_t number_of_interleaved_address_bits;
+ uint8_t interleaved_operation_attributes;
+ uint8_t reserved2[13];
+ uint8_t io_pin_capacitance;
+ uint16_t timing_mode_support;
+ uint16_t program_cache_timing_mode_support;
+ uint16_t maximum_page_programming_time;
+ uint16_t maximum_block_erase_time;
+ uint16_t maximum_page_read_time;
+ uint16_t maximum_change_column_setup_time;
+ uint8_t reserved3[23];
+ uint16_t vendor_specific_revision_number;
+ uint8_t vendor_specific[88];
+ uint16_t integrity_crc;
+} __attribute__((__packed__));
+
+/*
+ * Get the DMA memory for requested amount of size. It returns the pointer
+ * to free memory available from the allocated pool. Returns NULL if there
+ * is no free memory.
+ */
+static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
+{
+ uint32_t bitmask, free_bitmask, old_bitmask;
+ uint32_t need_mask, current_need_mask;
+ int free_index;
+
+ need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
+ - 1;
+ bitmask = atomic_read(&chip->dma_buffer_busy);
+ free_bitmask = ~bitmask;
+ do {
+ free_index = __ffs(free_bitmask);
+ current_need_mask = need_mask << free_index;
+
+ if (size + free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ >=
+ MSM_NAND_DMA_BUFFER_SIZE)
+ return NULL;
+
+ if ((bitmask & current_need_mask) == 0) {
+ old_bitmask =
+ atomic_cmpxchg(&chip->dma_buffer_busy,
+ bitmask,
+ bitmask | current_need_mask);
+ if (old_bitmask == bitmask)
+ return chip->dma_virt_addr +
+ free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ;
+ free_bitmask = 0;/* force return */
+ }
+ /* current free range was too small, clear all free bits */
+ /* below the top busy bit within current_need_mask */
+ free_bitmask &=
+ ~(~0U >> (32 - fls(bitmask & current_need_mask)));
+ } while (free_bitmask);
+
+ return NULL;
+}
+
+/*
+ * Releases the DMA memory used to the free pool and also wakes up any user
+ * thread waiting on wait queue for free memory to be available.
+ */
+static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
+ void *buffer, size_t size)
+{
+ int index;
+ uint32_t used_mask;
+
+ used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
+ - 1;
+ index = ((uint8_t *)buffer - chip->dma_virt_addr) /
+ MSM_NAND_DMA_BUFFER_SLOT_SZ;
+ atomic_sub(used_mask << index, &chip->dma_buffer_busy);
+
+ wake_up(&chip->dma_wait_queue);
+}
+
+/*
+ * Calculates page address of the buffer passed, offset of buffer within
+ * that page and then maps it for DMA by calling dma_map_page().
+ */
+static dma_addr_t msm_nand_dma_map(struct device *dev, void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct page *page;
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+ if (virt_addr_valid(addr))
+ page = virt_to_page(addr);
+ else {
+ if (WARN_ON(size + offset > PAGE_SIZE))
+ return ~0;
+ page = vmalloc_to_page(addr);
+ }
+ return dma_map_page(dev, page, offset, size, dir);
+}
+
+/*
+ * Wrapper function to prepare a SPS command element with the data that is
+ * passed to this function.
+ *
+ * Since for any command element it is a must to have this flag
+ * SPS_IOVEC_FLAG_CMD, this function by default updates this flag for a
+ * command element that is passed and thus, the caller need not explicitly
+ * pass this flag. The other flags must be passed based on the need. If a
+ * command element doesn't have any other flag, then 0 can be passed to flags.
+ */
+static inline void msm_nand_prep_ce(struct msm_nand_sps_cmd *sps_cmd,
+ uint32_t addr, uint32_t command,
+ uint32_t data, uint32_t flags)
+{
+ struct sps_command_element *cmd = &sps_cmd->ce;
+
+ cmd->addr = addr;
+ cmd->command = (command & WRITE) ? (uint32_t) SPS_WRITE_COMMAND :
+ (uint32_t) SPS_READ_COMMAND;
+ cmd->data = data;
+ cmd->mask = 0xFFFFFFFF;
+ sps_cmd->flags = SPS_IOVEC_FLAG_CMD | flags;
+}
+
+/*
+ * Read a single NANDc register as mentioned by its parameter addr. The return
+ * value indicates whether read is successful or not. The register value read
+ * is stored in val.
+ */
+static int msm_nand_flash_rd_reg(struct msm_nand_info *info, uint32_t addr,
+ uint32_t *val)
+{
+ int ret = 0;
+ struct msm_nand_sps_cmd *cmd;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct {
+ struct msm_nand_sps_cmd cmd;
+ uint32_t data;
+ } *dma_buffer;
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+ chip, sizeof(*dma_buffer))));
+ cmd = &dma_buffer->cmd;
+ msm_nand_prep_ce(cmd, addr, READ, msm_virt_to_dma(chip,
+ &dma_buffer->data), SPS_IOVEC_FLAG_INT);
+
+ ret = sps_transfer_one(info->sps.cmd_pipe.handle,
+ msm_virt_to_dma(chip, &cmd->ce),
+ sizeof(struct sps_command_element), NULL, cmd->flags);
+ if (ret) {
+ pr_err("failed to submit command %x ret %d\n", addr, ret);
+ goto out;
+ }
+ wait_for_completion_io(&info->sps.cmd_pipe.completion);
+ *val = dma_buffer->data;
+out:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ return ret;
+}
+
+/*
+ * Read the Flash ID from the Nand Flash Device. The return value < 0
+ * indicates failure. When successful, the Flash ID is stored in parameter
+ * read_id.
+ */
+static int msm_nand_flash_read_id(struct msm_nand_info *info,
+ bool read_onfi_signature,
+ uint32_t *read_id)
+{
+ int err = 0, i;
+ struct msm_nand_sps_cmd *cmd;
+ struct sps_iovec *iovec;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ uint32_t total_cnt = 4;
+ /*
+ * The following 4 commands are required to read id -
+ * write commands - addr0, flash, exec
+ * read_commands - read_id
+ */
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[total_cnt];
+ struct msm_nand_sps_cmd cmd[total_cnt];
+ uint32_t data[total_cnt];
+ } *dma_buffer;
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+ (chip, sizeof(*dma_buffer))));
+ if (read_onfi_signature)
+ dma_buffer->data[0] = FLASH_READ_ONFI_SIGNATURE_ADDRESS;
+ else
+ dma_buffer->data[0] = FLASH_READ_DEVICE_ID_ADDRESS;
+
+ dma_buffer->data[1] = MSM_NAND_CMD_FETCH_ID;
+ dma_buffer->data[2] = 1;
+ dma_buffer->data[3] = 0xeeeeeeee;
+
+ cmd = dma_buffer->cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE,
+ dma_buffer->data[0], SPS_IOVEC_FLAG_LOCK);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE,
+ dma_buffer->data[1], 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+ dma_buffer->data[2], SPS_IOVEC_FLAG_NWD);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_ID(info), READ,
+ msm_virt_to_dma(chip, &dma_buffer->data[3]),
+ SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+ cmd++;
+
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+ iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[i].flags;
+ iovec++;
+ }
+
+ mutex_lock(&info->bam_lock);
+ err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ mutex_unlock(&info->bam_lock);
+ goto out;
+ }
+ wait_for_completion_io(&info->sps.cmd_pipe.completion);
+ mutex_unlock(&info->bam_lock);
+
+ pr_debug("Read ID register value 0x%x\n", dma_buffer->data[3]);
+ if (!read_onfi_signature)
+ pr_debug("nandid: %x maker %02x device %02x\n",
+ dma_buffer->data[3], dma_buffer->data[3] & 0xff,
+ (dma_buffer->data[3] >> 8) & 0xff);
+ *read_id = dma_buffer->data[3];
+out:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ return err;
+}
+
+/*
+ * Contains data for common configuration registers that must be programmed
+ * for every NANDc operation.
+ */
+struct msm_nand_common_cfgs {
+ uint32_t cmd;
+ uint32_t addr0;
+ uint32_t addr1;
+ uint32_t cfg0;
+ uint32_t cfg1;
+};
+
+/*
+ * Function to prepare SPS command elements to write into NANDc configuration
+ * registers as per the data defined in struct msm_nand_common_cfgs. This is
+ * required for the following NANDc operations - Erase, Bad Block checking
+ * and for reading ONFI parameter page.
+ */
+static void msm_nand_prep_cfg_cmd_desc(struct msm_nand_info *info,
+ struct msm_nand_common_cfgs data,
+ struct msm_nand_sps_cmd **curr_cmd)
+{
+ struct msm_nand_sps_cmd *cmd;
+
+ cmd = *curr_cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE, data.cmd,
+ SPS_IOVEC_FLAG_LOCK);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE, data.addr0, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_ADDR1(info), WRITE, data.addr1, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG0(info), WRITE, data.cfg0, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG1(info), WRITE, data.cfg1, 0);
+ cmd++;
+ *curr_cmd = cmd;
+}
+
+/*
+ * Function to check the CRC integrity check on ONFI parameter page read.
+ * For ONFI parameter page read, the controller ECC will be disabled. Hence,
+ * it is mandatory to manually compute CRC and check it against the value
+ * stored within ONFI page.
+ */
+static uint16_t msm_nand_flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
+{
+ int i;
+ uint16_t result;
+
+ for (i = 0; i < count; i++)
+ buffer[i] = bitrev8(buffer[i]);
+
+ result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));
+
+ for (i = 0; i < count; i++)
+ buffer[i] = bitrev8(buffer[i]);
+
+ return result;
+}
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for reading ONFI parameter page.
+ */
+struct msm_nand_flash_onfi_data {
+ struct msm_nand_common_cfgs cfg;
+ uint32_t exec;
+ uint32_t devcmd1_orig;
+ uint32_t devcmdvld_orig;
+ uint32_t devcmd1_mod;
+ uint32_t devcmdvld_mod;
+ uint32_t ecc_bch_cfg;
+};
+
+/*
+ * Function to identify whether the attached NAND flash device is
+ * compliant with the ONFI spec or not. If yes, then it reads the ONFI parameter
+ * page to get the device parameters.
+ */
+static int msm_nand_flash_onfi_probe(struct msm_nand_info *info)
+{
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct flash_identification *flash = &info->flash_dev;
+ uint32_t crc_chk_count = 0, page_address = 0;
+ int ret = 0, i;
+
+ /* SPS parameters */
+ struct msm_nand_sps_cmd *cmd, *curr_cmd;
+ struct sps_iovec *iovec;
+ uint32_t rdata;
+
+ /* ONFI Identifier/Parameter Page parameters */
+ uint8_t *onfi_param_info_buf = NULL;
+ dma_addr_t dma_addr_param_info = 0;
+ struct onfi_param_page *onfi_param_page_ptr;
+ struct msm_nand_flash_onfi_data data;
+ uint32_t onfi_signature;
+
+ /* SPS command/data descriptors */
+ uint32_t total_cnt = 13;
+ /*
+ * The following 13 commands are required to get ONFI parameters -
+ * flash, addr0, addr1, cfg0, cfg1, dev0_ecc_cfg, cmd_vld, dev_cmd1,
+ * read_loc_0, exec, flash_status (read cmd), dev_cmd1, cmd_vld.
+ */
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[total_cnt];
+ struct msm_nand_sps_cmd cmd[total_cnt];
+ uint32_t flash_status;
+ } *dma_buffer;
+
+ wait_event(chip->dma_wait_queue, (onfi_param_info_buf =
+ msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
+ dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+ (chip, sizeof(*dma_buffer))));
+
+ ret = msm_nand_flash_read_id(info, 1, &onfi_signature);
+ if (ret < 0) {
+ pr_err("Failed to read ONFI signature\n");
+ goto free_dma;
+ }
+ if (onfi_signature != ONFI_PARAMETER_PAGE_SIGNATURE) {
+ pr_info("Found a non ONFI device\n");
+ ret = -EIO;
+ goto free_dma;
+ }
+
+ memset(&data, 0, sizeof(struct msm_nand_flash_onfi_data));
+ ret = msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD1(info),
+ &data.devcmd1_orig);
+ if (ret < 0)
+ goto free_dma;
+ ret = msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD_VLD(info),
+ &data.devcmdvld_orig);
+ if (ret < 0)
+ goto free_dma;
+
+ data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
+ data.exec = 1;
+ data.cfg.addr0 = (page_address << 16) |
+ FLASH_READ_ONFI_PARAMETERS_ADDRESS;
+ data.cfg.addr1 = (page_address >> 16) & 0xFF;
+ data.cfg.cfg0 = MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO;
+ data.cfg.cfg1 = MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO;
+ data.devcmd1_mod = (data.devcmd1_orig & 0xFFFFFF00) |
+ FLASH_READ_ONFI_PARAMETERS_COMMAND;
+ data.devcmdvld_mod = data.devcmdvld_orig & 0xFFFFFFFE;
+ data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ dma_buffer->flash_status = 0xeeeeeeee;
+
+ curr_cmd = cmd = dma_buffer->cmd;
+ msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+ cmd = curr_cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+ data.ecc_bch_cfg, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD_VLD(info), WRITE,
+ data.devcmdvld_mod, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD1(info), WRITE,
+ data.devcmd1_mod, 0);
+ cmd++;
+
+ rdata = (0 << 0) | (ONFI_PARAM_INFO_LENGTH << 16) | (1 << 31);
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
+ rdata, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+ data.exec, SPS_IOVEC_FLAG_NWD);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+ msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD1(info), WRITE,
+ data.devcmd1_orig, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD_VLD(info), WRITE,
+ data.devcmdvld_orig,
+ SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+ cmd++;
+
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+ iovec->addr = msm_virt_to_dma(chip,
+ &dma_buffer->cmd[i].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[i].flags;
+ iovec++;
+ }
+ mutex_lock(&info->bam_lock);
+ /* Submit data descriptor */
+ ret = sps_transfer_one(info->sps.data_prod.handle, dma_addr_param_info,
+ ONFI_PARAM_INFO_LENGTH, NULL, SPS_IOVEC_FLAG_INT);
+ if (ret) {
+ pr_err("Failed to submit data descriptors %d\n", ret);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ /* Submit command descriptors */
+ ret = sps_transfer(info->sps.cmd_pipe.handle,
+ &dma_buffer->xfer);
+ if (ret) {
+ pr_err("Failed to submit commands %d\n", ret);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ wait_for_completion_io(&info->sps.cmd_pipe.completion);
+ wait_for_completion_io(&info->sps.data_prod.completion);
+ mutex_unlock(&info->bam_lock);
+
+ /* Check for flash status errors */
+ if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+ pr_err("MPU/OP err (0x%x) is set\n", dma_buffer->flash_status);
+ ret = -EIO;
+ goto free_dma;
+ }
+
+ for (crc_chk_count = 0; crc_chk_count < ONFI_PARAM_INFO_LENGTH
+ / ONFI_PARAM_PAGE_LENGTH; crc_chk_count++) {
+ onfi_param_page_ptr =
+ (struct onfi_param_page *)
+ (&(onfi_param_info_buf
+ [ONFI_PARAM_PAGE_LENGTH *
+ crc_chk_count]));
+ if (msm_nand_flash_onfi_crc_check(
+ (uint8_t *)onfi_param_page_ptr,
+ ONFI_PARAM_PAGE_LENGTH - 2) ==
+ onfi_param_page_ptr->integrity_crc) {
+ break;
+ }
+ }
+ if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
+ / ONFI_PARAM_PAGE_LENGTH) {
+ pr_err("CRC Check failed on param page\n");
+ ret = -EIO;
+ goto free_dma;
+ }
+ ret = msm_nand_flash_read_id(info, 0, &flash->flash_id);
+ if (ret < 0) {
+ pr_err("Failed to read flash ID\n");
+ goto free_dma;
+ }
+ flash->widebus = onfi_param_page_ptr->features_supported & 0x01;
+ flash->pagesize = onfi_param_page_ptr->number_of_data_bytes_per_page;
+ flash->blksize = onfi_param_page_ptr->number_of_pages_per_block *
+ flash->pagesize;
+ flash->oobsize = onfi_param_page_ptr->number_of_spare_bytes_per_page;
+ flash->density = onfi_param_page_ptr->number_of_blocks_per_logical_unit
+ * flash->blksize;
+ flash->ecc_correctability = onfi_param_page_ptr->
+ number_of_bits_ecc_correctability;
+
+ pr_info("Found an ONFI compliant device %s\n",
+ onfi_param_page_ptr->device_model);
+ /*
+ * Temporary hack for MT29F4G08ABC device.
+ * Since the device is not properly adhering
+ * to ONFi specification it is reporting
+ * as 16 bit device though it is 8 bit device!!!
+ */
+ if (!strncmp(onfi_param_page_ptr->device_model, "MT29F4G08ABC", 12))
+ flash->widebus = 0;
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
+ ONFI_PARAM_INFO_LENGTH);
+ return ret;
+}
+
+/*
+ * Structure that contains read/write parameters required for reading/writing
+ * from/to a page.
+ */
+struct msm_nand_rw_params {
+ uint32_t page;
+ uint32_t page_count;
+ uint32_t sectordatasize;
+ uint32_t sectoroobsize;
+ uint32_t cwperpage;
+ uint32_t oob_len_cmd;
+ uint32_t oob_len_data;
+ uint32_t start_sector;
+ uint32_t oob_col;
+ dma_addr_t data_dma_addr;
+ dma_addr_t oob_dma_addr;
+ dma_addr_t data_dma_addr_curr;
+ dma_addr_t oob_dma_addr_curr;
+ bool read;
+};
+
+/*
+ * Structure that contains NANDc register data required for reading/writing
+ * from/to a page.
+ */
+struct msm_nand_rw_reg_data {
+ uint32_t cmd;
+ uint32_t addr0;
+ uint32_t addr1;
+ uint32_t cfg0;
+ uint32_t cfg1;
+ uint32_t ecc_bch_cfg;
+ uint32_t exec;
+ uint32_t ecc_cfg;
+ uint32_t clrfstatus;
+ uint32_t clrrstatus;
+};
+
+/*
+ * Function that validates page read/write MTD parameters received from upper
+ * layers such as MTD/YAFFS2 and returns error for any unsupported operations
+ * by the driver. In case of success, it also maps the data and oob buffer
+ * received for DMA.
+ */
+static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read,
+ loff_t offset,
+ struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *args)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ int err = 0;
+
+ pr_debug("========================================================\n");
+ pr_debug("offset 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x\n",
+ offset, ops->mode, ops->datbuf, ops->len);
+ pr_debug("oobbuf 0x%p ooblen 0x%x\n", ops->oobbuf, ops->ooblen);
+
+ if (ops->mode == MTD_OPS_PLACE_OOB) {
+ pr_err("MTD_OPS_PLACE_OOB is not supported\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (mtd->writesize == PAGE_SIZE_2K)
+ args->page = offset >> 11;
+
+ if (mtd->writesize == PAGE_SIZE_4K)
+ args->page = offset >> 12;
+
+ args->oob_len_cmd = ops->ooblen;
+ args->oob_len_data = ops->ooblen;
+ args->cwperpage = (mtd->writesize >> 9);
+ args->read = (read ? true : false);
+
+ if (offset & (mtd->writesize - 1)) {
+ pr_err("unsupported offset 0x%llx\n", offset);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!read && !ops->datbuf) {
+ pr_err("No data buffer provided for write!!\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (ops->mode == MTD_OPS_RAW) {
+ if (!ops->datbuf) {
+ pr_err("No data buffer provided for RAW mode\n");
+ err = -EINVAL;
+ goto out;
+ } else if ((ops->len % (mtd->writesize +
+ mtd->oobsize)) != 0) {
+ pr_err("unsupported data len %d for RAW mode\n",
+ ops->len);
+ err = -EINVAL;
+ goto out;
+ }
+ args->page_count = ops->len / (mtd->writesize + mtd->oobsize);
+
+ } else if (ops->mode == MTD_OPS_AUTO_OOB) {
+ if (ops->datbuf && (ops->len % mtd->writesize) != 0) {
+ /* when ops->datbuf is NULL, ops->len can be ooblen */
+ pr_err("unsupported data len %d for AUTO mode\n",
+ ops->len);
+ err = -EINVAL;
+ goto out;
+ }
+ if (read && ops->oobbuf && !ops->datbuf) {
+ args->start_sector = args->cwperpage - 1;
+ args->page_count = ops->ooblen / mtd->oobavail;
+ if ((args->page_count == 0) && (ops->ooblen))
+ args->page_count = 1;
+ } else if (ops->datbuf) {
+ args->page_count = ops->len / mtd->writesize;
+ }
+ }
+
+ if (ops->datbuf) {
+ args->data_dma_addr_curr = args->data_dma_addr =
+ msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
+ (read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
+ if (dma_mapping_error(chip->dev, args->data_dma_addr)) {
+ pr_err("dma mapping failed for 0x%p\n", ops->datbuf);
+ err = -EIO;
+ goto out;
+ }
+ }
+ if (ops->oobbuf) {
+ if (read)
+ memset(ops->oobbuf, 0xFF, ops->ooblen);
+ args->oob_dma_addr_curr = args->oob_dma_addr =
+ msm_nand_dma_map(chip->dev, ops->oobbuf, ops->ooblen,
+ (read ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE));
+ if (dma_mapping_error(chip->dev, args->oob_dma_addr)) {
+ pr_err("dma mapping failed for 0x%p\n", ops->oobbuf);
+ err = -EIO;
+ goto dma_map_oobbuf_failed;
+ }
+ }
+ goto out;
+dma_map_oobbuf_failed:
+ if (ops->datbuf)
+ dma_unmap_page(chip->dev, args->data_dma_addr, ops->len,
+ (read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
+out:
+ return err;
+}
+
+/*
+ * Function that updates NANDc register data (struct msm_nand_rw_reg_data)
+ * required for page read/write.
+ */
+static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip,
+ struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *args,
+ struct msm_nand_rw_reg_data *data)
+{
+ if (args->read) {
+ if (ops->mode != MTD_OPS_RAW) {
+ data->cmd = MSM_NAND_CMD_PAGE_READ_ECC;
+ data->cfg0 =
+ (chip->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (((args->cwperpage-1) - args->start_sector)
+ << CW_PER_PAGE);
+ data->cfg1 = chip->cfg1;
+ data->ecc_bch_cfg = chip->ecc_bch_cfg;
+ } else {
+ data->cmd = MSM_NAND_CMD_PAGE_READ_ALL;
+ data->cfg0 = chip->cfg0_raw;
+ data->cfg1 = chip->cfg1_raw;
+ data->ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ }
+
+ } else {
+ if (ops->mode != MTD_OPS_RAW) {
+ data->cfg0 = chip->cfg0;
+ data->cfg1 = chip->cfg1;
+ data->ecc_bch_cfg = chip->ecc_bch_cfg;
+ } else {
+ data->cfg0 = chip->cfg0_raw;
+ data->cfg1 = chip->cfg1_raw;
+ data->ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ }
+ data->cmd = MSM_NAND_CMD_PRG_PAGE;
+ data->clrfstatus = MSM_NAND_RESET_FLASH_STS;
+ data->clrrstatus = MSM_NAND_RESET_READ_STS;
+ }
+ data->exec = 1;
+ data->ecc_cfg = chip->ecc_buf_cfg;
+}
+
+/*
+ * Function to prepare series of SPS command descriptors required for a page
+ * read/write operation.
+ */
+static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *args,
+ struct msm_nand_rw_reg_data *data,
+ struct msm_nand_info *info,
+ uint32_t curr_cw,
+ struct msm_nand_sps_cmd **curr_cmd)
+{
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct msm_nand_sps_cmd *cmd;
+ uint32_t rdata;
+ /* read_location register parameters */
+ uint32_t offset, size, last_read;
+
+ cmd = *curr_cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE, data->cmd,
+ ((curr_cw == args->start_sector) ?
+ SPS_IOVEC_FLAG_LOCK : 0));
+ cmd++;
+
+ if (curr_cw == args->start_sector) {
+ msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE,
+ data->addr0, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_ADDR1(info), WRITE,
+ data->addr1, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG0(info), WRITE,
+ data->cfg0, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG1(info), WRITE,
+ data->cfg1, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+ data->ecc_bch_cfg, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_EBI2_ECC_BUF_CFG(info),
+ WRITE, data->ecc_cfg, 0);
+ cmd++;
+ }
+
+ if (!args->read)
+ goto sub_exec_cmd;
+
+ if (ops->mode == MTD_OPS_RAW) {
+ rdata = (0 << 0) | (chip->cw_size << 16) | (1 << 31);
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
+ rdata, 0);
+ cmd++;
+ }
+ if (ops->mode == MTD_OPS_AUTO_OOB && ops->datbuf) {
+ offset = 0;
+ size = (curr_cw < (args->cwperpage - 1)) ? 516 :
+ (512 - ((args->cwperpage - 1) << 2));
+ last_read = (curr_cw < (args->cwperpage - 1)) ? 1 :
+ (ops->oobbuf ? 0 : 1);
+ rdata = (offset << 0) | (size << 16) | (last_read << 31);
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
+ rdata, 0);
+ cmd++;
+ }
+ if (ops->mode == MTD_OPS_AUTO_OOB && ops->oobbuf
+ && (curr_cw == (args->cwperpage - 1))) {
+ offset = 512 - ((args->cwperpage - 1) << 2);
+ size = (args->cwperpage) << 2;
+ if (size > args->oob_len_cmd)
+ size = args->oob_len_cmd;
+ args->oob_len_cmd -= size;
+ last_read = 1;
+ rdata = (offset << 0) | (size << 16) | (last_read << 31);
+ if (ops->datbuf) {
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_1(info),
+ WRITE, rdata, 0);
+ } else {
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info),
+ WRITE, rdata, 0);
+ }
+ cmd++;
+ }
+sub_exec_cmd:
+ msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data->exec,
+ SPS_IOVEC_FLAG_NWD);
+ cmd++;
+ *curr_cmd = cmd;
+}
+
+/*
+ * Function to prepare and submit SPS data descriptors required for a page
+ * read/write operation.
+ */
+static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *args,
+ struct msm_nand_info *info,
+ uint32_t curr_cw)
+{
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct sps_pipe *data_pipe_handle;
+ uint32_t sectordatasize, sectoroobsize;
+ uint32_t sps_flags = 0;
+ int err = 0;
+
+ if (args->read)
+ data_pipe_handle = info->sps.data_prod.handle;
+ else
+ data_pipe_handle = info->sps.data_cons.handle;
+
+ if (ops->mode == MTD_OPS_RAW) {
+ sectordatasize = chip->cw_size;
+ if (!args->read)
+ sps_flags = SPS_IOVEC_FLAG_EOT;
+ if (curr_cw == (args->cwperpage - 1))
+ sps_flags |= SPS_IOVEC_FLAG_INT;
+
+ err = sps_transfer_one(data_pipe_handle,
+ args->data_dma_addr_curr,
+ sectordatasize, NULL,
+ sps_flags);
+ if (err)
+ goto out;
+ args->data_dma_addr_curr += sectordatasize;
+
+ } else if (ops->mode == MTD_OPS_AUTO_OOB) {
+ if (ops->datbuf) {
+ sectordatasize = (curr_cw < (args->cwperpage - 1))
+ ? 516 : (512 - ((args->cwperpage - 1) << 2));
+
+ if (!args->read) {
+ sps_flags = SPS_IOVEC_FLAG_EOT;
+ if (curr_cw == (args->cwperpage - 1) &&
+ ops->oobbuf)
+ sps_flags = 0;
+ }
+ if ((curr_cw == (args->cwperpage - 1)) && !ops->oobbuf)
+ sps_flags |= SPS_IOVEC_FLAG_INT;
+
+ err = sps_transfer_one(data_pipe_handle,
+ args->data_dma_addr_curr,
+ sectordatasize, NULL,
+ sps_flags);
+ if (err)
+ goto out;
+ args->data_dma_addr_curr += sectordatasize;
+ }
+
+ if (ops->oobbuf && (curr_cw == (args->cwperpage - 1))) {
+ sectoroobsize = args->cwperpage << 2;
+ if (sectoroobsize > args->oob_len_data)
+ sectoroobsize = args->oob_len_data;
+
+ if (!args->read)
+ sps_flags |= SPS_IOVEC_FLAG_EOT;
+ sps_flags |= SPS_IOVEC_FLAG_INT;
+ err = sps_transfer_one(data_pipe_handle,
+ args->oob_dma_addr_curr,
+ sectoroobsize, NULL,
+ sps_flags);
+ if (err)
+ goto out;
+ args->oob_dma_addr_curr += sectoroobsize;
+ args->oob_len_data -= sectoroobsize;
+ }
+ }
+out:
+ return err;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to read a
+ * page with main or/and spare data.
+ */
+static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ uint32_t cwperpage = (mtd->writesize >> 9);
+ int err, pageerr = 0, rawerr = 0;
+ uint32_t n = 0, pages_read = 0;
+ uint32_t ecc_errors = 0, total_ecc_errors = 0;
+ struct msm_nand_rw_params rw_params;
+ struct msm_nand_rw_reg_data data;
+ struct msm_nand_sps_cmd *cmd, *curr_cmd;
+ struct sps_iovec *iovec;
+ /*
+ * The following 6 commands will be sent only once for the first
+ * codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
+ * dev0_ecc_cfg, ebi2_ecc_buf_cfg. The following 6 commands will
+ * be sent for every CW - flash, read_location_0, read_location_1,
+ * exec, flash_status and buffer_status.
+ */
+ uint32_t total_cnt = (6 * cwperpage) + 6;
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[total_cnt];
+ struct msm_nand_sps_cmd cmd[total_cnt];
+ struct {
+ uint32_t flash_status;
+ uint32_t buffer_status;
+ } result[cwperpage];
+ } *dma_buffer;
+
+ memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
+ err = msm_nand_validate_mtd_params(mtd, true, from, ops, &rw_params);
+ if (err)
+ goto validate_mtd_params_failed;
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+ chip, sizeof(*dma_buffer))));
+
+ rw_params.oob_col = rw_params.start_sector * chip->cw_size;
+ if (chip->cfg1 & (1 << WIDE_FLASH))
+ rw_params.oob_col >>= 1;
+
+ memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
+ msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
+
+ while (rw_params.page_count-- > 0) {
+ data.addr0 = (rw_params.page << 16) | rw_params.oob_col;
+ data.addr1 = (rw_params.page >> 16) & 0xff;
+ cmd = dma_buffer->cmd;
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ dma_buffer->result[n].flash_status = 0xeeeeeeee;
+ dma_buffer->result[n].buffer_status = 0xeeeeeeee;
+
+ curr_cmd = cmd;
+ msm_nand_prep_rw_cmd_desc(ops, &rw_params,
+ &data, info, n, &curr_cmd);
+
+ cmd = curr_cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
+ READ, msm_virt_to_dma(chip,
+ &dma_buffer->result[n].flash_status), 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_BUFFER_STATUS(info),
+ READ, msm_virt_to_dma(chip,
+ &dma_buffer->result[n].buffer_status),
+ ((n == (cwperpage - 1)) ?
+ (SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT) :
+ 0));
+ cmd++;
+ }
+
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (n = 0; n < dma_buffer->xfer.iovec_count; n++) {
+ iovec->addr = msm_virt_to_dma(chip,
+ &dma_buffer->cmd[n].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[n].flags;
+ iovec++;
+ }
+ mutex_lock(&info->bam_lock);
+ /* Submit data descriptors */
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ err = msm_nand_submit_rw_data_desc(ops,
+ &rw_params, info, n);
+ if (err) {
+ pr_err("Failed to submit data descs %d\n", err);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ }
+ /* Submit command descriptors */
+ err = sps_transfer(info->sps.cmd_pipe.handle,
+ &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ wait_for_completion_io(&info->sps.cmd_pipe.completion);
+ wait_for_completion_io(&info->sps.data_prod.completion);
+ mutex_unlock(&info->bam_lock);
+ /* Check for flash status errors */
+ pageerr = rawerr = 0;
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ if (dma_buffer->result[n].flash_status & (FS_OP_ERR |
+ FS_MPU_ERR)) {
+ rawerr = -EIO;
+ break;
+ }
+ }
+ /* Check for ECC correction on empty block */
+ if (rawerr && ops->datbuf && ops->mode != MTD_OPS_RAW) {
+ uint8_t *datbuf = ops->datbuf +
+ pages_read * mtd->writesize;
+
+ dma_sync_single_for_cpu(chip->dev,
+ rw_params.data_dma_addr_curr - mtd->writesize,
+ mtd->writesize, DMA_BIDIRECTIONAL);
+
+ for (n = 0; n < mtd->writesize; n++) {
+ /* TODO: check offset for 4bit BCHECC */
+ if ((n % 516 == 3 || n % 516 == 175)
+ && datbuf[n] == 0x54)
+ datbuf[n] = 0xff;
+ if (datbuf[n] != 0xff) {
+ pageerr = rawerr;
+ break;
+ }
+ }
+
+ dma_sync_single_for_device(chip->dev,
+ rw_params.data_dma_addr_curr - mtd->writesize,
+ mtd->writesize, DMA_BIDIRECTIONAL);
+ }
+ if (rawerr && ops->oobbuf) {
+ dma_sync_single_for_cpu(chip->dev,
+ rw_params.oob_dma_addr_curr - (ops->ooblen -
+ rw_params.oob_len_data),
+ ops->ooblen - rw_params.oob_len_data,
+ DMA_BIDIRECTIONAL);
+
+ for (n = 0; n < ops->ooblen; n++) {
+ if (ops->oobbuf[n] != 0xff) {
+ pageerr = rawerr;
+ break;
+ }
+ }
+
+ dma_sync_single_for_device(chip->dev,
+ rw_params.oob_dma_addr_curr - (ops->ooblen -
+ rw_params.oob_len_data),
+ ops->ooblen - rw_params.oob_len_data,
+ DMA_BIDIRECTIONAL);
+ }
+ /* check for uncorrectable errors */
+ if (pageerr) {
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ if (dma_buffer->result[n].buffer_status &
+ BS_UNCORRECTABLE_BIT) {
+ mtd->ecc_stats.failed++;
+ pageerr = -EBADMSG;
+ break;
+ }
+ }
+ }
+ /* check for correctable errors */
+ if (!rawerr) {
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ ecc_errors =
+ dma_buffer->result[n].buffer_status
+ & BS_CORRECTABLE_ERR_MSK;
+ if (ecc_errors) {
+ total_ecc_errors += ecc_errors;
+ mtd->ecc_stats.corrected += ecc_errors;
+ /*
+ * For Micron devices it is observed
+ * that correctable errors upto 3 bits
+ * are very common.
+ */
+ if (ecc_errors > 3)
+ pageerr = -EUCLEAN;
+ }
+ }
+ }
+ if (pageerr && (pageerr != -EUCLEAN || err == 0))
+ err = pageerr;
+
+ if (rawerr && !pageerr) {
+ pr_debug("%llx %x %x empty page\n",
+ (loff_t)rw_params.page * mtd->writesize,
+ ops->len, ops->ooblen);
+ } else {
+ for (n = rw_params.start_sector; n < cwperpage; n++)
+ pr_debug("cw %d: flash_sts %x buffr_sts %x\n",
+ n, dma_buffer->result[n].flash_status,
+ dma_buffer->result[n].buffer_status);
+ }
+ if (err && err != -EUCLEAN && err != -EBADMSG)
+ goto free_dma;
+ pages_read++;
+ rw_params.page++;
+ }
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ if (ops->oobbuf)
+ dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
+ ops->ooblen, DMA_FROM_DEVICE);
+ if (ops->datbuf)
+ dma_unmap_page(chip->dev, rw_params.data_dma_addr,
+ ops->len, DMA_BIDIRECTIONAL);
+validate_mtd_params_failed:
+ if (ops->mode != MTD_OPS_RAW)
+ ops->retlen = mtd->writesize * pages_read;
+ else
+ ops->retlen = (mtd->writesize + mtd->oobsize) * pages_read;
+ ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
+ if (err)
+ pr_err("0x%llx datalen 0x%x ooblen %x err %d corrected %d\n",
+ from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
+ total_ecc_errors);
+ pr_debug("ret %d, retlen %d oobretlen %d\n",
+ err, ops->retlen, ops->oobretlen);
+
+ pr_debug("========================================================\n");
+ return err;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to read a
+ * page with only main data.
+ */
+static int msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ int ret;
+ struct mtd_oob_ops ops;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.len = len;
+ ops.retlen = 0;
+ ops.ooblen = 0;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ret = msm_nand_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to write a
+ * page with both main and spare data.
+ */
+static int msm_nand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ uint32_t cwperpage = (mtd->writesize >> 9);
+ uint32_t n, flash_sts, pages_written = 0;
+ int err = 0;
+ struct msm_nand_rw_params rw_params;
+ struct msm_nand_rw_reg_data data;
+ struct msm_nand_sps_cmd *cmd, *curr_cmd;
+ struct sps_iovec *iovec;
+ /*
+ * The following 7 commands will be sent only once :
+ * For first codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
+ * dev0_ecc_cfg, ebi2_ecc_buf_cfg.
+ * For last codeword (CW) - read_status(write)
+ *
+ * The following 4 commands will be sent for every CW :
+ * flash, exec, flash_status (read), flash_status (write).
+ */
+ uint32_t total_cnt = (4 * cwperpage) + 7;
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[total_cnt];
+ struct msm_nand_sps_cmd cmd[total_cnt];
+ struct {
+ uint32_t flash_status[cwperpage];
+ } data;
+ } *dma_buffer;
+
+ memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
+ err = msm_nand_validate_mtd_params(mtd, false, to, ops, &rw_params);
+ if (err)
+ goto validate_mtd_params_failed;
+
+ wait_event(chip->dma_wait_queue, (dma_buffer =
+ msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
+
+ memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
+ msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
+
+ while (rw_params.page_count-- > 0) {
+ data.addr0 = (rw_params.page << 16);
+ data.addr1 = (rw_params.page >> 16) & 0xff;
+ cmd = dma_buffer->cmd;
+
+ for (n = 0; n < cwperpage ; n++) {
+ dma_buffer->data.flash_status[n] = 0xeeeeeeee;
+
+ curr_cmd = cmd;
+ msm_nand_prep_rw_cmd_desc(ops, &rw_params,
+ &data, info, n, &curr_cmd);
+
+ cmd = curr_cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
+ READ, msm_virt_to_dma(chip,
+ &dma_buffer->data.flash_status[n]), 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
+ WRITE, data.clrfstatus, 0);
+ cmd++;
+
+ if (n == (cwperpage - 1)) {
+ msm_nand_prep_ce(cmd,
+ MSM_NAND_READ_STATUS(info), WRITE,
+ data.clrrstatus, SPS_IOVEC_FLAG_UNLOCK
+ | SPS_IOVEC_FLAG_INT);
+ cmd++;
+ }
+ }
+
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (n = 0; n < dma_buffer->xfer.iovec_count; n++) {
+ iovec->addr = msm_virt_to_dma(chip,
+ &dma_buffer->cmd[n].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[n].flags;
+ iovec++;
+ }
+ mutex_lock(&info->bam_lock);
+ /* Submit data descriptors */
+ for (n = 0; n < cwperpage; n++) {
+ err = msm_nand_submit_rw_data_desc(ops,
+ &rw_params, info, n);
+ if (err) {
+ pr_err("Failed to submit data descs %d\n", err);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ }
+ /* Submit command descriptors */
+ err = sps_transfer(info->sps.cmd_pipe.handle,
+ &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ wait_for_completion_io(&info->sps.cmd_pipe.completion);
+ wait_for_completion_io(&info->sps.data_cons.completion);
+ mutex_unlock(&info->bam_lock);
+
+ for (n = 0; n < cwperpage; n++)
+ pr_debug("write pg %d: flash_status[%d] = %x\n",
+ rw_params.page, n,
+ dma_buffer->data.flash_status[n]);
+
+ /* Check for flash status errors */
+ for (n = 0; n < cwperpage; n++) {
+ flash_sts = dma_buffer->data.flash_status[n];
+ if (flash_sts & (FS_OP_ERR | FS_MPU_ERR)) {
+ pr_err("MPU/OP err (0x%x) set\n", flash_sts);
+ err = -EIO;
+ goto free_dma;
+ }
+ if (n == (cwperpage - 1)) {
+ if (!(flash_sts & FS_DEVICE_WP) ||
+ (flash_sts & FS_DEVICE_STS_ERR)) {
+ pr_err("Dev sts err 0x%x\n", flash_sts);
+ err = -EIO;
+ goto free_dma;
+ }
+ }
+ }
+ pages_written++;
+ rw_params.page++;
+ }
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ if (ops->oobbuf)
+ dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
+ ops->ooblen, DMA_TO_DEVICE);
+ if (ops->datbuf)
+ dma_unmap_page(chip->dev, rw_params.data_dma_addr,
+ ops->len, DMA_TO_DEVICE);
+validate_mtd_params_failed:
+ if (ops->mode != MTD_OPS_RAW)
+ ops->retlen = mtd->writesize * pages_written;
+ else
+ ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
+
+ ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
+ if (err)
+ pr_err("to %llx datalen %x ooblen %x failed with err %d\n",
+ to, ops->len, ops->ooblen, err);
+ pr_debug("ret %d, retlen %d oobretlen %d\n",
+ err, ops->retlen, ops->oobretlen);
+
+ pr_debug("================================================\n");
+ return err;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to write a
+ * page with only main data.
+ */
+static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ int ret;
+ struct mtd_oob_ops ops;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.len = len;
+ ops.retlen = 0;
+ ops.ooblen = 0;
+ ops.datbuf = (uint8_t *)buf;
+ ops.oobbuf = NULL;
+ ret = msm_nand_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for Erase operation.
+ */
+struct msm_nand_erase_reg_data {
+ struct msm_nand_common_cfgs cfg;
+ uint32_t exec;
+ uint32_t flash_status;
+ uint32_t clrfstatus;
+ uint32_t clrrstatus;
+};
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to erase a
+ * block within NAND device.
+ */
+static int msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ int i, err = 0;
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ uint32_t page = 0;
+ struct msm_nand_sps_cmd *cmd, *curr_cmd;
+ struct msm_nand_erase_reg_data data;
+ struct sps_iovec *iovec;
+ uint32_t total_cnt = 9;
+ /*
+ * The following 9 commands are required to erase a page -
+ * flash, addr0, addr1, cfg0, cfg1, exec, flash_status(read),
+ * flash_status(write), read_status.
+ */
+ struct {
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[total_cnt];
+ struct msm_nand_sps_cmd cmd[total_cnt];
+ uint32_t flash_status;
+ } *dma_buffer;
+
+ if (mtd->writesize == PAGE_SIZE_2K)
+ page = instr->addr >> 11;
+
+ if (mtd->writesize == PAGE_SIZE_4K)
+ page = instr->addr >> 12;
+
+ if (instr->addr & (mtd->erasesize - 1)) {
+ pr_err("unsupported erase address, 0x%llx\n", instr->addr);
+ err = -EINVAL;
+ goto out;
+ }
+ if (instr->len != mtd->erasesize) {
+ pr_err("unsupported erase len, %lld\n", instr->len);
+ err = -EINVAL;
+ goto out;
+ }
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+ chip, sizeof(*dma_buffer))));
+ cmd = dma_buffer->cmd;
+
+ memset(&data, 0, sizeof(struct msm_nand_erase_reg_data));
+ data.cfg.cmd = MSM_NAND_CMD_BLOCK_ERASE;
+ data.cfg.addr0 = page;
+ data.cfg.addr1 = 0;
+ data.cfg.cfg0 = chip->cfg0 & (~(7 << CW_PER_PAGE));
+ data.cfg.cfg1 = chip->cfg1;
+ data.exec = 1;
+ dma_buffer->flash_status = 0xeeeeeeee;
+ data.clrfstatus = MSM_NAND_RESET_FLASH_STS;
+ data.clrrstatus = MSM_NAND_RESET_READ_STS;
+
+ curr_cmd = cmd;
+ msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+ cmd = curr_cmd;
+ msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data.exec,
+ SPS_IOVEC_FLAG_NWD);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+ msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), WRITE,
+ data.clrfstatus, 0);
+ cmd++;
+
+ msm_nand_prep_ce(cmd, MSM_NAND_READ_STATUS(info), WRITE,
+ data.clrrstatus,
+ SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+ cmd++;
+
+ BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+ dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+ iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+ iovec->size = sizeof(struct sps_command_element);
+ iovec->flags = dma_buffer->cmd[i].flags;
+ iovec++;
+ }
+ mutex_lock(&info->bam_lock);
+ err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ mutex_unlock(&info->bam_lock);
+ goto free_dma;
+ }
+ wait_for_completion_io(&info->sps.cmd_pipe.completion);
+ mutex_unlock(&info->bam_lock);
+
+ /* Check for flash status errors */
+ if (dma_buffer->flash_status & (FS_OP_ERR |
+ FS_MPU_ERR | FS_DEVICE_STS_ERR)) {
+ pr_err("MPU/OP/DEV err (0x%x) set\n", dma_buffer->flash_status);
+ err = -EIO;
+ }
+ if (!(dma_buffer->flash_status & FS_DEVICE_WP)) {
+ pr_err("Device is write protected\n");
+ err = -EIO;
+ }
+ if (err) {
+ pr_err("Erase failed, 0x%llx\n", instr->addr);
+ instr->fail_addr = instr->addr;
+ instr->state = MTD_ERASE_FAILED;
+ } else {
+ instr->state = MTD_ERASE_DONE;
+ instr->fail_addr = 0xffffffff;
+ mtd_erase_callback(instr);
+ }
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+out:
+ return err;
+}
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for checking if a block is bad.
+ */
+struct msm_nand_blk_isbad_data {
+ struct msm_nand_common_cfgs cfg;
+ uint32_t ecc_bch_cfg;
+ uint32_t exec;
+ uint32_t read_offset;
+};
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to check if
+ * a block is bad. This is done by reading the first page within a block and
+ * checking whether the bad block byte location contains 0xFF or not. If it
+ * doesn't contain 0xFF, then it is considered as bad block.
+ *
+ * Returns 0 (good), 1 (bad), or a negative errno on failure.
+ */
+static int msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	int i, ret = 0, bad_block = 0;
+	uint8_t *buf;
+	uint32_t page = 0, rdata, cwperpage;
+	struct msm_nand_sps_cmd *cmd, *curr_cmd;
+	struct msm_nand_blk_isbad_data data;
+	struct sps_iovec *iovec;
+	uint32_t total_cnt = 9;
+	/*
+	 * The following 9 commands are required to check bad block -
+	 * flash, addr0, addr1, cfg0, cfg1, ecc_cfg, read_loc_0,
+	 * exec, flash_status(read).
+	 */
+	struct {
+		struct sps_transfer xfer;
+		struct sps_iovec cmd_iovec[total_cnt];
+		struct msm_nand_sps_cmd cmd[total_cnt];
+		uint32_t flash_status;
+	} *dma_buffer;
+
+	/* Convert the byte offset to a page number for the supported sizes */
+	if (mtd->writesize == PAGE_SIZE_2K)
+		page = ofs >> 11;
+
+	if (mtd->writesize == PAGE_SIZE_4K)
+		page = ofs >> 12;
+
+	cwperpage = (mtd->writesize >> 9);
+
+	if (ofs > mtd->size) {
+		pr_err("Invalid offset 0x%llx\n", ofs);
+		bad_block = -EINVAL;
+		goto out;
+	}
+	if (ofs & (mtd->erasesize - 1)) {
+		pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
+		bad_block = -EINVAL;
+		goto out;
+	}
+
+	/* 4 extra bytes after the DMA buffer receive the bad-block marker */
+	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+				chip , sizeof(*dma_buffer) + 4)));
+	buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
+
+	cmd = dma_buffer->cmd;
+	/*
+	 * Fix: clear the structure actually used by this function. This
+	 * previously used sizeof(struct msm_nand_erase_reg_data), which is
+	 * the wrong type and may under- or over-clear 'data'.
+	 */
+	memset(&data, 0, sizeof(data));
+	data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
+	data.cfg.cfg0 = chip->cfg0_raw & ~(7U << CW_PER_PAGE);
+	data.cfg.cfg1 = chip->cfg1_raw;
+
+	/* Address the last codeword of the page (holds the marker byte) */
+	if (chip->cfg1 & (1 << WIDE_FLASH))
+		data.cfg.addr0 = (page << 16) |
+			((chip->cw_size * (cwperpage-1)) >> 1);
+	else
+		data.cfg.addr0 = (page << 16) |
+			(chip->cw_size * (cwperpage-1));
+
+	data.cfg.addr1 = (page >> 16) & 0xff;
+	data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+	data.exec = 1;
+	data.read_offset = (mtd->writesize - (chip->cw_size * (cwperpage-1)));
+	/* Poison value: lets us detect if the status read never completed */
+	dma_buffer->flash_status = 0xeeeeeeee;
+
+	curr_cmd = cmd;
+	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+	cmd = curr_cmd;
+	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+			data.ecc_bch_cfg, 0);
+	cmd++;
+
+	/* READ_LOCATION_0: offset, 4 bytes, last transfer of the codeword */
+	rdata = (data.read_offset << 0) | (4 << 16) | (1 << 31);
+	msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE, rdata, 0);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+			data.exec, SPS_IOVEC_FLAG_NWD);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+		msm_virt_to_dma(chip, &dma_buffer->flash_status),
+		SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_UNLOCK);
+	cmd++;
+
+	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+					&dma_buffer->cmd_iovec);
+	iovec = dma_buffer->xfer.iovec;
+
+	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+		iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+		iovec->size = sizeof(struct sps_command_element);
+		iovec->flags = dma_buffer->cmd[i].flags;
+		iovec++;
+	}
+	mutex_lock(&info->bam_lock);
+	/* Submit data descriptor */
+	ret = sps_transfer_one(info->sps.data_prod.handle,
+			msm_virt_to_dma(chip, buf),
+			4, NULL, SPS_IOVEC_FLAG_INT);
+
+	if (ret) {
+		pr_err("Failed to submit data desc %d\n", ret);
+		mutex_unlock(&info->bam_lock);
+		goto free_dma;
+	}
+	/* Submit command descriptor */
+	ret = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+	if (ret) {
+		pr_err("Failed to submit commands %d\n", ret);
+		mutex_unlock(&info->bam_lock);
+		goto free_dma;
+	}
+	wait_for_completion_io(&info->sps.cmd_pipe.completion);
+	wait_for_completion_io(&info->sps.data_prod.completion);
+	mutex_unlock(&info->bam_lock);
+
+	/* Check for flash status errors */
+	if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+		pr_err("MPU/OP err set: %x\n", dma_buffer->flash_status);
+		bad_block = -EIO;
+		goto free_dma;
+	}
+
+	/* Check for bad block marker byte (2 bytes on 16-bit devices) */
+	if (chip->cfg1 & (1 << WIDE_FLASH)) {
+		if (buf[0] != 0xFF || buf[1] != 0xFF)
+			bad_block = 1;
+	} else {
+		if (buf[0] != 0xFF)
+			bad_block = 1;
+	}
+free_dma:
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
+out:
+	return ret ? ret : bad_block;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to mark a
+ * block as bad. This is done by writing the first page within a block with 0,
+ * thus setting the bad block byte location as well to 0.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct mtd_oob_ops ops;
+	int ret;
+	uint8_t *buf;
+	size_t len;
+
+	if (ofs > mtd->size) {
+		pr_err("Invalid offset 0x%llx\n", ofs);
+		ret = -EINVAL;
+		goto out;
+	}
+	if (ofs & (mtd->erasesize - 1)) {
+		pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
+		ret = -EINVAL;
+		goto out;
+	}
+	/* Write one full raw page (data + spare) of zeroes */
+	len = mtd->writesize + mtd->oobsize;
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf) {
+		/* Fix: len is size_t - use %zu, not %x */
+		pr_err("unable to allocate memory for %zu size\n", len);
+		ret = -ENOMEM;
+		goto out;
+	}
+	/*
+	 * Fix: zero the whole ops struct first. Previously ooboffs (and any
+	 * other unlisted member) was left uninitialized stack garbage.
+	 */
+	memset(&ops, 0, sizeof(ops));
+	ops.mode = MTD_OPS_RAW;
+	ops.len = len;
+	ops.retlen = 0;
+	ops.ooblen = 0;
+	ops.datbuf = buf;
+	ops.oobbuf = NULL;
+	ret = msm_nand_write_oob(mtd, ofs, &ops);
+	kfree(buf);
+out:
+	return ret;
+}
+
+/*
+ * Function that scans for the attached NAND device. This fills out all
+ * the uninitialized function pointers with the defaults. The flash ID is
+ * read and the mtd/chip structures are filled with the appropriate values.
+ *
+ * Returns 0 on success or a negative errno (unknown/unsupported device).
+ */
+int msm_nand_scan(struct mtd_info *mtd)
+{
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	struct flash_identification *supported_flash = &info->flash_dev;
+	int flash_id = 0, err = 0;
+	uint32_t i, mtd_writesize;
+	uint8_t dev_found = 0, wide_bus;
+	uint32_t manid, devid, devcfg;
+	uint32_t bad_block_byte;
+	struct nand_flash_dev *flashdev = NULL;
+	struct nand_manufacturers *flashman = NULL;
+
+	/* Probe the Flash device for ONFI compliance */
+	if (!msm_nand_flash_onfi_probe(info)) {
+		dev_found = 1;
+	} else {
+		/* Non-ONFI part: identify it from its READID response */
+		err = msm_nand_flash_read_id(info, 0, &flash_id);
+		if (err < 0) {
+			pr_err("Failed to read Flash ID\n");
+			err = -EINVAL;
+			goto out;
+		}
+		/* READID bytes: 0 = manufacturer, 1 = device, 3 = devcfg */
+		manid = flash_id & 0xFF;
+		devid = (flash_id >> 8) & 0xFF;
+		devcfg = (flash_id >> 24) & 0xFF;
+
+		/* Look up the IDs in the generic NAND tables */
+		for (i = 0; !flashman && nand_manuf_ids[i].id; ++i)
+			if (nand_manuf_ids[i].id == manid)
+				flashman = &nand_manuf_ids[i];
+		for (i = 0; !flashdev && nand_flash_ids[i].id; ++i)
+			if (nand_flash_ids[i].id == devid)
+				flashdev = &nand_flash_ids[i];
+		if (!flashdev || !flashman) {
+			pr_err("unknown nand flashid=%x manuf=%x devid=%x\n",
+				flash_id, manid, devid);
+			err = -ENOENT;
+			goto out;
+		}
+		dev_found = 1;
+		/*
+		 * Table entries with pagesize == 0 encode the geometry in
+		 * the extended-ID (devcfg) byte instead of the table.
+		 */
+		if (!flashdev->pagesize) {
+			supported_flash->widebus = devcfg & (1 << 6) ? 1 : 0;
+			supported_flash->pagesize = 1024 << (devcfg & 0x3);
+			supported_flash->blksize = (64 * 1024) <<
+							((devcfg >> 4) & 0x3);
+			supported_flash->oobsize = (8 << ((devcfg >> 2) & 1)) *
+				(supported_flash->pagesize >> 9);
+		} else {
+			supported_flash->widebus = flashdev->options &
+				NAND_BUSWIDTH_16 ? 1 : 0;
+			supported_flash->pagesize = flashdev->pagesize;
+			supported_flash->blksize = flashdev->erasesize;
+			supported_flash->oobsize = flashdev->pagesize >> 5;
+		}
+		supported_flash->flash_id = flash_id;
+		supported_flash->density = flashdev->chipsize << 20;
+	}
+
+	/* Copy the identified geometry into the MTD structure */
+	if (dev_found) {
+		wide_bus = supported_flash->widebus;
+		mtd->size = supported_flash->density;
+		mtd->writesize = supported_flash->pagesize;
+		mtd->oobsize = supported_flash->oobsize;
+		mtd->erasesize = supported_flash->blksize;
+		mtd_writesize = mtd->writesize;
+
+		/* Check whether NAND device support 8bit ECC*/
+		if (supported_flash->ecc_correctability >= 8)
+			chip->bch_caps = MSM_NAND_CAP_8_BIT_BCH;
+		else
+			chip->bch_caps = MSM_NAND_CAP_4_BIT_BCH;
+
+		pr_info("NAND Id: 0x%x Buswidth: %dBits Density: %lld MByte\n",
+			supported_flash->flash_id, (wide_bus) ? 16 : 8,
+			(mtd->size >> 20));
+		pr_info("pagesize: %d Erasesize: %d oobsize: %d (in Bytes)\n",
+			mtd->writesize, mtd->erasesize, mtd->oobsize);
+		pr_info("BCH ECC: %d Bit\n",
+			(chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH ? 8 : 4));
+	}
+
+	/* Codeword size: 532 bytes for 8-bit BCH, 528 for 4-bit BCH */
+	chip->cw_size = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ? 532 : 528;
+	/* CFG0/CFG1 for ECC-protected (main) operation */
+	chip->cfg0 = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
+		| (516 << UD_SIZE_BYTES)
+		| (0 << DISABLE_STATUS_AFTER_WRITE)
+		| (5 << NUM_ADDR_CYCLES);
+
+	/* Byte position of the bad-block marker within the raw page */
+	bad_block_byte = (mtd_writesize - (chip->cw_size * (
+					(mtd_writesize >> 9) - 1)) + 1);
+	chip->cfg1 = (7 <<  NAND_RECOVERY_CYCLES)
+		| (0 << CS_ACTIVE_BSY)
+		| (bad_block_byte << BAD_BLOCK_BYTE_NUM)
+		| (0 << BAD_BLOCK_IN_SPARE_AREA)
+		| (2 << WR_RD_BSY_GAP)
+		| ((wide_bus ? 1 : 0) << WIDE_FLASH)
+		| (1 << ENABLE_BCH_ECC);
+
+	/* CFG0/CFG1 for raw (ECC-disabled) operation */
+	chip->cfg0_raw = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
+		| (5 << NUM_ADDR_CYCLES)
+		| (0 << SPARE_SIZE_BYTES)
+		| (chip->cw_size << UD_SIZE_BYTES);
+
+	chip->cfg1_raw = (7 <<  NAND_RECOVERY_CYCLES)
+		| (0 << CS_ACTIVE_BSY)
+		| (17 <<  BAD_BLOCK_BYTE_NUM)
+		| (1 << BAD_BLOCK_IN_SPARE_AREA)
+		| (2 << WR_RD_BSY_GAP)
+		| ((wide_bus ? 1 : 0) << WIDE_FLASH)
+		| (1 << DEV0_CFG1_ECC_DISABLE);
+
+	chip->ecc_bch_cfg = (0 << ECC_CFG_ECC_DISABLE)
+			| (0 << ECC_SW_RESET)
+			| (516 << ECC_NUM_DATA_BYTES)
+			| (1 << ECC_FORCE_CLK_OPEN);
+
+	/* Spare/parity sizing differs between 8-bit and 4-bit BCH modes */
+	if (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) {
+		chip->cfg0 |= (wide_bus ? 0 << SPARE_SIZE_BYTES :
+				2 << SPARE_SIZE_BYTES);
+		chip->ecc_bch_cfg |= (1 << ECC_MODE)
+			| ((wide_bus) ? (14 << ECC_PARITY_SIZE_BYTES) :
+					(13 << ECC_PARITY_SIZE_BYTES));
+	} else {
+		chip->cfg0 |= (wide_bus ? 2 << SPARE_SIZE_BYTES :
+				4 << SPARE_SIZE_BYTES);
+		chip->ecc_bch_cfg |= (0 << ECC_MODE)
+			| ((wide_bus) ? (8 << ECC_PARITY_SIZE_BYTES) :
+					(7 << ECC_PARITY_SIZE_BYTES));
+	}
+
+	/*
+	 * For 4bit BCH ECC (default ECC), parity bytes = 7(x8) or 8(x16 I/O)
+	 * For 8bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O).
+	 */
+	chip->ecc_parity_bytes = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ?
+				(wide_bus ? 14 : 13) : (wide_bus ? 8 : 7);
+	chip->ecc_buf_cfg = 0x203; /* No of bytes covered by ECC - 516 bytes */
+
+	pr_info("CFG0: 0x%08x,           CFG1: 0x%08x\n"
+		"            RAWCFG0: 0x%08x,        RAWCFG1: 0x%08x\n"
+		"          ECCBUFCFG: 0x%08x,      ECCBCHCFG: 0x%08x\n"
+		"     BAD BLOCK BYTE: 0x%08x\n", chip->cfg0, chip->cfg1,
+		chip->cfg0_raw, chip->cfg1_raw, chip->ecc_buf_cfg,
+		chip->ecc_bch_cfg, bad_block_byte);
+
+	/* Only 2K (oob 64) and 4K (oob 128/224) page devices are supported */
+	if (mtd->oobsize == 64) {
+		mtd->oobavail = 16;
+	} else if ((mtd->oobsize == 128) || (mtd->oobsize == 224)) {
+		mtd->oobavail = 32;
+	} else {
+		pr_err("Unsupported NAND oobsize: 0x%x\n", mtd->oobsize);
+		err = -ENODEV;
+		goto out;
+	}
+
+	/* Fill in remaining MTD driver data */
+	mtd->type = MTD_NANDFLASH;
+	mtd->flags = MTD_CAP_NANDFLASH;
+	mtd->_erase = msm_nand_erase;
+	mtd->_block_isbad = msm_nand_block_isbad;
+	mtd->_block_markbad = msm_nand_block_markbad;
+	mtd->_read = msm_nand_read;
+	mtd->_write = msm_nand_write;
+	mtd->_read_oob  = msm_nand_read_oob;
+	mtd->_write_oob = msm_nand_write_oob;
+	mtd->owner = THIS_MODULE;
+out:
+	return err;
+}
+
+#define BAM_APPS_PIPE_LOCK_GRP 0
+/*
+ * This function allocates, configures, connects an end point and
+ * also registers event notification for an end point. It also allocates
+ * DMA memory for descriptor FIFO of a pipe.
+ *
+ * @info:       driver-private state (provides BAM handle and DMA device)
+ * @end_point:  endpoint bookkeeping to fill in (handle, config, event)
+ * @pipe_index: one of SPS_DATA_PROD/DATA_CONS/CMD_CONS_PIPE_INDEX
+ *
+ * Returns 0 on success or a negative errno; on failure all partially
+ * acquired SPS resources are released (descriptor FIFO is devm-managed).
+ */
+static int msm_nand_init_endpoint(struct msm_nand_info *info,
+				struct msm_nand_sps_endpt *end_point,
+				uint32_t pipe_index)
+{
+	int rc = 0;
+	struct sps_pipe *pipe_handle;
+	struct sps_connect *sps_config = &end_point->config;
+	struct sps_register_event *sps_event = &end_point->event;
+
+	pipe_handle = sps_alloc_endpoint();
+	if (!pipe_handle) {
+		pr_err("sps_alloc_endpoint() failed\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Start from the default pipe configuration, then specialize it */
+	rc = sps_get_config(pipe_handle, sps_config);
+	if (rc) {
+		pr_err("sps_get_config() failed %d\n", rc);
+		goto free_endpoint;
+	}
+
+	if (pipe_index == SPS_DATA_PROD_PIPE_INDEX) {
+		/* READ CASE: source - BAM; destination - system memory */
+		sps_config->source = info->sps.bam_handle;
+		sps_config->destination = SPS_DEV_HANDLE_MEM;
+		sps_config->mode = SPS_MODE_SRC;
+		sps_config->src_pipe_index = pipe_index;
+	} else if (pipe_index == SPS_DATA_CONS_PIPE_INDEX ||
+			pipe_index == SPS_CMD_CONS_PIPE_INDEX) {
+		/* WRITE CASE: source - system memory; destination - BAM */
+		sps_config->source = SPS_DEV_HANDLE_MEM;
+		sps_config->destination = info->sps.bam_handle;
+		sps_config->mode = SPS_MODE_DEST;
+		sps_config->dest_pipe_index = pipe_index;
+	}
+
+	sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
+	sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP;
+	/*
+	 * Descriptor FIFO is a cyclic FIFO. If SPS_MAX_DESC_NUM descriptors
+	 * are allowed to be submitted before we get any ack for any of them,
+	 * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) *
+	 * sizeof(struct sps_iovec).
+	 */
+	sps_config->desc.size = (SPS_MAX_DESC_NUM + 1) *
+					sizeof(struct sps_iovec);
+	sps_config->desc.base = dmam_alloc_coherent(info->nand_chip.dev,
+					sps_config->desc.size,
+					&sps_config->desc.phys_base,
+					GFP_KERNEL);
+	if (!sps_config->desc.base) {
+		pr_err("dmam_alloc_coherent() failed for size %x\n",
+				sps_config->desc.size);
+		rc = -ENOMEM;
+		goto free_endpoint;
+	}
+	memset(sps_config->desc.base, 0x00, sps_config->desc.size);
+
+	rc = sps_connect(pipe_handle, sps_config);
+	if (rc) {
+		pr_err("sps_connect() failed %d\n", rc);
+		goto free_endpoint;
+	}
+
+	/* Get a completion event per descriptor-done on this pipe */
+	init_completion(&end_point->completion);
+	sps_event->mode = SPS_TRIGGER_WAIT;
+	sps_event->options = SPS_O_DESC_DONE;
+	sps_event->xfer_done = &end_point->completion;
+	sps_event->user = (void *)info;
+
+	rc = sps_register_event(pipe_handle, sps_event);
+	if (rc) {
+		pr_err("sps_register_event() failed %d\n", rc);
+		goto sps_disconnect;
+	}
+	end_point->handle = pipe_handle;
+	pr_debug("pipe handle 0x%x for pipe %d\n", (uint32_t)pipe_handle,
+			pipe_index);
+	goto out;
+sps_disconnect:
+	sps_disconnect(pipe_handle);
+free_endpoint:
+	sps_free_endpoint(pipe_handle);
+out:
+	return rc;
+}
+
+/*
+ * This function disconnects and frees an end point.
+ * Note: the pipe must be disconnected before its endpoint is freed;
+ * the order of these two calls is significant.
+ */
+static void msm_nand_deinit_endpoint(struct msm_nand_info *info,
+				struct msm_nand_sps_endpt *end_point)
+{
+	sps_disconnect(end_point->handle);
+	sps_free_endpoint(end_point->handle);
+}
+
+/*
+ * This function registers BAM device and initializes its end points for
+ * the following pipes -
+ * system consumer pipe for data (pipe#0),
+ * system producer pipe for data (pipe#1),
+ * system consumer pipe for commands (pipe#2).
+ *
+ * Returns 0 on success; on failure every endpoint initialized so far is
+ * torn down in reverse order and the BAM device is deregistered.
+ */
+static int msm_nand_bam_init(struct msm_nand_info *nand_info)
+{
+	struct sps_bam_props bam = {0};
+	int rc = 0;
+
+	bam.phys_addr = nand_info->bam_phys;
+	bam.virt_addr = nand_info->bam_base;
+	bam.irq = nand_info->bam_irq;
+	/*
+	 * NAND device is accessible from both Apps and Modem processor and
+	 * thus, NANDc and BAM are shared between both the processors. But BAM
+	 * must be enabled and instantiated only once during boot up by
+	 * Trustzone before Modem/Apps is brought out from reset.
+	 *
+	 * This is indicated to SPS driver on Apps by marking flag
+	 * SPS_BAM_MGR_DEVICE_REMOTE. The following are the global
+	 * initializations that will be done by Trustzone - Execution
+	 * Environment, Pipes assignment to Apps/Modem, Pipe Super groups and
+	 * Descriptor summing threshold.
+	 *
+	 * NANDc BAM device supports 2 execution environments - Modem and Apps
+	 * and thus the flag SPS_BAM_MGR_MULTI_EE is set.
+	 */
+	bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;
+
+	rc = sps_register_bam_device(&bam, &nand_info->sps.bam_handle);
+	if (rc) {
+		pr_err("sps_register_bam_device() failed with %d\n", rc);
+		goto out;
+	}
+	pr_info("BAM device registered: bam_handle 0x%x\n",
+			nand_info->sps.bam_handle);
+
+	/* Bring up the three pipes; unwind in reverse order on any failure */
+	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_prod,
+					SPS_DATA_PROD_PIPE_INDEX);
+	if (rc)
+		goto unregister_bam;
+
+	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_cons,
+					SPS_DATA_CONS_PIPE_INDEX);
+	if (rc)
+		goto deinit_data_prod;
+
+	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.cmd_pipe,
+					SPS_CMD_CONS_PIPE_INDEX);
+	if (rc)
+		goto deinit_data_cons;
+	goto out;
+deinit_data_cons:
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
+deinit_data_prod:
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
+unregister_bam:
+	sps_deregister_bam_device(nand_info->sps.bam_handle);
+out:
+	return rc;
+}
+
+/*
+ * This function de-registers BAM device, disconnects and frees its end points
+ * for all the pipes. Counterpart of msm_nand_bam_init(); endpoints are
+ * released before the BAM device itself is deregistered.
+ */
+static void msm_nand_bam_free(struct msm_nand_info *nand_info)
+{
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.cmd_pipe);
+	sps_deregister_bam_device(nand_info->sps.bam_handle);
+}
+
+/*
+ * This function enables DMA support for the NANDc in BAM mode by writing
+ * BAM_MODE_EN into the NANDc control register via a single command element
+ * on the command pipe, then waiting for its completion.
+ *
+ * Returns 0 on success or the sps_transfer_one() error code. The DMA
+ * command buffer is always released before returning.
+ */
+static int msm_nand_enable_dma(struct msm_nand_info *info)
+{
+	struct msm_nand_sps_cmd *sps_cmd;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	int ret;
+
+	/* Blocks until a command buffer slot is available */
+	wait_event(chip->dma_wait_queue,
+		   (sps_cmd = msm_nand_get_dma_buffer(chip, sizeof(*sps_cmd))));
+
+	msm_nand_prep_ce(sps_cmd, MSM_NAND_CTRL(info), WRITE,
+			(1 << BAM_MODE_EN), SPS_IOVEC_FLAG_INT);
+
+	ret = sps_transfer_one(info->sps.cmd_pipe.handle,
+			msm_virt_to_dma(chip, &sps_cmd->ce),
+			sizeof(struct sps_command_element), NULL,
+			sps_cmd->flags);
+	if (ret) {
+		pr_err("Failed to submit command: %d\n", ret);
+		goto out;
+	}
+	wait_for_completion_io(&info->sps.cmd_pipe.completion);
+out:
+	msm_nand_release_dma_buffer(chip, sps_cmd, sizeof(*sps_cmd));
+	return ret;
+
+}
+
+/*
+ * This function gets called when its device named msm-nand is added to
+ * device tree .dts file with all its resources such as physical addresses
+ * for NANDc and BAM, BAM IRQ.
+ *
+ * It also expects the NAND flash partition information to be passed in .dts
+ * file so that it can parse the partitions by calling MTD function
+ * mtd_device_parse_register().
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int __devinit msm_nand_probe(struct platform_device *pdev)
+{
+	struct msm_nand_info *info;
+	struct resource *res;
+	int err, n_parts;
+	struct device_node *pnode;
+	struct mtd_part_parser_data parser_data;
+
+	if (!pdev->dev.of_node) {
+		pr_err("No valid device tree info for NANDc\n");
+		err = -ENODEV;
+		goto out;
+	}
+
+	/*
+	 * The partition information can also be passed from kernel command
+	 * line. Also, the MTD core layer supports adding the whole device as
+	 * one MTD device when no partition information is available at all.
+	 * Hence, do not bail out when partition information is not available
+	 * in device tree.
+	 */
+	pnode = of_find_node_by_path("/qcom,mtd-partitions");
+	if (!pnode)
+		pr_info("No partition info available in device tree\n");
+	info = devm_kzalloc(&pdev->dev, sizeof(struct msm_nand_info),
+				GFP_KERNEL);
+	if (!info) {
+		pr_err("Unable to allocate memory for msm_nand_info\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					"nand_phys");
+	if (!res || !res->start) {
+		pr_err("NAND phys address range is not provided\n");
+		err = -ENODEV;
+		goto out;
+	}
+	info->nand_phys = res->start;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					"bam_phys");
+	if (!res || !res->start) {
+		pr_err("BAM phys address range is not provided\n");
+		err = -ENODEV;
+		goto out;
+	}
+	info->bam_phys = res->start;
+	info->bam_base = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+	if (!info->bam_base) {
+		pr_err("BAM ioremap() failed for addr 0x%x size 0x%x\n",
+			res->start, resource_size(res));
+		err = -ENOMEM;
+		goto out;
+	}
+
+	info->bam_irq = platform_get_irq_byname(pdev, "bam_irq");
+	if (info->bam_irq < 0) {
+		pr_err("BAM IRQ is not provided\n");
+		err = -ENODEV;
+		goto out;
+	}
+
+	info->mtd.name = dev_name(&pdev->dev);
+	info->mtd.priv = info;
+	info->mtd.owner = THIS_MODULE;
+	info->nand_chip.dev = &pdev->dev;
+	init_waitqueue_head(&info->nand_chip.dma_wait_queue);
+	mutex_init(&info->bam_lock);
+
+	info->nand_chip.dma_virt_addr =
+		dmam_alloc_coherent(&pdev->dev, MSM_NAND_DMA_BUFFER_SIZE,
+			&info->nand_chip.dma_phys_addr, GFP_KERNEL);
+	if (!info->nand_chip.dma_virt_addr) {
+		pr_err("No memory for DMA buffer size %x\n",
+				MSM_NAND_DMA_BUFFER_SIZE);
+		err = -ENOMEM;
+		goto out;
+	}
+	err = msm_nand_bam_init(info);
+	if (err) {
+		pr_err("msm_nand_bam_init() failed %d\n", err);
+		goto out;
+	}
+	err = msm_nand_enable_dma(info);
+	if (err) {
+		pr_err("Failed to enable DMA in NANDc\n");
+		goto free_bam;
+	}
+	if (msm_nand_scan(&info->mtd)) {
+		pr_err("No nand device found\n");
+		err = -ENXIO;
+		goto free_bam;
+	}
+	parser_data.of_node = pnode;
+	n_parts = mtd_device_parse_register(&info->mtd, NULL, &parser_data,
+					NULL, 0);
+	if (n_parts < 0) {
+		pr_err("Unable to register MTD partitions %d\n", n_parts);
+		/*
+		 * Fix: propagate the failure. Previously err still held 0
+		 * here, so probe freed the BAM yet reported success.
+		 */
+		err = n_parts;
+		goto free_bam;
+	}
+	dev_set_drvdata(&pdev->dev, info);
+
+	pr_info("NANDc phys addr 0x%lx, BAM phys addr 0x%lx, BAM IRQ %d\n",
+			info->nand_phys, info->bam_phys, info->bam_irq);
+	pr_info("Allocated DMA buffer at virt_addr 0x%p, phys_addr 0x%x\n",
+		info->nand_chip.dma_virt_addr, info->nand_chip.dma_phys_addr);
+	pr_info("Found %d MTD partitions\n", n_parts);
+	goto out;
+free_bam:
+	msm_nand_bam_free(info);
+out:
+	return err;
+}
+
+/*
+ * Remove functionality that gets called when driver/device msm-nand
+ * is removed. Clears the driver data, then releases the MTD device and
+ * BAM resources if probe had completed.
+ */
+static int __devexit msm_nand_remove(struct platform_device *pdev)
+{
+	struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+	if (!info)
+		return 0;
+
+	mtd_device_unregister(&info->mtd);
+	msm_nand_bam_free(info);
+	return 0;
+}
+
+#define DRIVER_NAME "msm_qpic_nand"
+static const struct of_device_id msm_nand_match_table[] = {
+ { .compatible = "qcom,msm-nand", },
+ {},
+};
+static struct platform_driver msm_nand_driver = {
+ .probe = msm_nand_probe,
+ .remove = __devexit_p(msm_nand_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = msm_nand_match_table,
+ },
+};
+
+module_platform_driver(msm_nand_driver);
+
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM QPIC NAND flash driver");
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 2b73d99..d26c845 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -87,6 +87,8 @@
static const char driver_name [] = "usbnet";
+static struct workqueue_struct *usbnet_wq;
+
/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param (msg_level, int, 0);
@@ -246,7 +248,7 @@
if (skb_defer_rx_timestamp(skb))
return;
- status = netif_rx (skb);
+ status = netif_rx_ni(skb);
if (status != NET_RX_SUCCESS)
netif_dbg(dev, rx_err, dev->net,
"netif_rx status %d\n", status);
@@ -316,7 +318,7 @@
spin_lock(&dev->done.lock);
__skb_queue_tail(&dev->done, skb);
if (dev->done.qlen == 1)
- tasklet_schedule(&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
spin_unlock_irqrestore(&dev->done.lock, flags);
return old_state;
}
@@ -390,7 +392,7 @@
default:
netif_dbg(dev, rx_err, dev->net,
"rx submit, %d\n", retval);
- tasklet_schedule (&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
break;
case 0:
usb_mark_last_busy(dev->udev);
@@ -583,7 +585,7 @@
num++;
}
- tasklet_schedule(&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
netif_dbg(dev, rx_status, dev->net,
"paused rx queue disabled, %d skbs requeued\n", num);
@@ -652,7 +654,7 @@
{
if (netif_running(dev->net)) {
(void) unlink_urbs (dev, &dev->rxq);
- tasklet_schedule(&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
}
}
EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
@@ -726,7 +728,7 @@
*/
dev->flags = 0;
del_timer_sync (&dev->delay);
- tasklet_kill (&dev->bh);
+ cancel_work_sync(&dev->bh_w);
if (info->manage_power)
info->manage_power(dev, 0);
else
@@ -799,7 +801,7 @@
"simple");
// delay posting reads until we're fully open
- tasklet_schedule (&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
if (info->manage_power) {
retval = info->manage_power(dev, 1);
if (retval < 0)
@@ -969,7 +971,7 @@
status);
} else {
clear_bit (EVENT_RX_HALT, &dev->flags);
- tasklet_schedule (&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
}
}
@@ -994,7 +996,7 @@
usb_autopm_put_interface(dev->intf);
fail_lowmem:
if (resched)
- tasklet_schedule (&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
}
}
@@ -1080,7 +1082,7 @@
struct usbnet *dev = netdev_priv(net);
unlink_urbs (dev, &dev->txq);
- tasklet_schedule (&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
// FIXME: device recovery -- reset?
}
@@ -1267,13 +1269,21 @@
"rxqlen %d --> %d\n",
temp, dev->rxq.qlen);
if (dev->rxq.qlen < qlen)
- tasklet_schedule (&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
}
if (dev->txq.qlen < TX_QLEN (dev))
netif_wake_queue (dev->net);
}
}
+/*
+ * Workqueue adapter for the former tasklet handler: unpacks the usbnet
+ * device from the work item and invokes usbnet_bh() with the same
+ * (unsigned long) argument the tasklet used to pass.
+ */
+static void usbnet_bh_w(struct work_struct *work)
+{
+	struct usbnet		*dev =
+		container_of(work, struct usbnet, bh_w);
+	unsigned long param = (unsigned long)dev;
+
+	usbnet_bh(param);
+}
/*-------------------------------------------------------------------------
*
@@ -1392,8 +1402,7 @@
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
skb_queue_head_init(&dev->rxq_pause);
- dev->bh.func = usbnet_bh;
- dev->bh.data = (unsigned long) dev;
+ INIT_WORK(&dev->bh_w, usbnet_bh_w);
INIT_WORK (&dev->kevent, kevent);
init_usb_anchor(&dev->deferred);
dev->delay.function = usbnet_bh;
@@ -1577,7 +1586,7 @@
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_tx_wake_all_queues(dev->net);
- tasklet_schedule (&dev->bh);
+ queue_work(usbnet_wq, &dev->bh_w);
}
}
return 0;
@@ -1594,12 +1603,20 @@
FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
random_ether_addr(node_id);
+
+ usbnet_wq = create_singlethread_workqueue("usbnet");
+ if (!usbnet_wq) {
+ pr_err("%s: Unable to create workqueue:usbnet\n", __func__);
+ return -ENOMEM;
+ }
+
return 0;
}
module_init(usbnet_init);
static void __exit usbnet_exit(void)
{
+ destroy_workqueue(usbnet_wq);
}
module_exit(usbnet_exit);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cbefe67..3c79917 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -900,8 +900,8 @@
{
struct virtnet_info *vi = netdev_priv(dev);
- ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
- ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
+ ring->rx_max_pending = virtqueue_get_impl_size(vi->rvq);
+ ring->tx_max_pending = virtqueue_get_impl_size(vi->svq);
ring->rx_pending = ring->rx_max_pending;
ring->tx_pending = ring->tx_max_pending;
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index ad9dc7d..c0a4e0e 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -21,6 +21,7 @@
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/gpio.h>
+#include <linux/wakelock.h>
#include <mach/peripheral-loader.h>
#define DEVICE "wcnss_wlan"
@@ -48,6 +49,7 @@
void (*tm_notify)(struct device *, int);
struct wcnss_wlan_config wlan_config;
struct delayed_work wcnss_work;
+ struct wake_lock wcnss_wake_lock;
} *penv = NULL;
static ssize_t wcnss_serial_number_show(struct device *dev,
@@ -60,7 +62,7 @@
}
static ssize_t wcnss_serial_number_store(struct device *dev,
- struct device_attribute *attr, const char * buf, size_t count)
+ struct device_attribute *attr, const char *buf, size_t count)
{
unsigned int value;
@@ -88,7 +90,7 @@
}
static ssize_t wcnss_thermal_mitigation_store(struct device *dev,
- struct device_attribute *attr, const char * buf, size_t count)
+ struct device_attribute *attr, const char *buf, size_t count)
{
int value;
@@ -184,6 +186,12 @@
return 0;
}
+/*
+ * Flush the delayed boot-vote work synchronously. Fixes: declare a proper
+ * (void) prototype instead of unprototyped empty parens, and guard against
+ * penv being NULL (matching the checks in the other exported helpers).
+ */
+void wcnss_flush_delayed_boot_votes(void)
+{
+	if (penv)
+		flush_delayed_work_sync(&penv->wcnss_work);
+}
+EXPORT_SYMBOL(wcnss_flush_delayed_boot_votes);
+
static int __devexit
wcnss_wlan_ctrl_remove(struct platform_device *pdev)
{
@@ -320,6 +328,20 @@
return 0;
}
+/*
+ * Hold the WCNSS wakelock to keep the system from suspending.
+ * Fix: use a proper (void) prototype instead of unprototyped empty parens.
+ */
+void wcnss_prevent_suspend(void)
+{
+	if (penv)
+		wake_lock(&penv->wcnss_wake_lock);
+}
+EXPORT_SYMBOL(wcnss_prevent_suspend);
+
+/*
+ * Release the WCNSS wakelock taken by wcnss_prevent_suspend().
+ * Fix: use a proper (void) prototype instead of unprototyped empty parens.
+ */
+void wcnss_allow_suspend(void)
+{
+	if (penv)
+		wake_unlock(&penv->wcnss_wake_lock);
+}
+EXPORT_SYMBOL(wcnss_allow_suspend);
+
static int
wcnss_trigger_config(struct platform_device *pdev)
{
@@ -392,6 +414,8 @@
if (ret)
goto fail_sysfs;
+ wake_lock_init(&penv->wcnss_wake_lock, WAKE_LOCK_SUSPEND, "wcnss");
+
return 0;
fail_sysfs:
diff --git a/drivers/of/of_slimbus.c b/drivers/of/of_slimbus.c
index 512ca73..8aaef25 100644
--- a/drivers/of/of_slimbus.c
+++ b/drivers/of/of_slimbus.c
@@ -66,6 +66,8 @@
kfree(slim);
return -ENOMEM;
}
+
+ slim->dev.of_node = of_node_get(node);
slim->name = (const char *)name;
binfo[n].bus_num = ctrl->nr;
binfo[n].slim_slave = slim;
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 0c3d4ad..d75cac4 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -60,4 +60,11 @@
devices support Pulse Width Modulation output with user generated
patterns. They share a lookup table with size of 64 entries.
+config QPNP_POWER_ON
+ tristate "QPNP PMIC POWER-ON Driver"
+ depends on OF_SPMI && SPMI && MSM_QPNP_INT
+ help
+ This driver supports the power-on functionality on Qualcomm
+ PNP PMIC. It currently supports reporting the change in status of
+ the KPDPWR_N line (connected to the power-key).
endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 6deb6ee..2b6b806 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_USB_BAM) += usb_bam.o
obj-$(CONFIG_SPS) += sps/
obj-$(CONFIG_QPNP_PWM) += qpnp-pwm.o
+obj-$(CONFIG_QPNP_POWER_ON) += qpnp-power-on.o
diff --git a/drivers/platform/msm/qpnp-power-on.c b/drivers/platform/msm/qpnp-power-on.c
new file mode 100644
index 0000000..d8bb884
--- /dev/null
+++ b/drivers/platform/msm/qpnp-power-on.c
@@ -0,0 +1,241 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+
+#define QPNP_PON_RT_STS(base) (base + 0x10)
+#define QPNP_PON_PULL_CTL(base) (base + 0x70)
+#define QPNP_PON_DBC_CTL(base) (base + 0x71)
+
+#define QPNP_PON_CNTL_PULL_UP BIT(1)
+#define QPNP_PON_CNTL_TRIG_DELAY_MASK (0x7)
+#define QPNP_PON_KPDPWR_N_SET BIT(0)
+
+struct qpnp_pon {
+	struct spmi_device *spmi;
+	struct input_dev *pon_input;
+	/* must be signed: checked for < 0 after spmi_get_irq_byname() */
+	int key_status_irq;
+	u16 base;
+};
+
+static irqreturn_t qpnp_pon_key_irq(int irq, void *_pon)
+{
+ u8 pon_rt_sts;
+ int rc;
+ struct qpnp_pon *pon = _pon;
+
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_RT_STS(pon->base), &pon_rt_sts, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to read PON RT status\n");
+ return IRQ_HANDLED;
+ }
+
+ input_report_key(pon->pon_input, KEY_POWER,
+ !(pon_rt_sts & QPNP_PON_KPDPWR_N_SET));
+ input_sync(pon->pon_input);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit qpnp_pon_key_init(struct qpnp_pon *pon)
+{
+ int rc = 0;
+ u32 pullup, delay;
+ u8 pon_cntl;
+
+ pon->key_status_irq = spmi_get_irq_byname(pon->spmi,
+ NULL, "power-key");
+ if (pon->key_status_irq < 0) {
+ dev_err(&pon->spmi->dev, "Unable to get pon key irq\n");
+ return -ENXIO;
+ }
+
+	rc = of_property_read_u32(pon->spmi->dev.of_node,
+					"qcom,pon-key-dbc-delay", &delay);
+	/* configure debounce only when the property was read successfully */
+	if (!rc) {
+		delay = (delay << 6) / USEC_PER_SEC;
+		delay = ilog2(delay);
+
+		rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+				QPNP_PON_DBC_CTL(pon->base), &pon_cntl, 1);
+		if (rc) {
+			dev_err(&pon->spmi->dev, "spmi read addr=%x failed\n",
+						QPNP_PON_DBC_CTL(pon->base));
+			return rc;
+		}
+		pon_cntl &= ~QPNP_PON_CNTL_TRIG_DELAY_MASK;
+		pon_cntl |= (delay & QPNP_PON_CNTL_TRIG_DELAY_MASK);
+		rc = spmi_ext_register_writel(pon->spmi->ctrl, pon->spmi->sid,
+				QPNP_PON_DBC_CTL(pon->base), &pon_cntl, 1);
+		if (rc) {
+			dev_err(&pon->spmi->dev, "spmi write addr=%x failed\n",
+						QPNP_PON_DBC_CTL(pon->base));
+			return rc;
+		}
+	}
+
+ rc = of_property_read_u32(pon->spmi->dev.of_node,
+ "qcom,pon-key-pull-up", &pullup);
+ if (!rc) {
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_PULL_CTL(pon->base), &pon_cntl, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "spmi read addr=%x failed\n",
+ QPNP_PON_PULL_CTL(pon->base));
+ return rc;
+ }
+ if (pullup)
+ pon_cntl |= QPNP_PON_CNTL_PULL_UP;
+ else
+ pon_cntl &= ~QPNP_PON_CNTL_PULL_UP;
+
+ rc = spmi_ext_register_writel(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_PULL_CTL(pon->base), &pon_cntl, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "spmi write addr=%x failed\n",
+ QPNP_PON_PULL_CTL(pon->base));
+ return rc;
+ }
+ }
+
+ pon->pon_input = input_allocate_device();
+ if (!pon->pon_input) {
+ dev_err(&pon->spmi->dev, "Can't allocate pon button\n");
+ return -ENOMEM;
+ }
+
+ input_set_capability(pon->pon_input, EV_KEY, KEY_POWER);
+ pon->pon_input->name = "qpnp_pon_key";
+ pon->pon_input->phys = "qpnp_pon_key/input0";
+
+ rc = input_register_device(pon->pon_input);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Can't register pon key: %d\n", rc);
+ goto free_input_dev;
+ }
+
+ rc = request_any_context_irq(pon->key_status_irq, qpnp_pon_key_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "qpnp_pon_key_status", pon);
+ if (rc < 0) {
+ dev_err(&pon->spmi->dev, "Can't request %d IRQ for pon: %d\n",
+ pon->key_status_irq, rc);
+ goto unreg_input_dev;
+ }
+
+ device_init_wakeup(&pon->spmi->dev, 1);
+ enable_irq_wake(pon->key_status_irq);
+
+ return rc;
+
+unreg_input_dev:
+ input_unregister_device(pon->pon_input);
+free_input_dev:
+ input_free_device(pon->pon_input);
+ return rc;
+}
+
+static int __devinit qpnp_pon_probe(struct spmi_device *spmi)
+{
+ struct qpnp_pon *pon;
+ struct resource *pon_resource;
+ u32 pon_key_enable = 0;
+ int rc = 0;
+
+ pon = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_pon),
+ GFP_KERNEL);
+ if (!pon) {
+ dev_err(&spmi->dev, "Can't allocate qpnp_pon\n");
+ return -ENOMEM;
+ }
+
+ pon->spmi = spmi;
+
+ pon_resource = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+ if (!pon_resource) {
+ dev_err(&spmi->dev, "Unable to get PON base address\n");
+ return -ENXIO;
+ }
+ pon->base = pon_resource->start;
+
+ dev_set_drvdata(&spmi->dev, pon);
+
+ /* pon-key-enable property must be set to register pon key */
+ rc = of_property_read_u32(spmi->dev.of_node, "qcom,pon-key-enable",
+ &pon_key_enable);
+ if (rc && rc != -EINVAL) {
+ dev_err(&spmi->dev,
+ "Error reading 'pon-key-enable' property (%d)", rc);
+ return rc;
+ }
+
+ if (pon_key_enable) {
+ rc = qpnp_pon_key_init(pon);
+ if (rc < 0) {
+ dev_err(&spmi->dev, "Failed to register pon-key\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int qpnp_pon_remove(struct spmi_device *spmi)
+{
+ struct qpnp_pon *pon = dev_get_drvdata(&spmi->dev);
+
+ if (pon->pon_input) {
+ free_irq(pon->key_status_irq, pon);
+ input_unregister_device(pon->pon_input);
+ }
+
+ return 0;
+}
+
+static struct of_device_id spmi_match_table[] = {
+	{ .compatible = "qcom,qpnp-power-on", },
+	{}
+};
+
+static struct spmi_driver qpnp_pon_driver = {
+ .driver = {
+ .name = "qcom,qpnp-power-on",
+ .of_match_table = spmi_match_table,
+ },
+ .probe = qpnp_pon_probe,
+ .remove = __devexit_p(qpnp_pon_remove),
+};
+
+static int __init qpnp_pon_init(void)
+{
+ return spmi_driver_register(&qpnp_pon_driver);
+}
+module_init(qpnp_pon_init);
+
+static void __exit qpnp_pon_exit(void)
+{
+	spmi_driver_unregister(&qpnp_pon_driver);
+}
+module_exit(qpnp_pon_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC POWER-ON driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/qpnp-pwm.c b/drivers/platform/msm/qpnp-pwm.c
index c9cd0e0..708d658 100644
--- a/drivers/platform/msm/qpnp-pwm.c
+++ b/drivers/platform/msm/qpnp-pwm.c
@@ -440,7 +440,7 @@
unsigned int pwm_value, max_pwm_value;
struct qpnp_lpg_chip *chip = pwm->chip;
struct qpnp_lut_config *lut = &chip->lpg_config.lut_config;
- int i, pwm_size, rc;
+ int i, pwm_size, rc = 0;
int burst_size = SPMI_MAX_BUF_LEN;
int list_len = lut->list_size << 1;
int offset = lut->lo_index << 2;
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index f84e3ac..85b653d 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -248,6 +248,7 @@
bool ext_charging;
bool ext_charge_done;
bool iusb_fine_res;
+ bool dc_unplug_check;
DECLARE_BITMAP(enabled_irqs, PM_CHG_MAX_INTS);
struct work_struct battery_id_valid_work;
int64_t batt_id_min;
@@ -2447,6 +2448,10 @@
}
} else if (active_path & DC_ACTIVE_BIT) {
pr_debug("DC charger active\n");
+ /* Some board designs are not prone to reverse boost on DC
+ * charging path */
+ if (!chip->dc_unplug_check)
+ return;
} else {
/* No charger active */
if (!(is_usb_chg_plugged_in(chip)
@@ -4000,6 +4005,7 @@
chip->warm_temp_dc = INT_MIN;
chip->temp_check_period = pdata->temp_check_period;
+ chip->dc_unplug_check = pdata->dc_unplug_check;
chip->max_bat_chg_current = pdata->max_bat_chg_current;
chip->cool_bat_chg_current = pdata->cool_bat_chg_current;
chip->warm_bat_chg_current = pdata->warm_bat_chg_current;
diff --git a/drivers/power/smb349.c b/drivers/power/smb349.c
index ffc92d5..f9ca81c 100644
--- a/drivers/power/smb349.c
+++ b/drivers/power/smb349.c
@@ -617,6 +617,8 @@
the_smb349_chg = smb349_chg;
+ spin_lock_init(&smb349_chg->lock);
+
create_debugfs_entries(smb349_chg);
INIT_WORK(&smb349_chg->chg_work, chg_worker);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index fc0d02a..1849118 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1169,4 +1169,13 @@
This driver can also be built as a module. If so, the module
will be called rtc-ls1x.
+config RTC_DRV_QPNP
+ tristate "Qualcomm QPNP PMIC RTC"
+ depends on SPMI && OF_SPMI && MSM_QPNP_INT
+ help
+ Say Y here if you want to support the Qualcomm QPNP PMIC RTC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qpnp-rtc.
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 8a3cecd..295f927 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -88,6 +88,7 @@
obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
obj-$(CONFIG_RTC_DRV_PUV3) += rtc-puv3.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
+obj-$(CONFIG_RTC_DRV_QPNP) += qpnp-rtc.o
obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o
obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
diff --git a/drivers/rtc/qpnp-rtc.c b/drivers/rtc/qpnp-rtc.c
new file mode 100644
index 0000000..5650e74
--- /dev/null
+++ b/drivers/rtc/qpnp-rtc.c
@@ -0,0 +1,641 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/of_device.h>
+#include <linux/spmi.h>
+#include <linux/spinlock.h>
+#include <linux/spmi.h>
+
+/* RTC/ALARM Register offsets */
+#define REG_OFFSET_ALARM_RW 0x40
+#define REG_OFFSET_ALARM_CTRL1 0x46
+#define REG_OFFSET_ALARM_CTRL2 0x48
+#define REG_OFFSET_RTC_WRITE 0x40
+#define REG_OFFSET_RTC_CTRL 0x46
+#define REG_OFFSET_RTC_READ 0x48
+#define REG_OFFSET_PERP_SUBTYPE 0x05
+
+/* RTC_CTRL register bit fields */
+#define BIT_RTC_ENABLE BIT(7)
+#define BIT_RTC_ALARM_ENABLE BIT(7)
+#define BIT_RTC_ABORT_ENABLE BIT(0)
+#define BIT_RTC_ALARM_CLEAR BIT(0)
+
+/* RTC/ALARM peripheral subtype values */
+#define RTC_PERPH_SUBTYPE 0x1
+#define ALARM_PERPH_SUBTYPE 0x3
+
+#define NUM_8_BIT_RTC_REGS 0x4
+
+#define TO_SECS(arr) (arr[0] | (arr[1] << 8) | (arr[2] << 16) | \
+ (arr[3] << 24))
+
+/* rtc driver internal structure */
+struct qpnp_rtc {
+ u8 rtc_ctrl_reg;
+ u8 alarm_ctrl_reg1;
+ u16 rtc_base;
+ u16 alarm_base;
+ u32 rtc_write_enable;
+ u32 rtc_alarm_powerup;
+ int rtc_alarm_irq;
+ struct device *rtc_dev;
+ struct rtc_device *rtc;
+ struct spmi_device *spmi;
+ spinlock_t alarm_ctrl_lock;
+};
+
+static int qpnp_read_wrapper(struct qpnp_rtc *rtc_dd, u8 *rtc_val,
+ u16 base, int count)
+{
+ int rc;
+ struct spmi_device *spmi = rtc_dd->spmi;
+
+ rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, base, rtc_val,
+ count);
+ if (rc) {
+ dev_err(rtc_dd->rtc_dev, "SPMI read failed\n");
+ return rc;
+ }
+ return 0;
+}
+
+static int qpnp_write_wrapper(struct qpnp_rtc *rtc_dd, u8 *rtc_val,
+ u16 base, int count)
+{
+ int rc;
+ struct spmi_device *spmi = rtc_dd->spmi;
+
+ rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, base, rtc_val,
+ count);
+ if (rc) {
+ dev_err(rtc_dd->rtc_dev, "SPMI write failed\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+qpnp_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ int rc;
+ unsigned long secs, irq_flags;
+ u8 value[4], reg = 0, alarm_enabled = 0, ctrl_reg;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ rtc_tm_to_time(tm, &secs);
+
+ value[0] = secs & 0xFF;
+ value[1] = (secs >> 8) & 0xFF;
+ value[2] = (secs >> 16) & 0xFF;
+ value[3] = (secs >> 24) & 0xFF;
+
+ dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
+
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ ctrl_reg = rtc_dd->alarm_ctrl_reg1;
+
+ if (ctrl_reg & BIT_RTC_ALARM_ENABLE) {
+ alarm_enabled = 1;
+ ctrl_reg &= ~BIT_RTC_ALARM_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(dev, "Write to ALARM ctrl reg failed\n");
+ goto rtc_rw_fail;
+ }
+ } else
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ /*
+ * 32 bit seconds value is coverted to four 8 bit values
+ * |<------ 32 bit time value in seconds ------>|
+ * <- 8 bit ->|<- 8 bit ->|<- 8 bit ->|<- 8 bit ->|
+ * ----------------------------------------------
+ * | BYTE[3] | BYTE[2] | BYTE[1] | BYTE[0] |
+ * ----------------------------------------------
+ *
+ * RTC has four 8 bit registers for writting time in seconds:
+ * WDATA[3], WDATA[2], WDATA[1], WDATA[0]
+ *
+ * Write to the RTC registers should be done in following order
+ * Clear WDATA[0] register
+ *
+ * Write BYTE[1], BYTE[2] and BYTE[3] of time to
+ * RTC WDATA[3], WDATA[2], WDATA[1] registers
+ *
+ * Write BYTE[0] of time to RTC WDATA[0] register
+ *
+ * Clearing BYTE[0] and writting in the end will prevent any
+ * unintentional overflow from WDATA[0] to higher bytes during the
+ * write operation
+ */
+
+ /* Clear WDATA[0] */
+ reg = 0x0;
+ rc = qpnp_write_wrapper(rtc_dd, ®,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_WRITE, 1);
+ if (rc) {
+ dev_err(dev, "Write to RTC reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ /* Write to WDATA[3], WDATA[2] and WDATA[1] */
+ rc = qpnp_write_wrapper(rtc_dd, &value[1],
+ rtc_dd->rtc_base + REG_OFFSET_RTC_WRITE + 1, 3);
+ if (rc) {
+ dev_err(dev, "Write to RTC reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ /* Write to WDATA[0] */
+ rc = qpnp_write_wrapper(rtc_dd, value,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_WRITE, 1);
+ if (rc) {
+ dev_err(dev, "Write to RTC reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ if (alarm_enabled) {
+ ctrl_reg |= BIT_RTC_ALARM_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(dev, "Write to ALARM ctrl reg failed\n");
+ goto rtc_rw_fail;
+ }
+ }
+
+ rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+
+rtc_rw_fail:
+ if (alarm_enabled)
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ return rc;
+}
+
+static int
+qpnp_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ int rc;
+ u8 value[4], reg;
+ unsigned long secs;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ rc = qpnp_read_wrapper(rtc_dd, value,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_READ,
+ NUM_8_BIT_RTC_REGS);
+ if (rc) {
+ dev_err(dev, "Read from RTC reg failed\n");
+ return rc;
+ }
+
+ /*
+ * Read the LSB again and check if there has been a carry over
+ * If there is, redo the read operation
+ */
+ rc = qpnp_read_wrapper(rtc_dd, ®,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_READ, 1);
+ if (rc) {
+ dev_err(dev, "Read from RTC reg failed\n");
+ return rc;
+ }
+
+ if (reg < value[0]) {
+ rc = qpnp_read_wrapper(rtc_dd, value,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_READ,
+ NUM_8_BIT_RTC_REGS);
+ if (rc) {
+ dev_err(dev, "Read from RTC reg failed\n");
+ return rc;
+ }
+ }
+
+ secs = TO_SECS(value);
+
+ rtc_time_to_tm(secs, tm);
+
+ rc = rtc_valid_tm(tm);
+ if (rc) {
+ dev_err(dev, "Invalid time read from RTC\n");
+ return rc;
+ }
+
+ dev_dbg(dev, "secs = %lu, h:m:s == %d:%d:%d, d/m/y = %d/%d/%d\n",
+ secs, tm->tm_hour, tm->tm_min, tm->tm_sec,
+ tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+ return 0;
+}
+
+static int
+qpnp_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ int rc;
+ u8 value[4], ctrl_reg;
+ unsigned long secs, secs_rtc, irq_flags;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+ struct rtc_time rtc_tm;
+
+ rtc_tm_to_time(&alarm->time, &secs);
+
+ /*
+ * Read the current RTC time and verify if the alarm time is in the
+ * past. If yes, return invalid
+ */
+ rc = qpnp_rtc_read_time(dev, &rtc_tm);
+ if (rc) {
+ dev_err(dev, "Unable to read RTC time\n");
+ return -EINVAL;
+ }
+
+ rtc_tm_to_time(&rtc_tm, &secs_rtc);
+ if (secs < secs_rtc) {
+ dev_err(dev, "Trying to set alarm in the past\n");
+ return -EINVAL;
+ }
+
+ value[0] = secs & 0xFF;
+ value[1] = (secs >> 8) & 0xFF;
+ value[2] = (secs >> 16) & 0xFF;
+ value[3] = (secs >> 24) & 0xFF;
+
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ rc = qpnp_write_wrapper(rtc_dd, value,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_RW,
+ NUM_8_BIT_RTC_REGS);
+ if (rc) {
+ dev_err(dev, "Write to ALARM reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ ctrl_reg = (alarm->enabled) ?
+ (rtc_dd->alarm_ctrl_reg1 | BIT_RTC_ALARM_ENABLE) :
+ (rtc_dd->alarm_ctrl_reg1 & ~BIT_RTC_ALARM_ENABLE);
+
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(dev, "Write to ALARM cntrol reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+
+ dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
+ alarm->time.tm_hour, alarm->time.tm_min,
+ alarm->time.tm_sec, alarm->time.tm_mday,
+ alarm->time.tm_mon, alarm->time.tm_year);
+rtc_rw_fail:
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ return rc;
+}
+
+static int
+qpnp_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ int rc;
+ u8 value[4];
+ unsigned long secs;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ rc = qpnp_read_wrapper(rtc_dd, value,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_RW,
+ NUM_8_BIT_RTC_REGS);
+ if (rc) {
+ dev_err(dev, "Read from ALARM reg failed\n");
+ return rc;
+ }
+
+ secs = TO_SECS(value);
+ rtc_time_to_tm(secs, &alarm->time);
+
+ rc = rtc_valid_tm(&alarm->time);
+ if (rc) {
+ dev_err(dev, "Invalid time read from RTC\n");
+ return rc;
+ }
+
+ dev_dbg(dev, "Alarm set for - h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
+ alarm->time.tm_hour, alarm->time.tm_min,
+ alarm->time.tm_sec, alarm->time.tm_mday,
+ alarm->time.tm_mon, alarm->time.tm_year);
+
+ return 0;
+}
+
+
+static int
+qpnp_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ int rc;
+ unsigned long irq_flags;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+ u8 ctrl_reg;
+
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ ctrl_reg = rtc_dd->alarm_ctrl_reg1;
+ ctrl_reg = enabled ? (ctrl_reg | BIT_RTC_ALARM_ENABLE) :
+ (ctrl_reg & ~BIT_RTC_ALARM_ENABLE);
+
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(dev, "Write to ALARM control reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+
+rtc_rw_fail:
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ return rc;
+}
+
+static struct rtc_class_ops qpnp_rtc_ops = {
+ .read_time = qpnp_rtc_read_time,
+ .set_alarm = qpnp_rtc_set_alarm,
+ .read_alarm = qpnp_rtc_read_alarm,
+ .alarm_irq_enable = qpnp_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t qpnp_alarm_trigger(int irq, void *dev_id)
+{
+ struct qpnp_rtc *rtc_dd = dev_id;
+ u8 ctrl_reg;
+ int rc;
+ unsigned long irq_flags;
+
+ rtc_update_irq(rtc_dd->rtc, 1, RTC_IRQF | RTC_AF);
+
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ /* Clear the alarm enable bit */
+ ctrl_reg = rtc_dd->alarm_ctrl_reg1;
+ ctrl_reg &= ~BIT_RTC_ALARM_ENABLE;
+
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ dev_err(rtc_dd->rtc_dev,
+ "Write to ALARM control reg failed\n");
+ goto rtc_alarm_handled;
+ }
+
+ rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ /* Set ALARM_CLR bit */
+ ctrl_reg = 0x1;
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL2, 1);
+ if (rc)
+ dev_err(rtc_dd->rtc_dev,
+ "Write to ALARM control reg failed\n");
+
+rtc_alarm_handled:
+ return IRQ_HANDLED;
+}
+
+static int __devinit qpnp_rtc_probe(struct spmi_device *spmi)
+{
+ int rc;
+ u8 subtype;
+ struct qpnp_rtc *rtc_dd;
+ struct resource *resource;
+ struct spmi_resource *spmi_resource;
+
+ rtc_dd = devm_kzalloc(&spmi->dev, sizeof(*rtc_dd), GFP_KERNEL);
+ if (rtc_dd == NULL) {
+ dev_err(&spmi->dev, "Unable to allocate memory!\n");
+ return -ENOMEM;
+ }
+
+ /* Get the rtc write property */
+ rc = of_property_read_u32(spmi->dev.of_node, "qcom,qpnp-rtc-write",
+ &rtc_dd->rtc_write_enable);
+ if (rc && rc != -EINVAL) {
+ dev_err(&spmi->dev,
+ "Error reading rtc_write_enable property %d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,qpnp-rtc-alarm-pwrup",
+ &rtc_dd->rtc_alarm_powerup);
+ if (rc && rc != -EINVAL) {
+ dev_err(&spmi->dev,
+ "Error reading rtc_alarm_powerup property %d\n", rc);
+ return rc;
+ }
+
+ /* Initialise spinlock to protect RTC control register */
+ spin_lock_init(&rtc_dd->alarm_ctrl_lock);
+
+ rtc_dd->rtc_dev = &(spmi->dev);
+ rtc_dd->spmi = spmi;
+
+ /* Get RTC/ALARM resources */
+ spmi_for_each_container_dev(spmi_resource, spmi) {
+ if (!spmi_resource) {
+ dev_err(&spmi->dev,
+ "%s: rtc_alarm: spmi resource absent!\n",
+ __func__);
+ rc = -ENXIO;
+ goto fail_rtc_enable;
+ }
+
+ resource = spmi_get_resource(spmi, spmi_resource,
+ IORESOURCE_MEM, 0);
+ if (!(resource && resource->start)) {
+ dev_err(&spmi->dev,
+ "%s: node %s IO resource absent!\n",
+ __func__, spmi->dev.of_node->full_name);
+ rc = -ENXIO;
+ goto fail_rtc_enable;
+ }
+
+ rc = qpnp_read_wrapper(rtc_dd, &subtype,
+ resource->start + REG_OFFSET_PERP_SUBTYPE, 1);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "Peripheral subtype read failed\n");
+ goto fail_rtc_enable;
+ }
+
+ switch (subtype) {
+ case RTC_PERPH_SUBTYPE:
+ rtc_dd->rtc_base = resource->start;
+ break;
+ case ALARM_PERPH_SUBTYPE:
+ rtc_dd->alarm_base = resource->start;
+ rtc_dd->rtc_alarm_irq =
+ spmi_get_irq(spmi, spmi_resource, 0);
+ if (rtc_dd->rtc_alarm_irq < 0) {
+ dev_err(&spmi->dev, "ALARM IRQ absent\n");
+ rc = -ENXIO;
+ goto fail_rtc_enable;
+ }
+ break;
+ default:
+ dev_err(&spmi->dev, "Invalid peripheral subtype\n");
+ rc = -EINVAL;
+ goto fail_rtc_enable;
+ }
+ }
+
+ rtc_dd->rtc_ctrl_reg = BIT_RTC_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &rtc_dd->rtc_ctrl_reg,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_CTRL, 1);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "Write to RTC control reg failed\n");
+ goto fail_rtc_enable;
+ }
+
+ /* Enable abort enable feature */
+ rtc_dd->alarm_ctrl_reg1 = BIT_RTC_ABORT_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &rtc_dd->alarm_ctrl_reg1,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(&spmi->dev, "SPMI write failed!\n");
+ goto fail_rtc_enable;
+ }
+
+	if (rtc_dd->rtc_write_enable)
+		qpnp_rtc_ops.set_time = qpnp_rtc_set_time;
+
+ dev_set_drvdata(&spmi->dev, rtc_dd);
+
+ /* Register the RTC device */
+ rtc_dd->rtc = rtc_device_register("qpnp_rtc", &spmi->dev,
+ &qpnp_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc_dd->rtc)) {
+ dev_err(&spmi->dev, "%s: RTC registration failed (%ld)\n",
+ __func__, PTR_ERR(rtc_dd->rtc));
+ rc = PTR_ERR(rtc_dd->rtc);
+ goto fail_rtc_enable;
+ }
+
+ /* Request the alarm IRQ */
+ rc = request_any_context_irq(rtc_dd->rtc_alarm_irq,
+ qpnp_alarm_trigger, IRQF_TRIGGER_RISING,
+ "qpnp_rtc_alarm", rtc_dd);
+ if (rc) {
+ dev_err(&spmi->dev, "Request IRQ failed (%d)\n", rc);
+ goto fail_req_irq;
+ }
+
+ device_init_wakeup(&spmi->dev, 1);
+ enable_irq_wake(rtc_dd->rtc_alarm_irq);
+
+ dev_dbg(&spmi->dev, "Probe success !!\n");
+
+ return 0;
+
+fail_req_irq:
+ rtc_device_unregister(rtc_dd->rtc);
+fail_rtc_enable:
+ dev_set_drvdata(&spmi->dev, NULL);
+
+ return rc;
+}
+
+static int __devexit qpnp_rtc_remove(struct spmi_device *spmi)
+{
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(&spmi->dev);
+
+ device_init_wakeup(&spmi->dev, 0);
+ free_irq(rtc_dd->rtc_alarm_irq, rtc_dd);
+ rtc_device_unregister(rtc_dd->rtc);
+ dev_set_drvdata(&spmi->dev, NULL);
+
+ return 0;
+}
+
+static void qpnp_rtc_shutdown(struct spmi_device *spmi)
+{
+ u8 value[4] = {0};
+ u8 reg;
+ int rc;
+ unsigned long irq_flags;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(&spmi->dev);
+ bool rtc_alarm_powerup = rtc_dd->rtc_alarm_powerup;
+
+ if (!rtc_alarm_powerup) {
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ dev_dbg(&spmi->dev, "Disabling alarm interrupts\n");
+
+ /* Disable RTC alarms */
+ reg = rtc_dd->alarm_ctrl_reg1;
+ reg &= ~BIT_RTC_ALARM_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, ®,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(rtc_dd->rtc_dev, "SPMI write failed\n");
+ goto fail_alarm_disable;
+ }
+
+ /* Clear Alarm register */
+ rc = qpnp_write_wrapper(rtc_dd, value,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_RW,
+ NUM_8_BIT_RTC_REGS);
+ if (rc)
+ dev_err(rtc_dd->rtc_dev, "SPMI write failed\n");
+
+fail_alarm_disable:
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ }
+}
+
+static struct of_device_id spmi_match_table[] = {
+ {
+ .compatible = "qcom,qpnp-rtc",
+ },
+ {}
+};
+
+static struct spmi_driver qpnp_rtc_driver = {
+ .probe = qpnp_rtc_probe,
+ .remove = __devexit_p(qpnp_rtc_remove),
+ .shutdown = qpnp_rtc_shutdown,
+ .driver = {
+ .name = "qcom,qpnp-rtc",
+ .owner = THIS_MODULE,
+ .of_match_table = spmi_match_table,
+ },
+};
+
+static int __init qpnp_rtc_init(void)
+{
+ return spmi_driver_register(&qpnp_rtc_driver);
+}
+module_init(qpnp_rtc_init);
+
+static void __exit qpnp_rtc_exit(void)
+{
+ spmi_driver_unregister(&qpnp_rtc_driver);
+}
+module_exit(qpnp_rtc_exit);
+
+MODULE_DESCRIPTION("SPMI PMIC RTC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
index 36414e0..72564b0 100644
--- a/drivers/slimbus/slimbus.c
+++ b/drivers/slimbus/slimbus.c
@@ -2778,6 +2778,9 @@
}
if (slc->def > 0)
slc->def--;
+ /* Disconnect source port to free it up */
+ if (SLIM_HDL_TO_LA(slc->srch) == sb->laddr)
+ slc->srch = 0;
if (slc->def == 0)
ret = add_pending_ch(&sb->mark_removal, chan);
if (ret)
@@ -2946,6 +2949,7 @@
mutex_unlock(&ctrl->sched.m_reconf);
return ret;
}
+EXPORT_SYMBOL_GPL(slim_ctrl_clk_pause);
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
diff --git a/drivers/thermal/msm8974-tsens.c b/drivers/thermal/msm8974-tsens.c
index 6628b79..0628d2e 100644
--- a/drivers/thermal/msm8974-tsens.c
+++ b/drivers/thermal/msm8974-tsens.c
@@ -98,10 +98,10 @@
#define TSENS_THRESHOLD_MIN_CODE 0x0
#define TSENS_CTRL_INIT_DATA1 0x3fffff9
-#define TSENS_GLOBAL_INIT_DATA 0x20013
-#define TSENS_S0_MAIN_CFG_INIT_DATA 0x1ba
+#define TSENS_GLOBAL_INIT_DATA 0x302f16c
+#define TSENS_S0_MAIN_CFG_INIT_DATA 0x1c3
#define TSENS_SN_MIN_MAX_STATUS_CTRL_DATA 0x3ffc00
-#define TSENS_SN_REMOTE_CFG_DATA 0xdba
+#define TSENS_SN_REMOTE_CFG_DATA 0x11c3
/* Trips: warm and cool */
enum tsens_trip_type {
@@ -212,7 +212,7 @@
return 0;
}
-EXPORT_SYMBOL(msm_tsens_get_temp);
+EXPORT_SYMBOL(tsens_get_temp);
static int tsens_tz_get_mode(struct thermal_zone_device *thermal,
enum thermal_device_mode *mode)
@@ -474,13 +474,15 @@
static int tsens_calib_sensors(void)
{
- int i, tsens_base1_data, tsens0_point1, tsens1_point1;
- int tsens2_point1, tsens3_point1, tsens4_point1, tsens5_point1;
- int tsens6_point1, tsens7_point1, tsens8_point1, tsens9_point1;
- int tsens10_point1, tsens0_point2, tsens1_point2, tsens2_point2;
- int tsens3_point2, tsens4_point2, tsens5_point2, tsens6_point2;
- int tsens7_point2, tsens8_point2, tsens9_point2, tsens10_point2;
- int tsens_base2_data, tsens_calibration_mode, temp;
+ int i, tsens_base1_data = 0, tsens0_point1 = 0, tsens1_point1 = 0;
+ int tsens2_point1 = 0, tsens3_point1 = 0, tsens4_point1 = 0;
+ int tsens5_point1 = 0, tsens6_point1 = 0, tsens7_point1 = 0;
+ int tsens8_point1 = 0, tsens9_point1 = 0, tsens10_point1 = 0;
+ int tsens0_point2 = 0, tsens1_point2 = 0, tsens2_point2 = 0;
+ int tsens3_point2 = 0, tsens4_point2 = 0, tsens5_point2 = 0;
+ int tsens6_point2 = 0, tsens7_point2 = 0, tsens8_point2 = 0;
+ int tsens9_point2 = 0, tsens10_point2 = 0;
+ int tsens_base2_data = 0, tsens_calibration_mode = 0, temp;
uint32_t calib_data[5];
for (i = 0; i < 5; i++)
@@ -492,12 +494,14 @@
temp = (calib_data[3] & TSENS_CAL_SEL_2
>> TSENS_CAL_SEL_SHIFT_2);
tsens_calibration_mode |= temp;
- /* Remove this after bringup */
- tsens_calibration_mode = TSENS_ONE_POINT_CALIB;
if (!tsens_calibration_mode) {
- pr_err("TSENS not calibrated\n");
- return -ENODEV;
+ pr_debug("TSENS is calibrationless mode\n");
+ for (i = 0; i < tmdev->tsens_num_sensor; i++) {
+ tmdev->sensor[i].calib_data_point2 = 78000;
+ tmdev->sensor[i].calib_data_point1 = 49200;
+ goto compute_intercept_slope;
+ }
} else if (tsens_calibration_mode == TSENS_ONE_POINT_CALIB ||
TSENS_TWO_POINT_CALIB) {
tsens_base1_data = calib_data[0] & TSENS_BASE1_MASK;
@@ -525,63 +529,72 @@
tsens8_point2 = calib_data[4] & TSENS8_POINT2_MASK;
tsens9_point2 = calib_data[4] & TSENS9_POINT2_MASK;
tsens10_point2 = calib_data[4] & TSENS10_POINT2_MASK;
- } else
+ } else {
pr_debug("Calibration mode is unknown: %d\n",
tsens_calibration_mode);
+ return -ENODEV;
+ }
- tmdev->sensor[0].calib_data_point1 =
+ if (tsens_calibration_mode == TSENS_ONE_POINT_CALIB) {
+ tmdev->sensor[0].calib_data_point1 =
(((tsens_base1_data + tsens0_point1) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[0].calib_data_point2 =
- (((tsens_base2_data + tsens0_point2) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[1].calib_data_point1 =
+ tmdev->sensor[1].calib_data_point1 =
(((tsens_base1_data + tsens1_point1) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[1].calib_data_point2 =
- (((tsens_base2_data + tsens1_point2) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[2].calib_data_point1 =
+ tmdev->sensor[2].calib_data_point1 =
(((tsens_base1_data + tsens2_point1) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[2].calib_data_point2 =
- (((tsens_base2_data + tsens2_point2) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[3].calib_data_point1 =
+ tmdev->sensor[3].calib_data_point1 =
(((tsens_base1_data + tsens3_point1) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[3].calib_data_point2 =
- (((tsens_base2_data + tsens3_point2) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[4].calib_data_point1 =
+ tmdev->sensor[4].calib_data_point1 =
(((tsens_base1_data + tsens4_point1) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[4].calib_data_point2 =
- (((tsens_base2_data + tsens4_point2) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[5].calib_data_point1 =
+ tmdev->sensor[5].calib_data_point1 =
(((tsens_base1_data + tsens5_point1) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[5].calib_data_point2 =
- (((tsens_base2_data + tsens5_point2) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[6].calib_data_point1 =
+ tmdev->sensor[6].calib_data_point1 =
(((tsens_base1_data + tsens6_point1) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[6].calib_data_point2 =
- (((tsens_base2_data + tsens6_point2) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[7].calib_data_point1 =
+ tmdev->sensor[7].calib_data_point1 =
(((tsens_base1_data + tsens7_point1) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[7].calib_data_point2 =
- (((tsens_base2_data + tsens7_point2) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[8].calib_data_point1 =
+ tmdev->sensor[8].calib_data_point1 =
(((tsens_base1_data + tsens8_point1) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[8].calib_data_point2 =
- (((tsens_base2_data + tsens8_point2) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[9].calib_data_point1 =
+ tmdev->sensor[9].calib_data_point1 =
(((tsens_base1_data + tsens9_point1) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[9].calib_data_point2 =
- (((tsens_base2_data + tsens9_point2) < 2) | TSENS_BIT_APPEND);
- tmdev->sensor[10].calib_data_point1 =
+ tmdev->sensor[10].calib_data_point1 =
(((tsens_base1_data + tsens10_point1) << 2) | TSENS_BIT_APPEND);
- tmdev->sensor[10].calib_data_point2 =
- (((tsens_base2_data + tsens10_point2) << 2) | TSENS_BIT_APPEND);
+ }
+ if (tsens_calibration_mode == TSENS_TWO_POINT_CALIB) {
+ tmdev->sensor[0].calib_data_point2 =
+ (((tsens_base2_data + tsens0_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[1].calib_data_point2 =
+ (((tsens_base2_data + tsens1_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[2].calib_data_point2 =
+ (((tsens_base2_data + tsens2_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[3].calib_data_point2 =
+ (((tsens_base2_data + tsens3_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[4].calib_data_point2 =
+ (((tsens_base2_data + tsens4_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[5].calib_data_point2 =
+ (((tsens_base2_data + tsens5_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[6].calib_data_point2 =
+ (((tsens_base2_data + tsens6_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[7].calib_data_point2 =
+ (((tsens_base2_data + tsens7_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[8].calib_data_point2 =
+ (((tsens_base2_data + tsens8_point2) << 2) | TSENS_BIT_APPEND);
+	tmdev->sensor[9].calib_data_point2 =
+	(((tsens_base2_data + tsens9_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[10].calib_data_point2 =
+ (((tsens_base2_data + tsens10_point2) << 2) | TSENS_BIT_APPEND);
+ }
+
+compute_intercept_slope:
for (i = 0; i < tmdev->tsens_num_sensor; i++) {
int32_t num = 0, den = 0;
- num = TSENS_CAL_DEGC_POINT2 - TSENS_CAL_DEGC_POINT2;
- den = tmdev->sensor[i].calib_data_point2 -
+ if (tsens_calibration_mode == TSENS_TWO_POINT_CALIB) {
+			num = TSENS_CAL_DEGC_POINT2 - TSENS_CAL_DEGC_POINT1;
+ den = tmdev->sensor[i].calib_data_point2 -
tmdev->sensor[i].calib_data_point1;
- num *= tmdev->tsens_factor;
- if (tsens_calibration_mode == TSENS_TWO_POINT_CALIB)
+ num *= tmdev->tsens_factor;
tmdev->sensor[i].slope_mul_tsens_factor = num/den;
+ }
tmdev->sensor[i].offset = (TSENS_CAL_DEGC_POINT1 *
tmdev->tsens_factor)
- (tmdev->sensor[i].calib_data_point1 *
@@ -615,7 +628,7 @@
}
rc = of_property_read_u32_array(of_node,
- "qcom,slope", tsens_slope_data, tsens_num_sensors/sizeof(u32));
+ "qcom,slope", tsens_slope_data, tsens_num_sensors);
if (rc) {
dev_err(&pdev->dev, "invalid or missing property: tsens-slope\n");
return rc;
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index a8d3720..0575d80 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -28,6 +28,33 @@
static uint32_t limited_max_freq = MSM_CPUFREQ_NO_LIMIT;
static struct delayed_work check_temp_work;
+static int limit_idx;
+static int limit_idx_low;
+static int limit_idx_high;
+static struct cpufreq_frequency_table *table;
+
+static int msm_thermal_get_freq_table(void)
+{
+ int ret = 0;
+ int i = 0;
+
+ table = cpufreq_frequency_get_table(0);
+ if (table == NULL) {
+ pr_debug("%s: error reading cpufreq table\n", __func__);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ while (table[i].frequency != CPUFREQ_TABLE_END)
+ i++;
+
+ limit_idx_low = 0;
+ limit_idx_high = limit_idx = i - 1;
+ BUG_ON(limit_idx_high <= 0 || limit_idx_high <= limit_idx_low);
+fail:
+ return ret;
+}
+
static int update_cpu_max_freq(int cpu, uint32_t max_freq)
{
int ret = 0;
@@ -36,10 +63,6 @@
if (ret)
return ret;
- ret = cpufreq_update_policy(cpu);
- if (ret)
- return ret;
-
limited_max_freq = max_freq;
if (max_freq != MSM_CPUFREQ_NO_LIMIT)
pr_info("msm_thermal: Limiting cpu%d max frequency to %d\n",
@@ -47,11 +70,14 @@
else
pr_info("msm_thermal: Max frequency reset for cpu%d\n", cpu);
+ ret = cpufreq_update_policy(cpu);
+
return ret;
}
static void check_temp(struct work_struct *work)
{
+ static int limit_init;
struct tsens_device tsens_dev;
unsigned long temp = 0;
uint32_t max_freq = limited_max_freq;
@@ -66,12 +92,34 @@
goto reschedule;
}
- if (temp >= msm_thermal_info.limit_temp)
- max_freq = msm_thermal_info.limit_freq;
- else if (temp <
- msm_thermal_info.limit_temp - msm_thermal_info.temp_hysteresis)
- max_freq = MSM_CPUFREQ_NO_LIMIT;
+ if (!limit_init) {
+ ret = msm_thermal_get_freq_table();
+ if (ret)
+ goto reschedule;
+ else
+ limit_init = 1;
+ }
+ if (temp >= msm_thermal_info.limit_temp_degC) {
+ if (limit_idx == limit_idx_low)
+ goto reschedule;
+
+ limit_idx -= msm_thermal_info.freq_step;
+ if (limit_idx < limit_idx_low)
+ limit_idx = limit_idx_low;
+ max_freq = table[limit_idx].frequency;
+ } else if (temp < msm_thermal_info.limit_temp_degC -
+ msm_thermal_info.temp_hysteresis_degC) {
+ if (limit_idx == limit_idx_high)
+ goto reschedule;
+
+ limit_idx += msm_thermal_info.freq_step;
+ if (limit_idx >= limit_idx_high) {
+ limit_idx = limit_idx_high;
+ max_freq = MSM_CPUFREQ_NO_LIMIT;
+ } else
+ max_freq = table[limit_idx].frequency;
+ }
if (max_freq == limited_max_freq)
goto reschedule;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index c483bb45..a5235ba 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -503,11 +503,10 @@
if (msm_port->uim) {
msm_write(port,
- UART_SIM_CFG_UIM_TX_MODE |
- UART_SIM_CFG_UIM_RX_MODE |
UART_SIM_CFG_STOP_BIT_LEN_N(1) |
UART_SIM_CFG_SIM_CLK_ON |
UART_SIM_CFG_SIM_CLK_STOP_HIGH |
+ UART_SIM_CFG_MASK_RX |
UART_SIM_CFG_SIM_SEL,
UART_SIM_CFG);
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index a769825..34228ec 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -108,6 +108,7 @@
#define UART_SIM_CFG_SIM_CLK_ON (1 << 7)
#define UART_SIM_CFG_SIM_CLK_TD8_SEL (1 << 6)
#define UART_SIM_CFG_SIM_CLK_STOP_HIGH (1 << 5)
+#define UART_SIM_CFG_MASK_RX (1 << 3)
#define UART_SIM_CFG_SIM_SEL (1 << 0)
#define UART_MISR_MODE 0x0040
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 083ed19..1904706 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -174,7 +174,7 @@
#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
#define UARTDM_RX_BUF_SIZE 512
#define RETRY_TIMEOUT 5
-#define UARTDM_NR 2
+#define UARTDM_NR 5
static struct dentry *debug_base;
static struct msm_hs_port q_uart_port[UARTDM_NR];
diff --git a/drivers/tty/smux_ctl.c b/drivers/tty/smux_ctl.c
index 2b8f028..0078b04 100644
--- a/drivers/tty/smux_ctl.c
+++ b/drivers/tty/smux_ctl.c
@@ -52,6 +52,7 @@
};
#define SMUX_CTL_NUM_CHANNELS ARRAY_SIZE(smux_ctl_ch_id)
+#define DEFAULT_OPEN_TIMEOUT 5
struct smux_ctl_dev {
int id;
@@ -64,6 +65,7 @@
int is_channel_reset;
int is_high_wm;
int write_pending;
+ unsigned open_timeout_val;
struct mutex rx_lock;
uint32_t read_avail;
@@ -149,6 +151,52 @@
#define SMUXCTL_SET_LOOPBACK(lcid) do {} while (0)
#endif
+static ssize_t open_timeout_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t n)
+{
+ int i;
+ unsigned long tmp;
+ for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+ if (smux_ctl_devp[i]->devicep == d)
+ break;
+ }
+ if (i >= SMUX_CTL_NUM_CHANNELS) {
+ pr_err("%s: unable to match device to valid smux ctl port\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (!kstrtoul(buf, 10, &tmp)) {
+ smux_ctl_devp[i]->open_timeout_val = tmp;
+ return n;
+ } else {
+ pr_err("%s: unable to convert: %s to an int\n", __func__,
+ buf);
+ return -EINVAL;
+ }
+}
+
+static ssize_t open_timeout_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+ for (i = 0; i < SMUX_CTL_NUM_CHANNELS; ++i) {
+ if (smux_ctl_devp[i]->devicep == d)
+ break;
+ }
+ if (i >= SMUX_CTL_NUM_CHANNELS) {
+ pr_err("%s: unable to match device to valid smux ctl port\n",
+ __func__);
+ return -EINVAL;
+ }
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ smux_ctl_devp[i]->open_timeout_val);
+}
+
+static DEVICE_ATTR(open_timeout, 0664, open_timeout_show, open_timeout_store);
+
static int get_ctl_dev_index(int id)
{
int dev_index;
@@ -323,6 +371,7 @@
{
int r = 0;
struct smux_ctl_dev *devp;
+ unsigned wait_time = DEFAULT_OPEN_TIMEOUT * HZ;
if (!smux_ctl_inited)
return -EIO;
@@ -349,11 +398,14 @@
return r;
}
+ if (devp->open_timeout_val)
+ wait_time = devp->open_timeout_val * HZ;
+
r = wait_event_interruptible_timeout(
devp->write_wait_queue,
(devp->state == SMUX_CONNECTED ||
- devp->abort_wait),
- (5 * HZ));
+ devp->abort_wait),
+ wait_time);
if (r == 0)
r = -ETIMEDOUT;
@@ -822,6 +874,11 @@
kfree(smux_ctl_devp[i]);
goto error2;
}
+ if (device_create_file(smux_ctl_devp[i]->devicep,
+ &dev_attr_open_timeout))
+ pr_err("%s: unable to create device attr for" \
+ " smux ctl dev id:%d\n", __func__, i);
+
}
smux_ctl_inited = 1;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 41f8106..54ea85d 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -522,9 +522,9 @@
if (of_get_property(node, "tx-fifo-resize", NULL))
dwc->needs_fifo_resize = true;
+ pm_runtime_no_callbacks(dev);
+ pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
- pm_runtime_forbid(dev);
ret = dwc3_core_init(dwc);
if (ret) {
@@ -586,8 +586,6 @@
goto err2;
}
- pm_runtime_allow(dev);
-
return 0;
err2:
@@ -621,7 +619,6 @@
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
dwc3_debugfs_exit(dwc);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 05f1a60..136c6d9 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -16,6 +16,8 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -24,6 +26,8 @@
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/msm_hsusb.h>
@@ -124,6 +128,12 @@
struct regulator *ssusb_vddcx;
enum usb_vdd_type ss_vdd_type;
enum usb_vdd_type hs_vdd_type;
+ struct dwc3_ext_xceiv ext_xceiv;
+ bool resume_pending;
+ atomic_t pm_suspended;
+ atomic_t in_lpm;
+ struct delayed_work resume_work;
+ struct wake_lock wlock;
struct dwc3_charger charger;
struct usb_phy *otg_xceiv;
struct delayed_work chg_work;
@@ -1213,6 +1223,159 @@
queue_delayed_work(system_nrt_wq, &mdwc->chg_work, 0);
}
+static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
+{
+ dev_dbg(mdwc->dev, "%s: entering lpm\n", __func__);
+
+ if (atomic_read(&mdwc->in_lpm)) {
+ dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
+ return 0;
+ }
+
+ clk_disable_unprepare(mdwc->core_clk);
+ dwc3_hsusb_ldo_enable(0);
+ dwc3_ssusb_ldo_enable(0);
+ wake_unlock(&mdwc->wlock);
+
+ atomic_set(&mdwc->in_lpm, 1);
+ dev_info(mdwc->dev, "DWC3 in low power mode\n");
+
+ return 0;
+}
+
+static int dwc3_msm_resume(struct dwc3_msm *mdwc)
+{
+ dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
+
+ if (!atomic_read(&mdwc->in_lpm)) {
+ dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
+ return 0;
+ }
+
+ wake_lock(&mdwc->wlock);
+ clk_prepare_enable(mdwc->core_clk);
+ dwc3_hsusb_ldo_enable(1);
+ dwc3_ssusb_ldo_enable(1);
+
+ atomic_set(&mdwc->in_lpm, 0);
+ dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
+
+ return 0;
+}
+
+static void dwc3_resume_work(struct work_struct *w)
+{
+ struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
+ resume_work.work);
+
+ dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
+ /* handle any event that was queued while work was already running */
+ if (!atomic_read(&mdwc->in_lpm)) {
+ dev_dbg(mdwc->dev, "%s: notifying xceiv event\n", __func__);
+ if (mdwc->otg_xceiv)
+ mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
+ DWC3_EVENT_XCEIV_STATE);
+ return;
+ }
+
+ /* bail out if system resume in process, else initiate RESUME */
+ if (atomic_read(&mdwc->pm_suspended)) {
+ mdwc->resume_pending = true;
+ } else {
+ pm_runtime_get_sync(mdwc->dev);
+ if (mdwc->otg_xceiv)
+ mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
+ DWC3_EVENT_PHY_RESUME);
+ pm_runtime_put_sync(mdwc->dev);
+ }
+}
+
+static bool debug_id, debug_bsv, debug_connect;
+
+static int dwc3_connect_show(struct seq_file *s, void *unused)
+{
+ if (debug_connect)
+ seq_printf(s, "true\n");
+ else
+ seq_printf(s, "false\n");
+
+ return 0;
+}
+
+static int dwc3_connect_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_connect_show, inode->i_private);
+}
+
+static ssize_t dwc3_connect_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc3_msm *mdwc = s->private;
+ char buf[8];
+
+ memset(buf, 0x00, sizeof(buf));
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "enable", 6) || !strncmp(buf, "true", 4)) {
+ debug_connect = true;
+ } else {
+ debug_connect = debug_bsv = false;
+ debug_id = true;
+ }
+
+ mdwc->ext_xceiv.bsv = debug_bsv;
+ mdwc->ext_xceiv.id = debug_id ? DWC3_ID_FLOAT : DWC3_ID_GROUND;
+
+ if (atomic_read(&mdwc->in_lpm)) {
+ dev_dbg(mdwc->dev, "%s: calling resume_work\n", __func__);
+ dwc3_resume_work(&mdwc->resume_work.work);
+ } else {
+ dev_dbg(mdwc->dev, "%s: notifying xceiv event\n", __func__);
+ if (mdwc->otg_xceiv)
+ mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
+ DWC3_EVENT_XCEIV_STATE);
+ }
+
+ return count;
+}
+
+const struct file_operations dwc3_connect_fops = {
+ .open = dwc3_connect_open,
+ .read = seq_read,
+ .write = dwc3_connect_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *dwc3_debugfs_root;
+
+static void dwc3_debugfs_init(struct dwc3_msm *mdwc)
+{
+ dwc3_debugfs_root = debugfs_create_dir("msm_dwc3", NULL);
+
+ if (!dwc3_debugfs_root || IS_ERR(dwc3_debugfs_root))
+ return;
+
+ if (!debugfs_create_bool("id", S_IRUGO | S_IWUSR, dwc3_debugfs_root,
+ (u32 *)&debug_id))
+ goto error;
+
+ if (!debugfs_create_bool("bsv", S_IRUGO | S_IWUSR, dwc3_debugfs_root,
+ (u32 *)&debug_bsv))
+ goto error;
+
+ if (!debugfs_create_file("connect", S_IRUGO | S_IWUSR,
+ dwc3_debugfs_root, mdwc, &dwc3_connect_fops))
+ goto error;
+
+ return;
+
+error:
+ debugfs_remove_recursive(dwc3_debugfs_root);
+}
static int __devinit dwc3_msm_probe(struct platform_device *pdev)
{
@@ -1234,6 +1397,7 @@
INIT_LIST_HEAD(&msm->req_complete_list);
INIT_DELAYED_WORK(&msm->chg_work, dwc3_chg_detect_work);
+ INIT_DELAYED_WORK(&msm->resume_work, dwc3_resume_work);
/*
* DWC3 Core requires its CORE CLK (aka master / bus clk) to
@@ -1354,6 +1518,9 @@
msm->resource_size = resource_size(res);
msm->dwc3 = dwc3;
+ pm_runtime_set_active(msm->dev);
+ pm_runtime_enable(msm->dev);
+
if (of_property_read_u32(node, "qcom,dwc-usb3-msm-dbm-eps",
&msm->dbm_num_eps)) {
dev_err(&pdev->dev,
@@ -1395,10 +1562,21 @@
ret);
goto put_xcvr;
}
+
+ ret = dwc3_set_ext_xceiv(msm->otg_xceiv->otg, &msm->ext_xceiv);
+ if (ret || !msm->ext_xceiv.notify_ext_events) {
+ dev_err(&pdev->dev, "failed to register xceiver: %d\n",
+ ret);
+ goto put_xcvr;
+ }
} else {
dev_err(&pdev->dev, "%s: No OTG transceiver found\n", __func__);
}
+ wake_lock_init(&msm->wlock, WAKE_LOCK_SUSPEND, "msm_dwc3");
+ wake_lock(&msm->wlock);
+ dwc3_debugfs_init(msm);
+
return 0;
put_xcvr:
@@ -1432,11 +1610,15 @@
{
struct dwc3_msm *msm = platform_get_drvdata(pdev);
+ if (dwc3_debugfs_root)
+ debugfs_remove_recursive(dwc3_debugfs_root);
if (msm->otg_xceiv) {
dwc3_start_chg_det(&msm->charger, false);
usb_put_transceiver(msm->otg_xceiv);
}
+ pm_runtime_disable(msm->dev);
platform_device_unregister(msm->dwc3);
+ wake_lock_destroy(&msm->wlock);
dwc3_hsusb_ldo_enable(0);
dwc3_hsusb_ldo_init(0);
@@ -1451,6 +1633,77 @@
return 0;
}
+static int dwc3_msm_pm_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3-msm PM suspend\n");
+
+ ret = dwc3_msm_suspend(mdwc);
+ if (!ret)
+ atomic_set(&mdwc->pm_suspended, 1);
+
+ return ret;
+}
+
+static int dwc3_msm_pm_resume(struct device *dev)
+{
+ int ret = 0;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3-msm PM resume\n");
+
+ atomic_set(&mdwc->pm_suspended, 0);
+ if (mdwc->resume_pending) {
+ mdwc->resume_pending = false;
+
+ ret = dwc3_msm_resume(mdwc);
+ /* Update runtime PM status */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ /* Let OTG know about resume event and update pm_count */
+ if (mdwc->otg_xceiv)
+ mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
+ DWC3_EVENT_PHY_RESUME);
+ }
+
+ return ret;
+}
+
+static int dwc3_msm_runtime_idle(struct device *dev)
+{
+ dev_dbg(dev, "DWC3-msm runtime idle\n");
+
+ return 0;
+}
+
+static int dwc3_msm_runtime_suspend(struct device *dev)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "DWC3-msm runtime suspend\n");
+
+ return dwc3_msm_suspend(mdwc);
+}
+
+static int dwc3_msm_runtime_resume(struct device *dev)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "DWC3-msm runtime resume\n");
+
+ return dwc3_msm_resume(mdwc);
+}
+
+static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
+ SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
+ dwc3_msm_runtime_idle)
+};
+
static const struct of_device_id of_dwc3_matach[] = {
{
.compatible = "qcom,dwc-usb3-msm",
@@ -1464,6 +1717,7 @@
.remove = __devexit_p(dwc3_msm_remove),
.driver = {
.name = "msm-dwc3",
+ .pm = &dwc3_msm_dev_pm_ops,
.of_match_table = of_dwc3_matach,
},
};
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index 23b582d..4a37f03 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -271,6 +271,61 @@
return 0;
}
+/**
+ * dwc3_ext_event_notify - callback to handle events from external transceiver
+ * @otg: Pointer to the otg transceiver structure
+ * @event: Event reported by transceiver
+ *
+ * Returns nothing; schedules the OTG state machine work.
+ */
+static void dwc3_ext_event_notify(struct usb_otg *otg,
+ enum dwc3_ext_events event)
+{
+ struct dwc3_otg *dotg = container_of(otg, struct dwc3_otg, otg);
+ struct dwc3_ext_xceiv *ext_xceiv = dotg->ext_xceiv;
+ struct usb_phy *phy = dotg->otg.phy;
+
+ if (event == DWC3_EVENT_PHY_RESUME) {
+ if (!pm_runtime_status_suspended(phy->dev)) {
+ dev_warn(phy->dev, "PHY_RESUME event out of LPM!!!!\n");
+ } else {
+ dev_dbg(phy->dev, "ext PHY_RESUME event received\n");
+ /* ext_xceiver would have taken h/w out of LPM by now */
+ pm_runtime_get(phy->dev);
+ }
+ }
+
+ if (ext_xceiv->id == DWC3_ID_FLOAT)
+ set_bit(ID, &dotg->inputs);
+ else
+ clear_bit(ID, &dotg->inputs);
+
+ if (ext_xceiv->bsv)
+ set_bit(B_SESS_VLD, &dotg->inputs);
+ else
+ clear_bit(B_SESS_VLD, &dotg->inputs);
+
+ schedule_work(&dotg->sm_work);
+}
+
+/**
+ * dwc3_set_ext_xceiv - bind/unbind external transceiver driver
+ * @otg: Pointer to the otg transceiver structure
+ * @ext_xceiv: Pointer to the external transceiver structure
+ *
+ * Returns 0 on success
+ */
+int dwc3_set_ext_xceiv(struct usb_otg *otg, struct dwc3_ext_xceiv *ext_xceiv)
+{
+ struct dwc3_otg *dotg = container_of(otg, struct dwc3_otg, otg);
+
+ dotg->ext_xceiv = ext_xceiv;
+ if (ext_xceiv)
+ ext_xceiv->notify_ext_events = dwc3_ext_event_notify;
+
+ return 0;
+}
+
/* IRQs which OTG driver is interested in handling */
#define DWC3_OEVT_MASK (DWC3_OEVTEN_OTGCONIDSTSCHNGEVNT | \
DWC3_OEVTEN_OTGBDEVVBUSCHNGEVNT)
@@ -284,10 +339,21 @@
static irqreturn_t dwc3_otg_interrupt(int irq, void *_dotg)
{
struct dwc3_otg *dotg = (struct dwc3_otg *)_dotg;
+ struct usb_phy *phy = dotg->otg.phy;
u32 osts, oevt_reg;
int ret = IRQ_NONE;
int handled_irqs = 0;
+ /*
+ * If PHY is in retention mode then this interrupt would not be fired.
+ * ext_xcvr (parent) is responsible for bringing h/w out of LPM.
+ * OTG driver just need to increment power count and can safely
+ * assume that h/w is out of low power state now.
+ * TODO: explicitly disable OEVTEN interrupts if ext_xceiv is present
+ */
+ if (pm_runtime_status_suspended(phy->dev))
+ pm_runtime_get(phy->dev);
+
oevt_reg = dwc3_readl(dotg->regs, DWC3_OEVT);
if (!(oevt_reg & DWC3_OEVT_MASK))
@@ -371,6 +437,7 @@
struct dwc3_charger *charger = dotg->charger;
bool work = 0;
+ pm_runtime_resume(phy->dev);
dev_dbg(phy->dev, "%s state\n", otg_state_string(phy->state));
/* Check OTG state */
@@ -388,7 +455,8 @@
work = 1;
} else {
phy->state = OTG_STATE_B_IDLE;
- /* TODO: Enter low power state */
+ dev_dbg(phy->dev, "No device, trying to suspend\n");
+ pm_runtime_put_sync(phy->dev);
}
break;
@@ -411,7 +479,8 @@
/* Has charger been detected? If no detect it */
switch (charger->chg_type) {
case DWC3_DCP_CHARGER:
- /* TODO: initiate LPM */
+ dev_dbg(phy->dev, "lpm, DCP charger\n");
+ pm_runtime_put_sync(phy->dev);
break;
case DWC3_CDP_CHARGER:
dwc3_otg_start_peripheral(&dotg->otg,
@@ -438,9 +507,10 @@
* yet. We will re-try as soon as it
* will be called
*/
- dev_err(phy->dev,
+ dev_err(phy->dev, "enter lpm as "
"unable to start B-device\n");
phy->state = OTG_STATE_UNDEFINED;
+ pm_runtime_put_sync(phy->dev);
return;
}
}
@@ -453,7 +523,8 @@
charger->chg_type =
DWC3_INVALID_CHARGER;
}
- /* TODO: Enter low power state */
+ dev_dbg(phy->dev, "No device, trying to suspend\n");
+ pm_runtime_put_sync(phy->dev);
}
break;
@@ -481,9 +552,10 @@
* Probably set_host was not called yet.
* We will re-try as soon as it will be called
*/
- dev_dbg(phy->dev,
+ dev_dbg(phy->dev, "enter lpm as "
"unable to start A-device\n");
phy->state = OTG_STATE_UNDEFINED;
+ pm_runtime_put_sync(phy->dev);
return;
}
phy->state = OTG_STATE_A_HOST;
@@ -628,6 +700,8 @@
goto err3;
}
+ pm_runtime_get(dwc->dev);
+
return 0;
err3:
@@ -658,6 +732,7 @@
dotg->charger->start_detection(dotg->charger, false);
cancel_work_sync(&dotg->sm_work);
usb_set_transceiver(NULL);
+ pm_runtime_put(dwc->dev);
free_irq(dotg->irq, dotg);
kfree(dotg->otg.phy);
kfree(dotg);
diff --git a/drivers/usb/dwc3/dwc3_otg.h b/drivers/usb/dwc3/dwc3_otg.h
index 0d8b61b..b60b42a 100644
--- a/drivers/usb/dwc3/dwc3_otg.h
+++ b/drivers/usb/dwc3/dwc3_otg.h
@@ -35,8 +35,9 @@
struct usb_otg otg;
int irq;
void __iomem *regs;
- struct work_struct sm_work;
- struct dwc3_charger *charger;
+ struct work_struct sm_work;
+ struct dwc3_charger *charger;
+ struct dwc3_ext_xceiv *ext_xceiv;
#define ID 0
#define B_SESS_VLD 1
unsigned long inputs;
@@ -73,4 +74,29 @@
/* for external charger driver */
extern int dwc3_set_charger(struct usb_otg *otg, struct dwc3_charger *charger);
+enum dwc3_ext_events {
+ DWC3_EVENT_NONE = 0, /* no change event */
+ DWC3_EVENT_PHY_RESUME, /* PHY has come out of LPM */
+ DWC3_EVENT_XCEIV_STATE, /* XCEIV state (id/bsv) has changed */
+};
+
+enum dwc3_id_state {
+ DWC3_ID_GROUND = 0,
+ DWC3_ID_FLOAT,
+};
+
+/* external transceiver that can perform connect/disconnect monitoring in LPM */
+struct dwc3_ext_xceiv {
+ enum dwc3_id_state id;
+ bool bsv;
+
+ /* to notify OTG about LPM exit event, provided by OTG */
+ void (*notify_ext_events)(struct usb_otg *otg,
+ enum dwc3_ext_events ext_event);
+};
+
+/* for external transceiver driver */
+extern int dwc3_set_ext_xceiv(struct usb_otg *otg,
+ struct dwc3_ext_xceiv *ext_xceiv);
+
#endif /* __LINUX_USB_DWC3_OTG_H */
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 060144f..a3f6e58 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2425,6 +2425,11 @@
dev_err(dwc->dev, "failed to set peripheral to otg\n");
goto err7;
}
+ } else {
+ pm_runtime_no_callbacks(&dwc->gadget.dev);
+ pm_runtime_set_active(&dwc->gadget.dev);
+ pm_runtime_enable(&dwc->gadget.dev);
+ pm_runtime_get(&dwc->gadget.dev);
}
return 0;
@@ -2462,6 +2467,11 @@
{
int irq;
+ if (dwc->dotg) {
+ pm_runtime_put(&dwc->gadget.dev);
+ pm_runtime_disable(&dwc->gadget.dev);
+ }
+
usb_del_gadget_udc(&dwc->gadget);
irq = platform_get_irq(to_platform_device(dwc->dev), 0);
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 2ed642a..ac88636 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -71,6 +71,12 @@
#include "u_ether.c"
#include "u_bam_data.c"
#include "f_mbim.c"
+#include "f_qc_ecm.c"
+#include "f_qc_rndis.c"
+#include "u_qc_ether.c"
+#ifdef CONFIG_TARGET_CORE
+#include "f_tcm.c"
+#endif
MODULE_AUTHOR("Mike Lockwood");
MODULE_DESCRIPTION("Android Composite USB Driver");
@@ -83,6 +89,8 @@
#define VENDOR_ID 0x18D1
#define PRODUCT_ID 0x0001
+#define ANDROID_DEVICE_NODE_NAME_LENGTH 11
+
struct android_usb_function {
char *name;
void *config;
@@ -94,6 +102,8 @@
/* for android_dev.enabled_functions */
struct list_head enabled_list;
+ struct android_dev *android_dev;
+
/* Optional: initialization during gadget bind */
int (*init)(struct android_usb_function *, struct usb_composite_dev *);
/* Optional: cleanup during gadget unbind */
@@ -132,12 +142,18 @@
char pm_qos[5];
struct pm_qos_request pm_qos_req_dma;
struct work_struct work;
+
+ struct list_head list_item;
+
+ struct usb_configuration config;
};
static struct class *android_class;
-static struct android_dev *_android_dev;
+static struct list_head android_dev_list;
+static int android_dev_count;
static int android_bind_config(struct usb_configuration *c);
static void android_unbind_config(struct usb_configuration *c);
+static struct android_dev *cdev_to_android_dev(struct usb_composite_dev *cdev);
/* string IDs are assigned dynamically */
#define STRING_MANUFACTURER_IDX 0
@@ -189,12 +205,6 @@
NULL,
};
-static struct usb_configuration android_config_driver = {
- .label = "android",
- .unbind = android_unbind_config,
- .bConfigurationValue = 1,
-};
-
enum android_device_state {
USB_DISCONNECTED,
USB_CONNECTED,
@@ -291,7 +301,7 @@
return;
if (--dev->disable_depth == 0) {
- usb_add_config(cdev, &android_config_driver,
+ usb_add_config(cdev, &dev->config,
android_bind_config);
usb_gadget_connect(cdev->gadget);
}
@@ -305,7 +315,7 @@
usb_gadget_disconnect(cdev->gadget);
/* Cancel pending control requests */
usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
- usb_remove_config(cdev, &android_config_driver);
+ usb_remove_config(cdev, &dev->config);
}
}
@@ -343,7 +353,7 @@
static void adb_android_function_enable(struct android_usb_function *f)
{
- struct android_dev *dev = _android_dev;
+ struct android_dev *dev = f->android_dev;
struct adb_data *data = f->config;
data->enabled = true;
@@ -355,7 +365,7 @@
static void adb_android_function_disable(struct android_usb_function *f)
{
- struct android_dev *dev = _android_dev;
+ struct android_dev *dev = f->android_dev;
struct adb_data *data = f->config;
data->enabled = false;
@@ -376,32 +386,30 @@
static void adb_ready_callback(void)
{
- struct android_dev *dev = _android_dev;
+ struct android_dev *dev = adb_function.android_dev;
struct adb_data *data = adb_function.config;
- mutex_lock(&dev->mutex);
-
data->opened = true;
- if (data->enabled)
+ if (data->enabled && dev) {
+ mutex_lock(&dev->mutex);
android_enable(dev);
-
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mutex);
+ }
}
static void adb_closed_callback(void)
{
- struct android_dev *dev = _android_dev;
+ struct android_dev *dev = adb_function.android_dev;
struct adb_data *data = adb_function.config;
- mutex_lock(&dev->mutex);
-
data->opened = false;
- if (data->enabled)
+ if (data->enabled) {
+ mutex_lock(&dev->mutex);
android_disable(dev);
-
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mutex);
+ }
}
@@ -547,6 +555,95 @@
.attributes = rmnet_function_attributes,
};
+struct ecm_function_config {
+ u8 ethaddr[ETH_ALEN];
+};
+
+static int ecm_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ f->config = kzalloc(sizeof(struct ecm_function_config), GFP_KERNEL);
+ if (!f->config)
+ return -ENOMEM;
+ return 0;
+}
+
+static void ecm_function_cleanup(struct android_usb_function *f)
+{
+ kfree(f->config);
+ f->config = NULL;
+}
+
+static int ecm_qc_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ int ret;
+ struct ecm_function_config *ecm = f->config;
+
+ if (!ecm) {
+ pr_err("%s: ecm_pdata\n", __func__);
+ return -EINVAL;
+ }
+
+ pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
+ ecm->ethaddr[0], ecm->ethaddr[1], ecm->ethaddr[2],
+ ecm->ethaddr[3], ecm->ethaddr[4], ecm->ethaddr[5]);
+
+ ret = gether_qc_setup_name(c->cdev->gadget, ecm->ethaddr, "ecm");
+ if (ret) {
+ pr_err("%s: gether_setup failed\n", __func__);
+ return ret;
+ }
+
+ return ecm_qc_bind_config(c, ecm->ethaddr);
+}
+
+static void ecm_qc_function_unbind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ gether_qc_cleanup();
+}
+
+static ssize_t ecm_ethaddr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct ecm_function_config *ecm = f->config;
+ return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ ecm->ethaddr[0], ecm->ethaddr[1], ecm->ethaddr[2],
+ ecm->ethaddr[3], ecm->ethaddr[4], ecm->ethaddr[5]);
+}
+
+static ssize_t ecm_ethaddr_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct ecm_function_config *ecm = f->config;
+
+ if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ (int *)&ecm->ethaddr[0], (int *)&ecm->ethaddr[1],
+ (int *)&ecm->ethaddr[2], (int *)&ecm->ethaddr[3],
+ (int *)&ecm->ethaddr[4], (int *)&ecm->ethaddr[5]) == 6)
+ return size;
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(ecm_ethaddr, S_IRUGO | S_IWUSR, ecm_ethaddr_show,
+ ecm_ethaddr_store);
+
+static struct device_attribute *ecm_function_attributes[] = {
+ &dev_attr_ecm_ethaddr,
+ NULL
+};
+
+static struct android_usb_function ecm_qc_function = {
+ .name = "ecm_qc",
+ .init = ecm_function_init,
+ .cleanup = ecm_function_cleanup,
+ .bind_config = ecm_qc_function_bind_config,
+ .unbind_config = ecm_qc_function_unbind_config,
+ .attributes = ecm_function_attributes,
+};
/* MBIM - used with BAM */
#define MAX_MBIM_INSTANCES 1
@@ -609,6 +706,7 @@
char buf[32], *b;
int once = 0, err = -1;
int (*notify)(uint32_t, const char *);
+ struct android_dev *dev = cdev_to_android_dev(c->cdev);
strlcpy(buf, diag_clients, sizeof(buf));
b = strim(buf);
@@ -617,8 +715,8 @@
notify = NULL;
name = strsep(&b, ",");
/* Allow only first diag channel to update pid and serial no */
- if (_android_dev->pdata && !once++)
- notify = _android_dev->pdata->update_pid_and_serial_num;
+ if (dev->pdata && !once++)
+ notify = dev->pdata->update_pid_and_serial_num;
if (name) {
err = diag_function_add(c, name, notify);
@@ -868,6 +966,7 @@
struct rndis_function_config {
u8 ethaddr[ETH_ALEN];
u32 vendorID;
+ u8 max_pkt_per_xfer;
char manufacturer[256];
/* "Wireless" RNDIS; auto-detected by Windows */
bool wceis;
@@ -889,6 +988,22 @@
f->config = NULL;
}
+static int rndis_qc_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ f->config = kzalloc(sizeof(struct rndis_function_config), GFP_KERNEL);
+ if (!f->config)
+ return -ENOMEM;
+
+ return rndis_qc_init();
+}
+
+static void rndis_qc_function_cleanup(struct android_usb_function *f)
+{
+ rndis_qc_cleanup();
+ kfree(f->config);
+}
+
static int
rndis_function_bind_config(struct android_usb_function *f,
struct usb_configuration *c)
@@ -927,12 +1042,56 @@
rndis->manufacturer);
}
+static int rndis_qc_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ int ret;
+ struct rndis_function_config *rndis = f->config;
+
+ if (!rndis) {
+ pr_err("%s: rndis_pdata\n", __func__);
+ return -EINVAL;
+ }
+
+ pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
+ rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+ rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+
+ ret = gether_qc_setup_name(c->cdev->gadget, rndis->ethaddr, "rndis");
+ if (ret) {
+ pr_err("%s: gether_setup failed\n", __func__);
+ return ret;
+ }
+
+ if (rndis->wceis) {
+ /* "Wireless" RNDIS; auto-detected by Windows */
+ rndis_qc_iad_descriptor.bFunctionClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ rndis_qc_iad_descriptor.bFunctionSubClass = 0x01;
+ rndis_qc_iad_descriptor.bFunctionProtocol = 0x03;
+ rndis_qc_control_intf.bInterfaceClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ rndis_qc_control_intf.bInterfaceSubClass = 0x01;
+ rndis_qc_control_intf.bInterfaceProtocol = 0x03;
+ }
+
+ return rndis_qc_bind_config_vendor(c, rndis->ethaddr, rndis->vendorID,
+ rndis->manufacturer,
+ rndis->max_pkt_per_xfer);
+}
+
static void rndis_function_unbind_config(struct android_usb_function *f,
struct usb_configuration *c)
{
gether_cleanup();
}
+static void rndis_qc_function_unbind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ gether_qc_cleanup();
+}
+
static ssize_t rndis_manufacturer_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1039,11 +1198,38 @@
static DEVICE_ATTR(vendorID, S_IRUGO | S_IWUSR, rndis_vendorID_show,
rndis_vendorID_store);
+static ssize_t rndis_max_pkt_per_xfer_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ return snprintf(buf, PAGE_SIZE, "%d\n", config->max_pkt_per_xfer);
+}
+
+static ssize_t rndis_max_pkt_per_xfer_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct android_usb_function *f = dev_get_drvdata(dev);
+ struct rndis_function_config *config = f->config;
+ int value;
+
+ if (sscanf(buf, "%d", &value) == 1) {
+ config->max_pkt_per_xfer = value;
+ return size;
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(max_pkt_per_xfer, S_IRUGO | S_IWUSR,
+ rndis_max_pkt_per_xfer_show,
+ rndis_max_pkt_per_xfer_store);
+
static struct device_attribute *rndis_function_attributes[] = {
&dev_attr_manufacturer,
&dev_attr_wceis,
&dev_attr_ethaddr,
&dev_attr_vendorID,
+ &dev_attr_max_pkt_per_xfer,
NULL
};
@@ -1056,6 +1242,14 @@
.attributes = rndis_function_attributes,
};
+static struct android_usb_function rndis_qc_function = {
+ .name = "rndis_qc",
+ .init = rndis_qc_function_init,
+ .cleanup = rndis_qc_function_cleanup,
+ .bind_config = rndis_qc_function_bind_config,
+ .unbind_config = rndis_qc_function_unbind_config,
+ .attributes = rndis_function_attributes,
+};
struct mass_storage_function_config {
struct fsg_config fsg;
@@ -1065,7 +1259,7 @@
static int mass_storage_function_init(struct android_usb_function *f,
struct usb_composite_dev *cdev)
{
- struct android_dev *dev = _android_dev;
+ struct android_dev *dev = cdev_to_android_dev(cdev);
struct mass_storage_function_config *config;
struct fsg_common *common;
int err;
@@ -1079,7 +1273,7 @@
config->fsg.nluns = 1;
name[0] = "lun";
- if (dev->pdata->cdrom) {
+ if (dev->pdata && dev->pdata->cdrom) {
config->fsg.nluns = 2;
config->fsg.luns[1].cdrom = 1;
config->fsg.luns[1].ro = 1;
@@ -1198,9 +1392,55 @@
.ctrlrequest = accessory_function_ctrlrequest,
};
+static int android_uasp_connect_cb(bool connect)
+{
+ /*
+ * TODO
+ * We may have to disable gadget till UASP configfs nodes
+ * are configured which includes mapping LUN with the
+ * backing file. It is a fundamental difference between
+ * f_mass_storage and f_tcp. That means UASP can not be
+ * in default composition.
+ *
+ * For now, assume that UASP configfs nodes are configured
+ * before enabling android gadget. Or cable should be
+ * reconnected after mapping the LUN.
+ *
+ * Also consider making UASP to respond to Host requests when
+ * Lun is not mapped.
+ */
+ pr_debug("UASP %s\n", connect ? "connect" : "disconnect");
+
+ return 0;
+}
+
+static int uasp_function_init(struct android_usb_function *f,
+ struct usb_composite_dev *cdev)
+{
+ return f_tcm_init(&android_uasp_connect_cb);
+}
+
+static void uasp_function_cleanup(struct android_usb_function *f)
+{
+ f_tcm_exit();
+}
+
+static int uasp_function_bind_config(struct android_usb_function *f,
+ struct usb_configuration *c)
+{
+ return tcm_bind_config(c);
+}
+
+static struct android_usb_function uasp_function = {
+ .name = "uasp",
+ .init = uasp_function_init,
+ .cleanup = uasp_function_cleanup,
+ .bind_config = uasp_function_bind_config,
+};
static struct android_usb_function *supported_functions[] = {
&mbim_function,
+ &ecm_qc_function,
&rmnet_smd_function,
&rmnet_sdio_function,
&rmnet_smd_sdio_function,
@@ -1213,8 +1453,10 @@
&mtp_function,
&ptp_function,
&rndis_function,
+ &rndis_qc_function,
&mass_storage_function,
&accessory_function,
+ &uasp_function,
NULL
};
@@ -1247,7 +1489,7 @@
static int android_init_functions(struct android_usb_function **functions,
struct usb_composite_dev *cdev)
{
- struct android_dev *dev = _android_dev;
+ struct android_dev *dev = cdev_to_android_dev(cdev);
struct android_usb_function *f;
struct device_attribute **attrs;
struct device_attribute *attr;
@@ -1256,6 +1498,7 @@
for (; (f = *functions++); index++) {
f->dev_name = kasprintf(GFP_KERNEL, "f_%s", f->name);
+ f->android_dev = NULL;
if (!f->dev_name) {
err = -ENOMEM;
goto err_out;
@@ -1342,9 +1585,15 @@
struct android_usb_function *f;
while ((f = *functions++)) {
if (!strcmp(name, f->name)) {
- list_add_tail(&f->enabled_list,
- &dev->enabled_functions);
- return 0;
+ if (f->android_dev)
+ pr_err("%s cannot be enabled on two devices\n",
+ f->name);
+ else {
+ list_add_tail(&f->enabled_list,
+ &dev->enabled_functions);
+ f->android_dev = dev;
+ return 0;
+ }
}
}
return -EINVAL;
@@ -1356,14 +1605,17 @@
static ssize_t remote_wakeup_show(struct device *pdev,
struct device_attribute *attr, char *buf)
{
+ struct android_dev *dev = dev_get_drvdata(pdev);
+
return snprintf(buf, PAGE_SIZE, "%d\n",
- !!(android_config_driver.bmAttributes &
+ !!(dev->config.bmAttributes &
USB_CONFIG_ATT_WAKEUP));
}
static ssize_t remote_wakeup_store(struct device *pdev,
struct device_attribute *attr, const char *buff, size_t size)
{
+ struct android_dev *dev = dev_get_drvdata(pdev);
int enable = 0;
sscanf(buff, "%d", &enable);
@@ -1372,9 +1624,9 @@
enable ? "enabling" : "disabling");
if (enable)
- android_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ dev->config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
else
- android_config_driver.bmAttributes &= ~USB_CONFIG_ATT_WAKEUP;
+ dev->config.bmAttributes &= ~USB_CONFIG_ATT_WAKEUP;
return size;
}
@@ -1403,6 +1655,7 @@
const char *buff, size_t size)
{
struct android_dev *dev = dev_get_drvdata(pdev);
+ struct android_usb_function *f;
char *name;
char buf[256], *b;
int err;
@@ -1414,6 +1667,10 @@
return -EBUSY;
}
+ /* Clear previous enabled list */
+ list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+ f->android_dev = NULL;
+ }
INIT_LIST_HEAD(&dev->enabled_functions);
strlcpy(buf, buff, sizeof(buf));
@@ -1612,7 +1869,7 @@
static int android_bind_config(struct usb_configuration *c)
{
- struct android_dev *dev = _android_dev;
+ struct android_dev *dev = cdev_to_android_dev(c->cdev);
int ret = 0;
ret = android_bind_enabled_functions(dev, c);
@@ -1624,26 +1881,34 @@
static void android_unbind_config(struct usb_configuration *c)
{
- struct android_dev *dev = _android_dev;
+ struct android_dev *dev = cdev_to_android_dev(c->cdev);
android_unbind_enabled_functions(dev, c);
}
static int android_bind(struct usb_composite_dev *cdev)
{
- struct android_dev *dev = _android_dev;
+ struct android_dev *dev;
struct usb_gadget *gadget = cdev->gadget;
int gcnum, id, ret;
+ /* Bind to the last android_dev that was probed */
+ dev = list_entry(android_dev_list.prev, struct android_dev, list_item);
+
+ dev->cdev = cdev;
+
/*
* Start disconnected. Userspace will connect the gadget once
* it is done configuring the functions.
*/
usb_gadget_disconnect(gadget);
- ret = android_init_functions(dev->functions, cdev);
- if (ret)
- return ret;
+ /* Init the supported functions only once, on the first android_dev */
+ if (android_dev_count == 1) {
+ ret = android_init_functions(dev->functions, cdev);
+ if (ret)
+ return ret;
+ }
/* Allocate string descriptor numbers ... note that string
* contents can be overridden by the composite_dev glue.
@@ -1673,7 +1938,7 @@
device_desc.iSerialNumber = id;
if (gadget_is_otg(cdev->gadget))
- android_config_driver.descriptors = otg_desc;
+ dev->config.descriptors = otg_desc;
gcnum = usb_gadget_controller_number(gadget);
if (gcnum >= 0)
@@ -1684,14 +1949,12 @@
device_desc.bcdDevice = __constant_cpu_to_le16(0x9999);
}
- dev->cdev = cdev;
-
return 0;
}
static int android_usb_unbind(struct usb_composite_dev *cdev)
{
- struct android_dev *dev = _android_dev;
+ struct android_dev *dev = cdev_to_android_dev(cdev);
manufacturer_string[0] = '\0';
product_string[0] = '\0';
@@ -1712,8 +1975,8 @@
static int
android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c)
{
- struct android_dev *dev = _android_dev;
struct usb_composite_dev *cdev = get_gadget_data(gadget);
+ struct android_dev *dev = cdev_to_android_dev(cdev);
struct usb_request *req = cdev->req;
struct android_usb_function *f;
int value = -EOPNOTSUPP;
@@ -1756,8 +2019,8 @@
static void android_disconnect(struct usb_gadget *gadget)
{
- struct android_dev *dev = _android_dev;
struct usb_composite_dev *cdev = get_gadget_data(gadget);
+ struct android_dev *dev = cdev_to_android_dev(cdev);
unsigned long flags;
composite_disconnect(gadget);
@@ -1768,14 +2031,21 @@
spin_unlock_irqrestore(&cdev->lock, flags);
}
-static int android_create_device(struct android_dev *dev)
+static int android_create_device(struct android_dev *dev, u8 usb_core_id)
{
struct device_attribute **attrs = android_usb_attributes;
struct device_attribute *attr;
+ char device_node_name[ANDROID_DEVICE_NODE_NAME_LENGTH];
int err;
+ /*
+ * The primary usb core should always have usb_core_id=0, since
+ * Android user space is currently interested in android0 events.
+ */
+ snprintf(device_node_name, ANDROID_DEVICE_NODE_NAME_LENGTH,
+ "android%d", usb_core_id);
dev->dev = device_create(android_class, NULL,
- MKDEV(0, 0), NULL, "android0");
+ MKDEV(0, 0), NULL, device_node_name);
if (IS_ERR(dev->dev))
return PTR_ERR(dev->dev);
@@ -1801,27 +2071,64 @@
device_destroy(android_class, dev->dev->devt);
}
+static struct android_dev *cdev_to_android_dev(struct usb_composite_dev *cdev)
+{
+ struct android_dev *dev = NULL;
+
+ /* Find the android dev from the list */
+ list_for_each_entry(dev, &android_dev_list, list_item) {
+ if (dev->cdev == cdev)
+ break;
+ }
+
+ return dev;
+}
+
static int __devinit android_probe(struct platform_device *pdev)
{
struct android_usb_platform_data *pdata = pdev->dev.platform_data;
- struct android_dev *dev = _android_dev;
+ struct android_dev *android_dev;
int ret = 0;
- dev->pdata = pdata;
+ if (!android_class) {
+ android_class = class_create(THIS_MODULE, "android_usb");
+ if (IS_ERR(android_class))
+ return PTR_ERR(android_class);
+ }
- android_class = class_create(THIS_MODULE, "android_usb");
- if (IS_ERR(android_class))
- return PTR_ERR(android_class);
+ android_dev = kzalloc(sizeof(*android_dev), GFP_KERNEL);
+ if (!android_dev) {
+ pr_err("%s(): Failed to alloc memory for android_dev\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
- ret = android_create_device(dev);
+ android_dev->config.label = pdev->name;
+ android_dev->config.unbind = android_unbind_config;
+ android_dev->config.bConfigurationValue = 1;
+ android_dev->disable_depth = 1;
+ android_dev->functions = supported_functions;
+ INIT_LIST_HEAD(&android_dev->enabled_functions);
+ INIT_WORK(&android_dev->work, android_work);
+ mutex_init(&android_dev->mutex);
+
+ android_dev->pdata = pdata;
+
+ list_add_tail(&android_dev->list_item, &android_dev_list);
+ android_dev_count++;
+
+ if (pdata)
+ composite_driver.usb_core_id = pdata->usb_core_id;
+ else
+ composite_driver.usb_core_id = 0; /*To backward compatibility*/
+
+ ret = android_create_device(android_dev, composite_driver.usb_core_id);
if (ret) {
pr_err("%s(): android_create_device failed\n", __func__);
goto err_dev;
}
- if (pdata)
- composite_driver.usb_core_id = pdata->usb_core_id;
-
ret = usb_composite_probe(&android_usb_driver, android_bind);
if (ret) {
pr_err("%s(): Failed to register android "
@@ -1831,67 +2138,91 @@
/* pm qos request to prevent apps idle power collapse */
if (pdata && pdata->swfi_latency)
- pm_qos_add_request(&dev->pm_qos_req_dma,
+ pm_qos_add_request(&android_dev->pm_qos_req_dma,
PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
- strlcpy(dev->pm_qos, "high", sizeof(dev->pm_qos));
+ strlcpy(android_dev->pm_qos, "high", sizeof(android_dev->pm_qos));
return ret;
err_probe:
- android_destroy_device(dev);
+ android_destroy_device(android_dev);
err_dev:
- class_destroy(android_class);
+ list_del(&android_dev->list_item);
+ android_dev_count--;
+ kfree(android_dev);
+err_alloc:
+ if (list_empty(&android_dev_list)) {
+ class_destroy(android_class);
+ android_class = NULL;
+ }
return ret;
}
static int android_remove(struct platform_device *pdev)
{
- struct android_dev *dev = _android_dev;
+ struct android_dev *dev = NULL;
struct android_usb_platform_data *pdata = pdev->dev.platform_data;
+ int usb_core_id = 0;
- android_destroy_device(dev);
- class_destroy(android_class);
- usb_composite_unregister(&android_usb_driver);
- if (pdata && pdata->swfi_latency)
- pm_qos_remove_request(&dev->pm_qos_req_dma);
+ if (pdata)
+ usb_core_id = pdata->usb_core_id;
+
+ /* Find the android dev from the list */
+ list_for_each_entry(dev, &android_dev_list, list_item) {
+ if (!dev->pdata)
+ break; /*To backward compatibility*/
+ if (dev->pdata->usb_core_id == usb_core_id)
+ break;
+ }
+
+ if (dev) {
+ android_destroy_device(dev);
+ if (pdata && pdata->swfi_latency)
+ pm_qos_remove_request(&dev->pm_qos_req_dma);
+ list_del(&dev->list_item);
+ android_dev_count--;
+ kfree(dev);
+ }
+
+ if (list_empty(&android_dev_list)) {
+ class_destroy(android_class);
+ android_class = NULL;
+ usb_composite_unregister(&android_usb_driver);
+ }
return 0;
}
+static const struct platform_device_id android_id_table[] __devinitconst = {
+ {
+ .name = "android_usb",
+ },
+ {
+ .name = "android_usb_hsic",
+ },
+};
+
static struct platform_driver android_platform_driver = {
.driver = { .name = "android_usb"},
.probe = android_probe,
.remove = android_remove,
+ .id_table = android_id_table,
};
static int __init init(void)
{
- struct android_dev *dev;
int ret;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- pr_err("%s(): Failed to alloc memory for android_dev\n",
- __func__);
- return -ENOMEM;
- }
-
- dev->disable_depth = 1;
- dev->functions = supported_functions;
- INIT_LIST_HEAD(&dev->enabled_functions);
- INIT_WORK(&dev->work, android_work);
- mutex_init(&dev->mutex);
-
- _android_dev = dev;
-
/* Override composite driver functions */
composite_driver.setup = android_setup;
composite_driver.disconnect = android_disconnect;
+ INIT_LIST_HEAD(&android_dev_list);
+ android_dev_count = 0;
+
ret = platform_driver_register(&android_platform_driver);
if (ret) {
pr_err("%s(): Failed to register android"
"platform driver\n", __func__);
- kfree(dev);
}
return ret;
@@ -1901,7 +2232,5 @@
static void __exit cleanup(void)
{
platform_driver_unregister(&android_platform_driver);
- kfree(_android_dev);
- _android_dev = NULL;
}
module_exit(cleanup);
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 4d15d4d..4d15c55 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -54,6 +54,7 @@
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
+#include <linux/ratelimit.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -74,6 +75,7 @@
*****************************************************************************/
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+#define ATDTW_SET_DELAY 100 /* 100msec delay */
/* ctrl register bank access */
static DEFINE_SPINLOCK(udc_lock);
@@ -1764,6 +1766,7 @@
struct ci13xxx_req *mReqPrev;
int n = hw_ep_bit(mEp->num, mEp->dir);
int tmp_stat;
+ ktime_t start, diff;
mReqPrev = list_entry(mEp->qh.queue.prev,
struct ci13xxx_req, queue);
@@ -1774,9 +1777,20 @@
wmb();
if (hw_cread(CAP_ENDPTPRIME, BIT(n)))
goto done;
+ start = ktime_get();
do {
hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n));
+ diff = ktime_sub(ktime_get(), start);
+ /* poll for max. 100ms */
+ if (ktime_to_ms(diff) > ATDTW_SET_DELAY) {
+ if (hw_cread(CAP_USBCMD, USBCMD_ATDTW))
+ break;
+ printk_ratelimited(KERN_ERR
+ "%s:queue failed ep#%d %s\n",
+ __func__, mEp->num, mEp->dir ? "IN" : "OUT");
+ return -EAGAIN;
+ }
} while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW));
hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0);
if (tmp_stat)
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d35d861..86c0e73 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1625,7 +1625,7 @@
{
int retval;
- if (!driver || !driver->dev || !bind || composite)
+ if (!driver || !driver->dev || !bind)
return -EINVAL;
if (!driver->name)
diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c
index 72bff49..87597d5 100644
--- a/drivers/usb/gadget/f_diag.c
+++ b/drivers/usb/gadget/f_diag.c
@@ -20,7 +20,6 @@
#include <linux/platform_device.h>
#include <mach/usbdiag.h>
-#include <mach/rpc_hsusb.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 278e04e..1ea3982 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2865,15 +2865,6 @@
common->ep0req = cdev->req;
common->cdev = cdev;
- /* Maybe allocate device-global string IDs, and patch descriptors */
- if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
- rc = usb_string_id(cdev);
- if (unlikely(rc < 0))
- goto error_release;
- fsg_strings[FSG_STRING_INTERFACE].id = rc;
- fsg_intf_desc.iInterface = rc;
- }
-
/*
* Create the LUNs, open their backing files, and register the
* LUN devices in sysfs.
@@ -3194,6 +3185,15 @@
struct fsg_dev *fsg;
int rc;
+ /* Maybe allocate device-global string IDs, and patch descriptors */
+ if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
+ rc = usb_string_id(cdev);
+ if (unlikely(rc < 0))
+ return rc;
+ fsg_strings[FSG_STRING_INTERFACE].id = rc;
+ fsg_intf_desc.iInterface = rc;
+ }
+
fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
if (unlikely(!fsg))
return -ENOMEM;
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
index 0394b0b..96790c5 100644
--- a/drivers/usb/gadget/f_mtp.c
+++ b/drivers/usb/gadget/f_mtp.c
@@ -788,7 +788,8 @@
/* wait for our last read to complete */
ret = wait_event_interruptible(dev->read_wq,
dev->rx_done || dev->state != STATE_BUSY);
- if (dev->state == STATE_CANCELED) {
+ if (dev->state == STATE_CANCELED
+ || dev->state == STATE_OFFLINE) {
r = -ECANCELED;
if (!dev->rx_done)
usb_ep_dequeue(dev->ep_out, read_req);
diff --git a/drivers/usb/gadget/f_qc_ecm.c b/drivers/usb/gadget/f_qc_ecm.c
new file mode 100644
index 0000000..98a29ae
--- /dev/null
+++ b/drivers/usb/gadget/f_qc_ecm.c
@@ -0,0 +1,869 @@
+/*
+ * f_qc_ecm.c -- USB CDC Ethernet (ECM) link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include "u_ether.h"
+#include "u_qc_ether.h"
+
+
+/*
+ * This function is a "CDC Ethernet Networking Control Model" (CDC ECM)
+ * Ethernet link. The data transfer model is simple (packets sent and
+ * received over bulk endpoints using normal short packet termination),
+ * and the control model exposes various data and optional notifications.
+ *
+ * ECM is well standardized and (except for Microsoft) supported by most
+ * operating systems with USB host support. It's the preferred interop
+ * solution for Ethernet over USB, at least for firmware based solutions.
+ * (Hardware solutions tend to be more minimalist.) A newer and simpler
+ * "Ethernet Emulation Model" (CDC EEM) hasn't yet caught on.
+ *
+ * Note that ECM requires the use of "alternate settings" for its data
+ * interface. This means that the set_alt() method has real work to do,
+ * and also means that a get_alt() method is required.
+ *
+ * This function is based on USB CDC Ethernet link function driver and
+ * contains MSM specific implementation.
+ */
+
+
+enum ecm_qc_notify_state {
+ ECM_QC_NOTIFY_NONE, /* don't notify */
+ ECM_QC_NOTIFY_CONNECT, /* issue CONNECT next */
+ ECM_QC_NOTIFY_SPEED, /* issue SPEED_CHANGE next */
+};
+
+struct f_ecm_qc {
+ struct qc_gether port;
+ u8 ctrl_id, data_id;
+
+ char ethaddr[14];
+
+ struct usb_ep *notify;
+ struct usb_request *notify_req;
+ u8 notify_state;
+ bool is_open;
+};
+
+static inline struct f_ecm_qc *func_to_ecm_qc(struct usb_function *f)
+{
+ return container_of(f, struct f_ecm_qc, port.func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static inline unsigned ecm_qc_bitrate(struct usb_gadget *g)
+{
+ if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+ return 13 * 512 * 8 * 1000 * 8;
+ else
+ return 19 * 64 * 1 * 1000 * 8;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Include the status endpoint if we can, even though it's optional.
+ *
+ * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ *
+ * Some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
+ * if they ignore the connect/disconnect notifications that real aether
+ * can provide. More advanced cdc configurations might want to support
+ * encapsulated commands (vendor-specific, using control-OUT).
+ */
+
+#define ECM_QC_LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */
+#define ECM_QC_STATUS_BYTECOUNT 16 /* 8 byte header + data */
+
+/* currently only one std ecm instance is supported */
+#define ECM_QC_NO_PORTS 1
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor ecm_qc_control_intf = {
+ .bLength = sizeof ecm_qc_control_intf,
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ /* status endpoint is optional; this could be patched later */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc ecm_qc_header_desc = {
+ .bLength = sizeof ecm_qc_header_desc,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc ecm_qc_union_desc = {
+ .bLength = sizeof(ecm_qc_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+static struct usb_cdc_ether_desc ecm_qc_desc = {
+ .bLength = sizeof ecm_qc_desc,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ETHERNET_TYPE,
+
+ /* this descriptor actually adds value, surprise! */
+ /* .iMACAddress = DYNAMIC */
+ .bmEthernetStatistics = cpu_to_le32(0), /* no statistics */
+ .wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN),
+ .wNumberMCFilters = cpu_to_le16(0),
+ .bNumberPowerFilters = 0,
+};
+
+/* the default data interface has no endpoints ... */
+
+static struct usb_interface_descriptor ecm_qc_data_nop_intf = {
+ .bLength = sizeof ecm_qc_data_nop_intf,
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ .bInterfaceNumber = 1,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor ecm_qc_data_intf = {
+ .bLength = sizeof ecm_qc_data_intf,
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ .bInterfaceNumber = 1,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor ecm_qc_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+ .bInterval = 1 << ECM_QC_LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor ecm_qc_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor ecm_qc_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *ecm_qc_fs_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_qc_control_intf,
+ (struct usb_descriptor_header *) &ecm_qc_header_desc,
+ (struct usb_descriptor_header *) &ecm_qc_union_desc,
+ (struct usb_descriptor_header *) &ecm_qc_desc,
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ecm_qc_fs_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_qc_data_intf,
+ (struct usb_descriptor_header *) &ecm_qc_fs_in_desc,
+ (struct usb_descriptor_header *) &ecm_qc_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor ecm_qc_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+ .bInterval = ECM_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor ecm_qc_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor ecm_qc_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *ecm_qc_hs_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_qc_control_intf,
+ (struct usb_descriptor_header *) &ecm_qc_header_desc,
+ (struct usb_descriptor_header *) &ecm_qc_union_desc,
+ (struct usb_descriptor_header *) &ecm_qc_desc,
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ecm_qc_hs_notify_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_qc_data_intf,
+ (struct usb_descriptor_header *) &ecm_qc_hs_in_desc,
+ (struct usb_descriptor_header *) &ecm_qc_hs_out_desc,
+ NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string ecm_qc_string_defs[] = {
+ [0].s = "CDC Ethernet Control Model (ECM)",
+ [1].s = NULL /* DYNAMIC */,
+ [2].s = "CDC Ethernet Data",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings ecm_qc_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = ecm_qc_string_defs,
+};
+
+static struct usb_gadget_strings *ecm_qc_strings[] = {
+ &ecm_qc_string_table,
+ NULL,
+};
+
+static struct data_port ecm_qc_bam_port;
+
+static int ecm_qc_bam_setup(void)
+{
+ int ret;
+
+ ret = bam_data_setup(ECM_QC_NO_PORTS);
+ if (ret) {
+ pr_err("bam_data_setup failed err: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ecm_qc_bam_connect(struct f_ecm_qc *dev)
+{
+ int ret;
+
+ ecm_qc_bam_port.func = dev->port.func;
+ ecm_qc_bam_port.in = dev->port.in_ep;
+ ecm_qc_bam_port.out = dev->port.out_ep;
+
+ /* currently we use the first connection */
+ ret = bam_data_connect(&ecm_qc_bam_port, 0, 0);
+ if (ret) {
+ pr_err("bam_data_connect failed: err:%d\n",
+ ret);
+ return ret;
+ } else {
+ pr_info("ecm bam connected\n");
+ }
+
+ return 0;
+}
+
+static int ecm_qc_bam_disconnect(struct f_ecm_qc *dev)
+{
+ pr_debug("dev:%p. %s Do nothing.\n",
+ dev, __func__);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void ecm_qc_do_notify(struct f_ecm_qc *ecm)
+{
+ struct usb_request *req = ecm->notify_req;
+ struct usb_cdc_notification *event;
+ struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
+ __le32 *data;
+ int status;
+
+ /* notification already in flight? */
+ if (!req)
+ return;
+
+ event = req->buf;
+ switch (ecm->notify_state) {
+ case ECM_QC_NOTIFY_NONE:
+ return;
+
+ case ECM_QC_NOTIFY_CONNECT:
+ event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+ if (ecm->is_open)
+ event->wValue = cpu_to_le16(1);
+ else
+ event->wValue = cpu_to_le16(0);
+ event->wLength = 0;
+ req->length = sizeof *event;
+
+ DBG(cdev, "notify connect %s\n",
+ ecm->is_open ? "true" : "false");
+ ecm->notify_state = ECM_QC_NOTIFY_SPEED;
+ break;
+
+ case ECM_QC_NOTIFY_SPEED:
+ event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+ event->wValue = cpu_to_le16(0);
+ event->wLength = cpu_to_le16(8);
+ req->length = ECM_QC_STATUS_BYTECOUNT;
+
+ /* SPEED_CHANGE data is up/down speeds in bits/sec */
+ data = req->buf + sizeof *event;
+ data[0] = cpu_to_le32(ecm_qc_bitrate(cdev->gadget));
+ data[1] = data[0];
+
+ DBG(cdev, "notify speed %d\n", ecm_qc_bitrate(cdev->gadget));
+ ecm->notify_state = ECM_QC_NOTIFY_NONE;
+ break;
+ }
+ event->bmRequestType = 0xA1;
+ event->wIndex = cpu_to_le16(ecm->ctrl_id);
+
+ ecm->notify_req = NULL;
+ status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
+ if (status < 0) {
+ ecm->notify_req = req;
+ DBG(cdev, "notify --> %d\n", status);
+ }
+}
+
+static void ecm_qc_notify(struct f_ecm_qc *ecm)
+{
+ /* NOTE on most versions of Linux, host side cdc-ethernet
+ * won't listen for notifications until its netdevice opens.
+ * The first notification then sits in the FIFO for a long
+ * time, and the second one is queued.
+ */
+ ecm->notify_state = ECM_QC_NOTIFY_CONNECT;
+ ecm_qc_do_notify(ecm);
+}
+
+static void ecm_qc_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_ecm_qc *ecm = req->context;
+
+ switch (req->status) {
+ case 0:
+ /* no fault */
+ break;
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ ecm->notify_state = ECM_QC_NOTIFY_NONE;
+ break;
+ default:
+ DBG(cdev, "event %02x --> %d\n",
+ event->bNotificationType, req->status);
+ break;
+ }
+ ecm->notify_req = req;
+ ecm_qc_do_notify(ecm);
+}
+
+static int ecm_qc_setup(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ /* composite driver infrastructure handles everything except
+ * CDC class messages; interface activation uses set_alt().
+ */
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SET_ETHERNET_PACKET_FILTER:
+ /* see 6.2.30: no data, wIndex = interface,
+ * wValue = packet filter bitmap
+ */
+ if (w_length != 0 || w_index != ecm->ctrl_id)
+ goto invalid;
+ DBG(cdev, "packet filter %02x\n", w_value);
+ /* REVISIT locking of cdc_filter. This assumes the UDC
+ * driver won't have a concurrent packet TX irq running on
+ * another CPU; or that if it does, this write is atomic...
+ */
+ ecm->port.cdc_filter = w_value;
+ value = 0;
+ break;
+
+ /* and optionally:
+ * case USB_CDC_SEND_ENCAPSULATED_COMMAND:
+ * case USB_CDC_GET_ENCAPSULATED_RESPONSE:
+ * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
+ * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
+ * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
+ * case USB_CDC_GET_ETHERNET_STATISTIC:
+ */
+
+ default:
+invalid:
+ DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ DBG(cdev, "ecm req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->zero = 0;
+ req->length = value;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0)
+ pr_err("ecm req %02x.%02x response err %d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ value);
+ }
+
+ /* device either stalls (value < 0) or reports success */
+ return value;
+}
+
+
+/*
+ * Handle SET_INTERFACE for either of the ECM interfaces.
+ *
+ * Control interface: only altsetting 0 exists; (re)enables the
+ * interrupt notify endpoint.
+ * Data interface: alt 0 tears the link down, alt 1 brings it up
+ * (per CDC ECM, data flows only in the non-default altsetting).
+ * Returns 0 on success, -EINVAL for an unknown interface/altsetting,
+ * or the gether_qc_connect() error.
+ */
+static int ecm_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	/* Control interface has only altsetting 0 */
+	if (intf == ecm->ctrl_id) {
+		if (alt != 0)
+			goto fail;
+
+		/* a non-NULL driver_data means the ep is currently enabled */
+		if (ecm->notify->driver_data) {
+			VDBG(cdev, "reset ecm control %d\n", intf);
+			usb_ep_disable(ecm->notify);
+		}
+		/* pick the speed-appropriate descriptor on first use */
+		if (!(ecm->notify->desc)) {
+			VDBG(cdev, "init ecm ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, ecm->notify))
+				goto fail;
+		}
+		usb_ep_enable(ecm->notify);
+		ecm->notify->driver_data = ecm;
+
+	/* Data interface has two altsettings, 0 and 1 */
+	} else if (intf == ecm->data_id) {
+		if (alt > 1)
+			goto fail;
+
+		/* tear down any live link before reconfiguring */
+		if (ecm->port.in_ep->driver_data) {
+			DBG(cdev, "reset ecm\n");
+			gether_qc_disconnect(&ecm->port);
+			ecm_qc_bam_disconnect(ecm);
+		}
+
+		if (!ecm->port.in_ep->desc ||
+		    !ecm->port.out_ep->desc) {
+			DBG(cdev, "init ecm\n");
+			if (config_ep_by_speed(cdev->gadget, f,
+					       ecm->port.in_ep) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       ecm->port.out_ep)) {
+				/* clear both so the next attempt retries */
+				ecm->port.in_ep->desc = NULL;
+				ecm->port.out_ep->desc = NULL;
+				goto fail;
+			}
+		}
+
+		/* CDC Ethernet only sends data in non-default altsettings.
+		 * Changing altsettings resets filters, statistics, etc.
+		 */
+		if (alt == 1) {
+			struct net_device	*net;
+
+			/* Enable zlps by default for ECM conformance;
+			 * override for musb_hdrc (avoids txdma ovhead).
+			 */
+			ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget)
+				);
+			ecm->port.cdc_filter = DEFAULT_FILTER;
+			DBG(cdev, "activate ecm\n");
+			net = gether_qc_connect(&ecm->port);
+			if (IS_ERR(net))
+				return PTR_ERR(net);
+
+			if (ecm_qc_bam_connect(ecm))
+				goto fail;
+		}
+
+		/* NOTE this can be a minor disagreement with the ECM spec,
+		 * which says speed notifications will "always" follow
+		 * connection notifications.  But we allow one connect to
+		 * follow another (if the first is in flight), and instead
+		 * just guarantee that a speed notification is always sent.
+		 */
+		ecm_qc_notify(ecm);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+/* Because the data interface supports multiple altsettings,
+ * this ECM function *MUST* implement a get_alt() method.
+ */
+/*
+ * Report the current altsetting for an interface.  The control
+ * interface is always 0; for the data interface, a claimed IN
+ * endpoint (non-NULL driver_data) means the active altsetting 1.
+ */
+static int ecm_qc_get_alt(struct usb_function *f, unsigned intf)
+{
+	struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+
+	if (intf == ecm->ctrl_id)
+		return 0;
+	return ecm->port.in_ep->driver_data ? 1 : 0;
+}
+
+/*
+ * Deactivate the function (e.g. on SET_CONFIGURATION to another config
+ * or on disconnect): drop the data link if it is up and disable the
+ * notify endpoint.
+ *
+ * Fix: the original passed `cdev` to DBG() without ever declaring it in
+ * this function, which does not compile with the standard composite
+ * DBG() macro (it dereferences its first argument).  Declare the local
+ * from f->config->cdev, which is valid for the lifetime of disable().
+ */
+static void ecm_qc_disable(struct usb_function *f)
+{
+	struct f_ecm_qc *ecm = func_to_ecm_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	DBG(cdev, "ecm deactivated\n");
+
+	/* non-NULL driver_data means the data link is currently up */
+	if (ecm->port.in_ep->driver_data) {
+		gether_qc_disconnect(&ecm->port);
+		ecm_qc_bam_disconnect(ecm);
+	}
+
+	if (ecm->notify->driver_data) {
+		usb_ep_disable(ecm->notify);
+		ecm->notify->driver_data = NULL;
+		/* force re-config_ep_by_speed() on next set_alt() */
+		ecm->notify->desc = NULL;
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Callbacks let us notify the host about connect/disconnect when the
+ * net device is opened or closed.
+ *
+ * For testing, note that link states on this side include both opened
+ * and closed variants of:
+ *
+ * - disconnected/unconfigured
+ * - configured but inactive (data alt 0)
+ * - configured and active (data alt 1)
+ *
+ * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and
+ * SET_INTERFACE (altsetting). Remember also that "configured" doesn't
+ * imply the host is actually polling the notification endpoint, and
+ * likewise that "active" doesn't imply it's actually using the data
+ * endpoints for traffic.
+ */
+
+/*
+ * Link-layer callback: the net device was opened on the gadget side.
+ * Record the open state and push a (re)connect notification to the host.
+ */
+static void ecm_qc_open(struct qc_gether *geth)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(&geth->func);
+	DBG(ecm->port.func.config->cdev, "%s\n", __func__);
+
+	ecm->is_open = true;
+	ecm_qc_notify(ecm);
+}
+
+/*
+ * Link-layer callback: the net device was closed on the gadget side.
+ * Record the closed state and notify the host of the link drop.
+ */
+static void ecm_qc_close(struct qc_gether *geth)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(&geth->func);
+
+	DBG(ecm->port.func.config->cdev, "%s\n", __func__);
+
+	ecm->is_open = false;
+	ecm_qc_notify(ecm);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+/*
+ * Bind the ECM function into a configuration: allocate the two
+ * interface IDs, the three endpoints (bulk IN/OUT + interrupt notify),
+ * the notification request/buffer, and per-speed descriptor copies.
+ * Returns 0 on success or a negative errno; on failure everything
+ * acquired so far is released here.
+ *
+ * Fix: the original fail path tested ecm->port.out_ep->desc and
+ * ecm->port.in_ep->desc, but those endpoint pointers are still NULL
+ * when binding fails before/at usb_ep_autoconfig() (the instance is
+ * kzalloc'd), so the ->desc load dereferenced NULL.  Test the endpoint
+ * pointers themselves instead, mirroring the notify-endpoint check.
+ */
+static int
+ecm_qc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ecm->ctrl_id = status;
+
+	ecm_qc_control_intf.bInterfaceNumber = status;
+	ecm_qc_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ecm->data_id = status;
+
+	ecm_qc_data_nop_intf.bInterfaceNumber = status;
+	ecm_qc_data_intf.bInterfaceNumber = status;
+	ecm_qc_union_desc.bSlaveInterface0 = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_in_desc);
+	if (!ep)
+		goto fail;
+
+	ecm->port.in_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_out_desc);
+	if (!ep)
+		goto fail;
+
+	ecm->port.out_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* NOTE:  a status/notification endpoint is *OPTIONAL* but we
+	 * don't treat it that way.  It's simpler, and some newer CDC
+	 * profiles (wireless handsets) no longer treat it as optional.
+	 */
+	ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_notify_desc);
+	if (!ep)
+		goto fail;
+	ecm->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!ecm->notify_req)
+		goto fail;
+	ecm->notify_req->buf = kmalloc(ECM_QC_STATUS_BYTECOUNT, GFP_KERNEL);
+	if (!ecm->notify_req->buf)
+		goto fail;
+	ecm->notify_req->context = ecm;
+	ecm->notify_req->complete = ecm_qc_notify_complete;
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(ecm_qc_fs_function);
+	if (!f->descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		ecm_qc_hs_in_desc.bEndpointAddress =
+				ecm_qc_fs_in_desc.bEndpointAddress;
+		ecm_qc_hs_out_desc.bEndpointAddress =
+				ecm_qc_fs_out_desc.bEndpointAddress;
+		ecm_qc_hs_notify_desc.bEndpointAddress =
+				ecm_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(ecm_qc_hs_function);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	/* NOTE:  all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	ecm->port.open = ecm_qc_open;
+	ecm->port.close = ecm_qc_close;
+
+	DBG(cdev, "CDC Ethernet: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			ecm->port.in_ep->name, ecm->port.out_ep->name,
+			ecm->notify->name);
+	return 0;
+
+fail:
+	if (f->descriptors)
+		usb_free_descriptors(f->descriptors);
+
+	if (ecm->notify_req) {
+		kfree(ecm->notify_req->buf);
+		usb_ep_free_request(ecm->notify, ecm->notify_req);
+	}
+
+	/* we might as well release our claims on endpoints; check the
+	 * pointers, not ->desc: the ep fields are NULL when autoconfig
+	 * never ran (kzalloc'd instance), so ->desc would oops here.
+	 */
+	if (ecm->notify)
+		ecm->notify->driver_data = NULL;
+	if (ecm->port.out_ep)
+		ecm->port.out_ep->driver_data = NULL;
+	if (ecm->port.in_ep)
+		ecm->port.in_ep->driver_data = NULL;
+
+	pr_err("%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+/*
+ * Undo ecm_qc_bind(): free per-speed descriptor copies, the
+ * notification request and its buffer, drop the MAC string pointer,
+ * and free the instance itself.
+ */
+static void
+ecm_qc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(f);
+
+	DBG(c->cdev, "ecm unbind\n");
+
+	/* hs descriptors exist only on dual-speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+
+	kfree(ecm->notify_req->buf);
+	usb_ep_free_request(ecm->notify, ecm->notify_req);
+
+	/* string table entry pointed into the instance being freed */
+	ecm_qc_string_defs[1].s = NULL;
+	kfree(ecm);
+}
+
+/**
+ * ecm_qc_bind_config - add CDC Ethernet network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host side
+ * side of the link was recorded
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_qc_setup(). Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+int
+ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+{
+	struct f_ecm_qc	*ecm;
+	int		status;
+
+	if (!can_support_ecm(c->cdev->gadget) || !ethaddr)
+		return -EINVAL;
+
+	status = ecm_qc_bam_setup();
+	if (status) {
+		pr_err("bam setup failed");
+		return status;
+	}
+
+	/* maybe allocate device-global string IDs */
+	if (ecm_qc_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_qc_string_defs[0].id = status;
+		ecm_qc_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_qc_string_defs[2].id = status;
+		ecm_qc_data_intf.iInterface = status;
+
+		/* MAC address */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_qc_string_defs[1].id = status;
+		ecm_qc_desc.iMACAddress = status;
+	}
+
+	/* allocate and initialize one new instance */
+	ecm = kzalloc(sizeof *ecm, GFP_KERNEL);
+	if (!ecm)
+		return -ENOMEM;
+
+	/* export host's Ethernet address in CDC format (12 uppercase
+	 * hex digits, no separators, per the iMACAddress string)
+	 */
+	snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
+		"%02X%02X%02X%02X%02X%02X",
+		ethaddr[0], ethaddr[1], ethaddr[2],
+		ethaddr[3], ethaddr[4], ethaddr[5]);
+	ecm_qc_string_defs[1].s = ecm->ethaddr;
+
+	ecm->port.cdc_filter = DEFAULT_FILTER;
+
+	ecm->port.func.name = "cdc_ethernet";
+	ecm->port.func.strings = ecm_qc_strings;
+	/* descriptors are per-instance copies */
+	ecm->port.func.bind = ecm_qc_bind;
+	ecm->port.func.unbind = ecm_qc_unbind;
+	ecm->port.func.set_alt = ecm_qc_set_alt;
+	ecm->port.func.get_alt = ecm_qc_get_alt;
+	ecm->port.func.setup = ecm_qc_setup;
+	ecm->port.func.disable = ecm_qc_disable;
+
+	status = usb_add_function(c, &ecm->port.func);
+	if (status) {
+		/* don't leave the string table pointing at freed memory */
+		ecm_qc_string_defs[1].s = NULL;
+		kfree(ecm);
+	}
+	return status;
+}
diff --git a/drivers/usb/gadget/f_qc_rndis.c b/drivers/usb/gadget/f_qc_rndis.c
new file mode 100644
index 0000000..dcf307d
--- /dev/null
+++ b/drivers/usb/gadget/f_qc_rndis.c
@@ -0,0 +1,1151 @@
+/*
+ * f_qc_rndis.c -- RNDIS link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz (mina86@mina86.com)
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include <linux/atomic.h>
+
+#include "u_ether.h"
+#include "u_qc_ether.h"
+#include "rndis.h"
+
+
+/*
+ * This function is an RNDIS Ethernet port -- a Microsoft protocol that's
+ * been promoted instead of the standard CDC Ethernet. The published RNDIS
+ * spec is ambiguous, incomplete, and needlessly complex. Variants such as
+ * ActiveSync have even worse status in terms of specification.
+ *
+ * In short: it's a protocol controlled by (and for) Microsoft, not for an
+ * Open ecosystem or markets. Linux supports it *only* because Microsoft
+ * doesn't support the CDC Ethernet standard.
+ *
+ * The RNDIS data transfer model is complex, with multiple Ethernet packets
+ * per USB message, and out of band data. The control model is built around
+ * what's essentially an "RNDIS RPC" protocol. It's all wrapped in a CDC ACM
+ * (modem, not Ethernet) veneer, with those ACM descriptors being entirely
+ * useless (they're ignored). RNDIS expects to be the only function in its
+ * configuration, so it's no real help if you need composite devices; and
+ * it expects to be the first configuration too.
+ *
+ * There is a single technical advantage of RNDIS over CDC Ethernet, if you
+ * discount the fluff that its RPC can be made to deliver: it doesn't need
+ * a NOP altsetting for the data interface. That lets it work on some of the
+ * "so smart it's stupid" hardware which takes over configuration changes
+ * from the software, and adds restrictions like "no altsettings".
+ *
+ * Unfortunately MSFT's RNDIS drivers are buggy. They hang or oops, and
+ * have all sorts of contrary-to-specification oddities that can prevent
+ * them from working sanely. Since bugfixes (or accurate specs, letting
+ * Linux work around those bugs) are unlikely to ever come from MSFT, you
+ * may want to avoid using RNDIS on purely operational grounds.
+ *
+ * Omissions from the RNDIS 1.0 specification include:
+ *
+ * - Power management ... references data that's scattered around lots
+ * of other documentation, which is incorrect/incomplete there too.
+ *
+ * - There are various undocumented protocol requirements, like the need
+ * to send garbage in some control-OUT messages.
+ *
+ * - MS-Windows drivers sometimes emit undocumented requests.
+ *
+ * This function is based on RNDIS link function driver and
+ * contains MSM specific implementation.
+ */
+
+/* Per-instance state for one RNDIS function. */
+struct f_rndis_qc {
+	struct qc_gether		port;		/* ethernet-over-USB link */
+	u8				ctrl_id, data_id; /* interface numbers */
+	u8				ethaddr[ETH_ALEN]; /* host-side MAC */
+	u32				vendorID;
+	u8				max_pkt_per_xfer; /* pkts aggregated per USB xfer */
+	const char			*manufacturer;
+	int				config;		/* rndis_register() handle */
+	atomic_t			ioctl_excl;	/* single-opener guards */
+	atomic_t			open_excl;
+
+	struct usb_ep			*notify;	/* interrupt IN ep */
+	struct usb_request		*notify_req;
+	atomic_t			notify_count;	/* pending notifications */
+};
+
+/* Map a generic usb_function back to its enclosing f_rndis_qc. */
+static inline struct f_rndis_qc *func_to_rndis_qc(struct usb_function *f)
+{
+	return container_of(f, struct f_rndis_qc, port.func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static unsigned int rndis_qc_bitrate(struct usb_gadget *g)
+{
+ if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+ return 13 * 1024 * 8 * 1000 * 8;
+ else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+ return 13 * 512 * 8 * 1000 * 8;
+ else
+ return 19 * 64 * 1 * 1000 * 8;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */
+#define RNDIS_QC_STATUS_BYTECOUNT 8 /* 8 bytes data */
+
+/* currently only one rndis instance is supported */
+#define RNDIS_QC_NO_PORTS 1
+
+/* default max packets per transfer value */
+#define DEFAULT_MAX_PKT_PER_XFER 15
+
+
+#define RNDIS_QC_IOCTL_MAGIC 'i'
+#define RNDIS_QC_GET_MAX_PKT_PER_XFER _IOR(RNDIS_QC_IOCTL_MAGIC, 1, u8)
+
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor rndis_qc_control_intf = {
+ .bLength = sizeof rndis_qc_control_intf,
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ /* status endpoint is optional; this could be patched later */
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM,
+ .bInterfaceProtocol = USB_CDC_ACM_PROTO_VENDOR,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc rndis_qc_header_desc = {
+ .bLength = sizeof rndis_qc_header_desc,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+
+ .bcdCDC = cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor rndis_qc_call_mgmt_descriptor = {
+ .bLength = sizeof rndis_qc_call_mgmt_descriptor,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
+
+ .bmCapabilities = 0x00,
+ .bDataInterface = 0x01,
+};
+
+static struct usb_cdc_acm_descriptor rndis_qc_acm_descriptor = {
+ .bLength = sizeof rndis_qc_acm_descriptor,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ACM_TYPE,
+
+ .bmCapabilities = 0x00,
+};
+
+static struct usb_cdc_union_desc rndis_qc_union_desc = {
+ .bLength = sizeof(rndis_qc_union_desc),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_UNION_TYPE,
+ /* .bMasterInterface0 = DYNAMIC */
+ /* .bSlaveInterface0 = DYNAMIC */
+};
+
+/* the data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor rndis_qc_data_intf = {
+ .bLength = sizeof rndis_qc_data_intf,
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ /* .bInterfaceNumber = DYNAMIC */
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+ /* .iInterface = DYNAMIC */
+};
+
+
+static struct usb_interface_assoc_descriptor
+rndis_qc_iad_descriptor = {
+ .bLength = sizeof rndis_qc_iad_descriptor,
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+ .bFirstInterface = 0, /* XXX, hardcoded */
+ .bInterfaceCount = 2, /* control + data */
+ .bFunctionClass = USB_CLASS_COMM,
+ .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
+ .bFunctionProtocol = USB_CDC_PROTO_NONE,
+ /* .iFunction = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+ .bInterval = 1 << RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *eth_qc_fs_function[] = {
+ (struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_qc_control_intf,
+ (struct usb_descriptor_header *) &rndis_qc_header_desc,
+ (struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_union_desc,
+ (struct usb_descriptor_header *) &rndis_qc_fs_notify_desc,
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_qc_data_intf,
+ (struct usb_descriptor_header *) &rndis_qc_fs_in_desc,
+ (struct usb_descriptor_header *) &rndis_qc_fs_out_desc,
+ NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+ .bInterval = RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor rndis_qc_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *eth_qc_hs_function[] = {
+ (struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_qc_control_intf,
+ (struct usb_descriptor_header *) &rndis_qc_header_desc,
+ (struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_qc_union_desc,
+ (struct usb_descriptor_header *) &rndis_qc_hs_notify_desc,
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_qc_data_intf,
+ (struct usb_descriptor_header *) &rndis_qc_hs_in_desc,
+ (struct usb_descriptor_header *) &rndis_qc_hs_out_desc,
+ NULL,
+};
+
+/* super speed support: */
+
+/* SuperSpeed notify (interrupt IN) endpoint.
+ * Fix: the initializer referenced STATUS_BYTECOUNT and
+ * LOG2_STATUS_INTERVAL_MSEC, but this file only defines the
+ * RNDIS_QC_-prefixed constants — use those (same values the FS/HS
+ * descriptors use).
+ */
+static struct usb_endpoint_descriptor rndis_qc_ss_notify_desc = {
+	.bLength = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+	.bInterval = RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+/* SuperSpeed companion for the notify endpoint.
+ * Fix: sizeof ss_intr_comp_desc and STATUS_BYTECOUNT named identifiers
+ * that don't exist here — size this descriptor by its own name and use
+ * the RNDIS_QC_-prefixed byte count.
+ */
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_intr_comp_desc = {
+	.bLength = sizeof rndis_qc_ss_intr_comp_desc,
+	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst = 0, */
+	/* .bmAttributes = 0, */
+	.wBytesPerInterval = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+/* SuperSpeed companion shared by both bulk endpoints.
+ * Fix: sizeof ss_bulk_comp_desc referenced an identifier that doesn't
+ * exist in this file — size the descriptor by its own name.
+ */
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_bulk_comp_desc = {
+	.bLength = sizeof rndis_qc_ss_bulk_comp_desc,
+	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst = 0, */
+	/* .bmAttributes = 0, */
+};
+
+/* SuperSpeed descriptor set.
+ * Fix: the first entry referenced rndis_iad_descriptor (the name from
+ * the upstream f_rndis.c this was derived from); this file declares it
+ * as rndis_qc_iad_descriptor, matching the FS/HS tables.
+ */
+static struct usb_descriptor_header *eth_qc_ss_function[] = {
+	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_qc_control_intf,
+	(struct usb_descriptor_header *) &rndis_qc_header_desc,
+	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_union_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_notify_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_intr_comp_desc,
+
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_qc_data_intf,
+	(struct usb_descriptor_header *) &rndis_qc_ss_in_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_out_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string rndis_qc_string_defs[] = {
+ [0].s = "RNDIS Communications Control",
+ [1].s = "RNDIS Ethernet Data",
+ [2].s = "RNDIS",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings rndis_qc_string_table = {
+ .language = 0x0409, /* en-us */
+ .strings = rndis_qc_string_defs,
+};
+
+static struct usb_gadget_strings *rndis_qc_strings[] = {
+ &rndis_qc_string_table,
+ NULL,
+};
+
+struct f_rndis_qc *_rndis_qc;
+
+/* Try to take a single-owner lock built on an atomic counter.
+ * Returns 0 when this caller is the first (and now sole) holder,
+ * -EBUSY otherwise (the speculative increment is rolled back).
+ */
+static inline int rndis_qc_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) != 1) {
+		atomic_dec(excl);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/* Release a lock taken with rndis_qc_lock(). */
+static inline void rndis_qc_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* MSM bam support */
+static struct data_port rndis_qc_bam_port;
+
+/* One-time BAM data-path setup for the supported port count.
+ * Returns 0 on success or the bam_data_setup() error (logged).
+ */
+static int rndis_qc_bam_setup(void)
+{
+	int ret = bam_data_setup(RNDIS_QC_NO_PORTS);
+
+	if (ret)
+		pr_err("bam_data_setup failed err: %d\n", ret);
+
+	return ret;
+}
+
+/* Wire this instance's endpoints into the BAM data port and bring the
+ * BAM connection up.  Returns 0 on success or the bam_data_connect()
+ * error.
+ */
+static int rndis_qc_bam_connect(struct f_rndis_qc *dev)
+{
+	int status;
+
+	rndis_qc_bam_port.func = dev->port.func;
+	rndis_qc_bam_port.in = dev->port.in_ep;
+	rndis_qc_bam_port.out = dev->port.out_ep;
+
+	/* currently we use the first connection */
+	status = bam_data_connect(&rndis_qc_bam_port, 0, 0);
+	if (status) {
+		pr_err("bam_data_connect failed: err:%d\n",
+				status);
+		return status;
+	}
+
+	pr_info("rndis bam connected\n");
+
+	return 0;
+}
+
+/* BAM disconnect hook; currently a logged no-op placeholder.
+ * Always returns 0.
+ */
+static int rndis_qc_bam_disconnect(struct f_rndis_qc *dev)
+{
+	pr_info("dev:%p. %s Do nothing.\n",
+			 dev, __func__);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* TX path: prepend the RNDIS packet header to an outgoing skb.
+ * Reallocates headroom for the header, then frees the original skb.
+ * Returns the new skb, or NULL if the realloc failed (original is
+ * freed either way).
+ */
+static struct sk_buff *rndis_qc_add_header(struct qc_gether *port,
+					struct sk_buff *skb)
+{
+	struct sk_buff *skb2;
+
+	skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
+	if (skb2)
+		rndis_add_hdr(skb2);
+
+	dev_kfree_skb_any(skb);
+	return skb2;
+}
+
+/* RX path: strip the RNDIS packet header from an incoming skb and
+ * queue the bare Ethernet frame on @list.
+ * Returns 0 on success; -EINVAL for a non-PACKET_MSG message or
+ * -EOVERFLOW when DataOffset exceeds the skb (skb freed in both
+ * error cases).  Uses unaligned loads since the header sits at an
+ * arbitrary offset in the USB buffer.
+ */
+int rndis_qc_rm_hdr(struct qc_gether *port,
+			struct sk_buff *skb,
+			struct sk_buff_head *list)
+{
+	/* tmp points to a struct rndis_packet_msg_type */
+	__le32 *tmp = (void *)skb->data;
+
+	/* MessageType, MessageLength */
+	if (cpu_to_le32(REMOTE_NDIS_PACKET_MSG)
+			!= get_unaligned(tmp++)) {
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
+	tmp++;	/* skip MessageLength */
+
+	/* DataOffset, DataLength; +8 because DataOffset is relative to
+	 * the field following MessageLength, not the message start
+	 */
+	if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
+		dev_kfree_skb_any(skb);
+		return -EOVERFLOW;
+	}
+	skb_trim(skb, get_unaligned_le32(tmp++));
+
+	skb_queue_tail(list, skb);
+	return 0;
+}
+
+
+/* Called by the RNDIS core when a response is ready for the host.
+ * Queues a RESPONSE_AVAILABLE interrupt notification; notify_count
+ * coalesces bursts — only the 1->0 transition in the completion
+ * handler re-queues, so at most one request is in flight.
+ */
+static void rndis_qc_response_available(void *_rndis)
+{
+	struct f_rndis_qc			*rndis = _rndis;
+	struct usb_request		*req = rndis->notify_req;
+	__le32				*data = req->buf;
+	int				status;
+
+	/* someone already has a notification in flight */
+	if (atomic_inc_return(&rndis->notify_count) != 1)
+		return;
+
+	/* Send RNDIS RESPONSE_AVAILABLE notification; a
+	 * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too
+	 *
+	 * This is the only notification defined by RNDIS.
+	 */
+	data[0] = cpu_to_le32(1);
+	data[1] = cpu_to_le32(0);
+
+	status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+	if (status) {
+		atomic_dec(&rndis->notify_count);
+		pr_info("notify/0 --> %d\n", status);
+	}
+}
+
+/* Completion handler for the RESPONSE_AVAILABLE interrupt request.
+ * On disconnect-type errors the pending count is cleared; otherwise,
+ * if more notifications accumulated while this one was in flight,
+ * the request is re-queued until notify_count drains to zero.
+ *
+ * Fix: the re-queue error path called DBG(cdev, ...) but no `cdev`
+ * exists in this scope (compile error); use pr_debug(), matching the
+ * pr_* logging already used throughout this file.
+ */
+static void rndis_qc_response_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	struct f_rndis_qc		*rndis = req->context;
+	int				status = req->status;
+
+	/* after TX:
+	 *  - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
+	 *  - RNDIS_RESPONSE_AVAILABLE (status/irq)
+	 */
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&rndis->notify_count, 0);
+		break;
+	default:
+		pr_info("RNDIS %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		/* FALLTHROUGH */
+	case 0:
+		if (ep != rndis->notify)
+			break;
+
+		/* handle multiple pending RNDIS_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&rndis->notify_count))
+			break;
+		status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+		if (status) {
+			atomic_dec(&rndis->notify_count);
+			pr_debug("notify/1 --> %d\n", status);
+		}
+		break;
+	}
+}
+
+/* ep0 OUT completion: the encapsulated RNDIS command body has arrived;
+ * hand it to the RNDIS message parser.  The parser queues any reply,
+ * which the host collects via GET_ENCAPSULATED_RESPONSE.
+ */
+static void rndis_qc_command_complete(struct usb_ep *ep,
+					struct usb_request *req)
+{
+	struct f_rndis_qc		*rndis = req->context;
+	int				status;
+
+	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
+	status = rndis_msg_parser(rndis->config, (u8 *) req->buf);
+	if (status < 0)
+		pr_err("RNDIS command error %d, %d/%d\n",
+			status, req->actual, req->length);
+}
+
+/*
+ * ep0 class-request handler for the RNDIS control interface.
+ * Implements the two CDC encapsulation requests RNDIS uses for its
+ * RPC scheme: SEND_ENCAPSULATED_COMMAND (host -> device command,
+ * parsed later in the completion handler) and
+ * GET_ENCAPSULATED_RESPONSE (device -> host reply).
+ * Returns the queued length, or a negative value to stall ep0.
+ */
+static int
+rndis_qc_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* RNDIS uses the CDC command encapsulation mechanism to implement
+	 * an RPC scheme, with much getting/setting of attributes by OID.
+	 */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		/* read the request; process it later */
+		value = w_length;
+		req->complete = rndis_qc_command_complete;
+		req->context = rndis;
+		/* later, rndis_response_available() sends a notification */
+		break;
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		else {
+			u8 *buf;
+			u32 n;
+
+			/* return the result */
+			buf = rndis_get_next_response(rndis->config, &n);
+			if (buf) {
+				memcpy(req->buf, buf, n);
+				req->complete = rndis_qc_response_complete;
+				rndis_free_response(rndis->config, buf);
+				value = n;
+			}
+			/* else stalls ... spec says to avoid that */
+		}
+		break;
+
+	default:
+invalid:
+		VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		/* short reply needs a ZLP to terminate the transfer */
+		req->zero = (value < w_length);
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("rndis response on err %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+
+/*
+ * Handle SET_INTERFACE for the RNDIS interfaces.  Both interfaces
+ * have only altsetting 0 (RNDIS needs no NOP altsetting): the control
+ * interface (re)enables the notify endpoint; the data interface brings
+ * the ethernet link and BAM path up with a zero packet filter, leaving
+ * data gated until the host sets a filter via RNDIS RPC.
+ * Returns 0, -EINVAL for an unknown interface, or a connect error.
+ */
+static int rndis_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_rndis_qc	 *rndis = func_to_rndis_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	/* we know alt == 0 */
+
+	if (intf == rndis->ctrl_id) {
+		/* non-NULL driver_data means the ep is currently enabled */
+		if (rndis->notify->driver_data) {
+			VDBG(cdev, "reset rndis control %d\n", intf);
+			usb_ep_disable(rndis->notify);
+		}
+		if (!rndis->notify->desc) {
+			VDBG(cdev, "init rndis ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
+				goto fail;
+		}
+		usb_ep_enable(rndis->notify);
+		rndis->notify->driver_data = rndis;
+
+	} else if (intf == rndis->data_id) {
+		struct net_device	*net;
+
+		if (rndis->port.in_ep->driver_data) {
+			DBG(cdev, "reset rndis\n");
+			gether_qc_disconnect(&rndis->port);
+			rndis_qc_bam_disconnect(rndis);
+		}
+
+		if (!rndis->port.in_ep->desc || !rndis->port.out_ep->desc) {
+			DBG(cdev, "init rndis\n");
+			if (config_ep_by_speed(cdev->gadget, f,
+					       rndis->port.in_ep) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       rndis->port.out_ep)) {
+				/* clear both so the next attempt retries */
+				rndis->port.in_ep->desc = NULL;
+				rndis->port.out_ep->desc = NULL;
+				goto fail;
+			}
+		}
+
+		/* Avoid ZLPs; they can be troublesome. */
+		rndis->port.is_zlp_ok = false;
+
+		/* RNDIS should be in the "RNDIS uninitialized" state,
+		 * either never activated or after rndis_uninit().
+		 *
+		 * We don't want data to flow here until a nonzero packet
+		 * filter is set, at which point it enters "RNDIS data
+		 * initialized" state ... but we do want the endpoints
+		 * to be activated.  It's a strange little state.
+		 *
+		 * REVISIT the RNDIS gadget code has done this wrong for a
+		 * very long time.  We need another call to the link layer
+		 * code -- gether_updown(...bool) maybe -- to do it right.
+		 */
+		rndis->port.cdc_filter = 0;
+
+		DBG(cdev, "RNDIS RX/TX early activation ...\n");
+		net = gether_qc_connect(&rndis->port);
+		if (IS_ERR(net))
+			return PTR_ERR(net);
+
+		if (rndis_qc_bam_connect(rndis))
+			goto fail;
+
+		/* let the RNDIS core drive the link via the filter */
+		rndis_set_param_dev(rndis->config, net,
+				&rndis->port.cdc_filter);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+/*
+ * Deactivate the function: reset the RNDIS state machine, drop the
+ * data link and BAM path, and disable the notify endpoint.  A NULL
+ * notify driver_data means we were never activated, so there is
+ * nothing to undo.
+ */
+static void rndis_qc_disable(struct usb_function *f)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+
+	if (!rndis->notify->driver_data)
+		return;
+
+	pr_info("rndis deactivated\n");
+
+	rndis_uninit(rndis->config);
+	gether_qc_disconnect(&rndis->port);
+	rndis_qc_bam_disconnect(rndis);
+
+	usb_ep_disable(rndis->notify);
+	rndis->notify->driver_data = NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * This isn't quite the same mechanism as CDC Ethernet, since the
+ * notification scheme passes less data, but the same set of link
+ * states must be tested. A key difference is that altsettings are
+ * not used to tell whether the link should send packets or not.
+ */
+
+static void rndis_qc_open(struct qc_gether *geth)
+{
+ struct f_rndis_qc *rndis = func_to_rndis_qc(&geth->func);
+ struct usb_composite_dev *cdev = geth->func.config->cdev;
+
+ DBG(cdev, "%s\n", __func__);
+
+ rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3,
+ rndis_qc_bitrate(cdev->gadget) / 100);
+ rndis_signal_connect(rndis->config);
+}
+
+static void rndis_qc_close(struct qc_gether *geth)
+{
+ struct f_rndis_qc *rndis = func_to_rndis_qc(&geth->func);
+
+ DBG(geth->func.config->cdev, "%s\n", __func__);
+
+ rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0);
+ rndis_signal_disconnect(rndis->config);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+static int
+rndis_qc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+ int status;
+ struct usb_ep *ep;
+
+ /* allocate instance-specific interface IDs */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ rndis->ctrl_id = status;
+ rndis_qc_iad_descriptor.bFirstInterface = status;
+
+ rndis_qc_control_intf.bInterfaceNumber = status;
+ rndis_qc_union_desc.bMasterInterface0 = status;
+
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ rndis->data_id = status;
+
+ rndis_qc_data_intf.bInterfaceNumber = status;
+ rndis_qc_union_desc.bSlaveInterface0 = status;
+
+ status = -ENODEV;
+
+ /* allocate instance-specific endpoints */
+ ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_in_desc);
+ if (!ep)
+ goto fail;
+ rndis->port.in_ep = ep;
+ ep->driver_data = cdev; /* claim */
+
+ ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_out_desc);
+ if (!ep)
+ goto fail;
+ rndis->port.out_ep = ep;
+ ep->driver_data = cdev; /* claim */
+
+ /* NOTE: a status/notification endpoint is, strictly speaking,
+ * optional. We don't treat it that way though! It's simpler,
+ * and some newer profiles don't treat it as optional.
+ */
+ ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_notify_desc);
+ if (!ep)
+ goto fail;
+ rndis->notify = ep;
+ ep->driver_data = cdev; /* claim */
+
+ status = -ENOMEM;
+
+ /* allocate notification request and buffer */
+ rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (!rndis->notify_req)
+ goto fail;
+ rndis->notify_req->buf = kmalloc(RNDIS_QC_STATUS_BYTECOUNT, GFP_KERNEL);
+ if (!rndis->notify_req->buf)
+ goto fail;
+ rndis->notify_req->length = RNDIS_QC_STATUS_BYTECOUNT;
+ rndis->notify_req->context = rndis;
+ rndis->notify_req->complete = rndis_qc_response_complete;
+
+ /* copy descriptors, and track endpoint copies */
+ f->descriptors = usb_copy_descriptors(eth_qc_fs_function);
+ if (!f->descriptors)
+ goto fail;
+
+ /* support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ rndis_qc_hs_in_desc.bEndpointAddress =
+ rndis_qc_fs_in_desc.bEndpointAddress;
+ rndis_qc_hs_out_desc.bEndpointAddress =
+ rndis_qc_fs_out_desc.bEndpointAddress;
+ rndis_qc_hs_notify_desc.bEndpointAddress =
+ rndis_qc_fs_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->hs_descriptors = usb_copy_descriptors(eth_qc_hs_function);
+
+ if (!f->hs_descriptors)
+ goto fail;
+ }
+
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ rndis_qc_ss_in_desc.bEndpointAddress =
+ rndis_qc_fs_in_desc.bEndpointAddress;
+ rndis_qc_ss_out_desc.bEndpointAddress =
+ rndis_qc_fs_out_desc.bEndpointAddress;
+ rndis_qc_ss_notify_desc.bEndpointAddress =
+ rndis_qc_fs_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(eth_qc_ss_function);
+ if (!f->ss_descriptors)
+ goto fail;
+ }
+
+ rndis->port.open = rndis_qc_open;
+ rndis->port.close = rndis_qc_close;
+
+ status = rndis_register(rndis_qc_response_available, rndis);
+ if (status < 0)
+ goto fail;
+ rndis->config = status;
+
+ rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0);
+ rndis_set_host_mac(rndis->config, rndis->ethaddr);
+
+ if (rndis_set_param_vendor(rndis->config, rndis->vendorID,
+ rndis->manufacturer))
+ goto fail;
+
+ rndis_set_max_pkt_xfer(rndis->config, rndis->max_pkt_per_xfer);
+
+ /* NOTE: all that is done without knowing or caring about
+ * the network link ... which is unavailable to this code
+ * until we're activated via set_alt().
+ */
+
+ DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+ gadget_is_superspeed(c->cdev->gadget) ? "super" :
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ rndis->port.in_ep->name, rndis->port.out_ep->name,
+ rndis->notify->name);
+ return 0;
+
+fail:
+ if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
+ if (gadget_is_dualspeed(c->cdev->gadget) && f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+ if (f->descriptors)
+ usb_free_descriptors(f->descriptors);
+
+ if (rndis->notify_req) {
+ kfree(rndis->notify_req->buf);
+ usb_ep_free_request(rndis->notify, rndis->notify_req);
+ }
+
+ /* we might as well release our claims on endpoints */
+ if (rndis->notify)
+ rndis->notify->driver_data = NULL;
+ if (rndis->port.out_ep->desc)
+ rndis->port.out_ep->driver_data = NULL;
+ if (rndis->port.in_ep->desc)
+ rndis->port.in_ep->driver_data = NULL;
+
+ pr_err("%s: can't bind, err %d\n", f->name, status);
+
+ return status;
+}
+
+static void
+rndis_qc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_rndis_qc *rndis = func_to_rndis_qc(f);
+
+ rndis_deregister(rndis->config);
+ rndis_exit();
+
+ if (gadget_is_dualspeed(c->cdev->gadget))
+ usb_free_descriptors(f->hs_descriptors);
+ usb_free_descriptors(f->descriptors);
+
+ kfree(rndis->notify_req->buf);
+ usb_ep_free_request(rndis->notify, rndis->notify_req);
+
+ kfree(rndis);
+}
+
+/* Some controllers can't support RNDIS ... */
+static inline bool can_support_rndis_qc(struct usb_configuration *c)
+{
+ /* everything else is *presumably* fine */
+ return true;
+}
+
+/**
+ * rndis_qc_bind_config - add RNDIS network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host side
+ * side of the link was recorded
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_setup(). Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+int
+rndis_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+{
+ return rndis_qc_bind_config_vendor(c, ethaddr, 0, NULL, 1);
+}
+
+int
+rndis_qc_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ u32 vendorID, const char *manufacturer,
+ u8 max_pkt_per_xfer)
+{
+ struct f_rndis_qc *rndis;
+ int status;
+
+ if (!can_support_rndis_qc(c) || !ethaddr)
+ return -EINVAL;
+
+ /* setup RNDIS itself */
+ status = rndis_init();
+ if (status < 0)
+ return status;
+
+ status = rndis_qc_bam_setup();
+ if (status) {
+ pr_err("bam setup failed");
+ return status;
+ }
+
+ /* maybe allocate device-global string IDs */
+ if (rndis_qc_string_defs[0].id == 0) {
+
+ /* control interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ rndis_qc_string_defs[0].id = status;
+ rndis_qc_control_intf.iInterface = status;
+
+ /* data interface label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ rndis_qc_string_defs[1].id = status;
+ rndis_qc_data_intf.iInterface = status;
+
+ /* IAD iFunction label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ rndis_qc_string_defs[2].id = status;
+ rndis_qc_iad_descriptor.iFunction = status;
+ }
+
+ /* allocate and initialize one new instance */
+ status = -ENOMEM;
+ rndis = kzalloc(sizeof *rndis, GFP_KERNEL);
+ if (!rndis)
+ goto fail;
+
+ memcpy(rndis->ethaddr, ethaddr, ETH_ALEN);
+ rndis->vendorID = vendorID;
+ rndis->manufacturer = manufacturer;
+
+ /* if max_pkt_per_xfer was not configured set to default value */
+ rndis->max_pkt_per_xfer =
+ max_pkt_per_xfer ? max_pkt_per_xfer : DEFAULT_MAX_PKT_PER_XFER;
+
+ /* RNDIS activates when the host changes this filter */
+ rndis->port.cdc_filter = 0;
+
+ /* RNDIS has special (and complex) framing */
+ rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
+ rndis->port.wrap = rndis_qc_add_header;
+ rndis->port.unwrap = rndis_qc_rm_hdr;
+
+ rndis->port.func.name = "rndis";
+ rndis->port.func.strings = rndis_qc_strings;
+ /* descriptors are per-instance copies */
+ rndis->port.func.bind = rndis_qc_bind;
+ rndis->port.func.unbind = rndis_qc_unbind;
+ rndis->port.func.set_alt = rndis_qc_set_alt;
+ rndis->port.func.setup = rndis_qc_setup;
+ rndis->port.func.disable = rndis_qc_disable;
+
+ _rndis_qc = rndis;
+
+ status = usb_add_function(c, &rndis->port.func);
+ if (status) {
+ kfree(rndis);
+fail:
+ rndis_exit();
+ }
+ return status;
+}
+
+static int rndis_qc_open_dev(struct inode *ip, struct file *fp)
+{
+ pr_info("Open rndis QC driver\n");
+
+ if (!_rndis_qc) {
+ pr_err("rndis_qc_dev not created yet\n");
+ return -ENODEV;
+ }
+
+ if (rndis_qc_lock(&_rndis_qc->open_excl)) {
+ pr_err("Already opened\n");
+ return -EBUSY;
+ }
+
+ fp->private_data = _rndis_qc;
+ pr_info("rndis QC file opened\n");
+
+ return 0;
+}
+
+static int rndis_qc_release_dev(struct inode *ip, struct file *fp)
+{
+ struct f_rndis_qc *rndis = fp->private_data;
+
+ pr_info("Close rndis QC file");
+ rndis_qc_unlock(&rndis->open_excl);
+
+ return 0;
+}
+
+static long rndis_qc_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
+{
+ struct f_rndis_qc *rndis = fp->private_data;
+ int ret = 0;
+
+ pr_info("Received command %d", cmd);
+
+ if (rndis_qc_lock(&rndis->ioctl_excl))
+ return -EBUSY;
+
+ switch (cmd) {
+ case RNDIS_QC_GET_MAX_PKT_PER_XFER:
+ ret = copy_to_user((void __user *)arg,
+ &rndis->max_pkt_per_xfer,
+ sizeof(rndis->max_pkt_per_xfer));
+ if (ret) {
+ pr_err("copying to user space failed");
+ ret = -EFAULT;
+ }
+ pr_info("Sent max packets per xfer %d",
+ rndis->max_pkt_per_xfer);
+ break;
+ default:
+ pr_err("Unsupported IOCTL");
+ ret = -EINVAL;
+ }
+
+ rndis_qc_unlock(&rndis->ioctl_excl);
+
+ return ret;
+}
+
+static const struct file_operations rndis_qc_fops = {
+ .owner = THIS_MODULE,
+ .open = rndis_qc_open_dev,
+ .release = rndis_qc_release_dev,
+ .unlocked_ioctl = rndis_qc_ioctl,
+};
+
+static struct miscdevice rndis_qc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "android_rndis_qc",
+ .fops = &rndis_qc_fops,
+};
+
+static int rndis_qc_init(void)
+{
+ int ret;
+
+ pr_info("initialize rndis QC instance\n");
+
+ ret = misc_register(&rndis_qc_device);
+ if (ret)
+ pr_err("rndis QC driver failed to register");
+
+ return ret;
+}
+
+static void rndis_qc_cleanup(void)
+{
+ pr_info("rndis QC cleanup");
+
+ misc_deregister(&rndis_qc_device);
+ _rndis_qc = NULL;
+}
+
+
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 7b6acc6..3d6ceaa 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -572,6 +572,7 @@
#ifdef CONFIG_MODEM_SUPPORT
usb_ep_fifo_flush(gser->notify);
usb_ep_disable(gser->notify);
+ gser->notify->driver_data = NULL;
#endif
gser->online = 0;
}
diff --git a/drivers/usb/gadget/f_tcm.c b/drivers/usb/gadget/f_tcm.c
index d944745..8777504 100644
--- a/drivers/usb/gadget/f_tcm.c
+++ b/drivers/usb/gadget/f_tcm.c
@@ -255,7 +255,6 @@
{
struct f_uas *fu = cmd->fu;
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct usb_gadget *gadget = fuas_to_gadget(fu);
int ret;
init_completion(&cmd->write_complete);
@@ -266,22 +265,6 @@
return -EINVAL;
}
- if (!gadget->sg_supported) {
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
- if (!cmd->data_buf)
- return -ENOMEM;
-
- fu->bot_req_out->buf = cmd->data_buf;
- } else {
- fu->bot_req_out->buf = NULL;
- fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
- fu->bot_req_out->sg = se_cmd->t_data_sg;
- }
-
- fu->bot_req_out->complete = usbg_data_write_cmpl;
- fu->bot_req_out->length = se_cmd->data_length;
- fu->bot_req_out->context = cmd;
-
ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
if (ret)
goto cleanup;
diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c
index 3e40552..55fd59e 100644
--- a/drivers/usb/gadget/msm72k_udc.c
+++ b/drivers/usb/gadget/msm72k_udc.c
@@ -190,6 +190,8 @@
/* max power requested by selected configuration */
unsigned b_max_pow;
unsigned chg_current;
+ unsigned chg_type_retry_cnt;
+ bool proprietary_chg;
struct delayed_work chg_det;
struct delayed_work chg_stop;
struct msm_hsusb_gadget_platform_data *pdata;
@@ -294,11 +296,18 @@
{
if ((readl(USB_PORTSC) & PORTSC_LS) == PORTSC_LS)
return USB_CHG_TYPE__WALLCHARGER;
- else
- return USB_CHG_TYPE__SDP;
+ else {
+ if (ui->gadget.speed == USB_SPEED_LOW ||
+ ui->gadget.speed == USB_SPEED_FULL ||
+ ui->gadget.speed == USB_SPEED_HIGH)
+ return USB_CHG_TYPE__SDP;
+ else
+ return USB_CHG_TYPE__INVALID;
+ }
}
#define USB_WALLCHARGER_CHG_CURRENT 1800
+#define USB_PROPRIETARY_CHG_CURRENT 500
static int usb_get_max_power(struct usb_info *ui)
{
struct msm_otg *otg = to_msm_otg(ui->xceiv);
@@ -321,8 +330,10 @@
if (temp == USB_CHG_TYPE__INVALID)
return -ENODEV;
- if (temp == USB_CHG_TYPE__WALLCHARGER)
+ if (temp == USB_CHG_TYPE__WALLCHARGER && !ui->proprietary_chg)
return USB_WALLCHARGER_CHG_CURRENT;
+ else
+ return USB_PROPRIETARY_CHG_CURRENT;
if (suspended || !configured)
return 0;
@@ -428,6 +439,17 @@
}
temp = usb_get_chg_type(ui);
+ if (temp != USB_CHG_TYPE__WALLCHARGER && temp != USB_CHG_TYPE__SDP
+ && !ui->chg_type_retry_cnt) {
+ schedule_delayed_work(&ui->chg_det, USB_CHG_DET_DELAY);
+ ui->chg_type_retry_cnt++;
+ spin_unlock_irqrestore(&ui->lock, flags);
+ return;
+ }
+ if (temp == USB_CHG_TYPE__INVALID) {
+ temp = USB_CHG_TYPE__WALLCHARGER;
+ ui->proprietary_chg = true;
+ }
spin_unlock_irqrestore(&ui->lock, flags);
atomic_set(&otg->chg_type, temp);
@@ -680,6 +702,14 @@
spin_lock_irqsave(&ui->lock, flags);
+ if (ept->num != 0 && ept->ep.desc == NULL) {
+ req->req.status = -EINVAL;
+ spin_unlock_irqrestore(&ui->lock, flags);
+ dev_err(&ui->pdev->dev,
+ "%s: called for disabled endpoint\n", __func__);
+ return -EINVAL;
+ }
+
if (req->busy) {
req->req.status = -EBUSY;
spin_unlock_irqrestore(&ui->lock, flags);
@@ -1737,6 +1767,8 @@
ui->gadget.speed = USB_SPEED_UNKNOWN;
ui->usb_state = USB_STATE_NOTATTACHED;
ui->flags |= USB_FLAG_VBUS_OFFLINE;
+ ui->chg_type_retry_cnt = 0;
+ ui->proprietary_chg = false;
}
if (in_interrupt()) {
schedule_work(&ui->work);
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 16c4afb..e0520c7 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -585,8 +585,8 @@
resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION);
resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS);
resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3);
- resp->MaxPacketsPerTransfer = cpu_to_le32(TX_SKB_HOLD_THRESHOLD);
- resp->MaxTransferSize = cpu_to_le32(TX_SKB_HOLD_THRESHOLD *
+ resp->MaxPacketsPerTransfer = cpu_to_le32(params->max_pkt_per_xfer);
+ resp->MaxTransferSize = cpu_to_le32(params->max_pkt_per_xfer *
(params->dev->mtu
+ sizeof(struct ethhdr)
+ sizeof(struct rndis_packet_msg_type)
@@ -902,6 +902,8 @@
rndis_per_dev_params[i].used = 1;
rndis_per_dev_params[i].resp_avail = resp_avail;
rndis_per_dev_params[i].v = v;
+ rndis_per_dev_params[i].max_pkt_per_xfer =
+ TX_SKB_HOLD_THRESHOLD;
pr_debug("%s: configNr = %d\n", __func__, i);
return i;
}
@@ -955,6 +957,13 @@
return 0;
}
+void rndis_set_max_pkt_xfer(u8 configNr, u8 max_pkt_per_xfer)
+{
+ pr_debug("%s:\n", __func__);
+
+ rndis_per_dev_params[configNr].max_pkt_per_xfer = max_pkt_per_xfer;
+}
+
void rndis_add_hdr(struct sk_buff *skb)
{
struct rndis_packet_msg_type *header;
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
index 907c330..1f06c42 100644
--- a/drivers/usb/gadget/rndis.h
+++ b/drivers/usb/gadget/rndis.h
@@ -235,6 +235,7 @@
struct net_device *dev;
u32 vendorID;
+ u8 max_pkt_per_xfer;
const char *vendorDescr;
void (*resp_avail)(void *v);
void *v;
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
index d379c66..1fade88 100644
--- a/drivers/usb/gadget/u_bam.c
+++ b/drivers/usb/gadget/u_bam.c
@@ -24,6 +24,7 @@
#include <linux/termios.h>
#include <mach/usb_gadget_xport.h>
+#include <linux/usb/msm_hsusb.h>
#include <mach/usb_bam.h>
#include "u_rmnet.h"
diff --git a/drivers/usb/gadget/u_ctrl_hsuart.c b/drivers/usb/gadget/u_ctrl_hsuart.c
index 7102d81..a55960e 100644
--- a/drivers/usb/gadget/u_ctrl_hsuart.c
+++ b/drivers/usb/gadget/u_ctrl_hsuart.c
@@ -289,7 +289,7 @@
void ghsuart_ctrl_disconnect(void *gptr, int port_num)
{
- struct gctrl_port *port;
+ struct ghsuart_ctrl_port *port;
struct grmnet *gr = NULL;
unsigned long flags;
@@ -300,7 +300,7 @@
return;
}
- port = gctrl_ports[port_num].port;
+ port = ghsuart_ctrl_ports[port_num].port;
if (!gptr || !port) {
pr_err("%s: grmnet port is null\n", __func__);
@@ -372,7 +372,7 @@
static void ghsuart_ctrl_port_free(int portno)
{
struct ghsuart_ctrl_port *port = ghsuart_ctrl_ports[portno].port;
- struct platform_driver *pdrv = &gctrl_ports[portno].pdrv;
+ struct platform_driver *pdrv = &ghsuart_ctrl_ports[portno].pdrv;
destroy_workqueue(port->wq);
if (pdrv)
diff --git a/drivers/usb/gadget/u_data_hsuart.c b/drivers/usb/gadget/u_data_hsuart.c
index 91b1190..74bb93f 100644
--- a/drivers/usb/gadget/u_data_hsuart.c
+++ b/drivers/usb/gadget/u_data_hsuart.c
@@ -843,12 +843,15 @@
ghsuart_data_free_buffers(port);
/* disable endpoints */
- if (port->in)
+ if (port->in) {
usb_ep_disable(port->in);
+ port->in->driver_data = NULL;
+ }
- if (port->out)
+ if (port->out) {
usb_ep_disable(port->out);
-
+ port->out->driver_data = NULL;
+ }
atomic_set(&port->connected, 0);
if (port->gtype == USB_GADGET_SERIAL) {
diff --git a/drivers/usb/gadget/u_qc_ether.c b/drivers/usb/gadget/u_qc_ether.c
new file mode 100644
index 0000000..20933b6
--- /dev/null
+++ b/drivers/usb/gadget/u_qc_ether.c
@@ -0,0 +1,409 @@
+/*
+ * u_qc_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include "u_ether.h"
+
+
+/*
+ * This component encapsulates the Ethernet link glue needed to provide
+ * one (!) network link through the USB gadget stack, normally "usb0".
+ *
+ * The control and data models are handled by the function driver which
+ * connects to this code; such as CDC Ethernet (ECM or EEM),
+ * "CDC Subset", or RNDIS. That includes all descriptor and endpoint
+ * management.
+ *
+ * Link level addressing is handled by this component using module
+ * parameters; if no such parameters are provided, random link level
+ * addresses are used. Each end of the link uses one address. The
+ * host end address is exported in various ways, and is often recorded
+ * in configuration databases.
+ *
+ * The driver which assembles each configuration using such a link is
+ * responsible for ensuring that each configuration includes at most one
+ * instance of is network link. (The network layer provides ways for
+ * this single "physical" link to be used by multiple virtual links.)
+ *
+ * This utilities is based on Ethernet-over-USB link layer utilities and
+ * contains MSM specific implementation.
+ */
+
+#define UETH__VERSION "29-May-2008"
+
+struct eth_qc_dev {
+ /* lock is held while accessing port_usb
+ * or updating its backlink port_usb->ioport
+ */
+ spinlock_t lock;
+ struct qc_gether *port_usb;
+
+ struct net_device *net;
+ struct usb_gadget *gadget;
+
+ unsigned header_len;
+
+ bool zlp;
+ u8 host_mac[ETH_ALEN];
+};
+
+/*-------------------------------------------------------------------------*/
+
+#undef DBG
+#undef VDBG
+#undef ERROR
+#undef INFO
+
+#define xprintk(d, level, fmt, args...) \
+ printk(level "%s: " fmt , (d)->net->name , ## args)
+
+#ifdef DEBUG
+#undef DEBUG
+#define DBG(dev, fmt, args...) \
+ xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DBG(dev, fmt, args...) \
+ do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VDBG DBG
+#else
+#define VDBG(dev, fmt, args...) \
+ do { } while (0)
+#endif /* DEBUG */
+
+#define ERROR(dev, fmt, args...) \
+ xprintk(dev , KERN_ERR , fmt , ## args)
+#define INFO(dev, fmt, args...) \
+ xprintk(dev , KERN_INFO , fmt , ## args)
+
+/*-------------------------------------------------------------------------*/
+
+/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+static int ueth_qc_change_mtu(struct net_device *net, int new_mtu)
+{
+ struct eth_qc_dev *dev = netdev_priv(net);
+ unsigned long flags;
+ int status = 0;
+
+ /* don't change MTU on "live" link (peer won't know) */
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb)
+ status = -EBUSY;
+ else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
+ status = -ERANGE;
+ else
+ net->mtu = new_mtu;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return status;
+}
+
+static void eth_qc_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *p)
+{
+ struct eth_qc_dev *dev = netdev_priv(net);
+
+ strlcpy(p->driver, "g_qc_ether", sizeof p->driver);
+ strlcpy(p->version, UETH__VERSION, sizeof p->version);
+ strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
+ strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
+}
+
+static const struct ethtool_ops qc_ethtool_ops = {
+ .get_drvinfo = eth_qc_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static netdev_tx_t eth_qc_start_xmit(struct sk_buff *skb,
+ struct net_device *net)
+{
+ return NETDEV_TX_OK;
+}
+
+static int eth_qc_open(struct net_device *net)
+{
+ struct eth_qc_dev *dev = netdev_priv(net);
+ struct qc_gether *link;
+
+ DBG(dev, "%s\n", __func__);
+ if (netif_carrier_ok(dev->net)) {
+ /* Force the netif to send the RTM_NEWLINK event
+ * that in use to notify on the USB cable status.
+ */
+ netif_carrier_off(dev->net);
+ netif_carrier_on(dev->net);
+ netif_wake_queue(dev->net);
+ }
+
+ spin_lock_irq(&dev->lock);
+ link = dev->port_usb;
+ if (link && link->open)
+ link->open(link);
+ spin_unlock_irq(&dev->lock);
+
+ return 0;
+}
+
+static int eth_qc_stop(struct net_device *net)
+{
+ struct eth_qc_dev *dev = netdev_priv(net);
+ unsigned long flags;
+ struct qc_gether *link = dev->port_usb;
+
+ VDBG(dev, "%s\n", __func__);
+ netif_stop_queue(net);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb && link->close)
+ link->close(link);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
+static char *qc_dev_addr;
+module_param(qc_dev_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(qc_dev_addr, "QC Device Ethernet Address");
+
+/* this address is invisible to ifconfig */
+static char *qc_host_addr;
+module_param(qc_host_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(qc_host_addr, "QC Host Ethernet Address");
+
+static int get_qc_ether_addr(const char *str, u8 *dev_addr)
+{
+ if (str) {
+ unsigned i;
+
+ for (i = 0; i < 6; i++) {
+ unsigned char num;
+
+ if ((*str == '.') || (*str == ':'))
+ str++;
+ num = hex_to_bin(*str++) << 4;
+ num |= hex_to_bin(*str++);
+ dev_addr[i] = num;
+ }
+ if (is_valid_ether_addr(dev_addr))
+ return 0;
+ }
+ random_ether_addr(dev_addr);
+ return 1;
+}
+
+static struct eth_qc_dev *qc_dev;
+
+static const struct net_device_ops eth_qc_netdev_ops = {
+ .ndo_open = eth_qc_open,
+ .ndo_stop = eth_qc_stop,
+ .ndo_start_xmit = eth_qc_start_xmit,
+ .ndo_change_mtu = ueth_qc_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static struct device_type qc_gadget_type = {
+ .name = "gadget",
+};
+
+/**
+ * gether_qc_setup - initialize one ethernet-over-usb link
+ * @g: gadget to associated with these links
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ * host side of the link is recorded
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework. The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+int gether_qc_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
+{
+ return gether_qc_setup_name(g, ethaddr, "usb");
+}
+
+/**
+ * gether_qc_setup_name - initialize one ethernet-over-usb link
+ * @g: gadget to associated with these links
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ * host side of the link is recorded
+ * @netname: name for network device (for example, "usb")
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework. The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+int gether_qc_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+ const char *netname)
+{
+ struct eth_qc_dev *dev;
+ struct net_device *net;
+ int status;
+
+ if (qc_dev)
+ return -EBUSY;
+
+ net = alloc_etherdev(sizeof *dev);
+ if (!net)
+ return -ENOMEM;
+
+ dev = netdev_priv(net);
+ spin_lock_init(&dev->lock);
+
+ /* network device setup */
+ dev->net = net;
+ snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+
+ if (get_qc_ether_addr(qc_dev_addr, net->dev_addr))
+ dev_warn(&g->dev,
+ "using random %s ethernet address\n", "self");
+ if (get_qc_ether_addr(qc_host_addr, dev->host_mac))
+ dev_warn(&g->dev,
+ "using random %s ethernet address\n", "host");
+
+ if (ethaddr)
+ memcpy(ethaddr, dev->host_mac, ETH_ALEN);
+
+ net->netdev_ops = ð_qc_netdev_ops;
+
+ SET_ETHTOOL_OPS(net, &qc_ethtool_ops);
+
+ netif_carrier_off(net);
+
+ dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
+ SET_NETDEV_DEVTYPE(net, &qc_gadget_type);
+
+ status = register_netdev(net);
+ if (status < 0) {
+ dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
+ free_netdev(net);
+ } else {
+ INFO(dev, "MAC %pM\n", net->dev_addr);
+ INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+
+ qc_dev = dev;
+ }
+
+ return status;
+}
+
+/**
+ * gether_qc_cleanup - remove Ethernet-over-USB device
+ * Context: may sleep
+ *
+ * This is called to free all resources allocated by @gether_qc_setup().
+ */
+void gether_qc_cleanup(void)
+{
+ if (!qc_dev)
+ return;
+
+ unregister_netdev(qc_dev->net);
+ free_netdev(qc_dev->net);
+
+ qc_dev = NULL;
+}
+
+
+/**
+ * gether_qc_connect - notify network layer that USB link is active
+ * @link: the USB link, set up with endpoints, descriptors matching
+ * current device speed, and any framing wrapper(s) set up.
+ * Context: irqs blocked
+ *
+ * This is called to let the network layer know the connection
+ * is active ("carrier detect").
+ */
+struct net_device *gether_qc_connect(struct qc_gether *link)
+{
+ struct eth_qc_dev *dev = qc_dev;
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ dev->zlp = link->is_zlp_ok;
+ dev->header_len = link->header_len;
+
+ spin_lock(&dev->lock);
+ dev->port_usb = link;
+ link->ioport = dev;
+ if (netif_running(dev->net)) {
+ if (link->open)
+ link->open(link);
+ } else {
+ if (link->close)
+ link->close(link);
+ }
+ spin_unlock(&dev->lock);
+
+ netif_carrier_on(dev->net);
+ if (netif_running(dev->net))
+ netif_wake_queue(dev->net);
+
+ return dev->net;
+}
+
+/**
+ * gether_qc_disconnect - notify network layer that USB link is inactive
+ * @link: the USB link, on which gether_connect() was called
+ * Context: irqs blocked
+ *
+ * This is called to let the network layer know the connection
+ * went inactive ("no carrier").
+ *
+ * On return, the state is as if gether_connect() had never been called.
+ */
+void gether_qc_disconnect(struct qc_gether *link)
+{
+ struct eth_qc_dev *dev = link->ioport;
+
+ if (!dev)
+ return;
+
+ DBG(dev, "%s\n", __func__);
+
+ netif_stop_queue(dev->net);
+ netif_carrier_off(dev->net);
+
+ spin_lock(&dev->lock);
+ dev->port_usb = NULL;
+ link->ioport = NULL;
+ spin_unlock(&dev->lock);
+}
diff --git a/drivers/usb/gadget/u_qc_ether.h b/drivers/usb/gadget/u_qc_ether.h
new file mode 100644
index 0000000..b3c281b
--- /dev/null
+++ b/drivers/usb/gadget/u_qc_ether.h
@@ -0,0 +1,97 @@
+/*
+ * u_qc_ether.h -- interface to USB gadget "ethernet link" utilities
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __U_QC_ETHER_H
+#define __U_QC_ETHER_H
+
+#include <linux/err.h>
+#include <linux/if_ether.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+
+#include "gadget_chips.h"
+
+
+/*
+ * This represents the USB side of an "ethernet" link, managed by a USB
+ * function which provides control and (maybe) framing. Two functions
+ * in different configurations could share the same ethernet link/netdev,
+ * using different host interaction models.
+ *
+ * There is a current limitation that only one instance of this link may
+ * be present in any given configuration. When that's a problem, network
+ * layer facilities can be used to package multiple logical links on this
+ * single "physical" one.
+ *
+ * This function is based on Ethernet-over-USB link layer utilities and
+ * contains MSM specific implementation.
+ */
+
+struct qc_gether {
+ struct usb_function func;
+
+ /* updated by gether_{connect,disconnect} */
+ struct eth_qc_dev *ioport;
+
+ /* endpoints handle full and/or high speeds */
+ struct usb_ep *in_ep;
+ struct usb_ep *out_ep;
+
+ bool is_zlp_ok;
+
+ u16 cdc_filter;
+
+ /* hooks for added framing, as needed for RNDIS and EEM. */
+ u32 header_len;
+ /* NCM requires fixed size bundles */
+ bool is_fixed;
+ u32 fixed_out_len;
+ u32 fixed_in_len;
+ struct sk_buff *(*wrap)(struct qc_gether *port,
+ struct sk_buff *skb);
+ int (*unwrap)(struct qc_gether *port,
+ struct sk_buff *skb,
+ struct sk_buff_head *list);
+
+ /* called on network open/close */
+ void (*open)(struct qc_gether *);
+ void (*close)(struct qc_gether *);
+};
+
+/* netdev setup/teardown as directed by the gadget driver */
+int gether_qc_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN]);
+void gether_qc_cleanup(void);
+/* variant of gether_setup that allows customizing network device name */
+int gether_qc_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+ const char *netname);
+
+/* connect/disconnect is handled by individual functions */
+struct net_device *gether_qc_connect(struct qc_gether *);
+void gether_qc_disconnect(struct qc_gether *);
+
+/* each configuration may bind one instance of an ethernet link */
+int ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+
+int
+rndis_qc_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ u32 vendorID, const char *manufacturer,
+ u8 maxPktPerXfer);
+#endif /* __U_QC_ETHER_H */
diff --git a/drivers/usb/gadget/u_rmnet_ctrl_smd.c b/drivers/usb/gadget/u_rmnet_ctrl_smd.c
index 0256a75..169008b 100644
--- a/drivers/usb/gadget/u_rmnet_ctrl_smd.c
+++ b/drivers/usb/gadget/u_rmnet_ctrl_smd.c
@@ -326,6 +326,8 @@
container_of(w, struct rmnet_ctrl_port, connect_w.work);
struct smd_ch_info *c = &port->ctrl_ch;
unsigned long flags;
+ int set_bits = 0;
+ int clear_bits = 0;
int ret;
pr_debug("%s:\n", __func__);
@@ -348,9 +350,11 @@
return;
}
+ set_bits = c->cbits_tomodem;
+ clear_bits = ~(c->cbits_tomodem | TIOCM_RTS);
spin_lock_irqsave(&port->port_lock, flags);
if (port->port_usb)
- smd_tiocmset(c->ch, c->cbits_tomodem, ~c->cbits_tomodem);
+ smd_tiocmset(c->ch, set_bits, clear_bits);
spin_unlock_irqrestore(&port->port_lock, flags);
}
diff --git a/drivers/usb/gadget/u_sdio.c b/drivers/usb/gadget/u_sdio.c
index 5e9b0ec..a604e1e 100644
--- a/drivers/usb/gadget/u_sdio.c
+++ b/drivers/usb/gadget/u_sdio.c
@@ -990,8 +990,10 @@
/* disable endpoints, aborting down any active I/O */
usb_ep_disable(gser->out);
+ gser->out->driver_data = NULL;
usb_ep_disable(gser->in);
+ gser->in->driver_data = NULL;
spin_lock_irqsave(&port->port_lock, flags);
gsdio_free_requests(gser->out, &port->read_pool);
diff --git a/drivers/usb/gadget/u_smd.c b/drivers/usb/gadget/u_smd.c
index a5ceaff..ce285a3 100644
--- a/drivers/usb/gadget/u_smd.c
+++ b/drivers/usb/gadget/u_smd.c
@@ -712,7 +712,9 @@
/* disable endpoints, aborting down any active I/O */
usb_ep_disable(gser->out);
+ gser->out->driver_data = NULL;
usb_ep_disable(gser->in);
+ gser->in->driver_data = NULL;
spin_lock_irqsave(&port->port_lock, flags);
gsmd_free_requests(gser->out, &port->read_pool);
diff --git a/drivers/usb/host/ehci-msm2.c b/drivers/usb/host/ehci-msm2.c
index 4657283..8a87a6a 100644
--- a/drivers/usb/host/ehci-msm2.c
+++ b/drivers/usb/host/ehci-msm2.c
@@ -660,6 +660,7 @@
skip_phy_resume:
+ usb_hcd_resume_root_hub(hcd);
atomic_set(&mhcd->in_lpm, 0);
if (mhcd->async_int) {
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 8467dc0..d895f27 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -12,6 +12,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb/otg.h>
@@ -173,6 +174,11 @@
usb_put_transceiver(phy);
goto put_usb3_hcd;
}
+ } else {
+ pm_runtime_no_callbacks(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get(&pdev->dev);
}
return 0;
@@ -211,6 +217,9 @@
if (phy && phy->otg) {
otg_set_host(phy->otg, NULL);
usb_put_transceiver(phy);
+ } else {
+ pm_runtime_put(&dev->dev);
+ pm_runtime_disable(&dev->dev);
}
return 0;
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index c1e1e13..da96e73 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -40,6 +40,7 @@
#include <linux/mfd/pm8xxx/pm8921-charger.h>
#include <linux/mfd/pm8xxx/misc.h>
#include <linux/power_supply.h>
+#include <linux/mhl_8334.h>
#include <mach/clk.h>
#include <mach/msm_xo.h>
@@ -69,6 +70,7 @@
static struct msm_otg *the_msm_otg;
static bool debug_aca_enabled;
static bool debug_bus_voting_enabled;
+static bool mhl_det_in_progress;
static struct regulator *hsusb_3p3;
static struct regulator *hsusb_1p8;
@@ -338,15 +340,28 @@
{
int ret;
- if (IS_ERR(motg->clk))
- return 0;
-
if (assert) {
- ret = clk_reset(motg->clk, CLK_RESET_ASSERT);
+ if (!IS_ERR(motg->clk)) {
+ ret = clk_reset(motg->clk, CLK_RESET_ASSERT);
+ } else {
+ /* Using asynchronous block reset to the hardware */
+ dev_dbg(motg->phy.dev, "block_reset ASSERT\n");
+ clk_disable_unprepare(motg->pclk);
+ clk_disable_unprepare(motg->core_clk);
+ ret = clk_reset(motg->core_clk, CLK_RESET_ASSERT);
+ }
if (ret)
dev_err(motg->phy.dev, "usb hs_clk assert failed\n");
} else {
- ret = clk_reset(motg->clk, CLK_RESET_DEASSERT);
+ if (!IS_ERR(motg->clk)) {
+ ret = clk_reset(motg->clk, CLK_RESET_DEASSERT);
+ } else {
+ dev_dbg(motg->phy.dev, "block_reset DEASSERT\n");
+ ret = clk_reset(motg->core_clk, CLK_RESET_DEASSERT);
+ ndelay(200);
+ clk_prepare_enable(motg->core_clk);
+ clk_prepare_enable(motg->pclk);
+ }
if (ret)
dev_err(motg->phy.dev, "usb hs_clk deassert failed\n");
}
@@ -738,7 +753,8 @@
return 0;
disable_irq(motg->irq);
- host_bus_suspend = phy->otg->host && !test_bit(ID, &motg->inputs);
+ host_bus_suspend = !test_bit(MHL, &motg->inputs) && phy->otg->host &&
+ !test_bit(ID, &motg->inputs);
device_bus_suspend = phy->otg->gadget && test_bit(ID, &motg->inputs) &&
test_bit(A_BUS_SUSPEND, &motg->inputs) &&
motg->caps & ALLOW_LPM_ON_DEV_SUSPEND;
@@ -1402,6 +1418,102 @@
return 0;
}
+static int msm_otg_mhl_register_callback(struct msm_otg *motg,
+ void (*callback)(int on))
+{
+ struct usb_phy *phy = &motg->phy;
+ int ret;
+
+ if (motg->pdata->otg_control != OTG_PMIC_CONTROL ||
+ !motg->pdata->pmic_id_irq) {
+ dev_dbg(phy->dev, "MHL can not be supported without PMIC Id\n");
+ return -ENODEV;
+ }
+
+ if (!motg->pdata->mhl_dev_name) {
+ dev_dbg(phy->dev, "MHL device name does not exist.\n");
+ return -ENODEV;
+ }
+
+ if (callback)
+ ret = mhl_register_callback(motg->pdata->mhl_dev_name,
+ callback);
+ else
+ ret = mhl_unregister_callback(motg->pdata->mhl_dev_name);
+
+ if (ret)
+ dev_dbg(phy->dev, "mhl_register_callback(%s) return error=%d\n",
+ motg->pdata->mhl_dev_name, ret);
+ else
+ motg->mhl_enabled = true;
+
+ return ret;
+}
+
+static void msm_otg_mhl_notify_online(int on)
+{
+ struct msm_otg *motg = the_msm_otg;
+ struct usb_phy *phy = &motg->phy;
+ bool queue = false;
+
+ dev_dbg(phy->dev, "notify MHL %s%s\n", on ? "" : "dis", "connected");
+
+ if (on) {
+ set_bit(MHL, &motg->inputs);
+ } else {
+ clear_bit(MHL, &motg->inputs);
+ queue = true;
+ }
+
+ if (queue && phy->state != OTG_STATE_UNDEFINED)
+ schedule_work(&motg->sm_work);
+}
+
+static bool msm_otg_is_mhl(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ int is_mhl, ret;
+
+ ret = mhl_device_discovery(motg->pdata->mhl_dev_name, &is_mhl);
+ if (ret || is_mhl != MHL_DISCOVERY_RESULT_MHL) {
+ /*
+ * MHL driver calls our callback saying that MHL connected
+ * if RID_GND is detected. But at later part of discovery
+ * it may figure out MHL is not connected and returns
+ * false. Hence clear MHL input here.
+ */
+ clear_bit(MHL, &motg->inputs);
+ dev_dbg(phy->dev, "MHL device not found\n");
+ return false;
+ }
+
+ set_bit(MHL, &motg->inputs);
+ dev_dbg(phy->dev, "MHL device found\n");
+ return true;
+}
+
+static bool msm_chg_mhl_detect(struct msm_otg *motg)
+{
+ bool ret, id;
+ unsigned long flags;
+
+ if (!motg->mhl_enabled)
+ return false;
+
+ local_irq_save(flags);
+ id = irq_read_line(motg->pdata->pmic_id_irq);
+ local_irq_restore(flags);
+
+ if (id)
+ return false;
+
+ mhl_det_in_progress = true;
+ ret = msm_otg_is_mhl(motg);
+ mhl_det_in_progress = false;
+
+ return ret;
+}
+
static bool msm_chg_aca_detect(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
@@ -1844,6 +1956,12 @@
unsigned long delay;
dev_dbg(phy->dev, "chg detection work\n");
+
+ if (test_bit(MHL, &motg->inputs)) {
+ dev_dbg(phy->dev, "detected MHL, escape chg detection work\n");
+ return;
+ }
+
switch (motg->chg_state) {
case USB_CHG_STATE_UNDEFINED:
msm_chg_block_on(motg);
@@ -1855,6 +1973,13 @@
delay = MSM_CHG_DCD_POLL_TIME;
break;
case USB_CHG_STATE_WAIT_FOR_DCD:
+ if (msm_chg_mhl_detect(motg)) {
+ msm_chg_block_off(motg);
+ motg->chg_state = USB_CHG_STATE_DETECTED;
+ motg->chg_type = USB_INVALID_CHARGER;
+ queue_work(system_nrt_wq, &motg->sm_work);
+ return;
+ }
is_aca = msm_chg_aca_detect(motg);
if (is_aca) {
/*
@@ -2048,9 +2173,17 @@
}
/* FALL THROUGH */
case OTG_STATE_B_IDLE:
- if ((!test_bit(ID, &motg->inputs) ||
+ if (test_bit(MHL, &motg->inputs)) {
+ /* allow LPM */
+ pm_runtime_put_noidle(otg->phy->dev);
+ pm_runtime_suspend(otg->phy->dev);
+ } else if ((!test_bit(ID, &motg->inputs) ||
test_bit(ID_A, &motg->inputs)) && otg->host) {
pr_debug("!id || id_A\n");
+ if (msm_chg_mhl_detect(motg)) {
+ work = 1;
+ break;
+ }
clear_bit(B_BUS_REQ, &motg->inputs);
set_bit(A_BUS_REQ, &motg->inputs);
otg->phy->state = OTG_STATE_A_IDLE;
@@ -2760,6 +2893,12 @@
return;
}
+ if (test_bit(MHL, &motg->inputs) ||
+ mhl_det_in_progress) {
+ pr_debug("PMIC: BSV interrupt ignored in MHL\n");
+ return;
+ }
+
if (atomic_read(&motg->pm_suspended))
motg->sm_work_pending = true;
else
@@ -2802,6 +2941,12 @@
{
struct msm_otg *motg = data;
+ if (test_bit(MHL, &motg->inputs) ||
+ mhl_det_in_progress) {
+ pr_debug("PMIC: Id interrupt ignored in MHL\n");
+ return IRQ_HANDLED;
+ }
+
if (!aca_id_turned_on)
/*schedule delayed work for 5msec for ID line state to settle*/
queue_delayed_work(system_nrt_wq, &motg->pmic_id_status_work,
@@ -3372,6 +3517,7 @@
hsusb_vddcx = devm_regulator_get(motg->phy.dev, "HSUSB_VDDCX");
if (IS_ERR(hsusb_vddcx)) {
dev_err(motg->phy.dev, "unable to get hsusb vddcx\n");
+ ret = PTR_ERR(hsusb_vddcx);
goto devote_xo_handle;
}
motg->vdd_type = VDDCX;
@@ -3400,6 +3546,7 @@
"mhl_usb_hs_switch");
if (IS_ERR(mhl_usb_hs_switch)) {
dev_err(&pdev->dev, "Unable to get mhl_usb_hs_switch\n");
+ ret = PTR_ERR(mhl_usb_hs_switch);
goto free_ldo_init;
}
}
@@ -3416,6 +3563,9 @@
/* Ensure that above STOREs are completed before enabling interrupts */
mb();
+ ret = msm_otg_mhl_register_callback(motg, msm_otg_mhl_notify_online);
+ if (ret)
+ dev_dbg(&pdev->dev, "MHL can not be supported\n");
wake_lock_init(&motg->wlock, WAKE_LOCK_SUSPEND, "msm_otg");
msm_otg_init_timer(motg);
INIT_WORK(&motg->sm_work, msm_otg_sm_work);
@@ -3563,6 +3713,7 @@
msm_otg_setup_devices(pdev, motg->pdata->mode, false);
if (motg->pdata->otg_control == OTG_PMIC_CONTROL)
pm8921_charger_unregister_vbus_sn(0);
+ msm_otg_mhl_register_callback(motg, NULL);
msm_otg_debugfs_cleanup();
cancel_delayed_work_sync(&motg->chg_work);
cancel_delayed_work_sync(&motg->pmic_id_status_work);
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index bf30c0b..366df67 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -302,7 +302,7 @@
tty = tty_port_tty_get(&port->port);
if (!tty)
- continue;
+ break;
list_del_init(&urb->urb_list);
@@ -360,7 +360,7 @@
usb_mark_last_busy(port->serial->dev);
- if (!status && urb->actual_length) {
+ if ((status == -ENOENT || !status) && urb->actual_length) {
spin_lock_irqsave(&portdata->in_lock, flags);
list_add_tail(&urb->urb_list, &portdata->in_urb_list);
spin_unlock_irqrestore(&portdata->in_lock, flags);
@@ -759,7 +759,7 @@
b = intfdata->in_flight;
spin_unlock_irq(&intfdata->susp_lock);
- if (b)
+ if (b || pm_runtime_autosuspend_expiration(&serial->dev->dev))
return -EBUSY;
}
diff --git a/drivers/video/msm/Kconfig b/drivers/video/msm/Kconfig
index 7e078ab..54d7090 100644
--- a/drivers/video/msm/Kconfig
+++ b/drivers/video/msm/Kconfig
@@ -405,6 +405,15 @@
select FB_MSM_MIPI_DSI_SIMULATOR
default n
+config FB_MSM_NO_MDP_PIPE_CTRL
+ depends on FB_MSM_OVERLAY
+ bool "Do not use mdp_pipe_ctrl"
+ ---help---
+ Saying 'Y' here obsoletes the mdp_pipe_ctrl function,
+ which was used to control mdp-related clocks. MDP4 vsync-driven
+ screen updates will use a different clock control mechanism if
+ this is selected.
+
config FB_MSM_OVERLAY0_WRITEBACK
depends on FB_MSM_OVERLAY
bool "MDP overlay0 write back mode enable"
diff --git a/drivers/video/msm/Makefile b/drivers/video/msm/Makefile
index a0f9e02..e49e2ba 100644
--- a/drivers/video/msm/Makefile
+++ b/drivers/video/msm/Makefile
@@ -14,7 +14,6 @@
ifeq ($(CONFIG_FB_MSM_MDP40),y)
obj-y += mdp4_util.o
-obj-y += mdp4_hsic.o
else
obj-y += mdp_hw_init.o
obj-y += mdp_ppp.o
diff --git a/drivers/video/msm/external_common.c b/drivers/video/msm/external_common.c
index b6bf47d..46ef7b4 100644
--- a/drivers/video/msm/external_common.c
+++ b/drivers/video/msm/external_common.c
@@ -1858,6 +1858,9 @@
/* EDID_BLOCK_SIZE[0x80] Each page size in the EDID ROM */
uint8 edid_buf[0x80 * 4];
+ external_common_state->pt_scan_info = 0;
+ external_common_state->it_scan_info = 0;
+ external_common_state->ce_scan_info = 0;
external_common_state->preferred_video_format = 0;
external_common_state->present_3d = 0;
memset(&external_common_state->disp_mode_list, 0,
@@ -1974,13 +1977,15 @@
bool hdmi_common_get_video_format_from_drv_data(struct msm_fb_data_type *mfd)
{
- uint32 format;
+ uint32 format = HDMI_VFRMT_1920x1080p60_16_9;
struct fb_var_screeninfo *var = &mfd->fbi->var;
bool changed = TRUE;
- if (var->reserved[2]) {
- format = var->reserved[2]-1;
+ if (var->reserved[3]) {
+ format = var->reserved[3]-1;
DEV_DBG("reserved format is %d\n", format);
+ } else if (hdmi_prim_resolution) {
+ format = hdmi_prim_resolution - 1;
} else {
DEV_DBG("detecting resolution from %dx%d use var->reserved[3]"
" to specify mode", mfd->var_xres, mfd->var_yres);
@@ -1995,15 +2000,33 @@
: HDMI_VFRMT_720x576p50_16_9;
break;
case 1280:
- format = HDMI_VFRMT_1280x720p60_16_9;
+ if (mfd->var_frame_rate == 50000)
+ format = HDMI_VFRMT_1280x720p50_16_9;
+ else
+ format = HDMI_VFRMT_1280x720p60_16_9;
break;
case 1440:
- format = (mfd->var_yres == 480)
+ format = (mfd->var_yres == 240) /* interlaced has half
+ of y res.
+ */
? HDMI_VFRMT_1440x480i60_16_9
: HDMI_VFRMT_1440x576i50_16_9;
break;
case 1920:
- format = HDMI_VFRMT_1920x1080p60_16_9;
+ if (mfd->var_yres == 540) {/* interlaced */
+ format = HDMI_VFRMT_1920x1080i60_16_9;
+ } else if (mfd->var_yres == 1080) {
+ if (mfd->var_frame_rate == 50000)
+ format = HDMI_VFRMT_1920x1080p50_16_9;
+ else if (mfd->var_frame_rate == 24000)
+ format = HDMI_VFRMT_1920x1080p24_16_9;
+ else if (mfd->var_frame_rate == 25000)
+ format = HDMI_VFRMT_1920x1080p25_16_9;
+ else if (mfd->var_frame_rate == 30000)
+ format = HDMI_VFRMT_1920x1080p30_16_9;
+ else
+ format = HDMI_VFRMT_1920x1080p60_16_9;
+ }
break;
}
}
diff --git a/drivers/video/msm/external_common.h b/drivers/video/msm/external_common.h
index 57c0804..43a8794 100644
--- a/drivers/video/msm/external_common.h
+++ b/drivers/video/msm/external_common.h
@@ -112,6 +112,8 @@
#define HDMI_VFRMT_MAX 59
#define HDMI_VFRMT_FORCE_32BIT 0x7FFFFFFF
+extern int ext_resolution;
+
struct hdmi_disp_mode_timing_type {
uint32 video_format;
uint32 active_h;
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
index 0655daf..26e5687 100644
--- a/drivers/video/msm/hdmi_msm.c
+++ b/drivers/video/msm/hdmi_msm.c
@@ -828,18 +828,22 @@
DEV_INFO("HDMI HPD: QDSP OFF\n");
kobject_uevent_env(external_common_state->uevent_kobj,
KOBJ_CHANGE, envp);
- switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switch to %d: %s\n",
- external_common_state->sdev.state, __func__);
if (hpd_state) {
/* Build EDID table */
hdmi_msm_read_edid();
#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
hdmi_msm_state->reauth = FALSE ;
#endif
- DEV_INFO("HDMI HPD: sense CONNECTED: send ONLINE\n");
+ switch_set_state(&external_common_state->sdev, 1);
+ DEV_INFO("Hdmi state switched to %d: %s\n",
+ external_common_state->sdev.state, __func__);
+
+ DEV_INFO("HDMI HPD: CONNECTED: send ONLINE\n");
kobject_uevent(external_common_state->uevent_kobj,
KOBJ_ONLINE);
+ switch_set_state(&external_common_state->sdev, 1);
+ DEV_INFO("Hdmi state switch to %d: %s\n",
+ external_common_state->sdev.state, __func__);
#ifndef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
/* Send Audio for HDMI Compliance Cases*/
envp[0] = "HDCP_STATE=PASS";
@@ -847,18 +851,15 @@
DEV_INFO("HDMI HPD: sense : send HDCP_PASS\n");
kobject_uevent_env(external_common_state->uevent_kobj,
KOBJ_CHANGE, envp);
- switch_set_state(&external_common_state->sdev, 1);
- DEV_INFO("Hdmi state switch to %d: %s\n",
- external_common_state->sdev.state, __func__);
#endif
} else {
- DEV_INFO("HDMI HPD: sense DISCONNECTED: send OFFLINE\n"
- );
+ switch_set_state(&external_common_state->sdev, 0);
+ DEV_INFO("Hdmi state switched to %d: %s\n",
+ external_common_state->sdev.state, __func__);
+
+ DEV_INFO("HDMI HPD: DISCONNECTED: send OFFLINE\n");
kobject_uevent(external_common_state->uevent_kobj,
KOBJ_OFFLINE);
- switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switch to %d: %s\n",
- external_common_state->sdev.state, __func__);
}
}
@@ -1084,14 +1085,16 @@
DEV_INFO("HDCP: AUTH_FAIL_INT received, LINK0_STATUS=0x%08x\n",
HDMI_INP_ND(0x011C));
if (hdmi_msm_state->full_auth_done) {
+ switch_set_state(&external_common_state->sdev, 0);
+ DEV_INFO("Hdmi state switched to %d: %s\n",
+ external_common_state->sdev.state, __func__);
+
envp[0] = "HDCP_STATE=FAIL";
envp[1] = NULL;
DEV_INFO("HDMI HPD:QDSP OFF\n");
kobject_uevent_env(external_common_state->uevent_kobj,
KOBJ_CHANGE, envp);
- switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switch to %d: %s\n",
- external_common_state->sdev.state, __func__);
+
mutex_lock(&hdcp_auth_state_mutex);
hdmi_msm_state->full_auth_done = FALSE;
mutex_unlock(&hdcp_auth_state_mutex);
@@ -2961,9 +2964,7 @@
char *envp[2];
if (!hdmi_msm_has_hdcp()) {
- switch_set_state(&external_common_state->sdev, 1);
- DEV_INFO("Hdmi state switch to %d: %s\n",
- external_common_state->sdev.state, __func__);
+ DEV_INFO("%s: HDCP NOT ENABLED\n", __func__);
return;
}
@@ -3036,8 +3037,9 @@
kobject_uevent_env(external_common_state->uevent_kobj,
KOBJ_CHANGE, envp);
}
+
switch_set_state(&external_common_state->sdev, 1);
- DEV_INFO("Hdmi state switch to %d: %s\n",
+ DEV_INFO("Hdmi state switched to %d: %s\n",
external_common_state->sdev.state, __func__);
return;
@@ -3059,7 +3061,7 @@
&hdmi_msm_state->hdcp_reauth_work);
}
switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switch to %d: %s\n",
+ DEV_INFO("Hdmi state switched to %d: %s\n",
external_common_state->sdev.state, __func__);
}
#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
@@ -4378,7 +4380,7 @@
hdmi_msm_state->hpd_cable_chg_detected = FALSE;
/* QDSP OFF preceding the HPD event notification */
switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switch to %d: %s\n",
+ DEV_INFO("Hdmi state switched to %d: %s\n",
external_common_state->sdev.state, __func__);
if (on) {
hdmi_msm_read_edid();
@@ -4386,7 +4388,7 @@
hdmi_msm_state->reauth = FALSE ;
/* Build EDID table */
hdmi_msm_turn_on();
- DEV_INFO("HDMI HPD: sense CONNECTED: send ONLINE\n");
+ DEV_INFO("HDMI HPD: CONNECTED: send ONLINE\n");
kobject_uevent(external_common_state->uevent_kobj,
KOBJ_ONLINE);
hdmi_msm_hdcp_enable();
@@ -4399,16 +4401,15 @@
kobject_uevent_env(external_common_state->uevent_kobj,
KOBJ_CHANGE, envp);
switch_set_state(&external_common_state->sdev, 1);
- DEV_INFO("Hdmi state switch to %d: %s\n",
+ DEV_INFO("Hdmi state switched to %d: %s\n",
external_common_state->sdev.state, __func__);
}
} else {
- DEV_INFO("HDMI HPD: sense DISCONNECTED: send OFFLINE\n"
- );
+ DEV_INFO("HDMI HPD: DISCONNECTED: send OFFLINE\n");
kobject_uevent(external_common_state->uevent_kobj,
KOBJ_OFFLINE);
switch_set_state(&external_common_state->sdev, 0);
- DEV_INFO("Hdmi state switch to %d: %s\n",
+ DEV_INFO("Hdmi state switched to %d: %s\n",
external_common_state->sdev.state, __func__);
}
}
@@ -4739,7 +4740,14 @@
}
external_common_state = &hdmi_msm_state->common;
- external_common_state->video_resolution = HDMI_VFRMT_1920x1080p30_16_9;
+
+ if (hdmi_prim_display && hdmi_prim_resolution)
+ external_common_state->video_resolution =
+ hdmi_prim_resolution - 1;
+ else
+ external_common_state->video_resolution =
+ HDMI_VFRMT_1920x1080p60_16_9;
+
#ifdef CONFIG_FB_MSM_HDMI_3D
external_common_state->switch_3d = hdmi_msm_switch_3d;
#endif
diff --git a/drivers/video/msm/lcdc.c b/drivers/video/msm/lcdc.c
index 863d59d..2170abe 100644
--- a/drivers/video/msm/lcdc.c
+++ b/drivers/video/msm/lcdc.c
@@ -37,6 +37,7 @@
static int lcdc_off(struct platform_device *pdev);
static int lcdc_on(struct platform_device *pdev);
+static void cont_splash_clk_ctrl(int enable);
static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
static int pdev_list_cnt;
@@ -100,6 +101,8 @@
#endif
mfd = platform_get_drvdata(pdev);
+ cont_splash_clk_ctrl(0);
+
if (lcdc_pdata && lcdc_pdata->lcdc_get_clk)
panel_pixclock_freq = lcdc_pdata->lcdc_get_clk();
@@ -151,6 +154,20 @@
return ret;
}
+static void cont_splash_clk_ctrl(int enable)
+{
+ static int cont_splash_clks_enabled;
+ if (enable && !cont_splash_clks_enabled) {
+ clk_prepare_enable(pixel_mdp_clk);
+ clk_prepare_enable(pixel_lcdc_clk);
+ cont_splash_clks_enabled = 1;
+ } else if (!enable && cont_splash_clks_enabled) {
+ clk_disable_unprepare(pixel_mdp_clk);
+ clk_disable_unprepare(pixel_lcdc_clk);
+ cont_splash_clks_enabled = 0;
+ }
+}
+
static int lcdc_probe(struct platform_device *pdev)
{
struct msm_fb_data_type *mfd;
@@ -199,6 +216,8 @@
if (!mdp_dev)
return -ENOMEM;
+ cont_splash_clk_ctrl(1);
+
/*
* link to the latest pdev
*/
diff --git a/drivers/video/msm/lcdc_truly_ips3p2335.c b/drivers/video/msm/lcdc_truly_ips3p2335.c
index a4a370e..b2f4ab8 100644
--- a/drivers/video/msm/lcdc_truly_ips3p2335.c
+++ b/drivers/video/msm/lcdc_truly_ips3p2335.c
@@ -148,6 +148,13 @@
static int lcdc_truly_panel_on(struct platform_device *pdev)
{
+ struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+
+ if (!mfd->cont_splash_done) {
+ mfd->cont_splash_done = 1;
+ return 0;
+ }
+
/* Configure reset GPIO that drives DAC */
if (lcdc_truly_pdata->panel_config_gpio)
lcdc_truly_pdata->panel_config_gpio(1);
diff --git a/drivers/video/msm/lvds.c b/drivers/video/msm/lvds.c
index f5d8201..2987e2f 100644
--- a/drivers/video/msm/lvds.c
+++ b/drivers/video/msm/lvds.c
@@ -92,11 +92,11 @@
MDP_OUTP(MDP_BASE + 0xc3064, 0x05);
MDP_OUTP(MDP_BASE + 0xc3050, 0x20);
} else {
- MDP_OUTP(MDP_BASE + 0xc3004, 0x62);
+ MDP_OUTP(MDP_BASE + 0xc3004, 0x8f);
MDP_OUTP(MDP_BASE + 0xc3008, 0x30);
- MDP_OUTP(MDP_BASE + 0xc300c, 0xc4);
+ MDP_OUTP(MDP_BASE + 0xc300c, 0xc6);
MDP_OUTP(MDP_BASE + 0xc3014, 0x10);
- MDP_OUTP(MDP_BASE + 0xc3018, 0x05);
+ MDP_OUTP(MDP_BASE + 0xc3018, 0x07);
MDP_OUTP(MDP_BASE + 0xc301c, 0x62);
MDP_OUTP(MDP_BASE + 0xc3020, 0x41);
MDP_OUTP(MDP_BASE + 0xc3024, 0x0d);
diff --git a/drivers/video/msm/lvds_chimei_wxga.c b/drivers/video/msm/lvds_chimei_wxga.c
index 9a385b9..39aa852 100644
--- a/drivers/video/msm/lvds_chimei_wxga.c
+++ b/drivers/video/msm/lvds_chimei_wxga.c
@@ -134,7 +134,7 @@
pinfo->wait_cycle = 0;
pinfo->bpp = 24;
pinfo->fb_num = 2;
- pinfo->clk_rate = 75000000;
+ pinfo->clk_rate = 79400000;
pinfo->bl_max = 255;
pinfo->bl_min = 1;
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 3c869cc..8e6f347 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -52,7 +52,7 @@
int mdp_iommu_split_domain;
static struct platform_device *mdp_init_pdev;
-static struct regulator *footswitch, *hdmi_pll_fs;
+static struct regulator *footswitch;
static unsigned int mdp_footswitch_on;
struct completion mdp_ppp_comp;
@@ -1450,7 +1450,6 @@
#endif
}
-static int mdp_clk_rate;
static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
static int pdev_list_cnt;
@@ -1458,6 +1457,68 @@
{
mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
+
+static int mdp_clk_rate;
+
+#ifdef CONFIG_FB_MSM_NO_MDP_PIPE_CTRL
+
+static void mdp_clk_off(void)
+{
+ mb();
+ vsync_clk_disable();
+
+ if (mdp_clk != NULL)
+ clk_disable_unprepare(mdp_clk);
+
+ if (mdp_pclk != NULL)
+ clk_disable_unprepare(mdp_pclk);
+
+ if (mdp_lut_clk != NULL)
+ clk_disable_unprepare(mdp_lut_clk);
+}
+
+
+static void mdp_clk_on(void)
+{
+ if (mdp_clk != NULL)
+ clk_prepare_enable(mdp_clk);
+
+ if (mdp_pclk != NULL)
+ clk_prepare_enable(mdp_pclk);
+
+ if (mdp_lut_clk != NULL)
+ clk_prepare_enable(mdp_lut_clk);
+
+ vsync_clk_enable();
+}
+
+void mdp_clk_ctrl(int on)
+{
+ static int mdp_clk_cnt;
+
+ mutex_lock(&mdp_suspend_mutex);
+ if (on) {
+ if (mdp_clk_cnt == 0)
+ mdp_clk_on();
+ mdp_clk_cnt++;
+ } else {
+ if (mdp_clk_cnt) {
+ mdp_clk_cnt--;
+ if (mdp_clk_cnt == 0)
+ mdp_clk_off();
+ }
+ }
+ mutex_unlock(&mdp_suspend_mutex);
+}
+
+
+
+void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
+ boolean isr)
+{
+ /* do nothing */
+}
+#else
void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
boolean isr)
{
@@ -1620,6 +1681,12 @@
}
}
+void mdp_clk_ctrl(int on)
+{
+ /* do nothing */
+}
+#endif
+
void mdp_histogram_handle_isr(struct mdp_hist_mgmt *mgmt)
{
uint32 isr, mask;
@@ -1933,6 +2000,15 @@
mdp_histogram_ctrl_all(FALSE);
+ if (mfd->panel.type == MIPI_CMD_PANEL)
+ mdp4_dsi_cmd_off(pdev);
+ else if (mfd->panel.type == MIPI_VIDEO_PANEL)
+ mdp4_dsi_video_off(pdev);
+ else if (mfd->panel.type == HDMI_PANEL ||
+ mfd->panel.type == LCDC_PANEL ||
+ mfd->panel.type == LVDS_PANEL)
+ mdp4_lcdc_off(pdev);
+
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
ret = panel_next_off(pdev);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
@@ -1942,35 +2018,47 @@
return ret;
}
+#ifdef CONFIG_FB_MSM_MDP303
+unsigned is_mdp4_hw_reset(void)
+{
+ return 0;
+}
+void mdp4_hw_init(void)
+{
+ /* empty */
+}
+#endif
+
static int mdp_on(struct platform_device *pdev)
{
int ret = 0;
-
#ifdef CONFIG_FB_MSM_MDP40
struct msm_fb_data_type *mfd;
mfd = platform_get_drvdata(pdev);
-
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- if (is_mdp4_hw_reset()) {
+ mdp_clk_ctrl(1);
+ mdp4_hw_init();
+ outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
+ if (mfd->panel.type == MIPI_CMD_PANEL) {
mdp_vsync_cfg_regs(mfd, FALSE);
- mdp4_hw_init();
- outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
- }
+ mdp4_dsi_cmd_on(pdev);
+ } else if (mfd->panel.type == MIPI_VIDEO_PANEL)
+ mdp4_dsi_video_on(pdev);
+ else if (mfd->panel.type == HDMI_PANEL ||
+ mfd->panel.type == LCDC_PANEL ||
+ mfd->panel.type == LVDS_PANEL)
+ mdp4_lcdc_on(pdev);
+
+ mdp_clk_ctrl(0);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#endif
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
ret = panel_next_on(pdev);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-#ifdef CONFIG_FB_MSM_MDP40
- if (mfd->panel.type == MIPI_CMD_PANEL)
- mdp4_dsi_cmd_overlay_restore();
- else if (mfd->panel.type == MDDI_PANEL)
- mdp4_mddi_overlay_restore();
-#endif
-
mdp_histogram_ctrl_all(TRUE);
return ret;
@@ -2132,27 +2220,10 @@
}
disable_irq(mdp_irq);
- hdmi_pll_fs = regulator_get(&pdev->dev, "hdmi_pll_fs");
- if (IS_ERR(hdmi_pll_fs)) {
- hdmi_pll_fs = NULL;
- } else {
- if (mdp_rev != MDP_REV_44) {
- ret = regulator_set_voltage(hdmi_pll_fs, 1800000,
- 1800000);
- if (ret) {
- pr_err("set_voltage failed for hdmi_pll_fs, ret=%d\n",
- ret);
- }
- }
- }
-
footswitch = regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(footswitch)) {
+ if (IS_ERR(footswitch))
footswitch = NULL;
- } else {
- if (hdmi_pll_fs)
- regulator_enable(hdmi_pll_fs);
-
+ else {
regulator_enable(footswitch);
mdp_footswitch_on = 1;
@@ -2161,8 +2232,6 @@
msleep(20);
regulator_enable(footswitch);
}
- if (hdmi_pll_fs)
- regulator_disable(hdmi_pll_fs);
}
mdp_clk = clk_get(&pdev->dev, "core_clk");
@@ -2255,6 +2324,8 @@
if (rc)
return rc;
+ mdp_clk_ctrl(1);
+
mdp_hw_version();
/* initializing mdp hw */
@@ -2262,12 +2333,15 @@
if (!(mdp_pdata->cont_splash_enabled))
mdp4_hw_init();
#else
- mdp_hw_init();
+ if (!(mdp_pdata->cont_splash_enabled))
+ mdp_hw_init();
#endif
#ifdef CONFIG_FB_MSM_OVERLAY
mdp_hw_cursor_init();
#endif
+ mdp_clk_ctrl(0);
+
mdp_resource_initialized = 1;
return 0;
}
@@ -2298,8 +2372,10 @@
if (mdp_pdata->cont_splash_enabled) {
mfd->cont_splash_done = 0;
if (!contSplash_update_done) {
- mdp_pipe_ctrl(MDP_CMD_BLOCK,
- MDP_BLOCK_POWER_ON, FALSE);
+ if (mfd->panel.type == MIPI_VIDEO_PANEL ||
+ mfd->panel.type == LCDC_PANEL)
+ mdp_pipe_ctrl(MDP_CMD_BLOCK,
+ MDP_BLOCK_POWER_ON, FALSE);
contSplash_update_done = 1;
}
} else
@@ -2342,6 +2418,8 @@
pdata->off = mdp_off;
pdata->next = pdev;
+ mdp_clk_ctrl(1);
+
mdp_prim_panel_type = mfd->panel.type;
switch (mfd->panel.type) {
case EXT_MDDI_PANEL:
@@ -2417,8 +2495,9 @@
#ifdef CONFIG_FB_MSM_MIPI_DSI
case MIPI_VIDEO_PANEL:
#ifndef CONFIG_FB_MSM_MDP303
- pdata->on = mdp4_dsi_video_on;
- pdata->off = mdp4_dsi_video_off;
+ mipi = &mfd->panel_info.mipi;
+ configure_mdp_core_clk_table((mipi->dsi_pclk_rate) * 23 / 20);
+ mdp4_dsi_vsync_init(0);
mfd->hw_refresh = TRUE;
mfd->dma_fnc = mdp4_dsi_video_overlay;
mfd->lut_update = mdp_lut_update_lcdc;
@@ -2461,6 +2540,7 @@
mfd->dma_fnc = mdp4_dsi_cmd_overlay;
mipi = &mfd->panel_info.mipi;
configure_mdp_core_clk_table((mipi->dsi_pclk_rate) * 3 / 2);
+ mdp4_dsi_rdptr_init(0);
if (mfd->panel_info.pdest == DISPLAY_1) {
if_no = PRIMARY_INTF_SEL;
mfd->dma = &dma2_data;
@@ -2481,6 +2561,7 @@
spin_unlock_irqrestore(&mdp_spin_lock, flag);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#else
+
mfd->dma_fnc = mdp_dma2_update;
mfd->do_histogram = mdp_do_histogram;
mfd->start_histogram = mdp_histogram_start;
@@ -2501,20 +2582,26 @@
#ifdef CONFIG_FB_MSM_DTV
case DTV_PANEL:
+ mdp4_dtv_vsync_init(0);
pdata->on = mdp4_dtv_on;
pdata->off = mdp4_dtv_off;
mfd->hw_refresh = TRUE;
mfd->cursor_update = mdp_hw_cursor_update;
mfd->dma_fnc = mdp4_dtv_overlay;
mfd->dma = &dma_e_data;
+ mfd->do_histogram = mdp_do_histogram;
+ mfd->start_histogram = mdp_histogram_start;
+ mfd->stop_histogram = mdp_histogram_stop;
mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
break;
#endif
case HDMI_PANEL:
case LCDC_PANEL:
case LVDS_PANEL:
+#ifdef CONFIG_FB_MSM_MDP303
pdata->on = mdp_lcdc_on;
pdata->off = mdp_lcdc_off;
+#endif
mfd->hw_refresh = TRUE;
#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDP40)
mfd->cursor_update = mdp_hw_cursor_sync_update;
@@ -2534,6 +2621,7 @@
#endif
#ifdef CONFIG_FB_MSM_MDP40
+ mdp4_lcdc_vsync_init(0);
configure_mdp_core_clk_table((mfd->panel_info.clk_rate)
* 23 / 20);
if (mfd->panel.type == HDMI_PANEL) {
@@ -2594,6 +2682,7 @@
default:
printk(KERN_ERR "mdp_probe: unknown device type!\n");
rc = -ENODEV;
+ mdp_clk_ctrl(0);
goto mdp_probe_err;
}
#ifdef CONFIG_FB_MSM_MDP40
@@ -2602,6 +2691,8 @@
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
#endif
+ mdp_clk_ctrl(0);
+
#ifdef CONFIG_MSM_BUS_SCALING
if (!mdp_bus_scale_handle && mdp_pdata &&
mdp_pdata->mdp_bus_scale_table) {
@@ -2655,9 +2746,6 @@
return;
}
- if (hdmi_pll_fs)
- regulator_enable(hdmi_pll_fs);
-
if (on && !mdp_footswitch_on) {
pr_debug("Enable MDP FS\n");
regulator_enable(footswitch);
@@ -2668,9 +2756,6 @@
mdp_footswitch_on = 0;
}
- if (hdmi_pll_fs)
- regulator_disable(hdmi_pll_fs);
-
mutex_unlock(&mdp_suspend_mutex);
}
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
index 3fd51ba..2411dca 100644
--- a/drivers/video/msm/mdp.h
+++ b/drivers/video/msm/mdp.h
@@ -88,6 +88,7 @@
extern struct mdp_ccs mdp_ccs_yuv2rgb ;
extern struct mdp_ccs mdp_ccs_rgb2yuv ;
extern unsigned char hdmi_prim_display;
+extern unsigned char hdmi_prim_resolution;
/*
* MDP Image Structure
@@ -276,12 +277,16 @@
#ifdef CONFIG_FB_MSM_MDP40
#define MDP_OVERLAY0_TERM 0x20
#define MDP_OVERLAY1_TERM 0x40
+#define MDP_DMAP_TERM MDP_DMA2_TERM /* dmap == dma2 */
+#define MDP_PRIM_VSYNC_TERM 0x100
+#define MDP_EXTER_VSYNC_TERM 0x200
+#define MDP_PRIM_RDPTR_TERM 0x400
#endif
#define MDP_OVERLAY2_TERM 0x80
-#define MDP_HISTOGRAM_TERM_DMA_P 0x100
-#define MDP_HISTOGRAM_TERM_DMA_S 0x200
-#define MDP_HISTOGRAM_TERM_VG_1 0x400
-#define MDP_HISTOGRAM_TERM_VG_2 0x800
+#define MDP_HISTOGRAM_TERM_DMA_P 0x10000
+#define MDP_HISTOGRAM_TERM_DMA_S 0x20000
+#define MDP_HISTOGRAM_TERM_VG_1 0x40000
+#define MDP_HISTOGRAM_TERM_VG_2 0x80000
#define ACTIVE_START_X_EN BIT(31)
#define ACTIVE_START_Y_EN BIT(31)
@@ -703,6 +708,7 @@
void mdp_hw_init(void);
int mdp_ppp_pipe_wait(void);
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd);
+void mdp_clk_ctrl(int on);
void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
boolean isr);
void mdp_set_dma_pan_info(struct fb_info *info, struct mdp_dirty_region *dirty,
@@ -750,6 +756,30 @@
int mdp_dsi_video_off(struct platform_device *pdev);
void mdp_dsi_video_update(struct msm_fb_data_type *mfd);
void mdp3_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd);
+static inline int mdp4_dsi_cmd_off(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline int mdp4_dsi_video_off(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline int mdp4_lcdc_off(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline int mdp4_dsi_cmd_on(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline int mdp4_dsi_video_on(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline int mdp4_lcdc_on(struct platform_device *pdev)
+{
+ return 0;
+}
#endif
void set_cont_splashScreen_status(int);
@@ -784,6 +814,8 @@
#endif
#ifdef MDP_HW_VSYNC
+void vsync_clk_enable(void);
+void vsync_clk_disable(void);
void mdp_hw_vsync_clk_enable(struct msm_fb_data_type *mfd);
void mdp_hw_vsync_clk_disable(struct msm_fb_data_type *mfd);
void mdp_vsync_clk_disable(void);
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index c9bdf27..72e7c8f 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -34,6 +34,7 @@
#define MDP4_VIDEO_BASE 0x20000
#define MDP4_VIDEO_OFF 0x10000
+#define MDP4_VIDEO_CSC_OFF 0x4000
#define MDP4_RGB_BASE 0x40000
#define MDP4_RGB_OFF 0x10000
@@ -120,7 +121,7 @@
#define INTR_PRIMARY_INTF_UDERRUN BIT(8)
#define INTR_EXTERNAL_VSYNC BIT(9)
#define INTR_EXTERNAL_INTF_UDERRUN BIT(10)
-#define INTR_PRIMARY_READ_PTR BIT(11)
+#define INTR_PRIMARY_RDPTR BIT(11) /* read pointer */
#define INTR_DMA_P_HISTOGRAM BIT(17)
#define INTR_DMA_S_HISTOGRAM BIT(26)
#define INTR_OVERLAY2_DONE BIT(30)
@@ -219,6 +220,7 @@
#define MDP4_OP_FLIP_UD BIT(14)
#define MDP4_OP_FLIP_LR BIT(13)
#define MDP4_OP_CSC_EN BIT(11)
+#define MDP4_OP_DST_DATA_YCBCR BIT(10)
#define MDP4_OP_SRC_DATA_YCBCR BIT(9)
#define MDP4_OP_SCALEY_FIR (0 << 4)
#define MDP4_OP_SCALEY_MN_PHASE (1 << 4)
@@ -258,6 +260,14 @@
u8 mark_unmap;
};
+#define IOMMU_FREE_LIST_MAX 32
+
+struct iommu_free_list {
+ int total;
+ int fndx;
+ struct ion_handle *ihdl[IOMMU_FREE_LIST_MAX];
+};
+
struct blend_cfg {
u32 op;
u32 bg_alpha;
@@ -337,20 +347,26 @@
uint32 element2; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
uint32 element1; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
uint32 element0; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
- struct completion comp;
ulong ov_blt_addr; /* blt mode addr */
ulong dma_blt_addr; /* blt mode addr */
ulong blt_base;
ulong blt_offset;
uint32 blt_cnt;
+ uint32 blt_changed;
uint32 ov_cnt;
uint32 dmap_cnt;
uint32 dmae_cnt;
uint32 blt_end;
+ uint32 blt_ov_koff;
+ uint32 blt_ov_done;
+ uint32 blt_dmap_koff;
+ uint32 blt_dmap_done;
uint32 luma_align_size;
- struct mdp4_hsic_regs hsic_regs;
- struct completion dmas_comp;
+ struct mdp_overlay_pp_params pp_cfg;
struct mdp_overlay req_data;
+ struct completion comp;
+ struct completion dmas_comp;
+ struct mdp4_iommu_pipe_info iommu;
};
struct mdp4_statistic {
@@ -366,7 +382,7 @@
ulong intr_vsync_e; /* external interface */
ulong intr_underrun_e; /* external interface */
ulong intr_histogram;
- ulong intr_rd_ptr;
+ ulong intr_rdptr;
ulong dsi_mdp_start;
ulong dsi_clk_on;
ulong dsi_clk_off;
@@ -387,7 +403,13 @@
ulong overlay_set[MDP4_MIXER_MAX];
ulong overlay_unset[MDP4_MIXER_MAX];
ulong overlay_play[MDP4_MIXER_MAX];
+ ulong overlay_commit[MDP4_MIXER_MAX];
ulong pipe[OVERLAY_PIPE_MAX];
+ ulong wait4vsync0;
+ ulong wait4vsync1;
+ ulong iommu_map;
+ ulong iommu_unmap;
+ ulong iommu_drop;
ulong dsi_clkoff;
ulong err_mixer;
ulong err_zorder;
@@ -399,6 +421,12 @@
ulong err_underflow;
};
+struct vsync_update {
+ int update_cnt; /* pipes to be updated */
+ struct completion vsync_comp;
+ struct mdp4_overlay_pipe plist[OVERLAY_PIPE_MAX];
+};
+
struct mdp4_overlay_pipe *mdp4_overlay_ndx2pipe(int ndx);
void mdp4_sw_reset(unsigned long bits);
void mdp4_display_intf_sel(int output, unsigned long intf);
@@ -434,8 +462,10 @@
uint32 mdp4_overlay_format(struct mdp4_overlay_pipe *pipe);
uint32 mdp4_overlay_unpack_pattern(struct mdp4_overlay_pipe *pipe);
uint32 mdp4_overlay_op_mode(struct mdp4_overlay_pipe *pipe);
-void mdp4_lcdc_base_swap(struct mdp4_overlay_pipe *pipe);
+void mdp4_lcdc_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd);
+
+
#ifdef CONFIG_FB_MSM_DTV
void mdp4_overlay_dtv_start(void);
void mdp4_overlay_dtv_ov_done_push(struct msm_fb_data_type *mfd,
@@ -446,9 +476,10 @@
struct mdp4_overlay_pipe *pipe);
int mdp4_overlay_dtv_unset(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe);
-void mdp4_dma_e_done_dtv(void);
-void mdp4_overlay_dtv_wait4vsync(void);
-void mdp4_dtv_base_swap(struct mdp4_overlay_pipe *pipe);
+void mdp4_dmae_done_dtv(void);
+void mdp4_dtv_wait4vsync(int cndx, long long *vtime);
+void mdp4_dtv_vsync_ctrl(int cndx, int enable);
+void mdp4_dtv_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
#else
static inline void mdp4_overlay_dtv_start(void)
{
@@ -475,11 +506,15 @@
return 0;
}
-static inline void mdp4_dma_e_done_dtv(void)
+static inline void mdp4_dmae_done_dtv(void)
{
/* empty */
}
-static inline void mdp4_overlay_dtv_wait4vsync(void)
+static inline void mdp4_dtv_wait4vsync(int cndx, long long *vtime)
+{
+ /* empty */
+}
+static inline void mdp4_dtv_vsync_ctrl(int cndx, int enable)
{
/* empty */
}
@@ -495,19 +530,10 @@
{
/* empty */
}
-#endif
+#endif /* CONFIG_FB_MSM_DTV */
void mdp4_dtv_set_black_screen(void);
-static inline int mdp4_overlay_borderfill_supported(void)
-{
- unsigned int mdp_hw_version;
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- mdp_hw_version = inpdw(MDP_BASE + 0x0); /* MDP_HW_VERSION */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- return (mdp_hw_version >= 0x0402030b);
-}
-
int mdp4_overlay_dtv_set(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe);
int mdp4_overlay_dtv_unset(struct msm_fb_data_type *mfd,
@@ -520,11 +546,20 @@
int mdp4_atv_off(struct platform_device *pdev);
void mdp4_dsi_video_fxn_register(cmd_fxn_t fxn);
void mdp4_dsi_video_overlay(struct msm_fb_data_type *mfd);
-int mdp4_dsi_video_on(struct platform_device *pdev);
-int mdp4_dsi_video_off(struct platform_device *pdev);
-void mdp4_overlay0_done_dsi_video(struct mdp_dma_data *dma);
-void mdp4_overlay0_done_dsi_cmd(struct mdp_dma_data *dma);
+void mdp4_lcdc_vsync_ctrl(int cndx, int enable);
+void mdp4_overlay0_done_dsi_video(int cndx);
+void mdp4_overlay0_done_dsi_cmd(int cndx);
+void mdp4_primary_rdptr(void);
void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd);
+int mdp4_overlay_commit(struct fb_info *info, int mixer);
+int mdp4_dsi_video_pipe_commit(void);
+int mdp4_dsi_cmd_pipe_commit(void);
+int mdp4_lcdc_pipe_commit(void);
+int mdp4_dtv_pipe_commit(void);
+void mdp4_dsi_rdptr_init(int cndx);
+void mdp4_dsi_vsync_init(int cndx);
+void mdp4_lcdc_vsync_init(int cndx);
+void mdp4_dtv_vsync_init(int cndx);
void mdp4_overlay_dsi_state_set(int state);
int mdp4_overlay_dsi_state_get(void);
void mdp4_overlay_rgb_setup(struct mdp4_overlay_pipe *pipe);
@@ -541,12 +576,20 @@
int mdp4_overlay_format2pipe(struct mdp4_overlay_pipe *pipe);
int mdp4_overlay_get(struct fb_info *info, struct mdp_overlay *req);
int mdp4_overlay_set(struct fb_info *info, struct mdp_overlay *req);
+int mdp4_overlay_wait4vsync(struct fb_info *info, long long *vtime);
+int mdp4_overlay_vsync_ctrl(struct fb_info *info, int enable);
int mdp4_overlay_unset(struct fb_info *info, int ndx);
int mdp4_overlay_unset_mixer(int mixer);
int mdp4_overlay_play_wait(struct fb_info *info,
struct msmfb_overlay_data *req);
int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req);
struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(int ptype, int mixer);
+void mdp4_overlay_dma_commit(int mixer);
+void mdp4_overlay_vsync_commit(struct mdp4_overlay_pipe *pipe);
+void mdp4_mixer_stage_commit(int mixer);
+void mdp4_dsi_cmd_do_update(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_lcdc_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_dtv_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
void mdp4_overlay_pipe_free(struct mdp4_overlay_pipe *pipe);
void mdp4_overlay_dmap_cfg(struct msm_fb_data_type *mfd, int lcdc);
void mdp4_overlay_dmap_xy(struct mdp4_overlay_pipe *pipe);
@@ -554,21 +597,20 @@
void mdp4_overlay_dmae_xy(struct mdp4_overlay_pipe *pipe);
int mdp4_overlay_pipe_staged(int mixer);
void mdp4_lcdc_primary_vsyn(void);
-void mdp4_overlay0_done_lcdc(struct mdp_dma_data *dma);
+void mdp4_overlay0_done_lcdc(int cndx);
void mdp4_overlay0_done_mddi(struct mdp_dma_data *dma);
-void mdp4_dma_s_done_mddi(void);
void mdp4_dma_p_done_mddi(struct mdp_dma_data *dma);
-void mdp4_dma_p_done_dsi(struct mdp_dma_data *dma);
-void mdp4_dma_p_done_dsi_video(struct mdp_dma_data *dma);
-void mdp4_dma_p_done_lcdc(void);
+void mdp4_dmap_done_dsi_cmd(int cndx);
+void mdp4_dmap_done_dsi_video(int cndx);
+void mdp4_dmap_done_lcdc(int cndx);
void mdp4_overlay1_done_dtv(void);
void mdp4_overlay1_done_atv(void);
void mdp4_primary_vsync_lcdc(void);
void mdp4_external_vsync_dtv(void);
-void mdp4_overlay_lcdc_wait4vsync(struct msm_fb_data_type *mfd);
-void mdp4_overlay_lcdc_start(void);
+void mdp4_lcdc_wait4vsync(int cndx, long long *vtime);
void mdp4_overlay_lcdc_vsync_push(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe);
+void mdp4_overlay_dtv_set_perf(struct msm_fb_data_type *mfd);
void mdp4_update_perf_level(u32 perf_level);
void mdp4_set_perf_level(void);
void mdp4_mddi_overlay_dmas_restore(void);
@@ -577,6 +619,11 @@
void mdp4_mddi_dma_busy_wait(struct msm_fb_data_type *mfd);
void mdp4_mddi_overlay_restore(void);
#else
+static inline void mdp4_mddi_kickoff_video(struct msm_fb_data_type *mfd,
+ struct mdp4_overlay_pipe *pipe)
+{
+ /* empty */
+}
static inline void mdp4_mddi_dma_busy_wait(struct msm_fb_data_type *mfd)
{
/* empty */
@@ -636,7 +683,7 @@
struct msmfb_overlay_blt *req);
int mdp4_dsi_video_overlay_blt_offset(struct msm_fb_data_type *mfd,
struct msmfb_overlay_blt *req);
-void mdp4_dsi_video_base_swap(struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_video_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
#ifdef CONFIG_FB_MSM_MDP40
static inline void mdp3_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
@@ -644,7 +691,7 @@
/* empty */
}
#endif
-#else
+#else /* CONFIG_FB_MSM_MIPI_DSI */
int mdp4_mddi_overlay_blt_offset(struct msm_fb_data_type *mfd,
struct msmfb_overlay_blt *req);
void mdp4_mddi_overlay_blt(struct msm_fb_data_type *mfd,
@@ -684,11 +731,12 @@
{
return -ENODEV;
}
-static inline void mdp4_dsi_video_base_swap(struct mdp4_overlay_pipe *pipe)
+static inline void mdp4_dsi_video_base_swap(int cndx,
+ struct mdp4_overlay_pipe *pipe)
{
/* empty */
}
-#endif
+#endif /* CONFIG_FB_MSM_MIPI_DSI */
void mdp4_lcdc_overlay_blt(struct msm_fb_data_type *mfd,
struct msmfb_overlay_blt *req);
@@ -712,51 +760,87 @@
void mdp4_dsi_cmd_dma_busy_check(void);
+
+
#ifdef CONFIG_FB_MSM_MIPI_DSI
-void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd);
-void mdp4_dsi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd);
-void mdp4_overlay_dsi_video_start(void);
-void mdp4_overlay_dsi_video_vsync_push(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe);
-void mdp4_dsi_cmd_overlay_restore(void);
void mdp_dsi_cmd_overlay_suspend(struct msm_fb_data_type *mfd);
+int mdp4_dsi_cmd_on(struct platform_device *pdev);
+int mdp4_dsi_cmd_off(struct platform_device *pdev);
+int mdp4_dsi_video_off(struct platform_device *pdev);
+int mdp4_dsi_video_on(struct platform_device *pdev);
+void mdp4_primary_vsync_dsi_video(void);
+void mdp4_dsi_cmd_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_cmd_wait4vsync(int cndx, long long *vtime);
+void mdp4_dsi_video_wait4vsync(int cndx, long long *vtime);
+void mdp4_dsi_cmd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_video_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_cmd_vsync_ctrl(int cndx, int enable);
+void mdp4_dsi_video_vsync_ctrl(int cndx, int enable);
#ifdef CONFIG_FB_MSM_MDP303
static inline void mdp4_dsi_cmd_del_timer(void)
{
/* empty */
}
-#else
+#else /* CONFIG_FB_MSM_MIPI_DSI */
void mdp4_dsi_cmd_del_timer(void);
#endif
-#else
-static inline void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
+#else /* CONFIG_FB_MSM_MIPI_DSI */
+
+static inline int mdp4_dsi_cmd_on(struct platform_device *pdev)
{
- /* empty */
+ return 0;
}
-static inline void mdp4_dsi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd)
+static inline int mdp4_dsi_cmd_off(struct platform_device *pdev)
{
- /* empty */
+ return 0;
}
+static inline int mdp4_dsi_video_on(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline int mdp4_dsi_video_off(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline void mdp4_primary_vsync_dsi_video(void)
+{
+}
+static inline void mdp4_dsi_cmd_base_swap(int cndx,
+ struct mdp4_overlay_pipe *pipe)
+{
+}
+static inline void mdp4_dsi_cmd_wait4vsync(int cndx, long long *vtime)
+{
+}
+static inline void mdp4_dsi_video_wait4vsync(int cndx, long long *vtime)
+{
+}
+static inline void mdp4_dsi_cmd_pipe_queue(int cndx,
+ struct mdp4_overlay_pipe *pipe)
+{
+}
+static inline void mdp4_dsi_video_pipe_queue(int cndx,
+ struct mdp4_overlay_pipe *pipe)
+{
+}
+static inline void mdp4_dsi_cmd_vsync_ctrl(int cndx, int enable)
+{
+}
+static inline void mdp4_dsi_video_vsync_ctrl(int cndx, int enable)
+{
+}
+
static inline void mdp4_overlay_dsi_video_start(void)
{
/* empty */
}
-static inline void mdp4_overlay_dsi_video_vsync_push(
- struct msm_fb_data_type *mfd, struct mdp4_overlay_pipe *pipe)
-{
- /* empty */
-}
-static inline void mdp4_dsi_cmd_overlay_restore(void)
-{
- /* empty */
-}
#ifdef CONFIG_FB_MSM_MDP40
static inline void mdp_dsi_cmd_overlay_suspend(struct msm_fb_data_type *mfd)
{
/* empty */
}
#endif
-#endif /* MIPI_DSI */
+#endif /* CONFIG_FB_MSM_MIPI_DSI */
void mdp4_dsi_cmd_kickoff_ui(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe);
@@ -764,8 +848,6 @@
struct mdp4_overlay_pipe *pipe);
void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe);
-void mdp4_dsi_cmd_base_swap(struct mdp4_overlay_pipe *pipe);
-
void mdp4_overlay_panel_3d(int mixer_num, uint32 panel_3d);
int mdp4_overlay_3d_sbys(struct fb_info *info, struct msmfb_overlay_3d *req);
void mdp4_dsi_cmd_3d_sbys(struct msm_fb_data_type *mfd,
@@ -773,6 +855,9 @@
void mdp4_dsi_video_3d_sbys(struct msm_fb_data_type *mfd,
struct msmfb_overlay_3d *r3d);
+void mdp4_backlight_init(int cndx);
+void mdp4_backlight_put_level(int cndx, int level);
+
int mdp4_mixer_info(int mixer_num, struct mdp_mixer_info *info);
void mdp_dmap_vsync_set(int enable);
@@ -782,11 +867,18 @@
int mdp4_mddi_overlay_cursor(struct fb_info *info, struct fb_cursor *cursor);
int mdp_ppp_blit(struct fb_info *info, struct mdp_blit_req *req);
void mdp4_overlay_resource_release(void);
-void mdp4_overlay_dsi_video_wait4vsync(struct msm_fb_data_type *mfd);
-void mdp4_primary_vsync_dsi_video(void);
uint32_t mdp4_ss_table_value(int8_t param, int8_t index);
void mdp4_overlay_borderfill_stage_down(struct mdp4_overlay_pipe *pipe);
+#ifdef CONFIG_FB_MSM_MDP303
+static inline int mdp4_overlay_borderfill_supported(void)
+{
+ return 0;
+}
+#else
+int mdp4_overlay_borderfill_supported(void);
+#endif
+
int mdp4_overlay_writeback_on(struct platform_device *pdev);
int mdp4_overlay_writeback_off(struct platform_device *pdev);
void mdp4_writeback_overlay(struct msm_fb_data_type *mfd);
@@ -808,7 +900,6 @@
uint32_t mdp_block2base(uint32_t block);
int mdp_hist_lut_config(struct mdp_hist_lut_data *data);
-void mdp4_hsic_set(struct mdp4_overlay_pipe *pipe, struct dpp_ctrl *ctrl);
void mdp4_hsic_update(struct mdp4_overlay_pipe *pipe);
int mdp4_csc_config(struct mdp_csc_cfg_data *config);
void mdp4_csc_write(struct mdp_csc_cfg *data, uint32_t base);
@@ -816,11 +907,16 @@
int mdp4_pcc_cfg(struct mdp_pcc_cfg_data *cfg_ptr);
int mdp4_argc_cfg(struct mdp_pgc_lut_data *pgc_ptr);
int mdp4_qseed_cfg(struct mdp_qseed_cfg_data *cfg);
+int mdp4_qseed_access_cfg(struct mdp_qseed_cfg *cfg, uint32_t base);
u32 mdp4_allocate_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num);
void mdp4_init_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num);
void mdp4_free_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num);
int mdp4_igc_lut_config(struct mdp_igc_lut_data *cfg);
+void mdp4_overlay_iommu_pipe_free(int ndx, int all);
+void mdp4_overlay_iommu_free_list(int mixer, struct ion_handle *ihdl);
+void mdp4_overlay_iommu_unmap_freelist(int mixer);
+void mdp4_overlay_iommu_vsync_cnt(void);
void mdp4_iommu_unmap(struct mdp4_overlay_pipe *pipe);
void mdp4_iommu_attach(void);
int mdp4_v4l2_overlay_set(struct fb_info *info, struct mdp_overlay *req,
diff --git a/drivers/video/msm/mdp4_hsic.c b/drivers/video/msm/mdp4_hsic.c
deleted file mode 100644
index 5735f45..0000000
--- a/drivers/video/msm/mdp4_hsic.c
+++ /dev/null
@@ -1,534 +0,0 @@
-/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/msm_mdp.h>
-#include "mdp.h"
-#include "mdp4.h"
-
-/* Definitions */
-#define MDP4_CSC_MV_OFF 0x4400
-#define MDP4_CSC_PRE_BV_OFF 0x4500
-#define MDP4_CSC_POST_BV_OFF 0x4580
-#define MDP4_CSC_PRE_LV_OFF 0x4600
-#define MDP4_CSC_POST_LV_OFF 0x4680
-#define MDP_VG1_BASE (MDP_BASE + MDP4_VIDEO_BASE)
-
-#define MDP_VG1_CSC_MVn(n) (MDP_VG1_BASE + MDP4_CSC_MV_OFF + 4 * (n))
-#define MDP_VG1_CSC_PRE_LVn(n) (MDP_VG1_BASE + MDP4_CSC_PRE_LV_OFF + 4 * (n))
-#define MDP_VG1_CSC_POST_LVn(n) (MDP_VG1_BASE + MDP4_CSC_POST_LV_OFF + 4 * (n))
-#define MDP_VG1_CSC_PRE_BVn(n) (MDP_VG1_BASE + MDP4_CSC_PRE_BV_OFF + 4 * (n))
-#define MDP_VG1_CSC_POST_BVn(n) (MDP_VG1_BASE + MDP4_CSC_POST_BV_OFF + 4 * (n))
-
-#define Q16 (16)
-#define Q16_ONE (1 << Q16)
-
-#define Q16_VALUE(x) ((int32_t)((uint32_t)x << Q16))
-#define Q16_PERCENT_VALUE(x, n) ((int32_t)( \
- div_s64(((int64_t)x * (int64_t)Q16_ONE), n)))
-
-#define Q16_WHOLE(x) ((int32_t)(x >> 16))
-#define Q16_FRAC(x) ((int32_t)(x & 0xFFFF))
-#define Q16_S1Q16_MUL(x, y) (((x >> 1) * (y >> 1)) >> 14)
-
-#define Q16_MUL(x, y) ((int32_t)((((int64_t)x) * ((int64_t)y)) >> Q16))
-#define Q16_NEGATE(x) (0 - (x))
-
-/*
- * HSIC Control min/max values
- * These settings are based on the maximum/minimum allowed modifications to
- * HSIC controls for layer and display color. Allowing too much variation in
- * the CSC block will result in color clipping resulting in unwanted color
- * shifts.
- */
-#define TRIG_MAX Q16_VALUE(128)
-#define CON_SAT_MAX Q16_VALUE(128)
-#define INTENSITY_MAX (Q16_VALUE(2047) >> 12)
-
-#define HUE_MAX Q16_VALUE(100)
-#define HUE_MIN Q16_VALUE(-100)
-#define HUE_DEF Q16_VALUE(0)
-
-#define SAT_MAX Q16_VALUE(100)
-#define SAT_MIN Q16_VALUE(-100)
-#define SAT_DEF CON_SAT_MAX
-
-#define CON_MAX Q16_VALUE(100)
-#define CON_MIN Q16_VALUE(-100)
-#define CON_DEF CON_SAT_MAX
-
-#define INTEN_MAX Q16_VALUE(100)
-#define INTEN_MIN Q16_VALUE(-100)
-#define INTEN_DEF Q16_VALUE(0)
-
-enum {
- DIRTY,
- GENERATED,
- CLEAN
-};
-
-/* local vars*/
-static int32_t csc_matrix_tab[3][3] = {
- {0x00012a00, 0x00000000, 0x00019880},
- {0x00012a00, 0xffff9b80, 0xffff3000},
- {0x00012a00, 0x00020480, 0x00000000}
-};
-
-static int32_t csc_yuv2rgb_conv_tab[3][3] = {
- {0x00010000, 0x00000000, 0x000123cb},
- {0x00010000, 0xffff9af9, 0xffff6b5e},
- {0x00010000, 0x00020838, 0x00000000}
-};
-
-static int32_t csc_rgb2yuv_conv_tab[3][3] = {
- {0x00004c8b, 0x00009645, 0x00001d2f},
- {0xffffda56, 0xffffb60e, 0x00006f9d},
- {0x00009d70, 0xffff7c2a, 0xffffe666}
-};
-
-static uint32_t csc_pre_bv_tab[3] = {0xfffff800, 0xffffc000, 0xffffc000};
-static uint32_t csc_post_bv_tab[3] = {0x00000000, 0x00000000, 0x00000000};
-
-static uint32_t csc_pre_lv_tab[6] = {0x00000000, 0x00007f80, 0x00000000,
- 0x00007f80, 0x00000000, 0x00007f80};
-static uint32_t csc_post_lv_tab[6] = {0x00000000, 0x00007f80, 0x00000000,
- 0x00007f80, 0x00000000, 0x00007f80};
-
-/* Lookup table for Sin/Cos lookup - Q16*/
-static const int32_t trig_lut[65] = {
- 0x00000000, /* sin((2*M_PI/256) * 0x00);*/
- 0x00000648, /* sin((2*M_PI/256) * 0x01);*/
- 0x00000C90, /* sin((2*M_PI/256) * 0x02);*/
- 0x000012D5,
- 0x00001918,
- 0x00001F56,
- 0x00002590,
- 0x00002BC4,
- 0x000031F1,
- 0x00003817,
- 0x00003E34,
- 0x00004447,
- 0x00004A50,
- 0x0000504D,
- 0x0000563E,
- 0x00005C22,
- 0x000061F8,
- 0x000067BE,
- 0x00006D74,
- 0x0000731A,
- 0x000078AD,
- 0x00007E2F,
- 0x0000839C,
- 0x000088F6,
- 0x00008E3A,
- 0x00009368,
- 0x00009880,
- 0x00009D80,
- 0x0000A268,
- 0x0000A736,
- 0x0000ABEB,
- 0x0000B086,
- 0x0000B505,
- 0x0000B968,
- 0x0000BDAF,
- 0x0000C1D8,
- 0x0000C5E4,
- 0x0000C9D1,
- 0x0000CD9F,
- 0x0000D14D,
- 0x0000D4DB,
- 0x0000D848,
- 0x0000DB94,
- 0x0000DEBE,
- 0x0000E1C6,
- 0x0000E4AA,
- 0x0000E768,
- 0x0000EA0A,
- 0x0000EC83,
- 0x0000EED9,
- 0x0000F109,
- 0x0000F314,
- 0x0000F4FA,
- 0x0000F6BA,
- 0x0000F854,
- 0x0000F9C8,
- 0x0000FB15,
- 0x0000FC3B,
- 0x0000FD3B,
- 0x0000FE13,
- 0x0000FEC4,
- 0x0000FF4E,
- 0x0000FFB1,
- 0x0000FFEC,
- 0x00010000, /* sin((2*M_PI/256) * 0x40);*/
-};
-
-void trig_values_q16(int32_t deg, int32_t *cos, int32_t *sin)
-{
- int32_t angle;
- int32_t quad, anglei, anglef;
- int32_t v0 = 0, v1 = 0;
- int32_t t1, t2;
-
- /*
- * Scale the angle so that 256 is one complete revolution and mask it
- * to this domain
- * NOTE: 0xB60B == 256/360
- */
- angle = Q16_MUL(deg, 0xB60B) & 0x00FFFFFF;
-
- /* Obtain a quadrant number, integer, and fractional part */
- quad = angle >> 22;
- anglei = (angle >> 16) & 0x3F;
- anglef = angle & 0xFFFF;
-
- /*
- * Using the integer part, obtain the lookup table entry and its
- * complement. Using the quadrant, swap and negate these as
- * necessary.
- * (The values and all derivatives of sine and cosine functions
- * can be derived from these values)
- */
- switch (quad) {
- case 0x0:
- v0 += trig_lut[anglei];
- v1 += trig_lut[0x40-anglei];
- break;
-
- case 0x1:
- v0 += trig_lut[0x40-anglei];
- v1 -= trig_lut[anglei];
- break;
-
- case 0x2:
- v0 -= trig_lut[anglei];
- v1 -= trig_lut[0x40-anglei];
- break;
-
- case 0x3:
- v0 -= trig_lut[0x40-anglei];
- v1 += trig_lut[anglei];
- break;
- }
-
- /*
- * Multiply the fractional part by 2*PI/256 to move it from lookup
- * table units to radians, giving us the coefficient for first
- * derivatives.
- */
- t1 = Q16_S1Q16_MUL(anglef, 0x0648);
-
- /*
- * Square this and divide by 2 to get the coefficient for second
- * derivatives
- */
- t2 = Q16_S1Q16_MUL(t1, t1) >> 1;
-
- *sin = v0 + Q16_S1Q16_MUL(v1, t1) - Q16_S1Q16_MUL(v0, t2);
-
- *cos = v1 - Q16_S1Q16_MUL(v0, t1) - Q16_S1Q16_MUL(v1, t2);
-}
-
-/* Convert input Q16 value to s4.9 */
-int16_t convert_q16_s49(int32_t q16Value)
-{ /* Top half is the whole number, Bottom half is fractional portion*/
- int16_t whole = Q16_WHOLE(q16Value);
- int32_t fraction = Q16_FRAC(q16Value);
-
- /* Clamp whole to 3 bits */
- if (whole > 7)
- whole = 7;
- else if (whole < -7)
- whole = -7;
-
- /* Reduce fraction to 9 bits. */
- fraction = (fraction<<9)>>Q16;
-
- return (int16_t) ((int16_t)whole<<9) | ((int16_t)fraction);
-}
-
-/* Convert input Q16 value to uint16 */
-int16_t convert_q16_int16(int32_t val)
-{
- int32_t rounded;
-
- if (val >= 0) {
- /* Add 0.5 */
- rounded = val + (Q16_ONE>>1);
- } else {
- /* Subtract 0.5 */
- rounded = val - (Q16_ONE>>1);
- }
-
- /* Truncate rounded value */
- return (int16_t)(rounded>>Q16);
-}
-
-/*
- * norm_q16
- * Return a Q16 value represeting a normalized value
- *
- * value -100% 0% +100%
- * |-----------------|----------------|
- * ^ ^ ^
- * q16MinValue q16DefaultValue q16MaxValue
- *
- */
-int32_t norm_q16(int32_t value, int32_t min, int32_t default_val, int32_t max,
- int32_t range)
-{
- int32_t diff, perc, mul, result;
-
- if (0 == value) {
- result = default_val;
- } else if (value > 0) {
- /* value is between 0% and +100% represent 1.0 -> QRange Max */
- diff = range;
- perc = Q16_PERCENT_VALUE(value, max);
- mul = Q16_MUL(perc, diff);
- result = default_val + mul;
- } else {
- /* if (value <= 0) */
- diff = -range;
- perc = Q16_PERCENT_VALUE(-value, -min);
- mul = Q16_MUL(perc, diff);
- result = default_val + mul;
- }
- return result;
-}
-
-void matrix_mul_3x3(int32_t dest[][3], int32_t a[][3], int32_t b[][3])
-{
- int32_t i, j, k;
- int32_t tmp[3][3];
-
- for (i = 0; i < 3; i++) {
- for (j = 0; j < 3; j++) {
- tmp[i][j] = 0;
- for (k = 0; k < 3; k++)
- tmp[i][j] += Q16_MUL(a[i][k], b[k][j]);
- }
- }
-
- /* in case dest = a or b*/
- for (i = 0; i < 3; i++) {
- for (j = 0; j < 3; j++)
- dest[i][j] = tmp[i][j];
- }
-}
-
-#define CONVERT(x) (x)/*convert_q16_s49((x))*/
-void pr_params(struct mdp4_hsic_regs *regs)
-{
- int i;
- if (regs) {
- for (i = 0; i < NUM_HSIC_PARAM; i++) {
- pr_info("\t: hsic->params[%d] = 0x%08x [raw = 0x%08x]\n",
- i, CONVERT(regs->params[i]), regs->params[i]);
- }
- }
-}
-
-void pr_3x3_matrix(int32_t in[][3])
-{
- pr_info("\t[0x%08x\t0x%08x\t0x%08x]\n", CONVERT(in[0][0]),
- CONVERT(in[0][1]), CONVERT(in[0][2]));
- pr_info("\t[0x%08x\t0x%08x\t0x%08x]\n", CONVERT(in[1][0]),
- CONVERT(in[1][1]), CONVERT(in[1][2]));
- pr_info("\t[0x%08x\t0x%08x\t0x%08x]\n", CONVERT(in[2][0]),
- CONVERT(in[2][1]), CONVERT(in[2][2]));
-}
-
-void _hsic_get(struct mdp4_hsic_regs *regs, int32_t type, int8_t *val)
-{
- if (type < 0 || type >= NUM_HSIC_PARAM)
- BUG_ON(-EINVAL);
- *val = regs->params[type];
- pr_info("%s: getting params[%d] = %d\n", __func__, type, *val);
-}
-
-void _hsic_set(struct mdp4_hsic_regs *regs, int32_t type, int8_t val)
-{
- if (type < 0 || type >= NUM_HSIC_PARAM)
- BUG_ON(-EINVAL);
-
- if (regs->params[type] != Q16_VALUE(val)) {
- regs->params[type] = Q16_VALUE(val);
- regs->dirty = DIRTY;
- }
-}
-
-void _hsic_generate_csc_matrix(struct mdp4_overlay_pipe *pipe)
-{
- int i, j;
- int32_t sin, cos;
-
- int32_t hue_matrix[3][3];
- int32_t con_sat_matrix[3][3];
- struct mdp4_hsic_regs *regs = &(pipe->hsic_regs);
-
- memset(con_sat_matrix, 0x0, sizeof(con_sat_matrix));
- memset(hue_matrix, 0x0, sizeof(hue_matrix));
-
- /*
- * HSIC control require matrix multiplication of these two tables
- * [T 0 0][1 0 0] T = Contrast C=Cos(Hue)
- * [0 S 0][0 C -N] S = Saturation N=Sin(Hue)
- * [0 0 S][0 N C]
- */
-
- con_sat_matrix[0][0] = norm_q16(regs->params[HSIC_CON], CON_MIN,
- CON_DEF, CON_MAX, CON_SAT_MAX);
- con_sat_matrix[1][1] = norm_q16(regs->params[HSIC_SAT], SAT_MIN,
- SAT_DEF, SAT_MAX, CON_SAT_MAX);
- con_sat_matrix[2][2] = con_sat_matrix[1][1];
-
- hue_matrix[0][0] = TRIG_MAX;
-
- trig_values_q16(norm_q16(regs->params[HSIC_HUE], HUE_MIN, HUE_DEF,
- HUE_MAX, TRIG_MAX), &cos, &sin);
-
- cos = Q16_MUL(cos, TRIG_MAX);
- sin = Q16_MUL(sin, TRIG_MAX);
-
- hue_matrix[1][1] = cos;
- hue_matrix[2][2] = cos;
- hue_matrix[2][1] = sin;
- hue_matrix[1][2] = Q16_NEGATE(sin);
-
- /* Generate YUV CSC matrix */
- matrix_mul_3x3(regs->conv_matrix, con_sat_matrix, hue_matrix);
-
- if (!(pipe->op_mode & MDP4_OP_SRC_DATA_YCBCR)) {
- /* Convert input RGB to YUV then apply CSC matrix */
- pr_info("Pipe %d, has RGB input\n", pipe->pipe_num);
- matrix_mul_3x3(regs->conv_matrix, regs->conv_matrix,
- csc_rgb2yuv_conv_tab);
- }
-
- /* Normalize the matrix */
- for (i = 0; i < 3; i++) {
- for (j = 0; j < 3; j++)
- regs->conv_matrix[i][j] = (regs->conv_matrix[i][j]>>14);
- }
-
- /* Multiply above result by current csc table */
- matrix_mul_3x3(regs->conv_matrix, regs->conv_matrix, csc_matrix_tab);
-
- if (!(pipe->op_mode & MDP4_OP_SRC_DATA_YCBCR)) {
- /*HACK:only "works"for src side*/
- /* Convert back to RGB */
- pr_info("Pipe %d, has RGB output\n", pipe->pipe_num);
- matrix_mul_3x3(regs->conv_matrix, csc_yuv2rgb_conv_tab,
- regs->conv_matrix);
- }
-
- /* Update clamps pre and post. */
- /* TODO: different tables for different color formats? */
- for (i = 0; i < 6; i++) {
- regs->pre_limit[i] = csc_pre_lv_tab[i];
- regs->post_limit[i] = csc_post_lv_tab[i];
- }
-
- /* update bias values, pre and post */
- for (i = 0; i < 3; i++) {
- regs->pre_bias[i] = csc_pre_bv_tab[i];
- regs->post_bias[i] = csc_post_bv_tab[i] +
- norm_q16(regs->params[HSIC_INT],
- INTEN_MIN, INTEN_DEF, INTEN_MAX, INTENSITY_MAX);
- }
-
- regs->dirty = GENERATED;
-}
-
-void _hsic_update_mdp(struct mdp4_overlay_pipe *pipe)
-{
- struct mdp4_hsic_regs *regs = &(pipe->hsic_regs);
- int i, j, k;
-
- uint32_t *csc_mv;
- uint32_t *pre_lv;
- uint32_t *post_lv;
- uint32_t *pre_bv;
- uint32_t *post_bv;
-
- switch (pipe->pipe_num) {
- case OVERLAY_PIPE_VG2:
- csc_mv = (uint32_t *) (MDP_VG1_CSC_MVn(0) +
- MDP4_VIDEO_OFF);
- pre_lv = (uint32_t *) (MDP_VG1_CSC_PRE_LVn(0) +
- MDP4_VIDEO_OFF);
- post_lv = (uint32_t *) (MDP_VG1_CSC_POST_LVn(0) +
- MDP4_VIDEO_OFF);
- pre_bv = (uint32_t *) (MDP_VG1_CSC_PRE_BVn(0) +
- MDP4_VIDEO_OFF);
- post_bv = (uint32_t *) (MDP_VG1_CSC_POST_BVn(0) +
- MDP4_VIDEO_OFF);
- break;
- case OVERLAY_PIPE_VG1:
- default:
- csc_mv = (uint32_t *) MDP_VG1_CSC_MVn(0);
- pre_lv = (uint32_t *) MDP_VG1_CSC_PRE_LVn(0);
- post_lv = (uint32_t *) MDP_VG1_CSC_POST_LVn(0);
- pre_bv = (uint32_t *) MDP_VG1_CSC_PRE_BVn(0);
- post_bv = (uint32_t *) MDP_VG1_CSC_POST_BVn(0);
- break;
- }
-
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- for (i = 0; i < 3; i++) {
- for (j = 0; j < 3; j++) {
- k = (3*i) + j;
- MDP_OUTP(csc_mv + k, convert_q16_s49(
- regs->conv_matrix[i][j]));
- }
- }
-
- for (i = 0; i < 6; i++) {
- MDP_OUTP(pre_lv + i, convert_q16_s49(regs->pre_limit[i]));
- MDP_OUTP(post_lv + i, convert_q16_s49(regs->post_limit[i]));
- }
-
- for (i = 0; i < 3; i++) {
- MDP_OUTP(pre_bv + i, convert_q16_s49(regs->pre_bias[i]));
- MDP_OUTP(post_bv + i, convert_q16_s49(regs->post_bias[i]));
- }
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-
- regs->dirty = CLEAN;
-}
-
-void mdp4_hsic_get(struct mdp4_overlay_pipe *pipe, struct dpp_ctrl *ctrl)
-{
- int i;
- for (i = 0; i < NUM_HSIC_PARAM; i++)
- _hsic_get(&(pipe->hsic_regs), i, &(ctrl->hsic_params[i]));
-}
-
-void mdp4_hsic_set(struct mdp4_overlay_pipe *pipe, struct dpp_ctrl *ctrl)
-{
- int i;
- for (i = 0; i < NUM_HSIC_PARAM; i++)
- _hsic_set(&(pipe->hsic_regs), i, ctrl->hsic_params[i]);
-
- if (pipe->hsic_regs.dirty == DIRTY)
- _hsic_generate_csc_matrix(pipe);
-}
-
-void mdp4_hsic_update(struct mdp4_overlay_pipe *pipe)
-{
- if (pipe->hsic_regs.dirty == GENERATED)
- _hsic_update_mdp(pipe);
-}
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index d045e69..413b239 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -50,6 +50,7 @@
struct blend_cfg blend[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
uint32 mixer_cfg[MDP4_MIXER_MAX];
uint32 flush[MDP4_MIXER_MAX];
+ struct iommu_free_list iommu_free[MDP4_MIXER_MAX];
uint32 cs_controller;
uint32 hw_version;
uint32 panel_3d;
@@ -101,17 +102,104 @@
},
};
+static DEFINE_MUTEX(iommu_mutex);
static struct mdp4_overlay_ctrl *ctrl = &mdp4_overlay_db;
static int new_perf_level;
static struct ion_client *display_iclient;
-static struct mdp4_iommu_pipe_info mdp_iommu[MDP4_MIXER_MAX][OVERLAY_PIPE_MAX];
+
+
+/*
+ * mdp4_overlay_iommu_unmap_freelist()
+ * mdp4_overlay_iommu_2freelist()
+ * mdp4_overlay_iommu_pipe_free()
+ * above three functions need to be called from the same thread and
+ * in order so that no mutexes are needed.
+ */
+void mdp4_overlay_iommu_unmap_freelist(int mixer)
+{
+ int i;
+ struct ion_handle *ihdl;
+ struct iommu_free_list *flist;
+
+ mutex_lock(&iommu_mutex);
+ flist = &ctrl->iommu_free[mixer];
+ if (flist->total == 0) {
+ mutex_unlock(&iommu_mutex);
+ return;
+ }
+ for (i = 0; i < IOMMU_FREE_LIST_MAX; i++) {
+ ihdl = flist->ihdl[i];
+ if (ihdl == NULL)
+ continue;
+ pr_debug("%s: mixer=%d i=%d ihdl=0x%p\n", __func__,
+ mixer, i, ihdl);
+ ion_unmap_iommu(display_iclient, ihdl, DISPLAY_READ_DOMAIN,
+ GEN_POOL);
+ mdp4_stat.iommu_unmap++;
+ ion_free(display_iclient, ihdl);
+ flist->ihdl[i] = NULL;
+ }
+
+ flist->fndx = 0;
+ flist->total = 0;
+ mutex_unlock(&iommu_mutex);
+}
+
+void mdp4_overlay_iommu_2freelist(int mixer, struct ion_handle *ihdl)
+{
+	struct iommu_free_list *flist;
+
+	flist = &ctrl->iommu_free[mixer];
+	if (flist->fndx >= IOMMU_FREE_LIST_MAX) {
+		pr_err("%s: Error, mixer=%d iommu fndx=%d\n",
+			__func__, mixer, flist->fndx);
+		mdp4_stat.iommu_drop++;
+		/* caller holds iommu_mutex; it will unlock — do not unlock here */
+		return;
+	}
+
+	pr_debug("%s: add mixer=%d fndx=%d ihdl=0x%p\n", __func__,
+		mixer, flist->fndx, ihdl);
+
+	flist->total++;
+	flist->ihdl[flist->fndx++] = ihdl;
+}
+
+void mdp4_overlay_iommu_pipe_free(int ndx, int all)
+{
+ struct mdp4_overlay_pipe *pipe;
+ struct mdp4_iommu_pipe_info *iom;
+ int plane, mixer;
+
+ pipe = mdp4_overlay_ndx2pipe(ndx);
+ if (pipe == NULL)
+ return;
+
+ mutex_lock(&iommu_mutex);
+ mixer = pipe->mixer_num;
+ iom = &pipe->iommu;
+ pr_debug("%s: mixer=%d ndx=%d all=%d\n", __func__,
+ mixer, pipe->pipe_ndx, all);
+ for (plane = 0; plane < MDP4_MAX_PLANE; plane++) {
+ if (iom->prev_ihdl[plane]) {
+ mdp4_overlay_iommu_2freelist(mixer,
+ iom->prev_ihdl[plane]);
+ iom->prev_ihdl[plane] = NULL;
+ }
+ if (all && iom->ihdl[plane]) {
+ mdp4_overlay_iommu_2freelist(mixer, iom->ihdl[plane]);
+ iom->ihdl[plane] = NULL;
+ }
+ }
+ mutex_unlock(&iommu_mutex);
+}
int mdp4_overlay_iommu_map_buf(int mem_id,
struct mdp4_overlay_pipe *pipe, unsigned int plane,
unsigned long *start, unsigned long *len,
struct ion_handle **srcp_ihdl)
{
- struct mdp4_iommu_pipe_info *iom_pipe_info;
+ struct mdp4_iommu_pipe_info *iom;
if (!display_iclient)
return -EINVAL;
@@ -133,30 +221,21 @@
return -EINVAL;
}
- iom_pipe_info = &mdp_iommu[pipe->mixer_num][pipe->pipe_ndx - 1];
- if (!iom_pipe_info->ihdl[plane]) {
- iom_pipe_info->ihdl[plane] = *srcp_ihdl;
- } else {
- if (iom_pipe_info->prev_ihdl[plane]) {
- ion_unmap_iommu(display_iclient,
- iom_pipe_info->prev_ihdl[plane],
- DISPLAY_READ_DOMAIN, GEN_POOL);
- ion_free(display_iclient,
- iom_pipe_info->prev_ihdl[plane]);
- pr_debug("Previous: mixer %u, pipe %u, plane %u, "
- "prev_ihdl %p\n", pipe->mixer_num,
- pipe->pipe_ndx, plane,
- iom_pipe_info->prev_ihdl[plane]);
- }
+ mutex_lock(&iommu_mutex);
+ mdp4_stat.iommu_map++;
+ iom = &pipe->iommu;
+ iom->prev_ihdl[plane] = iom->ihdl[plane];
+ iom->ihdl[plane] = *srcp_ihdl;
- iom_pipe_info->prev_ihdl[plane] = iom_pipe_info->ihdl[plane];
- iom_pipe_info->ihdl[plane] = *srcp_ihdl;
- }
- pr_debug("mem_id %d, start 0x%lx, len 0x%lx\n",
- mem_id, *start, *len);
+ pr_debug("%s: ndx=%d plane=%d prev=0x%p cur=0x%p start=0x%lx len=%lx\n",
+ __func__, pipe->pipe_ndx, plane, iom->prev_ihdl[plane],
+ iom->ihdl[plane], *start, *len);
+ mutex_unlock(&iommu_mutex);
return 0;
}
+static struct mdp4_iommu_pipe_info mdp_iommu[MDP4_MIXER_MAX][OVERLAY_PIPE_MAX];
+
void mdp4_iommu_unmap(struct mdp4_overlay_pipe *pipe)
{
struct mdp4_iommu_pipe_info *iom_pipe_info;
@@ -183,9 +262,7 @@
if (iom_pipe_info->mark_unmap) {
if (iom_pipe_info->ihdl[i]) {
- if (pipe->mixer_num == MDP4_MIXER1)
- mdp4_overlay_dtv_wait4vsync();
- pr_debug("%s(): mixer %u, pipe %u, plane %u, "
+ pr_debug("%s(): MARK, mixer %u, pipe %u, plane %u, "
"ihdl %p\n", __func__,
pipe->mixer_num, j + 1, i,
iom_pipe_info->ihdl[i]);
@@ -246,6 +323,11 @@
}
}
+int mdp4_overlay_borderfill_supported(void)
+{
+ return (ctrl->hw_version >= 0x0402030b);
+}
+
void mdp4_overlay_dmae_cfg(struct msm_fb_data_type *mfd, int atv)
{
uint32 dmae_cfg_reg;
@@ -421,7 +503,7 @@
{
uint32 off, bpp;
- if (mdp_is_in_isr == FALSE)
+ if (!in_interrupt())
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
if (pipe->dma_blt_addr) {
@@ -447,7 +529,7 @@
/* dma_p dest */
MDP_OUTP(MDP_BASE + 0x90010, (pipe->dst_y << 16 | pipe->dst_x));
- if (mdp_is_in_isr == FALSE)
+ if (!in_interrupt())
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
@@ -684,7 +766,7 @@
char *vg_base;
uint32 frame_size, src_size, src_xy, dst_size, dst_xy;
uint32 format, pattern, luma_offset, chroma_offset;
- uint32 mask, curr, addr;
+ uint32 mask;
int pnum, ptype;
pnum = pipe->pipe_num - OVERLAY_PIPE_VG1; /* start from 0 */
@@ -701,10 +783,29 @@
format = mdp4_overlay_format(pipe);
pattern = mdp4_overlay_unpack_pattern(pipe);
+ /* CSC Post Processing enabled? */
+ if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
+ if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_CSC_CFG) {
+ if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_ENABLE)
+ pipe->op_mode |= MDP4_OP_CSC_EN;
+ if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_YUV_IN)
+ pipe->op_mode |= MDP4_OP_SRC_DATA_YCBCR;
+ if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_YUV_OUT)
+ pipe->op_mode |= MDP4_OP_DST_DATA_YCBCR;
+
+ mdp4_csc_write(&pipe->pp_cfg.csc_cfg,
+ (uint32_t) (vg_base + MDP4_VIDEO_CSC_OFF));
+ }
+ if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_QSEED_CFG) {
+ mdp4_qseed_access_cfg(&pipe->pp_cfg.qseed_cfg[0],
+ (uint32_t) vg_base);
+ mdp4_qseed_access_cfg(&pipe->pp_cfg.qseed_cfg[1],
+ (uint32_t) vg_base);
+ }
+ }
/* not RGB use VG pipe, pure VG pipe */
- pipe->op_mode |= MDP4_OP_CSC_EN;
if (ptype != OVERLAY_TYPE_RGB)
- pipe->op_mode |= MDP4_OP_SRC_DATA_YCBCR;
+ pipe->op_mode |= (MDP4_OP_CSC_EN | MDP4_OP_SRC_DATA_YCBCR);
#ifdef MDP4_IGC_LUT_ENABLE
pipe->op_mode |= MDP4_OP_IGC_LUT_EN;
@@ -748,24 +849,6 @@
&chroma_offset);
}
- /* Ensure proper covert matrix loaded when color space swaps */
- curr = inpdw(vg_base + 0x0058);
- mask = 0x600;
-
- if ((curr & mask) != (pipe->op_mode & mask)) {
- addr = ((uint32_t)vg_base) + 0x4000;
- if (ptype != OVERLAY_TYPE_RGB)
- mdp4_csc_write(&(mdp_csc_convert[1]), addr);
- else
- mdp4_csc_write(&(mdp_csc_convert[0]), addr);
-
- mask = 0xFFFCFFFF;
- } else {
- /* Don't touch bits you don't want to configure*/
- mask = 0xFFFCF1FF;
- }
- pipe->op_mode = (pipe->op_mode & mask) | (curr & ~mask);
-
/* luma component plane */
outpdw(vg_base + 0x0010, pipe->srcp0_addr + luma_offset);
@@ -798,15 +881,6 @@
pipe->r_bit << 4 | pipe->b_bit << 2 | pipe->g_bit);
}
- if (pipe->flags & MDP_SHARPENING) {
- outpdw(vg_base + 0x8200,
- mdp4_ss_table_value(pipe->req_data.dpp.sharp_strength,
- 0));
- outpdw(vg_base + 0x8204,
- mdp4_ss_table_value(pipe->req_data.dpp.sharp_strength,
- 1));
- }
-
if (mdp_rev > MDP_REV_41) {
/* mdp chip select controller */
mask = 0;
@@ -1315,7 +1389,7 @@
} else
overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
- if (mdp_is_in_isr == FALSE)
+ if (!in_interrupt())
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/*
@@ -1410,7 +1484,7 @@
outpdw(overlay_base + 0x0014, curr | 0x4); /* GC_LUT_EN, 888 */
#endif
- if (mdp_is_in_isr == FALSE)
+ if (!in_interrupt())
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
@@ -1473,7 +1547,7 @@
return cnt;
}
-static void mdp4_mixer_stage_commit(int mixer)
+void mdp4_mixer_stage_commit(int mixer)
{
struct mdp4_overlay_pipe *pipe;
int i, num;
@@ -1537,20 +1611,13 @@
for (i = MDP4_MIXER_STAGE_BASE; i < MDP4_MIXER_STAGE_MAX; i++) {
pp = ctrl->stage[mixer][i];
- if (pp == pipe) {
+ if (pp && pp->pipe_ndx == pipe->pipe_ndx) {
ctrl->stage[mixer][i] = NULL;
break;
}
}
ctrl->stage[mixer][pipe->mixer_stage] = pipe; /* keep it */
-
- if (!(pipe->flags & MDP_OV_PLAY_NOWAIT)) {
- pr_debug("%s: mixer=%d ndx=%d stage=%d flags=%x\n",
- __func__, mixer, pipe->pipe_ndx,
- pipe->mixer_stage, pipe->flags);
- mdp4_mixer_stage_commit(mixer);
- }
}
void mdp4_mixer_stage_down(struct mdp4_overlay_pipe *pipe)
@@ -1562,16 +1629,11 @@
for (i = MDP4_MIXER_STAGE_BASE; i < MDP4_MIXER_STAGE_MAX; i++) {
pp = ctrl->stage[mixer][i];
- if (pp == pipe)
+ if (pp && pp->pipe_ndx == pipe->pipe_ndx)
ctrl->stage[mixer][i] = NULL; /* clear it */
}
- if (!(pipe->flags & MDP_OV_PLAY_NOWAIT)) {
- pr_debug("%s: mixer=%d ndx=%d stage=%d flags=%x\n",
- __func__, pipe->mixer_num, pipe->pipe_ndx,
- pipe->mixer_stage, pipe->flags);
- mdp4_mixer_stage_commit(pipe->mixer_num);
- }
+ mdp4_mixer_stage_commit(mixer);
}
/*
* mixer0: rgb3: border color at register 0x15004, 0x15008
@@ -1616,11 +1678,13 @@
bspipe->pipe_used = 0;
if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
- mdp4_dsi_video_base_swap(pipe);
+ mdp4_dsi_video_base_swap(0, pipe);
+ else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+ mdp4_dsi_cmd_base_swap(0, pipe);
else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
- mdp4_lcdc_base_swap(pipe);
+ mdp4_lcdc_base_swap(0, pipe);
else if (ctrl->panel_mode & MDP4_PANEL_DTV)
- mdp4_dtv_base_swap(pipe);
+ mdp4_dtv_base_swap(0, pipe);
mdp4_overlay_reg_flush(bspipe, 1);
/* borderfill pipe as base layer */
@@ -1667,7 +1731,14 @@
/* free borderfill pipe */
pipe->pipe_used = 0;
- mdp4_dsi_video_base_swap(bspipe);
+ if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
+ mdp4_dsi_video_base_swap(0, bspipe);
+ else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+ mdp4_dsi_cmd_base_swap(0, bspipe);
+ else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
+ mdp4_lcdc_base_swap(0, bspipe);
+ else if (ctrl->panel_mode & MDP4_PANEL_DTV)
+ mdp4_dtv_base_swap(0, bspipe);
/* free borderfill pipe */
mdp4_overlay_reg_flush(pipe, 1);
@@ -1764,7 +1835,7 @@
struct mdp4_overlay_pipe *d_pipe;
struct mdp4_overlay_pipe *s_pipe;
struct blend_cfg *blend;
- int i, off, ptype;
+ int i, off, ptype, alpha_drop = 0;
int d_alpha, s_alpha;
unsigned char *overlay_base;
uint32 c0, c1, c2;
@@ -1788,13 +1859,21 @@
d_alpha = 0;
continue;
}
+ /* alpha channel is lost on VG pipe when using QSEED or M/N */
+ if (s_pipe->pipe_type == OVERLAY_TYPE_VIDEO &&
+ ((s_pipe->op_mode & MDP4_OP_SCALEY_EN) ||
+ (s_pipe->op_mode & MDP4_OP_SCALEX_EN)) &&
+ !(s_pipe->op_mode & MDP4_OP_SCALEY_PIXEL_RPT))
+ alpha_drop = 1;
+
d_pipe = mdp4_background_layer(mixer, s_pipe);
d_alpha = d_pipe->alpha_enable;
s_alpha = s_pipe->alpha_enable;
pr_debug("%s: stage=%d: bg: ndx=%d da=%d dalpha=%x "
- "fg: ndx=%d sa=%d salpha=%x is_fg=%d\n",
+ "fg: ndx=%d sa=%d salpha=%x is_fg=%d alpha_drop=%d\n",
__func__, i-2, d_pipe->pipe_ndx, d_alpha, d_pipe->alpha,
- s_pipe->pipe_ndx, s_alpha, s_pipe->alpha, s_pipe->is_fg);
+ s_pipe->pipe_ndx, s_alpha, s_pipe->alpha, s_pipe->is_fg,
+ alpha_drop);
/* base on fg's alpha */
blend->bg_alpha = 0x0ff - s_pipe->alpha;
@@ -1807,14 +1886,23 @@
blend->solidfill_pipe = d_pipe;
}
} else if (s_alpha) {
- blend->op = (MDP4_BLEND_BG_ALPHA_FG_PIXEL |
- MDP4_BLEND_BG_INV_ALPHA);
+ if (!alpha_drop) {
+ blend->op = MDP4_BLEND_BG_ALPHA_FG_PIXEL;
+ if (!(s_pipe->flags & MDP_BLEND_FG_PREMULT))
+ blend->op |=
+ MDP4_BLEND_FG_ALPHA_FG_PIXEL;
+ } else
+ blend->op = MDP4_BLEND_BG_ALPHA_FG_CONST;
+
+ blend->op |= MDP4_BLEND_BG_INV_ALPHA;
} else if (d_alpha) {
ptype = mdp4_overlay_format2type(s_pipe->src_format);
if (ptype == OVERLAY_TYPE_VIDEO) {
- blend->op = (MDP4_BLEND_BG_ALPHA_BG_PIXEL |
- MDP4_BLEND_FG_ALPHA_BG_PIXEL |
- MDP4_BLEND_FG_INV_ALPHA);
+ blend->op = (MDP4_BLEND_FG_ALPHA_BG_PIXEL |
+ MDP4_BLEND_FG_INV_ALPHA);
+ if (!(s_pipe->flags & MDP_BLEND_FG_PREMULT))
+ blend->op |=
+ MDP4_BLEND_BG_ALPHA_BG_PIXEL;
blend->co3_sel = 0; /* use bg alpha */
} else {
/* s_pipe is rgb without alpha */
@@ -1993,6 +2081,8 @@
iom_pipe_info = &mdp_iommu[pipe->mixer_num][pipe->pipe_ndx - 1];
iom_pipe_info->mark_unmap = 1;
+ mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+
memset(pipe, 0, sizeof(*pipe));
pipe->pipe_type = ptype;
@@ -2105,6 +2195,62 @@
return -ERANGE;
}
+ if (req->src_rect.h > 0xFFF) {
+ pr_err("%s: src_h is out of range: 0X%x!\n",
+ __func__, req->src_rect.h);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
+ if (req->src_rect.w > 0xFFF) {
+ pr_err("%s: src_w is out of range: 0X%x!\n",
+ __func__, req->src_rect.w);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
+ if (req->src_rect.x > 0xFFF) {
+ pr_err("%s: src_x is out of range: 0X%x!\n",
+ __func__, req->src_rect.x);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
+ if (req->src_rect.y > 0xFFF) {
+ pr_err("%s: src_y is out of range: 0X%x!\n",
+ __func__, req->src_rect.y);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
+ if (req->dst_rect.h > 0xFFF) {
+ pr_err("%s: dst_h is out of range: 0X%x!\n",
+ __func__, req->dst_rect.h);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
+ if (req->dst_rect.w > 0xFFF) {
+ pr_err("%s: dst_w is out of range: 0X%x!\n",
+ __func__, req->dst_rect.w);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
+ if (req->dst_rect.x > 0xFFF) {
+ pr_err("%s: dst_x is out of range: 0X%x!\n",
+ __func__, req->dst_rect.x);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
+ if (req->dst_rect.y > 0xFFF) {
+ pr_err("%s: dst_y is out of range: 0X%x!\n",
+ __func__, req->dst_rect.y);
+ mdp4_stat.err_size++;
+ return -EINVAL;
+ }
+
if (req->src_rect.h == 0 || req->src_rect.w == 0) {
pr_err("%s: src img of zero size!\n", __func__);
mdp4_stat.err_size++;
@@ -2204,7 +2350,6 @@
}
iom_pipe_info = &mdp_iommu[pipe->mixer_num][pipe->pipe_ndx - 1];
- iom_pipe_info->mark_unmap = 0;
pipe->src_format = req->src.format;
ret = mdp4_overlay_format2pipe(pipe);
@@ -2372,29 +2517,6 @@
return 0;
}
-int mdp4_overlay_blt_offset(struct fb_info *info, struct msmfb_overlay_blt *req)
-{
- int ret = 0;
-
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-
- if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
- return -EINTR;
-
- if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
- ret = mdp4_dsi_overlay_blt_offset(mfd, req);
- else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
- ret = mdp4_dsi_video_overlay_blt_offset(mfd, req);
- else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
- ret = mdp4_lcdc_overlay_blt_offset(mfd, req);
- else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
- mdp4_mddi_overlay_blt_offset(mfd, req);
-
- mutex_unlock(&mfd->dma->ov_mutex);
-
- return ret;
-}
-
int mdp4_overlay_get(struct fb_info *info, struct mdp_overlay *req)
{
struct mdp4_overlay_pipe *pipe;
@@ -2544,7 +2666,6 @@
if (mfd->ov1_blt_state == mfd->use_ov1_blt)
return;
if (mfd->use_ov1_blt) {
- mdp4_allocate_writeback_buf(mfd, MDP4_MIXER1);
mdp4_dtv_overlay_blt_start(mfd);
pr_debug("%s overlay1 writeback is enabled\n", __func__);
} else {
@@ -2558,17 +2679,19 @@
struct msm_fb_data_type *mfd, uint32 perf_level)
{
u32 clk_rate = mfd->panel_info.clk_rate;
- u32 pull_mode = 0, use_blt = 0;
+ u32 blt_chq_req = 0, use_blt = 0;
- if (mfd->panel_info.type == MIPI_VIDEO_PANEL)
+ if ((mfd->panel_info.type == MIPI_VIDEO_PANEL) ||
+ (mfd->panel_info.type == MIPI_CMD_PANEL))
clk_rate = (&mfd->panel_info.mipi)->dsi_pclk_rate;
if ((mfd->panel_info.type == LCDC_PANEL) ||
(mfd->panel_info.type == MIPI_VIDEO_PANEL) ||
- (mfd->panel_info.type == DTV_PANEL))
- pull_mode = 1;
+ (mfd->panel_info.type == DTV_PANEL) ||
+ (mfd->panel_info.type == MIPI_CMD_PANEL))
+ blt_chq_req = 1;
- if (pull_mode && (req->src_rect.h > req->dst_rect.h ||
+ if (blt_chq_req && (req->src_rect.h > req->dst_rect.h ||
req->src_rect.w > req->dst_rect.w)) {
if (mdp4_overlay_validate_downscale(req, mfd, perf_level,
clk_rate))
@@ -2655,23 +2778,17 @@
}
}
- if (pipe->flags & MDP_SHARPENING) {
- bool test = ((pipe->req_data.dpp.sharp_strength > 0) &&
- ((req->src_rect.w > req->dst_rect.w) &&
- (req->src_rect.h > req->dst_rect.h)));
- if (test) {
- pr_debug("%s: No sharpening while downscaling.\n",
- __func__);
- pipe->flags &= ~MDP_SHARPENING;
- }
- }
-
- /* precompute HSIC matrices */
- if (req->flags & MDP_DPP_HSIC)
- mdp4_hsic_set(pipe, &(req->dpp));
-
mdp4_stat.overlay_set[pipe->mixer_num]++;
+ if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
+ if (pipe->pipe_num <= OVERLAY_PIPE_VG2)
+ memcpy(&pipe->pp_cfg, &req->overlay_pp_cfg,
+ sizeof(struct mdp_overlay_pp_params));
+ else
+ pr_debug("%s: RGB Pipes don't support CSC/QSEED\n",
+ __func__);
+ }
+
if (ctrl->panel_mode & MDP4_PANEL_DTV &&
pipe->mixer_num == MDP4_MIXER1) {
u32 use_blt = mdp4_overlay_blt_enable(req, mfd, perf_level);
@@ -2699,8 +2816,6 @@
if (old_level > perf_level)
mdp4_set_perf_level();
} else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
- mdp4_dsi_cmd_dma_busy_wait(mfd);
- mdp4_dsi_blt_dmap_busy_wait(mfd);
mdp4_set_perf_level();
} else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
if (old_level > perf_level)
@@ -2711,10 +2826,8 @@
mdp4_set_perf_level();
}
} else {
- if (ctrl->panel_mode & MDP4_PANEL_DTV) {
- mdp4_overlay_reg_flush(pipe, 0);
- mdp4_overlay_dtv_ov_done_push(mfd, pipe);
- }
+ if (ctrl->panel_mode & MDP4_PANEL_DTV)
+ mdp4_overlay_dtv_set_perf(mfd);
}
}
mutex_unlock(&mfd->dma->ov_mutex);
@@ -2732,6 +2845,7 @@
if (pipe == NULL)
continue;
pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
+ mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_down(pipe);
mdp4_overlay_pipe_free(pipe);
cnt++;
@@ -2744,8 +2858,6 @@
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
struct mdp4_overlay_pipe *pipe;
- struct dpp_ctrl dpp;
- int i;
if (mfd == NULL)
return -ENODEV;
@@ -2773,77 +2885,32 @@
else {
/* mixer 0 */
ctrl->mixer0_played = 0;
-#ifdef CONFIG_FB_MSM_MIPI_DSI
- if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
- if (mfd->panel_power_on) {
- mdp4_dsi_blt_dmap_busy_wait(mfd);
- }
- }
-#else
+
if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
if (mfd->panel_power_on)
mdp4_mddi_blt_dmap_busy_wait(mfd);
}
-#endif
}
- {
- mdp4_overlay_reg_flush(pipe, 1);
+ mdp4_overlay_reg_flush(pipe, 1);
+ mdp4_mixer_stage_down(pipe);
- if (mfd->use_ov0_blt || pipe->mixer_num == MDP4_MIXER1) {
- /* unstage pipe forcedly */
- pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
+ if (pipe->mixer_num == MDP4_MIXER0) {
+ if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
+ if (mfd->panel_power_on)
+ mdp4_mddi_overlay_restore();
}
- mdp4_mixer_stage_down(pipe);
-
- if (pipe->mixer_num == MDP4_MIXER0) {
-#ifdef CONFIG_FB_MSM_MIPI_DSI
- if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
- if (mfd->panel_power_on)
- mdp4_dsi_cmd_overlay_restore();
- } else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
- pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
- if (mfd->panel_power_on)
- mdp4_overlay_dsi_video_vsync_push(mfd,
- pipe);
- }
-#else
- if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
- if (mfd->panel_power_on)
- mdp4_mddi_overlay_restore();
- }
-#endif
- else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
- pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
- if (mfd->panel_power_on)
- mdp4_overlay_lcdc_vsync_push(mfd, pipe);
- }
- mfd->use_ov0_blt &= ~(1 << (pipe->pipe_ndx-1));
- mdp4_overlay_update_blt_mode(mfd);
- if (!mfd->use_ov0_blt)
- mdp4_free_writeback_buf(mfd, MDP4_MIXER0);
- } else { /* mixer1, DTV, ATV */
- if (ctrl->panel_mode & MDP4_PANEL_DTV) {
- mdp4_overlay_dtv_unset(mfd, pipe);
- mfd->use_ov1_blt &= ~(1 << (pipe->pipe_ndx-1));
- mdp4_overlay1_update_blt_mode(mfd);
- if (!mfd->use_ov1_blt)
- mdp4_free_writeback_buf(mfd,
- MDP4_MIXER1);
- }
+ mfd->use_ov0_blt &= ~(1 << (pipe->pipe_ndx-1));
+ mdp4_overlay_update_blt_mode(mfd);
+ } else { /* mixer1, DTV, ATV */
+ if (ctrl->panel_mode & MDP4_PANEL_DTV) {
+ mdp4_overlay_dtv_unset(mfd, pipe);
+ mfd->use_ov1_blt &= ~(1 << (pipe->pipe_ndx-1));
+ mdp4_overlay1_update_blt_mode(mfd);
}
}
- /* Reset any HSIC settings to default */
- if (pipe->flags & MDP_DPP_HSIC) {
- for (i = 0; i < NUM_HSIC_PARAM; i++)
- dpp.hsic_params[i] = 0;
-
- mdp4_hsic_set(pipe, &dpp);
- mdp4_hsic_update(pipe);
- }
-
mdp4_stat.overlay_unset[pipe->mixer_num]++;
mdp4_overlay_pipe_free(pipe);
@@ -2853,6 +2920,36 @@
return 0;
}
+int mdp4_overlay_wait4vsync(struct fb_info *info, long long *vtime)
+{
+ if (info->node == 0) {
+ if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
+ mdp4_dsi_video_wait4vsync(0, vtime);
+ else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+ mdp4_dsi_cmd_wait4vsync(0, vtime);
+ else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
+ mdp4_lcdc_wait4vsync(0, vtime);
+ } else if (info->node == 1)
+ mdp4_dtv_wait4vsync(0, vtime);
+
+ return 0;
+}
+
+int mdp4_overlay_vsync_ctrl(struct fb_info *info, int enable)
+{
+ if (info->node == 0) {
+ if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
+ mdp4_dsi_video_vsync_ctrl(0, enable);
+ else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+ mdp4_dsi_cmd_vsync_ctrl(0, enable);
+ else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
+ mdp4_lcdc_vsync_ctrl(0, enable);
+ } else if (info->node == 1)
+ mdp4_dtv_vsync_ctrl(0, enable);
+
+ return 0;
+}
+
struct tile_desc {
uint32 width; /* tile's width */
@@ -2892,37 +2989,39 @@
int mdp4_overlay_play_wait(struct fb_info *info, struct msmfb_overlay_data *req)
{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- struct mdp4_overlay_pipe *pipe;
-
- if (mfd == NULL)
- return -ENODEV;
-
- if (!mfd->panel_power_on) /* suspended */
- return -EPERM;
-
- pipe = mdp4_overlay_ndx2pipe(req->id);
-
- if (!pipe) {
- mdp4_stat.err_play++;
- return -ENODEV;
- }
-
- if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
- return -EINTR;
-
- mdp4_mixer_stage_commit(pipe->mixer_num);
-
- if (mfd->use_ov1_blt)
- mdp4_overlay1_update_blt_mode(mfd);
-
- mdp4_overlay_dtv_wait4vsync();
- mdp4_iommu_unmap(pipe);
-
- mutex_unlock(&mfd->dma->ov_mutex);
return 0;
}
+/*
+ * mdp4_overlay_dma_commit: called from dma_done isr
+ * No mutex/sleep allowed
+ */
/*
 * mdp4_overlay_dma_commit: called from the dma_done ISR.
 * No mutex/sleep allowed in this context.
 * Currently an intentionally empty placeholder.
 */
void mdp4_overlay_dma_commit(int mixer)
{
	/*
	 * non double buffer register update here
	 * perf level, new clock rate should be done here
	 */
}
+
+/*
+ * mdp4_overlay_vsync_commit: called from tasklet context
+ * No mutex/sleep allowed
+ */
+void mdp4_overlay_vsync_commit(struct mdp4_overlay_pipe *pipe)
+{
+ if (pipe->pipe_type == OVERLAY_TYPE_VIDEO)
+ mdp4_overlay_vg_setup(pipe); /* video/graphic pipe */
+ else
+ mdp4_overlay_rgb_setup(pipe); /* rgb pipe */
+
+ pr_debug("%s: pipe=%x ndx=%d num=%d used=%d\n", __func__,
+ (int) pipe, pipe->pipe_ndx, pipe->pipe_num, pipe->pipe_used);
+
+ mdp4_overlay_reg_flush(pipe, 1);
+ mdp4_mixer_stage_up(pipe);
+}
+
int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
@@ -2950,20 +3049,18 @@
return -ENODEV;
}
- if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
- return -EINTR;
-
if (pipe->pipe_type == OVERLAY_TYPE_BF) {
mdp4_overlay_borderfill_stage_up(pipe);
- mutex_unlock(&mfd->dma->ov_mutex);
return 0;
}
+ if (ctrl->panel_mode & MDP4_PANEL_MDDI)
+ mutex_lock(&mfd->dma->ov_mutex);
+
img = &req->data;
get_img(img, info, pipe, 0, &start, &len, &srcp0_file,
&ps0_need, &srcp0_ihdl);
if (len == 0) {
- mutex_unlock(&mfd->dma->ov_mutex);
pr_err("%s: pmem Error\n", __func__);
ret = -1;
goto end;
@@ -2974,8 +3071,9 @@
pipe->srcp0_ystride = pipe->src_width * pipe->bpp;
- pr_debug("%s: mixer=%d ndx=%x addr=%x flags=%x\n", __func__,
- pipe->mixer_num, pipe->pipe_ndx, (int)addr, pipe->flags);
+ pr_debug("%s: mixer=%d ndx=%x addr=%x flags=%x pid=%d\n", __func__,
+ pipe->mixer_num, pipe->pipe_ndx, (int)addr, pipe->flags,
+ current->pid);
if ((req->version_key & VERSION_KEY_MASK) == 0xF9E8D700)
overlay_version = (req->version_key & ~VERSION_KEY_MASK);
@@ -2986,7 +3084,6 @@
get_img(img, info, pipe, 1, &start, &len, &srcp1_file,
&p_need, &srcp1_ihdl);
if (len == 0) {
- mutex_unlock(&mfd->dma->ov_mutex);
pr_err("%s: Error to get plane1\n", __func__);
ret = -EINVAL;
goto end;
@@ -3018,7 +3115,6 @@
get_img(img, info, pipe, 1, &start, &len, &srcp1_file,
&p_need, &srcp1_ihdl);
if (len == 0) {
- mutex_unlock(&mfd->dma->ov_mutex);
pr_err("%s: Error to get plane1\n", __func__);
ret = -EINVAL;
goto end;
@@ -3029,7 +3125,6 @@
get_img(img, info, pipe, 2, &start, &len, &srcp2_file,
&p_need, &srcp2_ihdl);
if (len == 0) {
- mutex_unlock(&mfd->dma->ov_mutex);
pr_err("%s: Error to get plane2\n", __func__);
ret = -EINVAL;
goto end;
@@ -3073,16 +3168,33 @@
if (mfd->use_ov1_blt)
mdp4_overlay1_update_blt_mode(mfd);
+ if (ctrl->panel_mode & MDP4_PANEL_MDDI)
+ goto mddi;
+
+ if (pipe->mixer_num == MDP4_MIXER0) {
+ if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
+ /* cndx = 0 */
+ mdp4_dsi_cmd_pipe_queue(0, pipe);
+ }
+ if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
+ /* cndx = 0 */
+ mdp4_dsi_video_pipe_queue(0, pipe);
+ } else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
+ /* cndx = 0 */
+ mdp4_lcdc_pipe_queue(0, pipe);
+ }
+ } else if (pipe->mixer_num == MDP4_MIXER1) {
+ if (ctrl->panel_mode & MDP4_PANEL_DTV)
+ mdp4_dtv_pipe_queue(0, pipe);/* cndx = 0 */
+ }
+
+ return ret;
+
+mddi:
if (pipe->pipe_type == OVERLAY_TYPE_VIDEO) {
- mdp4_overlay_vg_setup(pipe); /* video/graphic pipe */
+ mdp4_overlay_vg_setup(pipe); /* video/graphic pipe */
} else {
- if (pipe->flags & MDP_SHARPENING) {
- pr_debug(
- "%s: Sharpening/Smoothing not supported on RGB pipe\n",
- __func__);
- pipe->flags &= ~MDP_SHARPENING;
- }
mdp4_overlay_rgb_setup(pipe); /* rgb pipe */
}
@@ -3094,73 +3206,21 @@
}
mdp4_mixer_stage_up(pipe);
+ if (!(pipe->flags & MDP_OV_PLAY_NOWAIT))
+ mdp4_mixer_stage_commit(pipe->mixer_num);
- if (pipe->mixer_num == MDP4_MIXER2) {
- ctrl->mixer2_played++;
-#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
- if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK) {
- mdp4_writeback_dma_busy_wait(mfd);
- mdp4_writeback_kickoff_video(mfd, pipe);
- }
-#endif
- } else if (pipe->mixer_num == MDP4_MIXER1) {
- ctrl->mixer1_played++;
- /* enternal interface */
- if (ctrl->panel_mode & MDP4_PANEL_DTV) {
- if (pipe->flags & MDP_OV_PLAY_NOWAIT)
- mdp4_overlay_flush_piggyback(MDP4_MIXER0,
- MDP4_MIXER1);
- mdp4_overlay_dtv_start();
- mdp4_overlay_dtv_ov_done_push(mfd, pipe);
- if (!mfd->use_ov1_blt)
- mdp4_overlay1_update_blt_mode(mfd);
- }
- } else {
- /* primary interface */
- ctrl->mixer0_played++;
- if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
- mdp4_overlay_lcdc_start();
- mdp4_overlay_lcdc_vsync_push(mfd, pipe);
- if (!mfd->use_ov0_blt &&
- !(pipe->flags & MDP_OV_PLAY_NOWAIT))
- mdp4_overlay_update_blt_mode(mfd);
- }
-#ifdef CONFIG_FB_MSM_MIPI_DSI
- else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
- mdp4_overlay_dsi_video_start();
- mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
- if (!mfd->use_ov0_blt &&
- !(pipe->flags & MDP_OV_PLAY_NOWAIT))
- mdp4_overlay_update_blt_mode(mfd);
- }
-#endif
- else {
- mdp4_overlay_reg_flush_reset(pipe);
- /* mddi & mipi dsi cmd mode */
- if (pipe->flags & MDP_OV_PLAY_NOWAIT) {
- mdp4_stat.overlay_play[pipe->mixer_num]++;
- mutex_unlock(&mfd->dma->ov_mutex);
- goto end;
- }
-#ifdef CONFIG_FB_MSM_MIPI_DSI
- if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
- mdp4_iommu_attach();
- mdp4_dsi_cmd_dma_busy_wait(mfd);
- mdp4_dsi_cmd_kickoff_video(mfd, pipe);
- }
-#else
- if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
- mdp4_mddi_dma_busy_wait(mfd);
- mdp4_mddi_kickoff_video(mfd, pipe);
- }
-#endif
- }
+ if (pipe->flags & MDP_OV_PLAY_NOWAIT) {
+ mdp4_stat.overlay_play[pipe->mixer_num]++;
+ mutex_unlock(&mfd->dma->ov_mutex);
+ goto end;
}
- /* write out DPP HSIC registers */
- if (pipe->flags & MDP_DPP_HSIC)
- mdp4_hsic_update(pipe);
+ if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
+ mdp4_mddi_dma_busy_wait(mfd);
+ mdp4_mddi_kickoff_video(mfd, pipe);
+ }
+
if (!(pipe->flags & MDP_OV_PLAY_NOWAIT))
mdp4_iommu_unmap(pipe);
mdp4_stat.overlay_play[pipe->mixer_num]++;
@@ -3231,13 +3291,6 @@
},
};
-static int mdp_iommu_fault_handler(struct iommu_domain *domain,
- struct device *dev, unsigned long iova, int flags)
-{
- pr_err("MDP IOMMU page fault: iova 0x%lx", iova);
- return 0;
-}
-
void mdp4_iommu_attach(void)
{
static int done;
@@ -3268,8 +3321,6 @@
if (!domain)
continue;
- iommu_set_fault_handler(domain,
- mdp_iommu_fault_handler);
if (iommu_attach_device(domain, ctx)) {
WARN(1, "%s: could not attach domain %d to context %s."
" iommu programming will not occur.\n",
@@ -3307,6 +3358,7 @@
void mdp4_v4l2_overlay_clear(struct mdp4_overlay_pipe *pipe)
{
+ mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_down(pipe);
mdp4_overlay_pipe_free(pipe);
}
@@ -3372,6 +3424,10 @@
mdp4_mixer_stage_up(pipe);
+#ifdef V4L2_VSYNC
+ /*
+ * TODO: incorporate v4l2 into vsycn driven mechanism
+ */
if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
mdp4_overlay_lcdc_vsync_push(mfd, pipe);
} else {
@@ -3387,6 +3443,8 @@
}
#endif
}
+#endif
+
done:
mutex_unlock(&mfd->dma->ov_mutex);
return err;
diff --git a/drivers/video/msm/mdp4_overlay_dsi_cmd.c b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
index d0ac1a6..7998d8b 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_cmd.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
@@ -17,7 +17,6 @@
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/semaphore.h>
@@ -32,18 +31,525 @@
#include "mdp4.h"
#include "mipi_dsi.h"
-static struct mdp4_overlay_pipe *dsi_pipe;
-static struct msm_fb_data_type *dsi_mfd;
-static int busy_wait_cnt;
static int dsi_state;
-static unsigned long tout_expired;
#define TOUT_PERIOD HZ /* 1 second */
#define MS_100 (HZ/10) /* 100 ms */
static int vsync_start_y_adjust = 4;
-struct timer_list dsi_clock_timer;
+#define MAX_CONTROLLER 1
+#define VSYNC_EXPIRE_TICK 2
+#define BACKLIGHT_MAX 4
+
+struct backlight {
+ int put;
+ int get;
+ int tot;
+ int blist[BACKLIGHT_MAX];
+};
+
+static struct vsycn_ctrl {
+ struct device *dev;
+ int inited;
+ int update_ndx;
+ int expire_tick;
+ uint32 dmap_intr_tot;
+ uint32 rdptr_intr_tot;
+ uint32 rdptr_sirq_tot;
+ atomic_t suspend;
+ int dmap_wait_cnt;
+ int wait_vsync_cnt;
+ int commit_cnt;
+ struct mutex update_lock;
+ struct completion dmap_comp;
+ struct completion vsync_comp;
+ spinlock_t dmap_spin_lock;
+ spinlock_t spin_lock;
+ struct mdp4_overlay_pipe *base_pipe;
+ struct vsync_update vlist[2];
+ struct backlight blight;
+ int vsync_irq_enabled;
+ ktime_t vsync_time;
+ struct work_struct vsync_work;
+} vsync_ctrl_db[MAX_CONTROLLER];
+
+static void vsync_irq_enable(int intr, int term)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+	/* no need to clear other interrupts for command mode */
+ outp32(MDP_INTR_CLEAR, INTR_PRIMARY_RDPTR);
+ mdp_intr_mask |= intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_enable_irq(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+static void vsync_irq_disable(int intr, int term)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+	/* no need to clear other interrupts for command mode */
+ mdp_intr_mask &= ~intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_disable_irq_nosync(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+static int mdp4_backlight_get_level(struct vsycn_ctrl *vctrl)
+{
+ int level = -1;
+
+ mutex_lock(&vctrl->update_lock);
+ if (vctrl->blight.tot) {
+ level = vctrl->blight.blist[vctrl->blight.get];
+ vctrl->blight.get++;
+ vctrl->blight.get %= BACKLIGHT_MAX;
+ vctrl->blight.tot--;
+ pr_debug("%s: tot=%d put=%d get=%d level=%d\n", __func__,
+ vctrl->blight.tot, vctrl->blight.put, vctrl->blight.get, level);
+ }
+ mutex_unlock(&vctrl->update_lock);
+ return level;
+}
+
+void mdp4_backlight_put_level(int cndx, int level)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ mutex_lock(&vctrl->update_lock);
+ vctrl->blight.blist[vctrl->blight.put] = level;
+ vctrl->blight.put++;
+ vctrl->blight.put %= BACKLIGHT_MAX;
+ if (vctrl->blight.tot == BACKLIGHT_MAX) {
+ /* drop the oldest one */
+ vctrl->blight.get++;
+ vctrl->blight.get %= BACKLIGHT_MAX;
+ } else {
+ vctrl->blight.tot++;
+ }
+ mutex_unlock(&vctrl->update_lock);
+ pr_debug("%s: tot=%d put=%d get=%d level=%d\n", __func__,
+ vctrl->blight.tot, vctrl->blight.put, vctrl->blight.get, level);
+
+ if (mdp4_overlay_dsi_state_get() <= ST_DSI_SUSPEND)
+ return;
+}
+
+static int mdp4_backlight_commit_level(struct vsycn_ctrl *vctrl)
+{
+ int level;
+ int cnt = 0;
+
+ if (vctrl->blight.tot) { /* has new backlight */
+ if (mipi_dsi_ctrl_lock(0)) {
+ level = mdp4_backlight_get_level(vctrl);
+ mipi_dsi_cmd_backlight_tx(level);
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+
+void mdp4_blt_dmap_cfg(struct mdp4_overlay_pipe *pipe)
+{
+ uint32 off, addr;
+ int bpp;
+
+ if (pipe->ov_blt_addr == 0)
+ return;
+
+#ifdef BLT_RGB565
+	bpp = 2; /* overlay output is RGB565 */
+#else
+	bpp = 3; /* overlay output is RGB888 */
+#endif
+ off = 0;
+ if (pipe->blt_dmap_done & 0x01)
+ off = pipe->src_height * pipe->src_width * bpp;
+ addr = pipe->dma_blt_addr + off;
+
+ /* dmap */
+ MDP_OUTP(MDP_BASE + 0x90008, addr);
+}
+
+
+void mdp4_blt_overlay0_cfg(struct mdp4_overlay_pipe *pipe)
+{
+ uint32 off, addr;
+ int bpp;
+ char *overlay_base;
+
+ if (pipe->ov_blt_addr == 0)
+ return;
+
+#ifdef BLT_RGB565
+	bpp = 2; /* overlay output is RGB565 */
+#else
+	bpp = 3; /* overlay output is RGB888 */
+#endif
+ off = 0;
+ if (pipe->blt_ov_done & 0x01)
+ off = pipe->src_height * pipe->src_width * bpp;
+ addr = pipe->ov_blt_addr + off;
+ /* overlay 0 */
+ overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
+ outpdw(overlay_base + 0x000c, addr);
+ outpdw(overlay_base + 0x001c, addr);
+}
+
+static void vsync_commit_kickoff_dmap(struct mdp4_overlay_pipe *pipe)
+{
+ if (mipi_dsi_ctrl_lock(1)) {
+ mdp4_stat.kickoff_dmap++;
+ pipe->blt_dmap_koff++;
+ vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+ outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */
+ mb();
+ }
+}
+
+static void vsync_commit_kickoff_ov0(struct mdp4_overlay_pipe *pipe, int blt)
+{
+ int locked = 1;
+
+ if (blt)
+ vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+ else
+ locked = mipi_dsi_ctrl_lock(1);
+
+ if (locked) {
+ mdp4_stat.kickoff_ov0++;
+ pipe->blt_ov_koff++;
+ outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay engine */
+ mb();
+ }
+}
+
+/*
+ * mdp4_dsi_cmd_do_update:
+ * called from thread context
+ */
+void mdp4_dsi_cmd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pp;
+ int undx;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+
+	pp = &vp->plist[pipe->pipe_ndx - 1]; /* ndx starts from 1 */
+
+ pr_debug("%s: vndx=%d pipe_ndx=%d expire=%x pid=%d\n", __func__,
+ undx, pipe->pipe_ndx, vctrl->expire_tick, current->pid);
+
+ *pp = *pipe; /* keep it */
+ vp->update_cnt++;
+
+ if (vctrl->expire_tick == 0) {
+ mipi_dsi_clk_cfg(1);
+ mdp_clk_ctrl(1);
+ vsync_irq_enable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
+ }
+ vctrl->expire_tick = VSYNC_EXPIRE_TICK;
+ mutex_unlock(&vctrl->update_lock);
+}
+
+int mdp4_dsi_cmd_pipe_commit(void)
+{
+
+ int i, undx, cnt;
+ int mixer = 0;
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+ int diff;
+
+ vctrl = &vsync_ctrl_db[0];
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+ pipe = vctrl->base_pipe;
+ mixer = pipe->mixer_num;
+
+ pr_debug("%s: vndx=%d cnt=%d expire=%x pid=%d\n", __func__,
+ undx, vp->update_cnt, vctrl->expire_tick, current->pid);
+
+ cnt = 0;
+ if (vp->update_cnt == 0) {
+ mutex_unlock(&vctrl->update_lock);
+ return cnt;
+ }
+ vctrl->update_ndx++;
+ vctrl->update_ndx &= 0x01;
+ vctrl->commit_cnt++;
+ vp->update_cnt = 0; /* reset */
+ mutex_unlock(&vctrl->update_lock);
+
+ mdp4_backlight_commit_level(vctrl);
+
+ /* free previous committed iommu back to pool */
+ mdp4_overlay_iommu_unmap_freelist(mixer);
+
+ pipe = vp->plist;
+ for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+ if (pipe->pipe_used) {
+ cnt++;
+ mdp4_overlay_vsync_commit(pipe);
+ /* free previous iommu to freelist
+ * which will be freed at next
+ * pipe_commit
+ */
+ mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+ pipe->pipe_used = 0; /* clear */
+ }
+ }
+ mdp4_mixer_stage_commit(mixer);
+
+
+ pr_debug("%s: intr=%d expire=%d cpu=%d\n", __func__,
+ vctrl->rdptr_intr_tot, vctrl->expire_tick, smp_processor_id());
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ pipe = vctrl->base_pipe;
+ if (pipe->blt_changed) {
+		/* blt configuration changed */
+ pipe->blt_changed = 0;
+ mdp4_overlayproc_cfg(pipe);
+ mdp4_overlay_dmap_xy(pipe);
+ }
+
+ if (pipe->ov_blt_addr) {
+ diff = pipe->blt_ov_koff - pipe->blt_ov_done;
+ if (diff < 1) {
+ mdp4_blt_overlay0_cfg(pipe);
+ vsync_commit_kickoff_ov0(pipe, 1);
+ }
+ } else {
+ vsync_commit_kickoff_ov0(pipe, 0);
+ }
+
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ return cnt;
+}
+
+void mdp4_dsi_cmd_vsync_ctrl(int cndx, int enable)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (vctrl->vsync_irq_enabled == enable)
+ return;
+
+ vctrl->vsync_irq_enabled = enable;
+
+ mutex_lock(&vctrl->update_lock);
+ if (enable) {
+ mipi_dsi_clk_cfg(1);
+ mdp_clk_ctrl(1);
+ vsync_irq_enable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
+ } else {
+ mipi_dsi_clk_cfg(0);
+ mdp_clk_ctrl(0);
+ vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
+ vctrl->expire_tick = 0;
+ }
+ mutex_unlock(&vctrl->update_lock);
+}
+
+void mdp4_dsi_cmd_wait4vsync(int cndx, long long *vtime)
+{
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (vctrl->wait_vsync_cnt == 0)
+ INIT_COMPLETION(vctrl->vsync_comp);
+ vctrl->wait_vsync_cnt++;
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ wait_for_completion(&vctrl->vsync_comp);
+
+ *vtime = ktime_to_ns(vctrl->vsync_time);
+}
+
+
+/*
+ * primary_rdptr_isr:
+ * called from interrupt context
+ */
+
+static void primary_rdptr_isr(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
+ vctrl->rdptr_intr_tot++;
+ vctrl->vsync_time = ktime_get();
+ schedule_work(&vctrl->vsync_work);
+}
+
+void mdp4_dmap_done_dsi_cmd(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ int diff;
+
+ vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+
+ vctrl = &vsync_ctrl_db[cndx];
+ vctrl->dmap_intr_tot++;
+ pipe = vctrl->base_pipe;
+
+ if (pipe->ov_blt_addr == 0) {
+ mdp4_overlay_dma_commit(cndx);
+ return;
+ }
+
+ /* blt enabled */
+ spin_lock(&vctrl->spin_lock);
+ pipe->blt_dmap_done++;
+ diff = pipe->blt_ov_done - pipe->blt_dmap_done;
+ spin_unlock(&vctrl->spin_lock);
+ pr_debug("%s: ov_done=%d dmap_done=%d ov_koff=%d dmap_koff=%d\n",
+ __func__, pipe->blt_ov_done, pipe->blt_dmap_done,
+ pipe->blt_ov_koff, pipe->blt_dmap_koff);
+ if (diff <= 0) {
+ if (pipe->blt_end) {
+ pipe->blt_end = 0;
+ pipe->ov_blt_addr = 0;
+ pipe->dma_blt_addr = 0;
+ pipe->blt_changed = 1;
+ pr_info("%s: BLT-END\n", __func__);
+ }
+ }
+ spin_unlock(&dsi_clk_lock);
+}
+
+/*
+ * mdp4_overlay0_done_dsi_cmd: called from isr
+ */
+void mdp4_overlay0_done_dsi_cmd(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ int diff;
+
+ vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ spin_lock(&vctrl->spin_lock);
+ pipe->blt_ov_done++;
+ diff = pipe->blt_ov_done - pipe->blt_dmap_done;
+ spin_unlock(&vctrl->spin_lock);
+
+ pr_debug("%s: ov_done=%d dmap_done=%d ov_koff=%d dmap_koff=%d diff=%d\n",
+ __func__, pipe->blt_ov_done, pipe->blt_dmap_done,
+ pipe->blt_ov_koff, pipe->blt_dmap_koff, diff);
+
+ if (pipe->ov_blt_addr == 0) {
+ /* blt disabled */
+ pr_debug("%s: NON-BLT\n", __func__);
+ return;
+ }
+
+ if (diff == 1) {
+ mdp4_blt_dmap_cfg(pipe);
+ vsync_commit_kickoff_dmap(pipe);
+ }
+}
+
+static void send_vsync_work(struct work_struct *work)
+{
+ struct vsycn_ctrl *vctrl =
+ container_of(work, typeof(*vctrl), vsync_work);
+ char buf[64];
+ char *envp[2];
+
+ snprintf(buf, sizeof(buf), "VSYNC=%llu",
+ ktime_to_ns(vctrl->vsync_time));
+ envp[0] = buf;
+ envp[1] = NULL;
+ kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+
+void mdp4_dsi_rdptr_init(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->inited)
+ return;
+
+ vctrl->inited = 1;
+ vctrl->update_ndx = 0;
+ vctrl->blight.put = 0;
+ vctrl->blight.get = 0;
+ vctrl->blight.tot = 0;
+ mutex_init(&vctrl->update_lock);
+ init_completion(&vctrl->vsync_comp);
+ init_completion(&vctrl->dmap_comp);
+ spin_lock_init(&vctrl->spin_lock);
+ spin_lock_init(&vctrl->dmap_spin_lock);
+ INIT_WORK(&vctrl->vsync_work, send_vsync_work);
+}
+
+void mdp4_primary_rdptr(void)
+{
+ primary_rdptr_isr(0);
+}
void mdp4_overlay_dsi_state_set(int state)
{
@@ -59,18 +565,6 @@
return dsi_state;
}
-static void dsi_clock_tout(unsigned long data)
-{
- spin_lock(&dsi_clk_lock);
- if (mipi_dsi_clk_on) {
- if (dsi_state == ST_DSI_PLAYING) {
- mipi_dsi_turn_off_clks();
- mdp4_overlay_dsi_state_set(ST_DSI_CLK_OFF);
- }
- }
- spin_unlock(&dsi_clk_lock);
-}
-
static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
{
/*
@@ -86,11 +580,6 @@
return xres * bpp;
}
-void mdp4_dsi_cmd_del_timer(void)
-{
- del_timer_sync(&dsi_clock_timer);
-}
-
void mdp4_mipi_vsync_enable(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe, int which)
{
@@ -121,67 +610,98 @@
}
}
-void mdp4_dsi_cmd_base_swap(struct mdp4_overlay_pipe *pipe)
+void mdp4_dsi_cmd_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
{
- dsi_pipe = pipe;
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ vctrl->base_pipe = pipe;
+}
+
+static void mdp4_overlay_setup_pipe_addr(struct msm_fb_data_type *mfd,
+ struct mdp4_overlay_pipe *pipe)
+{
+ MDPIBUF *iBuf = &mfd->ibuf;
+ struct fb_info *fbi;
+ int bpp;
+ uint8 *src;
+
+ /* whole screen for base layer */
+ src = (uint8 *) iBuf->buf;
+ fbi = mfd->fbi;
+
+ if (pipe->is_3d) {
+ bpp = fbi->var.bits_per_pixel / 8;
+ pipe->src_height = pipe->src_height_3d;
+ pipe->src_width = pipe->src_width_3d;
+ pipe->src_h = pipe->src_height_3d;
+ pipe->src_w = pipe->src_width_3d;
+ pipe->dst_h = pipe->src_height_3d;
+ pipe->dst_w = pipe->src_width_3d;
+ pipe->srcp0_ystride = msm_fb_line_length(0,
+ pipe->src_width, bpp);
+ } else {
+ /* 2D */
+ pipe->src_height = fbi->var.yres;
+ pipe->src_width = fbi->var.xres;
+ pipe->src_h = fbi->var.yres;
+ pipe->src_w = fbi->var.xres;
+ pipe->dst_h = fbi->var.yres;
+ pipe->dst_w = fbi->var.xres;
+ pipe->srcp0_ystride = fbi->fix.line_length;
+ }
+ pipe->src_y = 0;
+ pipe->src_x = 0;
+ pipe->dst_y = 0;
+ pipe->dst_x = 0;
+ pipe->srcp0_addr = (uint32)src;
}
void mdp4_overlay_update_dsi_cmd(struct msm_fb_data_type *mfd)
{
- MDPIBUF *iBuf = &mfd->ibuf;
- uint8 *src;
int ptype;
struct mdp4_overlay_pipe *pipe;
- int bpp;
int ret;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+
if (mfd->key != MFD_KEY)
return;
- dsi_mfd = mfd; /* keep it */
+ vctrl = &vsync_ctrl_db[cndx];
/* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ mdp_clk_ctrl(1);
- if (dsi_pipe == NULL) {
- ptype = mdp4_overlay_format2type(mfd->fb_imgType);
- if (ptype < 0)
- printk(KERN_INFO "%s: format2type failed\n", __func__);
- pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
- if (pipe == NULL)
- printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
- pipe->pipe_used++;
- pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
- pipe->mixer_num = MDP4_MIXER0;
- pipe->src_format = mfd->fb_imgType;
- mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_DSI_CMD);
- ret = mdp4_overlay_format2pipe(pipe);
- if (ret < 0)
- printk(KERN_INFO "%s: format2type failed\n", __func__);
-
- init_timer(&dsi_clock_timer);
- dsi_clock_timer.function = dsi_clock_tout;
- dsi_clock_timer.data = (unsigned long) mfd;;
- dsi_clock_timer.expires = 0xffffffff;
- add_timer(&dsi_clock_timer);
- tout_expired = jiffies;
-
- dsi_pipe = pipe; /* keep it */
-
- mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
- pipe->ov_blt_addr = 0;
- pipe->dma_blt_addr = 0;
-
- } else {
- pipe = dsi_pipe;
- }
-
- if (pipe->pipe_used == 0 ||
- pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) {
- pr_err("%s: NOT baselayer\n", __func__);
- mutex_unlock(&mfd->dma->ov_mutex);
+ ptype = mdp4_overlay_format2type(mfd->fb_imgType);
+ if (ptype < 0)
+ printk(KERN_INFO "%s: format2type failed\n", __func__);
+ pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
+ if (pipe == NULL) {
+ printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
return;
}
+ pipe->pipe_used++;
+ pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
+ pipe->mixer_num = MDP4_MIXER0;
+ pipe->src_format = mfd->fb_imgType;
+ mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_DSI_CMD);
+ ret = mdp4_overlay_format2pipe(pipe);
+ if (ret < 0)
+ printk(KERN_INFO "%s: format2type failed\n", __func__);
+
+ vctrl->base_pipe = pipe; /* keep it */
+ mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
+ pipe->ov_blt_addr = 0;
+ pipe->dma_blt_addr = 0;
+
+ MDP_OUTP(MDP_BASE + 0x021c, 0x10); /* read pointer */
/*
* configure dsi stream id
@@ -190,41 +710,8 @@
MDP_OUTP(MDP_BASE + 0x000a0, 0x10);
/* disable dsi trigger */
MDP_OUTP(MDP_BASE + 0x000a4, 0x00);
- /* whole screen for base layer */
- src = (uint8 *) iBuf->buf;
-
- {
- struct fb_info *fbi;
-
- fbi = mfd->fbi;
- if (pipe->is_3d) {
- bpp = fbi->var.bits_per_pixel / 8;
- pipe->src_height = pipe->src_height_3d;
- pipe->src_width = pipe->src_width_3d;
- pipe->src_h = pipe->src_height_3d;
- pipe->src_w = pipe->src_width_3d;
- pipe->dst_h = pipe->src_height_3d;
- pipe->dst_w = pipe->src_width_3d;
- pipe->srcp0_ystride = msm_fb_line_length(0,
- pipe->src_width, bpp);
- } else {
- /* 2D */
- pipe->src_height = fbi->var.yres;
- pipe->src_width = fbi->var.xres;
- pipe->src_h = fbi->var.yres;
- pipe->src_w = fbi->var.xres;
- pipe->dst_h = fbi->var.yres;
- pipe->dst_w = fbi->var.xres;
- pipe->srcp0_ystride = fbi->fix.line_length;
- }
- pipe->src_y = 0;
- pipe->src_x = 0;
- pipe->dst_y = 0;
- pipe->dst_x = 0;
- pipe->srcp0_addr = (uint32)src;
- }
-
+ mdp4_overlay_setup_pipe_addr(mfd, pipe);
mdp4_overlay_rgb_setup(pipe);
@@ -238,10 +725,8 @@
mdp4_overlay_dmap_cfg(mfd, 0);
- mdp4_mipi_vsync_enable(mfd, pipe, 0);
-
/* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ mdp_clk_ctrl(0);
wmb();
}
@@ -251,18 +736,18 @@
struct msmfb_overlay_3d *r3d)
{
struct fb_info *fbi;
- struct mdp4_overlay_pipe *pipe;
int bpp;
uint8 *src = NULL;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
- if (dsi_pipe == NULL)
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (pipe == NULL)
return;
- dsi_pipe->is_3d = r3d->is_3d;
- dsi_pipe->src_height_3d = r3d->height;
- dsi_pipe->src_width_3d = r3d->width;
-
- pipe = dsi_pipe;
if (pipe->pipe_used == 0 ||
pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) {
pr_err("%s: NOT baselayer\n", __func__);
@@ -270,16 +755,15 @@
return;
}
+ pipe->is_3d = r3d->is_3d;
+ pipe->src_height_3d = r3d->height;
+ pipe->src_width_3d = r3d->width;
+
if (pipe->is_3d)
mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_SIDE_BY_SIDE);
else
mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_NONE);
- if (mfd->panel_power_on) {
- mdp4_dsi_cmd_dma_busy_wait(mfd);
- mdp4_dsi_blt_dmap_busy_wait(mfd);
- }
-
fbi = mfd->fbi;
if (pipe->is_3d) {
bpp = fbi->var.bits_per_pixel / 8;
@@ -328,28 +812,38 @@
int mdp4_dsi_overlay_blt_start(struct msm_fb_data_type *mfd)
{
unsigned long flag;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
- pr_debug("%s: blt_end=%d ov_blt_addr=%x pid=%d\n",
- __func__, dsi_pipe->blt_end, (int)dsi_pipe->ov_blt_addr, current->pid);
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ pr_debug("%s: blt_end=%d blt_addr=%x pid=%d\n",
+ __func__, pipe->blt_end, (int)pipe->ov_blt_addr, current->pid);
mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
if (mfd->ov0_wb_buf->write_addr == 0) {
- pr_info("%s: no blt_base assigned\n", __func__);
+ pr_err("%s: no blt_base assigned\n", __func__);
return -EBUSY;
}
- if (dsi_pipe->ov_blt_addr == 0) {
- mdp4_dsi_cmd_dma_busy_wait(mfd);
- spin_lock_irqsave(&mdp_spin_lock, flag);
- dsi_pipe->blt_end = 0;
- dsi_pipe->blt_cnt = 0;
- dsi_pipe->ov_cnt = 0;
- dsi_pipe->dmap_cnt = 0;
- dsi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
- dsi_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+ if (pipe->ov_blt_addr == 0) {
+ spin_lock_irqsave(&vctrl->spin_lock, flag);
+ pipe->blt_end = 0;
+ pipe->blt_cnt = 0;
+ pipe->blt_changed = 1;
+ pipe->ov_cnt = 0;
+ pipe->dmap_cnt = 0;
+ pipe->blt_ov_koff = 0;
+ pipe->blt_dmap_koff = 0;
+ pipe->blt_ov_done = 0;
+ pipe->blt_dmap_done = 0;
+ pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+ pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
mdp4_stat.blt_dsi_cmd++;
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
return 0;
}
@@ -359,32 +853,26 @@
int mdp4_dsi_overlay_blt_stop(struct msm_fb_data_type *mfd)
{
unsigned long flag;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
- pr_debug("%s: blt_end=%d ov_blt_addr=%x\n",
- __func__, dsi_pipe->blt_end, (int)dsi_pipe->ov_blt_addr);
+ pr_info("%s: blt_end=%d blt_addr=%x pid=%d\n",
+ __func__, pipe->blt_end, (int)pipe->ov_blt_addr, current->pid);
- if ((dsi_pipe->blt_end == 0) && dsi_pipe->ov_blt_addr) {
- spin_lock_irqsave(&mdp_spin_lock, flag);
- dsi_pipe->blt_end = 1; /* mark as end */
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ if ((pipe->blt_end == 0) && pipe->ov_blt_addr) {
+ spin_lock_irqsave(&vctrl->spin_lock, flag);
+ pipe->blt_end = 1; /* mark as end */
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
return 0;
}
return -EBUSY;
}
-int mdp4_dsi_overlay_blt_offset(struct msm_fb_data_type *mfd,
- struct msmfb_overlay_blt *req)
-{
- req->offset = 0;
- req->width = dsi_pipe->src_width;
- req->height = dsi_pipe->src_height;
- req->bpp = dsi_pipe->bpp;
-
- return sizeof(*req);
-}
-
void mdp4_dsi_overlay_blt(struct msm_fb_data_type *mfd,
struct msmfb_overlay_blt *req)
{
@@ -395,332 +883,123 @@
}
-void mdp4_blt_xy_update(struct mdp4_overlay_pipe *pipe)
+int mdp4_dsi_cmd_on(struct platform_device *pdev)
{
- uint32 off, addr, addr2;
- int bpp;
- char *overlay_base;
+ int ret = 0;
+ int cndx = 0;
+ struct msm_fb_data_type *mfd;
+ struct vsycn_ctrl *vctrl;
+ pr_info("%s+:\n", __func__);
- if (pipe->ov_blt_addr == 0)
- return;
+ mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+ vctrl = &vsync_ctrl_db[cndx];
+ vctrl->dev = mfd->fbi->dev;
-#ifdef BLT_RGB565
- bpp = 2; /* overlay ouput is RGB565 */
-#else
- bpp = 3; /* overlay ouput is RGB888 */
-#endif
- off = 0;
- if (pipe->dmap_cnt & 0x01)
- off = pipe->src_height * pipe->src_width * bpp;
- addr = pipe->dma_blt_addr + off;
+ mdp_clk_ctrl(1);
- /* dmap */
- MDP_OUTP(MDP_BASE + 0x90008, addr);
-
- off = 0;
- if (pipe->ov_cnt & 0x01)
- off = pipe->src_height * pipe->src_width * bpp;
- addr2 = pipe->ov_blt_addr + off;
- /* overlay 0 */
- overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
- outpdw(overlay_base + 0x000c, addr2);
- outpdw(overlay_base + 0x001c, addr2);
-}
-
-
-/*
- * mdp4_dmap_done_dsi: called from isr
- * DAM_P_DONE only used when blt enabled
- */
-void mdp4_dma_p_done_dsi(struct mdp_dma_data *dma)
-{
- int diff;
-
- dsi_pipe->dmap_cnt++;
- diff = dsi_pipe->ov_cnt - dsi_pipe->dmap_cnt;
- pr_debug("%s: ov_cnt=%d dmap_cnt=%d\n",
- __func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
-
- if (diff <= 0) {
- spin_lock(&mdp_spin_lock);
- dma->dmap_busy = FALSE;
- complete(&dma->dmap_comp);
- spin_unlock(&mdp_spin_lock);
- if (dsi_pipe->blt_end) {
- dsi_pipe->blt_end = 0;
- dsi_pipe->dma_blt_addr = 0;
- dsi_pipe->ov_blt_addr = 0;
- pr_debug("%s: END, ov_cnt=%d dmap_cnt=%d\n",
- __func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
- mdp_intr_mask &= ~INTR_DMA_P_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- }
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
- mdp_disable_irq_nosync(MDP_DMA2_TERM); /* disable intr */
- return;
- }
-
- spin_lock(&mdp_spin_lock);
- dma->busy = FALSE;
- spin_unlock(&mdp_spin_lock);
- complete(&dma->comp);
- if (busy_wait_cnt)
- busy_wait_cnt--;
-
- pr_debug("%s: kickoff dmap\n", __func__);
-
- mdp4_blt_xy_update(dsi_pipe);
- /* kick off dmap */
- outpdw(MDP_BASE + 0x000c, 0x0);
- mdp4_stat.kickoff_dmap++;
- /* trigger dsi cmd engine */
- mipi_dsi_cmd_mdp_start();
-
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
-}
-
-
-/*
- * mdp4_overlay0_done_dsi_cmd: called from isr
- */
-void mdp4_overlay0_done_dsi_cmd(struct mdp_dma_data *dma)
-{
- int diff;
-
- if (dsi_pipe->ov_blt_addr == 0) {
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
- spin_lock(&mdp_spin_lock);
- dma->busy = FALSE;
- spin_unlock(&mdp_spin_lock);
- complete(&dma->comp);
- if (busy_wait_cnt)
- busy_wait_cnt--;
- mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
- return;
- }
-
- /* blt enabled */
- if (dsi_pipe->blt_end == 0)
- dsi_pipe->ov_cnt++;
-
- pr_debug("%s: ov_cnt=%d dmap_cnt=%d\n",
- __func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
-
- if (dsi_pipe->blt_cnt == 0) {
- /* first kickoff since blt enabled */
- mdp_intr_mask |= INTR_DMA_P_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- }
- dsi_pipe->blt_cnt++;
-
- diff = dsi_pipe->ov_cnt - dsi_pipe->dmap_cnt;
- if (diff >= 2) {
- mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
- return;
- }
-
- spin_lock(&mdp_spin_lock);
- dma->busy = FALSE;
- dma->dmap_busy = TRUE;
- spin_unlock(&mdp_spin_lock);
- complete(&dma->comp);
- if (busy_wait_cnt)
- busy_wait_cnt--;
-
- pr_debug("%s: kickoff dmap\n", __func__);
-
- mdp4_blt_xy_update(dsi_pipe);
- mdp_enable_irq(MDP_DMA2_TERM); /* enable intr */
- /* kick off dmap */
- outpdw(MDP_BASE + 0x000c, 0x0);
- mdp4_stat.kickoff_dmap++;
- /* trigger dsi cmd engine */
- mipi_dsi_cmd_mdp_start();
- mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
-}
-
-void mdp4_dsi_cmd_overlay_restore(void)
-{
- /* mutex holded by caller */
- if (dsi_mfd && dsi_pipe) {
- mdp4_dsi_cmd_dma_busy_wait(dsi_mfd);
- mipi_dsi_mdp_busy_wait(dsi_mfd);
- mdp4_overlay_update_dsi_cmd(dsi_mfd);
-
- if (dsi_pipe->ov_blt_addr)
- mdp4_dsi_blt_dmap_busy_wait(dsi_mfd);
- mdp4_dsi_cmd_overlay_kickoff(dsi_mfd, dsi_pipe);
- }
-}
-
-void mdp4_dsi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd)
-{
- unsigned long flag;
- int need_wait = 0;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (mfd->dma->dmap_busy == TRUE) {
- INIT_COMPLETION(mfd->dma->dmap_comp);
- need_wait++;
- }
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- if (need_wait) {
- /* wait until DMA finishes the current job */
- wait_for_completion(&mfd->dma->dmap_comp);
- }
-}
-
-/*
- * mdp4_dsi_cmd_dma_busy_wait: check dsi link activity
- * dsi link is a shared resource and it can only be used
- * while it is in idle state.
- * ov_mutex need to be acquired before call this function.
- */
-void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
-{
- unsigned long flag;
- int need_wait = 0;
-
-
-
- if (dsi_clock_timer.function) {
- if (time_after(jiffies, tout_expired)) {
- tout_expired = jiffies + TOUT_PERIOD;
- mod_timer(&dsi_clock_timer, tout_expired);
- tout_expired -= MS_100;
- }
- }
-
- pr_debug("%s: start pid=%d dsi_clk_on=%d\n",
- __func__, current->pid, mipi_dsi_clk_on);
-
- /* satrt dsi clock if necessary */
- spin_lock_bh(&dsi_clk_lock);
- if (mipi_dsi_clk_on == 0)
- mipi_dsi_turn_on_clks();
- spin_unlock_bh(&dsi_clk_lock);
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (mfd->dma->busy == TRUE) {
- if (busy_wait_cnt == 0)
- INIT_COMPLETION(mfd->dma->comp);
- busy_wait_cnt++;
- need_wait++;
- }
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- if (need_wait) {
- /* wait until DMA finishes the current job */
- pr_debug("%s: pending pid=%d dsi_clk_on=%d\n",
- __func__, current->pid, mipi_dsi_clk_on);
- wait_for_completion(&mfd->dma->comp);
- }
- pr_debug("%s: done pid=%d dsi_clk_on=%d\n",
- __func__, current->pid, mipi_dsi_clk_on);
-}
-
-void mdp4_dsi_cmd_kickoff_video(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
- /*
- * a video kickoff may happen before UI kickoff after
- * blt enabled. mdp4_overlay_update_dsi_cmd() need
- * to be called before kickoff.
- * vice versa for blt disabled.
- */
- if (dsi_pipe->ov_blt_addr && dsi_pipe->blt_cnt == 0)
- mdp4_overlay_update_dsi_cmd(mfd); /* first time */
- else if (dsi_pipe->ov_blt_addr == 0 && dsi_pipe->blt_cnt) {
- mdp4_overlay_update_dsi_cmd(mfd); /* last time */
- dsi_pipe->blt_cnt = 0;
- }
-
- pr_debug("%s: ov_blt_addr=%d blt_cnt=%d\n",
- __func__, (int)dsi_pipe->ov_blt_addr, dsi_pipe->blt_cnt);
-
- if (dsi_pipe->ov_blt_addr)
- mdp4_dsi_blt_dmap_busy_wait(dsi_mfd);
-
- mdp4_dsi_cmd_overlay_kickoff(mfd, pipe);
-}
-
-void mdp4_dsi_cmd_kickoff_ui(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
-
- pr_debug("%s: pid=%d\n", __func__, current->pid);
- mdp4_dsi_cmd_overlay_kickoff(mfd, pipe);
-}
-
-
-void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
- unsigned long flag;
+ if (vctrl->base_pipe == NULL)
+ mdp4_overlay_update_dsi_cmd(mfd);
mdp4_iommu_attach();
- /* change mdp clk */
- mdp4_set_perf_level();
- mipi_dsi_mdp_busy_wait(mfd);
+ atomic_set(&vctrl->suspend, 0);
+ pr_info("%s-:\n", __func__);
- if (dsi_pipe->ov_blt_addr == 0)
- mipi_dsi_cmd_mdp_start();
- mdp4_overlay_dsi_state_set(ST_DSI_PLAYING);
+ return ret;
+}
- spin_lock_irqsave(&mdp_spin_lock, flag);
- mdp_enable_irq(MDP_OVERLAY0_TERM);
- mfd->dma->busy = TRUE;
- if (dsi_pipe->ov_blt_addr)
- mfd->dma->dmap_busy = TRUE;
- /* start OVERLAY pipe */
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
- mdp4_stat.kickoff_ov0++;
+int mdp4_dsi_cmd_off(struct platform_device *pdev)
+{
+ int ret = 0;
+ int cndx = 0;
+ struct msm_fb_data_type *mfd;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ pr_info("%s+:\n", __func__);
+
+ mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+ if (pipe == NULL) {
+ pr_err("%s: NO base pipe\n", __func__);
+ return ret;
+ }
+
+ atomic_set(&vctrl->suspend, 1);
+
+ /* make sure dsi clk is on so that
+ * at panel_next_off() dsi panel can be shut off
+ */
+ mipi_dsi_ahb_ctrl(1);
+ mipi_dsi_clk_enable();
+
+ mdp4_mixer_stage_down(pipe);
+ mdp4_overlay_pipe_free(pipe);
+ vctrl->base_pipe = NULL;
+
+ pr_info("%s-:\n", __func__);
+
+ /*
+ * footswitch off
+	 * this will cause all mdp registers
+ * to be reset to default
+ * after footswitch on later
+ */
+
+ return ret;
}
void mdp_dsi_cmd_overlay_suspend(struct msm_fb_data_type *mfd)
{
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
/* dis-engage rgb0 from mixer0 */
- if (dsi_pipe) {
+ if (pipe) {
if (mfd->ref_cnt == 0) {
/* adb stop */
- if (dsi_pipe->pipe_type == OVERLAY_TYPE_BF)
- mdp4_overlay_borderfill_stage_down(dsi_pipe);
+ if (pipe->pipe_type == OVERLAY_TYPE_BF)
+ mdp4_overlay_borderfill_stage_down(pipe);
- /* dsi_pipe == rgb1 */
- mdp4_overlay_unset_mixer(dsi_pipe->mixer_num);
- dsi_pipe = NULL;
+ /* pipe == rgb1 */
+ mdp4_overlay_unset_mixer(pipe->mixer_num);
+ vctrl->base_pipe = NULL;
} else {
- mdp4_mixer_stage_down(dsi_pipe);
- mdp4_iommu_unmap(dsi_pipe);
+ mdp4_mixer_stage_down(pipe);
+ mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 1);
}
}
}
void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd)
{
- mutex_lock(&mfd->dma->ov_mutex);
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
- if (mfd && mfd->panel_power_on) {
- mdp4_dsi_cmd_dma_busy_wait(mfd);
+ vctrl = &vsync_ctrl_db[cndx];
- if (dsi_pipe && dsi_pipe->ov_blt_addr)
- mdp4_dsi_blt_dmap_busy_wait(mfd);
+ if (!mfd->panel_power_on)
+ return;
- mdp4_overlay_update_dsi_cmd(mfd);
-
- mdp4_dsi_cmd_kickoff_ui(mfd, dsi_pipe);
- mdp4_iommu_unmap(dsi_pipe);
- /* signal if pan function is waiting for the update completion */
- if (mfd->pan_waiting) {
- mfd->pan_waiting = FALSE;
- complete(&mfd->pan_comp);
- }
+ pipe = vctrl->base_pipe;
+ if (pipe == NULL) {
+ pr_err("%s: NO base pipe\n", __func__);
+ return;
}
- mutex_unlock(&mfd->dma->ov_mutex);
+
+ if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
+ mdp4_mipi_vsync_enable(mfd, pipe, 0);
+ mdp4_overlay_setup_pipe_addr(mfd, pipe);
+ mdp4_dsi_cmd_pipe_queue(0, pipe);
+ }
+ mdp4_dsi_cmd_pipe_commit();
}
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index bc4476e..398b1e6 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -24,6 +24,9 @@
#include <linux/spinlock.h>
#include <linux/fb.h>
#include <linux/msm_mdp.h>
+#include <linux/ktime.h>
+#include <linux/wakelock.h>
+#include <linux/time.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
@@ -40,38 +43,362 @@
static int first_pixel_start_y;
static int dsi_video_enabled;
-static struct mdp4_overlay_pipe *dsi_pipe;
-static struct completion dsi_video_comp;
-static int blt_cfg_changed;
+#define MAX_CONTROLLER 1
-static cmd_fxn_t display_on;
+static struct vsycn_ctrl {
+ struct device *dev;
+ int inited;
+ int update_ndx;
+ int ov_koff;
+ int ov_done;
+ atomic_t suspend;
+ int wait_vsync_cnt;
+ int blt_change;
+ int blt_free;
+ int fake_vsync;
+ struct mutex update_lock;
+ struct completion ov_comp;
+ struct completion dmap_comp;
+ struct completion vsync_comp;
+ spinlock_t spin_lock;
+ struct msm_fb_data_type *mfd;
+ struct mdp4_overlay_pipe *base_pipe;
+ struct vsync_update vlist[2];
+ int vsync_irq_enabled;
+ ktime_t vsync_time;
+ struct work_struct vsync_work;
+} vsync_ctrl_db[MAX_CONTROLLER];
-static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
+static void vsync_irq_enable(int intr, int term)
{
- /*
- * The adreno GPU hardware requires that the pitch be aligned to
- * 32 pixels for color buffers, so for the cases where the GPU
- * is writing directly to fb0, the framebuffer pitch
- * also needs to be 32 pixel aligned
- */
+ unsigned long flag;
- if (fb_index == 0)
- return ALIGN(xres, 32) * bpp;
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+ outp32(MDP_INTR_CLEAR,
+ INTR_DMA_P_DONE | INTR_OVERLAY0_DONE | INTR_PRIMARY_VSYNC);
+ mdp_intr_mask |= intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_enable_irq(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ pr_debug("%s: IRQ-en done, term=%x\n", __func__, term);
+}
+
+static void vsync_irq_disable(int intr, int term)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+ outp32(MDP_INTR_CLEAR,
+ INTR_DMA_P_DONE | INTR_OVERLAY0_DONE | INTR_PRIMARY_VSYNC);
+ mdp_intr_mask &= ~intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_disable_irq_nosync(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ pr_debug("%s: IRQ-dis done, term=%x\n", __func__, term);
+}
+
+static void mdp4_overlay_dsi_video_start(void)
+{
+ if (!dsi_video_enabled) {
+ /* enable DSI block */
+ mdp4_iommu_attach();
+ mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
+ dsi_video_enabled = 1;
+ }
+}
+
+/*
+ * mdp4_dsi_video_pipe_queue:
+ * called from thread context
+ */
+void mdp4_dsi_video_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pp;
+ int undx;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ /* start timing generator & mmu if they are not started yet */
+ mdp4_overlay_dsi_video_start();
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+
+ pp = &vp->plist[pipe->pipe_ndx - 1]; /* ndx start from 1 */
+
+ pr_debug("%s: vndx=%d pipe=%x ndx=%d num=%d pid=%d\n",
+ __func__, undx, (int)pipe, pipe->pipe_ndx, pipe->pipe_num,
+ current->pid);
+
+ *pp = *pipe; /* keep it */
+ vp->update_cnt++;
+ mutex_unlock(&vctrl->update_lock);
+ mdp4_stat.overlay_play[pipe->mixer_num]++;
+}
+
+static void mdp4_dsi_video_blt_ov_update(struct mdp4_overlay_pipe *pipe);
+static void mdp4_dsi_video_wait4dmap(int cndx);
+static void mdp4_dsi_video_wait4ov(int cndx);
+
+int mdp4_dsi_video_pipe_commit(void)
+{
+
+ int i, undx;
+ int mixer = 0;
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+ int cnt = 0;
+
+ vctrl = &vsync_ctrl_db[0];
+
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+ pipe = vctrl->base_pipe;
+ mixer = pipe->mixer_num;
+
+ if (vp->update_cnt == 0) {
+ mutex_unlock(&vctrl->update_lock);
+ return cnt;
+ }
+
+ vctrl->update_ndx++;
+ vctrl->update_ndx &= 0x01;
+ vp->update_cnt = 0; /* reset */
+ if (vctrl->blt_free) {
+ vctrl->blt_free--;
+ if (vctrl->blt_free == 0)
+ mdp4_free_writeback_buf(vctrl->mfd, mixer);
+ }
+ mutex_unlock(&vctrl->update_lock);
+
+ /* free previous committed iommu back to pool */
+ mdp4_overlay_iommu_unmap_freelist(mixer);
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (vctrl->ov_koff != vctrl->ov_done) {
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+ pr_err("%s: Error, frame dropped %d %d\n", __func__,
+ vctrl->ov_koff, vctrl->ov_done);
+ return 0;
+ }
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ if (vctrl->blt_change) {
+ pipe = vctrl->base_pipe;
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ INIT_COMPLETION(vctrl->dmap_comp);
+ INIT_COMPLETION(vctrl->ov_comp);
+ vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+ mdp4_dsi_video_wait4dmap(0);
+ if (pipe->ov_blt_addr)
+ mdp4_dsi_video_wait4ov(0);
+ }
+
+ pipe = vp->plist;
+ for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+ if (pipe->pipe_used) {
+ cnt++;
+ mdp4_overlay_vsync_commit(pipe);
+ /* free previous iommu to freelist
+ * which will be freed at next
+ * pipe_commit
+ */
+ mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+ pipe->pipe_used = 0; /* clear */
+ }
+ }
+
+ mdp4_mixer_stage_commit(mixer);
+
+ pipe = vctrl->base_pipe;
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (pipe->ov_blt_addr) {
+ mdp4_dsi_video_blt_ov_update(pipe);
+ pipe->ov_cnt++;
+ INIT_COMPLETION(vctrl->ov_comp);
+ vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+ mb();
+ vctrl->ov_koff++;
+ /* kickoff overlay engine */
+ mdp4_stat.kickoff_ov0++;
+ outpdw(MDP_BASE + 0x0004, 0);
+ } else {
+ /* schedule second phase update at dmap */
+ INIT_COMPLETION(vctrl->dmap_comp);
+ vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+ }
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ mdp4_stat.overlay_commit[pipe->mixer_num]++;
+
+ return cnt;
+}
+
+void mdp4_dsi_video_vsync_ctrl(int cndx, int enable)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (vctrl->fake_vsync) {
+ vctrl->fake_vsync = 0;
+ schedule_work(&vctrl->vsync_work);
+ }
+
+ if (vctrl->vsync_irq_enabled == enable)
+ return;
+
+ pr_debug("%s: vsync enable=%d\n", __func__, enable);
+
+ vctrl->vsync_irq_enabled = enable;
+
+ if (enable)
+ vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
else
- return xres * bpp;
+ vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
}
-void mdp4_dsi_video_fxn_register(cmd_fxn_t fxn)
+void mdp4_dsi_video_wait4vsync(int cndx, long long *vtime)
{
- display_on = fxn;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (atomic_read(&vctrl->suspend) > 0) {
+ *vtime = -1;
+ return;
+ }
+
+ /* start timing generator & mmu if they are not started yet */
+ mdp4_overlay_dsi_video_start();
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (vctrl->wait_vsync_cnt == 0)
+ INIT_COMPLETION(vctrl->vsync_comp);
+
+ vctrl->wait_vsync_cnt++;
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ wait_for_completion(&vctrl->vsync_comp);
+ mdp4_stat.wait4vsync0++;
+
+ *vtime = ktime_to_ns(vctrl->vsync_time);
}
-static void mdp4_overlay_dsi_video_wait4event(struct msm_fb_data_type *mfd,
- int intr_done);
-
-void mdp4_dsi_video_base_swap(struct mdp4_overlay_pipe *pipe)
+static void mdp4_dsi_video_wait4dmap(int cndx)
{
- dsi_pipe = pipe;
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ wait_for_completion(&vctrl->dmap_comp);
+}
+
+static void mdp4_dsi_video_wait4ov(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ wait_for_completion(&vctrl->ov_comp);
+}
+
+static void send_vsync_work(struct work_struct *work)
+{
+ struct vsycn_ctrl *vctrl =
+ container_of(work, typeof(*vctrl), vsync_work);
+ char buf[64];
+ char *envp[2];
+
+ snprintf(buf, sizeof(buf), "VSYNC=%llu",
+ ktime_to_ns(vctrl->vsync_time));
+ envp[0] = buf;
+ envp[1] = NULL;
+ kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+void mdp4_dsi_vsync_init(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ pr_info("%s: ndx=%d\n", __func__, cndx);
+
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->inited)
+ return;
+
+ vctrl->inited = 1;
+ vctrl->update_ndx = 0;
+ mutex_init(&vctrl->update_lock);
+ init_completion(&vctrl->vsync_comp);
+ init_completion(&vctrl->dmap_comp);
+ init_completion(&vctrl->ov_comp);
+ atomic_set(&vctrl->suspend, 0);
+ spin_lock_init(&vctrl->spin_lock);
+ INIT_WORK(&vctrl->vsync_work, send_vsync_work);
+}
+
+void mdp4_dsi_video_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ vctrl->base_pipe = pipe;
}
int mdp4_dsi_video_on(struct platform_device *pdev)
@@ -113,8 +440,11 @@
struct fb_var_screeninfo *var;
struct msm_fb_data_type *mfd;
struct mdp4_overlay_pipe *pipe;
- int ret;
+ int ret = 0;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ vctrl = &vsync_ctrl_db[cndx];
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
if (!mfd)
@@ -123,6 +453,12 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
+ vctrl->mfd = mfd;
+ vctrl->dev = mfd->fbi->dev;
+
+ /* mdp clock on */
+ mdp_clk_ctrl(1);
+
fbi = mfd->fbi;
var = &fbi->var;
@@ -130,7 +466,7 @@
buf = (uint8 *) fbi->fix.smem_start;
buf_offset = calc_fb_offset(mfd, fbi, bpp);
- if (dsi_pipe == NULL) {
+ if (vctrl->base_pipe == NULL) {
ptype = mdp4_overlay_format2type(mfd->fb_imgType);
if (ptype < 0)
printk(KERN_INFO "%s: format2type failed\n", __func__);
@@ -148,17 +484,16 @@
if (ret < 0)
printk(KERN_INFO "%s: format2type failed\n", __func__);
- dsi_pipe = pipe; /* keep it */
- init_completion(&dsi_video_comp);
-
- mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
pipe->ov_blt_addr = 0;
pipe->dma_blt_addr = 0;
+ vctrl->base_pipe = pipe; /* keep it */
+ mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
} else {
- pipe = dsi_pipe;
+ pipe = vctrl->base_pipe;
}
+#ifdef CONTINUOUS_SPLASH
/* MDP cmd block enable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
@@ -171,12 +506,7 @@
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
mipi_dsi_controller_cfg(0);
}
-
- if (is_mdp4_hw_reset()) {
- mdp4_hw_init();
- outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
- }
-
+#endif
pipe->src_height = fbi->var.yres;
pipe->src_width = fbi->var.xres;
pipe->src_h = fbi->var.yres;
@@ -194,13 +524,16 @@
pipe->dst_h = fbi->var.yres;
pipe->dst_w = fbi->var.xres;
+ atomic_set(&vctrl->suspend, 0);
+
mdp4_overlay_dmap_xy(pipe); /* dma_p */
mdp4_overlay_dmap_cfg(mfd, 1);
mdp4_overlay_rgb_setup(pipe);
+ mdp4_overlayproc_cfg(pipe);
+
mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_up(pipe);
- mdp4_overlayproc_cfg(pipe);
/*
* DSI timing setting
@@ -261,6 +594,7 @@
ctrl_polarity =
(data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x4, hsync_ctrl);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x8, vsync_period * hsync_period);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0xc,
@@ -275,82 +609,97 @@
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x2c, dsi_underflow_clr);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x30, dsi_hsync_skew);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x38, ctrl_polarity);
- mdp4_overlay_reg_flush(pipe, 1);
- mdp4_mixer_stage_up(pipe);
-
- mdp_histogram_ctrl_all(TRUE);
-
- ret = panel_next_on(pdev);
- if (ret == 0) {
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- if (display_on != NULL) {
- msleep(50);
- display_on(pdev);
- }
- }
- /* MDP cmd block disable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ mdp_histogram_ctrl_all(TRUE);
return ret;
}
int mdp4_dsi_video_off(struct platform_device *pdev)
{
int ret = 0;
+ int cndx = 0;
struct msm_fb_data_type *mfd;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ atomic_set(&vctrl->suspend, 1);
+
+ while (vctrl->wait_vsync_cnt)
+ msleep(20); /* >= 17 ms */
+
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
+
dsi_video_enabled = 0;
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
mdp_histogram_ctrl_all(FALSE);
- ret = panel_next_off(pdev);
- /* delay to make sure the last frame finishes */
- msleep(20);
-
- /* dis-engage rgb0 from mixer0 */
- if (dsi_pipe) {
+ if (pipe) {
if (mfd->ref_cnt == 0) {
/* adb stop */
- if (dsi_pipe->pipe_type == OVERLAY_TYPE_BF)
- mdp4_overlay_borderfill_stage_down(dsi_pipe);
+ if (pipe->pipe_type == OVERLAY_TYPE_BF)
+ mdp4_overlay_borderfill_stage_down(pipe);
- /* dsi_pipe == rgb1 */
- mdp4_overlay_unset_mixer(dsi_pipe->mixer_num);
- dsi_pipe = NULL;
+ mdp4_overlay_unset_mixer(pipe->mixer_num);
+ vctrl->base_pipe = NULL;
} else {
- mdp4_mixer_stage_down(dsi_pipe);
- mdp4_iommu_unmap(dsi_pipe);
+ /* system suspending */
+ mdp4_mixer_stage_down(vctrl->base_pipe);
+ mdp4_overlay_iommu_pipe_free(
+ vctrl->base_pipe->pipe_ndx, 1);
}
}
+ vctrl->fake_vsync = 1;
+
+ /* mdp clock off */
+ mdp_clk_ctrl(0);
+ mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
return ret;
}
+static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
+{
+ /*
+ * The adreno GPU hardware requires that the pitch be aligned to
+ * 32 pixels for color buffers, so for the cases where the GPU
+ * is writing directly to fb0, the framebuffer pitch
+ * also needs to be 32 pixel aligned
+ */
+
+ if (fb_index == 0)
+ return ALIGN(xres, 32) * bpp;
+ else
+ return xres * bpp;
+}
+
/* 3D side by side */
void mdp4_dsi_video_3d_sbys(struct msm_fb_data_type *mfd,
struct msmfb_overlay_3d *r3d)
{
struct fb_info *fbi;
- struct mdp4_overlay_pipe *pipe;
unsigned int buf_offset;
int bpp;
uint8 *buf = NULL;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
- if (dsi_pipe == NULL)
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (vctrl->base_pipe == NULL)
return;
- dsi_pipe->is_3d = r3d->is_3d;
- dsi_pipe->src_height_3d = r3d->height;
- dsi_pipe->src_width_3d = r3d->width;
-
- pipe = dsi_pipe;
+ pipe = vctrl->base_pipe;
+ pipe->is_3d = r3d->is_3d;
+ pipe->src_height_3d = r3d->height;
+ pipe->src_width_3d = r3d->width;
if (pipe->is_3d)
mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_SIDE_BY_SIDE);
@@ -402,12 +751,10 @@
mdp4_overlay_dmap_cfg(mfd, 1);
mdp4_overlay_reg_flush(pipe, 1);
+
mdp4_mixer_stage_up(pipe);
mb();
-
- /* wait for vsycn */
- mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
}
static void mdp4_dsi_video_blt_ov_update(struct mdp4_overlay_pipe *pipe)
@@ -416,11 +763,9 @@
int bpp;
char *overlay_base;
-
if (pipe->ov_blt_addr == 0)
return;
-
#ifdef BLT_RGB565
bpp = 2; /* overlay ouput is RGB565 */
#else
@@ -460,156 +805,98 @@
MDP_OUTP(MDP_BASE + 0x90008, addr);
}
-/*
- * mdp4_overlay_dsi_video_wait4event:
- * INTR_DMA_P_DONE and INTR_PRIMARY_VSYNC event only
- * no INTR_OVERLAY0_DONE event allowed.
- */
-static void mdp4_overlay_dsi_video_wait4event(struct msm_fb_data_type *mfd,
- int intr_done)
+void mdp4_overlay_dsi_video_set_perf(struct msm_fb_data_type *mfd)
{
- unsigned long flag;
- unsigned int data;
-
- data = inpdw(MDP_BASE + DSI_VIDEO_BASE);
- data &= 0x01;
- if (data == 0) /* timing generator disabled */
- return;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- INIT_COMPLETION(dsi_video_comp);
- mfd->dma->waiting = TRUE;
- outp32(MDP_INTR_CLEAR, intr_done);
- mdp_intr_mask |= intr_done;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- mdp_enable_irq(MDP_DMA2_TERM); /* enable intr */
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- wait_for_completion(&dsi_video_comp);
- mdp_disable_irq(MDP_DMA2_TERM);
-}
-
-static void mdp4_overlay_dsi_video_dma_busy_wait(struct msm_fb_data_type *mfd)
-{
- unsigned long flag;
- int need_wait = 0;
-
- pr_debug("%s: start pid=%d\n", __func__, current->pid);
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (mfd->dma->busy == TRUE) {
- INIT_COMPLETION(mfd->dma->comp);
- need_wait++;
- }
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- if (need_wait) {
- /* wait until DMA finishes the current job */
- pr_debug("%s: pending pid=%d\n", __func__, current->pid);
- wait_for_completion(&mfd->dma->comp);
- }
- pr_debug("%s: done pid=%d\n", __func__, current->pid);
-}
-
-void mdp4_overlay_dsi_video_start(void)
-{
- if (!dsi_video_enabled) {
- /* enable DSI block */
- mdp4_iommu_attach();
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- dsi_video_enabled = 1;
- }
-}
-
-void mdp4_overlay_dsi_video_vsync_push(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
- unsigned long flag;
-
- if (pipe->flags & MDP_OV_PLAY_NOWAIT)
- return;
-
- if (dsi_pipe->ov_blt_addr) {
- mdp4_overlay_dsi_video_dma_busy_wait(mfd);
-
- mdp4_dsi_video_blt_ov_update(dsi_pipe);
- dsi_pipe->ov_cnt++;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
- mdp_intr_mask |= INTR_OVERLAY0_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- mdp_enable_irq(MDP_OVERLAY0_TERM);
- mfd->dma->busy = TRUE;
- mb(); /* make sure all registers updated */
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay engine */
- mdp4_stat.kickoff_ov0++;
- mb();
- mdp4_overlay_dsi_video_wait4event(mfd, INTR_DMA_P_DONE);
- } else {
- mdp4_overlay_dsi_video_wait4event(mfd, INTR_PRIMARY_VSYNC);
- }
-
+ /* change mdp clk while mdp is idle */
mdp4_set_perf_level();
}
+
/*
* mdp4_primary_vsync_dsi_video: called from isr
*/
void mdp4_primary_vsync_dsi_video(void)
{
- complete_all(&dsi_video_comp);
+ int cndx;
+ struct vsycn_ctrl *vctrl;
+
+
+ cndx = 0;
+ vctrl = &vsync_ctrl_db[cndx];
+ pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
+ vctrl->vsync_time = ktime_get();
+ schedule_work(&vctrl->vsync_work);
+
+ spin_lock(&vctrl->spin_lock);
+ if (vctrl->wait_vsync_cnt) {
+ complete_all(&vctrl->vsync_comp);
+ vctrl->wait_vsync_cnt = 0;
+ }
+ spin_unlock(&vctrl->spin_lock);
}
/*
- * mdp4_dma_p_done_dsi_video: called from isr
+ * mdp4_dmap_done_dsi_video: called from isr
*/
-void mdp4_dma_p_done_dsi_video(struct mdp_dma_data *dma)
+void mdp4_dmap_done_dsi_video(int cndx)
{
- if (blt_cfg_changed) {
- mdp_is_in_isr = TRUE;
- if (dsi_pipe->ov_blt_addr) {
- mdp4_overlay_dmap_xy(dsi_pipe);
- mdp4_overlayproc_cfg(dsi_pipe);
- } else {
- mdp4_overlayproc_cfg(dsi_pipe);
- mdp4_overlay_dmap_xy(dsi_pipe);
- }
- mdp_is_in_isr = FALSE;
- if (dsi_pipe->ov_blt_addr) {
- mdp4_dsi_video_blt_ov_update(dsi_pipe);
- dsi_pipe->ov_cnt++;
- outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
- mdp_intr_mask |= INTR_OVERLAY0_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->busy = TRUE;
- mdp_enable_irq(MDP_OVERLAY0_TERM);
- /* kickoff overlay engine */
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ spin_lock(&vctrl->spin_lock);
+ vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+ if (vctrl->blt_change) {
+ mdp4_overlayproc_cfg(pipe);
+ mdp4_overlay_dmap_xy(pipe);
+ if (pipe->ov_blt_addr) {
+ mdp4_dsi_video_blt_ov_update(pipe);
+ pipe->ov_cnt++;
+ /* Prefill one frame */
+ vsync_irq_enable(INTR_OVERLAY0_DONE,
+ MDP_OVERLAY0_TERM);
+ /* kickoff overlay0 engine */
+ mdp4_stat.kickoff_ov0++;
+ vctrl->ov_koff++; /* make up for prefill */
outpdw(MDP_BASE + 0x0004, 0);
}
- blt_cfg_changed = 0;
+ vctrl->blt_change = 0;
}
- complete_all(&dsi_video_comp);
+
+ complete_all(&vctrl->dmap_comp);
+ mdp4_overlay_dma_commit(cndx);
+ spin_unlock(&vctrl->spin_lock);
}
/*
- * mdp4_overlay1_done_dsi: called from isr
+ * mdp4_overlay0_done_dsi_video: called from isr
*/
-void mdp4_overlay0_done_dsi_video(struct mdp_dma_data *dma)
+void mdp4_overlay0_done_dsi_video(int cndx)
{
- spin_lock(&mdp_spin_lock);
- dma->busy = FALSE;
- if (dsi_pipe->ov_blt_addr == 0) {
- spin_unlock(&mdp_spin_lock);
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ spin_lock(&vctrl->spin_lock);
+ vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+ vctrl->ov_done++;
+ complete_all(&vctrl->ov_comp);
+ if (pipe->ov_blt_addr == 0) {
+ spin_unlock(&vctrl->spin_lock);
return;
}
- mdp4_dsi_video_blt_dmap_update(dsi_pipe);
- dsi_pipe->dmap_cnt++;
- mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
- spin_unlock(&mdp_spin_lock);
- complete(&dma->comp);
+
+ mdp4_dsi_video_blt_dmap_update(pipe);
+ pipe->dmap_cnt++;
+ spin_unlock(&vctrl->spin_lock);
}
/*
@@ -619,8 +906,12 @@
static void mdp4_dsi_video_do_blt(struct msm_fb_data_type *mfd, int enable)
{
unsigned long flag;
- int data;
- int change = 0;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
@@ -629,55 +920,33 @@
return;
}
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (enable && dsi_pipe->ov_blt_addr == 0) {
- dsi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
- dsi_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
- dsi_pipe->blt_cnt = 0;
- dsi_pipe->ov_cnt = 0;
- dsi_pipe->dmap_cnt = 0;
+ spin_lock_irqsave(&vctrl->spin_lock, flag);
+ if (enable && pipe->ov_blt_addr == 0) {
+ pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+ pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+ pipe->ov_cnt = 0;
+ pipe->dmap_cnt = 0;
+ vctrl->ov_koff = 0;
+ vctrl->ov_done = 0;
+ vctrl->blt_free = 0;
mdp4_stat.blt_dsi_video++;
- change++;
- } else if (enable == 0 && dsi_pipe->ov_blt_addr) {
- dsi_pipe->ov_blt_addr = 0;
- dsi_pipe->dma_blt_addr = 0;
- change++;
+ vctrl->blt_change++;
+ } else if (enable == 0 && pipe->ov_blt_addr) {
+ pipe->ov_blt_addr = 0;
+ pipe->dma_blt_addr = 0;
+ vctrl->blt_free = 4; /* 4 commits to free wb buf */
+ vctrl->blt_change++;
}
- if (!change) {
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ pr_info("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__,
+ vctrl->blt_change, enable, (int)pipe->ov_blt_addr);
+
+ if (!vctrl->blt_change) {
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
return;
}
- pr_debug("%s: enable=%d ov_blt_addr=%x\n", __func__,
- enable, (int)dsi_pipe->ov_blt_addr);
- blt_cfg_changed = 1;
-
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- /*
- * may need mutex here to sync with whom dsiable
- * timing generator
- */
- data = inpdw(MDP_BASE + DSI_VIDEO_BASE);
- data &= 0x01;
- if (data) { /* timing generator enabled */
- mdp4_overlay_dsi_video_wait4event(mfd, INTR_DMA_P_DONE);
- msleep(20);
- }
-
-
-}
-
-int mdp4_dsi_video_overlay_blt_offset(struct msm_fb_data_type *mfd,
- struct msmfb_overlay_blt *req)
-{
- req->offset = 0;
- req->width = dsi_pipe->src_width;
- req->height = dsi_pipe->src_height;
- req->bpp = dsi_pipe->bpp;
-
- return sizeof(*req);
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
}
void mdp4_dsi_video_overlay_blt(struct msm_fb_data_type *mfd,
@@ -702,36 +971,36 @@
uint8 *buf;
unsigned int buf_offset;
int bpp;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
struct mdp4_overlay_pipe *pipe;
- if (!mfd->panel_power_on)
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (!pipe || !mfd->panel_power_on)
return;
- /* no need to power on cmd block since it's dsi video mode */
- bpp = fbi->var.bits_per_pixel / 8;
- buf = (uint8 *) fbi->fix.smem_start;
- buf_offset = calc_fb_offset(mfd, fbi, bpp);
+ pr_debug("%s: cpu=%d pid=%d\n", __func__,
+ smp_processor_id(), current->pid);
+ if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
+ bpp = fbi->var.bits_per_pixel / 8;
+ buf = (uint8 *) fbi->fix.smem_start;
+ buf_offset = calc_fb_offset(mfd, fbi, bpp);
- mutex_lock(&mfd->dma->ov_mutex);
+ if (mfd->display_iova)
+ pipe->srcp0_addr = mfd->display_iova + buf_offset;
+ else
+ pipe->srcp0_addr = (uint32)(buf + buf_offset);
- pipe = dsi_pipe;
- if (pipe->pipe_used == 0 ||
- pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) {
- pr_err("%s: NOT baselayer\n", __func__);
- mutex_unlock(&mfd->dma->ov_mutex);
- return;
+ mdp4_dsi_video_pipe_queue(0, pipe);
}
- if (mfd->display_iova)
- pipe->srcp0_addr = mfd->display_iova + buf_offset;
- else
- pipe->srcp0_addr = (uint32)(buf + buf_offset);
+ mdp4_dsi_video_pipe_commit();
- mdp4_overlay_rgb_setup(pipe);
- mdp4_overlay_reg_flush(pipe, 1);
- mdp4_mixer_stage_up(pipe);
- mdp4_overlay_dsi_video_start();
- mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
- mdp4_iommu_unmap(pipe);
- mutex_unlock(&mfd->dma->ov_mutex);
+ if (pipe->ov_blt_addr)
+ mdp4_dsi_video_wait4ov(0);
+ else
+ mdp4_dsi_video_wait4dmap(0);
}
+
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index b90812f..57a07d0 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -33,6 +33,8 @@
#define DTV_BASE 0xD0000
+static int dtv_enabled;
+
/*#define DEBUG*/
#ifdef DEBUG
static void __mdp_outp(uint32 port, uint32 value)
@@ -51,15 +53,298 @@
static int first_pixel_start_x;
static int first_pixel_start_y;
-static int dtv_enabled;
-static struct mdp4_overlay_pipe *dtv_pipe;
-static DECLARE_COMPLETION(dtv_comp);
-
-void mdp4_dtv_base_swap(struct mdp4_overlay_pipe *pipe)
+void mdp4_dtv_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
{
+#ifdef BYPASS4
if (hdmi_prim_display)
dtv_pipe = pipe;
+#endif
+}
+
+#define MAX_CONTROLLER 1
+
+static struct vsycn_ctrl {
+ struct device *dev;
+ int inited;
+ int update_ndx;
+ int dmae_intr_cnt;
+ atomic_t suspend;
+ int dmae_wait_cnt;
+ int wait_vsync_cnt;
+ int blt_change;
+ struct mutex update_lock;
+ struct completion dmae_comp;
+ struct completion vsync_comp;
+ spinlock_t spin_lock;
+ struct mdp4_overlay_pipe *base_pipe;
+ struct vsync_update vlist[2];
+ int vsync_irq_enabled;
+ ktime_t vsync_time;
+ struct work_struct vsync_work;
+} vsync_ctrl_db[MAX_CONTROLLER];
+
+static void vsync_irq_enable(int intr, int term)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+ outp32(MDP_INTR_CLEAR,
+ INTR_DMA_E_DONE | INTR_OVERLAY1_DONE | INTR_EXTERNAL_VSYNC);
+ mdp_intr_mask |= intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_enable_irq(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ pr_debug("%s: IRQ-en done, term=%x\n", __func__, term);
+}
+
+static void vsync_irq_disable(int intr, int term)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+ outp32(MDP_INTR_CLEAR,
+ INTR_DMA_P_DONE | INTR_OVERLAY0_DONE | INTR_PRIMARY_VSYNC);
+ mdp_intr_mask &= ~intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_disable_irq_nosync(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ pr_debug("%s: IRQ-dis done, term=%x\n", __func__, term);
+}
+
+void mdp4_overlay_dtv_start(void)
+{
+ if (!dtv_enabled) {
+ /* enable DTV block */
+ mdp4_iommu_attach();
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ MDP_OUTP(MDP_BASE + DTV_BASE, 1);
+ mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ dtv_enabled = 1;
+ }
+}
+
+/*
+ * mdp4_dtv_pipe_queue:
+ * called from thread context
+ */
+void mdp4_dtv_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pp;
+ int undx;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ /* start timing generator & mmu if they are not started yet */
+ mdp4_overlay_dtv_start();
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+
+ pp = &vp->plist[pipe->pipe_ndx - 1]; /* ndx start from 1 */
+
+ pr_debug("%s: vndx=%d pipe_ndx=%d flags=%x pid=%d\n",
+ __func__, undx, pipe->pipe_ndx, pipe->flags, current->pid);
+
+ *pp = *pipe; /* keep it */
+ vp->update_cnt++;
+ mutex_unlock(&vctrl->update_lock);
+}
+
+static void mdp4_dtv_blt_ov_update(struct mdp4_overlay_pipe *pipe);
+
+int mdp4_dtv_pipe_commit(void)
+{
+
+ int i, undx;
+ int mixer = 0;
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+ int cnt = 0;
+
+ vctrl = &vsync_ctrl_db[0];
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+ pipe = vctrl->base_pipe;
+ mixer = pipe->mixer_num;
+ mdp4_overlay_iommu_unmap_freelist(mixer);
+
+ if (vp->update_cnt == 0) {
+ mutex_unlock(&vctrl->update_lock);
+ return 0;
+ }
+
+ vctrl->update_ndx++;
+ vctrl->update_ndx &= 0x01;
+ vp->update_cnt = 0; /* reset */
+ mutex_unlock(&vctrl->update_lock);
+
+ pipe = vp->plist;
+ for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+ if (pipe->pipe_used) {
+ cnt++;
+ mdp4_overlay_vsync_commit(pipe);
+ pipe->pipe_used = 0; /* clear */
+ }
+ }
+ mdp4_mixer_stage_commit(mixer);
+
+ pipe = vctrl->base_pipe;
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (pipe->ov_blt_addr) {
+ mdp4_dtv_blt_ov_update(pipe);
+ pipe->blt_ov_done++;
+ vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
+ mb();
+ pipe->blt_ov_koff++;
+ /* kickoff overlay1 engine */
+ mdp4_stat.kickoff_ov1++;
+ outpdw(MDP_BASE + 0x0008, 0);
+ } else if (vctrl->dmae_intr_cnt == 0) {
+ /* schedule second phase update at dmap */
+ vctrl->dmae_intr_cnt++;
+ vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM);
+ }
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ return cnt;
+}
+
+void mdp4_dtv_vsync_ctrl(int cndx, int enable)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (vctrl->vsync_irq_enabled == enable)
+ return;
+
+ pr_debug("%s: vsync enable=%d\n", __func__, enable);
+
+ vctrl->vsync_irq_enabled = enable;
+
+ if (enable)
+ vsync_irq_enable(INTR_EXTERNAL_VSYNC, MDP_EXTER_VSYNC_TERM);
+ else
+ vsync_irq_disable(INTR_EXTERNAL_VSYNC, MDP_EXTER_VSYNC_TERM);
+}
+
+void mdp4_dtv_wait4vsync(int cndx, long long *vtime)
+{
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+
+ if (vctrl->wait_vsync_cnt == 0)
+ INIT_COMPLETION(vctrl->vsync_comp);
+ vctrl->wait_vsync_cnt++;
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ wait_for_completion(&vctrl->vsync_comp);
+ mdp4_stat.wait4vsync1++;
+
+ *vtime = ktime_to_ns(vctrl->vsync_time);
+}
+
+static void mdp4_dtv_wait4dmae(int cndx)
+{
+ unsigned long flags;
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (vctrl->dmae_wait_cnt == 0) {
+ INIT_COMPLETION(vctrl->dmae_comp);
+ if (vctrl->dmae_intr_cnt == 0) {
+ vctrl->dmae_intr_cnt++;
+ vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM);
+ }
+ }
+ vctrl->dmae_wait_cnt++;
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ wait_for_completion(&vctrl->dmae_comp);
+ pr_info("%s: pid=%d after wait\n", __func__, current->pid);
+}
+
+static void send_vsync_work(struct work_struct *work)
+{
+ struct vsycn_ctrl *vctrl =
+ container_of(work, typeof(*vctrl), vsync_work);
+ char buf[64];
+ char *envp[2];
+
+ snprintf(buf, sizeof(buf), "VSYNC=%llu",
+ ktime_to_ns(vctrl->vsync_time));
+ envp[0] = buf;
+ envp[1] = NULL;
+ kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+void mdp4_dtv_vsync_init(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ pr_info("%s: ndx=%d\n", __func__, cndx);
+
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->inited)
+ return;
+
+ vctrl->inited = 1;
+ vctrl->update_ndx = 0;
+ mutex_init(&vctrl->update_lock);
+ init_completion(&vctrl->vsync_comp);
+ atomic_set(&vctrl->suspend, 0);
+ spin_lock_init(&vctrl->spin_lock);
+ INIT_WORK(&vctrl->vsync_work, send_vsync_work);
}
static int mdp4_dtv_start(struct msm_fb_data_type *mfd)
@@ -103,9 +388,6 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- if (dtv_pipe == NULL)
- return -EINVAL;
-
fbi = mfd->fbi;
var = &fbi->var;
@@ -201,26 +483,22 @@
/* Test pattern 8 x 8 pixel */
/* MDP_OUTP(MDP_BASE + DTV_BASE + 0x4C, 0x80000808); */
- mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ /* enable DTV block */
+ MDP_OUTP(MDP_BASE + DTV_BASE, 1);
return 0;
}
static int mdp4_dtv_stop(struct msm_fb_data_type *mfd)
{
- if (dtv_pipe == NULL)
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->base_pipe == NULL)
return -EINVAL;
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- msleep(20);
MDP_OUTP(MDP_BASE + DTV_BASE, 0);
- dtv_enabled = 0;
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
return 0;
}
@@ -229,6 +507,10 @@
{
struct msm_fb_data_type *mfd;
int ret = 0;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+
+ vctrl = &vsync_ctrl_db[cndx];
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
@@ -238,16 +520,30 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
+ vctrl->dev = mfd->fbi->dev;
+
mdp_footswitch_ctrl(TRUE);
+ /* Mdp clock enable */
+ mdp_clk_ctrl(1);
+
mdp4_overlay_panel_mode(MDP4_MIXER1, MDP4_PANEL_DTV);
- if (dtv_pipe != NULL)
- ret = mdp4_dtv_start(mfd);
+
+ /* Allocate dtv_pipe at dtv_on*/
+ if (vctrl->base_pipe == NULL) {
+ if (mdp4_overlay_dtv_set(mfd, NULL)) {
+ pr_warn("%s: dtv_pipe is NULL, dtv_set failed\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
ret = panel_next_on(pdev);
if (ret != 0)
- dev_warn(&pdev->dev, "mdp4_overlay_dtv: panel_next_on failed");
+ pr_warn("%s: panel_next_on failed", __func__);
- dev_info(&pdev->dev, "mdp4_overlay_dtv: on");
+ atomic_set(&vctrl->suspend, 0);
+
+ pr_info("%s:\n", __func__);
return ret;
}
@@ -256,47 +552,112 @@
{
struct msm_fb_data_type *mfd;
int ret = 0;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
- if (dtv_pipe != NULL) {
+ vctrl = &vsync_ctrl_db[cndx];
+
+ atomic_set(&vctrl->suspend, 1);
+
+ while (vctrl->wait_vsync_cnt)
+ msleep(20); /* >= 17 ms */
+
+ pipe = vctrl->base_pipe;
+ if (pipe != NULL) {
mdp4_dtv_stop(mfd);
if (hdmi_prim_display && mfd->ref_cnt == 0) {
/* adb stop */
- if (dtv_pipe->pipe_type == OVERLAY_TYPE_BF)
- mdp4_overlay_borderfill_stage_down(dtv_pipe);
+ if (pipe->pipe_type == OVERLAY_TYPE_BF)
+ mdp4_overlay_borderfill_stage_down(pipe);
- /* dtv_pipe == rgb1 */
- mdp4_overlay_unset_mixer(dtv_pipe->mixer_num);
- dtv_pipe = NULL;
+ /* pipe == rgb2 */
+ mdp4_overlay_unset_mixer(pipe->mixer_num);
+ vctrl->base_pipe = NULL;
} else {
- mdp4_mixer_stage_down(dtv_pipe);
- mdp4_overlay_pipe_free(dtv_pipe);
- mdp4_iommu_unmap(dtv_pipe);
- dtv_pipe = NULL;
+ mdp4_mixer_stage_down(pipe);
+ mdp4_overlay_pipe_free(pipe);
+ vctrl->base_pipe = NULL;
}
}
+
mdp4_overlay_panel_mode_unset(MDP4_MIXER1, MDP4_PANEL_DTV);
ret = panel_next_off(pdev);
mdp_footswitch_ctrl(FALSE);
- dev_info(&pdev->dev, "mdp4_overlay_dtv: off");
+ /* Mdp clock disable */
+ mdp_clk_ctrl(0);
+
+ pr_info("%s:\n", __func__);
return ret;
}
+static void mdp4_dtv_blt_ov_update(struct mdp4_overlay_pipe *pipe)
+{
+ uint32 off, addr;
+ int bpp;
+ char *overlay_base;
+
+ if (pipe->ov_blt_addr == 0)
+ return;
+
+#ifdef BLT_RGB565
+ bpp = 2; /* overlay output is RGB565 */
+#else
+ bpp = 3; /* overlay output is RGB888 */
+#endif
+ off = 0;
+ if (pipe->blt_ov_done & 0x01)
+ off = pipe->src_height * pipe->src_width * bpp;
+ addr = pipe->ov_blt_addr + off;
+
+ /* overlay 1 */
+ overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x10000 */
+ outpdw(overlay_base + 0x000c, addr);
+ outpdw(overlay_base + 0x001c, addr);
+}
+
+static void mdp4_dtv_blt_dmae_update(struct mdp4_overlay_pipe *pipe)
+{
+ uint32 off, addr;
+ int bpp;
+
+ if (pipe->ov_blt_addr == 0)
+ return;
+
+#ifdef BLT_RGB565
+ bpp = 2; /* overlay output is RGB565 */
+#else
+ bpp = 3; /* overlay output is RGB888 */
+#endif
+ off = 0;
+ if (pipe->blt_dmap_done & 0x01)
+ off = pipe->src_height * pipe->src_width * bpp;
+ addr = pipe->dma_blt_addr + off;
+
+ /* dmae */
+ MDP_OUTP(MDP_BASE + 0xb0008, addr);
+}
+
+void mdp4_overlay_dtv_set_perf(struct msm_fb_data_type *mfd)
+{
+ /* change mdp clk while mdp is idle */
+ mdp4_set_perf_level();
+}
+
static void mdp4_overlay_dtv_alloc_pipe(struct msm_fb_data_type *mfd,
- int32 ptype)
+ int32 ptype, struct vsycn_ctrl *vctrl)
{
int ret = 0;
struct fb_info *fbi = mfd->fbi;
struct mdp4_overlay_pipe *pipe;
- if (dtv_pipe != NULL)
+ if (vctrl->base_pipe != NULL)
return;
- pr_debug("%s: ptype=%d\n", __func__, ptype);
-
pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER1);
if (pipe == NULL) {
pr_err("%s: pipe_alloc failed\n", __func__);
@@ -307,7 +668,6 @@
pipe->mixer_num = MDP4_MIXER1;
if (ptype == OVERLAY_TYPE_BF) {
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/* LSP_BORDER_COLOR */
MDP_OUTP(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x5004,
((0x0 & 0xFFF) << 16) | /* 12-bit B */
@@ -315,7 +675,7 @@
/* MSP_BORDER_COLOR */
MDP_OUTP(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x5008,
(0x0 & 0xFFF)); /* 12-bit R */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ pipe->src_format = MDP_ARGB_8888;
} else {
switch (mfd->ibuf.bpp) {
case 2:
@@ -357,28 +717,34 @@
mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_up(pipe);
- dtv_pipe = pipe; /* keep it */
+ vctrl->base_pipe = pipe; /* keep it */
}
int mdp4_overlay_dtv_set(struct msm_fb_data_type *mfd,
struct mdp4_overlay_pipe *pipe)
{
- if (dtv_pipe != NULL)
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->base_pipe != NULL)
return 0;
if (pipe != NULL && pipe->mixer_stage == MDP4_MIXER_STAGE_BASE &&
pipe->pipe_type == OVERLAY_TYPE_RGB)
- dtv_pipe = pipe; /* keep it */
+ vctrl->base_pipe = pipe; /* keep it */
else if (!hdmi_prim_display && mdp4_overlay_borderfill_supported())
- mdp4_overlay_dtv_alloc_pipe(mfd, OVERLAY_TYPE_BF);
+ mdp4_overlay_dtv_alloc_pipe(mfd, OVERLAY_TYPE_BF, vctrl);
else
- mdp4_overlay_dtv_alloc_pipe(mfd, OVERLAY_TYPE_RGB);
- if (dtv_pipe == NULL)
+ mdp4_overlay_dtv_alloc_pipe(mfd, OVERLAY_TYPE_RGB, vctrl);
+
+
+ if (vctrl->base_pipe == NULL)
return -ENODEV;
mdp4_init_writeback_buf(mfd, MDP4_MIXER1);
- dtv_pipe->ov_blt_addr = 0;
- dtv_pipe->dma_blt_addr = 0;
+ vctrl->base_pipe->ov_blt_addr = 0;
+ vctrl->base_pipe->dma_blt_addr = 0;
return mdp4_dtv_start(mfd);
}
@@ -387,206 +753,112 @@
struct mdp4_overlay_pipe *pipe)
{
int result = 0;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
- if (dtv_pipe == NULL)
- return result;
-
- pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
- mdp4_overlay_reg_flush(pipe, 0);
- mdp4_overlay_dtv_ov_done_push(mfd, pipe);
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->base_pipe != NULL)
+ return 0;
if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE &&
pipe->pipe_type == OVERLAY_TYPE_RGB) {
result = mdp4_dtv_stop(mfd);
- dtv_pipe = NULL;
+ vctrl->base_pipe = NULL;
}
return result;
}
-static void mdp4_dtv_blt_ov_update(struct mdp4_overlay_pipe *pipe)
+/* TODO: dtv writeback need to be added later */
+
+void mdp4_external_vsync_dtv(void)
{
- uint32 off, addr;
- int bpp;
- char *overlay_base;
+ int cndx;
+ struct vsycn_ctrl *vctrl;
- if (pipe->ov_blt_addr == 0)
- return;
-#ifdef BLT_RGB565
- bpp = 2; /* overlay ouput is RGB565 */
-#else
- bpp = 3; /* overlay ouput is RGB888 */
-#endif
- off = (pipe->ov_cnt & 0x01) ?
- pipe->src_height * pipe->src_width * bpp : 0;
+ cndx = 0;
+ vctrl = &vsync_ctrl_db[cndx];
+ pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
+ vctrl->vsync_time = ktime_get();
+ schedule_work(&vctrl->vsync_work);
- addr = pipe->ov_blt_addr + off;
- pr_debug("%s overlay addr 0x%x\n", __func__, addr);
- /* overlay 1 */
- overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
- outpdw(overlay_base + 0x000c, addr);
- outpdw(overlay_base + 0x001c, addr);
+ pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
+
+ spin_lock(&vctrl->spin_lock);
+ if (vctrl->wait_vsync_cnt) {
+ complete_all(&vctrl->vsync_comp);
+ vctrl->wait_vsync_cnt = 0;
+ }
+ spin_unlock(&vctrl->spin_lock);
}
-static inline void mdp4_dtv_blt_dmae_update(struct mdp4_overlay_pipe *pipe)
+/*
+ * mdp4_dmae_done_dtv: called from isr
+ */
+void mdp4_dmae_done_dtv(void)
{
- uint32 off, addr;
- int bpp;
+ int cndx;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
- if (pipe->ov_blt_addr == 0)
- return;
-
-#ifdef BLT_RGB565
- bpp = 2; /* overlay ouput is RGB565 */
-#else
- bpp = 3; /* overlay ouput is RGB888 */
-#endif
- off = (pipe->dmae_cnt & 0x01) ?
- pipe->src_height * pipe->src_width * bpp : 0;
- addr = pipe->dma_blt_addr + off;
- MDP_OUTP(MDP_BASE + 0xb0008, addr);
-}
-
-static inline void mdp4_overlay_dtv_ov_kick_start(void)
-{
- outpdw(MDP_BASE + 0x0008, 0);
-}
-
-static void mdp4_overlay_dtv_ov_start(struct msm_fb_data_type *mfd)
-{
- unsigned long flag;
-
- /* enable irq */
- if (mfd->ov_start)
- return;
-
- if (!dtv_pipe) {
- pr_debug("%s: no mixer1 base layer pipe allocated!\n",
- __func__);
+ cndx = 0;
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
return;
}
- if (dtv_pipe->ov_blt_addr) {
- mdp4_dtv_blt_ov_update(dtv_pipe);
- dtv_pipe->ov_cnt++;
- mdp4_overlay_dtv_ov_kick_start();
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ spin_lock(&vctrl->spin_lock);
+ if (vctrl->blt_change) {
+ if (pipe->ov_blt_addr) {
+ mdp4_overlayproc_cfg(pipe);
+ mdp4_overlay_dmae_xy(pipe);
+ mdp4_dtv_blt_ov_update(pipe);
+ pipe->blt_ov_done++;
+
+ /* Prefill one frame */
+ vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
+ /* kickoff overlay1 engine */
+ mdp4_stat.kickoff_ov1++;
+ outpdw(MDP_BASE + 0x0008, 0);
+ }
+ vctrl->blt_change = 0;
}
- spin_lock_irqsave(&mdp_spin_lock, flag);
- mdp_enable_irq(MDP_OVERLAY1_TERM);
- INIT_COMPLETION(dtv_pipe->comp);
- mfd->dma->waiting = TRUE;
- outp32(MDP_INTR_CLEAR, INTR_OVERLAY1_DONE);
- mdp_intr_mask |= INTR_OVERLAY1_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- mfd->ov_start = true;
-}
-
-static void mdp4_overlay_dtv_wait4dmae(struct msm_fb_data_type *mfd)
-{
- unsigned long flag;
-
- if (!dtv_pipe) {
- pr_debug("%s: no mixer1 base layer pipe allocated!\n",
- __func__);
- return;
+ vctrl->dmae_intr_cnt--;
+ if (vctrl->dmae_wait_cnt) {
+ complete_all(&vctrl->dmae_comp);
+ vctrl->dmae_wait_cnt = 0; /* reset */
+ } else {
+ mdp4_overlay_dma_commit(MDP4_MIXER1);
}
- /* enable irq */
- spin_lock_irqsave(&mdp_spin_lock, flag);
- mdp_enable_irq(MDP_DMA_E_TERM);
- INIT_COMPLETION(dtv_pipe->comp);
- mfd->dma->waiting = TRUE;
- outp32(MDP_INTR_CLEAR, INTR_DMA_E_DONE);
- mdp_intr_mask |= INTR_DMA_E_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- wait_for_completion_killable(&dtv_pipe->comp);
- mdp_disable_irq(MDP_DMA_E_TERM);
-}
-
-static void mdp4_overlay_dtv_wait4_ov_done(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
- u32 data = inpdw(MDP_BASE + DTV_BASE);
-
- if (mfd->ov_start)
- mfd->ov_start = false;
- else
- return;
- if (!(data & 0x1) || (pipe == NULL))
- return;
- if (!dtv_pipe) {
- pr_debug("%s: no mixer1 base layer pipe allocated!\n",
- __func__);
- return;
- }
-
- wait_for_completion_timeout(&dtv_pipe->comp,
- msecs_to_jiffies(VSYNC_PERIOD * 3));
- mdp_disable_irq(MDP_OVERLAY1_TERM);
-
- if (dtv_pipe->ov_blt_addr)
- mdp4_overlay_dtv_wait4dmae(mfd);
-}
-
-void mdp4_overlay_dtv_start(void)
-{
- if (!dtv_enabled) {
- mdp4_iommu_attach();
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- /* enable DTV block */
- MDP_OUTP(MDP_BASE + DTV_BASE, 1);
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- dtv_enabled = 1;
- }
-}
-
-void mdp4_overlay_dtv_ov_done_push(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
- mdp4_overlay_dtv_ov_start(mfd);
- if (pipe->flags & MDP_OV_PLAY_NOWAIT)
- return;
-
- mdp4_overlay_dtv_wait4_ov_done(mfd, pipe);
-
- /* change mdp clk while mdp is idle` */
- mdp4_set_perf_level();
-}
-
-void mdp4_overlay_dtv_wait_for_ov(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
- mdp4_overlay_dtv_wait4_ov_done(mfd, pipe);
- mdp4_set_perf_level();
-}
-
-void mdp4_dma_e_done_dtv()
-{
- if (!dtv_pipe)
- return;
-
- complete(&dtv_pipe->comp);
-}
-
-void mdp4_external_vsync_dtv()
-{
-
- complete_all(&dtv_comp);
+ vsync_irq_disable(INTR_DMA_E_DONE, MDP_DMA_E_TERM);
+ spin_unlock(&vctrl->spin_lock);
}
/*
* mdp4_overlay1_done_dtv: called from isr
*/
-void mdp4_overlay1_done_dtv()
+void mdp4_overlay1_done_dtv(void)
{
- if (!dtv_pipe)
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ int cndx = 0;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ spin_lock(&vctrl->spin_lock);
+ if (pipe->ov_blt_addr == 0) {
+ spin_unlock(&vctrl->spin_lock);
return;
- if (dtv_pipe->ov_blt_addr) {
- mdp4_dtv_blt_dmae_update(dtv_pipe);
- dtv_pipe->dmae_cnt++;
}
- complete_all(&dtv_pipe->comp);
+
+ mdp4_dtv_blt_dmae_update(pipe);
+ pipe->blt_dmap_done++;
+ vsync_irq_disable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
+ spin_unlock(&vctrl->spin_lock);
}
void mdp4_dtv_set_black_screen(void)
@@ -595,16 +867,17 @@
/*Black color*/
uint32 color = 0x00000000;
uint32 temp_src_format;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
- if (!dtv_pipe || !hdmi_prim_display) {
- pr_err("dtv_pipe/hdmi as primary are not"
- " configured yet\n");
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->base_pipe == NULL || !hdmi_prim_display) {
+ pr_err("dtv_pipe is not configured yet\n");
return;
}
rgb_base = MDP_BASE + MDP4_RGB_BASE;
- rgb_base += (MDP4_RGB_OFF * dtv_pipe->pipe_num);
+ rgb_base += (MDP4_RGB_OFF * vctrl->base_pipe->pipe_num);
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
/*
* RGB Constant Color
*/
@@ -614,73 +887,71 @@
*/
temp_src_format = inpdw(rgb_base + 0x0050);
MDP_OUTP(rgb_base + 0x0050, temp_src_format | BIT(22));
- mdp4_overlay_reg_flush(dtv_pipe, 1);
- mdp4_mixer_stage_up(dtv_pipe);
+ mdp4_overlay_reg_flush(vctrl->base_pipe, 1);
+ mdp4_mixer_stage_up(vctrl->base_pipe);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
-void mdp4_overlay_dtv_wait4vsync(void)
-{
- unsigned long flag;
-
- if (!dtv_enabled)
- return;
-
- /* enable irq */
- spin_lock_irqsave(&mdp_spin_lock, flag);
- mdp_enable_irq(MDP_DMA_E_TERM);
- INIT_COMPLETION(dtv_comp);
- outp32(MDP_INTR_CLEAR, INTR_EXTERNAL_VSYNC);
- mdp_intr_mask |= INTR_EXTERNAL_VSYNC;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- wait_for_completion_killable(&dtv_comp);
- mdp_disable_irq(MDP_DMA_E_TERM);
-}
-
static void mdp4_dtv_do_blt(struct msm_fb_data_type *mfd, int enable)
{
unsigned long flag;
- int change = 0;
+ int data;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ mdp4_allocate_writeback_buf(mfd, MDP4_MIXER1);
if (!mfd->ov1_wb_buf->write_addr) {
- pr_debug("%s: no writeback buf assigned\n", __func__);
+ pr_info("%s: ctrl=%d blt_base NOT assigned\n", __func__, cndx);
return;
}
- if (!dtv_pipe) {
- pr_debug("%s: no mixer1 base layer pipe allocated!\n",
- __func__);
+ spin_lock_irqsave(&vctrl->spin_lock, flag);
+ if (enable && pipe->ov_blt_addr == 0) {
+ pipe->ov_blt_addr = mfd->ov1_wb_buf->write_addr;
+ pipe->dma_blt_addr = mfd->ov1_wb_buf->read_addr;
+ pipe->blt_cnt = 0;
+ pipe->ov_cnt = 0;
+ pipe->blt_dmap_done = 0;
+ pipe->blt_ov_koff = 0;
+ pipe->blt_ov_done = 0;
+ mdp4_stat.blt_dtv++;
+ vctrl->blt_change++;
+ } else if (enable == 0 && pipe->ov_blt_addr) {
+ pipe->ov_blt_addr = 0;
+ pipe->dma_blt_addr = 0;
+ vctrl->blt_change++;
+ }
+
+ pr_info("%s: enable=%d change=%d blt_addr=%x\n", __func__,
+ enable, vctrl->blt_change, (int)pipe->ov_blt_addr);
+
+ if (!vctrl->blt_change) {
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
return;
}
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (enable && dtv_pipe->ov_blt_addr == 0) {
- dtv_pipe->ov_blt_addr = mfd->ov1_wb_buf->write_addr;
- dtv_pipe->dma_blt_addr = mfd->ov1_wb_buf->read_addr;
- change++;
- dtv_pipe->ov_cnt = 0;
- dtv_pipe->dmae_cnt = 0;
- } else if (enable == 0 && dtv_pipe->ov_blt_addr) {
- dtv_pipe->ov_blt_addr = 0;
- dtv_pipe->dma_blt_addr = 0;
- change++;
- }
- pr_debug("%s: ov_blt_addr=%x\n", __func__, (int)dtv_pipe->ov_blt_addr);
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ atomic_set(&vctrl->suspend, 1);
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
- if (!change)
- return;
+ data = inpdw(MDP_BASE + DTV_BASE);
+ data &= 0x01;
+ if (data) /* timing generator enabled */
+ mdp4_dtv_wait4dmae(0);
- if (dtv_enabled) {
- mdp4_overlay_dtv_wait4dmae(mfd);
- MDP_OUTP(MDP_BASE + DTV_BASE, 0); /* stop dtv */
+ if (pipe->ov_blt_addr == 0) {
+ MDP_OUTP(MDP_BASE + DTV_BASE, 0); /* stop dtv */
msleep(20);
+ mdp4_overlayproc_cfg(pipe);
+ mdp4_overlay_dmae_xy(pipe);
+ MDP_OUTP(MDP_BASE + DTV_BASE, 1); /* start dtv */
}
- mdp4_overlay_dmae_xy(dtv_pipe);
- mdp4_overlayproc_cfg(dtv_pipe);
- MDP_OUTP(MDP_BASE + DTV_BASE, 1); /* start dtv */
+ atomic_set(&vctrl->suspend, 0);
}
void mdp4_dtv_overlay_blt_start(struct msm_fb_data_type *mfd)
@@ -695,24 +966,23 @@
void mdp4_dtv_overlay(struct msm_fb_data_type *mfd)
{
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
struct mdp4_overlay_pipe *pipe;
+
if (!mfd->panel_power_on)
return;
- if (!dtv_pipe) {
- pr_debug("%s: no mixer1 base layer pipe allocated!\n",
- __func__);
+
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->base_pipe == NULL)
+ mdp4_overlay_dtv_set(mfd, NULL);
+
+ pipe = vctrl->base_pipe;
+
+ if (pipe == NULL) {
+ pr_warn("%s: dtv_pipe == NULL\n", __func__);
return;
}
- mutex_lock(&mfd->dma->ov_mutex);
- if (dtv_pipe == NULL) {
- if (mdp4_overlay_dtv_set(mfd, NULL)) {
- pr_warn("%s: dtv_pipe == NULL\n", __func__);
- mutex_unlock(&mfd->dma->ov_mutex);
- return;
- }
- }
-
- pipe = dtv_pipe;
if (hdmi_prim_display && (pipe->pipe_used == 0 ||
pipe->mixer_stage != MDP4_MIXER_STAGE_BASE)) {
@@ -721,14 +991,9 @@
return;
}
- if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
- pipe->srcp0_addr = (uint32) mfd->ibuf.buf;
- mdp4_overlay_rgb_setup(pipe);
+ if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
+ pipe->srcp0_addr = (uint32)mfd->ibuf.buf;
+ mdp4_dtv_pipe_queue(0, pipe);
}
- mdp4_overlay_reg_flush(pipe, 1);
- mdp4_mixer_stage_up(pipe);
- mdp4_overlay_dtv_start();
- mdp4_overlay_dtv_ov_done_push(mfd, pipe);
- mdp4_iommu_unmap(pipe);
- mutex_unlock(&mfd->dma->ov_mutex);
+ mdp4_dtv_pipe_commit();
}
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index bcc4ea6..2da2052 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -41,17 +41,372 @@
int first_pixel_start_x;
int first_pixel_start_y;
+
static int lcdc_enabled;
-static struct mdp4_overlay_pipe *lcdc_pipe;
-static struct completion lcdc_comp;
+#define MAX_CONTROLLER 1
-void mdp4_lcdc_base_swap(struct mdp4_overlay_pipe *pipe)
+static struct vsycn_ctrl {
+ struct device *dev;
+ int inited;
+ int update_ndx;
+ int ov_koff;
+ int ov_done;
+ atomic_t suspend;
+ int wait_vsync_cnt;
+ int blt_change;
+ int blt_free;
+ int fake_vsync;
+ struct mutex update_lock;
+ struct completion ov_comp;
+ struct completion dmap_comp;
+ struct completion vsync_comp;
+ spinlock_t spin_lock;
+ struct msm_fb_data_type *mfd;
+ struct mdp4_overlay_pipe *base_pipe;
+ struct vsync_update vlist[2];
+ int vsync_irq_enabled;
+ ktime_t vsync_time;
+ struct work_struct vsync_work;
+} vsync_ctrl_db[MAX_CONTROLLER];
+
+
+/*******************************************************
+to do:
+1) move vsync_irq_enable/vsync_irq_disable to mdp.c to be shared
+*******************************************************/
+static void vsync_irq_enable(int intr, int term)
{
- lcdc_pipe = pipe;
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+ outp32(MDP_INTR_CLEAR,
+ INTR_DMA_P_DONE | INTR_OVERLAY0_DONE | INTR_PRIMARY_VSYNC);
+ mdp_intr_mask |= intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_enable_irq(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ pr_debug("%s: IRQ-en done, term=%x\n", __func__, term);
}
-int mdp_lcdc_on(struct platform_device *pdev)
+static void vsync_irq_disable(int intr, int term)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&mdp_spin_lock, flag);
+ outp32(MDP_INTR_CLEAR,
+ INTR_DMA_P_DONE | INTR_OVERLAY0_DONE | INTR_PRIMARY_VSYNC);
+ mdp_intr_mask &= ~intr;
+ outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+ mdp_disable_irq_nosync(term);
+ spin_unlock_irqrestore(&mdp_spin_lock, flag);
+ pr_debug("%s: IRQ-dis done, term=%x\n", __func__, term);
+}
+
+static void mdp4_overlay_lcdc_start(void)
+{
+ if (!lcdc_enabled) {
+ /* enable LCDC block */
+ mdp4_iommu_attach();
+ mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ MDP_OUTP(MDP_BASE + LCDC_BASE, 1);
+ lcdc_enabled = 1;
+ }
+}
+
+/*
+ * mdp4_lcdc_pipe_queue:
+ * called from thread context
+ */
+void mdp4_lcdc_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pp;
+ int undx;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ /* start timing generator & mmu if they are not started yet */
+ mdp4_overlay_lcdc_start();
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+
+ pp = &vp->plist[pipe->pipe_ndx - 1]; /* ndx start form 1 */
+
+ pr_debug("%s: vndx=%d pipe_ndx=%d pid=%d\n", __func__,
+ undx, pipe->pipe_ndx, current->pid);
+
+ *pp = *pipe; /* keep it */
+ vp->update_cnt++;
+ mutex_unlock(&vctrl->update_lock);
+ mdp4_stat.overlay_play[pipe->mixer_num]++;
+}
+
+static void mdp4_lcdc_blt_ov_update(struct mdp4_overlay_pipe *pipe);
+static void mdp4_lcdc_wait4dmap(int cndx);
+static void mdp4_lcdc_wait4ov(int cndx);
+
+int mdp4_lcdc_pipe_commit(void)
+{
+
+ int i, undx;
+ int mixer = 0;
+ struct vsycn_ctrl *vctrl;
+ struct vsync_update *vp;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+ int cnt = 0;
+
+ vctrl = &vsync_ctrl_db[0];
+
+ mutex_lock(&vctrl->update_lock);
+ undx = vctrl->update_ndx;
+ vp = &vctrl->vlist[undx];
+ pipe = vctrl->base_pipe;
+ mixer = pipe->mixer_num;
+
+ if (vp->update_cnt == 0) {
+ mutex_unlock(&vctrl->update_lock);
+ return 0;
+ }
+
+ vctrl->update_ndx++;
+ vctrl->update_ndx &= 0x01;
+ vp->update_cnt = 0; /* reset */
+ if (vctrl->blt_free) {
+ vctrl->blt_free--;
+ if (vctrl->blt_free == 0)
+ mdp4_free_writeback_buf(vctrl->mfd, mixer);
+ }
+ mutex_unlock(&vctrl->update_lock);
+
+ /* free previous committed iommu back to pool */
+ mdp4_overlay_iommu_unmap_freelist(mixer);
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (vctrl->ov_koff != vctrl->ov_done) {
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+ pr_err("%s: Error, frame dropped %d %d\n", __func__,
+ vctrl->ov_koff, vctrl->ov_done);
+ return 0;
+ }
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ if (vctrl->blt_change) {
+ pipe = vctrl->base_pipe;
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ INIT_COMPLETION(vctrl->dmap_comp);
+ INIT_COMPLETION(vctrl->ov_comp);
+ vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+ mdp4_lcdc_wait4dmap(0);
+ if (pipe->ov_blt_addr)
+ mdp4_lcdc_wait4ov(0);
+ }
+
+ pipe = vp->plist;
+ for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+ if (pipe->pipe_used) {
+ cnt++;
+ mdp4_overlay_vsync_commit(pipe);
+ /* free previous iommu to freelist
+ * which will be freed at next
+ * pipe_commit
+ */
+ mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+ pipe->pipe_used = 0; /* clear */
+ }
+ }
+
+ mdp4_mixer_stage_commit(mixer);
+
+ pipe = vctrl->base_pipe;
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+ if (pipe->ov_blt_addr) {
+ mdp4_lcdc_blt_ov_update(pipe);
+ pipe->ov_cnt++;
+ INIT_COMPLETION(vctrl->ov_comp);
+ vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+ mb();
+ vctrl->ov_koff++;
+ /* kickoff overlay engine */
+ mdp4_stat.kickoff_ov0++;
+ outpdw(MDP_BASE + 0x0004, 0);
+ } else {
+ /* schedule second phase update at dmap */
+ INIT_COMPLETION(vctrl->dmap_comp);
+ vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+ }
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ mdp4_stat.overlay_commit[pipe->mixer_num]++;
+
+ return cnt;
+}
+
+void mdp4_lcdc_vsync_ctrl(int cndx, int enable)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (vctrl->fake_vsync) {
+ vctrl->fake_vsync = 0;
+ schedule_work(&vctrl->vsync_work);
+ }
+
+ if (vctrl->vsync_irq_enabled == enable)
+ return;
+
+ pr_debug("%s: vsync enable=%d\n", __func__, enable);
+
+ vctrl->vsync_irq_enabled = enable;
+
+ if (enable)
+ vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
+ else
+ vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
+}
+
+void mdp4_lcdc_wait4vsync(int cndx, long long *vtime)
+{
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+ unsigned long flags;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (atomic_read(&vctrl->suspend) > 0) {
+ *vtime = -1;
+ return;
+ }
+
+ /* start timing generator & mmu if they are not started yet */
+ mdp4_overlay_lcdc_start();
+
+ spin_lock_irqsave(&vctrl->spin_lock, flags);
+
+ if (vctrl->wait_vsync_cnt == 0)
+ INIT_COMPLETION(vctrl->vsync_comp);
+ vctrl->wait_vsync_cnt++;
+ spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+ wait_for_completion(&vctrl->vsync_comp);
+ mdp4_stat.wait4vsync0++;
+
+ *vtime = vctrl->vsync_time.tv64;
+}
+
+static void mdp4_lcdc_wait4dmap(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ wait_for_completion(&vctrl->dmap_comp);
+}
+
+static void mdp4_lcdc_wait4ov(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+
+ if (atomic_read(&vctrl->suspend) > 0)
+ return;
+
+ wait_for_completion(&vctrl->ov_comp);
+}
+
+static void send_vsync_work(struct work_struct *work)
+{
+ struct vsycn_ctrl *vctrl =
+ container_of(work, typeof(*vctrl), vsync_work);
+ char buf[64];
+ char *envp[2];
+
+ snprintf(buf, sizeof(buf), "VSYNC=%llu",
+ ktime_to_ns(vctrl->vsync_time));
+ envp[0] = buf;
+ envp[1] = NULL;
+ kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+void mdp4_lcdc_vsync_init(int cndx)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ pr_info("%s: ndx=%d\n", __func__, cndx);
+
+ vctrl = &vsync_ctrl_db[cndx];
+ if (vctrl->inited)
+ return;
+
+ vctrl->inited = 1;
+ vctrl->update_ndx = 0;
+ mutex_init(&vctrl->update_lock);
+ init_completion(&vctrl->vsync_comp);
+ init_completion(&vctrl->dmap_comp);
+ init_completion(&vctrl->ov_comp);
+ atomic_set(&vctrl->suspend, 0);
+ spin_lock_init(&vctrl->spin_lock);
+ INIT_WORK(&vctrl->vsync_work, send_vsync_work);
+}
+
+void mdp4_lcdc_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+ struct vsycn_ctrl *vctrl;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+
+ vctrl = &vsync_ctrl_db[cndx];
+ vctrl->base_pipe = pipe;
+}
+
+int mdp4_lcdc_on(struct platform_device *pdev)
{
int lcdc_width;
int lcdc_height;
@@ -90,8 +445,11 @@
struct fb_var_screeninfo *var;
struct msm_fb_data_type *mfd;
struct mdp4_overlay_pipe *pipe;
- int ret;
+ int ret = 0;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ vctrl = &vsync_ctrl_db[cndx];
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
if (!mfd)
@@ -100,21 +458,20 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
+ vctrl->mfd = mfd;
+ vctrl->dev = mfd->fbi->dev;
+
+ /* mdp clock on */
+ mdp_clk_ctrl(1);
+
fbi = mfd->fbi;
var = &fbi->var;
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- if (is_mdp4_hw_reset()) {
- mdp4_hw_init();
- outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
- }
-
bpp = fbi->var.bits_per_pixel / 8;
buf = (uint8 *) fbi->fix.smem_start;
buf_offset = calc_fb_offset(mfd, fbi, bpp);
- if (lcdc_pipe == NULL) {
+ if (vctrl->base_pipe == NULL) {
ptype = mdp4_overlay_format2type(mfd->fb_imgType);
if (ptype < 0)
printk(KERN_INFO "%s: format2type failed\n", __func__);
@@ -129,16 +486,17 @@
ret = mdp4_overlay_format2pipe(pipe);
if (ret < 0)
printk(KERN_INFO "%s: format2pipe failed\n", __func__);
- lcdc_pipe = pipe; /* keep it */
- init_completion(&lcdc_comp);
mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
pipe->ov_blt_addr = 0;
pipe->dma_blt_addr = 0;
+
+ vctrl->base_pipe = pipe; /* keep it */
} else {
- pipe = lcdc_pipe;
+ pipe = vctrl->base_pipe;
}
+
pipe->src_height = fbi->var.yres;
pipe->src_width = fbi->var.xres;
pipe->src_h = fbi->var.yres;
@@ -154,14 +512,16 @@
pipe->srcp0_ystride = fbi->fix.line_length;
pipe->bpp = bpp;
+ atomic_set(&vctrl->suspend, 0);
+
mdp4_overlay_dmap_xy(pipe);
mdp4_overlay_dmap_cfg(mfd, 1);
-
mdp4_overlay_rgb_setup(pipe);
+ mdp4_overlayproc_cfg(pipe);
+
mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_up(pipe);
- mdp4_overlayproc_cfg(pipe);
/*
* LCDC timing setting
@@ -238,6 +598,7 @@
ctrl_polarity =
(data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x4, hsync_ctrl);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x8, vsync_period);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0xc, vsync_pulse_width * hsync_period);
@@ -251,69 +612,56 @@
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x1c, active_hctl);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x20, active_v_start);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x24, active_v_end);
-
- mdp4_overlay_reg_flush(pipe, 1);
- mdp4_mixer_stage_up(pipe);
-
-#ifdef CONFIG_MSM_BUS_SCALING
- mdp_bus_scale_update_request(2);
-#endif
- mdp_histogram_ctrl_all(TRUE);
-
- ret = panel_next_on(pdev);
- if (ret == 0)
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- /* MDP cmd block disable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ mdp_histogram_ctrl_all(TRUE);
return ret;
}
-int mdp_lcdc_off(struct platform_device *pdev)
+int mdp4_lcdc_off(struct platform_device *pdev)
{
int ret = 0;
+ int cndx = 0;
struct msm_fb_data_type *mfd;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
- mutex_lock(&mfd->dma->ov_mutex);
+ atomic_set(&vctrl->suspend, 1);
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ while (vctrl->wait_vsync_cnt)
+ msleep(20); /* >= 17 ms */
+
MDP_OUTP(MDP_BASE + LCDC_BASE, 0);
+
lcdc_enabled = 0;
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
mdp_histogram_ctrl_all(FALSE);
- ret = panel_next_off(pdev);
- mutex_unlock(&mfd->dma->ov_mutex);
-
- /* delay to make sure the last frame finishes */
- msleep(20);
-
- /* dis-engage rgb0 from mixer0 */
- if (lcdc_pipe) {
+ if (pipe) {
if (mfd->ref_cnt == 0) {
/* adb stop */
- if (lcdc_pipe->pipe_type == OVERLAY_TYPE_BF)
- mdp4_overlay_borderfill_stage_down(lcdc_pipe);
+ if (pipe->pipe_type == OVERLAY_TYPE_BF)
+ mdp4_overlay_borderfill_stage_down(pipe);
- /* lcdc_pipe == rgb1 */
- mdp4_overlay_unset_mixer(lcdc_pipe->mixer_num);
- lcdc_pipe = NULL;
+ mdp4_overlay_unset_mixer(pipe->mixer_num);
+ vctrl->base_pipe = NULL;
} else {
- mdp4_mixer_stage_down(lcdc_pipe);
- mdp4_iommu_unmap(lcdc_pipe);
+ /* system suspending */
+ mdp4_mixer_stage_down(vctrl->base_pipe);
+ mdp4_overlay_iommu_pipe_free(
+ vctrl->base_pipe->pipe_ndx, 1);
}
}
-#ifdef CONFIG_MSM_BUS_SCALING
- mdp_bus_scale_update_request(0);
-#endif
+ vctrl->fake_vsync = 1;
+
+ /* MDP clock disable */
+ mdp_clk_ctrl(0);
+ mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
return ret;
}
@@ -324,11 +672,9 @@
int bpp;
char *overlay_base;
-
if (pipe->ov_blt_addr == 0)
return;
-
#ifdef BLT_RGB565
bpp = 2; /* overlay ouput is RGB565 */
#else
@@ -353,7 +699,6 @@
if (pipe->ov_blt_addr == 0)
return;
-
#ifdef BLT_RGB565
bpp = 2; /* overlay ouput is RGB565 */
#else
@@ -368,97 +713,9 @@
MDP_OUTP(MDP_BASE + 0x90008, addr);
}
-/*
- * mdp4_overlay_lcdc_wait4event:
- * INTR_DMA_P_DONE and INTR_PRIMARY_VSYNC event only
- * no INTR_OVERLAY0_DONE event allowed.
- */
-static void mdp4_overlay_lcdc_wait4event(struct msm_fb_data_type *mfd,
- int intr_done)
+void mdp4_overlay_lcdc_set_perf(struct msm_fb_data_type *mfd)
{
- unsigned long flag;
- unsigned int data;
-
- data = inpdw(MDP_BASE + LCDC_BASE);
- data &= 0x01;
- if (data == 0) /* timing generator disabled */
- return;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- INIT_COMPLETION(lcdc_comp);
- mfd->dma->waiting = TRUE;
- outp32(MDP_INTR_CLEAR, intr_done);
- mdp_intr_mask |= intr_done;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- mdp_enable_irq(MDP_DMA2_TERM); /* enable intr */
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- wait_for_completion(&lcdc_comp);
- mdp_disable_irq(MDP_DMA2_TERM);
-}
-
-static void mdp4_overlay_lcdc_dma_busy_wait(struct msm_fb_data_type *mfd)
-{
- unsigned long flag;
- int need_wait = 0;
-
- pr_debug("%s: start pid=%d\n", __func__, current->pid);
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (mfd->dma->busy == TRUE) {
- INIT_COMPLETION(mfd->dma->comp);
- need_wait++;
- }
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- if (need_wait) {
- /* wait until DMA finishes the current job */
- pr_debug("%s: pending pid=%d\n", __func__, current->pid);
- wait_for_completion(&mfd->dma->comp);
- }
- pr_debug("%s: done pid=%d\n", __func__, current->pid);
-}
-
-void mdp4_overlay_lcdc_start(void)
-{
- if (!lcdc_enabled) {
- /* enable LCDC block */
- mdp4_iommu_attach();
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- MDP_OUTP(MDP_BASE + LCDC_BASE, 1);
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- lcdc_enabled = 1;
- }
-}
-
-void mdp4_overlay_lcdc_vsync_push(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
- unsigned long flag;
-
- if (pipe->flags & MDP_OV_PLAY_NOWAIT)
- return;
-
- if (lcdc_pipe->ov_blt_addr) {
- mdp4_overlay_lcdc_dma_busy_wait(mfd);
-
- mdp4_lcdc_blt_ov_update(lcdc_pipe);
- lcdc_pipe->ov_cnt++;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
- mdp_intr_mask |= INTR_OVERLAY0_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- mdp_enable_irq(MDP_OVERLAY0_TERM);
- mfd->dma->busy = TRUE;
- mb(); /* make sure all registers updated */
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay engine */
- mdp4_stat.kickoff_ov0++;
- mb();
- mdp4_overlay_lcdc_wait4event(mfd, INTR_DMA_P_DONE);
- } else {
- mdp4_overlay_lcdc_wait4event(mfd, INTR_PRIMARY_VSYNC);
- }
+ /* change mdp clk while mdp is idle */
mdp4_set_perf_level();
}
@@ -467,118 +724,131 @@
*/
void mdp4_primary_vsync_lcdc(void)
{
- complete_all(&lcdc_comp);
+ int cndx;
+ struct vsycn_ctrl *vctrl;
+
+ cndx = 0;
+ vctrl = &vsync_ctrl_db[cndx];
+ pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
+ vctrl->vsync_time = ktime_get();
+ schedule_work(&vctrl->vsync_work);
+
+ spin_lock(&vctrl->spin_lock);
+ if (vctrl->wait_vsync_cnt) {
+ complete_all(&vctrl->vsync_comp);
+ vctrl->wait_vsync_cnt = 0;
+ }
+
+ spin_unlock(&vctrl->spin_lock);
}
/*
* mdp4_dma_p_done_lcdc: called from isr
*/
-void mdp4_dma_p_done_lcdc(void)
+void mdp4_dmap_done_lcdc(int cndx)
{
- complete_all(&lcdc_comp);
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ if (cndx >= MAX_CONTROLLER) {
+ pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+ return;
+ }
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ spin_lock(&vctrl->spin_lock);
+ vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+ if (vctrl->blt_change) {
+ mdp4_overlayproc_cfg(pipe);
+ mdp4_overlay_dmap_xy(pipe);
+ if (pipe->ov_blt_addr) {
+ mdp4_lcdc_blt_ov_update(pipe);
+ pipe->ov_cnt++;
+ /* Prefill one frame */
+ vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+ /* kickoff overlay0 engine */
+ mdp4_stat.kickoff_ov0++;
+ vctrl->ov_koff++; /* make up for prefill */
+ outpdw(MDP_BASE + 0x0004, 0);
+ }
+ vctrl->blt_change = 0;
+ }
+
+ complete_all(&vctrl->dmap_comp);
+ mdp4_overlay_dma_commit(cndx);
+ spin_unlock(&vctrl->spin_lock);
}
/*
* mdp4_overlay0_done_lcdc: called from isr
*/
-void mdp4_overlay0_done_lcdc(struct mdp_dma_data *dma)
+void mdp4_overlay0_done_lcdc(int cndx)
{
- spin_lock(&mdp_spin_lock);
- dma->busy = FALSE;
- if (lcdc_pipe->ov_blt_addr == 0) {
- spin_unlock(&mdp_spin_lock);
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ spin_lock(&vctrl->spin_lock);
+ vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+ vctrl->ov_done++;
+ complete_all(&vctrl->ov_comp);
+ if (pipe->ov_blt_addr == 0) {
+ spin_unlock(&vctrl->spin_lock);
return;
}
- mdp4_lcdc_blt_dmap_update(lcdc_pipe);
- lcdc_pipe->dmap_cnt++;
- mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
- spin_unlock(&mdp_spin_lock);
- complete(&dma->comp);
+
+ mdp4_lcdc_blt_dmap_update(pipe);
+ pipe->dmap_cnt++;
+ spin_unlock(&vctrl->spin_lock);
}
-static void mdp4_overlay_lcdc_prefill(struct msm_fb_data_type *mfd)
-{
- unsigned long flag;
-
- if (lcdc_pipe->ov_blt_addr) {
- mdp4_overlay_lcdc_dma_busy_wait(mfd);
-
- mdp4_lcdc_blt_ov_update(lcdc_pipe);
- lcdc_pipe->ov_cnt++;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
- mdp_intr_mask |= INTR_OVERLAY0_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- mdp_enable_irq(MDP_OVERLAY0_TERM);
- mfd->dma->busy = TRUE;
- mb(); /* make sure all registers updated */
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay engine */
- mdp4_stat.kickoff_ov0++;
- mb();
- }
-}
-/*
- * make sure the WRITEBACK_SIZE defined at boardfile
- * has enough space h * w * 3 * 2
- */
static void mdp4_lcdc_do_blt(struct msm_fb_data_type *mfd, int enable)
{
unsigned long flag;
- int change = 0;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
+ struct mdp4_overlay_pipe *pipe;
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
- if (!mfd->ov0_wb_buf->write_addr) {
- pr_debug("%s: no blt_base assigned\n", __func__);
+ if (mfd->ov0_wb_buf->write_addr == 0) {
+ pr_info("%s: no blt_base assigned\n", __func__);
return;
}
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (enable && lcdc_pipe->ov_blt_addr == 0) {
- lcdc_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
- lcdc_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
- change++;
- lcdc_pipe->blt_cnt = 0;
- lcdc_pipe->ov_cnt = 0;
- lcdc_pipe->dmap_cnt = 0;
+ spin_lock_irqsave(&vctrl->spin_lock, flag);
+ if (enable && pipe->ov_blt_addr == 0) {
+ pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+ pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+ pipe->ov_cnt = 0;
+ pipe->dmap_cnt = 0;
+ vctrl->ov_koff = 0;
+ vctrl->ov_done = 0;
+ vctrl->blt_free = 0;
mdp4_stat.blt_lcdc++;
- } else if (enable == 0 && lcdc_pipe->ov_blt_addr) {
- lcdc_pipe->ov_blt_addr = 0;
- lcdc_pipe->dma_blt_addr = 0;
- change++;
+ vctrl->blt_change++;
+ } else if (enable == 0 && pipe->ov_blt_addr) {
+ pipe->ov_blt_addr = 0;
+ pipe->dma_blt_addr = 0;
+ vctrl->blt_free = 4; /* 4 commits to free wb buf */
+ vctrl->blt_change++;
}
- pr_info("%s: ov_blt_addr=%x\n", __func__, (int)lcdc_pipe->ov_blt_addr);
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- if (!change)
+ pr_info("%s: enable=%d change=%d blt_addr=%x\n", __func__,
+ vctrl->blt_change, enable, (int)pipe->ov_blt_addr);
+
+ if (!vctrl->blt_change) {
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
return;
-
- if (lcdc_enabled) {
- mdp4_overlay_lcdc_wait4event(mfd, INTR_DMA_P_DONE);
- MDP_OUTP(MDP_BASE + LCDC_BASE, 0); /* stop lcdc */
- msleep(20);
}
- mdp4_overlay_dmap_xy(lcdc_pipe);
- mdp4_overlayproc_cfg(lcdc_pipe);
- if (lcdc_pipe->ov_blt_addr) {
- mdp4_overlay_lcdc_prefill(mfd);
- mdp4_overlay_lcdc_prefill(mfd);
- }
- MDP_OUTP(MDP_BASE + LCDC_BASE, 1); /* start lcdc */
-}
-
-int mdp4_lcdc_overlay_blt_offset(struct msm_fb_data_type *mfd,
- struct msmfb_overlay_blt *req)
-{
- req->offset = 0;
- req->width = lcdc_pipe->src_width;
- req->height = lcdc_pipe->src_height;
- req->bpp = lcdc_pipe->bpp;
-
- return sizeof(*req);
+ spin_unlock_irqrestore(&vctrl->spin_lock, flag);
}
void mdp4_lcdc_overlay_blt(struct msm_fb_data_type *mfd,
@@ -603,36 +873,36 @@
uint8 *buf;
unsigned int buf_offset;
int bpp;
+ int cndx = 0;
+ struct vsycn_ctrl *vctrl;
struct mdp4_overlay_pipe *pipe;
- if (!mfd->panel_power_on)
+
+ vctrl = &vsync_ctrl_db[cndx];
+ pipe = vctrl->base_pipe;
+
+ if (!pipe || !mfd->panel_power_on)
return;
- /* no need to power on cmd block since it's lcdc mode */
- bpp = fbi->var.bits_per_pixel / 8;
- buf = (uint8 *) fbi->fix.smem_start;
- buf_offset = calc_fb_offset(mfd, fbi, bpp);
+ pr_debug("%s: cpu=%d pid=%d\n", __func__,
+ smp_processor_id(), current->pid);
+ if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
+ bpp = fbi->var.bits_per_pixel / 8;
+ buf = (uint8 *) fbi->fix.smem_start;
+ buf_offset = calc_fb_offset(mfd, fbi, bpp);
- mutex_lock(&mfd->dma->ov_mutex);
+ if (mfd->display_iova)
+ pipe->srcp0_addr = mfd->display_iova + buf_offset;
+ else
+ pipe->srcp0_addr = (uint32)(buf + buf_offset);
- pipe = lcdc_pipe;
- if (pipe->pipe_used == 0 ||
- pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) {
- pr_err("%s: NOT baselayer\n", __func__);
- mutex_unlock(&mfd->dma->ov_mutex);
- return;
+ mdp4_lcdc_pipe_queue(0, pipe);
}
- if (mfd->display_iova)
- pipe->srcp0_addr = mfd->display_iova + buf_offset;
- else
- pipe->srcp0_addr = (uint32)(buf + buf_offset);
+ mdp4_lcdc_pipe_commit();
- mdp4_overlay_rgb_setup(pipe);
- mdp4_overlay_reg_flush(pipe, 1);
- mdp4_mixer_stage_up(pipe);
- mdp4_overlay_lcdc_start();
- mdp4_overlay_lcdc_vsync_push(mfd, pipe);
- mdp4_iommu_unmap(pipe);
- mutex_unlock(&mfd->dma->ov_mutex);
+ if (pipe->ov_blt_addr)
+ mdp4_lcdc_wait4ov(0);
+ else
+ mdp4_lcdc_wait4dmap(0);
}
diff --git a/drivers/video/msm/mdp4_overlay_mddi.c b/drivers/video/msm/mdp4_overlay_mddi.c
index c4e6793..103419e 100644
--- a/drivers/video/msm/mdp4_overlay_mddi.c
+++ b/drivers/video/msm/mdp4_overlay_mddi.c
@@ -354,6 +354,10 @@
outpdw(overlay_base + 0x001c, addr2);
}
+void mdp4_primary_rdptr(void)
+{
+}
+
/*
* mdp4_dmap_done_mddi: called from isr
*/
diff --git a/drivers/video/msm/mdp4_util.c b/drivers/video/msm/mdp4_util.c
index 4c0e28f..e76b8ba 100644
--- a/drivers/video/msm/mdp4_util.c
+++ b/drivers/video/msm/mdp4_util.c
@@ -409,56 +409,69 @@
goto out;
panel = mdp4_overlay_panel_list();
- if (isr & INTR_PRIMARY_VSYNC) {
- mdp4_stat.intr_vsync_p++;
+
+ if (isr & INTR_DMA_P_DONE) {
+ mdp4_stat.intr_dma_p++;
dma = &dma2_data;
- spin_lock(&mdp_spin_lock);
- mdp_intr_mask &= ~INTR_PRIMARY_VSYNC;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->waiting = FALSE;
if (panel & MDP4_PANEL_LCDC)
- mdp4_primary_vsync_lcdc();
+ mdp4_dmap_done_lcdc(0);
+#ifdef CONFIG_FB_MSM_OVERLAY
#ifdef CONFIG_FB_MSM_MIPI_DSI
else if (panel & MDP4_PANEL_DSI_VIDEO)
- mdp4_primary_vsync_dsi_video();
+ mdp4_dmap_done_dsi_video(0);
+ else if (panel & MDP4_PANEL_DSI_CMD)
+ mdp4_dmap_done_dsi_cmd(0);
+#else
+ else { /* MDDI */
+ mdp4_dma_p_done_mddi(dma);
+ mdp_pipe_ctrl(MDP_DMA2_BLOCK,
+ MDP_BLOCK_POWER_OFF, TRUE);
+ complete(&dma->comp);
+ }
#endif
- spin_unlock(&mdp_spin_lock);
+#else
+ else {
+ spin_lock(&mdp_spin_lock);
+ dma->busy = FALSE;
+ spin_unlock(&mdp_spin_lock);
+ complete(&dma->comp);
+ }
+#endif
}
-#ifdef CONFIG_FB_MSM_DTV
- if (isr & INTR_EXTERNAL_VSYNC) {
- mdp4_stat.intr_vsync_e++;
- dma = &dma_e_data;
- spin_lock(&mdp_spin_lock);
- mdp_intr_mask &= ~INTR_EXTERNAL_VSYNC;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->waiting = FALSE;
- if (panel & MDP4_PANEL_DTV)
- mdp4_external_vsync_dtv();
- spin_unlock(&mdp_spin_lock);
- }
+ if (isr & INTR_DMA_S_DONE) {
+ mdp4_stat.intr_dma_s++;
+#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
+ dma = &dma2_data;
+#else
+ dma = &dma_s_data;
#endif
+ dma->busy = FALSE;
+ mdp_pipe_ctrl(MDP_DMA_S_BLOCK,
+ MDP_BLOCK_POWER_OFF, TRUE);
+ complete(&dma->comp);
+ }
+ if (isr & INTR_DMA_E_DONE) {
+ mdp4_stat.intr_dma_e++;
+ if (panel & MDP4_PANEL_DTV)
+ mdp4_dmae_done_dtv();
+ }
#ifdef CONFIG_FB_MSM_OVERLAY
if (isr & INTR_OVERLAY0_DONE) {
mdp4_stat.intr_overlay0++;
dma = &dma2_data;
if (panel & (MDP4_PANEL_LCDC | MDP4_PANEL_DSI_VIDEO)) {
/* disable LCDC interrupt */
- spin_lock(&mdp_spin_lock);
- mdp_intr_mask &= ~INTR_OVERLAY0_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->waiting = FALSE;
- spin_unlock(&mdp_spin_lock);
if (panel & MDP4_PANEL_LCDC)
- mdp4_overlay0_done_lcdc(dma);
+ mdp4_overlay0_done_lcdc(0);
#ifdef CONFIG_FB_MSM_MIPI_DSI
else if (panel & MDP4_PANEL_DSI_VIDEO)
- mdp4_overlay0_done_dsi_video(dma);
+ mdp4_overlay0_done_dsi_video(0);
#endif
} else { /* MDDI, DSI_CMD */
#ifdef CONFIG_FB_MSM_MIPI_DSI
if (panel & MDP4_PANEL_DSI_CMD)
- mdp4_overlay0_done_dsi_cmd(dma);
+ mdp4_overlay0_done_dsi_cmd(0);
#else
if (panel & MDP4_PANEL_MDDI)
mdp4_overlay0_done_mddi(dma);
@@ -500,75 +513,20 @@
#endif
#endif /* OVERLAY */
- if (isr & INTR_DMA_P_DONE) {
- mdp4_stat.intr_dma_p++;
- dma = &dma2_data;
- if (panel & MDP4_PANEL_LCDC) {
- /* disable LCDC interrupt */
- spin_lock(&mdp_spin_lock);
- mdp_intr_mask &= ~INTR_DMA_P_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->waiting = FALSE;
- mdp4_dma_p_done_lcdc();
- spin_unlock(&mdp_spin_lock);
- }
-#ifdef CONFIG_FB_MSM_OVERLAY
-#ifdef CONFIG_FB_MSM_MIPI_DSI
- else if (panel & MDP4_PANEL_DSI_VIDEO) {
- /* disable LCDC interrupt */
- spin_lock(&mdp_spin_lock);
- mdp_intr_mask &= ~INTR_DMA_P_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->waiting = FALSE;
- mdp4_dma_p_done_dsi_video(dma);
- spin_unlock(&mdp_spin_lock);
- } else if (panel & MDP4_PANEL_DSI_CMD) {
- mdp4_dma_p_done_dsi(dma);
- }
-#else
- else { /* MDDI */
- mdp4_dma_p_done_mddi(dma);
- mdp_pipe_ctrl(MDP_DMA2_BLOCK,
- MDP_BLOCK_POWER_OFF, TRUE);
- complete(&dma->comp);
- }
-#endif
-#else
- else {
- spin_lock(&mdp_spin_lock);
- dma->busy = FALSE;
- spin_unlock(&mdp_spin_lock);
- complete(&dma->comp);
- }
-#endif
+ if (isr & INTR_PRIMARY_VSYNC) {
+ mdp4_stat.intr_vsync_p++;
+ if (panel & MDP4_PANEL_LCDC)
+ mdp4_primary_vsync_lcdc();
+ else if (panel & MDP4_PANEL_DSI_VIDEO)
+ mdp4_primary_vsync_dsi_video();
}
- if (isr & INTR_DMA_S_DONE) {
- mdp4_stat.intr_dma_s++;
-#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
- dma = &dma2_data;
-#else
- dma = &dma_s_data;
+#ifdef CONFIG_FB_MSM_DTV
+ if (isr & INTR_EXTERNAL_VSYNC) {
+ mdp4_stat.intr_vsync_e++;
+ if (panel & MDP4_PANEL_DTV)
+ mdp4_external_vsync_dtv();
+ }
#endif
-
- dma->busy = FALSE;
- mdp_pipe_ctrl(MDP_DMA_S_BLOCK,
- MDP_BLOCK_POWER_OFF, TRUE);
- complete(&dma->comp);
- }
- if (isr & INTR_DMA_E_DONE) {
- mdp4_stat.intr_dma_e++;
- dma = &dma_e_data;
- spin_lock(&mdp_spin_lock);
- mdp_intr_mask &= ~INTR_DMA_E_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->busy = FALSE;
- mdp4_dma_e_done_dtv();
- if (dma->waiting) {
- dma->waiting = FALSE;
- complete(&dma->comp);
- }
- spin_unlock(&mdp_spin_lock);
- }
if (isr & INTR_DMA_P_HISTOGRAM) {
mdp4_stat.intr_histogram++;
ret = mdp_histogram_block2mgmt(MDP_BLOCK_DMA_P, &mgmt);
@@ -593,6 +551,10 @@
if (!ret)
mdp_histogram_handle_isr(mgmt);
}
+ if (isr & INTR_PRIMARY_RDPTR) {
+ mdp4_stat.intr_rdptr++;
+ mdp4_primary_rdptr();
+ }
out:
mdp_is_in_isr = FALSE;
@@ -2672,9 +2634,9 @@
DISPLAY_READ_DOMAIN, GEN_POOL);
}
ion_free(mfd->iclient, buf->ihdl);
- pr_debug("%s:%d free writeback imem\n", __func__,
- __LINE__);
buf->ihdl = NULL;
+ pr_info("%s:%d free ION writeback imem",
+ __func__, __LINE__);
}
} else {
if (buf->write_addr) {
@@ -3276,71 +3238,92 @@
return valid;
}
-static int mdp4_qseed_write_cfg(struct mdp_qseed_cfg_data *cfg)
+int mdp4_qseed_access_cfg(struct mdp_qseed_cfg *config, uint32_t base)
{
int i, ret = 0;
- uint32_t base = (uint32_t) (MDP_BASE + mdp_block2base(cfg->block));
uint32_t *values;
- if ((cfg->table_num != 1) && (cfg->table_num != 2)) {
+ if ((config->table_num != 1) && (config->table_num != 2)) {
ret = -ENOTTY;
goto error;
}
- if (((cfg->table_num == 1) && (cfg->len != QSEED_TABLE_1_COUNT)) ||
- ((cfg->table_num == 2) && (cfg->len != QSEED_TABLE_2_COUNT))) {
+ if (((config->table_num == 1) && (config->len != QSEED_TABLE_1_COUNT))
+ || ((config->table_num == 2) &&
+ (config->len != QSEED_TABLE_2_COUNT))) {
ret = -EINVAL;
goto error;
}
- values = kmalloc(cfg->len * sizeof(uint32_t), GFP_KERNEL);
+ values = kmalloc(config->len * sizeof(uint32_t), GFP_KERNEL);
if (!values) {
ret = -ENOMEM;
goto error;
}
- ret = copy_from_user(values, cfg->data, sizeof(uint32_t) * cfg->len);
+ base += (config->table_num == 1) ? MDP4_QSEED_TABLE1_OFF :
+ MDP4_QSEED_TABLE2_OFF;
- base += (cfg->table_num == 1) ? MDP4_QSEED_TABLE1_OFF :
- MDP4_QSEED_TABLE2_OFF;
- for (i = 0; i < cfg->len; i++) {
- MDP_OUTP(base , values[i]);
- base += sizeof(uint32_t);
+ if (config->ops & MDP_PP_OPS_WRITE) {
+ ret = copy_from_user(values, config->data,
+ sizeof(uint32_t) * config->len);
+ if (ret) {
+ pr_warn("%s: Error copying from user, %d", __func__,
+ ret);
+ ret = -EINVAL;
+ goto err_mem;
+ }
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ for (i = 0; i < config->len; i++) {
+ if (!(base & 0x3FF))
+ wmb();
+ MDP_OUTP(base , values[i]);
+ base += sizeof(uint32_t);
+ }
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ } else if (config->ops & MDP_PP_OPS_READ) {
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+ for (i = 0; i < config->len; i++) {
+ values[i] = inpdw(base);
+ if (!(base & 0x3FF))
+ rmb();
+ base += sizeof(uint32_t);
+ }
+ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+ ret = copy_to_user(config->data, values,
+ sizeof(uint32_t) * config->len);
+ if (ret) {
+ pr_warn("%s: Error copying to user, %d", __func__, ret);
+ ret = -EINVAL;
+ goto err_mem;
+ }
}
+err_mem:
kfree(values);
error:
return ret;
}
-int mdp4_qseed_cfg(struct mdp_qseed_cfg_data *cfg)
+int mdp4_qseed_cfg(struct mdp_qseed_cfg_data *config)
{
int ret = 0;
+ struct mdp_qseed_cfg *cfg = &config->qseed_data;
+ uint32_t base;
- if (!mdp4_pp_block2qseed(cfg->block)) {
+ if (!mdp4_pp_block2qseed(config->block)) {
ret = -ENOTTY;
goto error;
}
- if (cfg->table_num != 1) {
- ret = -ENOTTY;
- pr_info("%s: Only QSEED table1 supported.\n", __func__);
+ if ((cfg->ops & MDP_PP_OPS_READ) && (cfg->ops & MDP_PP_OPS_WRITE)) {
+ ret = -EPERM;
+ pr_warn("%s: Cannot read and write on the same request\n",
+ __func__);
goto error;
}
-
- switch ((cfg->ops & 0x6) >> 1) {
- case 0x1:
- pr_info("%s: QSEED read not supported\n", __func__);
- ret = -ENOTTY;
- break;
- case 0x2:
- ret = mdp4_qseed_write_cfg(cfg);
- if (ret)
- goto error;
- break;
- default:
- break;
- }
+ base = (uint32_t) (MDP_BASE + mdp_block2base(config->block));
+ ret = mdp4_qseed_access_cfg(cfg, base);
error:
return ret;
diff --git a/drivers/video/msm/mdp_debugfs.c b/drivers/video/msm/mdp_debugfs.c
index 4a0ea4c..0fad0a7 100644
--- a/drivers/video/msm/mdp_debugfs.c
+++ b/drivers/video/msm/mdp_debugfs.c
@@ -337,7 +337,7 @@
bp += len;
dlen -= len;
len = snprintf(bp, dlen, "read_ptr: %08lu\n\n",
- mdp4_stat.intr_rd_ptr);
+ mdp4_stat.intr_rdptr);
bp += len;
dlen -= len;
len = snprintf(bp, dlen, "dsi:\n");
@@ -412,10 +412,14 @@
mdp4_stat.overlay_unset[0]);
bp += len;
dlen -= len;
- len = snprintf(bp, dlen, "play: %08lu\n",
+ len = snprintf(bp, dlen, "play: %08lu\t",
mdp4_stat.overlay_play[0]);
bp += len;
dlen -= len;
+ len = snprintf(bp, dlen, "commit: %08lu\n",
+ mdp4_stat.overlay_commit[0]);
+ bp += len;
+ dlen -= len;
len = snprintf(bp, dlen, "overlay1_play:\n");
bp += len;
@@ -428,29 +432,56 @@
mdp4_stat.overlay_unset[1]);
bp += len;
dlen -= len;
- len = snprintf(bp, dlen, "play: %08lu\n\n",
+ len = snprintf(bp, dlen, "play: %08lu\t",
mdp4_stat.overlay_play[1]);
bp += len;
dlen -= len;
+ len = snprintf(bp, dlen, "commit: %08lu\n\n",
+ mdp4_stat.overlay_commit[1]);
+ bp += len;
+ dlen -= len;
len = snprintf(bp, dlen, "frame_push:\n");
bp += len;
dlen -= len;
- len = snprintf(bp, dlen, "rgb1: %08lu\t",
- mdp4_stat.pipe[OVERLAY_PIPE_RGB1]);
+ len = snprintf(bp, dlen, "vg1 : %08lu\t", mdp4_stat.pipe[0]);
bp += len;
dlen -= len;
- len = snprintf(bp, dlen, "rgb2: %08lu\n",
- mdp4_stat.pipe[OVERLAY_PIPE_RGB2]);
+ len = snprintf(bp, dlen, "vg2 : %08lu\t", mdp4_stat.pipe[1]);
bp += len;
dlen -= len;
- len = snprintf(bp, dlen, "vg1 : %08lu\t",
- mdp4_stat.pipe[OVERLAY_PIPE_VG1]);
+ len = snprintf(bp, dlen, "vg3 : %08lu\n", mdp4_stat.pipe[5]);
bp += len;
dlen -= len;
- len = snprintf(bp, dlen, "vg2 : %08lu\n",
- mdp4_stat.pipe[OVERLAY_PIPE_VG2]);
+ len = snprintf(bp, dlen, "rgb1: %08lu\t", mdp4_stat.pipe[2]);
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "rgb2: %08lu\t", mdp4_stat.pipe[3]);
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "rgb3: %08lu\n\n", mdp4_stat.pipe[4]);
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "wait4vsync: ");
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "mixer0 : %08lu\t", mdp4_stat.wait4vsync0);
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "mixer1: %08lu\n\n", mdp4_stat.wait4vsync1);
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "iommu: ");
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "map : %08lu\t", mdp4_stat.iommu_map);
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "unmap: %08lu\t", mdp4_stat.iommu_unmap);
+ bp += len;
+ dlen -= len;
+ len = snprintf(bp, dlen, "drop: %08lu\n\n", mdp4_stat.iommu_drop);
bp += len;
dlen -= len;
len = snprintf(bp, dlen, "err_mixer : %08lu\t", mdp4_stat.err_mixer);
diff --git a/drivers/video/msm/mdp_dma_dsi_video.c b/drivers/video/msm/mdp_dma_dsi_video.c
index 1ba5b8d..3d6448f 100644
--- a/drivers/video/msm/mdp_dma_dsi_video.c
+++ b/drivers/video/msm/mdp_dma_dsi_video.c
@@ -26,6 +26,7 @@
#include "mdp.h"
#include "msm_fb.h"
#include "mdp4.h"
+#include "mipi_dsi.h"
#define DSI_VIDEO_BASE 0xF0000
#define DMA_P_BASE 0x90000
@@ -128,6 +129,7 @@
/* MDP cmd block enable */
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
/* starting address */
MDP_OUTP(MDP_BASE + DMA_P_BASE + 0x8, (uint32) buf);
@@ -191,6 +193,13 @@
ctrl_polarity = (data_en_polarity << 2) |
(vsync_polarity << 1) | (hsync_polarity);
+ if (!(mfd->cont_splash_done)) {
+ mdp_pipe_ctrl(MDP_CMD_BLOCK,
+ MDP_BLOCK_POWER_OFF, FALSE);
+ MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
+ mipi_dsi_controller_cfg(0);
+ }
+
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x4, hsync_ctrl);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x8, vsync_period);
MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0xc, vsync_pulse_width);
diff --git a/drivers/video/msm/mdp_dma_lcdc.c b/drivers/video/msm/mdp_dma_lcdc.c
index c418e9c..f9bf269 100644
--- a/drivers/video/msm/mdp_dma_lcdc.c
+++ b/drivers/video/msm/mdp_dma_lcdc.c
@@ -249,6 +249,12 @@
ctrl_polarity =
(data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
+ if (!(mfd->cont_splash_done)) {
+ mdp_pipe_ctrl(MDP_CMD_BLOCK,
+ MDP_BLOCK_POWER_OFF, FALSE);
+ MDP_OUTP(MDP_BASE + timer_base, 0);
+ }
+
MDP_OUTP(MDP_BASE + timer_base + 0x4, hsync_ctrl);
MDP_OUTP(MDP_BASE + timer_base + 0x8, vsync_period);
MDP_OUTP(MDP_BASE + timer_base + 0xc, vsync_pulse_width * hsync_period);
diff --git a/drivers/video/msm/mdp_vsync.c b/drivers/video/msm/mdp_vsync.c
index 87e74d9..966b40d 100644
--- a/drivers/video/msm/mdp_vsync.c
+++ b/drivers/video/msm/mdp_vsync.c
@@ -73,6 +73,20 @@
static unsigned char timer_shutdown_flag;
static uint32 vsync_cnt_cfg;
+
+
+void vsync_clk_enable()
+{
+ if (mdp_vsync_clk)
+ clk_prepare_enable(mdp_vsync_clk);
+}
+
+void vsync_clk_disable()
+{
+ if (mdp_vsync_clk)
+ clk_disable_unprepare(mdp_vsync_clk);
+}
+
void mdp_hw_vsync_clk_enable(struct msm_fb_data_type *mfd)
{
if (vsync_clk_status == 1)
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index a96bf3a..ee086ad 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -774,17 +774,23 @@
fix->type = panel_info->is_3d_panel;
fix->line_length = mdss_fb_line_length(mfd->index, panel_info->xres,
bpp);
- mfd->var_xres = panel_info->xres;
- mfd->var_yres = panel_info->yres;
-
- var->pixclock = mfd->panel_info.clk_rate;
- mfd->var_pixclock = var->pixclock;
var->xres = panel_info->xres;
var->yres = panel_info->yres;
var->xres_virtual = panel_info->xres;
var->yres_virtual = panel_info->yres * mfd->fb_page;
var->bits_per_pixel = bpp * 8; /* FrameBuffer color depth */
+ var->upper_margin = panel_info->lcdc.v_front_porch;
+ var->lower_margin = panel_info->lcdc.v_back_porch;
+ var->vsync_len = panel_info->lcdc.v_pulse_width;
+ var->left_margin = panel_info->lcdc.h_front_porch;
+ var->right_margin = panel_info->lcdc.h_back_porch;
+ var->hsync_len = panel_info->lcdc.h_pulse_width;
+ var->pixclock = panel_info->clk_rate / 1000;
+
+ mfd->var_xres = var->xres;
+ mfd->var_yres = var->yres;
+ mfd->var_pixclock = var->pixclock;
/* id field for fb app */
@@ -796,6 +802,7 @@
fbi->flags = FBINFO_FLAG_DEFAULT;
fbi->pseudo_palette = mdss_fb_pseudo_palette;
+ panel_info->fbi = fbi;
mfd->ref_cnt = 0;
mfd->panel_power_on = false;
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 43ddb5e..2e9a2dc 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -115,11 +115,10 @@
if (fmt->is_yuv) {
if ((req->src_rect.x & 0x1) || (req->src_rect.y & 0x1) ||
(req->src_rect.w & 0x1) || (req->src_rect.h & 0x1)) {
- pr_err("invalid odd src resolution\n");
+ pr_err("invalid odd src resolution or coordinates\n");
return -EINVAL;
}
- if ((req->dst_rect.x & 0x1) || (req->dst_rect.y & 0x1) ||
- (req->dst_rect.w & 0x1) || (req->dst_rect.h & 0x1)) {
+ if ((req->dst_rect.w & 0x1) || (req->dst_rect.h & 0x1)) {
pr_err("invalid odd dst resolution\n");
return -EINVAL;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index 52f4324..d9a148e 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -354,11 +354,12 @@
u32 chroma_sample;
if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA) {
- if (!(pipe->flags & MDP_ROT_90) && (pipe->dst.h != pipe->src.h
- || pipe->dst.w != pipe->src.w))
- return -EINVAL; /* no scaling supported on dma pipes */
- else
+ if (pipe->dst.h != pipe->src.h || pipe->dst.w != pipe->src.w) {
+ pr_err("no scaling supported on dma pipe\n");
+ return -EINVAL;
+ } else {
return 0;
+ }
}
chroma_sample = pipe->src_fmt->chroma_sample;
@@ -535,10 +536,10 @@
pr_debug("pnum=%d format=%d opmode=%x\n", pipe->num, fmt->format,
opmode);
- rot90 = !!(pipe->flags & MDP_SOURCE_ROTATED_90);
+ rot90 = !!(pipe->flags & MDP_ROT_90);
chroma_samp = fmt->chroma_sample;
- if (rot90) {
+ if (pipe->flags & MDP_SOURCE_ROTATED_90) {
if (chroma_samp == MDSS_MDP_CHROMA_H2V1)
chroma_samp = MDSS_MDP_CHROMA_H1V2;
else if (chroma_samp == MDSS_MDP_CHROMA_H1V2)
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index fc3a843..dc1cb0d 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -212,6 +212,7 @@
rot_pipe->img_width = rot->img_width;
rot_pipe->img_height = rot->img_height;
rot_pipe->src = rot->src_rect;
+ rot_pipe->dst = rot->src_rect;
rot_pipe->bwc_mode = rot->bwc_mode;
rot_pipe->params_changed++;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index da55edc..26e459f 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -60,32 +60,40 @@
static struct mdss_mdp_wb mdss_mdp_wb_info;
#ifdef DEBUG_WRITEBACK
-/* for debugging: writeback output buffer to framebuffer memory */
+/* for debugging: writeback output buffer to allocated memory */
static inline
struct mdss_mdp_data *mdss_mdp_wb_debug_buffer(struct msm_fb_data_type *mfd)
{
+ static struct ion_handle *ihdl;
static void *videomemory;
- static void *mdss_wb_mem;
- static struct mdss_mdp_data buffer = {
- .num_planes = 1,
- };
-
+ static ion_phys_addr_t mdss_wb_mem;
+ static struct mdss_mdp_data buffer = { .num_planes = 1, };
struct fb_info *fbi;
- int img_size;
- int offset;
-
+ size_t img_size;
fbi = mfd->fbi;
img_size = fbi->var.xres * fbi->var.yres * fbi->var.bits_per_pixel / 8;
- offset = fbi->fix.smem_len - img_size;
- videomemory = fbi->screen_base + offset;
- mdss_wb_mem = (void *)(fbi->fix.smem_start + offset);
+ if (ihdl == NULL) {
+ ihdl = ion_alloc(mfd->iclient, img_size, SZ_4K,
+ ION_HEAP(ION_SF_HEAP_ID));
+ if (!IS_ERR_OR_NULL(ihdl)) {
+ videomemory = ion_map_kernel(mfd->iclient, ihdl, 0);
+ ion_phys(mfd->iclient, ihdl, &mdss_wb_mem, &img_size);
+ } else {
+ pr_err("unable to alloc fbmem from ion (%p)\n", ihdl);
+ ihdl = NULL;
+ }
+ }
- buffer.p[0].addr = fbi->fix.smem_start + offset;
- buffer.p[0].len = img_size;
+ if (mdss_wb_mem) {
+ buffer.p[0].addr = (u32) mdss_wb_mem;
+ buffer.p[0].len = img_size;
- return &buffer;
+ return &buffer;
+ }
+
+ return NULL;
}
#else
static inline
@@ -227,6 +235,7 @@
}
node->buf_data.num_planes = 1;
+ node->buf_info = *data;
buf = &node->buf_data.p[0];
buf->addr = (u32) (data->iova + data->offset);
buf->len = UINT_MAX; /* trusted source */
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 3fd943d..0411d8e 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -161,6 +161,7 @@
struct lcdc_panel_info lcdc;
struct mipi_panel_info mipi;
struct lvds_panel_info lvds;
+ struct fb_info *fbi;
};
struct mdss_panel_data {
diff --git a/drivers/video/msm/mdss/mdss_wb.c b/drivers/video/msm/mdss/mdss_wb.c
index 3be4525..a26d339 100644
--- a/drivers/video/msm/mdss/mdss_wb.c
+++ b/drivers/video/msm/mdss/mdss_wb.c
@@ -73,7 +73,7 @@
pdata->panel_info.type = WRITEBACK_PANEL;
pdata->panel_info.clk_rate = 74250000;
pdata->panel_info.pdest = DISPLAY_3;
- pdata->panel_info.out_format = MDP_RGB_888;
+ pdata->panel_info.out_format = MDP_Y_CBCR_H2V2;
pdata->on = mdss_wb_on;
pdata->off = mdss_wb_off;
diff --git a/drivers/video/msm/mhl/mhl_8334.c b/drivers/video/msm/mhl/mhl_8334.c
index d6e3f6f..646dd29 100644
--- a/drivers/video/msm/mhl/mhl_8334.c
+++ b/drivers/video/msm/mhl/mhl_8334.c
@@ -30,11 +30,11 @@
#include <linux/regulator/consumer.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/mhl_8334.h>
#include "msm_fb.h"
#include "external_common.h"
#include "hdmi_msm.h"
-#include "mhl_8334.h"
#include "mhl_i2c_utils.h"
@@ -53,6 +53,8 @@
static void release_usb_switch_open(void);
static void switch_mode(enum mhl_st_type to_mode);
static irqreturn_t mhl_tx_isr(int irq, void *dev_id);
+void (*notify_usb_online)(int online);
+static void mhl_drive_hpd(uint8_t to_state);
static struct i2c_driver mhl_sii_i2c_driver = {
.driver = {
@@ -227,6 +229,58 @@
return true;
}
+
+/* USB_HANDSHAKING FUNCTIONS */
+
+int mhl_device_discovery(const char *name, int *result)
+
+{
+ int timeout ;
+ mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x27);
+ msleep(50);
+ if (mhl_msm_state->cur_state == POWER_STATE_D3) {
+ /* give MHL driver chance to handle RGND interrupt */
+ INIT_COMPLETION(mhl_msm_state->rgnd_done);
+ timeout = wait_for_completion_interruptible_timeout
+ (&mhl_msm_state->rgnd_done, HZ/2);
+ if (!timeout) {
+ /* most likely nothing plugged in USB */
+ /* USB HOST connected or already in USB mode */
+ pr_debug("Timedout Returning from discovery mode\n");
+ *result = MHL_DISCOVERY_RESULT_USB;
+ return 0;
+ }
+ *result = mhl_msm_state->mhl_mode ?
+ MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB;
+ } else
+ /* not in D3. already in MHL mode */
+ *result = MHL_DISCOVERY_RESULT_MHL;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhl_device_discovery);
+
+int mhl_register_callback(const char *name, void (*callback)(int online))
+{
+ pr_debug("%s\n", __func__);
+ if (!callback)
+ return -EINVAL;
+ if (!notify_usb_online)
+ notify_usb_online = callback;
+ return 0;
+}
+EXPORT_SYMBOL(mhl_register_callback);
+
+int mhl_unregister_callback(const char *name)
+{
+ pr_debug("%s\n", __func__);
+ if (notify_usb_online)
+ notify_usb_online = NULL;
+ return 0;
+}
+EXPORT_SYMBOL(mhl_unregister_callback);
+
+
static void cbus_reset(void)
{
uint8_t i;
@@ -240,7 +294,7 @@
/*
* REG_INTR1 and REG_INTR4
*/
- mhl_i2c_reg_write(TX_PAGE_L0, 0x0075, BIT6 | BIT5);
+ mhl_i2c_reg_write(TX_PAGE_L0, 0x0075, BIT6);
mhl_i2c_reg_write(TX_PAGE_3, 0x0022,
BIT0 | BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
/* REG5 */
@@ -340,7 +394,7 @@
/*
* Configure the initial reg settings
*/
-static void mhl_init_reg_settings(void)
+static void mhl_init_reg_settings(bool mhl_disc_en)
{
/*
@@ -419,15 +473,19 @@
/* Pull-up resistance off for IDLE state */
mhl_i2c_reg_write(TX_PAGE_3, 0x0013, 0x8C);
/* Enable CBUS Discovery */
- mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x27);
+ if (mhl_disc_en)
+ /* Enable MHL Discovery */
+ mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x27);
+ else
+ /* Disable MHL Discovery */
+ mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x26);
mhl_i2c_reg_write(TX_PAGE_3, 0x0016, 0x20);
/* MHL CBUS Discovery - immediate comm. */
mhl_i2c_reg_write(TX_PAGE_3, 0x0012, 0x86);
/* Do not force HPD to 0 during wake-up from D3 */
- if (mhl_msm_state->cur_state != POWER_STATE_D3) {
- mhl_i2c_reg_modify(TX_PAGE_3, 0x0020,
- BIT5 | BIT4, BIT4);
- }
+ if (mhl_msm_state->cur_state != POWER_STATE_D0_MHL)
+ mhl_drive_hpd(HPD_DOWN);
+
/* Enable Auto Soft RESET */
mhl_i2c_reg_write(TX_PAGE_3, 0x0000, 0x084);
/* HDMI Transcode mode enable */
@@ -452,7 +510,10 @@
/* MHL spec requires a 100 ms wait here. */
msleep(100);
- mhl_init_reg_settings();
+ /*
+ * Need to enable MHL discovery
+ */
+ mhl_init_reg_settings(true);
/*
* Power down the chip to the
@@ -563,6 +624,7 @@
/* MHL SII 8334 chip specific init */
mhl_chip_init();
+ init_completion(&mhl_msm_state->rgnd_done);
return 0;
init_exit:
@@ -583,10 +645,9 @@
case POWER_STATE_D0_NO_MHL:
break;
case POWER_STATE_D0_MHL:
- mhl_init_reg_settings();
-
+ mhl_init_reg_settings(true);
/* REG_DISC_CTRL1 */
- mhl_i2c_reg_modify(TX_PAGE_3, 0x0010, BIT1, 0);
+ mhl_i2c_reg_modify(TX_PAGE_3, 0x0010, BIT1 | BIT0, BIT0);
/*
* TPI_DEVICE_POWER_STATE_CTRL_REG
@@ -597,16 +658,15 @@
case POWER_STATE_D3:
if (mhl_msm_state->cur_state != POWER_STATE_D3) {
/* Force HPD to 0 when not in MHL mode. */
- mhl_i2c_reg_modify(TX_PAGE_3, 0x0020,
- BIT5 | BIT4, BIT4);
-
+ mhl_drive_hpd(HPD_DOWN);
/*
* Change TMDS termination to high impedance
* on disconnection.
*/
mhl_i2c_reg_write(TX_PAGE_3, 0x0030, 0xD0);
- mhl_i2c_reg_modify(TX_PAGE_L1, 0x003D,
- BIT1 | BIT0, BIT0);
+ msleep(50);
+ mhl_i2c_reg_modify(TX_PAGE_3, 0x0010,
+ BIT1 | BIT0, BIT1);
spin_lock_irqsave(&mhl_state_lock, flags);
mhl_msm_state->cur_state = POWER_STATE_D3;
spin_unlock_irqrestore(&mhl_state_lock, flags);
@@ -619,6 +679,11 @@
static void mhl_drive_hpd(uint8_t to_state)
{
+ if (mhl_msm_state->cur_state != POWER_STATE_D0_MHL) {
+ pr_err("MHL: invalid state to ctrl HPD\n");
+ return;
+ }
+
pr_debug("%s: To state=[0x%x]\n", __func__, to_state);
if (to_state == HPD_UP) {
/*
@@ -644,6 +709,7 @@
* Disable TMDS Output on REG_TMDS_CCTRL
* Enable/Disable TMDS output (MHL TMDS output only)
*/
+ mhl_i2c_reg_modify(TX_PAGE_3, 0x20, BIT4 | BIT5, BIT4);
mhl_i2c_reg_modify(TX_PAGE_L0, 0x0080, BIT4, 0x00);
}
return;
@@ -682,20 +748,11 @@
static void mhl_msm_disconnection(void)
{
- uint8_t reg;
- /* Clear interrupts - REG INTR4 */
- reg = mhl_i2c_reg_read(TX_PAGE_3, 0x0021);
- mhl_i2c_reg_write(TX_PAGE_3, 0x0021, reg);
/*
* MHL TX CTL1
* Disabling Tx termination
*/
mhl_i2c_reg_write(TX_PAGE_3, 0x30, 0xD0);
- /*
- * MSC REQUESTOR ABORT REASON
- * Clear CBUS_HPD status
- */
- mhl_i2c_reg_modify(TX_PAGE_CBUS, 0x000D, BIT6, 0x00);
/* Change HPD line to drive it low */
mhl_drive_hpd(HPD_DOWN);
/* switch power state to D3 */
@@ -704,11 +761,11 @@
}
/*
- * If hardware detected a change in impedence and raised an INTR
- * We check the range of this impedence to infer if the connected
+ * If hardware detected a change in impedance and raised an INTR
+ * We check the range of this impedance to infer if the connected
* device is MHL or USB and take appropriate actions.
*/
-static void mhl_msm_read_rgnd_int(void)
+static int mhl_msm_read_rgnd_int(void)
{
uint8_t rgnd_imp;
@@ -720,20 +777,27 @@
* 10 - 1 kOHM ***(MHL)**** It's range 800 - 1200 OHM from MHL spec
* 11 - short (USB)
*/
- rgnd_imp = mhl_i2c_reg_read(TX_PAGE_3, 0x001C);
+ rgnd_imp = (mhl_i2c_reg_read(TX_PAGE_3, 0x001C) & (BIT1 | BIT0));
pr_debug("Imp Range read = %02X\n", (int)rgnd_imp);
-
if (0x02 == rgnd_imp) {
pr_debug("MHL: MHL DEVICE!!!\n");
+ mhl_i2c_reg_modify(TX_PAGE_3, 0x0018, BIT0, BIT0);
/*
* Handling the MHL event in driver
*/
- mhl_i2c_reg_modify(TX_PAGE_3, 0x0018, BIT0, BIT0);
+ mhl_msm_state->mhl_mode = TRUE;
+ if (notify_usb_online)
+ notify_usb_online(1);
} else {
pr_debug("MHL: NON-MHL DEVICE!!!\n");
+ mhl_msm_state->mhl_mode = FALSE;
mhl_i2c_reg_modify(TX_PAGE_3, 0x0018, BIT3, BIT3);
+ switch_mode(POWER_STATE_D3);
}
+ complete(&mhl_msm_state->rgnd_done);
+ return mhl_msm_state->mhl_mode ?
+ MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB;
}
static void force_usb_switch_open(void)
@@ -756,7 +820,7 @@
static void int_4_isr(void)
{
- uint8_t status;
+ uint8_t status, reg;
/* INTR_STATUS4 */
status = mhl_i2c_reg_read(TX_PAGE_3, 0x0021);
@@ -767,7 +831,7 @@
* do nothing.
*/
if ((0x00 == status) && (mhl_msm_state->cur_state == POWER_STATE_D3)) {
- mhl_chip_init();
+ pr_debug("MHL: spurious interrupt\n");
return;
}
if (0xFF != status) {
@@ -816,8 +880,13 @@
if (status & BIT5) {
mhl_connect_api(false);
+ /* Clear interrupts - REG INTR4 */
+ reg = mhl_i2c_reg_read(TX_PAGE_3, 0x0021);
+ mhl_i2c_reg_write(TX_PAGE_3, 0x0021, reg);
mhl_msm_disconnection();
- pr_debug("MHL Disconn Drv: INT4 Status = %02X\n",
+ if (notify_usb_online)
+ notify_usb_online(0);
+ pr_debug("MHL Disconnect Drv: INT4 Status = %02X\n",
(int)status);
}
@@ -971,6 +1040,122 @@
return;
}
+static void clear_all_intrs(void)
+{
+ uint8_t regval = 0x00;
+ /*
+ * intr status debug
+ */
+ pr_debug("********* EXITING ISR MASK CHECK ?? *************\n");
+ pr_debug("Drv: INT1 MASK = %02X\n",
+ (int) mhl_i2c_reg_read(TX_PAGE_L0, 0x0071));
+ pr_debug("Drv: INT3 MASK = %02X\n",
+ (int) mhl_i2c_reg_read(TX_PAGE_L0, 0x0077));
+ pr_debug("Drv: INT4 MASK = %02X\n",
+ (int) mhl_i2c_reg_read(TX_PAGE_3, 0x0021));
+ pr_debug("Drv: INT5 MASK = %02X\n",
+ (int) mhl_i2c_reg_read(TX_PAGE_3, 0x0023));
+ pr_debug("Drv: CBUS1 MASK = %02X\n",
+ (int) mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0009));
+ pr_debug("Drv: CBUS2 MASK = %02X\n",
+ (int) mhl_i2c_reg_read(TX_PAGE_CBUS, 0x001F));
+ pr_debug("********* END OF ISR MASK CHECK *************\n");
+
+ pr_debug("********* EXITING IN ISR ?? *************\n");
+ regval = mhl_i2c_reg_read(TX_PAGE_L0, 0x0071);
+ pr_debug("Drv: INT1 Status = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_L0, 0x0071, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_L0, 0x0072);
+ pr_debug("Drv: INT2 Status = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_L0, 0x0072, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_L0, 0x0073);
+ pr_debug("Drv: INT3 Status = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_L0, 0x0073, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_3, 0x0021);
+ pr_debug("Drv: INT4 Status = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_3, 0x0021, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_3, 0x0023);
+ pr_debug("Drv: INT5 Status = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_3, 0x0023, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0008);
+ pr_debug("Drv: cbusInt Status = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0008, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x001E);
+ pr_debug("Drv: CBUS INTR_2: %d\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x001E, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A0);
+ pr_debug("Drv: A0 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A0, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A1);
+ pr_debug("Drv: A1 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A1, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A2);
+ pr_debug("Drv: A2 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A2, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A3);
+ pr_debug("Drv: A3 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A3, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B0);
+ pr_debug("Drv: B0 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B0, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B1);
+ pr_debug("Drv: B1 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B1, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B2);
+ pr_debug("Drv: B2 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B2, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B3);
+ pr_debug("Drv: B3 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B3, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E0);
+ pr_debug("Drv: E0 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E0, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E1);
+ pr_debug("Drv: E1 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E1, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E2);
+ pr_debug("Drv: E2 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E2, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E3);
+ pr_debug("Drv: E3 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E3, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F0);
+ pr_debug("Drv: F0 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F0, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F1);
+ pr_debug("Drv: F1 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F1, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F2);
+ pr_debug("Drv: F2 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F2, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F3);
+ pr_debug("Drv: F3 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F3, regval);
+ pr_debug("********* END OF EXITING IN ISR *************\n");
+}
+
static irqreturn_t mhl_tx_isr(int irq, void *dev_id)
{
/*
@@ -997,6 +1182,7 @@
mhl_cbus_isr();
int_1_isr();
}
+ clear_all_intrs();
return IRQ_HANDLED;
}
diff --git a/drivers/video/msm/mhl/mhl_i2c_utils.c b/drivers/video/msm/mhl/mhl_i2c_utils.c
index aab6e02..ee069bb 100644
--- a/drivers/video/msm/mhl/mhl_i2c_utils.c
+++ b/drivers/video/msm/mhl/mhl_i2c_utils.c
@@ -11,9 +11,9 @@
*
*/
#include <linux/i2c.h>
+#include <linux/mhl_8334.h>
#include "mhl_i2c_utils.h"
-#include "mhl_8334.h"
uint8_t slave_addrs[MAX_PAGES] = {
DEV_PAGE_TPI_0 ,
diff --git a/drivers/video/msm/mhl/mhl_i2c_utils.h b/drivers/video/msm/mhl/mhl_i2c_utils.h
index 76498d4..5a2d199 100644
--- a/drivers/video/msm/mhl/mhl_i2c_utils.h
+++ b/drivers/video/msm/mhl/mhl_i2c_utils.h
@@ -16,8 +16,7 @@
#include <linux/i2c.h>
#include <linux/types.h>
-
-#include "mhl_defs.h"
+#include <linux/mhl_defs.h>
/*
* I2C command to the adapter to append
diff --git a/drivers/video/msm/mipi_NT35510.c b/drivers/video/msm/mipi_NT35510.c
index e605aed..94c24ee 100644
--- a/drivers/video/msm/mipi_NT35510.c
+++ b/drivers/video/msm/mipi_NT35510.c
@@ -482,26 +482,31 @@
mipi = &mfd->panel_info.mipi;
+ if (!mfd->cont_splash_done) {
+ mfd->cont_splash_done = 1;
+ return 0;
+ }
+
if (mipi_nt35510_pdata && mipi_nt35510_pdata->rotate_panel)
rotate = mipi_nt35510_pdata->rotate_panel();
if (mipi->mode == DSI_VIDEO_MODE) {
- mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
+ mipi_dsi_cmds_tx(&nt35510_tx_buf,
nt35510_video_display_on_cmds,
ARRAY_SIZE(nt35510_video_display_on_cmds));
if (rotate) {
- mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
+ mipi_dsi_cmds_tx(&nt35510_tx_buf,
nt35510_video_display_on_cmds_rotate,
ARRAY_SIZE(nt35510_video_display_on_cmds_rotate));
}
} else if (mipi->mode == DSI_CMD_MODE) {
- mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
+ mipi_dsi_cmds_tx(&nt35510_tx_buf,
nt35510_cmd_display_on_cmds,
ARRAY_SIZE(nt35510_cmd_display_on_cmds));
if (rotate) {
- mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
+ mipi_dsi_cmds_tx(&nt35510_tx_buf,
nt35510_cmd_display_on_cmds_rotate,
ARRAY_SIZE(nt35510_cmd_display_on_cmds_rotate));
}
@@ -523,7 +528,7 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf, nt35510_display_off_cmds,
+ mipi_dsi_cmds_tx(&nt35510_tx_buf, nt35510_display_off_cmds,
ARRAY_SIZE(nt35510_display_off_cmds));
pr_debug("mipi_nt35510_lcd_off X\n");
diff --git a/drivers/video/msm/mipi_dsi.c b/drivers/video/msm/mipi_dsi.c
index ff8fd17..b4fb930 100644
--- a/drivers/video/msm/mipi_dsi.c
+++ b/drivers/video/msm/mipi_dsi.c
@@ -80,25 +80,6 @@
mdp4_overlay_dsi_state_set(ST_DSI_SUSPEND);
/*
- * Description: dsi clock is need to perform shutdown.
- * mdp4_dsi_cmd_dma_busy_wait() will enable dsi clock if disabled.
- * also, wait until dma (overlay and dmap) finish.
- */
- if (mfd->panel_info.type == MIPI_CMD_PANEL) {
- if (mdp_rev >= MDP_REV_41) {
- mdp4_dsi_cmd_del_timer();
- mdp4_dsi_cmd_dma_busy_wait(mfd);
- mdp4_dsi_blt_dmap_busy_wait(mfd);
- mipi_dsi_mdp_busy_wait(mfd);
- } else {
- mdp3_dsi_cmd_dma_busy_wait(mfd);
- }
- } else {
- /* video mode, wait until fifo cleaned */
- mipi_dsi_controller_cfg(0);
- }
-
- /*
* Desctiption: change to DSI_CMD_MODE since it needed to
* tx DCS dsiplay off comamnd to panel
*/
diff --git a/drivers/video/msm/mipi_dsi.h b/drivers/video/msm/mipi_dsi.h
index 2bc49c0..ebbf362 100644
--- a/drivers/video/msm/mipi_dsi.h
+++ b/drivers/video/msm/mipi_dsi.h
@@ -264,8 +264,7 @@
void mipi_dsi_bist_ctrl(void);
int mipi_dsi_buf_alloc(struct dsi_buf *, int size);
int mipi_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm);
-int mipi_dsi_cmds_tx(struct msm_fb_data_type *mfd,
- struct dsi_buf *dp, struct dsi_cmd_desc *cmds, int cnt);
+int mipi_dsi_cmds_tx(struct dsi_buf *dp, struct dsi_cmd_desc *cmds, int cnt);
int mipi_dsi_cmd_dma_tx(struct dsi_buf *dp);
int mipi_dsi_cmd_reg_tx(uint32 data);
@@ -278,10 +277,14 @@
void mipi_dsi_cmd_mode_ctrl(int enable);
void mdp4_dsi_cmd_trigger(void);
void mipi_dsi_cmd_mdp_start(void);
+int mipi_dsi_ctrl_lock(int mdp);
+int mipi_dsi_ctrl_lock_query(void);
void mipi_dsi_cmd_bta_sw_trigger(void);
void mipi_dsi_ack_err_status(void);
void mipi_dsi_set_tear_on(struct msm_fb_data_type *mfd);
void mipi_dsi_set_tear_off(struct msm_fb_data_type *mfd);
+void mipi_dsi_set_backlight(struct msm_fb_data_type *mfd, int level);
+void mipi_dsi_cmd_backlight_tx(int level);
void mipi_dsi_clk_enable(void);
void mipi_dsi_clk_disable(void);
void mipi_dsi_pre_kickoff_action(void);
@@ -310,6 +313,7 @@
void cont_splash_clk_ctrl(int enable);
void mipi_dsi_turn_on_clks(void);
void mipi_dsi_turn_off_clks(void);
+void mipi_dsi_clk_cfg(int on);
#ifdef CONFIG_FB_MSM_MDP303
void update_lane_config(struct msm_panel_info *pinfo);
diff --git a/drivers/video/msm/mipi_dsi_host.c b/drivers/video/msm/mipi_dsi_host.c
index 7f1a435..4afffb0 100644
--- a/drivers/video/msm/mipi_dsi_host.c
+++ b/drivers/video/msm/mipi_dsi_host.c
@@ -46,6 +46,7 @@
static spinlock_t dsi_irq_lock;
static spinlock_t dsi_mdp_lock;
spinlock_t dsi_clk_lock;
+static int dsi_ctrl_lock;
static int dsi_mdp_busy;
static struct list_head pre_kickoff_list;
@@ -146,6 +147,30 @@
spin_unlock(&dsi_irq_lock);
}
+void mipi_dsi_clk_cfg(int on)
+{
+ unsigned long flags;
+ static int dsi_clk_cnt;
+
+ spin_lock_irqsave(&mdp_spin_lock, flags);
+ if (on) {
+ if (dsi_clk_cnt == 0) {
+ mipi_dsi_ahb_ctrl(1);
+ mipi_dsi_clk_enable();
+ }
+ dsi_clk_cnt++;
+ } else {
+ if (dsi_clk_cnt) {
+ dsi_clk_cnt--;
+ if (dsi_clk_cnt == 0) {
+ mipi_dsi_clk_disable();
+ mipi_dsi_ahb_ctrl(0);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&mdp_spin_lock, flags);
+}
+
void mipi_dsi_turn_on_clks(void)
{
mipi_dsi_ahb_ctrl(1);
@@ -982,6 +1007,27 @@
wmb();
}
+int mipi_dsi_ctrl_lock(int mdp)
+{
+ unsigned long flag;
+ int lock = 0;
+
+ spin_lock_irqsave(&dsi_mdp_lock, flag);
+ if (dsi_ctrl_lock == FALSE) {
+ dsi_ctrl_lock = TRUE;
+ lock = 1;
+ if (lock && mdp) /* mdp pixel */
+ mipi_dsi_enable_irq();
+ }
+ spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+ return lock;
+}
+
+int mipi_dsi_ctrl_lock_query()
+{
+ return dsi_ctrl_lock;
+}
+
void mipi_dsi_mdp_busy_wait(struct msm_fb_data_type *mfd)
{
unsigned long flag;
@@ -1011,19 +1057,14 @@
{
unsigned long flag;
-
- if (!in_interrupt())
- mipi_dsi_pre_kickoff_action();
-
mipi_dsi_mdp_stat_inc(STAT_DSI_START);
spin_lock_irqsave(&dsi_mdp_lock, flag);
- mipi_dsi_enable_irq();
- dsi_mdp_busy = TRUE;
+ mipi_dsi_enable_irq();
+ dsi_mdp_busy = TRUE;
spin_unlock_irqrestore(&dsi_mdp_lock, flag);
}
-
void mipi_dsi_cmd_bta_sw_trigger(void)
{
uint32 data;
@@ -1055,13 +1096,13 @@
void mipi_dsi_set_tear_on(struct msm_fb_data_type *mfd)
{
mipi_dsi_buf_init(&dsi_tx_buf);
- mipi_dsi_cmds_tx(mfd, &dsi_tx_buf, &dsi_tear_on_cmd, 1);
+ mipi_dsi_cmds_tx(&dsi_tx_buf, &dsi_tear_on_cmd, 1);
}
void mipi_dsi_set_tear_off(struct msm_fb_data_type *mfd)
{
mipi_dsi_buf_init(&dsi_tx_buf);
- mipi_dsi_cmds_tx(mfd, &dsi_tx_buf, &dsi_tear_off_cmd, 1);
+ mipi_dsi_cmds_tx(&dsi_tx_buf, &dsi_tear_off_cmd, 1);
}
int mipi_dsi_cmd_reg_tx(uint32 data)
@@ -1093,12 +1134,64 @@
return 4;
}
+static char led_pwm1[2] = {0x51, 0x0}; /* DTYPE_DCS_WRITE1 */
+
+static struct dsi_cmd_desc backlight_cmd = {
+ DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(led_pwm1), led_pwm1};
+
+/*
+ * mipi_dsi_cmd_backlight_tx:
+ * thread context only
+ */
+void mipi_dsi_cmd_backlight_tx(int level)
+{
+ struct dsi_buf *tp;
+ struct dsi_cmd_desc *cmd;
+ unsigned long flag;
+
+ spin_lock_irqsave(&dsi_mdp_lock, flag);
+ dsi_mdp_busy = TRUE;
+ spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+
+ led_pwm1[1] = (unsigned char)(level);
+ tp = &dsi_tx_buf;
+ cmd = &backlight_cmd;
+ mipi_dsi_buf_init(&dsi_tx_buf);
+
+
+
+ if (tp->dmap) {
+ dma_unmap_single(&dsi_dev, tp->dmap, tp->len, DMA_TO_DEVICE);
+ tp->dmap = 0;
+ }
+
+ mipi_dsi_enable_irq();
+ mipi_dsi_cmd_dma_add(tp, cmd);
+
+ tp->len += 3;
+ tp->len &= ~0x03; /* multiplied by 4 */
+
+ tp->dmap = dma_map_single(&dsi_dev, tp->data, tp->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dsi_dev, tp->dmap))
+ pr_err("%s: dmap mapp failed\n", __func__);
+
+ MIPI_OUTP(MIPI_DSI_BASE + 0x044, tp->dmap);
+ MIPI_OUTP(MIPI_DSI_BASE + 0x048, tp->len);
+ wmb();
+ MIPI_OUTP(MIPI_DSI_BASE + 0x08c, 0x01); /* trigger */
+ wmb();
+
+ spin_lock_irqsave(&dsi_mdp_lock, flag);
+ dsi_mdp_busy = FALSE;
+ complete(&dsi_mdp_comp);
+ spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+}
+
/*
* mipi_dsi_cmds_tx:
- * ov_mutex need to be acquired before call this function.
+ * thread context only
*/
-int mipi_dsi_cmds_tx(struct msm_fb_data_type *mfd,
- struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt)
+int mipi_dsi_cmds_tx(struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt)
{
struct dsi_cmd_desc *cm;
uint32 dsi_ctrl, ctrl;
@@ -1115,29 +1208,16 @@
if (video_mode) {
ctrl = dsi_ctrl | 0x04; /* CMD_MODE_EN */
MIPI_OUTP(MIPI_DSI_BASE + 0x0000, ctrl);
- } else { /* cmd mode */
- /*
- * during boot up, cmd mode is configured
- * even it is video mode panel.
- */
- /* make sure mdp dma is not txing pixel data */
- if (mfd->panel_info.type == MIPI_CMD_PANEL) {
-#ifndef CONFIG_FB_MSM_MDP303
- mdp4_dsi_cmd_dma_busy_wait(mfd);
-#else
- mdp3_dsi_cmd_dma_busy_wait(mfd);
-#endif
- }
}
spin_lock_irqsave(&dsi_mdp_lock, flag);
- mipi_dsi_enable_irq();
dsi_mdp_busy = TRUE;
spin_unlock_irqrestore(&dsi_mdp_lock, flag);
cm = cmds;
mipi_dsi_buf_init(tp);
for (i = 0; i < cnt; i++) {
+ mipi_dsi_enable_irq();
mipi_dsi_buf_init(tp);
mipi_dsi_cmd_dma_add(tp, cm);
mipi_dsi_cmd_dma_tx(tp);
@@ -1146,15 +1226,14 @@
cm++;
}
- spin_lock_irqsave(&dsi_mdp_lock, flag);
- dsi_mdp_busy = FALSE;
- mipi_dsi_disable_irq();
- complete(&dsi_mdp_comp);
- spin_unlock_irqrestore(&dsi_mdp_lock, flag);
-
if (video_mode)
MIPI_OUTP(MIPI_DSI_BASE + 0x0000, dsi_ctrl); /* restore */
+ spin_lock_irqsave(&dsi_mdp_lock, flag);
+ dsi_mdp_busy = FALSE;
+ complete(&dsi_mdp_comp);
+ spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+
return cnt;
}
@@ -1183,8 +1262,8 @@
struct dsi_cmd_desc *cmds, int rlen)
{
int cnt, len, diff, pkt_size;
- unsigned long flag;
char cmd;
+ unsigned long flag;
if (mfd->panel_info.mipi.no_max_pkt_size) {
/* Only support rlen = 4*n */
@@ -1216,15 +1295,12 @@
if (mfd->panel_info.type == MIPI_CMD_PANEL) {
/* make sure mdp dma is not txing pixel data */
-#ifndef CONFIG_FB_MSM_MDP303
- mdp4_dsi_cmd_dma_busy_wait(mfd);
-#else
+#ifdef CONFIG_FB_MSM_MDP303
mdp3_dsi_cmd_dma_busy_wait(mfd);
#endif
}
spin_lock_irqsave(&dsi_mdp_lock, flag);
- mipi_dsi_enable_irq();
dsi_mdp_busy = TRUE;
spin_unlock_irqrestore(&dsi_mdp_lock, flag);
@@ -1232,16 +1308,20 @@
/* packet size need to be set at every read */
pkt_size = len;
max_pktsize[0] = pkt_size;
+ mipi_dsi_enable_irq();
mipi_dsi_buf_init(tp);
mipi_dsi_cmd_dma_add(tp, pkt_size_cmd);
mipi_dsi_cmd_dma_tx(tp);
}
+ mipi_dsi_enable_irq();
mipi_dsi_buf_init(tp);
mipi_dsi_cmd_dma_add(tp, cmds);
/* transmit read comamnd to client */
mipi_dsi_cmd_dma_tx(tp);
+
+ mipi_dsi_disable_irq();
/*
* once cmd_dma_done interrupt received,
* return data from client is ready and stored
@@ -1260,7 +1340,6 @@
spin_lock_irqsave(&dsi_mdp_lock, flag);
dsi_mdp_busy = FALSE;
- mipi_dsi_disable_irq();
complete(&dsi_mdp_comp);
spin_unlock_irqrestore(&dsi_mdp_lock, flag);
@@ -1303,7 +1382,6 @@
int mipi_dsi_cmd_dma_tx(struct dsi_buf *tp)
{
- int len;
#ifdef DSI_HOST_DEBUG
int i;
@@ -1318,25 +1396,24 @@
pr_debug("\n");
#endif
- len = tp->len;
- len += 3;
- len &= ~0x03; /* multipled by 4 */
+ tp->len += 3;
+ tp->len &= ~0x03; /* multiplied by 4 */
- tp->dmap = dma_map_single(&dsi_dev, tp->data, len, DMA_TO_DEVICE);
+ tp->dmap = dma_map_single(&dsi_dev, tp->data, tp->len, DMA_TO_DEVICE);
if (dma_mapping_error(&dsi_dev, tp->dmap))
pr_err("%s: dmap mapp failed\n", __func__);
INIT_COMPLETION(dsi_dma_comp);
MIPI_OUTP(MIPI_DSI_BASE + 0x044, tp->dmap);
- MIPI_OUTP(MIPI_DSI_BASE + 0x048, len);
+ MIPI_OUTP(MIPI_DSI_BASE + 0x048, tp->len);
wmb();
MIPI_OUTP(MIPI_DSI_BASE + 0x08c, 0x01); /* trigger */
wmb();
wait_for_completion(&dsi_dma_comp);
- dma_unmap_single(&dsi_dev, tp->dmap, len, DMA_TO_DEVICE);
+ dma_unmap_single(&dsi_dev, tp->dmap, tp->len, DMA_TO_DEVICE);
tp->dmap = 0;
return tp->len;
}
@@ -1459,7 +1536,6 @@
#ifdef CONFIG_FB_MSM_MDP40
mdp4_stat.intr_dsi++;
#endif
-
if (isr & DSI_INTR_ERROR) {
mipi_dsi_mdp_stat_inc(STAT_DSI_ERROR);
mipi_dsi_error();
@@ -1474,16 +1550,20 @@
if (isr & DSI_INTR_CMD_DMA_DONE) {
mipi_dsi_mdp_stat_inc(STAT_DSI_CMD);
complete(&dsi_dma_comp);
+ spin_lock(&dsi_mdp_lock);
+ dsi_ctrl_lock = FALSE;
+ mipi_dsi_disable_irq_nosync();
+ spin_unlock(&dsi_mdp_lock);
}
if (isr & DSI_INTR_CMD_MDP_DONE) {
mipi_dsi_mdp_stat_inc(STAT_DSI_MDP);
spin_lock(&dsi_mdp_lock);
- dsi_mdp_busy = FALSE;
+ dsi_ctrl_lock = FALSE;
mipi_dsi_disable_irq_nosync();
- spin_unlock(&dsi_mdp_lock);
+ dsi_mdp_busy = FALSE;
complete(&dsi_mdp_comp);
- mipi_dsi_post_kickoff_action();
+ spin_unlock(&dsi_mdp_lock);
}
diff --git a/drivers/video/msm/mipi_novatek.c b/drivers/video/msm/mipi_novatek.c
index 0070757..7dd41d2 100644
--- a/drivers/video/msm/mipi_novatek.c
+++ b/drivers/video/msm/mipi_novatek.c
@@ -243,14 +243,9 @@
0x2B, 0x00, 0x00, 0x03, 0xBF}; /* 960 - 1 */
#endif
-static char led_pwm1[2] = {0x51, 0x0}; /* DTYPE_DCS_WRITE1 */
static char led_pwm2[2] = {0x53, 0x24}; /* DTYPE_DCS_WRITE1 */
static char led_pwm3[2] = {0x55, 0x00}; /* DTYPE_DCS_WRITE1 */
-static struct dsi_cmd_desc novatek_cmd_backlight_cmds[] = {
- {DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(led_pwm1), led_pwm1},
-};
-
static struct dsi_cmd_desc novatek_video_on_cmds[] = {
{DTYPE_DCS_WRITE, 1, 0, 0, 50,
sizeof(sw_reset), sw_reset},
@@ -318,7 +313,7 @@
cmd = &novatek_manufacture_id_cmd;
mipi_dsi_cmds_rx(mfd, tp, rp, cmd, 3);
lp = (uint32 *)rp->data;
- pr_info("%s: manufacture_id=%x", __func__, *lp);
+ pr_info("%s: manufacture_id=%x\n", __func__, *lp);
return *lp;
}
@@ -398,16 +393,18 @@
mipi = &mfd->panel_info.mipi;
- if (mipi->mode == DSI_VIDEO_MODE) {
- mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_video_on_cmds,
- ARRAY_SIZE(novatek_video_on_cmds));
- } else {
- mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_cmd_on_cmds,
- ARRAY_SIZE(novatek_cmd_on_cmds));
+ if (mipi_dsi_ctrl_lock(0)) {
+ if (mipi->mode == DSI_VIDEO_MODE) {
+ mipi_dsi_cmds_tx(&novatek_tx_buf, novatek_video_on_cmds,
+ ARRAY_SIZE(novatek_video_on_cmds));
+ } else {
+ mipi_dsi_cmds_tx(&novatek_tx_buf, novatek_cmd_on_cmds,
+ ARRAY_SIZE(novatek_cmd_on_cmds));
- mipi_dsi_cmd_bta_sw_trigger(); /* clean up ack_err_status */
-
- mipi_novatek_manufacture_id(mfd);
+ /* clean up ack_err_status */
+ mipi_dsi_cmd_bta_sw_trigger();
+ mipi_novatek_manufacture_id(mfd);
+ }
}
return 0;
@@ -424,14 +421,23 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_display_off_cmds,
+ if (mipi_dsi_ctrl_lock(0)) {
+ mipi_dsi_cmds_tx(&novatek_tx_buf, novatek_display_off_cmds,
ARRAY_SIZE(novatek_display_off_cmds));
+ }
return 0;
}
DEFINE_LED_TRIGGER(bkl_led_trigger);
+#ifdef CONFIG_FB_MSM_MDP303
+void mdp4_backlight_put_level(int cndx, int level)
+{
+ /* do nothing */
+}
+#endif
+
static void mipi_novatek_set_backlight(struct msm_fb_data_type *mfd)
{
struct mipi_panel_info *mipi;
@@ -443,21 +449,7 @@
}
mipi = &mfd->panel_info.mipi;
- mutex_lock(&mfd->dma->ov_mutex);
- if (mdp4_overlay_dsi_state_get() <= ST_DSI_SUSPEND) {
- mutex_unlock(&mfd->dma->ov_mutex);
- return;
- }
- /* mdp4_dsi_cmd_busy_wait: will turn on dsi clock also */
- mdp4_dsi_cmd_dma_busy_wait(mfd);
- mdp4_dsi_blt_dmap_busy_wait(mfd);
- mipi_dsi_mdp_busy_wait(mfd);
-
- led_pwm1[1] = (unsigned char)(mfd->bl_level);
- mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_cmd_backlight_cmds,
- ARRAY_SIZE(novatek_cmd_backlight_cmds));
- mutex_unlock(&mfd->dma->ov_mutex);
- return;
+ mdp4_backlight_put_level(0, mfd->bl_level);
}
static int mipi_dsi_3d_barrier_sysfs_register(struct device *dev);
diff --git a/drivers/video/msm/mipi_orise.c b/drivers/video/msm/mipi_orise.c
index 2afbb9b..d1d6956 100644
--- a/drivers/video/msm/mipi_orise.c
+++ b/drivers/video/msm/mipi_orise.c
@@ -64,10 +64,10 @@
mipi = &mfd->panel_info.mipi;
if (mipi->mode == DSI_VIDEO_MODE) {
- mipi_dsi_cmds_tx(mfd, &orise_tx_buf, orise_video_on_cmds,
+ mipi_dsi_cmds_tx(&orise_tx_buf, orise_video_on_cmds,
ARRAY_SIZE(orise_video_on_cmds));
} else {
- mipi_dsi_cmds_tx(mfd, &orise_tx_buf, orise_cmd_on_cmds,
+ mipi_dsi_cmds_tx(&orise_tx_buf, orise_cmd_on_cmds,
ARRAY_SIZE(orise_cmd_on_cmds));
mipi_dsi_cmd_bta_sw_trigger(); /* clean up ack_err_status */
@@ -87,7 +87,7 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &orise_tx_buf, orise_display_off_cmds,
+ mipi_dsi_cmds_tx(&orise_tx_buf, orise_display_off_cmds,
ARRAY_SIZE(orise_display_off_cmds));
return 0;
diff --git a/drivers/video/msm/mipi_renesas.c b/drivers/video/msm/mipi_renesas.c
index c9dc8255..c842672 100644
--- a/drivers/video/msm/mipi_renesas.c
+++ b/drivers/video/msm/mipi_renesas.c
@@ -1131,23 +1131,23 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_sleep_off_cmds,
+ mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_sleep_off_cmds,
ARRAY_SIZE(renesas_sleep_off_cmds));
mipi_set_tx_power_mode(1);
- mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_display_on_cmds,
+ mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_display_on_cmds,
ARRAY_SIZE(renesas_display_on_cmds));
if (cpu_is_msm7x25a() || cpu_is_msm7x25aa() || cpu_is_msm7x25ab()) {
- mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_hvga_on_cmds,
+ mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_hvga_on_cmds,
ARRAY_SIZE(renesas_hvga_on_cmds));
}
if (mipi->mode == DSI_VIDEO_MODE)
- mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_video_on_cmds,
+ mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_video_on_cmds,
ARRAY_SIZE(renesas_video_on_cmds));
else
- mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_cmd_on_cmds,
+ mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_cmd_on_cmds,
ARRAY_SIZE(renesas_cmd_on_cmds));
mipi_set_tx_power_mode(0);
@@ -1165,7 +1165,7 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_display_off_cmds,
+ mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_display_off_cmds,
ARRAY_SIZE(renesas_display_off_cmds));
return 0;
diff --git a/drivers/video/msm/mipi_simulator.c b/drivers/video/msm/mipi_simulator.c
index c6bf534..c751472 100644
--- a/drivers/video/msm/mipi_simulator.c
+++ b/drivers/video/msm/mipi_simulator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -49,7 +49,7 @@
mipi->mode);
if (mipi->mode == DSI_VIDEO_MODE) {
- mipi_dsi_cmds_tx(mfd, &simulator_tx_buf, display_on_cmds,
+ mipi_dsi_cmds_tx(&simulator_tx_buf, display_on_cmds,
ARRAY_SIZE(display_on_cmds));
} else {
pr_err("%s:%d, CMD MODE NOT SUPPORTED", __func__, __LINE__);
@@ -75,7 +75,7 @@
pr_debug("%s:%d, debug info", __func__, __LINE__);
if (mipi->mode == DSI_VIDEO_MODE) {
- mipi_dsi_cmds_tx(mfd, &simulator_tx_buf, display_off_cmds,
+ mipi_dsi_cmds_tx(&simulator_tx_buf, display_off_cmds,
ARRAY_SIZE(display_off_cmds));
} else {
pr_debug("%s:%d, DONT REACH HERE", __func__, __LINE__);
diff --git a/drivers/video/msm/mipi_tc358764_dsi2lvds.c b/drivers/video/msm/mipi_tc358764_dsi2lvds.c
index 2e65c34..1583168 100644
--- a/drivers/video/msm/mipi_tc358764_dsi2lvds.c
+++ b/drivers/video/msm/mipi_tc358764_dsi2lvds.c
@@ -281,8 +281,8 @@
payload.addr = reg;
payload.data = data;
- /* mutex had been acquired at mipi_dsi_on */
- mipi_dsi_cmds_tx(mfd, &d2l_tx_buf, &cmd_write_reg, 1);
+ /* mutex had been acquired at dsi_on */
+ mipi_dsi_cmds_tx(&d2l_tx_buf, &cmd_write_reg, 1);
pr_debug("%s: reg=0x%x. data=0x%x.\n", __func__, reg, data);
diff --git a/drivers/video/msm/mipi_toshiba.c b/drivers/video/msm/mipi_toshiba.c
index aeaa5aa..520c67b 100644
--- a/drivers/video/msm/mipi_toshiba.c
+++ b/drivers/video/msm/mipi_toshiba.c
@@ -193,12 +193,12 @@
return -EINVAL;
if (TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WVGA_PT)
- mipi_dsi_cmds_tx(mfd, &toshiba_tx_buf,
+ mipi_dsi_cmds_tx(&toshiba_tx_buf,
toshiba_wvga_display_on_cmds,
ARRAY_SIZE(toshiba_wvga_display_on_cmds));
else if (TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WSVGA_PT ||
TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WUXGA)
- mipi_dsi_cmds_tx(mfd, &toshiba_tx_buf,
+ mipi_dsi_cmds_tx(&toshiba_tx_buf,
toshiba_wsvga_display_on_cmds,
ARRAY_SIZE(toshiba_wsvga_display_on_cmds));
else
@@ -218,7 +218,7 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &toshiba_tx_buf, toshiba_display_off_cmds,
+ mipi_dsi_cmds_tx(&toshiba_tx_buf, toshiba_display_off_cmds,
ARRAY_SIZE(toshiba_display_off_cmds));
return 0;
diff --git a/drivers/video/msm/mipi_truly.c b/drivers/video/msm/mipi_truly.c
index a2060f0..fd2a3ea 100644
--- a/drivers/video/msm/mipi_truly.c
+++ b/drivers/video/msm/mipi_truly.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -116,7 +116,7 @@
return -EINVAL;
msleep(20);
- mipi_dsi_cmds_tx(mfd, &truly_tx_buf, truly_display_on_cmds,
+ mipi_dsi_cmds_tx(&truly_tx_buf, truly_display_on_cmds,
ARRAY_SIZE(truly_display_on_cmds));
return 0;
@@ -133,7 +133,7 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &truly_tx_buf, truly_display_off_cmds,
+ mipi_dsi_cmds_tx(&truly_tx_buf, truly_display_off_cmds,
ARRAY_SIZE(truly_display_off_cmds));
return 0;
diff --git a/drivers/video/msm/mipi_truly_tft540960_1_e.c b/drivers/video/msm/mipi_truly_tft540960_1_e.c
index 98b24b1..50db66e 100644
--- a/drivers/video/msm/mipi_truly_tft540960_1_e.c
+++ b/drivers/video/msm/mipi_truly_tft540960_1_e.c
@@ -693,11 +693,11 @@
msleep(120);
if (mipi->mode == DSI_VIDEO_MODE) {
- mipi_dsi_cmds_tx(mfd, &truly_tx_buf,
+ mipi_dsi_cmds_tx(&truly_tx_buf,
truly_video_display_on_cmds,
ARRAY_SIZE(truly_video_display_on_cmds));
} else if (mipi->mode == DSI_CMD_MODE) {
- mipi_dsi_cmds_tx(mfd, &truly_tx_buf,
+ mipi_dsi_cmds_tx(&truly_tx_buf,
truly_cmd_display_on_cmds,
ARRAY_SIZE(truly_cmd_display_on_cmds));
}
@@ -716,7 +716,7 @@
if (mfd->key != MFD_KEY)
return -EINVAL;
- mipi_dsi_cmds_tx(mfd, &truly_tx_buf, truly_display_off_cmds,
+ mipi_dsi_cmds_tx(&truly_tx_buf, truly_display_off_cmds,
ARRAY_SIZE(truly_display_off_cmds));
return 0;
diff --git a/drivers/video/msm/msm_dss_io_7x27a.c b/drivers/video/msm/msm_dss_io_7x27a.c
index 17ee976..18e8ac5 100644
--- a/drivers/video/msm/msm_dss_io_7x27a.c
+++ b/drivers/video/msm/msm_dss_io_7x27a.c
@@ -317,6 +317,24 @@
void cont_splash_clk_ctrl(int enable)
{
+ static int cont_splash_clks_enabled;
+ if (enable && !cont_splash_clks_enabled) {
+ clk_prepare_enable(dsi_ref_clk);
+ clk_prepare_enable(mdp_dsi_pclk);
+ clk_prepare_enable(dsi_byte_div_clk);
+ clk_prepare_enable(dsi_esc_clk);
+ clk_prepare_enable(dsi_pixel_clk);
+ clk_prepare_enable(dsi_clk);
+ cont_splash_clks_enabled = 1;
+ } else if (!enable && cont_splash_clks_enabled) {
+ clk_disable_unprepare(dsi_clk);
+ clk_disable_unprepare(dsi_pixel_clk);
+ clk_disable_unprepare(dsi_esc_clk);
+ clk_disable_unprepare(dsi_byte_div_clk);
+ clk_disable_unprepare(mdp_dsi_pclk);
+ clk_disable_unprepare(dsi_ref_clk);
+ cont_splash_clks_enabled = 0;
+ }
}
void mipi_dsi_prepare_clocks(void)
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 5f58af7..72e3600 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -193,6 +193,7 @@
static struct msm_fb_platform_data *msm_fb_pdata;
unsigned char hdmi_prim_display;
+unsigned char hdmi_prim_resolution;
int msm_fb_detect_client(const char *name)
{
@@ -213,6 +214,8 @@
if (!strncmp((char *)msm_fb_pdata->prim_panel_name,
"hdmi_msm", len))
hdmi_prim_display = 1;
+ hdmi_prim_resolution =
+ msm_fb_pdata->ext_resolution;
return 0;
} else {
ret = -EPERM;
@@ -639,6 +642,12 @@
struct msm_fb_panel_data *pdata = NULL;
int ret = 0;
+ if (hdmi_prim_display) {
+ MSM_FB_INFO("%s: hdmi primary handles early suspend only\n",
+ __func__);
+ return 0;
+ }
+
if ((!mfd) || (mfd->key != MFD_KEY))
return 0;
@@ -664,6 +673,12 @@
struct msm_fb_panel_data *pdata = NULL;
int ret = 0;
+ if (hdmi_prim_display) {
+ MSM_FB_INFO("%s: hdmi primary handles early resume only\n",
+ __func__);
+ return 0;
+ }
+
if ((!mfd) || (mfd->key != MFD_KEY))
return 0;
@@ -688,7 +703,8 @@
.runtime_suspend = msm_fb_runtime_suspend,
.runtime_resume = msm_fb_runtime_resume,
.runtime_idle = msm_fb_runtime_idle,
-#if (defined(CONFIG_SUSPEND) && defined(CONFIG_FB_MSM_HDMI_MSM_PANEL))
+#if (defined(CONFIG_SUSPEND) && defined(CONFIG_FB_MSM_HDMI_MSM_PANEL) && \
+ !defined(CONFIG_FB_MSM_HDMI_AS_PRIMARY))
.suspend = msm_fb_ext_suspend,
.resume = msm_fb_ext_resume,
#endif
@@ -722,7 +738,9 @@
static void msmfb_early_suspend(struct early_suspend *h)
{
struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
- early_suspend);
+ early_suspend);
+ struct msm_fb_panel_data *pdata = NULL;
+
#if defined(CONFIG_FB_MSM_MDP303)
/*
* For MDP with overlay, set framebuffer with black pixels
@@ -740,12 +758,37 @@
}
#endif
msm_fb_suspend_sub(mfd);
+
+ pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
+ if (hdmi_prim_display &&
+ (mfd->panel_info.type == HDMI_PANEL ||
+ mfd->panel_info.type == DTV_PANEL)) {
+ /* Turn off the HPD circuitry */
+ if (pdata->power_ctrl) {
+ MSM_FB_INFO("%s: Turning off HPD circuitry\n",
+ __func__);
+ pdata->power_ctrl(FALSE);
+ }
+ }
}
static void msmfb_early_resume(struct early_suspend *h)
{
struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
- early_suspend);
+ early_suspend);
+ struct msm_fb_panel_data *pdata = NULL;
+
+ pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
+ if (hdmi_prim_display &&
+ (mfd->panel_info.type == HDMI_PANEL ||
+ mfd->panel_info.type == DTV_PANEL)) {
+ /* Turn on the HPD circuitry */
+ if (pdata->power_ctrl) {
+ MSM_FB_INFO("%s: Turning on HPD circuitry\n", __func__);
+ pdata->power_ctrl(TRUE);
+ }
+ }
+
msm_fb_resume_sub(mfd);
}
#endif
@@ -1267,14 +1310,14 @@
var->bits_per_pixel = bpp * 8; /* FrameBuffer color depth */
if (mfd->dest == DISPLAY_LCD) {
if (panel_info->type == MDDI_PANEL && panel_info->mddi.is_type1)
- var->reserved[3] = panel_info->lcd.refx100 / (100 * 2);
+ var->reserved[4] = panel_info->lcd.refx100 / (100 * 2);
else
- var->reserved[3] = panel_info->lcd.refx100 / 100;
+ var->reserved[4] = panel_info->lcd.refx100 / 100;
} else {
if (panel_info->type == MIPI_VIDEO_PANEL) {
- var->reserved[3] = panel_info->mipi.frame_rate;
+ var->reserved[4] = panel_info->mipi.frame_rate;
} else {
- var->reserved[3] = panel_info->clk_rate /
+ var->reserved[4] = panel_info->clk_rate /
((panel_info->lcdc.h_back_porch +
panel_info->lcdc.h_front_porch +
panel_info->lcdc.h_pulse_width +
@@ -1285,7 +1328,7 @@
panel_info->yres));
}
}
- pr_debug("reserved[3] %u\n", var->reserved[3]);
+ pr_debug("reserved[4] %u\n", var->reserved[4]);
/*
* id field for fb app
@@ -1364,6 +1407,14 @@
fbi->fix.smem_start = (unsigned long)fbram_phys;
msm_iommu_map_contig_buffer(fbi->fix.smem_start,
+ DISPLAY_WRITE_DOMAIN,
+ GEN_POOL,
+ fbi->fix.smem_len,
+ SZ_4K,
+ 0,
+ &(mfd->display_iova));
+
+ msm_iommu_map_contig_buffer(fbi->fix.smem_start,
DISPLAY_READ_DOMAIN,
GEN_POOL,
fbi->fix.smem_len,
@@ -1432,7 +1483,7 @@
ret = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
- if (mfd->panel_info.type != DTV_PANEL) {
+ if (hdmi_prim_display || mfd->panel_info.type != DTV_PANEL) {
mfd->early_suspend.suspend = msmfb_early_suspend;
mfd->early_suspend.resume = msmfb_early_resume;
mfd->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 2;
@@ -1641,10 +1692,9 @@
struct msm_fb_panel_data *pdata;
/*
- * If framebuffer is 1 or 2, io pen display is not allowed.
+ * If framebuffer is 2, io pen display is not allowed.
*/
- if (bf_supported &&
- (info->node == 1 || info->node == 2)) {
+ if (bf_supported && info->node == 2) {
pr_err("%s: no pan display for fb%d!",
__func__, info->node);
return -EPERM;
@@ -2804,7 +2854,7 @@
static int msmfb_overlay_unset(struct fb_info *info, unsigned long *argp)
{
- int ret, ndx;
+ int ret, ndx;
ret = copy_from_user(&ndx, argp, sizeof(ndx));
if (ret) {
@@ -2816,6 +2866,41 @@
return mdp4_overlay_unset(info, ndx);
}
+static int msmfb_overlay_wait4vsync(struct fb_info *info, void __user *argp)
+{
+ int ret;
+ long long vtime;
+
+ ret = mdp4_overlay_wait4vsync(info, &vtime);
+ if (ret) {
+ pr_err("%s: ioctl failed\n", __func__);
+ return ret;
+ }
+
+ if (copy_to_user(argp, &vtime, sizeof(vtime))) {
+ pr_err("%s: copy2user failed\n", __func__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int msmfb_overlay_vsync_ctrl(struct fb_info *info, void __user *argp)
+{
+ int ret;
+ int enable;
+
+ ret = copy_from_user(&enable, argp, sizeof(enable));
+ if (ret) {
+ pr_err("%s:msmfb_overlay_vsync ioctl failed", __func__);
+ return ret;
+ }
+
+ ret = mdp4_overlay_vsync_ctrl(info, enable);
+
+ return ret;
+}
+
static int msmfb_overlay_play_wait(struct fb_info *info, unsigned long *argp)
{
int ret;
@@ -2922,27 +3007,6 @@
return ret;
}
-static int msmfb_overlay_blt_off(struct fb_info *info, unsigned long *argp)
-{
- int ret;
- struct msmfb_overlay_blt req;
-
- ret = copy_from_user(&req, argp, sizeof(req));
- if (ret) {
- pr_err("%s: failed\n", __func__);
- return ret;
- }
-
- ret = mdp4_overlay_blt_offset(info, &req);
-
- ret = copy_to_user(argp, &req, sizeof(req));
- if (ret)
- printk(KERN_ERR "%s:msmfb_overlay_blt_off ioctl failed\n",
- __func__);
-
- return ret;
-}
-
#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
static int msmfb_overlay_ioctl_writeback_init(struct fb_info *info)
{
@@ -3252,6 +3316,16 @@
switch (cmd) {
#ifdef CONFIG_FB_MSM_OVERLAY
+ case FBIO_WAITFORVSYNC:
+ down(&msm_fb_ioctl_ppp_sem);
+ ret = msmfb_overlay_wait4vsync(info, argp);
+ up(&msm_fb_ioctl_ppp_sem);
+ break;
+ case MSMFB_OVERLAY_VSYNC_CTRL:
+ down(&msm_fb_ioctl_ppp_sem);
+ ret = msmfb_overlay_vsync_ctrl(info, argp);
+ up(&msm_fb_ioctl_ppp_sem);
+ break;
case MSMFB_OVERLAY_GET:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_overlay_get(info, argp);
@@ -3287,11 +3361,6 @@
ret = msmfb_overlay_blt(info, argp);
up(&msm_fb_ioctl_ppp_sem);
break;
- case MSMFB_OVERLAY_BLT_OFFSET:
- down(&msm_fb_ioctl_ppp_sem);
- ret = msmfb_overlay_blt_off(info, argp);
- up(&msm_fb_ioctl_ppp_sem);
- break;
case MSMFB_OVERLAY_3D:
down(&msm_fb_ioctl_ppp_sem);
ret = msmfb_overlay_3d_sbys(info, argp);
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
index 6571245..22eaf4f 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
@@ -161,11 +161,6 @@
encoder = &ddl->codec_data.encoder;
vidc_1080p_get_encoder_sequence_header_size(
&encoder->seq_header_length);
- if ((encoder->codec.codec == VCD_CODEC_H264) &&
- (encoder->profile.profile == VCD_PROFILE_H264_BASELINE))
- if ((encoder->seq_header.align_virtual_addr) &&
- (encoder->seq_header_length > 6))
- encoder->seq_header.align_virtual_addr[6] = 0xC0;
ddl_context->ddl_callback(VCD_EVT_RESP_START, VCD_S_SUCCESS,
NULL, 0, (u32 *) ddl, ddl->client_data);
ddl_release_command_channel(ddl_context,
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
index a6001eb..d1f6e07 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
@@ -380,7 +380,7 @@
encode_profile = VIDC_1080P_PROFILE_MPEG4_ADV_SIMPLE;
break;
case VCD_PROFILE_H264_BASELINE:
- encode_profile = VIDC_1080P_PROFILE_H264_BASELINE;
+ encode_profile = VIDC_1080P_PROFILE_H264_CONSTRAINED_BASELINE;
break;
case VCD_PROFILE_H264_MAIN:
encode_profile = VIDC_1080P_PROFILE_H264_MAIN;
diff --git a/drivers/video/msm/vidc/1080p/ddl/vidc.h b/drivers/video/msm/vidc/1080p/ddl/vidc.h
index 7460ef3..7b8dc6f 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vidc.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vidc.h
@@ -311,6 +311,7 @@
#define VIDC_1080P_PROFILE_H264_MAIN 0x00000000
#define VIDC_1080P_PROFILE_H264_HIGH 0x00000001
#define VIDC_1080P_PROFILE_H264_BASELINE 0x00000002
+#define VIDC_1080P_PROFILE_H264_CONSTRAINED_BASELINE 0x00000003
enum vidc_1080p_decode{
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_api.c b/drivers/video/msm/vidc/common/vcd/vcd_api.c
index c66c2b7..0dbbf57 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_api.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_api.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <media/msm/vidc_type.h>
+#include <media/msm/vidc_init.h>
#include "vcd.h"
u32 vcd_init(struct vcd_init_config *config, s32 *driver_handle)
@@ -157,7 +158,7 @@
void *handle, void *const client_data),
void *client_data, int flags)
{
- u32 rc = 0;
+ u32 rc = 0, num_of_instances = 0;
struct vcd_drv_ctxt *drv_ctxt;
struct vcd_clnt_ctxt *cctxt;
int is_secure = (flags & VCD_CP_SESSION) ? 1 : 0;
@@ -167,6 +168,17 @@
VCD_MSG_ERROR("Bad parameters");
return -EINVAL;
}
+
+ drv_ctxt = vcd_get_drv_context();
+ cctxt = drv_ctxt->dev_ctxt.cctxt_list_head;
+ while (cctxt) {
+ num_of_instances++;
+ cctxt = cctxt->next;
+ }
+ if (num_of_instances == VIDC_MAX_NUM_CLIENTS) {
+ pr_err(" %s(): Max number of clients reached\n", __func__);
+ return -ENODEV;
+ }
rc = is_session_invalid(decoding, flags);
if (rc) {
VCD_MSG_ERROR("Invalid Session: is_decoder: %d, secure: %d\n",
@@ -175,7 +187,6 @@
}
if (is_secure)
res_trk_secure_set();
- drv_ctxt = vcd_get_drv_context();
mutex_lock(&drv_ctxt->dev_mutex);
if (drv_ctxt->dev_state.state_table->ev_hdlr.open) {
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_core.h b/drivers/video/msm/vidc/common/vcd/vcd_core.h
index 79bcac0..8126a0e 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_core.h
+++ b/drivers/video/msm/vidc/common/vcd/vcd_core.h
@@ -25,7 +25,7 @@
#define VCD_MIN_PERF_LEVEL 37900
-#define VCD_DRIVER_INSTANCE_MAX 4
+#define VCD_DRIVER_CLIENTS_MAX 6
#define VCD_MAX_CLIENT_TRANSACTIONS 32
@@ -126,7 +126,7 @@
struct vcd_init_config config;
- u32 driver_ids[VCD_DRIVER_INSTANCE_MAX];
+ u32 driver_ids[VCD_DRIVER_CLIENTS_MAX];
u32 refs;
u8 *device_base_addr;
void *hw_timer_handle;
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
index 96e729d..53495e0 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
@@ -537,12 +537,12 @@
*driver_handle = 0;
driver_id = 0;
- while (driver_id < VCD_DRIVER_INSTANCE_MAX &&
+ while (driver_id < VCD_DRIVER_CLIENTS_MAX &&
dev_ctxt->driver_ids[driver_id]) {
++driver_id;
}
- if (driver_id == VCD_DRIVER_INSTANCE_MAX) {
+ if (driver_id == VCD_DRIVER_CLIENTS_MAX) {
VCD_MSG_ERROR("Max driver instances reached");
return VCD_ERR_FAIL;
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index 5b64f20..c11ac30 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -1228,7 +1228,7 @@
driver_handle--;
if (driver_handle < 0 ||
- driver_handle >= VCD_DRIVER_INSTANCE_MAX ||
+ driver_handle >= VCD_DRIVER_CLIENTS_MAX ||
!dev_ctxt->driver_ids[driver_handle]) {
return false;
} else {
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5aa43c3..7e6fd75 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -172,7 +172,7 @@
}
/**
- * virtqueue_add_buf - expose buffer to other end
+ * vring_add_buf - expose buffer to other end
* @vq: the struct virtqueue we're talking about.
* @sg: the description of the buffer(s).
* @out_num: the number of sg readable by other side
@@ -188,7 +188,7 @@
* positive return values as "available": indirect buffers mean that
* we can put an entire sg[] array inside a single queue entry.
*/
-int virtqueue_add_buf(struct virtqueue *_vq,
+static int vring_add_buf(struct virtqueue *_vq,
struct scatterlist sg[],
unsigned int out,
unsigned int in,
@@ -288,20 +288,19 @@
return vq->num_free;
}
-EXPORT_SYMBOL_GPL(virtqueue_add_buf);
/**
- * virtqueue_kick_prepare - first half of split virtqueue_kick call.
+ * vring_kick_prepare - first half of split vring_kick call.
* @vq: the struct virtqueue
*
- * Instead of virtqueue_kick(), you can do:
- * if (virtqueue_kick_prepare(vq))
- * virtqueue_notify(vq);
+ * Instead of vring_kick(), you can do:
+ * if (vring_kick_prepare(vq))
+ * vring_kick_notify(vq);
*
- * This is sometimes useful because the virtqueue_kick_prepare() needs
- * to be serialized, but the actual virtqueue_notify() call does not.
+ * This is sometimes useful because the vring_kick_prepare() needs
+ * to be serialized, but the actual vring_kick_notify() call does not.
*/
-bool virtqueue_kick_prepare(struct virtqueue *_vq)
+static bool vring_kick_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 new, old;
@@ -333,39 +332,36 @@
END_USE(vq);
return needs_kick;
}
-EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
/**
- * virtqueue_notify - second half of split virtqueue_kick call.
+ * vring_kick_notify - second half of split vring_kick call.
* @vq: the struct virtqueue
*
* This does not need to be serialized.
*/
-void virtqueue_notify(struct virtqueue *_vq)
+static void vring_kick_notify(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
/* Prod other side to tell it about changes. */
vq->notify(_vq);
}
-EXPORT_SYMBOL_GPL(virtqueue_notify);
/**
- * virtqueue_kick - update after add_buf
+ * vring_kick - update after add_buf
* @vq: the struct virtqueue
*
- * After one or more virtqueue_add_buf calls, invoke this to kick
+ * After one or more vring_add_buf calls, invoke this to kick
* the other side.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
-void virtqueue_kick(struct virtqueue *vq)
+static void vring_kick(struct virtqueue *vq)
{
- if (virtqueue_kick_prepare(vq))
- virtqueue_notify(vq);
+ if (vring_kick_prepare(vq))
+ vring_kick_notify(vq);
}
-EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
@@ -398,7 +394,7 @@
}
/**
- * virtqueue_get_buf - get the next used buffer
+ * vring_get_buf - get the next used buffer
* @vq: the struct virtqueue we're talking about.
* @len: the length written into the buffer
*
@@ -411,9 +407,9 @@
* operations at the same time (except where noted).
*
* Returns NULL if there are no used buffers, or the "data" token
- * handed to virtqueue_add_buf().
+ * handed to vring_add_buf().
*/
-void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
+static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
{
struct vring_virtqueue *vq = to_vvq(_vq);
void *ret;
@@ -468,10 +464,9 @@
END_USE(vq);
return ret;
}
-EXPORT_SYMBOL_GPL(virtqueue_get_buf);
/**
- * virtqueue_disable_cb - disable callbacks
+ * vring_disable_cb - disable callbacks
* @vq: the struct virtqueue we're talking about.
*
* Note that this is not necessarily synchronous, hence unreliable and only
@@ -479,16 +474,15 @@
*
* Unlike other operations, this need not be serialized.
*/
-void virtqueue_disable_cb(struct virtqueue *_vq)
+static void vring_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
-EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
- * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * vring_enable_cb - restart callbacks after disable_cb.
* @vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks; it returns "false" if there are pending
@@ -498,7 +492,7 @@
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
-bool virtqueue_enable_cb(struct virtqueue *_vq)
+static bool vring_enable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -520,10 +514,9 @@
END_USE(vq);
return true;
}
-EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
/**
- * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
+ * vring_enable_cb_delayed - restart callbacks after disable_cb.
* @vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks but hints to the other side to delay
@@ -535,7 +528,7 @@
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
-bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+static bool vring_enable_cb_delayed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 bufs;
@@ -560,17 +553,16 @@
END_USE(vq);
return true;
}
-EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
/**
- * virtqueue_detach_unused_buf - detach first unused buffer
+ * vring_detach_unused_buf - detach first unused buffer
* @vq: the struct virtqueue we're talking about.
*
- * Returns NULL or the "data" token handed to virtqueue_add_buf().
+ * Returns NULL or the "data" token handed to vring_add_buf().
* This is not valid on an active queue; it is useful only for device
* shutdown.
*/
-void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
+static void *vring_detach_unused_buf(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i;
@@ -594,7 +586,6 @@
END_USE(vq);
return NULL;
}
-EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
irqreturn_t vring_interrupt(int irq, void *_vq)
{
@@ -616,6 +607,34 @@
}
EXPORT_SYMBOL_GPL(vring_interrupt);
+/**
+ * get_vring_size - return the size of the virtqueue's vring
+ * @vq: the struct virtqueue containing the vring of interest.
+ *
+ * Returns the size of the vring. This is mainly used for boasting to
+ * userspace. Unlike other operations, this need not be serialized.
+ */
+static unsigned int get_vring_size(struct virtqueue *_vq)
+{
+
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ return vq->vring.num;
+}
+
+static struct virtqueue_ops vring_vq_ops = {
+ .add_buf = vring_add_buf,
+ .get_buf = vring_get_buf,
+ .kick = vring_kick,
+ .kick_prepare = vring_kick_prepare,
+ .kick_notify = vring_kick_notify,
+ .disable_cb = vring_disable_cb,
+ .enable_cb = vring_enable_cb,
+ .enable_cb_delayed = vring_enable_cb_delayed,
+ .detach_unused_buf = vring_detach_unused_buf,
+ .get_impl_size = get_vring_size,
+};
+
struct virtqueue *vring_new_virtqueue(unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
@@ -641,6 +660,7 @@
vring_init(&vq->vring, num, pages, vring_align);
vq->vq.callback = callback;
vq->vq.vdev = vdev;
+ vq->vq.vq_ops = &vring_vq_ops;
vq->vq.name = name;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
@@ -699,20 +719,4 @@
}
EXPORT_SYMBOL_GPL(vring_transport_features);
-/**
- * virtqueue_get_vring_size - return the size of the virtqueue's vring
- * @vq: the struct virtqueue containing the vring of interest.
- *
- * Returns the size of the vring. This is mainly used for boasting to
- * userspace. Unlike other operations, this need not be serialized.
- */
-unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
-{
-
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- return vq->vring.num;
-}
-EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
-
MODULE_LICENSE("GPL");
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 64c3b31..aa24919 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -124,7 +124,7 @@
seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
seq_putc(p, '\n');
- for_each_online_cpu(i) {
+ for_each_present_cpu(i) {
/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8aeadf6..6af2a3e 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -645,6 +645,11 @@
*(.security_initcall.init) \
VMLINUX_SYMBOL(__security_initcall_end) = .;
+#define COMPAT_EXPORTS \
+ VMLINUX_SYMBOL(__compat_exports_start) = .; \
+ *(.exportcompat.init) \
+ VMLINUX_SYMBOL(__compat_exports_end) = .;
+
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index b2b79b1..277fdf1 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -259,6 +259,7 @@
header-y += msdos_fs.h
header-y += msg.h
header-y += msm_adc.h
+header-y += msm_ion.h
header-y += epm_adc.h
header-y += mtio.h
header-y += n_r3964.h
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index f03a493..b6064a4 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -47,53 +47,107 @@
enum coresight_dev_type {
CORESIGHT_DEV_TYPE_SINK,
CORESIGHT_DEV_TYPE_LINK,
+ CORESIGHT_DEV_TYPE_LINKSINK,
CORESIGHT_DEV_TYPE_SOURCE,
- CORESIGHT_DEV_TYPE_MAX,
+};
+
+enum coresight_dev_subtype_sink {
+ CORESIGHT_DEV_SUBTYPE_SINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_SINK_PORT,
+ CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
+};
+
+enum coresight_dev_subtype_link {
+ CORESIGHT_DEV_SUBTYPE_LINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_LINK_MERG,
+ CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
+ CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
+};
+
+enum coresight_dev_subtype_source {
+ CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+};
+
+struct coresight_dev_subtype {
+ enum coresight_dev_subtype_sink sink_subtype;
+ enum coresight_dev_subtype_link link_subtype;
+ enum coresight_dev_subtype_source source_subtype;
+};
+
+struct coresight_platform_data {
+ int id;
+ const char *name;
+ int nr_inports;
+ const int *outports;
+ const int *child_ids;
+ const int *child_ports;
+ int nr_outports;
+ bool default_sink;
+};
+
+struct coresight_desc {
+ enum coresight_dev_type type;
+ struct coresight_dev_subtype subtype;
+ const struct coresight_ops *ops;
+ struct coresight_platform_data *pdata;
+ struct device *dev;
+ const struct attribute_group **groups;
+ struct module *owner;
};
struct coresight_connection {
+ int outport;
int child_id;
int child_port;
struct coresight_device *child_dev;
struct list_head link;
};
+struct coresight_refcnt {
+ int sink_refcnt;
+ int *link_refcnts;
+ int source_refcnt;
+};
+
struct coresight_device {
int id;
struct coresight_connection *conns;
int nr_conns;
+ enum coresight_dev_type type;
+ struct coresight_dev_subtype subtype;
const struct coresight_ops *ops;
struct device dev;
- struct mutex mutex;
- int *refcnt;
- struct list_head link;
+ struct coresight_refcnt refcnt;
+ struct list_head dev_link;
+ struct list_head path_link;
struct module *owner;
bool enable;
};
#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+struct coresight_ops_sink {
+ int (*enable)(struct coresight_device *csdev);
+ void (*disable)(struct coresight_device *csdev);
+};
+
+struct coresight_ops_link {
+ int (*enable)(struct coresight_device *csdev, int iport, int oport);
+ void (*disable)(struct coresight_device *csdev, int iport, int oport);
+};
+
+struct coresight_ops_source {
+ int (*enable)(struct coresight_device *csdev);
+ void (*disable)(struct coresight_device *csdev);
+};
+
struct coresight_ops {
- int (*enable)(struct coresight_device *csdev, int port);
- void (*disable)(struct coresight_device *csdev, int port);
-};
-
-struct coresight_platform_data {
- int id;
- const char *name;
- int nr_ports;
- int *child_ids;
- int *child_ports;
- int nr_children;
-};
-
-struct coresight_desc {
- enum coresight_dev_type type;
- const struct coresight_ops *ops;
- struct coresight_platform_data *pdata;
- struct device *dev;
- const struct attribute_group **groups;
- struct module *owner;
+ const struct coresight_ops_sink *sink_ops;
+ const struct coresight_ops_link *link_ops;
+ const struct coresight_ops_source *source_ops;
};
struct qdss_source {
@@ -109,24 +163,29 @@
};
-extern struct coresight_device *
-coresight_register(struct coresight_desc *desc);
-extern void coresight_unregister(struct coresight_device *csdev);
-extern int coresight_enable(struct coresight_device *csdev, int port);
-extern void coresight_disable(struct coresight_device *csdev, int port);
-
#ifdef CONFIG_MSM_QDSS
extern struct qdss_source *qdss_get(const char *name);
extern void qdss_put(struct qdss_source *src);
extern int qdss_enable(struct qdss_source *src);
extern void qdss_disable(struct qdss_source *src);
extern void qdss_disable_sink(void);
+extern struct coresight_device *
+coresight_register(struct coresight_desc *desc);
+extern void coresight_unregister(struct coresight_device *csdev);
+extern int coresight_enable(struct coresight_device *csdev);
+extern void coresight_disable(struct coresight_device *csdev);
#else
static inline struct qdss_source *qdss_get(const char *name) { return NULL; }
static inline void qdss_put(struct qdss_source *src) {}
static inline int qdss_enable(struct qdss_source *src) { return -ENOSYS; }
static inline void qdss_disable(struct qdss_source *src) {}
static inline void qdss_disable_sink(void) {}
+static inline struct coresight_device *
+coresight_register(struct coresight_desc *desc) { return NULL; }
+static inline void coresight_unregister(struct coresight_device *csdev) {}
+static inline int
+coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
+static inline void coresight_disable(struct coresight_device *csdev) {}
#endif
#endif
diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h
index e0058d3..d19dfa5 100644
--- a/include/linux/dvb/dmx.h
+++ b/include/linux/dvb/dmx.h
@@ -36,6 +36,9 @@
#define DMX_FILTER_SIZE 16
+/* Min recording chunk upon which event is generated */
+#define DMX_REC_BUFF_CHUNK_MIN_SIZE (100*188)
+
typedef enum
{
DMX_OUT_DECODER, /* Streaming directly to decoder. */
@@ -135,7 +138,6 @@
enum dmx_indexing_video_profile profile;
};
-
struct dmx_pes_filter_params
{
__u16 pid;
@@ -144,6 +146,18 @@
dmx_pes_type_t pes_type;
__u32 flags;
+ /*
+ * The following configures when the event
+ * DMX_EVENT_NEW_REC_CHUNK will be triggered.
+ * When new recorded data is received with size
+ * equal or larger than this value a new event
+	 * will be triggered. This is relevant when
+ * output is DMX_OUT_TS_TAP or DMX_OUT_TSDEMUX_TAP,
+ * size must be at least DMX_REC_BUFF_CHUNK_MIN_SIZE
+ * and smaller than buffer size.
+ */
+ __u32 rec_chunk_size;
+
struct dmx_indexing_video_params video_params;
};
@@ -170,6 +184,129 @@
int error;
};
+/* Events associated with each demux filter */
+enum dmx_event {
+ /* New PES packet is ready to be consumed */
+ DMX_EVENT_NEW_PES,
+
+ /* New section is ready to be consumed */
+ DMX_EVENT_NEW_SECTION,
+
+ /* New recording chunk is ready to be consumed */
+ DMX_EVENT_NEW_REC_CHUNK,
+
+ /* New PCR value is ready */
+ DMX_EVENT_NEW_PCR,
+
+ /* Overflow */
+ DMX_EVENT_BUFFER_OVERFLOW,
+
+ /* Section was dropped due to CRC error */
+ DMX_EVENT_SECTION_CRC_ERROR,
+
+ /* End-of-stream, no more data from this filter */
+ DMX_EVENT_EOS
+};
+
+/* Flags passed in filter events */
+
+/* Continuity counter error was detected */
+#define DMX_FILTER_CC_ERROR 0x01
+
+/* Discontinuity indicator was set */
+#define DMX_FILTER_DISCONTINUITY_INDEICATOR 0x02
+
+/* PES length in PES header is not correct */
+#define DMX_FILTER_PES_LENGTH_ERROR 0x04
+
+
+/* PES info associated with DMX_EVENT_NEW_PES event */
+struct dmx_pes_event_info {
+ /* Offset at which PES information starts */
+ __u32 base_offset;
+
+ /*
+ * Start offset at which PES data
+ * from the stream starts.
+ * Equal to base_offset if PES data
+ * starts from the beginning.
+ */
+ __u32 start_offset;
+
+ /* Total length holding the PES information */
+ __u32 total_length;
+
+ /* Actual length holding the PES data */
+ __u32 actual_length;
+
+ /* Local receiver timestamp in 27MHz */
+ __u64 stc;
+
+ /* Flags passed in filter events */
+ __u32 flags;
+};
+
+/* Section info associated with DMX_EVENT_NEW_SECTION event */
+struct dmx_section_event_info {
+ /* Offset at which section information starts */
+ __u32 base_offset;
+
+ /*
+ * Start offset at which section data
+ * from the stream starts.
+ * Equal to base_offset if section data
+ * starts from the beginning.
+ */
+ __u32 start_offset;
+
+ /* Total length holding the section information */
+ __u32 total_length;
+
+ /* Actual length holding the section data */
+ __u32 actual_length;
+
+ /* Flags passed in filter events */
+ __u32 flags;
+};
+
+/* Recording info associated with DMX_EVENT_NEW_REC_CHUNK event */
+struct dmx_rec_chunk_event_info {
+ /* Offset at which recording chunk starts */
+ __u32 offset;
+
+ /* Size of recording chunk in bytes */
+ __u32 size;
+};
+
+/* PCR info associated with DMX_EVENT_NEW_PCR event */
+struct dmx_pcr_event_info {
+ /* Local timestamp in 27MHz
+ * when PCR packet was received
+ */
+ __u64 stc;
+
+ /* PCR value in 27MHz */
+ __u64 pcr;
+
+ /* Flags passed in filter events */
+ __u32 flags;
+};
+
+/*
+ * Filter's event returned through DMX_GET_EVENT.
+ * poll with POLLPRI would block until events are available.
+ */
+struct dmx_filter_event {
+ enum dmx_event type;
+
+ union {
+ struct dmx_pes_event_info pes;
+ struct dmx_section_event_info section;
+ struct dmx_rec_chunk_event_info recording_chunk;
+ struct dmx_pcr_event_info pcr;
+ } params;
+};
+
typedef struct dmx_caps {
__u32 caps;
@@ -292,5 +429,6 @@
#define DMX_RELEASE_DATA _IO('o', 57)
#define DMX_FEED_DATA _IO('o', 58)
#define DMX_SET_PLAYBACK_MODE _IOW('o', 59, enum dmx_playback_mode_t)
+#define DMX_GET_EVENT _IOR('o', 60, struct dmx_filter_event)
#endif /*_DVBDMX_H_*/
diff --git a/include/linux/fb.h b/include/linux/fb.h
index d31cb68..f6a2923 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -279,7 +279,7 @@
__u32 vmode; /* see FB_VMODE_* */
__u32 rotate; /* angle we rotate counter clockwise */
__u32 colorspace; /* colorspace for FOURCC-based modes */
- __u32 reserved[4]; /* Reserved for future compatibility */
+ __u32 reserved[5]; /* Reserved for future compatibility */
};
struct fb_cmap {
diff --git a/include/linux/input/lis3dh.h b/include/linux/input/lis3dh.h
new file mode 100644
index 0000000..f081b06
--- /dev/null
+++ b/include/linux/input/lis3dh.h
@@ -0,0 +1,85 @@
+
+/******************** (C) COPYRIGHT 2010 STMicroelectronics ********************
+*
+* File Name : lis3dh_misc.h
+* Authors : MH - C&I BU - Application Team
+* : Matteo Dameno (matteo.dameno@st.com)
+* : Carmine Iascone (carmine.iascone@st.com)
+* : Samuel Huo (samuel.huo@st.com)
+* Version : V 1.1.0
+* Date : 07/10/2012
+*
+********************************************************************************
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* THE PRESENT SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
+* OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, FOR THE SOLE
+* PURPOSE TO SUPPORT YOUR APPLICATION DEVELOPMENT.
+* AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY DIRECT,
+* INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING FROM THE
+* CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE CODING
+* INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
+*
+* THIS SOFTWARE IS SPECIFICALLY DESIGNED FOR EXCLUSIVE USE WITH ST PARTS.
+*
+*******************************************************************************/
+
+#ifndef __LIS3DH_H__
+#define __LIS3DH_H__
+
+
+#define SAD0L 0x00
+#define SAD0H 0x01
+#define LIS3DH_ACC_I2C_SADROOT 0x0C
+#define LIS3DH_ACC_I2C_SAD_L ((LIS3DH_ACC_I2C_SADROOT<<1)|SAD0L)
+#define LIS3DH_ACC_I2C_SAD_H ((LIS3DH_ACC_I2C_SADROOT<<1)|SAD0H)
+#define LIS3DH_ACC_DEV_NAME "lis3dh_acc"
+
+
+/************************************************/
+/* Accelerometer defines section */
+/************************************************/
+
+/* Accelerometer Sensor Full Scale */
+#define LIS3DH_ACC_FS_MASK 0x30
+#define LIS3DH_ACC_G_2G 0x00
+#define LIS3DH_ACC_G_4G 0x10
+#define LIS3DH_ACC_G_8G 0x20
+#define LIS3DH_ACC_G_16G 0x30
+
+
+#ifdef __KERNEL__
+struct lis3dh_acc_platform_data {
+ int poll_interval;
+ int min_interval;
+
+ u8 g_range;
+
+ u8 axis_map_x;
+ u8 axis_map_y;
+ u8 axis_map_z;
+
+ u8 negate_x;
+ u8 negate_y;
+ u8 negate_z;
+
+ int (*init)(void);
+ void (*exit)(void);
+ int (*power_on)(void);
+ int (*power_off)(void);
+
+ /* set gpio_int[1,2] either to the choosen gpio pin number or to -EINVAL
+ * if leaved unconnected
+ */
+ int gpio_int1;
+ int gpio_int2;
+};
+#endif /* __KERNEL__ */
+
+#endif /* __LIS3DH_H__ */
+
+
+
diff --git a/include/linux/leds-msm-tricolor.h b/include/linux/leds-msm-tricolor.h
new file mode 100644
index 0000000..314645e
--- /dev/null
+++ b/include/linux/leds-msm-tricolor.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LEDS_MSM_TRICOLOR__
+enum tri_color_led_color {
+ LED_COLOR_RED,
+ LED_COLOR_GREEN,
+ LED_COLOR_BLUE,
+ LED_COLOR_MAX
+};
+#endif
diff --git a/include/linux/mfd/pm8xxx/pm8921-charger.h b/include/linux/mfd/pm8xxx/pm8921-charger.h
index fca8700..b00e050 100644
--- a/include/linux/mfd/pm8xxx/pm8921-charger.h
+++ b/include/linux/mfd/pm8xxx/pm8921-charger.h
@@ -89,6 +89,10 @@
* @get_batt_capacity_percent:
* a board specific function to return battery
* capacity. If null - a default one will be used
+ * @dc_unplug_check: enables the reverse boosting fix for the DC_IN line
+ * however, this should only be enabled for devices which
+ * control the DC OVP FETs otherwise this option should
+ * remain disabled
* @trkl_voltage: the trkl voltage in (mV) below which hw controlled
* trkl charging happens with linear charger
* @weak_voltage: the weak voltage (mV) below which hw controlled
@@ -137,6 +141,7 @@
int64_t batt_id_min;
int64_t batt_id_max;
bool keep_btm_on_suspend;
+ bool dc_unplug_check;
int trkl_voltage;
int weak_voltage;
int trkl_current;
diff --git a/drivers/video/msm/mhl/mhl_8334.h b/include/linux/mhl_8334.h
similarity index 66%
rename from drivers/video/msm/mhl/mhl_8334.h
rename to include/linux/mhl_8334.h
index eba544a..1b19103 100644
--- a/drivers/video/msm/mhl/mhl_8334.h
+++ b/include/linux/mhl_8334.h
@@ -17,9 +17,8 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <mach/board.h>
-
-#include "mhl_devcap.h"
-#include "mhl_defs.h"
+#include <linux/mhl_devcap.h>
+#include <linux/mhl_defs.h>
#define MHL_DEVICE_NAME "sii8334"
#define MHL_DRIVER_NAME "sii8334"
@@ -27,12 +26,48 @@
#define HPD_UP 1
#define HPD_DOWN 0
+enum discovery_result_enum {
+ MHL_DISCOVERY_RESULT_USB = 0,
+ MHL_DISCOVERY_RESULT_MHL,
+};
+
+/* USB driver interface */
+
+#ifdef CONFIG_FB_MSM_HDMI_MHL_8334
+ /* mhl_device_discovery */
+extern int mhl_device_discovery(const char *name, int *result);
+
+/* - register|unregister MHL cable plug callback. */
+extern int mhl_register_callback
+ (const char *name, void (*callback)(int online));
+extern int mhl_unregister_callback(const char *name);
+#else
+static inline int mhl_device_discovery(const char *name, int *result)
+{
+ return -ENODEV;
+}
+
+static inline int
+ mhl_register_callback(const char *name, void (*callback)(int online))
+{
+ return -ENODEV;
+}
+
+static inline int mhl_unregister_callback(const char *name)
+{
+ return -ENODEV;
+}
+#endif
+
struct mhl_msm_state_t {
struct i2c_client *i2c_client;
struct i2c_driver *i2c_driver;
uint8_t cur_state;
uint8_t chip_rev_id;
struct msm_mhl_platform_data *mhl_data;
+ /* Device Discovery stuff */
+ int mhl_mode;
+ struct completion rgnd_done;
};
enum {
diff --git a/drivers/video/msm/mhl/mhl_defs.h b/include/linux/mhl_defs.h
similarity index 100%
rename from drivers/video/msm/mhl/mhl_defs.h
rename to include/linux/mhl_defs.h
diff --git a/drivers/video/msm/mhl/mhl_devcap.h b/include/linux/mhl_devcap.h
similarity index 100%
rename from drivers/video/msm/mhl/mhl_devcap.h
rename to include/linux/mhl_devcap.h
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index b836824..338c891 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -121,6 +121,7 @@
unsigned int sg_len; /* size of scatter list */
struct scatterlist *sg; /* I/O scatter list */
s32 host_cookie; /* host private data */
+ bool fault_injected; /* fault injected */
};
struct mmc_request {
diff --git a/include/linux/msm_charm.h b/include/linux/msm_charm.h
index c31e493..44d2553 100644
--- a/include/linux/msm_charm.h
+++ b/include/linux/msm_charm.h
@@ -11,10 +11,15 @@
#define RAM_DUMP_DONE _IOW(CHARM_CODE, 6, int)
#define WAIT_FOR_RESTART _IOR(CHARM_CODE, 7, int)
#define GET_DLOAD_STATUS _IOR(CHARM_CODE, 8, int)
+#define IMAGE_UPGRADE _IOW(CHARM_CODE, 9, int)
enum charm_boot_type {
CHARM_NORMAL_BOOT = 0,
CHARM_RAM_DUMPS,
};
+enum image_upgrade_type {
+ APQ_CONTROLLED_UPGRADE = 0,
+ MDM_CONTROLLED_UPGRADE,
+};
#endif
diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h
new file mode 100644
index 0000000..0e28e54
--- /dev/null
+++ b/include/linux/msm_ion.h
@@ -0,0 +1,22 @@
+/*
+ * include/linux/ion.h
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_MSM_ION_H
+#define _LINUX_MSM_ION_H
+
+#include <linux/ion.h>
+
+#endif
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index d8edbc8..4c42623 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -32,8 +32,11 @@
#define MSMFB_OVERLAY_SET _IOWR(MSMFB_IOCTL_MAGIC, 135, \
struct mdp_overlay)
#define MSMFB_OVERLAY_UNSET _IOW(MSMFB_IOCTL_MAGIC, 136, unsigned int)
+
#define MSMFB_OVERLAY_PLAY _IOW(MSMFB_IOCTL_MAGIC, 137, \
struct msmfb_overlay_data)
+#define MSMFB_OVERLAY_QUEUE MSMFB_OVERLAY_PLAY
+
#define MSMFB_GET_PAGE_PROTECTION _IOR(MSMFB_IOCTL_MAGIC, 138, \
struct mdp_page_protection)
#define MSMFB_SET_PAGE_PROTECTION _IOW(MSMFB_IOCTL_MAGIC, 139, \
@@ -66,6 +69,9 @@
#define MSMFB_WRITEBACK_TERMINATE _IO(MSMFB_IOCTL_MAGIC, 155)
#define MSMFB_MDP_PP _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp)
+#define MSMFB_OVERLAY_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 160, unsigned int)
+
+
#define FB_TYPE_3D_PANEL 0x10101010
#define MDP_IMGTYPE2_START 0x10000
#define MSMFB_DRIVER_VERSION 0xF9E8D701
@@ -148,7 +154,7 @@
#define MDP_DEINTERLACE_ODD 0x00400000
#define MDP_OV_PLAY_NOWAIT 0x00200000
#define MDP_SOURCE_ROTATED_90 0x00100000
-#define MDP_DPP_HSIC 0x00080000
+#define MDP_OVERLAY_PP_CFG_EN 0x00080000
#define MDP_BACKEND_COMPOSITION 0x00040000
#define MDP_BORDERFILL_SUPPORTED 0x00010000
#define MDP_SECURE_OVERLAY_SESSION 0x00008000
@@ -265,15 +271,47 @@
struct msmfb_img img;
};
-struct dpp_ctrl {
- /*
- *'sharp_strength' has inputs = -128 <-> 127
- * Increasingly positive values correlate with increasingly sharper
- * picture. Increasingly negative values correlate with increasingly
- * smoothed picture.
- */
- int8_t sharp_strength;
- int8_t hsic_params[NUM_HSIC_PARAM];
+#define MDP_PP_OPS_READ 0x2
+#define MDP_PP_OPS_WRITE 0x4
+
+struct mdp_qseed_cfg {
+ uint32_t table_num;
+ uint32_t ops;
+ uint32_t len;
+ uint32_t *data;
+};
+
+struct mdp_qseed_cfg_data {
+ uint32_t block;
+ struct mdp_qseed_cfg qseed_data;
+};
+
+#define MDP_OVERLAY_PP_CSC_CFG 0x1
+#define MDP_OVERLAY_PP_QSEED_CFG 0x2
+
+#define MDP_CSC_FLAG_ENABLE 0x1
+#define MDP_CSC_FLAG_YUV_IN 0x2
+#define MDP_CSC_FLAG_YUV_OUT 0x4
+
+struct mdp_csc_cfg {
+ /* flags for enable CSC, toggling RGB,YUV input/output */
+ uint32_t flags;
+ uint32_t csc_mv[9];
+ uint32_t csc_pre_bv[3];
+ uint32_t csc_post_bv[3];
+ uint32_t csc_pre_lv[6];
+ uint32_t csc_post_lv[6];
+};
+
+struct mdp_csc_cfg_data {
+ uint32_t block;
+ struct mdp_csc_cfg csc_data;
+};
+
+struct mdp_overlay_pp_params {
+ uint32_t config_ops;
+ struct mdp_csc_cfg csc_cfg;
+ struct mdp_qseed_cfg qseed_cfg[2];
};
struct mdp_overlay {
@@ -287,7 +325,7 @@
uint32_t flags;
uint32_t id;
uint32_t user_data[8];
- struct dpp_ctrl dpp;
+ struct mdp_overlay_pp_params overlay_pp_cfg;
};
struct msmfb_overlay_3d {
@@ -375,25 +413,6 @@
struct mdp_pcc_coeff r, g, b;
};
-#define MDP_CSC_FLAG_ENABLE 0x1
-#define MDP_CSC_FLAG_YUV_IN 0x2
-#define MDP_CSC_FLAG_YUV_OUT 0x4
-
-struct mdp_csc_cfg {
- /* flags for enable CSC, toggling RGB,YUV input/output */
- uint32_t flags;
- uint32_t csc_mv[9];
- uint32_t csc_pre_bv[3];
- uint32_t csc_post_bv[3];
- uint32_t csc_pre_lv[6];
- uint32_t csc_post_lv[6];
-};
-
-struct mdp_csc_cfg_data {
- uint32_t block;
- struct mdp_csc_cfg csc_data;
-};
-
enum {
mdp_lut_igc,
mdp_lut_pgc,
@@ -401,7 +420,6 @@
mdp_lut_max,
};
-
struct mdp_igc_lut_data {
uint32_t block;
uint32_t len, ops;
@@ -443,14 +461,6 @@
} data;
};
-struct mdp_qseed_cfg_data {
- uint32_t block;
- uint32_t table_num;
- uint32_t ops;
- uint32_t len;
- uint32_t *data;
-};
-
struct mdp_bl_scale_data {
uint32_t min_lvl;
uint32_t scale;
diff --git a/include/linux/msm_thermal.h b/include/linux/msm_thermal.h
index fe9be89..47a8753 100644
--- a/include/linux/msm_thermal.h
+++ b/include/linux/msm_thermal.h
@@ -17,9 +17,9 @@
struct msm_thermal_data {
uint32_t sensor_id;
uint32_t poll_ms;
- uint32_t limit_temp;
- uint32_t temp_hysteresis;
- uint32_t limit_freq;
+ uint32_t limit_temp_degC;
+ uint32_t temp_hysteresis_degC;
+ uint32_t freq_step;
};
#ifdef CONFIG_THERMAL_MONITOR
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index ed136ad..e3d59cd 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -119,6 +119,11 @@
extern void early_init_devtree(void *);
#else /* CONFIG_OF_FLATTREE */
static inline void unflatten_device_tree(void) {}
+static inline void *of_get_flat_dt_prop(unsigned long node, const char *name,
+ unsigned long *size) { return NULL; }
+
+static inline int of_flat_dt_is_compatible(unsigned long node,
+ const char *name) { return 0; }
#endif /* CONFIG_OF_FLATTREE */
#endif /* __ASSEMBLY__ */
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
new file mode 100644
index 0000000..33559dd
--- /dev/null
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -0,0 +1,689 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm PMIC QPNP ADC driver header file
+ *
+ */
+
+#ifndef __QPNP_ADC_H
+#define __QPNP_ADC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+/**
+ * enum qpnp_vadc_channels - QPNP AMUX arbiter channels
+ */
+enum qpnp_vadc_channels {
+ USBIN = 0,
+ DCIN,
+ VCHG_SNS,
+ SPARE1_03,
+ SPARE2_03,
+ VCOIN,
+ VBAT_SNS,
+ VSYS,
+ DIE_TEMP,
+ REF_625MV,
+ REF_125V,
+ CHG_TEMP,
+ SPARE1,
+ SPARE2,
+ GND_REF,
+ VDD_VADC,
+ P_MUX1_1_1,
+ P_MUX2_1_1,
+ P_MUX3_1_1,
+ P_MUX4_1_1,
+ P_MUX5_1_1,
+ P_MUX6_1_1,
+ P_MUX7_1_1,
+ P_MUX8_1_1,
+ P_MUX9_1_1,
+ P_MUX10_1_1,
+ P_MUX11_1_1,
+ P_MUX12_1_1,
+ P_MUX13_1_1,
+ P_MUX14_1_1,
+ P_MUX15_1_1,
+ P_MUX16_1_1,
+ P_MUX1_1_3,
+ P_MUX2_1_3,
+ P_MUX3_1_3,
+ P_MUX4_1_3,
+ P_MUX5_1_3,
+ P_MUX6_1_3,
+ P_MUX7_1_3,
+ P_MUX8_1_3,
+ P_MUX9_1_3,
+ P_MUX10_1_3,
+ P_MUX11_1_3,
+ P_MUX12_1_3,
+ P_MUX13_1_3,
+ P_MUX14_1_3,
+ P_MUX15_1_3,
+ P_MUX16_1_3,
+ LR_MUX1_BATT_THERM,
+ LR_MUX2_BAT_ID,
+ LR_MUX3_XO_THERM,
+ LR_MUX4_AMUX_THM1,
+ LR_MUX5_AMUX_THM2,
+ LR_MUX6_AMUX_THM3,
+ LR_MUX7_HW_ID,
+ LR_MUX8_AMUX_THM4,
+ LR_MUX9_AMUX_THM5,
+ LR_MUX10_USB_ID,
+ AMUX_PU1,
+ AMUX_PU2,
+ LR_MUX3_BUF_XO_THERM_BUF,
+ LR_MUX1_PU1_BAT_THERM,
+ LR_MUX2_PU1_BAT_ID,
+ LR_MUX3_PU1_XO_THERM,
+ LR_MUX4_PU1_AMUX_THM1,
+ LR_MUX5_PU1_AMUX_THM2,
+ LR_MUX6_PU1_AMUX_THM3,
+ LR_MUX7_PU1_AMUX_HW_ID,
+ LR_MUX8_PU1_AMUX_THM4,
+ LR_MUX9_PU1_AMUX_THM5,
+ LR_MUX10_PU1_AMUX_USB_ID,
+ LR_MUX3_BUF_PU1_XO_THERM_BUF,
+ LR_MUX1_PU2_BAT_THERM,
+ LR_MUX2_PU2_BAT_ID,
+ LR_MUX3_PU2_XO_THERM,
+ LR_MUX4_PU2_AMUX_THM1,
+ LR_MUX5_PU2_AMUX_THM2,
+ LR_MUX6_PU2_AMUX_THM3,
+ LR_MUX7_PU2_AMUX_HW_ID,
+ LR_MUX8_PU2_AMUX_THM4,
+ LR_MUX9_PU2_AMUX_THM5,
+ LR_MUX10_PU2_AMUX_USB_ID,
+ LR_MUX3_BUF_PU2_XO_THERM_BUF,
+ LR_MUX1_PU1_PU2_BAT_THERM,
+ LR_MUX2_PU1_PU2_BAT_ID,
+ LR_MUX3_PU1_PU2_XO_THERM,
+ LR_MUX4_PU1_PU2_AMUX_THM1,
+ LR_MUX5_PU1_PU2_AMUX_THM2,
+ LR_MUX6_PU1_PU2_AMUX_THM3,
+ LR_MUX7_PU1_PU2_AMUX_HW_ID,
+ LR_MUX8_PU1_PU2_AMUX_THM4,
+ LR_MUX9_PU1_PU2_AMUX_THM5,
+ LR_MUX10_PU1_PU2_AMUX_USB_ID,
+ LR_MUX3_BUF_PU1_PU2_XO_THERM_BUF,
+ ALL_OFF,
+ ADC_MAX_NUM,
+};
+
+#define QPNP_ADC_625_UV 625000
+
+/**
+ * enum qpnp_adc_decimation_type - Sampling rate supported.
+ * %DECIMATION_TYPE1: 512
+ * %DECIMATION_TYPE2: 1K
+ * %DECIMATION_TYPE3: 2K
+ * %DECIMATION_TYPE4: 4k
+ * %DECIMATION_NONE: Do not use this Sampling type.
+ *
+ * The Sampling rate is specific to each channel of the QPNP ADC arbiter.
+ */
+enum qpnp_adc_decimation_type {
+ DECIMATION_TYPE1 = 0,
+ DECIMATION_TYPE2,
+ DECIMATION_TYPE3,
+ DECIMATION_TYPE4,
+ DECIMATION_NONE,
+};
+
+/**
+ * enum qpnp_adc_calib_type - QPNP ADC Calibration type.
+ * %ADC_CALIB_ABSOLUTE: Use 625mV and 1.25V reference channels.
+ * %ADC_CALIB_RATIOMETRIC: Use reference Voltage/GND.
+ * %ADC_CALIB_CONFIG_NONE: Do not use this calibration type.
+ *
+ * Use the input reference voltage depending on the calibration type
+ * to calcluate the offset and gain parameters. The calibration is
+ * specific to each channel of the QPNP ADC.
+ */
+enum qpnp_adc_calib_type {
+ CALIB_ABSOLUTE = 0,
+ CALIB_RATIOMETRIC,
+ CALIB_NONE,
+};
+
+/**
+ * enum qpnp_adc_channel_scaling_param - pre-scaling AMUX ratio.
+ * %CHAN_PATH_SCALING1: ratio of {1, 1}
+ * %CHAN_PATH_SCALING2: ratio of {1, 3}
+ * %CHAN_PATH_SCALING3: ratio of {1, 4}
+ * %CHAN_PATH_SCALING4: ratio of {1, 6}
+ * %CHAN_PATH_NONE: Do not use this pre-scaling ratio type.
+ *
+ * The pre-scaling is applied for signals to be within the voltage range
+ * of the ADC.
+ */
+enum qpnp_adc_channel_scaling_param {
+ PATH_SCALING1 = 0,
+ PATH_SCALING2,
+ PATH_SCALING3,
+ PATH_SCALING4,
+ PATH_SCALING_NONE,
+};
+
+/**
+ * enum qpnp_adc_scale_fn_type - Scaling function for pm8921 pre calibrated
+ * digital data relative to ADC reference.
+ * %ADC_SCALE_DEFAULT: Default scaling to convert raw adc code to voltage.
+ * %ADC_SCALE_BATT_THERM: Conversion to temperature based on btm parameters.
+ * %ADC_SCALE_PMIC_THERM: Returns result in milli degree's Centigrade.
+ * %ADC_SCALE_XTERN_CHGR_CUR: Returns current across 0.1 ohm resistor.
+ * %ADC_SCALE_XOTHERM: Returns XO thermistor voltage in degree's Centigrade.
+ * %ADC_SCALE_NONE: Do not use this scaling type.
+ */
+enum qpnp_adc_scale_fn_type {
+ SCALE_DEFAULT = 0,
+ SCALE_BATT_THERM,
+ SCALE_PA_THERM,
+ SCALE_PMIC_THERM,
+ SCALE_XOTHERM,
+ SCALE_NONE,
+};
+
+/**
+ * enum qpnp_adc_fast_avg_ctl - Provides ability to obtain single result
+ * from the ADC that is an average of multiple measurement
+ * samples. Select number of samples for use in fast
+ * average mode (i.e. 2 ^ value).
+ * %ADC_FAST_AVG_SAMPLE_1: 0x0 = 1
+ * %ADC_FAST_AVG_SAMPLE_2: 0x1 = 2
+ * %ADC_FAST_AVG_SAMPLE_4: 0x2 = 4
+ * %ADC_FAST_AVG_SAMPLE_8: 0x3 = 8
+ * %ADC_FAST_AVG_SAMPLE_16: 0x4 = 16
+ * %ADC_FAST_AVG_SAMPLE_32: 0x5 = 32
+ * %ADC_FAST_AVG_SAMPLE_64: 0x6 = 64
+ * %ADC_FAST_AVG_SAMPLE_128: 0x7 = 128
+ * %ADC_FAST_AVG_SAMPLE_256: 0x8 = 256
+ * %ADC_FAST_AVG_SAMPLE_512: 0x9 = 512
+ */
+enum qpnp_adc_fast_avg_ctl {
+ ADC_FAST_AVG_SAMPLE_1 = 0,
+ ADC_FAST_AVG_SAMPLE_2,
+ ADC_FAST_AVG_SAMPLE_4,
+ ADC_FAST_AVG_SAMPLE_8,
+ ADC_FAST_AVG_SAMPLE_16,
+ ADC_FAST_AVG_SAMPLE_32,
+ ADC_FAST_AVG_SAMPLE_64,
+ ADC_FAST_AVG_SAMPLE_128,
+ ADC_FAST_AVG_SAMPLE_256,
+ ADC_FAST_AVG_SAMPLE_512,
+ ADC_FAST_AVG_SAMPLE_NONE,
+};
+
+/**
+ * enum qpnp_adc_hw_settle_time - Time between AMUX getting configured and
+ * the ADC starting conversion. Delay = 100us * value for
+ * value < 11 and 2ms * (value - 10) otherwise.
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_0US: 0us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_100US: 100us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_200US: 200us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_300US: 300us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_400US: 400us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_500US: 500us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_600US: 600us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_700US: 700us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_800US: 800us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_900US: 900us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_1MS: 1ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_2MS: 2ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_4MS: 4ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_6MS: 6ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_8MS: 8ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_10MS: 10ms
+ * %ADC_CHANNEL_HW_SETTLE_NONE
+ */
+enum qpnp_adc_hw_settle_time {
+ ADC_CHANNEL_HW_SETTLE_DELAY_0US = 0,
+ ADC_CHANNEL_HW_SETTLE_DELAY_100US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_2000US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_300US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_400US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_500US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_600US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_700US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_800US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_900US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_1MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_2MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_4MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_6MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_8MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_10MS,
+ ADC_CHANNEL_HW_SETTLE_NONE,
+};
+
+/**
+ * enum qpnp_vadc_mode_sel - Selects the basic mode of operation.
+ * - The normal mode is used for single measurement.
+ * - The Conversion sequencer is used to trigger an
+ * ADC read when a HW trigger is selected.
+ * - The measurement interval performs a single or
+ * continous measurement at a specified interval/delay.
+ * %ADC_OP_NORMAL_MODE : Normal mode used for single measurement.
+ * %ADC_OP_CONVERSION_SEQUENCER : Conversion sequencer used to trigger
+ * an ADC read on a HW supported trigger.
+ * Refer to enum qpnp_vadc_trigger for
+ * supported HW triggers.
+ * %ADC_OP_MEASUREMENT_INTERVAL : The measurement interval performs a
+ * single or continous measurement after a specified delay.
+ * For delay look at qpnp_adc_meas_timer.
+ */
+enum qpnp_vadc_mode_sel {
+ ADC_OP_NORMAL_MODE = 0,
+ ADC_OP_CONVERSION_SEQUENCER,
+ ADC_OP_MEASUREMENT_INTERVAL,
+ ADC_OP_MODE_NONE,
+};
+
+/**
+ * enum qpnp_vadc_trigger - Select the HW trigger to be used while
+ * measuring the ADC reading.
+ * %ADC_GSM_PA_ON : GSM power amplifier on.
+ * %ADC_TX_GTR_THRES : Transmit power greater than threshold.
+ * %ADC_CAMERA_FLASH_RAMP : Flash ramp up done.
+ * %ADC_DTEST : DTEST.
+ */
+enum qpnp_vadc_trigger {
+ ADC_GSM_PA_ON = 0,
+ ADC_TX_GTR_THRES,
+ ADC_CAMERA_FLASH_RAMP,
+ ADC_DTEST,
+ ADC_SEQ_NONE,
+};
+
+/**
+ * enum qpnp_vadc_conv_seq_timeout - Select delay (0 to 15ms) from
+ * conversion request to triggering conversion sequencer
+ * hold off time.
+ */
+enum qpnp_vadc_conv_seq_timeout {
+ ADC_CONV_SEQ_TIMEOUT_0MS = 0,
+ ADC_CONV_SEQ_TIMEOUT_1MS,
+ ADC_CONV_SEQ_TIMEOUT_2MS,
+ ADC_CONV_SEQ_TIMEOUT_3MS,
+ ADC_CONV_SEQ_TIMEOUT_4MS,
+ ADC_CONV_SEQ_TIMEOUT_5MS,
+ ADC_CONV_SEQ_TIMEOUT_6MS,
+ ADC_CONV_SEQ_TIMEOUT_7MS,
+ ADC_CONV_SEQ_TIMEOUT_8MS,
+ ADC_CONV_SEQ_TIMEOUT_9MS,
+ ADC_CONV_SEQ_TIMEOUT_10MS,
+ ADC_CONV_SEQ_TIMEOUT_11MS,
+ ADC_CONV_SEQ_TIMEOUT_12MS,
+ ADC_CONV_SEQ_TIMEOUT_13MS,
+ ADC_CONV_SEQ_TIMEOUT_14MS,
+ ADC_CONV_SEQ_TIMEOUT_15MS,
+ ADC_CONV_SEQ_TIMEOUT_NONE,
+};
+
+/**
+ * enum qpnp_adc_conv_seq_holdoff - Select delay from conversion
+ * trigger signal (i.e. adc_conv_seq_trig) transition
+ * to ADC enable. Delay = 25us * (value + 1).
+ */
+enum qpnp_adc_conv_seq_holdoff {
+ ADC_SEQ_HOLD_25US = 0,
+ ADC_SEQ_HOLD_50US,
+ ADC_SEQ_HOLD_75US,
+ ADC_SEQ_HOLD_100US,
+ ADC_SEQ_HOLD_125US,
+ ADC_SEQ_HOLD_150US,
+ ADC_SEQ_HOLD_175US,
+ ADC_SEQ_HOLD_200US,
+ ADC_SEQ_HOLD_225US,
+ ADC_SEQ_HOLD_250US,
+ ADC_SEQ_HOLD_275US,
+ ADC_SEQ_HOLD_300US,
+ ADC_SEQ_HOLD_325US,
+ ADC_SEQ_HOLD_350US,
+ ADC_SEQ_HOLD_375US,
+ ADC_SEQ_HOLD_400US,
+ ADC_SEQ_HOLD_NONE,
+};
+
+/**
+ * enum qpnp_adc_conv_seq_state - Conversion sequencer operating state
+ * %ADC_CONV_SEQ_IDLE : Sequencer is in idle.
+ * %ADC_CONV_TRIG_RISE : Waiting for rising edge trigger.
+ * %ADC_CONV_TRIG_HOLDOFF : Waiting for rising trigger hold off time.
+ * %ADC_CONV_MEAS_RISE : Measuring selected ADC signal.
+ * %ADC_CONV_TRIG_FALL : Waiting for falling trigger edge.
+ * %ADC_CONV_FALL_HOLDOFF : Waiting for falling trigger hold off time.
+ * %ADC_CONV_MEAS_FALL : Measuring selected ADC signal.
+ * %ADC_CONV_ERROR : Aberrant Hardware problem.
+ */
+enum qpnp_adc_conv_seq_state {
+ ADC_CONV_SEQ_IDLE = 0,
+ ADC_CONV_TRIG_RISE,
+ ADC_CONV_TRIG_HOLDOFF,
+ ADC_CONV_MEAS_RISE,
+ ADC_CONV_TRIG_FALL,
+ ADC_CONV_FALL_HOLDOFF,
+ ADC_CONV_MEAS_FALL,
+ ADC_CONV_ERROR,
+ ADC_CONV_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer - Selects the measurement interval time.
+ * If value = 0, use 0ms else use 2^(value + 4)/ 32768).
+ * %ADC_MEAS_INTERVAL_0MS : 0ms
+ * %ADC_MEAS_INTERVAL_1P0MS : 1ms
+ * %ADC_MEAS_INTERVAL_2P0MS : 2ms
+ * %ADC_MEAS_INTERVAL_3P9MS : 3.9ms
+ * %ADC_MEAS_INTERVAL_7P8MS : 7.8ms
+ * %ADC_MEAS_INTERVAL_15P6MS : 15.6ms
+ * %ADC_MEAS_INTERVAL_31P3MS : 31.3ms
+ * %ADC_MEAS_INTERVAL_62P5MS : 62.5ms
+ * %ADC_MEAS_INTERVAL_125MS : 125ms
+ * %ADC_MEAS_INTERVAL_250MS : 250ms
+ * %ADC_MEAS_INTERVAL_500MS : 500ms
+ * %ADC_MEAS_INTERVAL_1S : 1seconds
+ * %ADC_MEAS_INTERVAL_2S : 2seconds
+ * %ADC_MEAS_INTERVAL_4S : 4seconds
+ * %ADC_MEAS_INTERVAL_8S : 8seconds
+ * %ADC_MEAS_INTERVAL_16S: 16seconds
+ */
+enum qpnp_adc_meas_timer {
+ ADC_MEAS_INTERVAL_0MS = 0,
+ ADC_MEAS_INTERVAL_1P0MS,
+ ADC_MEAS_INTERVAL_2P0MS,
+ ADC_MEAS_INTERVAL_3P9MS,
+ ADC_MEAS_INTERVAL_7P8MS,
+ ADC_MEAS_INTERVAL_15P6MS,
+ ADC_MEAS_INTERVAL_31P3MS,
+ ADC_MEAS_INTERVAL_62P5MS,
+ ADC_MEAS_INTERVAL_125MS,
+ ADC_MEAS_INTERVAL_250MS,
+ ADC_MEAS_INTERVAL_500MS,
+ ADC_MEAS_INTERVAL_1S,
+ ADC_MEAS_INTERVAL_2S,
+ ADC_MEAS_INTERVAL_4S,
+ ADC_MEAS_INTERVAL_8S,
+ ADC_MEAS_INTERVAL_16S,
+ ADC_MEAS_INTERVAL_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_interval_op_ctl - Select operating mode.
+ * %ADC_MEAS_INTERVAL_OP_SINGLE : Conduct single measurement at specified time
+ * delay.
+ * %ADC_MEAS_INTERVAL_OP_CONTINUOUS : Make measurements at measurement interval
+ * times.
+ */
+enum qpnp_adc_meas_interval_op_ctl {
+ ADC_MEAS_INTERVAL_OP_SINGLE = 0,
+ ADC_MEAS_INTERVAL_OP_CONTINUOUS,
+ ADC_MEAS_INTERVAL_OP_NONE,
+};
+
+/**
+ * struct qpnp_vadc_linear_graph - Represent ADC characteristics.
+ * @dy: Numerator slope to calculate the gain.
+ * @dx: Denominator slope to calculate the gain.
+ * @adc_vref: A/D word of the voltage reference used for the channel.
+ * @adc_gnd: A/D word of the ground reference used for the channel.
+ *
+ * Each ADC device has different offset and gain parameters which are computed
+ * to calibrate the device.
+ */
+struct qpnp_vadc_linear_graph {
+ int64_t dy;
+ int64_t dx;
+ int64_t adc_vref;
+ int64_t adc_gnd;
+};
+
+/**
+ * struct qpnp_vadc_map_pt - Map the graph representation for ADC channel
+ * @x: Represent the ADC digitized code.
+ * @y: Represent the physical data which can be temperature, voltage,
+ * resistance.
+ */
+struct qpnp_vadc_map_pt {
+ int32_t x;
+ int32_t y;
+};
+
+/**
+ * struct qpnp_vadc_scaling_ratio - Represent scaling ratio for adc input.
+ * @num: Numerator scaling parameter.
+ * @den: Denominator scaling parameter.
+ */
+struct qpnp_vadc_scaling_ratio {
+ int32_t num;
+ int32_t den;
+};
+
+/**
+ * struct qpnp_adc_properties - Represent the ADC properties.
+ * @adc_reference: Reference voltage for QPNP ADC.
+ * @bitresolution: ADC bit resolution for QPNP ADC.
+ * @biploar: Polarity for QPNP ADC.
+ */
+struct qpnp_adc_properties {
+ uint32_t adc_vdd_reference;
+ uint32_t bitresolution;
+ bool bipolar;
+};
+
+/**
+ * struct qpnp_vadc_chan_properties - Represent channel properties of the ADC.
+ * @offset_gain_numerator: The inverse numerator of the gain applied to the
+ * input channel.
+ * @offset_gain_denominator: The inverse denominator of the gain applied to the
+ * input channel.
+ * @adc_graph: ADC graph for the channel of struct type qpnp_adc_linear_graph.
+ */
+struct qpnp_vadc_chan_properties {
+ uint32_t offset_gain_numerator;
+ uint32_t offset_gain_denominator;
+ struct qpnp_vadc_linear_graph adc_graph[2];
+};
+
+/**
+ * struct qpnp_adc_result - Represent the result of the QPNP ADC.
+ * @chan: The channel number of the requested conversion.
+ * @adc_code: The pre-calibrated digital output of a given ADC relative to the
+ * the ADC reference.
+ * @measurement: In units specific for a given ADC; most ADC uses reference
+ * voltage but some ADC uses reference current. This measurement
+ * here is a number relative to a reference of a given ADC.
+ * @physical: The data meaningful for each individual channel whether it is
+ * voltage, current, temperature, etc.
+ * All voltage units are represented in micro - volts.
+ * -Battery temperature units are represented as 0.1 DegC.
+ * -PA Therm temperature units are represented as DegC.
+ * -PMIC Die temperature units are represented as 0.001 DegC.
+ */
+struct qpnp_vadc_result {
+ uint32_t chan;
+ int32_t adc_code;
+ int64_t measurement;
+ int64_t physical;
+};
+
+/**
+ * struct qpnp_adc_amux - AMUX properties for individual channel
+ * @name: Channel string name.
+ * @channel_num: Channel in integer used from qpnp_adc_channels.
+ * @chan_path_prescaling: Channel scaling performed on the input signal.
+ * @adc_decimation: Sampling rate desired for the channel.
+ * adc_scale_fn: Scaling function to convert to the data meaningful for
+ * each individual channel whether it is voltage, current,
+ * temperature, etc and compensates the channel properties.
+ */
+struct qpnp_vadc_amux {
+ char *name;
+ enum qpnp_vadc_channels channel_num;
+ enum qpnp_adc_channel_scaling_param chan_path_prescaling;
+ enum qpnp_adc_decimation_type adc_decimation;
+ enum qpnp_adc_scale_fn_type adc_scale_fn;
+ enum qpnp_adc_fast_avg_ctl fast_avg_setup;
+ enum qpnp_adc_hw_settle_time hw_settle_time;
+};
+
+/**
+ * struct qpnp_vadc_scaling_ratio
+ *
+ */
+static const struct qpnp_vadc_scaling_ratio qpnp_vadc_amux_scaling_ratio[] = {
+ {1, 1},
+ {1, 3},
+ {1, 4},
+ {1, 6},
+ {1, 20}
+};
+
+/**
+ * struct qpnp_vadc_scale_fn - Scaling function prototype
+ * @chan: Function pointer to one of the scaling functions
+ * which takes the adc properties, channel properties,
+ * and returns the physical result
+ */
+struct qpnp_vadc_scale_fn {
+ int32_t (*chan) (int32_t,
+ const struct qpnp_adc_properties *,
+ const struct qpnp_vadc_chan_properties *,
+ struct qpnp_vadc_result *);
+};
+
+/**
+ * struct qpnp_adc_drv - QPNP ADC device structure.
+ * @spmi - spmi device for ADC peripheral.
+ * @offset - base offset for the ADC peripheral.
+ * @adc_prop - ADC properties specific to the ADC peripheral.
+ * @amux_prop - AMUX properties representing the ADC peripheral.
+ * @adc_channels - ADC channel properties for the ADC peripheral.
+ * @adc_irq - IRQ number that is mapped to the ADC peripheral.
+ * @adc_lock - ADC lock for access to the peripheral.
+ * @adc_rslt_completion - ADC result notification after interrupt
+ * is received.
+ */
+struct qpnp_adc_drv {
+ struct spmi_device *spmi;
+ uint8_t slave;
+ uint16_t offset;
+ struct qpnp_adc_properties *adc_prop;
+ struct qpnp_vadc_amux_properties *amux_prop;
+ struct qpnp_vadc_amux *adc_channels;
+ int adc_irq;
+ struct mutex adc_lock;
+ struct completion adc_rslt_completion;
+};
+
+/**
+ * struct qpnp_vadc_amux_properties - QPNP VADC amux channel property.
+ * @amux_channel - Refer to the qpnp_vadc_channel list.
+ * @decimation - Sampling rate supported for the channel.
+ * @mode_sel - The basic mode of operation.
+ * @hw_settle_time - The time between AMUX being configured and the
+ * start of conversion.
+ * @fast_avg_setup - Ability to provide single result from the ADC
+ * that is an average of multiple measurements.
+ * @trigger_channel - HW trigger channel for conversion sequencer.
+ * @chan_prop - Represent the channel properties of the ADC.
+ */
+struct qpnp_vadc_amux_properties {
+ uint32_t amux_channel;
+ uint32_t decimation;
+ uint32_t mode_sel;
+ uint32_t hw_settle_time;
+ uint32_t fast_avg_setup;
+ enum qpnp_vadc_trigger trigger_channel;
+ struct qpnp_vadc_chan_properties chan_prop[0];
+};
+
+/* Public API */
+#if defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE) \
+ || defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE_MODULE)
+/**
+ * qpnp_vadc_read() - Performs ADC read on the channel.
+ * @channel: Input channel to perform the ADC read.
+ * @result: Structure pointer of type adc_chan_result
+ * in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_read(enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result);
+
+/**
+ * qpnp_vadc_conv_seq_request() - Performs ADC read on the conversion
+ * sequencer channel.
+ * @channel: Input channel to perform the ADC read.
+ * @result: Structure pointer of type adc_chan_result
+ * in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_conv_seq_request(
+ enum qpnp_vadc_trigger trigger_channel,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result);
+
+/**
+ * qpnp_vadc_check_result() - Performs check on the ADC raw code.
+ * @data: Data used for verifying the range of the ADC code.
+ */
+int32_t qpnp_vadc_check_result(int32_t *data);
+
+/**
+ * qpnp_adc_get_devicetree_data() - Abstracts the ADC devicetree data.
+ * @spmi: spmi ADC device.
+ * @adc_qpnp: spmi device tree node structure
+ */
+int32_t qpnp_adc_get_devicetree_data(struct spmi_device *spmi,
+ struct qpnp_adc_drv *adc_qpnp);
+
+/**
+ * qpnp_vadc_configure() - Configure ADC device to start conversion.
+ * @chan_prop: Individual channel properties for the AMUX channel.
+ */
+int32_t qpnp_vadc_configure(
+ struct qpnp_vadc_amux_properties *chan_prop);
+
+/**
+ * qpnp_adc_scale_default() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset.
+ * @adc_code: pre-calibrated digital ouput of the ADC.
+ * @adc_prop: adc properties of the qpnp adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: Individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_default(int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+#else
+static inline int32_t qpnp_vadc_read(uint32_t channel,
+ struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_conv_seq_request(
+ enum qpnp_vadc_trigger trigger_channel,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_default(int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_adc_chan_properties *chan_prop,
+ struct qpnp_adc_chan_result *chan_rslt)
+{ return -ENXIO; }
+#endif
+
+#endif
diff --git a/include/linux/qseecom.h b/include/linux/qseecom.h
index 0fcf96f..b0f089b 100644
--- a/include/linux/qseecom.h
+++ b/include/linux/qseecom.h
@@ -110,9 +110,11 @@
/*
* struct qseecom_qseos_app_load_query - verify if app is loaded in qsee
* @app_name[MAX_APP_NAME_SIZE]- name of the app.
+ * @app_id - app id.
*/
struct qseecom_qseos_app_load_query {
char app_name[MAX_APP_NAME_SIZE]; /* in */
+ int app_id; /* out */
};
#define QSEECOM_IOC_MAGIC 0x97
diff --git a/include/linux/test-iosched.h b/include/linux/test-iosched.h
index 8054409..1e428c5 100644
--- a/include/linux/test-iosched.h
+++ b/include/linux/test-iosched.h
@@ -65,6 +65,7 @@
REQ_UNIQUE_NONE,
REQ_UNIQUE_DISCARD,
REQ_UNIQUE_FLUSH,
+ REQ_UNIQUE_SANITIZE,
};
/**
diff --git a/include/linux/tick.h b/include/linux/tick.h
index ab8be90..494a314 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -44,7 +44,6 @@
* @idle_exittime: Time when the idle state was left
* @idle_sleeptime: Sum of the time slept in idle with sched tick stopped
* @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding
- * @sleep_length: Duration of the current idle sleep
* @do_timer_lst: CPU was the last one doing do_timer before going idle
*/
struct tick_sched {
@@ -63,7 +62,6 @@
ktime_t idle_exittime;
ktime_t idle_sleeptime;
ktime_t iowait_sleeptime;
- ktime_t sleep_length;
unsigned long last_jiffies;
unsigned long next_jiffies;
ktime_t idle_expires;
diff --git a/include/linux/usb/android.h b/include/linux/usb/android.h
index 7c2b33b..0b11fdaf 100644
--- a/include/linux/usb/android.h
+++ b/include/linux/usb/android.h
@@ -17,6 +17,8 @@
#ifndef __LINUX_USB_ANDROID_H
#define __LINUX_USB_ANDROID_H
+#include <linux/usb/composite.h>
+
struct android_usb_platform_data {
int (*update_pid_and_serial_num)(uint32_t, const char *);
u32 swfi_latency;
@@ -24,4 +26,22 @@
bool cdrom;
};
+#ifndef CONFIG_TARGET_CORE
+static inline int f_tcm_init(int (*connect_cb)(bool connect))
+{
+ /*
+ * Fail bind() not init(). If a function init() returns error
+ * android composite registration would fail.
+ */
+ return 0;
+}
+static inline void f_tcm_exit(void)
+{
+}
+static inline int tcm_bind_config(struct usb_configuration *c)
+{
+ return -ENODEV;
+}
+#endif
+
#endif /* __LINUX_USB_ANDROID_H */
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index c0a23a3..3b1d06d 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -199,6 +199,7 @@
* @core_clk_always_on_workaround: Don't disable core_clk when
* USB enters LPM.
* @bus_scale_table: parameters for bus bandwidth requirements
+ * @mhl_dev_name: MHL device name used to register with MHL driver.
*/
struct msm_otg_platform_data {
int *phy_init_seq;
@@ -216,6 +217,7 @@
bool enable_lpm_on_dev_suspend;
bool core_clk_always_on_workaround;
struct msm_bus_scale_pdata *bus_scale_table;
+ const char *mhl_dev_name;
};
/* Timeout (in msec) values (min - max) associated with OTG timers */
@@ -287,6 +289,7 @@
* @id_timer: The timer used for polling ID line to detect ACA states.
* @xo_handle: TCXO buffer handle
* @bus_perf_client: Bus performance client handle to request BUS bandwidth
+ * @mhl_enabled: MHL driver registration successful and MHL enabled.
*/
struct msm_otg {
struct usb_phy phy;
@@ -314,6 +317,7 @@
#define A_BUS_SUSPEND 14
#define A_CONN 15
#define B_BUS_REQ 16
+#define MHL 17
unsigned long inputs;
struct work_struct sm_work;
bool sm_work_pending;
@@ -333,6 +337,7 @@
unsigned long caps;
struct msm_xo_voter *xo_handle;
uint32_t bus_perf_client;
+ bool mhl_enabled;
/*
* Allowing PHY power collpase turns off the HSUSB 3.3v and 1.8v
* analog regulators while going to low power mode.
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 76f4396..72f5c96 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -56,7 +56,7 @@
struct sk_buff_head rxq_pause;
struct urb *interrupt;
struct usb_anchor deferred;
- struct tasklet_struct bh;
+ struct work_struct bh_w;
struct work_struct kevent;
unsigned long flags;
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index f5e1ffa..3e2f39b 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -1751,6 +1751,31 @@
#define V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF (V4L2_CID_MPEG_MSM_VIDC_BASE+18)
#define V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS (V4L2_CID_MPEG_MSM_VIDC_BASE+19)
+#define V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE (V4L2_CID_MPEG_MSM_VIDC_BASE+20)
+enum v4l2_mpeg_vidc_video_h263_profile {
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE = 0,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_H320CODING = 1,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BACKWARDCOMPATIBLE = 2,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV2 = 3,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV3 = 4,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHCOMPRESSION = 5,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERNET = 6,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERLACE = 7,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHLATENCY = 8,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_H263_LEVEL (V4L2_CID_MPEG_MSM_VIDC_BASE+21)
+enum v4l2_mpeg_vidc_video_h263_level {
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0 = 0,
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_2_0 = 1,
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_3_0 = 2,
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_0 = 3,
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_5 = 4,
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_5_0 = 5,
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_6_0 = 6,
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_7_0 = 7,
+};
+
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1)
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 8efd28a..0d0f6d3 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -15,6 +15,7 @@
* @callback: the function to call when buffers are consumed (can be NULL).
* @name: the name of this virtqueue (mainly for debugging)
* @vdev: the virtio device this queue was created for.
+ * @vq_ops: the operations for this virtqueue (see below).
* @priv: a pointer for the virtqueue implementation to use.
*/
struct virtqueue {
@@ -22,33 +23,235 @@
void (*callback)(struct virtqueue *vq);
const char *name;
struct virtio_device *vdev;
+ struct virtqueue_ops *vq_ops;
void *priv;
};
-int virtqueue_add_buf(struct virtqueue *vq,
- struct scatterlist sg[],
- unsigned int out_num,
- unsigned int in_num,
- void *data,
- gfp_t gfp);
+/**
+ * virtqueue_ops - operations for virtqueue abstraction layer
+ * @add_buf: expose buffer to other end
+ * vq: the struct virtqueue we're talking about.
+ * sg: the description of the buffer(s).
+ * out_num: the number of sg readable by other side
+ * in_num: the number of sg which are writable (after readable ones)
+ * data: the token identifying the buffer.
+ * Returns remaining capacity of queue (sg segments) or a negative error.
+ * @kick: update after add_buf
+ * vq: the struct virtqueue
+ * After one or more add_buf calls, invoke this to kick the other side.
+ * @get_buf: get the next used buffer
+ * vq: the struct virtqueue we're talking about.
+ * len: the length written into the buffer
+ * Returns NULL or the "data" token handed to add_buf.
+ * @disable_cb: disable callbacks
+ * vq: the struct virtqueue we're talking about.
+ * Note that this is not necessarily synchronous, hence unreliable and only
+ * useful as an optimization.
+ * @enable_cb: restart callbacks after disable_cb.
+ * vq: the struct virtqueue we're talking about.
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ * @enable_cb_delayed: restart callbacks after disable_cb.
+ * vq: the struct virtqueue we're talking about.
+ * This re-enables callbacks but hints to the other side to delay
+ * interrupts until most of the available buffers have been processed;
+ * it returns "false" if there are many pending buffers in the queue,
+ * to detect a possible race between the driver checking for more work,
+ * and enabling callbacks.
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ * @detach_unused_buf: detach first unused buffer
+ * vq: the struct virtqueue we're talking about.
+ * Returns NULL or the "data" token handed to add_buf
+ * @get_impl_size: return the size of the virtqueue's implementation
+ * vq: the struct virtqueue containing the implementation of interest.
+ * Returns the size of the implementation. This is mainly used for
+ * boasting to userspace. Unlike other operations, this need not
+ * be serialized.
+ *
+ * Locking rules are straightforward: the driver is responsible for
+ * locking. No two operations may be invoked simultaneously, with the exception
+ * of @disable_cb.
+ *
+ * All operations can be called in any context.
+ */
+struct virtqueue_ops {
+ int (*add_buf)(struct virtqueue *vq,
+ struct scatterlist sg[],
+ unsigned int out_num,
+ unsigned int in_num,
+ void *data,
+ gfp_t gfp);
-void virtqueue_kick(struct virtqueue *vq);
+ void (*kick)(struct virtqueue *vq);
+ bool (*kick_prepare)(struct virtqueue *vq);
+ void (*kick_notify)(struct virtqueue *vq);
+ void *(*get_buf)(struct virtqueue *vq, unsigned int *len);
+ void (*disable_cb)(struct virtqueue *vq);
+ bool (*enable_cb)(struct virtqueue *vq);
+ bool (*enable_cb_delayed)(struct virtqueue *vq);
+ void *(*detach_unused_buf)(struct virtqueue *vq);
+ unsigned int (*get_impl_size)(struct virtqueue *vq);
+};
-bool virtqueue_kick_prepare(struct virtqueue *vq);
+/**
+ * virtqueue_add_buf - expose buffer to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: the description of the buffer(s).
+ * @out_num: the number of sg readable by other side
+ * @in_num: the number of sg which are writable (after readable ones)
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns remaining capacity of queue or a negative error.
+ */
+static inline int virtqueue_add_buf(struct virtqueue *vq,
+ struct scatterlist sg[],
+ unsigned int out_num,
+ unsigned int in_num,
+ void *data,
+ gfp_t gfp)
+{
+ return vq->vq_ops->add_buf(vq, sg, out_num, in_num, data, gfp);
+}
+/**
+ * virtqueue_kick - update after add_buf
+ * @vq: the struct virtqueue
+ *
+ * After one or more virtqueue_add_buf calls, invoke this to kick
+ * the other side.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+static inline void virtqueue_kick(struct virtqueue *vq)
+{
+ vq->vq_ops->kick(vq);
+}
-void virtqueue_notify(struct virtqueue *vq);
+/**
+ * virtqueue_kick_prepare - first half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ *
+ * Instead of virtqueue_kick(), you can do:
+ * if (virtqueue_kick_prepare(vq))
+ * virtqueue_kick_notify(vq);
+ *
+ * This is sometimes useful because the virtqueue_kick_prepare() needs
+ * to be serialized, but the actual virtqueue_kick_notify() call does not.
+ */
+static inline bool virtqueue_kick_prepare(struct virtqueue *vq)
+{
+ return vq->vq_ops->kick_prepare(vq);
+}
-void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
+/**
+ * virtqueue_kick_notify - second half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ */
+static inline void virtqueue_kick_notify(struct virtqueue *vq)
+{
+ vq->vq_ops->kick_notify(vq);
+}
-void virtqueue_disable_cb(struct virtqueue *vq);
+/**
+ * virtqueue_get_buf - get the next used buffer
+ * @vq: the struct virtqueue we're talking about.
+ * @len: the length written into the buffer
+ *
+ * If the driver wrote data into the buffer, @len will be set to the
+ * amount written. This means you don't need to clear the buffer
+ * beforehand to ensure there's no data leakage in the case of short
+ * writes.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ *
+ * Returns NULL if there are no used buffers, or the "data" token
+ * handed to virtqueue_add_buf().
+ */
+static inline void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
+{
+ return vq->vq_ops->get_buf(vq, len);
+}
-bool virtqueue_enable_cb(struct virtqueue *vq);
+/**
+ * virtqueue_disable_cb - disable callbacks
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Note that this is not necessarily synchronous, hence unreliable and only
+ * useful as an optimization.
+ *
+ * Unlike other operations, this need not be serialized.
+ */
+static inline void virtqueue_disable_cb(struct virtqueue *vq)
+{
+ vq->vq_ops->disable_cb(vq);
+}
-bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
+/**
+ * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+static inline bool virtqueue_enable_cb(struct virtqueue *vq)
+{
+ return vq->vq_ops->enable_cb(vq);
+}
-void *virtqueue_detach_unused_buf(struct virtqueue *vq);
+/**
+ * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks but hints to the other side to delay
+ * interrupts until most of the available buffers have been processed;
+ * it returns "false" if there are many pending buffers in the queue,
+ * to detect a possible race between the driver checking for more work,
+ * and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+static inline bool virtqueue_enable_cb_delayed(struct virtqueue *vq)
+{
+ return vq->vq_ops->enable_cb_delayed(vq);
+}
-unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
+/**
+ * virtqueue_detach_unused_buf - detach first unused buffer
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Returns NULL or the "data" token handed to virtqueue_add_buf().
+ * This is not valid on an active queue; it is useful only for device
+ * shutdown.
+ */
+static inline void *virtqueue_detach_unused_buf(struct virtqueue *vq)
+{
+ return vq->vq_ops->detach_unused_buf(vq);
+}
+
+/**
+ * virtqueue_get_impl_size - return the size of the virtqueue's implementation
+ * @vq: the struct virtqueue containing the implementation of interest.
+ *
+ * Returns the size of the virtqueue implementation. This is mainly used
+ * for boasting to userspace. Unlike other operations, this need not
+ * be serialized.
+ */
+static inline unsigned int virtqueue_get_impl_size(struct virtqueue *vq)
+{
+ return vq->vq_ops->get_impl_size(vq);
+}
/**
* virtio_device - representation of a device using virtio
diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h
index d7e65b0..295be8f 100644
--- a/include/linux/wcnss_wlan.h
+++ b/include/linux/wcnss_wlan.h
@@ -47,6 +47,9 @@
int req_riva_power_on_lock(char *driver_name);
int free_riva_power_on_lock(char *driver_name);
unsigned int wcnss_get_serial_number(void);
+void wcnss_flush_delayed_boot_votes(void);
+void wcnss_allow_suspend(void);
+void wcnss_prevent_suspend(void);
#define wcnss_wlan_get_drvdata(dev) dev_get_drvdata(dev)
#define wcnss_wlan_set_drvdata(dev, data) dev_set_drvdata((dev), (data))
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index 48058e6..588fd07 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -204,6 +204,19 @@
#define MSM_CAM_IOCTL_STATS_FLUSH_BUFQ \
_IOR(MSM_CAM_IOCTL_MAGIC, 57, struct msm_stats_flush_bufq *)
+#define MSM_CAM_IOCTL_SET_MCTL_SDEV \
+ _IOW(MSM_CAM_IOCTL_MAGIC, 58, struct msm_mctl_set_sdev_data *)
+
+#define MSM_CAM_IOCTL_UNSET_MCTL_SDEV \
+ _IOW(MSM_CAM_IOCTL_MAGIC, 59, struct msm_mctl_set_sdev_data *)
+
+#define MSM_CAM_IOCTL_GET_INST_HANDLE \
+ _IOR(MSM_CAM_IOCTL_MAGIC, 60, uint32_t *)
+
+#define MSM_CAM_IOCTL_STATS_UNREG_BUF \
+ _IOR(MSM_CAM_IOCTL_MAGIC, 61, struct msm_stats_flush_bufq *)
+
+
struct msm_stats_reqbuf {
int num_buf; /* how many buffers requested */
int stats_type; /* stats type */
@@ -334,6 +347,7 @@
struct msm_pp_frame_mp mp[MAX_PLANES];
};
int node_type;
+ uint32_t inst_handle;
};
struct msm_cam_evt_divert_frame {
@@ -458,6 +472,7 @@
#define CMD_AXI_CFG_ZSL 43
#define CMD_AXI_CFG_SNAP_VPE 44
#define CMD_AXI_CFG_SNAP_THUMB_VPE 45
+
#define CMD_CONFIG_PING_ADDR 46
#define CMD_CONFIG_PONG_ADDR 47
#define CMD_CONFIG_FREE_BUF_ADDR 48
@@ -465,6 +480,14 @@
#define CMD_AXI_CFG_VIDEO_ALL_CHNLS 50
#define CMD_VFE_BUFFER_RELEASE 51
#define CMD_VFE_PROCESS_IRQ 52
+#define CMD_STATS_BG_ENABLE 53
+#define CMD_STATS_BF_ENABLE 54
+#define CMD_STATS_BHIST_ENABLE 55
+#define CMD_STATS_BG_BUF_RELEASE 56
+#define CMD_STATS_BF_BUF_RELEASE 57
+#define CMD_STATS_BHIST_BUF_RELEASE 58
+#define CMD_VFE_SOF_COUNT_UPDATE 59
+#define CMD_VFE_COUNT_SOF_ENABLE 60
#define CMD_AXI_CFG_PRIM BIT(8)
#define CMD_AXI_CFG_PRIM_ALL_CHNLS BIT(9)
@@ -475,6 +498,16 @@
#define CMD_AXI_START 0xE1
#define CMD_AXI_STOP 0xE2
+#define CMD_AXI_RESET 0xE3
+
+
+#define AXI_CMD_PREVIEW BIT(0)
+#define AXI_CMD_CAPTURE BIT(1)
+#define AXI_CMD_RECORD BIT(2)
+#define AXI_CMD_ZSL BIT(3)
+#define AXI_CMD_RAW_CAPTURE BIT(4)
+
+
/* vfe config command: config command(from config thread)*/
struct msm_vfe_cfg_cmd {
@@ -514,7 +547,10 @@
#define MSM_PMEM_C2D 17
#define MSM_PMEM_MAINIMG_VPE 18
#define MSM_PMEM_THUMBNAIL_VPE 19
-#define MSM_PMEM_MAX 20
+#define MSM_PMEM_BAYER_GRID 20
+#define MSM_PMEM_BAYER_FOCUS 21
+#define MSM_PMEM_BAYER_HIST 22
+#define MSM_PMEM_MAX 23
#define STAT_AEAW 0
#define STAT_AEC 1
@@ -524,7 +560,10 @@
#define STAT_CS 5
#define STAT_IHIST 6
#define STAT_SKIN 7
-#define STAT_MAX 8
+#define STAT_BG 8
+#define STAT_BF 9
+#define STAT_BHIST 10
+#define STAT_MAX 11
#define FRAME_PREVIEW_OUTPUT1 0
#define FRAME_PREVIEW_OUTPUT2 1
@@ -767,7 +806,7 @@
#define MSM_V4L2_PID_CTRL_CMD (V4L2_CID_PRIVATE_BASE+13)
#define MSM_V4L2_PID_EVT_SUB_INFO (V4L2_CID_PRIVATE_BASE+14)
#define MSM_V4L2_PID_STROBE_FLASH (V4L2_CID_PRIVATE_BASE+15)
-#define MSM_V4L2_PID_MMAP_ENTRY (V4L2_CID_PRIVATE_BASE+16)
+#define MSM_V4L2_PID_INST_HANDLE (V4L2_CID_PRIVATE_BASE+16)
#define MSM_V4L2_PID_MMAP_INST (V4L2_CID_PRIVATE_BASE+17)
#define MSM_V4L2_PID_PP_PLANE_INFO (V4L2_CID_PRIVATE_BASE+18)
#define MSM_V4L2_PID_MAX MSM_V4L2_PID_PP_PLANE_INFO
@@ -1225,27 +1264,31 @@
#define CSI_DECODE_10BIT 2
#define CSI_DECODE_DPCM_10_8_10 5
-#define ISPIF_STREAM(intf, action) (((intf)<<ISPIF_S_STREAM_SHIFT)+(action))
-#define ISPIF_ON_FRAME_BOUNDARY (0x01 << 0)
-#define ISPIF_OFF_FRAME_BOUNDARY (0x01 << 1)
-#define ISPIF_OFF_IMMEDIATELY (0x01 << 2)
-#define ISPIF_S_STREAM_SHIFT 4
-
+#define ISPIF_STREAM(intf, action, vfe) (((intf)<<ISPIF_S_STREAM_SHIFT)+\
+ (action)+((vfe)<<ISPIF_VFE_INTF_SHIFT))
+#define ISPIF_ON_FRAME_BOUNDARY (0x01 << 0)
+#define ISPIF_OFF_FRAME_BOUNDARY (0x01 << 1)
+#define ISPIF_OFF_IMMEDIATELY (0x01 << 2)
+#define ISPIF_S_STREAM_SHIFT 4
+#define ISPIF_VFE_INTF_SHIFT 12
#define PIX_0 (0x01 << 0)
#define RDI_0 (0x01 << 1)
#define PIX_1 (0x01 << 2)
#define RDI_1 (0x01 << 3)
-#define PIX_2 (0x01 << 4)
-#define RDI_2 (0x01 << 5)
+#define RDI_2 (0x01 << 4)
+enum msm_ispif_vfe_intf {
+ VFE0,
+ VFE1,
+ VFE_MAX,
+};
enum msm_ispif_intftype {
PIX0,
RDI0,
PIX1,
RDI1,
- PIX2,
RDI2,
INTF_MAX,
};
@@ -1280,6 +1323,7 @@
uint8_t intftype;
uint16_t cid_mask;
uint8_t csid;
+ uint8_t vfe_intf;
};
struct msm_ispif_params_list {
@@ -1623,13 +1667,36 @@
uint8_t num_planes;
struct plane_data plane[MAX_PLANES];
uint32_t sp_y_offset;
- uint8_t vpe_can_use;
+ uint32_t inst_handle;
};
#define QCAMERA_NAME "qcamera"
+#define QCAMERA_SERVER_NAME "qcamera_server"
#define QCAMERA_DEVICE_GROUP_ID 1
#define QCAMERA_VNODE_GROUP_ID 2
+enum msm_cam_subdev_type {
+ CSIPHY_DEV,
+ CSID_DEV,
+ CSIC_DEV,
+ ISPIF_DEV,
+ VFE_DEV,
+ AXI_DEV,
+ VPE_DEV,
+ SENSOR_DEV,
+ ACTUATOR_DEV,
+ EEPROM_DEV,
+ GESTURE_DEV,
+ IRQ_ROUTER_DEV,
+ CPP_DEV,
+ CCI_DEV,
+};
+
+struct msm_mctl_set_sdev_data {
+ uint32_t revision;
+ enum msm_cam_subdev_type sdev_type;
+};
+
#define MSM_CAM_V4L2_IOCTL_GET_CAMERA_INFO \
_IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct msm_camera_v4l2_ioctl_t)
@@ -1654,12 +1721,53 @@
#define MSM_CAM_V4L2_IOCTL_PRIVATE_S_CTRL \
_IOWR('V', BASE_VIDIOC_PRIVATE + 8, struct msm_camera_v4l2_ioctl_t)
+#define MSM_CAM_V4L2_IOCTL_PRIVATE_G_CTRL \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 9, struct msm_camera_v4l2_ioctl_t)
+
+#define VIDIOC_MSM_VPE_INIT \
+ _IO('V', BASE_VIDIOC_PRIVATE + 15)
+
+#define VIDIOC_MSM_VPE_RELEASE \
+ _IO('V', BASE_VIDIOC_PRIVATE + 16)
+
+#define VIDIOC_MSM_VPE_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 17, struct msm_mctl_pp_params *)
+
+#define VIDIOC_MSM_AXI_INIT \
+ _IO('V', BASE_VIDIOC_PRIVATE + 18)
+
+#define VIDIOC_MSM_AXI_RELEASE \
+ _IO('V', BASE_VIDIOC_PRIVATE + 19)
+
+#define VIDIOC_MSM_AXI_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 20, void *)
+
+#define VIDIOC_MSM_AXI_IRQ \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 21, void *)
+
+#define VIDIOC_MSM_AXI_BUF_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 22, void *)
+
+#define VIDIOC_MSM_VFE_INIT \
+ _IO('V', BASE_VIDIOC_PRIVATE + 22)
+
+#define VIDIOC_MSM_VFE_RELEASE \
+ _IO('V', BASE_VIDIOC_PRIVATE + 23)
+
struct msm_camera_v4l2_ioctl_t {
uint32_t id;
void __user *ioctl_ptr;
uint32_t len;
};
+struct msm_camera_vfe_params_t {
+ uint32_t operation_mode;
+ uint32_t capture_count;
+ uint32_t skip_abort;
+ uint16_t port_info;
+ uint16_t cmd_type;
+};
+
enum msm_camss_irq_idx {
CAMERA_SS_IRQ_0,
CAMERA_SS_IRQ_1,
@@ -1793,6 +1901,12 @@
struct msm_cpp_frame_strip_info *strip_info;
};
+struct msm_ver_num_info {
+ uint32_t main;
+ uint32_t minor;
+ uint32_t rev;
+};
+
#define VIDIOC_MSM_CPP_CFG \
_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_camera_v4l2_ioctl_t)
@@ -1804,4 +1918,38 @@
#define V4L2_EVENT_CPP_FRAME_DONE (V4L2_EVENT_PRIVATE_START + 0)
+/* Instance Handle - inst_handle
+ * Data bundle containing the information about where
+ * to get a buffer for a particular camera instance.
+ * This is a bitmask containing the following data:
+ * Buffer Handle Bitmask:
+ * ------------------------------------
+ * Bits : Purpose
+ * ------------------------------------
+ * 31 - 24 : Reserved.
+ * 23 : is Image mode valid?
+ * 22 - 16 : Image mode.
+ * 15 : is MCTL PP inst idx valid?
+ * 14 - 8 : MCTL PP inst idx.
+ * 7 : is Video inst idx valid?
+ * 6 - 0 : Video inst idx.
+ */
+#define CLR_IMG_MODE(handle) (handle &= 0xFF00FFFF)
+#define SET_IMG_MODE(handle, data) \
+ (handle |= ((0x1 << 23) | ((data & 0x7F) << 16)))
+#define GET_IMG_MODE(handle) \
+ ((handle & 0x800000) ? ((handle & 0x7F0000) >> 16) : 0xFF)
+
+#define CLR_MCTLPP_INST_IDX(handle) (handle &= 0xFFFF00FF)
+#define SET_MCTLPP_INST_IDX(handle, data) \
+ (handle |= ((0x1 << 15) | ((data & 0x7F) << 8)))
+#define GET_MCTLPP_INST_IDX(handle) \
+ ((handle & 0x8000) ? ((handle & 0x7F00) >> 8) : 0xFF)
+
+#define CLR_VIDEO_INST_IDX(handle) (handle &= 0xFFFFFF00)
+#define GET_VIDEO_INST_IDX(handle) \
+ ((handle & 0x80) ? (handle & 0x7F) : 0xFF)
+#define SET_VIDEO_INST_IDX(handle, data) \
+ (handle |= (0x1 << 7) | (data & 0x7F))
+
#endif /* __LINUX_MSM_CAMERA_H */
diff --git a/include/media/msm_isp.h b/include/media/msm_isp.h
index 1f3527b..f8dbed9 100644
--- a/include/media/msm_isp.h
+++ b/include/media/msm_isp.h
@@ -62,6 +62,12 @@
#define MSG_ID_OUTPUT_TERTIARY1 43
#define MSG_ID_STOP_LS_ACK 44
#define MSG_ID_OUTPUT_TERTIARY2 45
+#define MSG_ID_STATS_BG 46
+#define MSG_ID_STATS_BF 47
+#define MSG_ID_STATS_BHIST 48
+#define MSG_ID_RDI0_UPDATE_ACK 49
+#define MSG_ID_RDI1_UPDATE_ACK 50
+#define MSG_ID_RDI2_UPDATE_ACK 51
/* ISP command IDs */
#define VFE_CMD_DUMMY_0 0
@@ -206,6 +212,14 @@
#define VFE_CMD_STATS_REQBUF 139
#define VFE_CMD_STATS_ENQUEUEBUF 140
#define VFE_CMD_STATS_FLUSH_BUFQ 141
+#define VFE_CMD_STATS_UNREGBUF 142
+#define VFE_CMD_STATS_BG_START 143
+#define VFE_CMD_STATS_BG_STOP 144
+#define VFE_CMD_STATS_BF_START 145
+#define VFE_CMD_STATS_BF_STOP 146
+#define VFE_CMD_STATS_BHIST_START 147
+#define VFE_CMD_STATS_BHIST_STOP 148
+#define VFE_CMD_RESET_2 149
struct msm_isp_cmd {
int32_t id;
@@ -336,7 +350,7 @@
#define VFE_OUTPUTS_RDI1 BIT(12)
struct msm_frame_info {
- uint32_t image_mode;
+ uint32_t inst_handle;
uint32_t path;
};
diff --git a/include/media/radio-iris.h b/include/media/radio-iris.h
index 25a1d84..988de6a 100644
--- a/include/media/radio-iris.h
+++ b/include/media/radio-iris.h
@@ -599,6 +599,7 @@
V4L2_CID_PRIVATE_SPUR_FREQ_RMSSI,
V4L2_CID_PRIVATE_SPUR_SELECTION,
V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE,
+ V4L2_CID_PRIVATE_VALID_CHANNEL,
/*using private CIDs under userclass*/
V4L2_CID_PRIVATE_IRIS_READ_DEFAULT = 0x00980928,
@@ -786,6 +787,11 @@
#define CALIB_DATA_OFSET 2
#define CALIB_MODE_OFSET 1
#define MAX_CALIB_SIZE 75
+
+/* Channel validity */
+#define INVALID_CHANNEL (0)
+#define VALID_CHANNEL (1)
+
struct hci_fm_set_cal_req_proc {
__u8 mode;
/*Max process calibration data size*/
diff --git a/include/media/vcap_fmt.h b/include/media/vcap_fmt.h
index 92240bf1..00e0375 100644
--- a/include/media/vcap_fmt.h
+++ b/include/media/vcap_fmt.h
@@ -44,6 +44,43 @@
HAL_VCAP_RGB,
};
+enum nr_mode {
+ NR_DISABLE = 0,
+ NR_AUTO,
+ NR_MANUAL,
+};
+
+enum nr_decay_ratio {
+ NR_Decay_Ratio_26 = 0,
+ NR_Decay_Ratio_25,
+ NR_Decay_Ratio_24,
+ NR_Decay_Ratio_23,
+ NR_Decay_Ratio_22,
+ NR_Decay_Ratio_21,
+ NR_Decay_Ratio_20,
+ NR_Decay_Ratio_19,
+};
+
+struct nr_config {
+ uint8_t max_blend_ratio;
+ uint8_t scale_diff_ratio;
+ uint8_t diff_limit_ratio;
+ uint8_t scale_motion_ratio;
+ uint8_t blend_limit_ratio;
+};
+
+struct nr_param {
+ enum nr_mode mode;
+ enum nr_decay_ratio decay_ratio;
+ uint8_t window;
+ struct nr_config luma;
+ struct nr_config chroma;
+};
+
+#define VCAPIOC_NR_S_PARAMS _IOWR('V', (BASE_VIDIOC_PRIVATE+0), struct nr_param)
+
+#define VCAPIOC_NR_G_PARAMS _IOWR('V', (BASE_VIDIOC_PRIVATE+1), struct nr_param)
+
struct v4l2_format_vc_ext {
enum hal_vcap_mode mode;
enum hal_vcap_polar h_polar;
diff --git a/include/media/vcap_v4l2.h b/include/media/vcap_v4l2.h
index e1d69d5..390a843 100644
--- a/include/media/vcap_v4l2.h
+++ b/include/media/vcap_v4l2.h
@@ -120,13 +120,13 @@
void *bufMotion;
struct nr_buffer bufNR;
- bool nr_enabled;
+ struct nr_param nr_param;
+ bool nr_update;
};
struct vp_work_t {
struct work_struct work;
struct vcap_client_data *cd;
- uint32_t irq;
};
struct vcap_dev {
@@ -159,10 +159,13 @@
atomic_t vc_enabled;
atomic_t vp_enabled;
- spinlock_t dev_slock;
+ struct mutex dev_mutex;
atomic_t open_clients;
bool vc_resource;
bool vp_resource;
+ bool vp_dummy_event;
+ bool vp_dummy_complete;
+ wait_queue_head_t vp_dummy_waitq;
struct workqueue_struct *vcap_wq;
struct vp_work_t vp_work;
@@ -221,13 +224,6 @@
extern struct vcap_hacked_vals hacked_buf[];
#endif
-int free_ion_handle(struct vcap_dev *dev, struct vb2_queue *q,
- struct v4l2_buffer *b);
-
-int get_phys_addr(struct vcap_dev *dev, struct vb2_queue *q,
- struct v4l2_buffer *b);
-
int vcvp_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
int vcvp_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b);
-
#endif
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index eb89f4b..3526e29 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1334,6 +1334,15 @@
__u8 data[0];
} __packed;
+#define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03
+struct hci_ev_le_conn_update_complete {
+ __u8 status;
+ __le16 handle;
+ __le16 interval;
+ __le16 latency;
+ __le16 supervision_timeout;
+} __packed;
+
#define HCI_EV_LE_LTK_REQ 0x05
struct hci_ev_le_ltk_req {
__le16 handle;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 47b856c..22428c1 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1033,6 +1033,8 @@
int mgmt_connectable(u16 index, u8 connectable);
int mgmt_new_key(u16 index, struct link_key *key, u8 bonded);
int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 le);
+int mgmt_le_conn_params(u16 index, bdaddr_t *bdaddr, u16 interval,
+ u16 latency, u16 timeout);
int mgmt_disconnected(u16 index, bdaddr_t *bdaddr);
int mgmt_disconnect_failed(u16 index);
int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index e34c425..3048339 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -379,3 +379,11 @@
bdaddr_t bdaddr;
__s8 rssi;
} __packed;
+
+#define MGMT_EV_LE_CONN_PARAMS 0xF000
+struct mgmt_ev_le_conn_params {
+ bdaddr_t bdaddr;
+ __u16 interval;
+ __u16 latency;
+ __u16 timeout;
+} __packed;
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 8e8778a..ac4ec09 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -573,6 +573,7 @@
/* Payload of the #ADM_CMD_CONNECT_AFE_PORT_V5 command.*/
struct adm_cmd_connect_afe_port_v5 {
+ struct apr_hdr hdr;
u8 mode;
/* ID of the stream router (RX/TX). Use the
* ADM_STRTR_ID_RX or ADM_STRTR_IDX macros
diff --git a/include/sound/apr_audio.h b/include/sound/apr_audio.h
index c770f13..979db58 100644
--- a/include/sound/apr_audio.h
+++ b/include/sound/apr_audio.h
@@ -402,11 +402,15 @@
#define AFE_MODULE_ID_PORT_INFO 0x00010200
/* Module ID for the loopback-related parameters. */
#define AFE_MODULE_LOOPBACK 0x00010205
-struct afe_param_payload {
+struct afe_param_payload_base {
u32 module_id;
u32 param_id;
u16 param_size;
u16 reserved;
+} __packed;
+
+struct afe_param_payload {
+ struct afe_param_payload_base base;
union {
struct afe_param_sidetone_gain sidetone_gain;
struct afe_param_sampling_rate sampling_rate;
@@ -1348,6 +1352,14 @@
u32 uid;
} __attribute__((packed));
+#define ASM_DATA_CMD_READ_COMPRESSED 0x00010DBC
+struct asm_stream_cmd_read_compressed {
+ struct apr_hdr hdr;
+ u32 buf_add;
+ u32 buf_size;
+ u32 uid;
+} __packed;
+
#define ASM_DATA_CMD_MEDIA_FORMAT_UPDATE 0x00010BDC
#define ASM_DATA_EVENT_ENC_SR_CM_NOTIFY 0x00010BDE
struct asm_stream_media_format_update{
@@ -1411,6 +1423,19 @@
u32 id;
} __attribute__((packed));
+#define ASM_DATA_EVENT_READ_COMPRESSED_DONE 0x00010DBD
+struct asm_data_event_read_compressed_done {
+ u32 status;
+ u32 buffer_add;
+ u32 enc_frame_size;
+ u32 offset;
+ u32 msw_ts;
+ u32 lsw_ts;
+ u32 flags;
+ u32 num_frames;
+ u32 id;
+} __packed;
+
#define ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY 0x00010C65
struct asm_data_event_sr_cm_change_notify {
u32 sample_rate;
diff --git a/include/sound/compress_params.h b/include/sound/compress_params.h
index 75558bf..31d684b 100644
--- a/include/sound/compress_params.h
+++ b/include/sound/compress_params.h
@@ -56,6 +56,15 @@
#define MAX_NUM_CODEC_DESCRIPTORS 32
#define MAX_NUM_BITRATES 32
+/* compressed TX */
+#define MAX_NUM_FRAMES_PER_BUFFER 1
+#define COMPRESSED_META_DATA_MODE 0x10
+#define META_DATA_LEN_BYTES 36
+#define Q6_AC3_DECODER 0x00010BF6
+#define Q6_EAC3_DECODER 0x00010C3C
+#define Q6_DTS 0x00010D88
+#define Q6_DTS_LBR 0x00010DBB
+
/* Codecs are listed linearly to allow for extensibility */
#define SND_AUDIOCODEC_PCM ((__u32) 0x00000001)
#define SND_AUDIOCODEC_MP3 ((__u32) 0x00000002)
diff --git a/include/sound/q6afe-v2.h b/include/sound/q6afe-v2.h
index 1587d38..b1ed9a4 100644
--- a/include/sound/q6afe-v2.h
+++ b/include/sound/q6afe-v2.h
@@ -77,6 +77,7 @@
int afe_sidetone(u16 tx_port_id, u16 rx_port_id, u16 enable, uint16_t gain);
int afe_loopback_gain(u16 port_id, u16 volume);
int afe_validate_port(u16 port_id);
+int afe_get_port_index(u16 port_id);
int afe_start_pseudo_port(u16 port_id);
int afe_stop_pseudo_port(u16 port_id);
int afe_cmd_memory_map(u32 dma_addr_p, u32 dma_buf_sz);
diff --git a/include/sound/q6asm.h b/include/sound/q6asm.h
index d38dbd5..ea77974 100644
--- a/include/sound/q6asm.h
+++ b/include/sound/q6asm.h
@@ -151,6 +151,7 @@
atomic_t cmd_state;
atomic_t time_flag;
+ atomic_t nowait_cmd_cnt;
wait_queue_head_t cmd_wait;
wait_queue_head_t time_wait;
@@ -182,7 +183,8 @@
int q6asm_open_read(struct audio_client *ac, uint32_t format);
-int q6asm_open_read_compressed(struct audio_client *ac, uint32_t format);
+int q6asm_open_read_compressed(struct audio_client *ac,
+ uint32_t frames_per_buffer, uint32_t meta_data_mode);
int q6asm_open_write(struct audio_client *ac, uint32_t format);
@@ -203,6 +205,9 @@
int q6asm_async_read(struct audio_client *ac,
struct audio_aio_read_param *param);
+int q6asm_async_read_compressed(struct audio_client *ac,
+ struct audio_aio_read_param *param);
+
int q6asm_read(struct audio_client *ac);
int q6asm_read_nolock(struct audio_client *ac);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 08f52bb..c6cd85b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -447,7 +447,6 @@
out:
ts->next_jiffies = next_jiffies;
ts->last_jiffies = last_jiffies;
- ts->sleep_length = ktime_sub(dev->next_event, now);
}
/**
@@ -516,8 +515,8 @@
ktime_t tick_nohz_get_sleep_length(void)
{
struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
-
- return ts->sleep_length;
+ struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+ return ktime_sub(dev->next_event, ts->idle_entrytime);
}
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index c1906e4..1967297 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -58,7 +58,7 @@
printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
msg, raw_smp_processor_id(),
current->comm, task_pid_nr(current));
- printk(KERN_EMERG " lock: %ps, .magic: %08x, .owner: %s/%d, "
+ printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
".owner_cpu: %d\n",
lock, lock->magic,
owner ? owner->comm : "<none>",
@@ -118,10 +118,6 @@
/* lockup suspected: */
if (print_once) {
print_once = 0;
- printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
- "%s/%d, %ps\n",
- raw_smp_processor_id(), current->comm,
- task_pid_nr(current), lock);
spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
trigger_all_cpu_backtrace();
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index dd8e2aa..fc15fb1 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -253,7 +253,9 @@
sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
BUG_ON(IS_ERR(sync_supers_tsk));
- setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
+ init_timer_deferrable(&sync_supers_timer);
+ sync_supers_timer.function = sync_supers_timer_fn;
+ sync_supers_timer.data = 0;
bdi_arm_supers_timer();
err = bdi_init(&default_backing_dev_info);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c34cbbd..831509c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5649,7 +5649,12 @@
__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
int **resultp)
{
- return alloc_page(GFP_HIGHUSER_MOVABLE);
+ gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+
+ if (PageHighMem(page))
+ gfp_mask |= __GFP_HIGHMEM;
+
+ return alloc_page(gfp_mask);
}
/* [start, end) must belong to a single zone. */
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 02ea082..0563af9 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -951,6 +951,9 @@
if (test_bit(HCI_RAW, &hdev->flags))
return;
+ if (conn->type == LE_LINK)
+ return;
+
if (conn->mode != HCI_CM_SNIFF)
goto timer;
@@ -1018,6 +1021,9 @@
if (test_bit(HCI_RAW, &hdev->flags))
return;
+ if (conn->type == LE_LINK)
+ return;
+
if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
return;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index da8b2dc..56bdd46 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2124,6 +2124,55 @@
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */
+/* HCI ACL Connection scheduler */
+static inline struct hci_conn *hci_low_sent_acl(struct hci_dev *hdev,
+ int *quote)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *conn = NULL;
+ int num = 0, min = ~0, conn_num = 0;
+ struct list_head *p;
+
+ /* We don't have to lock device here. Connections are always
+ * added and removed with TX task disabled. */
+ list_for_each(p, &h->list) {
+ struct hci_conn *c;
+ c = list_entry(p, struct hci_conn, list);
+ if (c->type == ACL_LINK)
+ conn_num++;
+
+ if (skb_queue_empty(&c->data_q))
+ continue;
+
+ if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
+ continue;
+
+ num++;
+
+ if (c->sent < min) {
+ min = c->sent;
+ conn = c;
+ }
+ }
+
+ if (conn) {
+ int cnt, q;
+ cnt = hdev->acl_cnt;
+ q = cnt / num;
+ *quote = q ? q : 1;
+ } else
+ *quote = 0;
+
+ if ((*quote == hdev->acl_cnt) &&
+ (conn->sent == (hdev->acl_pkts - 1)) &&
+ (conn_num > 1)) {
+ *quote = 0;
+ conn = NULL;
+ }
+
+ BT_DBG("conn %p quote %d", conn, *quote);
+ return conn;
+}
/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
@@ -2217,8 +2266,10 @@
}
while (hdev->acl_cnt > 0 &&
- (conn = hci_low_sent(hdev, ACL_LINK, "e))) {
- while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
+ ((conn = hci_low_sent_acl(hdev, "e)) != NULL)) {
+
+ while (quote > 0 &&
+ (skb = skb_dequeue(&conn->data_q))) {
int count = 1;
BT_DBG("skb %p len %d", skb, skb->len);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index eb5a0cc..f83c108 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3191,6 +3191,10 @@
conn->state = BT_CONNECTED;
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
mgmt_connected(hdev->id, &ev->bdaddr, 1);
+ mgmt_le_conn_params(hdev->id, &ev->bdaddr,
+ __le16_to_cpu(ev->interval),
+ __le16_to_cpu(ev->latency),
+ __le16_to_cpu(ev->supervision_timeout));
hci_conn_hold(conn);
hci_conn_hold_device(conn);
@@ -3202,6 +3206,37 @@
hci_dev_unlock(hdev);
}
+static inline void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status %d", hdev->name, ev->status);
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev,
+ __le16_to_cpu(ev->handle));
+ if (conn == NULL) {
+ BT_ERR("Unknown connection update");
+ goto unlock;
+ }
+
+ if (ev->status) {
+ BT_ERR("Connection update unsuccessful");
+ goto unlock;
+ }
+
+ mgmt_le_conn_params(hdev->id, &conn->dst,
+ __le16_to_cpu(ev->interval),
+ __le16_to_cpu(ev->latency),
+ __le16_to_cpu(ev->supervision_timeout));
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -3271,6 +3306,10 @@
hci_le_conn_complete_evt(hdev, skb);
break;
+ case HCI_EV_LE_CONN_UPDATE_COMPLETE:
+ hci_le_conn_update_complete_evt(hdev, skb);
+ break;
+
case HCI_EV_LE_LTK_REQ:
hci_le_ltk_request_evt(hdev, skb);
break;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 733dc22..22a4dbe 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -701,6 +701,9 @@
if (!skb)
return;
+ if (conn->hcon == NULL || conn->hcon->hdev == NULL)
+ return;
+
if (lmp_no_flush_capable(conn->hcon->hdev))
flags = ACL_START_NO_FLUSH;
else
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index dce8491..72234c1 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2633,6 +2633,20 @@
return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
}
+int mgmt_le_conn_params(u16 index, bdaddr_t *bdaddr, u16 interval,
+ u16 latency, u16 timeout)
+{
+ struct mgmt_ev_le_conn_params ev;
+
+ bacpy(&ev.bdaddr, bdaddr);
+ ev.interval = interval;
+ ev.latency = latency;
+ ev.timeout = timeout;
+
+ return mgmt_event(MGMT_EV_LE_CONN_PARAMS, index, &ev, sizeof(ev),
+ NULL);
+}
+
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
struct mgmt_cp_disconnect *cp = cmd->param;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 1390eca..dc4bf2f 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -138,6 +138,21 @@
static inline void rfcomm_session_put(struct rfcomm_session *s)
{
+ bool match = false;
+ struct rfcomm_session *sess;
+ struct list_head *p, *n;
+ list_for_each_safe(p, n, &session_list) {
+ sess = list_entry(p, struct rfcomm_session, list);
+ if (s == sess) {
+ match = true;
+ break;
+ }
+ }
+ if (!match) {
+ BT_ERR("session already freed previously");
+ dump_stack();
+ return;
+ }
if (atomic_dec_and_test(&s->refcnt))
rfcomm_session_del(s);
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 6bbb34b..276ff71 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -711,10 +711,6 @@
invalid_key:
hcon->sec_req = FALSE;
- /* Switch to Pairing Connection Parameters */
- hci_le_conn_update(hcon, SMP_MIN_CONN_INTERVAL, SMP_MAX_CONN_INTERVAL,
- SMP_MAX_CONN_LATENCY, SMP_SUPERVISION_TIMEOUT);
-
skb_pull(skb, sizeof(*rp));
memset(&cp, 0, sizeof(cp));
@@ -776,11 +772,6 @@
if (hcon->link_mode & HCI_LM_MASTER) {
struct smp_cmd_pairing cp;
- /* Switch to Pairing Connection Parameters */
- hci_le_conn_update(hcon, SMP_MIN_CONN_INTERVAL,
- SMP_MAX_CONN_INTERVAL, SMP_MAX_CONN_LATENCY,
- SMP_SUPERVISION_TIMEOUT);
-
build_pairing_cmd(conn, &cp, NULL, authreq);
hcon->preq[0] = SMP_CMD_PAIRING_REQ;
memcpy(&hcon->preq[1], &cp, sizeof(cp));
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index 70d9fa9..f4f55fa 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -33,6 +33,7 @@
#include <linux/pm_runtime.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
+#include <linux/wait.h>
#include "wcd9304.h"
#define WCD9304_RATES (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|\
@@ -55,12 +56,15 @@
#define AIF1_PB 1
#define AIF1_CAP 2
#define NUM_CODEC_DAIS 2
+#define SLIM_CLOSE_TIMEOUT 1000
struct sitar_codec_dai_data {
u32 rate;
u32 *ch_num;
u32 ch_act;
u32 ch_tot;
+ u32 ch_mask;
+ wait_queue_head_t dai_wait;
};
#define SITAR_MCLK_RATE_12288KHZ 12288000
@@ -848,14 +852,14 @@
if (enable) {
sitar->adc_count++;
- snd_soc_update_bits(codec, SITAR_A_TX_COM_BIAS, 0xE0, 0xE0);
-
+ snd_soc_update_bits(codec, SITAR_A_CDC_CLK_OTHR_CTL,
+ 0x02, 0x02);
} else {
sitar->adc_count--;
if (!sitar->adc_count) {
if (!sitar->mbhc_polling_active)
- snd_soc_update_bits(codec, SITAR_A_TX_COM_BIAS,
- 0xE0, 0x0);
+ snd_soc_update_bits(codec,
+ SITAR_A_CDC_CLK_OTHR_CTL, 0xE0, 0x0);
}
}
}
@@ -2833,13 +2837,48 @@
},
};
+static int sitar_codec_enable_chmask(struct sitar_priv *sitar,
+ int event, int index)
+{
+ int ret = 0;
+ u32 k = 0;
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ for (k = 0; k < sitar->dai[index].ch_tot; k++) {
+ ret = wcd9xxx_get_slave_port(
+ sitar->dai[index].ch_num[k]);
+ if (ret < 0) {
+ pr_err("%s: Invalid Slave port ID: %d\n",
+ __func__, ret);
+ ret = -EINVAL;
+ break;
+ }
+ sitar->dai[index].ch_mask |= 1 << ret;
+ }
+ ret = 0;
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ ret = wait_event_timeout(sitar->dai[index].dai_wait,
+ (sitar->dai[index].ch_mask == 0),
+ msecs_to_jiffies(SLIM_CLOSE_TIMEOUT));
+ if (!ret) {
+ pr_err("%s: slim close tx/rx timeout\n",
+ __func__);
+ ret = -EINVAL;
+ }
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
static int sitar_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct wcd9xxx *sitar;
struct snd_soc_codec *codec = w->codec;
struct sitar_priv *sitar_p = snd_soc_codec_get_drvdata(codec);
- u32 j = 0;
+ u32 j = 0, ret = 0;
codec->control_data = dev_get_drvdata(codec->dev->parent);
sitar = codec->control_data;
/* Execute the callback only if interface type is slimbus */
@@ -2856,11 +2895,13 @@
break;
}
}
- if (sitar_p->dai[j].ch_act == sitar_p->dai[j].ch_tot)
- wcd9xxx_cfg_slim_sch_rx(sitar,
+ if (sitar_p->dai[j].ch_act == sitar_p->dai[j].ch_tot) {
+ ret = sitar_codec_enable_chmask(sitar_p, event, j);
+ ret = wcd9xxx_cfg_slim_sch_rx(sitar,
sitar_p->dai[j].ch_num,
sitar_p->dai[j].ch_tot,
sitar_p->dai[j].rate);
+ }
break;
case SND_SOC_DAPM_POST_PMD:
for (j = 0; j < ARRAY_SIZE(sitar_dai); j++) {
@@ -2876,17 +2917,14 @@
wcd9xxx_close_slim_sch_rx(sitar,
sitar_p->dai[j].ch_num,
sitar_p->dai[j].ch_tot);
- /* Wait for remove channel to complete
- * before derouting Rx path
- */
- usleep_range(15000, 15000);
sitar_p->dai[j].rate = 0;
memset(sitar_p->dai[j].ch_num, 0, (sizeof(u32)*
sitar_p->dai[j].ch_tot));
sitar_p->dai[j].ch_tot = 0;
+ ret = sitar_codec_enable_chmask(sitar_p, event, j);
}
}
- return 0;
+ return ret;
}
static int sitar_codec_enable_slimtx(struct snd_soc_dapm_widget *w,
@@ -2896,7 +2934,7 @@
struct snd_soc_codec *codec = w->codec;
struct sitar_priv *sitar_p = snd_soc_codec_get_drvdata(codec);
/* index to the DAI ID, for now hardcoding */
- u32 j = 0;
+ u32 j = 0, ret = 0;
codec->control_data = dev_get_drvdata(codec->dev->parent);
sitar = codec->control_data;
@@ -2915,11 +2953,13 @@
break;
}
}
- if (sitar_p->dai[j].ch_act == sitar_p->dai[j].ch_tot)
- wcd9xxx_cfg_slim_sch_tx(sitar,
+ if (sitar_p->dai[j].ch_act == sitar_p->dai[j].ch_tot) {
+ ret = sitar_codec_enable_chmask(sitar_p, event, j);
+ ret = wcd9xxx_cfg_slim_sch_tx(sitar,
sitar_p->dai[j].ch_num,
sitar_p->dai[j].ch_tot,
sitar_p->dai[j].rate);
+ }
break;
case SND_SOC_DAPM_POST_PMD:
for (j = 0; j < ARRAY_SIZE(sitar_dai); j++) {
@@ -2939,9 +2979,10 @@
memset(sitar_p->dai[j].ch_num, 0, (sizeof(u32)*
sitar_p->dai[j].ch_tot));
sitar_p->dai[j].ch_tot = 0;
+ ret = sitar_codec_enable_chmask(sitar_p, event, j);
}
}
- return 0;
+ return ret;
}
@@ -4600,7 +4641,7 @@
{
struct sitar_priv *priv = data;
struct snd_soc_codec *codec = priv->codec;
- int i, j;
+ int i, j, k, port_id, ch_mask_temp;
u8 val;
@@ -4608,14 +4649,30 @@
slimbus_value = wcd9xxx_interface_reg_read(codec->control_data,
SITAR_SLIM_PGD_PORT_INT_STATUS0 + i);
for_each_set_bit(j, &slimbus_value, BITS_PER_BYTE) {
+ port_id = i*8 + j;
val = wcd9xxx_interface_reg_read(codec->control_data,
- SITAR_SLIM_PGD_PORT_INT_SOURCE0 + i*8 + j);
+ SITAR_SLIM_PGD_PORT_INT_SOURCE0 + port_id);
if (val & 0x1)
- pr_err_ratelimited("overflow error on port %x,"
- " value %x\n", i*8 + j, val);
+ pr_err_ratelimited("overflow error on port %x, value %x\n",
+ port_id, val);
if (val & 0x2)
- pr_err_ratelimited("underflow error on port %x,"
- " value %x\n", i*8 + j, val);
+ pr_err_ratelimited("underflow error on port %x,value %x\n",
+ port_id, val);
+ if (val & 0x4) {
+ pr_debug("%s: port %x disconnect value %x\n",
+ __func__, port_id, val);
+ for (k = 0; k < ARRAY_SIZE(sitar_dai); k++) {
+ ch_mask_temp = 1 << port_id;
+ if (ch_mask_temp &
+ priv->dai[k].ch_mask) {
+ priv->dai[k].ch_mask &=
+ ~ch_mask_temp;
+ if (!priv->dai[k].ch_mask)
+ wake_up(
+ &priv->dai[k].dai_wait);
+ }
+ }
+ }
}
wcd9xxx_interface_reg_write(codec->control_data,
SITAR_SLIM_PGD_PORT_INT_CLR0 + i, 0xFF);
@@ -4783,6 +4840,10 @@
{SITAR_A_RX_LINE_1_GAIN, 0x10, 0x10},
{SITAR_A_RX_LINE_2_GAIN, 0x10, 0x10},
+ /* Set the MICBIAS default output as pull down*/
+ {SITAR_A_MICB_1_CTL, 0x01, 0x01},
+ {SITAR_A_MICB_2_CTL, 0x01, 0x01},
+
/* Initialize mic biases to differential mode */
{SITAR_A_MICB_1_INT_RBIAS, 0x24, 0x24},
{SITAR_A_MICB_2_INT_RBIAS, 0x24, 0x24},
@@ -4983,6 +5044,7 @@
}
sitar->dai[i].ch_num = kzalloc((sizeof(unsigned int)*
ch_cnt), GFP_KERNEL);
+ init_waitqueue_head(&sitar->dai[i].dai_wait);
}
codec->ignore_pmdown_time = 1;
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index 74ae595..15a5567 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -3747,20 +3747,6 @@
return 0;
}
-static void tabla_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct wcd9xxx *tabla_core = dev_get_drvdata(dai->codec->dev->parent);
- pr_debug("%s(): substream = %s stream = %d\n" , __func__,
- substream->name, substream->stream);
- if ((tabla_core != NULL) &&
- (tabla_core->dev != NULL) &&
- (tabla_core->dev->parent != NULL)) {
- pm_runtime_mark_last_busy(tabla_core->dev->parent);
- pm_runtime_put(tabla_core->dev->parent);
- }
-}
-
int tabla_mclk_enable(struct snd_soc_codec *codec, int mclk_enable, bool dapm)
{
struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
@@ -4310,7 +4296,6 @@
static struct snd_soc_dai_ops tabla_dai_ops = {
.startup = tabla_startup,
- .shutdown = tabla_shutdown,
.hw_params = tabla_hw_params,
.set_sysclk = tabla_set_dai_sysclk,
.set_fmt = tabla_set_dai_fmt,
@@ -4481,9 +4466,17 @@
u32 ret = 0;
codec->control_data = dev_get_drvdata(codec->dev->parent);
tabla = codec->control_data;
+
/* Execute the callback only if interface type is slimbus */
- if (tabla_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+ if (tabla_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+ if (event == SND_SOC_DAPM_POST_PMD && (tabla != NULL) &&
+ (tabla->dev != NULL) &&
+ (tabla->dev->parent != NULL)) {
+ pm_runtime_mark_last_busy(tabla->dev->parent);
+ pm_runtime_put(tabla->dev->parent);
+ }
return 0;
+ }
pr_debug("%s: %s %d\n", __func__, w->name, event);
@@ -4534,6 +4527,12 @@
ret = tabla_codec_enable_chmask(tabla_p,
SND_SOC_DAPM_POST_PMD,
j);
+ if ((tabla != NULL) &&
+ (tabla->dev != NULL) &&
+ (tabla->dev->parent != NULL)) {
+ pm_runtime_mark_last_busy(tabla->dev->parent);
+ pm_runtime_put(tabla->dev->parent);
+ }
}
}
return ret;
@@ -4553,8 +4552,15 @@
tabla = codec->control_data;
/* Execute the callback only if interface type is slimbus */
- if (tabla_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+ if (tabla_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+ if (event == SND_SOC_DAPM_POST_PMD && (tabla != NULL) &&
+ (tabla->dev != NULL) &&
+ (tabla->dev->parent != NULL)) {
+ pm_runtime_mark_last_busy(tabla->dev->parent);
+ pm_runtime_put(tabla->dev->parent);
+ }
return 0;
+ }
pr_debug("%s(): %s %d\n", __func__, w->name, event);
@@ -4604,6 +4610,12 @@
ret = tabla_codec_enable_chmask(tabla_p,
SND_SOC_DAPM_POST_PMD,
j);
+ if ((tabla != NULL) &&
+ (tabla->dev != NULL) &&
+ (tabla->dev->parent != NULL)) {
+ pm_runtime_mark_last_busy(tabla->dev->parent);
+ pm_runtime_put(tabla->dev->parent);
+ }
}
}
return ret;
@@ -7300,14 +7312,13 @@
}
EXPORT_SYMBOL_GPL(tabla_hs_detect);
-static unsigned long slimbus_value;
-
static irqreturn_t tabla_slimbus_irq(int irq, void *data)
{
struct tabla_priv *priv = data;
struct snd_soc_codec *codec = priv->codec;
struct tabla_priv *tabla_p = snd_soc_codec_get_drvdata(codec);
int i, j, port_id, k, ch_mask_temp;
+ unsigned long slimbus_value;
u8 val;
for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++) {
@@ -7340,7 +7351,8 @@
}
}
wcd9xxx_interface_reg_write(codec->control_data,
- TABLA_SLIM_PGD_PORT_INT_CLR0 + i, 0xFF);
+ TABLA_SLIM_PGD_PORT_INT_CLR0 + i, slimbus_value);
+ val = 0x0;
}
return IRQ_HANDLED;
@@ -7578,6 +7590,11 @@
{TABLA_A_RX_LINE_3_GAIN, 0x10, 0x10},
{TABLA_A_RX_LINE_4_GAIN, 0x10, 0x10},
+ /* Set the MICBIAS default output as pull down*/
+ {TABLA_A_MICB_1_CTL, 0x01, 0x01},
+ {TABLA_A_MICB_2_CTL, 0x01, 0x01},
+ {TABLA_A_MICB_3_CTL, 0x01, 0x01},
+
/* Initialize mic biases to differential mode */
{TABLA_A_MICB_1_INT_RBIAS, 0x24, 0x24},
{TABLA_A_MICB_2_INT_RBIAS, 0x24, 0x24},
@@ -7633,11 +7650,16 @@
};
static const struct tabla_reg_mask_val tabla_1_x_codec_reg_init_val[] = {
+ /* Set the MICBIAS default output as pull down*/
+ {TABLA_1_A_MICB_4_CTL, 0x01, 0x01},
/* Initialize mic biases to differential mode */
{TABLA_1_A_MICB_4_INT_RBIAS, 0x24, 0x24},
};
static const struct tabla_reg_mask_val tabla_2_higher_codec_reg_init_val[] = {
+
+ /* Set the MICBIAS default output as pull down*/
+ {TABLA_2_A_MICB_4_CTL, 0x01, 0x01},
/* Initialize mic biases to differential mode */
{TABLA_2_A_MICB_4_INT_RBIAS, 0x24, 0x24},
};
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 67a5d8a..6da9166 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -1890,6 +1890,12 @@
return 0;
}
+static int taiko_codec_enable_spk_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ pr_debug("%s %d %s\n", __func__, event, w->name);
+ return 0;
+}
static int taiko_codec_enable_dmic(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
@@ -2891,6 +2897,13 @@
return 0;
}
+static int taiko_spk_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ pr_debug("%s %s %d\n", __func__, w->name, event);
+ return 0;
+}
+
static const struct snd_soc_dapm_route audio_i2s_map[] = {
{"RX_I2S_CLK", NULL, "CDC_CONN"},
{"SLIM RX1", NULL, "RX_I2S_CLK"},
@@ -3038,6 +3051,7 @@
{"LINEOUT2", NULL, "LINEOUT2 PA"},
{"LINEOUT3", NULL, "LINEOUT3 PA"},
{"LINEOUT4", NULL, "LINEOUT4 PA"},
+ {"SPK_OUT", NULL, "SPK PA"},
{"LINEOUT1 PA", NULL, "LINEOUT1_PA_MIXER"},
{"LINEOUT1_PA_MIXER", NULL, "LINEOUT1 DAC"},
@@ -3060,6 +3074,9 @@
{"RX6 DSM MUX", "CIC_OUT", "RX6 MIX1"},
{"LINEOUT4 DAC", NULL, "RX6 DSM MUX"},
+ {"SPK PA", NULL, "SPK DAC"},
+ {"SPK DAC", NULL, "RX7 MIX1"},
+
{"RX1 CHAIN", NULL, "RX1 MIX2"},
{"RX2 CHAIN", NULL, "RX2 MIX2"},
{"RX1 CHAIN", NULL, "ANC"},
@@ -3070,6 +3087,7 @@
{"LINEOUT2 DAC", NULL, "RX_BIAS"},
{"LINEOUT3 DAC", NULL, "RX_BIAS"},
{"LINEOUT4 DAC", NULL, "RX_BIAS"},
+ {"SPK DAC", NULL, "RX_BIAS"},
{"RX1 MIX1", NULL, "COMP1_CLK"},
{"RX2 MIX1", NULL, "COMP1_CLK"},
@@ -4192,6 +4210,7 @@
SND_SOC_DAPM_OUTPUT("LINEOUT2"),
SND_SOC_DAPM_OUTPUT("LINEOUT3"),
SND_SOC_DAPM_OUTPUT("LINEOUT4"),
+ SND_SOC_DAPM_OUTPUT("SPK_OUT"),
SND_SOC_DAPM_PGA_E("LINEOUT1 PA", TAIKO_A_RX_LINE_CNP_EN, 0, 0, NULL,
0, taiko_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU |
@@ -4205,6 +4224,9 @@
SND_SOC_DAPM_PGA_E("LINEOUT4 PA", TAIKO_A_RX_LINE_CNP_EN, 3, 0, NULL,
0, taiko_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("SPK PA", TAIKO_A_SPKR_DRV_EN, 7, 0 , NULL,
+ 0, taiko_codec_enable_spk_pa, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_DAC_E("LINEOUT1 DAC", NULL, TAIKO_A_RX_LINE_1_DAC_CTL, 7, 0
, taiko_lineout_dac_event,
@@ -4223,6 +4245,10 @@
SND_SOC_DAPM_SWITCH("LINEOUT4 DAC GROUND", SND_SOC_NOPM, 0, 0,
&lineout4_ground_switch),
+ SND_SOC_DAPM_DAC_E("SPK DAC", NULL, SND_SOC_NOPM, 0, 0,
+ taiko_spk_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
SND_SOC_DAPM_MIXER_E("RX1 MIX2", TAIKO_A_CDC_CLK_RX_B1_CTL, 0, 0, NULL,
0, taiko_codec_reset_interpolator, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMU),
@@ -6709,11 +6735,16 @@
struct taiko_priv *taiko;
int rc = 0;
- if (!codec || !cfg->calibration) {
- pr_err("Error: no codec or calibration\n");
+ if (!codec) {
+ pr_err("%s: no codec\n", __func__);
return -EINVAL;
}
+ if (!cfg->calibration) {
+ pr_warn("%s: mbhc is not configured\n", __func__);
+ return 0;
+ }
+
if (cfg->mclk_rate != TAIKO_MCLK_RATE_12288KHZ) {
if (cfg->mclk_rate == TAIKO_MCLK_RATE_9600KHZ)
pr_err("Error: clock rate %dHz is not yet supported\n",
@@ -7158,6 +7189,102 @@
};
#endif
+static int taiko_setup_irqs(struct taiko_priv *taiko)
+{
+ int ret;
+ int i;
+ struct snd_soc_codec *codec = taiko->codec;
+
+ ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_INSERTION,
+ taiko_hs_insert_irq, "Headset insert detect",
+ taiko);
+ if (ret) {
+ pr_err("%s: Failed to request irq %d\n", __func__,
+ TAIKO_IRQ_MBHC_INSERTION);
+ goto err_insert_irq;
+ }
+ wcd9xxx_disable_irq(codec->control_data, TAIKO_IRQ_MBHC_INSERTION);
+
+ ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_REMOVAL,
+ taiko_hs_remove_irq, "Headset remove detect",
+ taiko);
+ if (ret) {
+ pr_err("%s: Failed to request irq %d\n", __func__,
+ TAIKO_IRQ_MBHC_REMOVAL);
+ goto err_remove_irq;
+ }
+
+ ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_POTENTIAL,
+ taiko_dce_handler, "DC Estimation detect",
+ taiko);
+ if (ret) {
+ pr_err("%s: Failed to request irq %d\n", __func__,
+ TAIKO_IRQ_MBHC_POTENTIAL);
+ goto err_potential_irq;
+ }
+
+ ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_RELEASE,
+ taiko_release_handler, "Button Release detect",
+ taiko);
+ if (ret) {
+ pr_err("%s: Failed to request irq %d\n", __func__,
+ TAIKO_IRQ_MBHC_RELEASE);
+ goto err_release_irq;
+ }
+
+ ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_SLIMBUS,
+ taiko_slimbus_irq, "SLIMBUS Slave", taiko);
+ if (ret) {
+ pr_err("%s: Failed to request irq %d\n", __func__,
+ TAIKO_IRQ_SLIMBUS);
+ goto err_slimbus_irq;
+ }
+
+ for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++)
+ wcd9xxx_interface_reg_write(codec->control_data,
+ TAIKO_SLIM_PGD_PORT_INT_EN0 + i,
+ 0xFF);
+
+ ret = wcd9xxx_request_irq(codec->control_data,
+ TAIKO_IRQ_HPH_PA_OCPL_FAULT,
+ taiko_hphl_ocp_irq,
+ "HPH_L OCP detect", taiko);
+ if (ret) {
+ pr_err("%s: Failed to request irq %d\n", __func__,
+ TAIKO_IRQ_HPH_PA_OCPL_FAULT);
+ goto err_hphl_ocp_irq;
+ }
+ wcd9xxx_disable_irq(codec->control_data, TAIKO_IRQ_HPH_PA_OCPL_FAULT);
+
+ ret = wcd9xxx_request_irq(codec->control_data,
+ TAIKO_IRQ_HPH_PA_OCPR_FAULT,
+ taiko_hphr_ocp_irq,
+ "HPH_R OCP detect", taiko);
+ if (ret) {
+ pr_err("%s: Failed to request irq %d\n", __func__,
+ TAIKO_IRQ_HPH_PA_OCPR_FAULT);
+ goto err_hphr_ocp_irq;
+ }
+ wcd9xxx_disable_irq(codec->control_data, TAIKO_IRQ_HPH_PA_OCPR_FAULT);
+
+err_hphr_ocp_irq:
+ wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_HPH_PA_OCPL_FAULT,
+ taiko);
+err_hphl_ocp_irq:
+ wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_SLIMBUS, taiko);
+err_slimbus_irq:
+ wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_RELEASE, taiko);
+err_release_irq:
+ wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_POTENTIAL, taiko);
+err_potential_irq:
+ wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_REMOVAL, taiko);
+err_remove_irq:
+ wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_INSERTION, taiko);
+err_insert_irq:
+
+ return ret;
+}
+
static int taiko_codec_probe(struct snd_soc_codec *codec)
{
struct wcd9xxx *control;
@@ -7167,6 +7294,7 @@
int i;
int ch_cnt;
+
codec->control_data = dev_get_drvdata(codec->dev->parent);
control = codec->control_data;
@@ -7234,70 +7362,8 @@
snd_soc_dapm_sync(dapm);
- ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_INSERTION,
- taiko_hs_insert_irq, "Headset insert detect", taiko);
- if (ret) {
- pr_err("%s: Failed to request irq %d\n", __func__,
- TAIKO_IRQ_MBHC_INSERTION);
- goto err_insert_irq;
- }
- wcd9xxx_disable_irq(codec->control_data, TAIKO_IRQ_MBHC_INSERTION);
+ (void) taiko_setup_irqs(taiko);
- ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_REMOVAL,
- taiko_hs_remove_irq, "Headset remove detect", taiko);
- if (ret) {
- pr_err("%s: Failed to request irq %d\n", __func__,
- TAIKO_IRQ_MBHC_REMOVAL);
- goto err_remove_irq;
- }
-
- ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_POTENTIAL,
- taiko_dce_handler, "DC Estimation detect", taiko);
- if (ret) {
- pr_err("%s: Failed to request irq %d\n", __func__,
- TAIKO_IRQ_MBHC_POTENTIAL);
- goto err_potential_irq;
- }
-
- ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_RELEASE,
- taiko_release_handler, "Button Release detect", taiko);
- if (ret) {
- pr_err("%s: Failed to request irq %d\n", __func__,
- TAIKO_IRQ_MBHC_RELEASE);
- goto err_release_irq;
- }
-
- ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_SLIMBUS,
- taiko_slimbus_irq, "SLIMBUS Slave", taiko);
- if (ret) {
- pr_err("%s: Failed to request irq %d\n", __func__,
- TAIKO_IRQ_SLIMBUS);
- goto err_slimbus_irq;
- }
-
- for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++)
- wcd9xxx_interface_reg_write(codec->control_data,
- TAIKO_SLIM_PGD_PORT_INT_EN0 + i, 0xFF);
-
- ret = wcd9xxx_request_irq(codec->control_data,
- TAIKO_IRQ_HPH_PA_OCPL_FAULT, taiko_hphl_ocp_irq,
- "HPH_L OCP detect", taiko);
- if (ret) {
- pr_err("%s: Failed to request irq %d\n", __func__,
- TAIKO_IRQ_HPH_PA_OCPL_FAULT);
- goto err_hphl_ocp_irq;
- }
- wcd9xxx_disable_irq(codec->control_data, TAIKO_IRQ_HPH_PA_OCPL_FAULT);
-
- ret = wcd9xxx_request_irq(codec->control_data,
- TAIKO_IRQ_HPH_PA_OCPR_FAULT, taiko_hphr_ocp_irq,
- "HPH_R OCP detect", taiko);
- if (ret) {
- pr_err("%s: Failed to request irq %d\n", __func__,
- TAIKO_IRQ_HPH_PA_OCPR_FAULT);
- goto err_hphr_ocp_irq;
- }
- wcd9xxx_disable_irq(codec->control_data, TAIKO_IRQ_HPH_PA_OCPR_FAULT);
for (i = 0; i < ARRAY_SIZE(taiko_dai); i++) {
switch (taiko_dai[i].id) {
case AIF1_PB:
@@ -7338,20 +7404,6 @@
codec->ignore_pmdown_time = 1;
return ret;
-err_hphr_ocp_irq:
- wcd9xxx_free_irq(codec->control_data,
- TAIKO_IRQ_HPH_PA_OCPL_FAULT, taiko);
-err_hphl_ocp_irq:
- wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_SLIMBUS, taiko);
-err_slimbus_irq:
- wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_RELEASE, taiko);
-err_release_irq:
- wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_POTENTIAL, taiko);
-err_potential_irq:
- wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_REMOVAL, taiko);
-err_remove_irq:
- wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_INSERTION, taiko);
-err_insert_irq:
err_pdata:
mutex_destroy(&taiko->codec_resource_lock);
kfree(taiko);
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 9b60a56..894e114 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -153,13 +153,25 @@
help
To add support for SoC audio on MSM8960 and APQ8064 boards
+config AUDIO_OCMEM
+ bool "Enable OCMEM for audio/voice usecase"
+ depends on MSM_OCMEM
+ default n
+ help
+ To add support for on-chip memory use
+ for audio use cases on MSM8974.
+ OCMEM gets exercised for low-power
+ audio and voice use cases.
+
config SND_SOC_MSM8974
tristate "SoC Machine driver for MSM8974 boards"
depends on ARCH_MSM8974
select SND_SOC_QDSP6V2
select SND_SOC_MSM_STUB
select SND_SOC_MSM_HOSTLESS_PCM
+ select SND_SOC_WCD9320
select SND_DYNAMIC_MINORS
+ select AUDIO_OCMEM
help
To add support for SoC audio on MSM8974.
This will enable sound soc drivers which
diff --git a/sound/soc/msm/apq8064.c b/sound/soc/msm/apq8064.c
index a4bc2ef..a596f03 100644
--- a/sound/soc/msm/apq8064.c
+++ b/sound/soc/msm/apq8064.c
@@ -98,8 +98,6 @@
static struct clk *codec_clk;
static int clk_users;
-static int msm_headset_gpios_configured;
-
static struct snd_soc_jack hs_jack;
static struct snd_soc_jack button_jack;
@@ -720,10 +718,10 @@
struct snd_ctl_elem_value *ucontrol)
{
switch (ucontrol->value.integer.value[0]) {
- case 0:
+ case 8000:
msm_btsco_rate = BTSCO_RATE_8KHZ;
break;
- case 1:
+ case 16000:
msm_btsco_rate = BTSCO_RATE_16KHZ;
break;
default:
@@ -1154,8 +1152,9 @@
snd_soc_dapm_sync(dapm);
err = snd_soc_jack_new(codec, "Headset Jack",
- (SND_JACK_HEADSET | SND_JACK_OC_HPHL | SND_JACK_OC_HPHR),
- &hs_jack);
+ (SND_JACK_HEADSET | SND_JACK_OC_HPHL |
+ SND_JACK_OC_HPHR | SND_JACK_UNSUPPORTED),
+ &hs_jack);
if (err) {
pr_err("failed to create new jack\n");
return err;
@@ -1971,57 +1970,6 @@
static struct platform_device *msm_snd_device;
-static int msm_configure_headset_mic_gpios(void)
-{
- int ret;
- struct pm_gpio param = {
- .direction = PM_GPIO_DIR_OUT,
- .output_buffer = PM_GPIO_OUT_BUF_CMOS,
- .output_value = 1,
- .pull = PM_GPIO_PULL_NO,
- .vin_sel = PM_GPIO_VIN_S4,
- .out_strength = PM_GPIO_STRENGTH_MED,
- .function = PM_GPIO_FUNC_NORMAL,
- };
-
- ret = gpio_request(PM8921_GPIO_PM_TO_SYS(23), "AV_SWITCH");
- if (ret) {
- pr_err("%s: Failed to request gpio %d\n", __func__,
- PM8921_GPIO_PM_TO_SYS(23));
- return ret;
- }
-
- ret = pm8xxx_gpio_config(PM8921_GPIO_PM_TO_SYS(23), ¶m);
- if (ret)
- pr_err("%s: Failed to configure gpio %d\n", __func__,
- PM8921_GPIO_PM_TO_SYS(23));
- else
- gpio_direction_output(PM8921_GPIO_PM_TO_SYS(23), 0);
-
- ret = gpio_request(PM8921_GPIO_PM_TO_SYS(35), "US_EURO_SWITCH");
- if (ret) {
- pr_err("%s: Failed to request gpio %d\n", __func__,
- PM8921_GPIO_PM_TO_SYS(35));
- gpio_free(PM8921_GPIO_PM_TO_SYS(23));
- return ret;
- }
- ret = pm8xxx_gpio_config(PM8921_GPIO_PM_TO_SYS(35), ¶m);
- if (ret)
- pr_err("%s: Failed to configure gpio %d\n", __func__,
- PM8921_GPIO_PM_TO_SYS(35));
- else
- gpio_direction_output(PM8921_GPIO_PM_TO_SYS(35), 0);
-
- return 0;
-}
-static void msm_free_headset_mic_gpios(void)
-{
- if (msm_headset_gpios_configured) {
- gpio_free(PM8921_GPIO_PM_TO_SYS(23));
- gpio_free(PM8921_GPIO_PM_TO_SYS(35));
- }
-}
-
static int __init msm_audio_init(void)
{
int ret;
@@ -2052,12 +2000,6 @@
return ret;
}
- if (msm_configure_headset_mic_gpios()) {
- pr_err("%s Fail to configure headset mic gpios\n", __func__);
- msm_headset_gpios_configured = 0;
- } else
- msm_headset_gpios_configured = 1;
-
mutex_init(&cdc_mclk_mutex);
return ret;
@@ -2070,7 +2012,6 @@
pr_err("%s: Not the right machine type\n", __func__);
return ;
}
- msm_free_headset_mic_gpios();
platform_device_unregister(msm_snd_device);
if (mbhc_cfg.gpio)
gpio_free(mbhc_cfg.gpio);
diff --git a/sound/soc/msm/mdm9615.c b/sound/soc/msm/mdm9615.c
index dbe5d00..b80a0a9 100644
--- a/sound/soc/msm/mdm9615.c
+++ b/sound/soc/msm/mdm9615.c
@@ -733,10 +733,10 @@
struct snd_ctl_elem_value *ucontrol)
{
switch (ucontrol->value.integer.value[0]) {
- case 0:
+ case 8000:
mdm9615_btsco_rate = SAMPLE_RATE_8KHZ;
break;
- case 1:
+ case 16000:
mdm9615_btsco_rate = SAMPLE_RATE_16KHZ;
break;
default:
diff --git a/sound/soc/msm/mpq8064.c b/sound/soc/msm/mpq8064.c
index 6685ce5..f5bbf56 100644
--- a/sound/soc/msm/mpq8064.c
+++ b/sound/soc/msm/mpq8064.c
@@ -1165,8 +1165,6 @@
.platform_name = "msm-pcm-afe",
.ignore_suspend = 1,
.ignore_pmdown_time = 1, /* this dainlink has playback support */
- .codec_dai_name = "snd-soc-dummy-dai",
- .codec_name = "snd-soc-dummy",
},
{
.name = "MSM AFE-PCM TX",
@@ -1176,8 +1174,6 @@
.codec_dai_name = "msm-stub-tx",
.platform_name = "msm-pcm-afe",
.ignore_suspend = 1,
- .codec_dai_name = "snd-soc-dummy-dai",
- .codec_name = "snd-soc-dummy",
},
{
.name = "MSM8960 Compr1",
diff --git a/sound/soc/msm/msm-compr-q6.c b/sound/soc/msm/msm-compr-q6.c
index c894921..35cbb5b 100644
--- a/sound/soc/msm/msm-compr-q6.c
+++ b/sound/soc/msm/msm-compr-q6.c
@@ -38,8 +38,9 @@
/* Allocate the worst case frame size for compressed audio */
#define COMPRE_CAPTURE_HEADER_SIZE (sizeof(struct snd_compr_audio_info))
#define COMPRE_CAPTURE_MAX_FRAME_SIZE (6144)
-#define COMPRE_CAPTURE_PERIOD_SIZE (COMPRE_CAPTURE_MAX_FRAME_SIZE + \
- COMPRE_CAPTURE_HEADER_SIZE)
+#define COMPRE_CAPTURE_PERIOD_SIZE ((COMPRE_CAPTURE_MAX_FRAME_SIZE + \
+ COMPRE_CAPTURE_HEADER_SIZE) * \
+ MAX_NUM_FRAMES_PER_BUFFER)
struct snd_msm {
struct msm_audio *prtd;
@@ -207,6 +208,31 @@
q6asm_async_read(prtd->audio_client, &read_param);
break;
}
+ case ASM_DATA_EVENT_READ_COMPRESSED_DONE: {
+ pr_debug("ASM_DATA_EVENT_READ_COMPRESSED_DONE\n");
+ pr_debug("buf = %p, data = 0x%X, *data = %p,\n"
+ "prtd->pcm_irq_pos = %d\n",
+ prtd->audio_client->port[OUT].buf,
+ *(uint32_t *)prtd->audio_client->port[OUT].buf->data,
+ prtd->audio_client->port[OUT].buf->data,
+ prtd->pcm_irq_pos);
+
+ if (!atomic_read(&prtd->start))
+ break;
+ buf = prtd->audio_client->port[OUT].buf;
+ pr_debug("pcm_irq_pos=%d, buf[0].phys = 0x%X\n",
+ prtd->pcm_irq_pos, (uint32_t)buf[0].phys);
+ read_param.len = prtd->pcm_count;
+ read_param.paddr = (unsigned long)(buf[0].phys) +
+ prtd->pcm_irq_pos;
+ prtd->pcm_irq_pos += prtd->pcm_count;
+
+ if (atomic_read(&prtd->start))
+ snd_pcm_period_elapsed(substream);
+
+ q6asm_async_read_compressed(prtd->audio_client, &read_param);
+ break;
+ }
case APR_BASIC_RSP_RESULT: {
switch (payload[0]) {
case ASM_SESSION_CMD_RUN: {
@@ -392,7 +418,7 @@
if (prtd->enabled)
return ret;
- read_param.len = prtd->pcm_count - COMPRE_CAPTURE_HEADER_SIZE;
+ read_param.len = prtd->pcm_count;
pr_debug("%s: Samp_rate = %d, Channel = %d, pcm_size = %d,\n"
"pcm_count = %d, periods = %d\n",
__func__, prtd->samp_rate, prtd->channel_mode,
@@ -400,9 +426,8 @@
for (i = 0; i < runtime->periods; i++) {
read_param.uid = i;
- read_param.paddr = ((unsigned long)(buf[i].phys) +
- COMPRE_CAPTURE_HEADER_SIZE);
- q6asm_async_read(prtd->audio_client, &read_param);
+ read_param.paddr = (unsigned long)(buf[i].phys);
+ q6asm_async_read_compressed(prtd->audio_client, &read_param);
}
prtd->periods = runtime->periods;
@@ -749,7 +774,8 @@
}
} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
ret = q6asm_open_read_compressed(prtd->audio_client,
- compr->codec);
+ MAX_NUM_FRAMES_PER_BUFFER,
+ COMPRESSED_META_DATA_MODE);
if (ret < 0) {
pr_err("%s: compressed Session out open failed\n",
diff --git a/sound/soc/msm/msm-dai-q6.c b/sound/soc/msm/msm-dai-q6.c
index fb74c0a..89b709f 100644
--- a/sound/soc/msm/msm-dai-q6.c
+++ b/sound/soc/msm/msm-dai-q6.c
@@ -1005,6 +1005,7 @@
case VOICE_RECORD_TX:
case VOICE_RECORD_RX:
rc = afe_start_pseudo_port(dai->id);
+ break;
default:
rc = afe_port_start(dai->id, &dai_data->port_config,
dai_data->rate);
diff --git a/sound/soc/msm/msm-pcm-q6.c b/sound/soc/msm/msm-pcm-q6.c
index 168cf97..942c3ea 100644
--- a/sound/soc/msm/msm-pcm-q6.c
+++ b/sound/soc/msm/msm-pcm-q6.c
@@ -333,21 +333,20 @@
kfree(prtd);
return -ENOMEM;
}
+
+ pr_debug("%s: session ID %d\n", __func__,
+ prtd->audio_client->session);
+ prtd->session_id = prtd->audio_client->session;
+ msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
+ prtd->session_id, substream->stream);
+ prtd->cmd_ack = 1;
+
}
/* Capture path */
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
runtime->hw = msm_pcm_hardware_capture;
}
- pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session);
-
- prtd->session_id = prtd->audio_client->session;
- msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
- prtd->session_id, substream->stream);
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- prtd->cmd_ack = 1;
-
ret = snd_pcm_hw_constraint_list(runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
&constraints_sample_rates);
@@ -622,6 +621,7 @@
struct snd_pcm_runtime *runtime = substream->runtime;
struct msm_audio *prtd = runtime->private_data;
struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+ struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
struct audio_buffer *buf;
int dir, ret;
int format = FORMAT_LINEAR_PCM;
@@ -644,6 +644,12 @@
prtd->audio_client = NULL;
return -ENOMEM;
}
+
+ pr_debug("%s: session ID %d\n", __func__,
+ prtd->audio_client->session);
+ prtd->session_id = prtd->audio_client->session;
+ msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
+ prtd->session_id, substream->stream);
}
ret = q6asm_audio_client_buf_alloc_contiguous(dir,
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index 7e8e282..da3d335 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -65,6 +65,10 @@
static const DECLARE_TLV_DB_LINEAR(multimedia2_rx_vol_gain, 0,
INT_RX_VOL_MAX_STEPS);
+static int msm_route_multimedia5_vol_control;
+static const DECLARE_TLV_DB_LINEAR(multimedia5_rx_vol_gain, 0,
+ INT_RX_VOL_MAX_STEPS);
+
static int msm_route_compressed_vol_control;
static const DECLARE_TLV_DB_LINEAR(compressed_rx_vol_gain, 0,
INT_RX_VOL_MAX_STEPS);
@@ -798,6 +802,25 @@
return 0;
}
+static int msm_routing_get_multimedia5_vol_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+
+ ucontrol->value.integer.value[0] = msm_route_multimedia5_vol_control;
+ return 0;
+}
+
+static int msm_routing_set_multimedia5_vol_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+
+ if (!multi_ch_pcm_set_volume(ucontrol->value.integer.value[0]))
+ msm_route_multimedia5_vol_control =
+ ucontrol->value.integer.value[0];
+
+ return 0;
+}
+
static int msm_routing_get_compressed_vol_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1737,6 +1760,12 @@
msm_routing_set_multimedia2_vol_mixer, multimedia2_rx_vol_gain),
};
+static const struct snd_kcontrol_new multimedia5_vol_mixer_controls[] = {
+ SOC_SINGLE_EXT_TLV("HIFI3 RX Volume", SND_SOC_NOPM, 0,
+ INT_RX_VOL_GAIN, 0, msm_routing_get_multimedia5_vol_mixer,
+ msm_routing_set_multimedia5_vol_mixer, multimedia5_rx_vol_gain),
+};
+
static const struct snd_kcontrol_new compressed_vol_mixer_controls[] = {
SOC_SINGLE_EXT_TLV("COMPRESSED RX Volume", SND_SOC_NOPM, 0,
INT_RX_VOL_GAIN, 0, msm_routing_get_compressed_vol_mixer,
@@ -2470,6 +2499,9 @@
{"SEC_AUX_PCM_TX", NULL, "BE_IN"},
{"BE_OUT", NULL, "AUX_PCM_RX"},
{"AUX_PCM_TX", NULL, "BE_IN"},
+ {"INCALL_RECORD_TX", NULL, "BE_IN"},
+ {"INCALL_RECORD_RX", NULL, "BE_IN"},
+ {"BE_OUT", NULL, "VOICE_PLAYBACK_TX"},
};
static int msm_pcm_routing_hw_params(struct snd_pcm_substream *substream,
@@ -2645,6 +2677,10 @@
ARRAY_SIZE(multimedia2_vol_mixer_controls));
snd_soc_add_platform_controls(platform,
+ multimedia5_vol_mixer_controls,
+ ARRAY_SIZE(multimedia5_vol_mixer_controls));
+
+ snd_soc_add_platform_controls(platform,
compressed_vol_mixer_controls,
ARRAY_SIZE(compressed_vol_mixer_controls));
diff --git a/sound/soc/msm/msm-pcm-voip.c b/sound/soc/msm/msm-pcm-voip.c
index b18117c..359414b 100644
--- a/sound/soc/msm/msm-pcm-voip.c
+++ b/sound/soc/msm/msm-pcm-voip.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -531,6 +531,7 @@
list_first_entry(&prtd->free_in_queue,
struct voip_buf_node, list);
list_del(&buf_node->list);
+ spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
if (prtd->mode == MODE_PCM) {
ret = copy_from_user(&buf_node->frame.voc_pkt,
buf, count);
@@ -538,6 +539,7 @@
} else
ret = copy_from_user(&buf_node->frame,
buf, count);
+ spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
list_add_tail(&buf_node->list, &prtd->in_queue);
spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
} else {
@@ -582,6 +584,7 @@
buf_node = list_first_entry(&prtd->out_queue,
struct voip_buf_node, list);
list_del(&buf_node->list);
+ spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
if (prtd->mode == MODE_PCM)
ret = copy_to_user(buf,
&buf_node->frame.voc_pkt,
@@ -595,6 +598,7 @@
__func__, ret);
ret = -EFAULT;
}
+ spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags);
list_add_tail(&buf_node->list,
&prtd->free_out_queue);
spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
diff --git a/sound/soc/msm/msm8930.c b/sound/soc/msm/msm8930.c
index a0cad55..e86db10 100644
--- a/sound/soc/msm/msm8930.c
+++ b/sound/soc/msm/msm8930.c
@@ -422,11 +422,12 @@
static int msm8930_btsco_rate_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
+
switch (ucontrol->value.integer.value[0]) {
- case 0:
+ case 8000:
msm8930_btsco_rate = BTSCO_RATE_8KHZ;
break;
- case 1:
+ case 16000:
msm8930_btsco_rate = BTSCO_RATE_16KHZ;
break;
default:
@@ -877,7 +878,7 @@
.name = "MSM8930 Media2",
.stream_name = "MultiMedia2",
.cpu_dai_name = "MultiMedia2",
- .platform_name = "msm-pcm-dsp",
+ .platform_name = "msm-multi-ch-pcm-dsp",
.dynamic = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
diff --git a/sound/soc/msm/msm8960.c b/sound/soc/msm/msm8960.c
index 5f8a63e..b10a7ea 100644
--- a/sound/soc/msm/msm8960.c
+++ b/sound/soc/msm/msm8960.c
@@ -609,10 +609,10 @@
struct snd_ctl_elem_value *ucontrol)
{
switch (ucontrol->value.integer.value[0]) {
- case 0:
+ case 8000:
msm8960_btsco_rate = SAMPLE_RATE_8KHZ;
break;
- case 1:
+ case 16000:
msm8960_btsco_rate = SAMPLE_RATE_16KHZ;
break;
default:
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index b110250..3516022 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -26,7 +26,7 @@
#include <asm/mach-types.h>
#include <mach/socinfo.h>
#include <qdsp6v2/msm-pcm-routing-v2.h>
-#include "../codecs/wcd9310.h"
+#include "../codecs/wcd9320.h"
/* 8974 machine driver */
@@ -81,21 +81,7 @@
static struct snd_soc_jack hs_jack;
static struct snd_soc_jack button_jack;
-static int msm_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
- bool dapm);
-
-static struct tabla_mbhc_config mbhc_cfg = {
- .headset_jack = &hs_jack,
- .button_jack = &button_jack,
- .read_fw_bin = false,
- .calibration = NULL,
- .micbias = TABLA_MICBIAS2,
- .mclk_cb_fn = msm_enable_codec_ext_clk,
- .mclk_rate = TABLA_EXT_CLK_RATE,
- .gpio = 0, /* MBHC GPIO is not configured */
- .gpio_irq = 0,
- .gpio_level_insert = 1,
-};
+static struct mutex cdc_mclk_mutex;
static void msm_enable_ext_spk_amp_gpio(u32 spk_amp_gpio)
{
@@ -244,6 +230,8 @@
{
struct snd_soc_dapm_context *dapm = &codec->dapm;
+ mutex_lock(&dapm->codec->mutex);
+
pr_debug("%s: msm_spk_control = %d", __func__, msm_spk_control);
if (msm_spk_control == MSM8974_SPK_ON) {
snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos");
@@ -258,17 +246,19 @@
}
snd_soc_dapm_sync(dapm);
+ mutex_unlock(&dapm->codec->mutex);
}
static int msm_get_spk(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_ctl_elem_value *ucontrol)
{
pr_debug("%s: msm_spk_control = %d", __func__, msm_spk_control);
ucontrol->value.integer.value[0] = msm_spk_control;
return 0;
}
+
static int msm_set_spk(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
@@ -280,8 +270,9 @@
msm_ext_control(codec);
return 1;
}
+
static int msm_spkramp_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *k, int event)
+ struct snd_kcontrol *k, int event)
{
pr_debug("%s() %x\n", __func__, SND_SOC_DAPM_EVENT_ON(event));
@@ -318,12 +309,6 @@
return 0;
}
-static int msm_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
- bool dapm)
-{
- return 0;
-}
-
static int msm_mclk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -453,14 +438,14 @@
static const char *const btsco_rate_text[] = {"8000", "16000"};
static const struct soc_enum msm_btsco_enum[] = {
- SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
+ SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
};
static int msm_slim_0_rx_ch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
pr_debug("%s: msm_slim_0_rx_ch = %d\n", __func__,
- msm_slim_0_rx_ch);
+ msm_slim_0_rx_ch);
ucontrol->value.integer.value[0] = msm_slim_0_rx_ch - 1;
return 0;
}
@@ -471,7 +456,7 @@
msm_slim_0_rx_ch = ucontrol->value.integer.value[0] + 1;
pr_debug("%s: msm_slim_0_rx_ch = %d\n", __func__,
- msm_slim_0_rx_ch);
+ msm_slim_0_rx_ch);
return 1;
}
@@ -479,7 +464,7 @@
struct snd_ctl_elem_value *ucontrol)
{
pr_debug("%s: msm_slim_0_tx_ch = %d\n", __func__,
- msm_slim_0_tx_ch);
+ msm_slim_0_tx_ch);
ucontrol->value.integer.value[0] = msm_slim_0_tx_ch - 1;
return 0;
}
@@ -489,16 +474,14 @@
{
msm_slim_0_tx_ch = ucontrol->value.integer.value[0] + 1;
- pr_debug("%s: msm_slim_0_tx_ch = %d\n", __func__,
- msm_slim_0_tx_ch);
+ pr_debug("%s: msm_slim_0_tx_ch = %d\n", __func__, msm_slim_0_tx_ch);
return 1;
}
static int msm_btsco_rate_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- pr_debug("%s: msm_btsco_rate = %d", __func__,
- msm_btsco_rate);
+ pr_debug("%s: msm_btsco_rate = %d", __func__, msm_btsco_rate);
ucontrol->value.integer.value[0] = msm_btsco_rate;
return 0;
}
@@ -517,33 +500,23 @@
msm_btsco_rate = BTSCO_RATE_8KHZ;
break;
}
- pr_debug("%s: msm_btsco_rate = %d\n", __func__,
- msm_btsco_rate);
+ pr_debug("%s: msm_btsco_rate = %d\n", __func__, msm_btsco_rate);
return 0;
}
-static const struct snd_kcontrol_new tabla_msm_controls[] = {
- SOC_ENUM_EXT("Speaker Function", msm_enum[0], msm_get_spk,
- msm_set_spk),
- SOC_ENUM_EXT("SLIM_0_RX Channels", msm_enum[1],
- msm_slim_0_rx_ch_get, msm_slim_0_rx_ch_put),
- SOC_ENUM_EXT("SLIM_0_TX Channels", msm_enum[2],
- msm_slim_0_tx_ch_get, msm_slim_0_tx_ch_put),
-};
-
static const struct snd_kcontrol_new int_btsco_rate_mixer_controls[] = {
SOC_ENUM_EXT("Internal BTSCO SampleRate", msm_btsco_enum[0],
- msm_btsco_rate_get, msm_btsco_rate_put),
+ msm_btsco_rate_get, msm_btsco_rate_put),
};
static int msm_auxpcm_be_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
- struct snd_interval *rate = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *rate =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
- struct snd_interval *channels = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_interval *channels =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
/* PCM only supports mono output with 8khz sample rate */
rate->min = rate->max = 8000;
@@ -629,6 +602,193 @@
.startup = msm_auxpcm_startup,
.shutdown = msm_auxpcm_shutdown,
};
+
+static int msm_slim_0_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+
+ struct snd_interval *channels =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ pr_debug("%s()\n", __func__);
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = msm_slim_0_rx_ch;
+
+ return 0;
+}
+
+static int msm_slim_0_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ pr_debug("%s()\n", __func__);
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = msm_slim_0_tx_ch;
+
+ return 0;
+}
+
+static const struct soc_enum msm_snd_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, spk_function),
+ SOC_ENUM_SINGLE_EXT(2, slim0_rx_ch_text),
+ SOC_ENUM_SINGLE_EXT(4, slim0_tx_ch_text),
+};
+
+static const struct snd_kcontrol_new msm_snd_controls[] = {
+ SOC_ENUM_EXT("Speaker Function", msm_snd_enum[0], msm_get_spk,
+ msm_set_spk),
+ SOC_ENUM_EXT("SLIM_0_RX Channels", msm_snd_enum[1],
+ msm_slim_0_rx_ch_get, msm_slim_0_rx_ch_put),
+ SOC_ENUM_EXT("SLIM_0_TX Channels", msm_snd_enum[2],
+ msm_slim_0_tx_ch_get, msm_slim_0_tx_ch_put),
+};
+
+static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int err;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+
+ pr_info("%s(), dev_name%s\n", __func__, dev_name(cpu_dai->dev));
+
+ if (machine_is_msm8960_liquid()) {
+ top_spk_pamp_gpio = (PM8921_GPIO_PM_TO_SYS(19));
+ bottom_spk_pamp_gpio = (PM8921_GPIO_PM_TO_SYS(18));
+ }
+
+ rtd->pmdown_time = 0;
+
+ err = snd_soc_add_codec_controls(codec, msm_snd_controls,
+ ARRAY_SIZE(msm_snd_controls));
+ if (err < 0)
+ return err;
+
+ snd_soc_dapm_new_controls(dapm, msm_dapm_widgets,
+ ARRAY_SIZE(msm_dapm_widgets));
+
+ snd_soc_dapm_add_routes(dapm, common_audio_map,
+ ARRAY_SIZE(common_audio_map));
+
+ snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos");
+ snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Neg");
+ snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Pos");
+ snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Neg");
+
+ snd_soc_dapm_sync(dapm);
+
+ err = snd_soc_jack_new(codec, "Headset Jack",
+ (SND_JACK_HEADSET | SND_JACK_OC_HPHL |
+ SND_JACK_OC_HPHR | SND_JACK_UNSUPPORTED),
+ &hs_jack);
+ if (err) {
+ pr_err("failed to create new jack\n");
+ return err;
+ }
+
+ err = snd_soc_jack_new(codec, "Button Jack",
+ TAIKO_JACK_BUTTON_MASK, &button_jack);
+ if (err) {
+ pr_err("failed to create new jack\n");
+ return err;
+ }
+
+ return err;
+}
+
+static int msm_snd_startup(struct snd_pcm_substream *substream)
+{
+ pr_debug("%s(): substream = %s stream = %d\n", __func__,
+ substream->name, substream->stream);
+ return 0;
+}
+
+static int msm_snd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int ret = 0;
+ unsigned int rx_ch[SLIM_MAX_RX_PORTS], tx_ch[SLIM_MAX_TX_PORTS];
+ unsigned int rx_ch_cnt = 0, tx_ch_cnt = 0;
+ unsigned int user_set_tx_ch = 0;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ pr_debug("%s: rx_0_ch=%d\n", __func__, msm_slim_0_rx_ch);
+ ret = snd_soc_dai_get_channel_map(codec_dai,
+ &tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch);
+ if (ret < 0) {
+ pr_err("%s: failed to get codec chan map\n", __func__);
+ goto end;
+ }
+
+ ret = snd_soc_dai_set_channel_map(cpu_dai, 0, 0,
+ msm_slim_0_rx_ch, rx_ch);
+ if (ret < 0) {
+ pr_err("%s: failed to set cpu chan map\n", __func__);
+ goto end;
+ }
+ ret = snd_soc_dai_set_channel_map(codec_dai, 0, 0,
+ msm_slim_0_rx_ch, rx_ch);
+ if (ret < 0) {
+ pr_err("%s: failed to set codec channel map\n",
+ __func__);
+ goto end;
+ }
+ } else {
+
+ if (codec_dai->id == 2)
+ user_set_tx_ch = msm_slim_0_tx_ch;
+ else if (codec_dai->id == 4)
+ user_set_tx_ch = params_channels(params);
+
+ pr_debug("%s: %s_tx_dai_id_%d_ch=%d\n", __func__,
+ codec_dai->name, codec_dai->id, user_set_tx_ch);
+
+ ret = snd_soc_dai_get_channel_map(codec_dai,
+ &tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch);
+ if (ret < 0) {
+ pr_err("%s: failed to get codec chan map\n", __func__);
+ goto end;
+ }
+ ret = snd_soc_dai_set_channel_map(cpu_dai,
+ user_set_tx_ch, tx_ch, 0 , 0);
+ if (ret < 0) {
+ pr_err("%s: failed to set cpu chan map\n", __func__);
+ goto end;
+ }
+ ret = snd_soc_dai_set_channel_map(codec_dai,
+ user_set_tx_ch, tx_ch, 0, 0);
+ if (ret < 0) {
+ pr_err("%s: failed to set codec channel map\n",
+ __func__);
+ goto end;
+ }
+ }
+end:
+ return ret;
+}
+
+static void msm_snd_shutdown(struct snd_pcm_substream *substream)
+{
+ pr_debug("%s(): substream = %s stream = %d\n", __func__,
+ substream->name, substream->stream);
+}
+
+static struct snd_soc_ops msm8974_be_ops = {
+ .startup = msm_snd_startup,
+ .hw_params = msm_snd_hw_params,
+ .shutdown = msm_snd_shutdown,
+};
+
/* Digital audio interface glue - connects codec <---> CPU */
static struct snd_soc_dai_link msm_dai[] = {
/* FrontEnd DAI Links */
@@ -678,6 +838,21 @@
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA3,
},
{
+ .name = "MSM8974 Compr",
+ .stream_name = "COMPR",
+ .cpu_dai_name = "MultiMedia4",
+ .platform_name = "msm-compr-dsp",
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ /* this dainlink has playback support */
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA4,
+ },
+ {
.name = "AUXPCM Hostless",
.stream_name = "AUXPCM Hostless",
.cpu_dai_name = "AUXPCM_HOSTLESS",
@@ -719,7 +894,33 @@
.be_id = MSM_BACKEND_DAI_AUXPCM_TX,
.be_hw_params_fixup = msm_auxpcm_be_params_fixup,
},
-
+ /* Backend DAI Links */
+ {
+ .name = LPASS_BE_SLIMBUS_0_RX,
+ .stream_name = "Slimbus Playback",
+ .cpu_dai_name = "msm-dai-q6-dev.16384",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "taiko_codec",
+ .codec_dai_name = "taiko_rx1",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ .init = &msm_audrx_init,
+ .be_hw_params_fixup = msm_slim_0_rx_be_hw_params_fixup,
+ .ops = &msm8974_be_ops,
+ .ignore_pmdown_time = 1, /* dai link has playback support */
+ },
+ {
+ .name = LPASS_BE_SLIMBUS_0_TX,
+ .stream_name = "Slimbus Capture",
+ .cpu_dai_name = "msm-dai-q6-dev.16385",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "taiko_codec",
+ .codec_dai_name = "taiko_tx1",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ .be_hw_params_fixup = msm_slim_0_tx_be_hw_params_fixup,
+ .ops = &msm8974_be_ops,
+ },
};
struct snd_soc_card snd_soc_card_msm = {
@@ -741,6 +942,8 @@
static int __init msm_audio_init(void)
{
int ret = 0;
+
+ mutex_init(&cdc_mclk_mutex);
if (!machine_is_msm8974_sim()) {
pr_err("%s: Not the right machine type\n", __func__);
return -ENODEV;
@@ -748,7 +951,6 @@
msm_snd_device = platform_device_alloc("soc-audio", 0);
if (!msm_snd_device) {
pr_err("Platform device allocation failed\n");
- kfree(mbhc_cfg.calibration);
return -ENOMEM;
}
@@ -756,7 +958,6 @@
ret = platform_device_add(msm_snd_device);
if (ret) {
platform_device_put(msm_snd_device);
- kfree(mbhc_cfg.calibration);
return ret;
}
return ret;
@@ -772,7 +973,6 @@
}
msm_free_headset_mic_gpios();
platform_device_unregister(msm_snd_device);
- kfree(mbhc_cfg.calibration);
}
module_exit(msm_audio_exit);
diff --git a/sound/soc/msm/qdsp6/q6afe.c b/sound/soc/msm/qdsp6/q6afe.c
index 2f6772d..4c0ac9e 100644
--- a/sound/soc/msm/qdsp6/q6afe.c
+++ b/sound/soc/msm/qdsp6/q6afe.c
@@ -799,13 +799,14 @@
lp_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM;
lp_cfg.port_id = src_port;
- lp_cfg.payload_size = sizeof(struct afe_param_payload);
+ lp_cfg.payload_size = sizeof(struct afe_param_payload_base) +
+ sizeof(struct afe_param_loopback_cfg);
lp_cfg.payload_address = 0;
- lp_cfg.payload.module_id = AFE_MODULE_LOOPBACK;
- lp_cfg.payload.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
- lp_cfg.payload.param_size = sizeof(struct afe_param_loopback_cfg);
- lp_cfg.payload.reserved = 0;
+ lp_cfg.payload.base.module_id = AFE_MODULE_LOOPBACK;
+ lp_cfg.payload.base.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
+ lp_cfg.payload.base.param_size = sizeof(struct afe_param_loopback_cfg);
+ lp_cfg.payload.base.reserved = 0;
lp_cfg.payload.param.loopback_cfg.loopback_cfg_minor_version =
AFE_API_VERSION_LOOPBACK_CONFIG;
@@ -879,13 +880,15 @@
set_param.hdr.opcode = AFE_PORT_CMD_SET_PARAM;
set_param.port_id = port_id;
- set_param.payload_size = sizeof(struct afe_param_payload);
+ set_param.payload_size = sizeof(struct afe_param_payload_base) +
+ sizeof(struct afe_param_loopback_gain);
set_param.payload_address = 0;
- set_param.payload.module_id = AFE_MODULE_ID_PORT_INFO;
- set_param.payload.param_id = AFE_PARAM_ID_LOOPBACK_GAIN;
- set_param.payload.param_size = sizeof(struct afe_param_loopback_gain);
- set_param.payload.reserved = 0;
+ set_param.payload.base.module_id = AFE_MODULE_ID_PORT_INFO;
+ set_param.payload.base.param_id = AFE_PARAM_ID_LOOPBACK_GAIN;
+ set_param.payload.base.param_size =
+ sizeof(struct afe_param_loopback_gain);
+ set_param.payload.base.reserved = 0;
set_param.payload.param.loopback_gain.gain = volume;
set_param.payload.param.loopback_gain.reserved = 0;
diff --git a/sound/soc/msm/qdsp6/q6asm.c b/sound/soc/msm/qdsp6/q6asm.c
index 59c390e..06be186 100644
--- a/sound/soc/msm/qdsp6/q6asm.c
+++ b/sound/soc/msm/qdsp6/q6asm.c
@@ -805,6 +805,7 @@
uint32_t token;
unsigned long dsp_flags;
uint32_t *payload;
+ uint32_t wakeup_flag = 1;
if ((ac == NULL) || (data == NULL)) {
@@ -816,7 +817,13 @@
ac->session);
return -EINVAL;
}
-
+ if (atomic_read(&ac->nowait_cmd_cnt) > 0) {
+ pr_debug("%s: nowait_cmd_cnt %d\n",
+ __func__,
+ atomic_read(&ac->nowait_cmd_cnt));
+ atomic_dec(&ac->nowait_cmd_cnt);
+ wakeup_flag = 0;
+ }
payload = data->payload;
if (data->opcode == RESET_EVENTS) {
@@ -862,7 +869,7 @@
case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
case ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED:
case ASM_STREAM_CMD_OPEN_READ_COMPRESSED:
- if (atomic_read(&ac->cmd_state)) {
+ if (atomic_read(&ac->cmd_state) && wakeup_flag) {
atomic_set(&ac->cmd_state, 0);
if (payload[1] == ADSP_EUNSUPPORTED)
atomic_set(&ac->cmd_response, 1);
@@ -1269,7 +1276,8 @@
return -EINVAL;
}
-int q6asm_open_read_compressed(struct audio_client *ac, uint32_t format)
+int q6asm_open_read_compressed(struct audio_client *ac,
+ uint32_t frames_per_buffer, uint32_t meta_data_mode)
{
int rc = 0x00;
struct asm_stream_cmd_open_read_compressed open;
@@ -1285,8 +1293,8 @@
q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_COMPRESSED;
/* hardcoded as following*/
- open.frame_per_buf = 1;
- open.uMode = 0;
+ open.frame_per_buf = frames_per_buffer;
+ open.uMode = meta_data_mode;
rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
if (rc < 0) {
@@ -1618,12 +1626,12 @@
run.flags = flags;
run.msw_ts = msw_ts;
run.lsw_ts = lsw_ts;
-
rc = apr_send_pkt(ac->apr, (uint32_t *) &run);
if (rc < 0) {
pr_err("%s:Commmand run failed[%d]", __func__, rc);
return -EINVAL;
}
+ atomic_inc(&ac->nowait_cmd_cnt);
return 0;
}
@@ -3259,6 +3267,40 @@
return -EINVAL;
}
+int q6asm_async_read_compressed(struct audio_client *ac,
+ struct audio_aio_read_param *param)
+{
+ int rc = 0;
+ struct asm_stream_cmd_read read;
+
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE);
+
+ /* Pass physical address as token for AIO scheme */
+ read.hdr.token = param->paddr;
+ read.hdr.opcode = ASM_DATA_CMD_READ_COMPRESSED;
+ read.buf_add = param->paddr;
+ read.buf_size = param->len;
+ read.uid = param->uid;
+
+ pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session,
+ read.buf_add, read.buf_size);
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
+ if (rc < 0) {
+ pr_debug("[%s] read op[0x%x]rc[%d]\n", __func__,
+ read.hdr.opcode, rc);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
uint32_t lsw_ts, uint32_t flags)
{
@@ -3541,11 +3583,13 @@
pr_debug("%s:session[%d]opcode[0x%x] ", __func__,
ac->session,
hdr.opcode);
+
rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
if (rc < 0) {
pr_err("%s:Commmand 0x%x failed\n", __func__, hdr.opcode);
goto fail_cmd;
}
+ atomic_inc(&ac->nowait_cmd_cnt);
return 0;
fail_cmd:
return -EINVAL;
diff --git a/sound/soc/msm/qdsp6/q6voice.c b/sound/soc/msm/qdsp6/q6voice.c
index 1cf32aa..3791f24 100644
--- a/sound/soc/msm/qdsp6/q6voice.c
+++ b/sound/soc/msm/qdsp6/q6voice.c
@@ -2344,6 +2344,9 @@
/* send stop voice cmd */
voice_send_stop_voice_cmd(v);
+ /* Clear mute setting */
+ v->dev_tx.mute = common.default_mute_val;
+
/* detach VOCPROC and wait for response from mvm */
mvm_d_vocproc_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
APR_HDR_LEN(APR_HDR_SIZE),
@@ -3106,7 +3109,9 @@
v->dev_tx.mute = mute;
- if (v->voc_state == VOC_RUN)
+ if ((v->voc_state == VOC_RUN) ||
+ (v->voc_state == VOC_CHANGE) ||
+ (v->voc_state == VOC_STANDBY))
ret = voice_send_mute_cmd(v);
mutex_unlock(&v->lock);
@@ -3312,7 +3317,9 @@
v->dev_rx.volume = vol_idx;
- if (v->voc_state == VOC_RUN)
+ if ((v->voc_state == VOC_RUN) ||
+ (v->voc_state == VOC_CHANGE) ||
+ (v->voc_state == VOC_STANDBY))
ret = voice_send_vol_index_cmd(v);
mutex_unlock(&v->lock);
@@ -3501,6 +3508,15 @@
pr_err("setup voice failed\n");
goto fail;
}
+
+ ret = voice_send_vol_index_cmd(v);
+ if (ret < 0)
+ pr_err("voice volume failed\n");
+
+ ret = voice_send_mute_cmd(v);
+ if (ret < 0)
+ pr_err("voice mute failed\n");
+
ret = voice_send_start_voice_cmd(v);
if (ret < 0) {
pr_err("start voice failed\n");
@@ -3998,7 +4014,7 @@
memset((void *)common.cvs_cal.buf, 0, CVS_CAL_SIZE);
cont:
/* set default value */
- common.default_mute_val = 1; /* default is mute */
+ common.default_mute_val = 0; /* default is un-mute */
common.default_vol_val = 0;
common.default_sample_val = 8000;
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index ff2cc8d..acb073d 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -1,4 +1,6 @@
snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o msm-pcm-routing-v2.o msm-compr-q6-v2.o msm-multi-ch-pcm-q6-v2.o
snd-soc-qdsp6v2-objs += msm-pcm-lpa-v2.o msm-pcm-afe-v2.o msm-pcm-voip-v2.o msm-pcm-voice-v2.o
obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o
-obj-y += q6adm.o q6afe.o q6asm.o q6audio-v2.o q6voice.o
+obj-y += q6adm.o q6afe.o q6asm.o q6audio-v2.o q6voice.o q6core.o
+ocmem-audio-objs += audio_ocmem.o
+obj-$(CONFIG_AUDIO_OCMEM) += ocmem-audio.o
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
new file mode 100644
index 0000000..86a82e2
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -0,0 +1,672 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <asm/mach-types.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/ocmem.h>
+#include "q6core.h"
+#include "audio_ocmem.h"
+
+#define AUDIO_OCMEM_BUF_SIZE (512 * SZ_1K)
+
+enum {
+ OCMEM_STATE_ALLOC = 1,
+ OCMEM_STATE_MAP_TRANSITION,
+ OCMEM_STATE_MAP_COMPL,
+ OCMEM_STATE_UNMAP_TRANSITION,
+ OCMEM_STATE_UNMAP_COMPL,
+ OCMEM_STATE_SHRINK,
+ OCMEM_STATE_GROW,
+ OCMEM_STATE_FREE,
+ OCMEM_STATE_MAP_FAIL,
+ OCMEM_STATE_UNMAP_FAIL,
+ OCMEM_STATE_EXIT,
+};
+static void audio_ocmem_process_workdata(struct work_struct *work);
+
+struct audio_ocmem_workdata {
+ int id;
+ bool en;
+ struct work_struct work;
+};
+
+struct voice_ocmem_workdata {
+ int id;
+ bool en;
+ struct work_struct work;
+};
+
+struct audio_ocmem_prv {
+ atomic_t audio_state;
+ struct ocmem_notifier *audio_hdl;
+ struct ocmem_buf *buf;
+ uint32_t audio_ocmem_bus_client;
+ struct ocmem_map_list mlist;
+ struct avcs_cmd_rsp_get_low_power_segments_info_t *lp_memseg_ptr;
+ wait_queue_head_t audio_wait;
+ atomic_t audio_cond;
+ atomic_t audio_exit;
+ spinlock_t audio_lock;
+ struct workqueue_struct *audio_ocmem_workqueue;
+ struct workqueue_struct *voice_ocmem_workqueue;
+};
+
+static struct audio_ocmem_prv audio_ocmem_lcl;
+
+
+static int audio_ocmem_client_cb(struct notifier_block *this,
+ unsigned long event1, void *data)
+{
+ int rc = NOTIFY_DONE;
+ unsigned long flags;
+
+ pr_debug("%s: event[%ld] cur state[%x]\n", __func__,
+ event1, atomic_read(&audio_ocmem_lcl.audio_state));
+
+ spin_lock_irqsave(&audio_ocmem_lcl.audio_lock, flags);
+ switch (event1) {
+ case OCMEM_MAP_DONE:
+ pr_debug("%s: map done\n", __func__);
+ atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_MAP_COMPL);
+ break;
+ case OCMEM_MAP_FAIL:
+ pr_debug("%s: map fail\n", __func__);
+ atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_MAP_FAIL);
+ break;
+ case OCMEM_UNMAP_DONE:
+ pr_debug("%s: unmap done\n", __func__);
+ atomic_set(&audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_UNMAP_COMPL);
+ break;
+ case OCMEM_UNMAP_FAIL:
+ pr_debug("%s: unmap fail\n", __func__);
+ atomic_set(&audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_UNMAP_FAIL);
+ break;
+ case OCMEM_ALLOC_GROW:
+ audio_ocmem_lcl.buf = data;
+ atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_GROW);
+ break;
+ case OCMEM_ALLOC_SHRINK:
+ atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_SHRINK);
+ break;
+ default:
+ pr_err("%s: Invalid event[%ld]\n", __func__, event1);
+ break;
+ }
+ spin_unlock_irqrestore(&audio_ocmem_lcl.audio_lock, flags);
+ if (atomic_read(&audio_ocmem_lcl.audio_cond)) {
+ atomic_set(&audio_ocmem_lcl.audio_cond, 0);
+ wake_up(&audio_ocmem_lcl.audio_wait);
+ }
+ return rc;
+}
+
+/**
+ * audio_ocmem_enable() - Exercise OCMEM for audio
+ * @cid: client id - OCMEM_LP_AUDIO
+ *
+ * OCMEM gets allocated for audio usecase and the low power
+ * segments obtained from the DSP will be moved from/to main
+ * memory to OCMEM. Shrink and grow requests will be received
+ * and processed accordingly based on the current audio state.
+ */
+int audio_ocmem_enable(int cid)
+{
+ int ret;
+ int i, j;
+ struct ocmem_buf *buf = NULL;
+ struct avcs_cmd_rsp_get_low_power_segments_info_t *lp_segptr;
+
+ pr_debug("%s\n", __func__);
+ /* Non-blocking ocmem allocate (asynchronous) */
+ buf = ocmem_allocate_nb(cid, AUDIO_OCMEM_BUF_SIZE);
+ if (IS_ERR_OR_NULL(buf)) {
+ pr_err("%s: failed: %d\n", __func__, cid);
+ return -ENOMEM;
+ }
+ atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_ALLOC);
+
+ audio_ocmem_lcl.buf = buf;
+ atomic_set(&audio_ocmem_lcl.audio_exit, 0);
+ if (!buf->len) {
+ wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+ (atomic_read(&audio_ocmem_lcl.audio_cond) == 0) ||
+ (atomic_read(&audio_ocmem_lcl.audio_exit) == 1));
+
+ if (atomic_read(&audio_ocmem_lcl.audio_exit)) {
+ pr_err("%s: audio playback ended while waiting for ocmem\n",
+ __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ }
+ if (audio_ocmem_lcl.lp_memseg_ptr == NULL) {
+ /* Retrieve low power segments */
+ ret = core_get_low_power_segments(
+ &audio_ocmem_lcl.lp_memseg_ptr);
+ if (ret != 0) {
+ pr_err("%s: get low power segments from DSP failed, rc=%d\n",
+ __func__, ret);
+ goto fail_cmd;
+ }
+ }
+ lp_segptr = audio_ocmem_lcl.lp_memseg_ptr;
+ audio_ocmem_lcl.mlist.num_chunks = lp_segptr->num_segments;
+ for (i = 0, j = 0; j < audio_ocmem_lcl.mlist.num_chunks; j++, i++) {
+ audio_ocmem_lcl.mlist.chunks[j].ro =
+ (lp_segptr->mem_segment[i].type == READ_ONLY_SEGMENT);
+ audio_ocmem_lcl.mlist.chunks[j].ddr_paddr =
+ lp_segptr->mem_segment[i].start_address_lsw;
+ audio_ocmem_lcl.mlist.chunks[j].size =
+ lp_segptr->mem_segment[i].size;
+ pr_debug("%s: ro:%d, ddr_paddr[%x], size[%x]\n", __func__,
+ audio_ocmem_lcl.mlist.chunks[j].ro,
+ (uint32_t)audio_ocmem_lcl.mlist.chunks[j].ddr_paddr,
+ (uint32_t)audio_ocmem_lcl.mlist.chunks[j].size);
+ }
+
+ /* vote for ocmem bus bandwidth */
+ ret = msm_bus_scale_client_update_request(
+ audio_ocmem_lcl.audio_ocmem_bus_client,
+ 0);
+ if (ret)
+ pr_err("%s: failed to vote for bus bandwidth\n", __func__);
+
+ atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_MAP_TRANSITION);
+
+ ret = ocmem_map(cid, audio_ocmem_lcl.buf, &audio_ocmem_lcl.mlist);
+ if (ret) {
+ pr_err("%s: ocmem_map failed\n", __func__);
+ goto fail_cmd;
+ }
+
+
+ while ((atomic_read(&audio_ocmem_lcl.audio_state) !=
+ OCMEM_STATE_EXIT)) {
+
+ wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+ atomic_read(&audio_ocmem_lcl.audio_cond) == 0);
+
+ switch (atomic_read(&audio_ocmem_lcl.audio_state)) {
+ case OCMEM_STATE_MAP_COMPL:
+ pr_debug("%s: audio_cond[0x%x], audio_state[0x%x]\n",
+ __func__, atomic_read(&audio_ocmem_lcl.audio_cond),
+ atomic_read(&audio_ocmem_lcl.audio_state));
+ atomic_set(&audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_MAP_COMPL);
+ atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+ break;
+ case OCMEM_STATE_SHRINK:
+ atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+ ret = ocmem_unmap(cid, audio_ocmem_lcl.buf,
+ &audio_ocmem_lcl.mlist);
+ if (ret) {
+ pr_err("%s: ocmem_unmap failed, state[%d]\n",
+ __func__,
+ atomic_read(&audio_ocmem_lcl.audio_state));
+ goto fail_cmd;
+ }
+
+ atomic_set(&audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_UNMAP_TRANSITION);
+ wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+ atomic_read(&audio_ocmem_lcl.audio_cond) == 0);
+ atomic_set(&audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_UNMAP_COMPL);
+ ret = ocmem_shrink(cid, audio_ocmem_lcl.buf, 0);
+ if (ret) {
+ pr_err("%s: ocmem_shrink failed, state[%d]\n",
+ __func__,
+ atomic_read(&audio_ocmem_lcl.audio_state));
+ goto fail_cmd;
+ }
+
+ break;
+ case OCMEM_STATE_GROW:
+ atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+ ret = ocmem_map(cid, audio_ocmem_lcl.buf,
+ &audio_ocmem_lcl.mlist);
+ if (ret) {
+ pr_err("%s: ocmem_map failed, state[%d]\n",
+ __func__,
+ atomic_read(&audio_ocmem_lcl.audio_state));
+ goto fail_cmd;
+ }
+ atomic_set(&audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_MAP_TRANSITION);
+ wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+ atomic_read(&audio_ocmem_lcl.audio_cond) == 0);
+ atomic_set(&audio_ocmem_lcl.audio_state,
+ OCMEM_STATE_MAP_COMPL);
+ break;
+ }
+ }
+fail_cmd:
+ pr_debug("%s: exit\n", __func__);
+ return ret;
+}
+
+/**
+ * audio_ocmem_disable() - Disable OCMEM for audio
+ * @cid: client id - OCMEM_LP_AUDIO
+ *
+ * OCMEM gets deallocated for audio usecase. Depending on
+ * current audio state, OCMEM will be freed from using audio
+ * segments.
+ */
+int audio_ocmem_disable(int cid)
+{
+	int ret;
+
+	if (atomic_read(&audio_ocmem_lcl.audio_cond))
+		atomic_set(&audio_ocmem_lcl.audio_cond, 0);
+	pr_debug("%s: audio_cond[0x%x], audio_state[0x%x]\n", __func__,
+			 atomic_read(&audio_ocmem_lcl.audio_cond),
+			 atomic_read(&audio_ocmem_lcl.audio_state));
+	switch (atomic_read(&audio_ocmem_lcl.audio_state)) {
+	case OCMEM_STATE_MAP_COMPL:
+		atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+		ret = ocmem_unmap(cid, audio_ocmem_lcl.buf,
+					&audio_ocmem_lcl.mlist);
+		if (ret) {
+			pr_err("%s: ocmem_unmap failed, state[%d]\n",
+				__func__,
+				atomic_read(&audio_ocmem_lcl.audio_state));
+			goto fail_cmd;
+		}
+
+		atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_EXIT);
+
+		wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+			atomic_read(&audio_ocmem_lcl.audio_cond) == 0); /* fallthrough: free after unmap */
+	case OCMEM_STATE_UNMAP_COMPL:
+		ret = ocmem_free(OCMEM_LP_AUDIO, audio_ocmem_lcl.buf);
+		if (ret) {
+			pr_err("%s: ocmem_free failed, state[%d]\n",
+				__func__,
+				atomic_read(&audio_ocmem_lcl.audio_state));
+			goto fail_cmd;
+		}
+		pr_debug("%s: ocmem_free success\n", __func__); /* fallthrough */
+	default:
+		pr_debug("%s: state=%d", __func__,
+			atomic_read(&audio_ocmem_lcl.audio_state));
+		break;
+
+	}
+	return 0;
+fail_cmd:
+	return ret;
+}
+
+static void voice_ocmem_process_workdata(struct work_struct *work)
+{
+	int cid;
+	bool en;
+	int rc = 0;
+
+	struct voice_ocmem_workdata *voice_ocm_work =
+		container_of(work, struct voice_ocmem_workdata, work);
+
+	en = voice_ocm_work->en;
+	switch (voice_ocm_work->id) {
+	case VOICE:
+		cid = OCMEM_VOICE;
+		if (en)
+			disable_ocmem_for_voice(cid);
+		else
+			enable_ocmem_after_voice(cid);
+		break;
+	default:
+		pr_err("%s: Invalid client id[%d]\n", __func__,
+				voice_ocm_work->id);
+		rc = -EINVAL;
+	}
+	kfree(voice_ocm_work); /* allocated in voice_ocmem_process_req() */
+}
+/**
+ * voice_ocmem_process_req() - disable/enable OCMEM during voice call
+ * @cid: client id - VOICE
+ * @enable: 1 - enable
+ * 0 - disable
+ *
+ * This configures OCMEM during start of voice call. If any
+ * audio clients are already using OCMEM, they will be evicted
+ * out of OCMEM during voice call and get restored after voice
+ * call.
+ */
+int voice_ocmem_process_req(int cid, bool enable)
+{
+
+ struct voice_ocmem_workdata *workdata = NULL;
+
+ if (audio_ocmem_lcl.voice_ocmem_workqueue == NULL) {
+ pr_err("%s: voice ocmem workqueue is NULL\n", __func__);
+ return -EINVAL;
+ }
+ workdata = kzalloc(sizeof(struct voice_ocmem_workdata),
+ GFP_ATOMIC);
+ if (workdata == NULL) {
+ pr_err("%s: mem failure\n", __func__);
+ return -ENOMEM;
+ }
+ workdata->id = cid;
+ workdata->en = enable;
+
+ INIT_WORK(&workdata->work, voice_ocmem_process_workdata);
+ queue_work(audio_ocmem_lcl.voice_ocmem_workqueue, &workdata->work);
+
+ return 0;
+}
+
+/**
+ * disable_ocmem_for_voice() - disable OCMEM during voice call
+ * @cid: client id - OCMEM_VOICE
+ *
+ * This configures OCMEM during start of voice call. If any
+ * audio clients are already using OCMEM, they will be evicted
+ */
+int disable_ocmem_for_voice(int cid)
+{
+ int ret;
+
+ ret = ocmem_evict(cid);
+ if (ret)
+ pr_err("%s: ocmem_evict is not successful\n", __func__);
+ return ret;
+}
+
+/**
+ * enable_ocmem_for_voice() - To enable OCMEM after voice call
+ * @cid: client id - OCMEM_VOICE
+ *
+ * OCMEM gets re-enabled after OCMEM voice call. If other client
+ * is evicted out of OCMEM, that gets restored and remapped in
+ * OCMEM after the voice call.
+ */
+int enable_ocmem_after_voice(int cid)
+{
+ int ret;
+
+ ret = ocmem_restore(cid);
+ if (ret)
+ pr_err("%s: ocmem_restore is not successful\n", __func__);
+ return ret;
+}
+
+
+static void audio_ocmem_process_workdata(struct work_struct *work)
+{
+	int cid;
+	bool en;
+	int rc = 0;
+
+	struct audio_ocmem_workdata *audio_ocm_work =
+		container_of(work, struct audio_ocmem_workdata, work);
+
+	en = audio_ocm_work->en;
+	switch (audio_ocm_work->id) {
+	case AUDIO:
+		cid = OCMEM_LP_AUDIO;
+		if (en)
+			audio_ocmem_enable(cid);
+		else
+			audio_ocmem_disable(cid);
+		break;
+	default:
+		pr_err("%s: Invalid client id[%d]\n", __func__,
+				audio_ocm_work->id);
+		rc = -EINVAL;
+	}
+	kfree(audio_ocm_work); /* allocated in audio_ocmem_process_req() */
+}
+
+/**
+ * audio_ocmem_process_req() - process audio request to use OCMEM
+ * @id: client id - OCMEM_LP_AUDIO
+ * @enable: enable or disable OCMEM
+ *
+ * A workqueue gets created and initialized to use OCMEM for
+ * audio clients.
+ */
+int audio_ocmem_process_req(int id, bool enable)
+{
+ struct audio_ocmem_workdata *workdata = NULL;
+
+ if (audio_ocmem_lcl.audio_ocmem_workqueue == NULL) {
+ pr_err("%s: audio ocmem workqueue is NULL\n", __func__);
+ return -EINVAL;
+ }
+ workdata = kzalloc(sizeof(struct audio_ocmem_workdata),
+ GFP_ATOMIC);
+ if (workdata == NULL) {
+ pr_err("%s: mem failure\n", __func__);
+ return -ENOMEM;
+ }
+ workdata->id = id;
+ workdata->en = enable;
+
+ /* if previous work waiting for ocmem - signal it to exit */
+ atomic_set(&audio_ocmem_lcl.audio_exit, 1);
+
+ INIT_WORK(&workdata->work, audio_ocmem_process_workdata);
+ queue_work(audio_ocmem_lcl.audio_ocmem_workqueue, &workdata->work);
+
+ return 0;
+}
+
+
+static struct notifier_block audio_ocmem_client_nb = {
+ .notifier_call = audio_ocmem_client_cb,
+};
+
+static int audio_ocmem_platform_data_populate(struct platform_device *pdev)
+{
+ int ret;
+ struct msm_bus_scale_pdata *audio_ocmem_bus_scale_pdata = NULL;
+ struct msm_bus_vectors *audio_ocmem_bus_vectors = NULL;
+ struct msm_bus_paths *ocmem_audio_bus_paths = NULL;
+ u32 val;
+
+ if (!pdev->dev.of_node) {
+ pr_err("%s: device tree information missing\n", __func__);
+ return -ENODEV;
+ }
+
+ audio_ocmem_bus_vectors = kzalloc(sizeof(struct msm_bus_vectors),
+ GFP_KERNEL);
+ if (!audio_ocmem_bus_vectors) {
+ dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
+ return -ENOMEM;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-ocmem-audio-src-id", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-src-id missing in DT node\n",
+ __func__);
+ goto fail1;
+ }
+ audio_ocmem_bus_vectors->src = val;
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-ocmem-audio-dst-id", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-dst-id missing in DT node\n",
+ __func__);
+ goto fail1;
+ }
+ audio_ocmem_bus_vectors->dst = val;
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-ocmem-audio-ab", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-ab missing in DT node\n",
+ __func__);
+ goto fail1;
+ }
+ audio_ocmem_bus_vectors->ab = val;
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,msm-ocmem-audio-ib", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-ib missing in DT node\n",
+ __func__);
+ goto fail1;
+ }
+ audio_ocmem_bus_vectors->ib = val;
+
+ ocmem_audio_bus_paths = kzalloc(sizeof(struct msm_bus_paths),
+ GFP_KERNEL);
+ if (!ocmem_audio_bus_paths) {
+ dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
+ goto fail1;
+ }
+ ocmem_audio_bus_paths->num_paths = 1;
+ ocmem_audio_bus_paths->vectors = audio_ocmem_bus_vectors;
+
+ audio_ocmem_bus_scale_pdata =
+ kzalloc(sizeof(struct msm_bus_scale_pdata), GFP_KERNEL);
+
+ if (!audio_ocmem_bus_scale_pdata) {
+ dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
+ goto fail2;
+ }
+
+ audio_ocmem_bus_scale_pdata->usecase = ocmem_audio_bus_paths;
+ audio_ocmem_bus_scale_pdata->num_usecases = 1;
+ audio_ocmem_bus_scale_pdata->name = "audio-ocmem";
+
+ dev_set_drvdata(&pdev->dev, audio_ocmem_bus_scale_pdata);
+ return ret;
+
+fail2:
+ kfree(ocmem_audio_bus_paths);
+fail1:
+ kfree(audio_ocmem_bus_vectors);
+ return ret;
+}
+static int ocmem_audio_client_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct msm_bus_scale_pdata *audio_ocmem_bus_scale_pdata = NULL;
+
+	pr_debug("%s\n", __func__);
+	audio_ocmem_lcl.audio_ocmem_workqueue =
+		alloc_workqueue("ocmem_audio_client_driver_audio",
+					WQ_NON_REENTRANT, 0);
+	if (!audio_ocmem_lcl.audio_ocmem_workqueue) {
+		pr_err("%s: Failed to create ocmem audio work queue\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	audio_ocmem_lcl.voice_ocmem_workqueue =
+		alloc_workqueue("ocmem_audio_client_driver_voice",
+					WQ_NON_REENTRANT, 0);
+	if (!audio_ocmem_lcl.voice_ocmem_workqueue) {
+		pr_err("%s: Failed to create ocmem voice work queue\n", __func__);
+		destroy_workqueue(audio_ocmem_lcl.audio_ocmem_workqueue);
+		return -ENOMEM;
+	}
+
+	init_waitqueue_head(&audio_ocmem_lcl.audio_wait);
+	atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+	atomic_set(&audio_ocmem_lcl.audio_exit, 0);
+	spin_lock_init(&audio_ocmem_lcl.audio_lock);
+
+	/* populate platform data */
+	ret = audio_ocmem_platform_data_populate(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: failed to populate platform data, rc = %d\n",
+			__func__, ret);
+		return -ENODEV;
+	}
+	audio_ocmem_bus_scale_pdata = dev_get_drvdata(&pdev->dev);
+
+	audio_ocmem_lcl.audio_ocmem_bus_client =
+		msm_bus_scale_register_client(audio_ocmem_bus_scale_pdata);
+
+	if (!audio_ocmem_lcl.audio_ocmem_bus_client) {
+		pr_err("%s: msm_bus_scale_register_client() failed\n",
+		__func__);
+		return -EFAULT;
+	}
+	audio_ocmem_lcl.audio_hdl = ocmem_notifier_register(OCMEM_LP_AUDIO,
+						&audio_ocmem_client_nb);
+	if (audio_ocmem_lcl.audio_hdl == NULL) {
+		pr_err("%s: Failed to get ocmem handle %d\n", __func__,
+						OCMEM_LP_AUDIO);
+	}
+	audio_ocmem_lcl.lp_memseg_ptr = NULL;
+	return 0;
+}
+
+static int ocmem_audio_client_remove(struct platform_device *pdev)
+{
+ struct msm_bus_scale_pdata *audio_ocmem_bus_scale_pdata = NULL;
+
+ audio_ocmem_bus_scale_pdata = (struct msm_bus_scale_pdata *)
+ dev_get_drvdata(&pdev->dev);
+
+ kfree(audio_ocmem_bus_scale_pdata->usecase->vectors);
+ kfree(audio_ocmem_bus_scale_pdata->usecase);
+ kfree(audio_ocmem_bus_scale_pdata);
+ ocmem_notifier_unregister(audio_ocmem_lcl.audio_hdl,
+ &audio_ocmem_client_nb);
+ return 0;
+}
+static const struct of_device_id msm_ocmem_audio_dt_match[] = {
+ {.compatible = "qcom,msm-ocmem-audio"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_ocmem_audio_dt_match);
+
+static struct platform_driver audio_ocmem_driver = {
+ .driver = {
+ .name = "audio-ocmem",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ocmem_audio_dt_match,
+ },
+ .probe = ocmem_audio_client_probe,
+ .remove = ocmem_audio_client_remove,
+};
+
+
+static int __init ocmem_audio_client_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&audio_ocmem_driver);
+
+ if (rc)
+ pr_err("%s: Failed to register audio ocmem driver\n", __func__);
+ return rc;
+}
+module_init(ocmem_audio_client_init);
+
+static void __exit ocmem_audio_client_exit(void)
+{
+ platform_driver_unregister(&audio_ocmem_driver);
+}
+
+module_exit(ocmem_audio_client_exit);
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.h b/sound/soc/msm/qdsp6v2/audio_ocmem.h
new file mode 100644
index 0000000..e915516
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _AUDIO_OCMEM_H_
+#define _AUDIO_OCMEM_H_
+
+#include <linux/module.h>
+#include <linux/notifier.h>
+
+#include <mach/ocmem.h>
+
+#define AUDIO 0
+#define VOICE 1
+
+#ifdef CONFIG_AUDIO_OCMEM
+int audio_ocmem_process_req(int id, bool enable);
+int voice_ocmem_process_req(int cid, bool enable);
+int enable_ocmem_after_voice(int cid);
+int disable_ocmem_for_voice(int cid);
+#else
+static inline int audio_ocmem_process_req(int id, bool enable)
+	{ return 0; }
+static inline int voice_ocmem_process_req(int cid, bool enable)
+	{ return 0; }
+static inline int enable_ocmem_after_voice(int cid) { return 0; }
+static inline int disable_ocmem_for_voice(int cid) { return 0; }
+#endif
+
+#endif
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
index 7723934..360744a 100644
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
@@ -621,7 +621,11 @@
static __devinit int msm_compr_probe(struct platform_device *pdev)
{
- pr_info("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+ if (pdev->dev.of_node)
+ dev_set_name(&pdev->dev, "%s", "msm-compr-dsp");
+
+ dev_info(&pdev->dev, "%s: dev name %s\n",
+ __func__, dev_name(&pdev->dev));
return snd_soc_register_platform(&pdev->dev,
&msm_soc_platform);
}
@@ -632,10 +636,17 @@
return 0;
}
+static const struct of_device_id msm_compr_dt_match[] = {
+ {.compatible = "qcom,msm-compr-dsp"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_compr_dt_match);
+
static struct platform_driver msm_compr_driver = {
.driver = {
.name = "msm-compr-dsp",
.owner = THIS_MODULE,
+ .of_match_table = msm_compr_dt_match,
},
.probe = msm_compr_probe,
.remove = __devexit_p(msm_compr_remove),
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 783a03d..99fd1d3 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -325,6 +325,494 @@
.remove = msm_dai_q6_dai_auxpcm_remove,
};
+static int msm_dai_q6_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ int rc = 0;
+
+ if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+ /* PORT START should be set if prepare called in active state */
+ rc = afe_q6_interface_prepare();
+ if (IS_ERR_VALUE(rc))
+ dev_err(dai->dev, "fail to open AFE APR\n");
+ }
+ return rc;
+}
+
+static int msm_dai_q6_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ int rc = 0;
+
+ /*
+ * Start/stop port without waiting for Q6 AFE response. Need to have
+ * native q6 AFE driver propagates AFE response in order to handle
+ * port start/stop command error properly if error does arise.
+ */
+ pr_debug("%s:port:%d cmd:%d dai_data->status_mask = %ld",
+ __func__, dai->id, cmd, *dai_data->status_mask);
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+ switch (dai->id) {
+ case VOICE_PLAYBACK_TX:
+ case VOICE_RECORD_TX:
+ case VOICE_RECORD_RX:
+ afe_pseudo_port_start_nowait(dai->id);
+ break;
+ default:
+ afe_port_start_nowait(dai->id,
+ &dai_data->port_config, dai_data->rate);
+ break;
+ }
+ set_bit(STATUS_PORT_STARTED,
+ dai_data->status_mask);
+ }
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+ switch (dai->id) {
+ case VOICE_PLAYBACK_TX:
+ case VOICE_RECORD_TX:
+ case VOICE_RECORD_RX:
+ afe_pseudo_port_stop_nowait(dai->id);
+ break;
+ default:
+ afe_port_stop_nowait(dai->id);
+ break;
+ }
+ clear_bit(STATUS_PORT_STARTED,
+ dai_data->status_mask);
+ }
+ break;
+
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int msm_dai_q6_cdc_hw_params(struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+ dai_data->channels = params_channels(params);
+ switch (dai_data->channels) {
+ case 2:
+ dai_data->port_config.i2s.mono_stereo = MSM_AFE_STEREO;
+ break;
+ case 1:
+ dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ dai_data->rate = params_rate(params);
+ dai_data->port_config.i2s.sample_rate = dai_data->rate;
+ dai_data->port_config.i2s.i2s_cfg_minor_version =
+ AFE_API_VERSION_I2S_CONFIG;
+ dai_data->port_config.i2s.data_format = AFE_LINEAR_PCM_DATA;
+ dev_dbg(dai->dev, " channel %d sample rate %d entered\n",
+ dai_data->channels, dai_data->rate);
+
+ /* Q6 only supports 16 as now */
+ dai_data->port_config.i2s.bit_width = 16;
+ dai_data->port_config.i2s.channel_mode = 1;
+ return 0;
+}
+
+static u8 num_of_bits_set(u8 sd_line_mask)
+{
+ u8 num_bits_set = 0;
+
+ while (sd_line_mask) {
+ num_bits_set++;
+ sd_line_mask = sd_line_mask & (sd_line_mask - 1);
+ }
+ return num_bits_set;
+}
+
+static int msm_dai_q6_i2s_hw_params(struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ struct msm_i2s_data *i2s_pdata =
+ (struct msm_i2s_data *) dai->dev->platform_data;
+
+ dai_data->channels = params_channels(params);
+ if (num_of_bits_set(i2s_pdata->sd_lines) == 1) {
+ switch (dai_data->channels) {
+ case 2:
+ dai_data->port_config.i2s.mono_stereo = MSM_AFE_STEREO;
+ break;
+ case 1:
+ dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
+ break;
+ default:
+ pr_warn("greater than stereo has not been validated");
+ break;
+ }
+ }
+ dai_data->rate = params_rate(params);
+ dai_data->port_config.i2s.sample_rate = dai_data->rate;
+ dai_data->port_config.i2s.i2s_cfg_minor_version =
+ AFE_API_VERSION_I2S_CONFIG;
+ dai_data->port_config.i2s.data_format = AFE_LINEAR_PCM_DATA;
+ /* Q6 only supports 16 as now */
+ dai_data->port_config.i2s.bit_width = 16;
+ dai_data->port_config.i2s.channel_mode = 1;
+
+ return 0;
+}
+
+static int msm_dai_q6_slim_bus_hw_params(struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+ dai_data->channels = params_channels(params);
+ dai_data->rate = params_rate(params);
+
+ /* Q6 only supports 16 as now */
+ dai_data->port_config.slim_sch.sb_cfg_minor_version =
+ AFE_API_VERSION_SLIMBUS_CONFIG;
+ dai_data->port_config.slim_sch.bit_width = 16;
+ dai_data->port_config.slim_sch.data_format = 0;
+ dai_data->port_config.slim_sch.num_channels = dai_data->channels;
+ dai_data->port_config.slim_sch.sample_rate = dai_data->rate;
+
+ dev_dbg(dai->dev, "%s:slimbus_dev_id[%hu] bit_wd[%hu] format[%hu]\n"
+ "num_channel %hu shared_ch_mapping[0] %hu\n"
+ "slave_port_mapping[1] %hu slave_port_mapping[2] %hu\n"
+ "sample_rate %d\n", __func__,
+ dai_data->port_config.slim_sch.slimbus_dev_id,
+ dai_data->port_config.slim_sch.bit_width,
+ dai_data->port_config.slim_sch.data_format,
+ dai_data->port_config.slim_sch.num_channels,
+ dai_data->port_config.slim_sch.shared_ch_mapping[0],
+ dai_data->port_config.slim_sch.shared_ch_mapping[1],
+ dai_data->port_config.slim_sch.shared_ch_mapping[2],
+ dai_data->rate);
+
+ return 0;
+}
+
+static int msm_dai_q6_bt_fm_hw_params(struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai, int stream)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+ dai_data->channels = params_channels(params);
+ dai_data->rate = params_rate(params);
+
+ dev_dbg(dai->dev, "channels %d sample rate %d entered\n",
+ dai_data->channels, dai_data->rate);
+
+ memset(&dai_data->port_config, 0, sizeof(dai_data->port_config));
+
+ return 0;
+}
+
+static int msm_dai_q6_afe_rtproxy_hw_params(struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+ dai_data->rate = params_rate(params);
+ dai_data->port_config.rtproxy.num_channels = params_channels(params);
+ dai_data->port_config.rtproxy.sample_rate = params_rate(params);
+
+ pr_debug("channel %d entered,dai_id: %d,rate: %d\n",
+ dai_data->port_config.rtproxy.num_channels, dai->id, dai_data->rate);
+
+ dai_data->port_config.rtproxy.rt_proxy_cfg_minor_version =
+ AFE_API_VERSION_RT_PROXY_CONFIG;
+ dai_data->port_config.rtproxy.bit_width = 16; /* Q6 only supports 16 */
+ dai_data->port_config.rtproxy.interleaved = 1;
+ dai_data->port_config.rtproxy.frame_size = params_period_bytes(params);
+ dai_data->port_config.rtproxy.jitter_allowance =
+ dai_data->port_config.rtproxy.frame_size/2;
+ dai_data->port_config.rtproxy.low_water_mark = 0;
+ dai_data->port_config.rtproxy.high_water_mark = 0;
+
+ return 0;
+}
+
+/* Current implementation assumes hw_param is called once
+ * This may not be the case but what to do when ADM and AFE
+ * port are already opened and parameter changes
+ */
+static int msm_dai_q6_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ int rc = 0;
+
+ switch (dai->id) {
+ case PRIMARY_I2S_TX:
+ case PRIMARY_I2S_RX:
+ case SECONDARY_I2S_RX:
+ rc = msm_dai_q6_cdc_hw_params(params, dai, substream->stream);
+ break;
+ case MI2S_RX:
+ rc = msm_dai_q6_i2s_hw_params(params, dai, substream->stream);
+ break;
+ case SLIMBUS_0_RX:
+ case SLIMBUS_1_RX:
+ case SLIMBUS_0_TX:
+ case SLIMBUS_1_TX:
+ rc = msm_dai_q6_slim_bus_hw_params(params, dai,
+ substream->stream);
+ break;
+ case INT_BT_SCO_RX:
+ case INT_BT_SCO_TX:
+ case INT_FM_RX:
+ case INT_FM_TX:
+ rc = msm_dai_q6_bt_fm_hw_params(params, dai, substream->stream);
+ break;
+ case RT_PROXY_DAI_001_TX:
+ case RT_PROXY_DAI_001_RX:
+ case RT_PROXY_DAI_002_TX:
+ case RT_PROXY_DAI_002_RX:
+ rc = msm_dai_q6_afe_rtproxy_hw_params(params, dai);
+ break;
+ case VOICE_PLAYBACK_TX:
+ case VOICE_RECORD_RX:
+ case VOICE_RECORD_TX:
+ rc = 0;
+ break;
+ default:
+ dev_err(dai->dev, "invalid AFE port ID\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static void msm_dai_q6_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ int rc = 0;
+
+ if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+ switch (dai->id) {
+ case VOICE_PLAYBACK_TX:
+ case VOICE_RECORD_TX:
+ case VOICE_RECORD_RX:
+ pr_debug("%s, stop pseudo port:%d\n",
+ __func__, dai->id);
+ rc = afe_stop_pseudo_port(dai->id);
+ break;
+ default:
+ rc = afe_close(dai->id); /* can block */
+ break;
+ }
+ if (IS_ERR_VALUE(rc))
+ dev_err(dai->dev, "fail to close AFE port\n");
+ pr_debug("%s: dai_data->status_mask = %ld\n", __func__,
+ *dai_data->status_mask);
+ clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+ }
+}
+
+static int msm_dai_q6_cdc_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ dai_data->port_config.i2s.ws_src = 1; /* CPU is master */
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ dai_data->port_config.i2s.ws_src = 0; /* CPU is slave */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int msm_dai_q6_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ int rc = 0;
+
+ dev_dbg(dai->dev, "enter %s, id = %d fmt[%d]\n", __func__,
+ dai->id, fmt);
+ switch (dai->id) {
+ case PRIMARY_I2S_TX:
+ case PRIMARY_I2S_RX:
+ case MI2S_RX:
+ case SECONDARY_I2S_RX:
+ rc = msm_dai_q6_cdc_set_fmt(dai, fmt);
+ break;
+ default:
+ dev_err(dai->dev, "invalid cpu_dai set_fmt\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int msm_dai_q6_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot)
+
+{
+ int rc = 0;
+ struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ unsigned int i = 0;
+
+ dev_dbg(dai->dev, "enter %s, id = %d\n", __func__, dai->id);
+ switch (dai->id) {
+ case SLIMBUS_0_RX:
+ case SLIMBUS_1_RX:
+ /*
+ * channel number to be between 128 and 255.
+ * For RX port use channel numbers
+ * from 138 to 144 for pre-Taiko
+ * from 144 to 159 for Taiko
+ */
+ if (!rx_slot)
+ return -EINVAL;
+ for (i = 0; i < rx_num; i++) {
+ dai_data->port_config.slim_sch.shared_ch_mapping[i] =
+ rx_slot[i];
+ pr_err("%s: find number of channels[%d] ch[%d]\n",
+ __func__, i, rx_slot[i]);
+ }
+ dai_data->port_config.slim_sch.num_channels = rx_num;
+ pr_debug("%s:SLIMBUS_0_RX cnt[%d] ch[%d %d]\n", __func__,
+ rx_num, dai_data->port_config.slim_sch.shared_ch_mapping[0],
+ dai_data->port_config.slim_sch.shared_ch_mapping[1]);
+
+ break;
+ case SLIMBUS_0_TX:
+ case SLIMBUS_1_TX:
+ /*
+ * channel number to be between 128 and 255.
+ * For TX port use channel numbers
+ * from 128 to 137 for pre-Taiko
+ * from 128 to 143 for Taiko
+ */
+ if (!tx_slot)
+ return -EINVAL;
+ for (i = 0; i < tx_num; i++) {
+ dai_data->port_config.slim_sch.shared_ch_mapping[i] =
+ tx_slot[i];
+ pr_debug("%s: find number of channels[%d] ch[%d]\n",
+ __func__, i, tx_slot[i]);
+ }
+ dai_data->port_config.slim_sch.num_channels = tx_num;
+ pr_debug("%s:SLIMBUS_0_TX cnt[%d] ch[%d %d]\n", __func__,
+ tx_num,
+ dai_data->port_config.slim_sch.shared_ch_mapping[0],
+ dai_data->port_config.slim_sch.shared_ch_mapping[1]);
+ break;
+ default:
+ dev_err(dai->dev, "invalid cpu_dai id %d\n", dai->id);
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
+static struct snd_soc_dai_ops msm_dai_q6_ops = {
+ .prepare = msm_dai_q6_prepare,
+ .trigger = msm_dai_q6_trigger,
+ .hw_params = msm_dai_q6_hw_params,
+ .shutdown = msm_dai_q6_shutdown,
+ .set_fmt = msm_dai_q6_set_fmt,
+ .set_channel_map = msm_dai_q6_set_channel_map,
+};
+
+static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
+{
+ struct msm_dai_q6_dai_data *dai_data;
+ int rc = 0;
+
+ dai_data = kzalloc(sizeof(struct msm_dai_q6_dai_data), GFP_KERNEL);
+
+ if (!dai_data) {
+ dev_err(dai->dev, "DAI-%d: fail to allocate dai data\n",
+ dai->id);
+ rc = -ENOMEM;
+ } else
+ dev_set_drvdata(dai->dev, dai_data);
+
+ return rc;
+}
+
+static int msm_dai_q6_dai_remove(struct snd_soc_dai *dai)
+{
+ struct msm_dai_q6_dai_data *dai_data;
+ int rc;
+
+ dai_data = dev_get_drvdata(dai->dev);
+
+ /* If AFE port is still up, close it */
+ if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+ switch (dai->id) {
+ case VOICE_PLAYBACK_TX:
+ case VOICE_RECORD_TX:
+ case VOICE_RECORD_RX:
+ pr_debug("%s, stop pseudo port:%d\n",
+ __func__, dai->id);
+ rc = afe_stop_pseudo_port(dai->id);
+ break;
+ default:
+ rc = afe_close(dai->id); /* can block */
+ }
+ if (IS_ERR_VALUE(rc))
+ dev_err(dai->dev, "fail to close AFE port\n");
+ clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+ }
+ kfree(dai_data);
+ snd_soc_unregister_dai(dai->dev);
+
+ return 0;
+}
+
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_1_rx_dai = {
+ .playback = {
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 1,
+ .rate_min = 8000,
+ .rate_max = 16000,
+ },
+ .ops = &msm_dai_q6_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_1_tx_dai = {
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 1,
+ .rate_min = 8000,
+ .rate_max = 16000,
+ },
+ .ops = &msm_dai_q6_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+};
+
static int msm_auxpcm_dev_probe(struct platform_device *pdev)
{
int id;
@@ -512,6 +1000,135 @@
},
};
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_rx_dai = {
+ .playback = {
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &msm_dai_q6_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_tx_dai = {
+ .capture = {
+ .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+ SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &msm_dai_q6_ops,
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+};
+
+static int msm_dai_q6_dev_probe(struct platform_device *pdev)
+{
+ int rc, id;
+ const char *q6_dev_id = "qcom,msm-dai-q6-dev-id";
+
+ rc = of_property_read_u32(pdev->dev.of_node, q6_dev_id, &id);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s: missing %s in dt node\n", __func__, q6_dev_id);
+ return rc;
+ }
+
+ pdev->id = id;
+ dev_set_name(&pdev->dev, "%s.%d", "msm-dai-q6-dev", id);
+
+ pr_debug("%s: dev name %s, id:%d\n", __func__,
+ dev_name(&pdev->dev), pdev->id);
+
+ switch (id) {
+ case SLIMBUS_0_RX:
+ rc = snd_soc_register_dai(&pdev->dev,
+ &msm_dai_q6_slimbus_rx_dai);
+ break;
+ case SLIMBUS_0_TX:
+ rc = snd_soc_register_dai(&pdev->dev,
+ &msm_dai_q6_slimbus_tx_dai);
+ break;
+ case SLIMBUS_1_RX:
+ rc = snd_soc_register_dai(&pdev->dev,
+ &msm_dai_q6_slimbus_1_rx_dai);
+ break;
+ case SLIMBUS_1_TX:
+ rc = snd_soc_register_dai(&pdev->dev,
+ &msm_dai_q6_slimbus_1_tx_dai);
+ break;
+ default:
+ rc = -ENODEV;
+ break;
+ }
+
+ return rc;
+}
+
+static int msm_dai_q6_dev_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_dai(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id msm_dai_q6_dev_dt_match[] = {
+ { .compatible = "qcom,msm-dai-q6-dev", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm_dai_q6_dev_dt_match);
+
+static struct platform_driver msm_dai_q6_dev = {
+ .probe = msm_dai_q6_dev_probe,
+ .remove = msm_dai_q6_dev_remove,
+ .driver = {
+ .name = "msm-dai-q6-dev",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_dai_q6_dev_dt_match,
+ },
+};
+
+static int msm_dai_q6_probe(struct platform_device *pdev)
+{
+ int rc;
+ pr_debug("%s: dev name %s, id:%d\n", __func__,
+ dev_name(&pdev->dev), pdev->id);
+ rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (rc) {
+ dev_err(&pdev->dev, "%s: failed to add child nodes, rc=%d\n",
+ __func__, rc);
+ } else
+ dev_dbg(&pdev->dev, "%s: added child node\n", __func__);
+
+ return rc;
+}
+
+static int msm_dai_q6_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id msm_dai_q6_dt_match[] = {
+ { .compatible = "qcom,msm-dai-q6", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm_dai_q6_dt_match);
+static struct platform_driver msm_dai_q6 = {
+ .probe = msm_dai_q6_probe,
+ .remove = msm_dai_q6_remove,
+ .driver = {
+ .name = "msm-dai-q6",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_dai_q6_dt_match,
+ },
+};
static int __init msm_dai_q6_init(void)
{
@@ -522,10 +1139,27 @@
goto fail;
rc = platform_driver_register(&msm_auxpcm_resource);
-
if (rc) {
pr_err("%s: fail to register cpu dai driver\n", __func__);
platform_driver_unregister(&msm_auxpcm_dev);
+ goto fail;
+ }
+
+ rc = platform_driver_register(&msm_dai_q6);
+ if (rc) {
+ pr_err("%s: fail to register dai q6 driver", __func__);
+ platform_driver_unregister(&msm_auxpcm_dev);
+ platform_driver_unregister(&msm_auxpcm_resource);
+ goto fail;
+ }
+
+ rc = platform_driver_register(&msm_dai_q6_dev);
+ if (rc) {
+ pr_err("%s: fail to register dai q6 dev driver", __func__);
+ platform_driver_unregister(&msm_dai_q6);
+ platform_driver_unregister(&msm_auxpcm_dev);
+ platform_driver_unregister(&msm_auxpcm_resource);
+ goto fail;
}
fail:
return rc;
@@ -534,6 +1168,8 @@
static void __exit msm_dai_q6_exit(void)
{
+ platform_driver_unregister(&msm_dai_q6_dev);
+ platform_driver_unregister(&msm_dai_q6);
platform_driver_unregister(&msm_auxpcm_dev);
platform_driver_unregister(&msm_auxpcm_resource);
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
index 1ac872d..047e0f0 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
@@ -35,6 +35,7 @@
#include "msm-pcm-q6-v2.h"
#include "msm-pcm-routing-v2.h"
+#include "audio_ocmem.h"
static struct audio_locks the_locks;
@@ -223,6 +224,7 @@
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
prtd->pcm_irq_pos = 0;
+ audio_ocmem_process_req(AUDIO, true);
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
pr_debug("SNDRV_PCM_TRIGGER_START\n");
@@ -231,6 +233,7 @@
break;
case SNDRV_PCM_TRIGGER_STOP:
pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+ audio_ocmem_process_req(AUDIO, false);
atomic_set(&prtd->start, 0);
if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
break;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 67ee8e4..fbbb3a5 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -217,11 +217,12 @@
fe_dai_map[fedai_id][session_type] = dspst_id;
for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
if (!is_be_dai_extproc(i) &&
- (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
- (msm_bedais[i].active) &&
- (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+ (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+ (msm_bedais[i].active) &&
+ (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
mode = afe_get_port_type(msm_bedais[i].port_id);
- /*adm_connect_afe_port needs to be called*/
+ adm_connect_afe_port(mode, dspst_id,
+ msm_bedais[i].port_id);
break;
}
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
index 630405a..492569b 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
@@ -30,6 +30,7 @@
#include "msm-pcm-q6-v2.h"
#include "msm-pcm-routing-v2.h"
#include "q6voice.h"
+#include "audio_ocmem.h"
#define VOIP_MAX_Q_LEN 10
#define VOIP_MAX_VOC_PKT_SIZE 640
@@ -452,6 +453,8 @@
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
pr_debug("%s: Trigger start\n", __func__);
+ if ((!prtd->capture_start) && (!prtd->playback_start))
+ voice_ocmem_process_req(VOICE, true);
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
prtd->capture_start = 1;
else
@@ -459,6 +462,8 @@
break;
case SNDRV_PCM_TRIGGER_STOP:
pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+ if (prtd->capture_start && prtd->playback_start)
+ voice_ocmem_process_req(VOICE, false);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
prtd->playback_start = 0;
else
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 691ca21..e5837b2 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -16,7 +16,7 @@
#include <linux/jiffies.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
-
+#include <linux/wait.h>
#include <mach/qdsp6v2/audio_acdb.h>
#include <mach/qdsp6v2/rtac.h>
@@ -25,7 +25,7 @@
#include <mach/qdsp6v2/apr.h>
#include <sound/q6adm-v2.h>
#include <sound/q6audio-v2.h>
-
+#include <sound/q6afe-v2.h>
#define TIMEOUT_MS 1000
@@ -78,7 +78,7 @@
return 0;
}
if (data->opcode == APR_BASIC_RSP_RESULT) {
- pr_debug("APR_BASIC_RSP_RESULT\n");
+ pr_debug("APR_BASIC_RSP_RESULT id %x\n", payload[0]);
switch (payload[0]) {
case ADM_CMD_SET_PP_PARAMS_V5:
if (rtac_make_adm_callback(
@@ -142,6 +142,76 @@
pr_debug("%s\n", __func__);
}
+int adm_connect_afe_port(int mode, int session_id, int port_id)
+{
+ struct adm_cmd_connect_afe_port_v5 cmd;
+ int ret = 0;
+ int index;
+
+ pr_debug("%s: port %d session id:%d mode:%d\n", __func__,
+ port_id, session_id, mode);
+
+ port_id = afe_convert_virtual_to_portid(port_id);
+
+ if (afe_validate_port(port_id) < 0) {
+ pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
+ return -ENODEV;
+ }
+ if (this_adm.apr == NULL) {
+ this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
+ 0xFFFFFFFF, &this_adm);
+ if (this_adm.apr == NULL) {
+ pr_err("%s: Unable to register ADM\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ rtac_set_adm_handle(this_adm.apr);
+ }
+ index = afe_get_port_index(port_id);
+ pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);
+
+ cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cmd.hdr.pkt_size = sizeof(cmd);
+ cmd.hdr.src_svc = APR_SVC_ADM;
+ cmd.hdr.src_domain = APR_DOMAIN_APPS;
+ cmd.hdr.src_port = port_id;
+ cmd.hdr.dest_svc = APR_SVC_ADM;
+ cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
+ cmd.hdr.dest_port = port_id;
+ cmd.hdr.token = port_id;
+ cmd.hdr.opcode = ADM_CMD_CONNECT_AFE_PORT_V5;
+
+ cmd.mode = mode;
+ cmd.session_id = session_id;
+ cmd.afe_port_id = port_id;
+
+ atomic_set(&this_adm.copp_stat[index], 0);
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
+ if (ret < 0) {
+ pr_err("%s:ADM enable for port %d failed\n",
+ __func__, port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ /* Wait for the callback with copp id */
+ ret = wait_event_timeout(this_adm.wait[index],
+ atomic_read(&this_adm.copp_stat[index]),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s ADM connect AFE failed for port %d\n", __func__,
+ port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ atomic_inc(&this_adm.copp_cnt[index]);
+ return 0;
+
+fail_cmd:
+
+ return ret;
+}
+
int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
{
struct adm_cmd_device_open_v5 open;
@@ -433,7 +503,7 @@
mmap_regions->hdr.dest_port = 0;
mmap_regions->hdr.token = 0;
mmap_regions->hdr.opcode = ADM_CMD_SHARED_MEM_MAP_REGIONS;
- mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL & 0x00ff;
+ mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
mmap_regions->num_regions = bufcnt & 0x00ff;
mmap_regions->property_flag = 0x00;
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 5b30e8e..756cb18 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -144,6 +144,9 @@
case HDMI_RX:
case SLIMBUS_0_RX:
case SLIMBUS_1_RX:
+ case SLIMBUS_2_RX:
+ case SLIMBUS_3_RX:
+ case SLIMBUS_4_RX:
case INT_BT_SCO_RX:
case INT_BT_A2DP_RX:
case INT_FM_RX:
@@ -160,6 +163,9 @@
case VOICE_RECORD_TX:
case SLIMBUS_0_TX:
case SLIMBUS_1_TX:
+ case SLIMBUS_2_TX:
+ case SLIMBUS_3_TX:
+ case SLIMBUS_4_TX:
case INT_FM_TX:
case VOICE_RECORD_RX:
case INT_BT_SCO_TX:
@@ -168,6 +174,7 @@
break;
default:
+ WARN_ON(1);
pr_err("%s: invalid port id %d\n", __func__, port_id);
ret = -EINVAL;
}
@@ -255,7 +262,6 @@
ret = -EINVAL;
return ret;
}
- pr_err("%s: %d %d\n", __func__, port_id, rate);
index = q6audio_get_port_index(port_id);
if (q6audio_validate_port(port_id) < 0)
return -EINVAL;
@@ -279,11 +285,11 @@
config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
+ config.hdr.pkt_size = sizeof(config);
config.hdr.src_port = 0;
config.hdr.dest_port = 0;
-
config.hdr.token = index;
+
switch (port_id) {
case PRIMARY_I2S_RX:
case PRIMARY_I2S_TX:
@@ -320,15 +326,15 @@
goto fail_cmd;
}
config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = port_id;
- config.param.payload_size = (afe_sizeof_cfg_cmd(port_id) +
- sizeof(struct afe_port_param_data_v2));
+ config.param.port_id = q6audio_get_port_id(port_id);
+ config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+ sizeof(config.param);
config.param.payload_address_lsw = 0x00;
config.param.payload_address_msw = 0x00;
config.param.mem_map_handle = 0x00;
config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
config.pdata.param_id = cfg_type;
- config.pdata.param_size = afe_sizeof_cfg_cmd(port_id);
+ config.pdata.param_size = sizeof(config.port);
config.port = *afe_config;
@@ -348,9 +354,11 @@
start.hdr.pkt_size = sizeof(start);
start.hdr.src_port = 0;
start.hdr.dest_port = 0;
- start.hdr.token = 0;
+ start.hdr.token = index;
start.hdr.opcode = AFE_PORT_CMD_DEVICE_START;
- start.port_id = port_id;
+ start.port_id = q6audio_get_port_id(port_id);
+ pr_debug("%s: cmd device start opcode[0x%x] port id[0x%x]\n",
+ __func__, start.hdr.opcode, start.port_id);
ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
@@ -367,6 +375,45 @@
return ret;
}
+int afe_get_port_index(u16 port_id)
+{
+ switch (port_id) {
+ case PRIMARY_I2S_RX: return IDX_PRIMARY_I2S_RX;
+ case PRIMARY_I2S_TX: return IDX_PRIMARY_I2S_TX;
+ case PCM_RX: return IDX_PCM_RX;
+ case PCM_TX: return IDX_PCM_TX;
+ case SECONDARY_I2S_RX: return IDX_SECONDARY_I2S_RX;
+ case SECONDARY_I2S_TX: return IDX_SECONDARY_I2S_TX;
+ case MI2S_RX: return IDX_MI2S_RX;
+ case MI2S_TX: return IDX_MI2S_TX;
+ case HDMI_RX: return IDX_HDMI_RX;
+ case RSVD_2: return IDX_RSVD_2;
+ case RSVD_3: return IDX_RSVD_3;
+ case DIGI_MIC_TX: return IDX_DIGI_MIC_TX;
+ case VOICE_RECORD_RX: return IDX_VOICE_RECORD_RX;
+ case VOICE_RECORD_TX: return IDX_VOICE_RECORD_TX;
+ case VOICE_PLAYBACK_TX: return IDX_VOICE_PLAYBACK_TX;
+ case SLIMBUS_0_RX: return IDX_SLIMBUS_0_RX;
+ case SLIMBUS_0_TX: return IDX_SLIMBUS_0_TX;
+ case SLIMBUS_1_RX: return IDX_SLIMBUS_1_RX;
+ case SLIMBUS_1_TX: return IDX_SLIMBUS_1_TX;
+ case SLIMBUS_2_RX: return IDX_SLIMBUS_2_RX;
+ case SLIMBUS_2_TX: return IDX_SLIMBUS_2_TX;
+ case SLIMBUS_3_RX: return IDX_SLIMBUS_3_RX;
+ case INT_BT_SCO_RX: return IDX_INT_BT_SCO_RX;
+ case INT_BT_SCO_TX: return IDX_INT_BT_SCO_TX;
+ case INT_BT_A2DP_RX: return IDX_INT_BT_A2DP_RX;
+ case INT_FM_RX: return IDX_INT_FM_RX;
+ case INT_FM_TX: return IDX_INT_FM_TX;
+ case RT_PROXY_PORT_001_RX: return IDX_RT_PROXY_PORT_001_RX;
+ case RT_PROXY_PORT_001_TX: return IDX_RT_PROXY_PORT_001_TX;
+ case SLIMBUS_4_RX: return IDX_SLIMBUS_4_RX;
+ case SLIMBUS_4_TX: return IDX_SLIMBUS_4_TX;
+
+ default: return -EINVAL;
+ }
+}
+
int afe_open(u16 port_id,
union afe_port_config *afe_config, int rate)
{
@@ -867,7 +914,7 @@
mregion->hdr.dest_port = 0;
mregion->hdr.token = 0;
mregion->hdr.opcode = AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS;
- mregion->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL;
+ mregion->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
mregion->num_regions = 1;
mregion->property_flag = 0x00;
/* Todo */
@@ -946,7 +993,7 @@
mregion->hdr.dest_port = 0;
mregion->hdr.token = 0;
mregion->hdr.opcode = AFE_SERVICE_CMD_SHARED_MEM_MAP_REGIONS;
- mregion->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL;
+ mregion->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
mregion->num_regions = 1;
mregion->property_flag = 0x00;
@@ -1469,6 +1516,75 @@
return ret;
}
+int afe_validate_port(u16 port_id)
+{
+ int ret;
+
+ switch (port_id) {
+ case PRIMARY_I2S_RX:
+ case PRIMARY_I2S_TX:
+ case PCM_RX:
+ case PCM_TX:
+ case SECONDARY_I2S_RX:
+ case SECONDARY_I2S_TX:
+ case MI2S_RX:
+ case MI2S_TX:
+ case HDMI_RX:
+ case RSVD_2:
+ case RSVD_3:
+ case DIGI_MIC_TX:
+ case VOICE_RECORD_RX:
+ case VOICE_RECORD_TX:
+ case VOICE_PLAYBACK_TX:
+ case SLIMBUS_0_RX:
+ case SLIMBUS_0_TX:
+ case SLIMBUS_1_RX:
+ case SLIMBUS_1_TX:
+ case SLIMBUS_2_RX:
+ case SLIMBUS_2_TX:
+ case SLIMBUS_3_RX:
+ case INT_BT_SCO_RX:
+ case INT_BT_SCO_TX:
+ case INT_BT_A2DP_RX:
+ case INT_FM_RX:
+ case INT_FM_TX:
+ case RT_PROXY_PORT_001_RX:
+ case RT_PROXY_PORT_001_TX:
+ case SLIMBUS_4_RX:
+ case SLIMBUS_4_TX:
+ {
+ ret = 0;
+ break;
+ }
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int afe_convert_virtual_to_portid(u16 port_id)
+{
+ int ret;
+
+ /*
+ * if port_id is virtual, convert to physical..
+ * if port_id is already physical, return physical
+ */
+ if (afe_validate_port(port_id) < 0) {
+ if (port_id == RT_PROXY_DAI_001_RX ||
+ port_id == RT_PROXY_DAI_001_TX ||
+ port_id == RT_PROXY_DAI_002_RX ||
+ port_id == RT_PROXY_DAI_002_TX)
+ ret = VIRTUAL_ID_TO_PORTID(port_id);
+ else
+ ret = -EINVAL;
+ } else
+ ret = port_id;
+
+ return ret;
+}
int afe_port_stop_nowait(int port_id)
{
struct afe_port_cmd_device_stop stop;
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 0bb88e8..a3af263 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -2265,7 +2265,7 @@
q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size,
TRUE, ((ac->session << 8) | dir));
mmap_regions->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
- mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL;
+ mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
mmap_regions->num_regions = bufcnt & 0x00ff;
mmap_regions->property_flag = 0x00;
payload = ((u8 *) mmap_region_cmd +
@@ -2408,7 +2408,7 @@
mmap_regions, ((ac->session << 8) | dir));
mmap_regions->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
- mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_EBI_POOL;
+ mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
mmap_regions->num_regions = 1; /*bufcnt & 0x00ff; */
mmap_regions->property_flag = 0x00;
pr_debug("map_regions->nregions = %d\n", mmap_regions->num_regions);
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
new file mode 100644
index 0000000..2c31d39
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -0,0 +1,211 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <mach/msm_smd.h>
+#include <mach/qdsp6v2/apr.h>
+#include "q6core.h"
+#include <mach/ocmem.h>
+
+#define TIMEOUT_MS 1000
+
+struct q6core_str {
+ struct apr_svc *core_handle_q;
+ wait_queue_head_t bus_bw_req_wait;
+ u32 bus_bw_resp_received;
+ struct avcs_cmd_rsp_get_low_power_segments_info_t *lp_ocm_payload;
+};
+
+struct q6core_str q6core_lcl;
+
+static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
+{
+ uint32_t *payload1;
+ uint32_t nseg;
+ int i, j;
+
+ pr_info("core msg: payload len = %u, apr resp opcode = 0x%X\n",
+ data->payload_size, data->opcode);
+
+ switch (data->opcode) {
+
+ case APR_BASIC_RSP_RESULT:{
+
+ if (data->payload_size == 0) {
+ pr_err("%s: APR_BASIC_RSP_RESULT No Payload ",
+ __func__);
+ return 0;
+ }
+
+ payload1 = data->payload;
+
+ switch (payload1[0]) {
+
+ case AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO:
+ pr_info("%s: Cmd = AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO status[0x%x]\n",
+ __func__, payload1[1]);
+ break;
+ default:
+ pr_err("Invalid cmd rsp[0x%x][0x%x]\n",
+ payload1[0], payload1[1]);
+ break;
+ }
+ break;
+ }
+
+ case AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO:
+ payload1 = data->payload;
+ pr_info("%s: cmd = AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO num_segments = 0x%x\n",
+ __func__, payload1[0]);
+ nseg = payload1[0];
+ q6core_lcl.lp_ocm_payload->num_segments = nseg;
+ q6core_lcl.lp_ocm_payload->bandwidth = payload1[1];
+ for (i = 0, j = 2; i < nseg; i++) {
+ q6core_lcl.lp_ocm_payload->mem_segment[i].type =
+ (payload1[j] & 0xffff);
+ q6core_lcl.lp_ocm_payload->mem_segment[i].category =
+ ((payload1[j++] >> 16) & 0xffff);
+ q6core_lcl.lp_ocm_payload->mem_segment[i].size =
+ payload1[j++];
+ q6core_lcl.lp_ocm_payload->
+ mem_segment[i].start_address_lsw =
+ payload1[j++];
+ q6core_lcl.lp_ocm_payload->
+ mem_segment[i].start_address_msw =
+ payload1[j++];
+ }
+
+ q6core_lcl.bus_bw_resp_received = 1;
+ wake_up(&q6core_lcl.bus_bw_req_wait);
+ break;
+
+ case RESET_EVENTS:{
+ pr_debug("Reset event received in Core service");
+ apr_reset(q6core_lcl.core_handle_q);
+ q6core_lcl.core_handle_q = NULL;
+ break;
+ }
+
+ default:
+ pr_err("Message id from adsp core svc: %d\n", data->opcode);
+ break;
+ }
+
+ return 0;
+}
+
+
+void ocm_core_open(void)
+{
+ if (q6core_lcl.core_handle_q == NULL)
+ q6core_lcl.core_handle_q = apr_register("ADSP", "CORE",
+ aprv2_core_fn_q, 0xFFFFFFFF, NULL);
+ pr_debug("Open_q %p\n", q6core_lcl.core_handle_q);
+ if (q6core_lcl.core_handle_q == NULL)
+ pr_err("%s: Unable to register CORE\n", __func__);
+}
+
+int core_get_low_power_segments(
+ struct avcs_cmd_rsp_get_low_power_segments_info_t **lp_memseg)
+{
+ struct avcs_cmd_get_low_power_segments_info lp_ocm_cmd;
+ u8 *cptr = NULL;
+ int ret = 0;
+
+ pr_debug("%s: ", __func__);
+
+ ocm_core_open();
+ if (q6core_lcl.core_handle_q == NULL) {
+ pr_info("%s: apr registration for CORE failed\n", __func__);
+ return -ENODEV;
+ }
+
+ cptr = kzalloc(
+ sizeof(struct avcs_cmd_rsp_get_low_power_segments_info_t),
+ GFP_KERNEL);
+ if (!cptr) {
+ pr_err("%s: Failed to allocate memory for low power segment struct\n",
+ __func__);
+ return -ENOMEM;
+ }
+ q6core_lcl.lp_ocm_payload =
+ (struct avcs_cmd_rsp_get_low_power_segments_info_t *) cptr;
+
+ lp_ocm_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ lp_ocm_cmd.hdr.pkt_size =
+ sizeof(struct avcs_cmd_get_low_power_segments_info);
+
+ lp_ocm_cmd.hdr.src_port = 0;
+ lp_ocm_cmd.hdr.dest_port = 0;
+ lp_ocm_cmd.hdr.token = 0;
+ lp_ocm_cmd.hdr.opcode = AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO;
+
+
+ ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &lp_ocm_cmd);
+ if (ret < 0) {
+ pr_err("%s: CORE low power segment request failed\n", __func__);
+ goto fail_cmd;
+ }
+
+ ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
+ (q6core_lcl.bus_bw_resp_received == 1),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout for GET_LOW_POWER_SEGMENTS\n",
+ __func__);
+ ret = -ETIME;
+ goto fail_cmd;
+ }
+
+ *lp_memseg = q6core_lcl.lp_ocm_payload;
+ return 0;
+
+fail_cmd:
+ return ret;
+}
+
+
+static int __init core_init(void)
+{
+ init_waitqueue_head(&q6core_lcl.bus_bw_req_wait);
+ q6core_lcl.bus_bw_resp_received = 0;
+
+ q6core_lcl.core_handle_q = NULL;
+ q6core_lcl.lp_ocm_payload = kzalloc(
+ sizeof(struct avcs_cmd_rsp_get_low_power_segments_info_t), GFP_KERNEL);
+
+ if (!q6core_lcl.lp_ocm_payload) {
+ pr_err("%s: Failed to allocate memory for low power segment struct\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+module_init(core_init);
+
+static void __exit core_exit(void)
+{
+ kfree(q6core_lcl.lp_ocm_payload);
+}
+module_exit(core_exit);
+MODULE_DESCRIPTION("ADSP core driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/sound/soc/msm/qdsp6v2/q6core.h b/sound/soc/msm/qdsp6v2/q6core.h
new file mode 100644
index 0000000..5cb6098
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6core.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __Q6CORE_H__
+#define __Q6CORE_H__
+#include <mach/qdsp6v2/apr.h>
+#include <mach/ocmem.h>
+
+
+#define AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO 0x00012903
+
+struct avcs_cmd_get_low_power_segments_info {
+ struct apr_hdr hdr;
+} __packed;
+
+
+#define AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO 0x00012904
+
+/* @brief AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO payload
+ * structure. Payload for this event comprises one instance of
+ * avcs_cmd_rsp_get_low_power_segments_info_t, followed
+ * immediately by num_segments number of instances of the
+ * avcs_mem_segment_t structure.
+ */
+
+/* Types of Low Power Memory Segments. */
+#define READ_ONLY_SEGMENT 1
+/*< Read Only memory segment. */
+#define READ_WRITE_SEGMENT 2
+/*< Read Write memory segment. */
+/*Category indicates whether audio/os/sensor segments. */
+#define AUDIO_SEGMENT 1
+/*< Audio memory segment. */
+#define OS_SEGMENT 2
+/*< QDSP6's OS memory segment. */
+
+/* @brief Payload structure for AVS low power memory segment
+ * structure.
+ */
+struct avcs_mem_segment_t {
+ uint16_t type;
+/*< Indicates which type of memory this segment is.
+ *Allowed values: READ_ONLY_SEGMENT or READ_WRITE_SEGMENT only.
+ */
+ uint16_t category;
+/*< Indicates whether audio or OS segments.
+ *Allowed values: AUDIO_SEGMENT or OS_SEGMENT only.
+ */
+ uint32_t size;
+/*< Size (in bytes) of this segment.
+ * Will be a non-zero value.
+ */
+ uint32_t start_address_lsw;
+/*< Lower 32 bits of the 64-bit physical start address
+ * of this segment.
+ */
+ uint32_t start_address_msw;
+/*< Upper 32 bits of the 64-bit physical start address
+ * of this segment.
+ */
+};
+
+struct avcs_cmd_rsp_get_low_power_segments_info_t {
+ uint32_t num_segments;
+/*< Number of segments in this response.
+ * 0: there are no known sections that should be mapped
+ * from DDR to OCMEM.
+ * >0: the number of memory segments in the following list.
+ */
+
+ uint32_t bandwidth;
+/*< Required OCMEM read/write bandwidth (in bytes per second)
+ * if OCMEM is granted.
+ * 0 if num_segments = 0
+ * >0 if num_segments > 0.
+ */
+ struct avcs_mem_segment_t mem_segment[OCMEM_MAX_CHUNKS];
+};
+
+
+int core_get_low_power_segments(
+ struct avcs_cmd_rsp_get_low_power_segments_info_t **);
+
+#endif /* __Q6CORE_H__ */
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index d4ce733..704d63a 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -148,7 +148,7 @@
return 0;
if (output[type].user_set) {
- evname = __event_name(attr->type, attr->config);
+ evname = __event_name(attr->type, attr->config, NULL);
pr_err("Samples for '%s' event do not have %s attribute set. "
"Cannot print '%s' field.\n",
evname, sample_msg, output_field2str(field));
@@ -157,7 +157,7 @@
/* user did not ask for it explicitly so remove from the default list */
output[type].fields &= ~field;
- evname = __event_name(attr->type, attr->config);
+ evname = __event_name(attr->type, attr->config, NULL);
pr_debug("Samples for '%s' event do not have %s attribute set. "
"Skipping '%s' field.\n",
evname, sample_msg, output_field2str(field));
@@ -305,7 +305,7 @@
if (event)
evname = event->name;
} else
- evname = __event_name(attr->type, attr->config);
+ evname = __event_name(attr->type, attr->config, NULL);
printf("%s: ", evname ? evname : "[unknown]");
}
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 5b3a0ef..be2e0c5 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -301,10 +301,10 @@
if (evsel->name)
return evsel->name;
- return __event_name(type, config);
+ return __event_name(type, config, NULL);
}
-const char *__event_name(int type, u64 config)
+const char *__event_name(int type, u64 config, char *pmu_name)
{
static char buf[32];
@@ -349,7 +349,12 @@
return tracepoint_id_to_name(config);
default:
- break;
+ if (pmu_name) {
+ snprintf(buf, sizeof(buf), "%s 0x%" PRIx64, pmu_name,
+ config);
+ return buf;
+ } else
+ break;
}
return "unknown";
@@ -630,6 +635,32 @@
return 0;
}
+int parse_events_add_numeric_legacy(struct list_head *list, int *idx,
+ const char *name, unsigned long config,
+ struct list_head *head_config)
+{
+ struct perf_event_attr attr;
+ struct perf_pmu *pmu;
+ char *pmu_name = strdup(name);
+
+ memset(&attr, 0, sizeof(attr));
+
+ pmu = perf_pmu__find(pmu_name);
+
+ if (!pmu)
+ return -EINVAL;
+
+ attr.type = pmu->type;
+ attr.config = config;
+
+ if (head_config &&
+ config_attr(&attr, head_config, 1))
+ return -EINVAL;
+
+ return add_event(list, idx, &attr,
+ (char *) __event_name(pmu->type, config, pmu_name));
+}
+
int parse_events_add_numeric(struct list_head *list, int *idx,
unsigned long type, unsigned long config,
struct list_head *head_config)
@@ -645,7 +676,7 @@
return -EINVAL;
return add_event(list, idx, &attr,
- (char *) __event_name(type, config));
+ (char *) __event_name(type, config, NULL));
}
int parse_events_add_pmu(struct list_head *list, int *idx,
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index ca069f8..4da2f3c 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -23,7 +23,7 @@
const char *event_type(int type);
const char *event_name(struct perf_evsel *event);
-extern const char *__event_name(int type, u64 config);
+extern const char *__event_name(int type, u64 config, char *name);
extern int parse_events_option(const struct option *opt, const char *str,
int unset);
@@ -67,6 +67,10 @@
int parse_events_add_raw(struct perf_evlist *evlist, unsigned long config,
unsigned long config1, unsigned long config2,
char *mod);
+int parse_events_add_numeric_legacy(struct list_head *list, int *idx,
+ const char *name, unsigned long config,
+ struct list_head *head_config);
+
int parse_events_add_numeric(struct list_head *list, int *idx,
unsigned long type, unsigned long config,
struct list_head *head_config);
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 07b292d..581cd94 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -156,13 +156,13 @@
event_legacy_shared_raw:
PE_SH_RAW
{
- ABORT_ON(parse_events_add_numeric(list_event, idx, 6, $1, NULL));
+ ABORT_ON(parse_events_add_numeric_legacy(list_event, idx, "msm-l2", $1, NULL));
}
event_legacy_fabric_raw:
PE_FAB_RAW
{
- ABORT_ON(parse_events_add_numeric(list_event, idx, 7, $1, NULL));
+ ABORT_ON(parse_events_add_numeric_legacy(list_event, idx, "msm-busmon", $1, NULL));
}
event_config: