Merge "radio: iris: Validate whether the current station is good or bad" into msm-3.4
diff --git a/Documentation/devicetree/bindings/arm/msm/memory-reserve.txt b/Documentation/devicetree/bindings/arm/msm/memory-reserve.txt
new file mode 100644
index 0000000..068e256
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/memory-reserve.txt
@@ -0,0 +1,42 @@
+* Memory reservations for MSM targets
+
+Large contiguous allocations (generally sizes greater than 64KB) must be
+allocated from a carved out memory pool. The size of the carved out pools
+is based on the sizes drivers need. To properly size the pools, devices
+must specify the size and type of the memory needed. Any driver wanting to
+allocate contiguous memory should indicate this via device tree bindings:
+
+Required parameters:
+- qcom,memory-reservation-type: type of memory to be reserved. This is a
+string defined in arch/arm/mach-msm/memory.c
+- qcom,memory-reservation-size: size of memory to be reserved
+
+Example:
+
+ qcom,a-driver {
+ compatible = "qcom,a-driver";
+	qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+ qcom,memory-reservation-size = <0x400000>; /* size 4MB */
+ };
+
+Under some circumstances, it may be necessary to remove a chunk of memory
+from the kernel completely using memblock_remove. Note this is different
+from adjusting the memory tags passed in by the bootloader, as the virtual
+address range is not affected. Any driver needing to remove a block of
+memory should add the appropriate binding:
+
+Required parameters:
+- qcom,memblock-remove: base and size of block to be removed
+
+ qcom,a-driver {
+ compatible = "qcom,a-driver";
+ /* Remove 4MB at 0x200000*/
+ qcom,memblock-remove = <0x200000 0x400000>;
+ };
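+
+For illustration only (this is not part of the binding), platform code that
+honors this property might look roughly like the sketch below. The node
+lookup by compatible string and the call site are assumptions, not the
+actual mach-msm implementation:
+
+	#include <linux/of.h>
+	#include <linux/memblock.h>
+
+	static void __init a_driver_remove_memory(void)
+	{
+		struct device_node *node;
+		u32 area[2];	/* <base size> from qcom,memblock-remove */
+
+		node = of_find_compatible_node(NULL, NULL, "qcom,a-driver");
+		if (!node)
+			return;
+
+		/* Must run early, before the removed range is handed to the
+		 * page allocator. */
+		if (!of_property_read_u32_array(node, "qcom,memblock-remove",
+						area, 2))
+			memblock_remove(area[0], area[1]);
+	}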
+
+In order to ensure memory is only reserved when a driver is actually enabled,
+drivers are required to add EXPORT_COMPAT(<name of compatible string>)
+somewhere in the driver. For the examples above, the driver must add
+EXPORT_COMPAT("qcom,a-driver"), similar to EXPORT_SYMBOL. EXPORT_COMPAT
+ensures that memory is only carved out if the driver is actually enabled;
+otherwise the memory would be reserved but never used.
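+
+A minimal driver-side sketch of the EXPORT_COMPAT() usage described above
+(the header providing the macro is an assumption on this tree):
+
+	#include <linux/module.h>
+	#include <linux/of.h>
+	#include <linux/platform_device.h>
+	#include <mach/memory.h>	/* assumed home of EXPORT_COMPAT() */
+
+	static int a_driver_probe(struct platform_device *pdev)
+	{
+		return 0;	/* real device setup omitted */
+	}
+
+	static const struct of_device_id a_driver_match[] = {
+		{ .compatible = "qcom,a-driver" },
+		{}
+	};
+
+	static struct platform_driver a_driver = {
+		.probe = a_driver_probe,
+		.driver = {
+			.name = "a-driver",
+			.owner = THIS_MODULE,
+			.of_match_table = a_driver_match,
+		},
+	};
+	module_platform_driver(a_driver);
+
+	/* Ties the "qcom,a-driver" carveout to this driver actually being
+	 * built, so the memory is not reserved for a disabled driver. */
+	EXPORT_COMPAT("qcom,a-driver");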
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt b/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt
new file mode 100644
index 0000000..7b8642b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt
@@ -0,0 +1,21 @@
+Register Trace Buffer (RTB)
+
+The RTB is used to log discrete events in the system to an uncached buffer
+that can be post-processed from RAM dumps. The RTB must reserve memory using
+the MSM-specific memory reservation bindings (see
+Documentation/devicetree/bindings/arm/msm/memory-reserve.txt).
+
+Required properties
+
+- compatible: "qcom,msm-rtb"
+- qcom,memory-reservation-size: size of reserved memory for the RTB buffer
+- qcom,memory-reservation-type: type of memory to be reserved
+(see memory-reserve.txt for information about memory reservations)
+
+Example:
+
+ qcom,msm-rtb {
+ compatible = "qcom,msm-rtb";
+ qcom,memory-reservation-type = "EBI1";
+ qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt b/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
index 82935ed..93b5144 100644
--- a/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
+++ b/Documentation/devicetree/bindings/arm/msm/rpm-regulator-smd.txt
@@ -140,12 +140,25 @@
corner values supported on MSM8974 for PMIC
PM8841 SMPS 2 (VDD_Dig); nominal voltages for
these corners are also shown:
- 0 = Retention (0.5000 V)
- 1 = SVS Krait (0.7250 V)
- 2 = SVS SOC (0.8125 V)
- 3 = Normal (0.9000 V)
- 4 = Turbo (0.9875 V)
- 5 = Super Turbo (1.0500 V)
+ 0 = None (don't care)
+ 1 = Retention (0.5000 V)
+ 2 = SVS Krait (0.7250 V)
+ 3 = SVS SOC (0.8125 V)
+ 4 = Normal (0.9000 V)
+ 5 = Turbo (0.9875 V)
+ 6 = Super Turbo (1.0500 V)
+- qcom,init-disallow-bypass: Specify that bypass mode should not be used for a
+ given LDO regulator. When in bypass mode, an
+ LDO performs no regulation and acts as a simple
+ switch. The RPM can utilize this mode for an
+ LDO that is subregulated from an SMPS when it is
+ possible to reduce the SMPS voltage to the
+ desired LDO output level. Bypass mode may be
+ disallowed if lower LDO output noise is
+ required. Supported values are:
+ 0 = Allow RPM to utilize LDO bypass mode
+ if possible
+ 1 = Disallow LDO bypass mode
All properties specified within the core regulator framework can also be used in
second level nodes. These bindings can be found in:
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
new file mode 100644
index 0000000..c50a6c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
@@ -0,0 +1,18 @@
+* QCEDEV (Qualcomm Crypto Engine Device)
+
+Required properties:
+ - compatible : should be "qcom,qcedev"
+ - reg : should contain the crypto and BAM register maps.
+ - interrupts : should contain the crypto BAM interrupt.
+ - qcom,bam-pipe-pair : should contain the crypto BAM pipe pair index.
+
+Example:
+
+ qcom,qcedev@fd440000 {
+ compatible = "qcom,qcedev";
+ reg = <0xfd440000 0x20000>,
+ <0xfd444000 0x8000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 235 0>;
+ qcom,bam-pipe-pair = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
new file mode 100644
index 0000000..1b0f703
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -0,0 +1,18 @@
+* QCRYPTO (Qualcomm Crypto)
+
+Required properties:
+ - compatible : should be "qcom,qcrypto"
+ - reg : should contain the crypto and BAM register maps.
+ - interrupts : should contain the crypto BAM interrupt.
+ - qcom,bam-pipe-pair : should contain the crypto BAM pipe pair index.
+
+Example:
+
+ qcom,qcrypto@fd444000 {
+ compatible = "qcom,qcrypto";
+ reg = <0xfd440000 0x20000>,
+ <0xfd444000 0x8000>;
+ reg-names = "crypto-base","crypto-bam-base";
+ interrupts = <0 235 0>;
+ qcom,bam-pipe-pair = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/iommu/msm_iommu.txt b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
index 67933e7..c198fe9 100644
--- a/Documentation/devicetree/bindings/iommu/msm_iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
@@ -13,12 +13,15 @@
- qcom,iommu-ctx-sids : List of stream identifiers associated with this
translation context.
- qcom,iommu-ctx-name : Name of the context bank
+ - qcom,iommu-smt-size : Number of SMR entries in the SMT of this HW block
+ - vdd-supply : phandle to the GDSC regulator controlling this IOMMU.
Example:
qcom,iommu@fda64000 {
compatible = "qcom,msm-smmu-v2";
reg = <0xfda64000 0x10000>;
+ vdd-supply = <&gdsc_iommu>;
qcom,iommu-ctx@fda6c000 {
reg = <0xfda6c000 0x1000>;
diff --git a/Documentation/devicetree/bindings/ocmem/msm-ocmem.txt b/Documentation/devicetree/bindings/ocmem/msm-ocmem.txt
new file mode 100644
index 0000000..1549f10
--- /dev/null
+++ b/Documentation/devicetree/bindings/ocmem/msm-ocmem.txt
@@ -0,0 +1,70 @@
+Qualcomm MSM On-Chip Memory Driver
+
+msm-ocmem is a driver for managing On-Chip Memory (OCMEM) in MSM SoCs.
+It is responsible for allowing various clients to allocate memory from
+OCMEM based on performance, latency and power requirements.
+
+Required Properties:
+- compatible: Must be "qcom,msm-ocmem"
+- reg: Four pairs of physical base addresses and region sizes
+ of memory mapped registers.
+- reg-names : Register region name(s) referenced in reg above
+ "ocmem_ctrl_physical" corresponds to OCMEM control registers.
+ "dm_ctrl_physical" corresponds to DM control registers.
+ "br_ctrl_physical" corresponds to BR control registers.
+ "ocmem_physical" corresponds to address range of OCMEM memory.
+- interrupts: OCMEM core interrupt(s).
+- interrupt-names: OCMEM core interrupt name(s) referenced in interrupts above
+ "ocmem_irq" corresponds to OCMEM Error Interrupt.
+ "dm_irq" corresponds to DM Interrupt.
+- qcom,ocmem-num-regions: The number of OCMEM hardware memory regions.
+
+In addition to the information on the OCMEM core, the
+device tree contains additional information describing partitions
+of the OCMEM address space. This is used to establish regions
+of OCMEM that are used for each potential client. The partitions
+can overlap and the OCMEM driver ensures that there is no possibility
+of concurrent access from more than one client to the same address range.
+This allows the OCMEM driver to maximize the usage of OCMEM at all times.
+
+Each partition is represented as a sub-node of the OCMEM device.
+
+OCMEM partitions
+
+Required Properties:
+ - reg : The partition's offset and size within OCMEM.
+ - qcom,ocmem-part-name : The name for this partition.
+ - qcom,ocmem-part-min: The minimum amount of memory reserved exclusively for
+ this client.
+Optional Properties:
+ - qcom,ocmem-part-tail : This parameter, if present, indicates that successive
+                          allocations from this partition must be allocated at
+                          lower offsets.
+Example:
+
+ qcom,ocmem@fdd00000 {
+ reg = <0xfdd00000 0x2000>,
+ <0xfdd02000 0x2000>,
+ <0xfe039000 0x400>,
+ <0xfec00000 0x180000>;
+ reg-names = "ocmem_ctrl_physical", "dm_ctrl_physical", "br_ctrl_physical", "ocmem_physical";
+ interrupts = <0 76 0 0 77 0>;
+ interrupt-names = "ocmem_irq", "dm_irq";
+ qcom,ocmem-num-regions = <0x3>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xfec00000 0x180000>;
+
+ partition@0 {
+ reg = <0x0 0x100000>;
+ qcom,ocmem-part-name = "graphics";
+ qcom,ocmem-part-min = <0x80000>;
+ };
+
+ partition@100000 {
+ reg = <0x100000 0x80000>;
+ qcom,ocmem-part-name = "video";
+ qcom,ocmem-part-min = <0x55000>;
+ };
+
+ };
diff --git a/Documentation/devicetree/bindings/platform/msm/qpnp-power-on.txt b/Documentation/devicetree/bindings/platform/msm/qpnp-power-on.txt
new file mode 100644
index 0000000..2e7f9c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/qpnp-power-on.txt
@@ -0,0 +1,39 @@
+Qualcomm QPNP power-on
+
+The qpnp-power-on is a driver which supports the power-on(PON)
+peripheral on Qualcomm PMICs. The supported functionality includes
+power on/off reason, power-key press/release detection and other PON
+features. This peripheral is connected to the host processor via the SPMI
+interface.
+
+Required properties:
+- compatible: Must be "qcom,qpnp-power-on"
+- reg: Specifies the SPMI address and size for this PON (power-on) peripheral
+- interrupts: Specifies the interrupt associated with the power-key.
+
+Optional properties:
+- qcom,pon-key-enable: Enable power-key detection. It enables monitoring
+ of the KPDPWR_N line (connected to the power-key).
+- qcom,pon-key-dbc-delay: The debounce delay for the power-key interrupt
+			specified in us. The value ranges from 2 seconds
+			to 1/64 of a second. Possible values are -
+			- 2, 1, 1/2, 1/4, 1/8, 1/16, 1/32, 1/64
+			- Intermediate values are rounded down to the
+			nearest valid value (see the rounding sketch
+			below the optional properties).
+- qcom,pon-key-pull-up: The initial state of the KPDPWR_N pin
+ (connected to the power-key)
+ 0 = No pull-up
+ 1 = pull-up enabled
+
+If any of the above optional properties is not defined, the driver will
+continue with the default hardware state.
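+
+Illustrative only: the rounding described for qcom,pon-key-dbc-delay can be
+modeled as picking the largest supported debounce period that does not
+exceed the requested value (all figures in microseconds). This is a sketch
+of the documented behavior, not the driver's code:
+
+	/* Supported debounce periods, 2 s down to 1/64 s, in us. */
+	static const unsigned int pon_dbc_us[] = {
+		2000000, 1000000, 500000, 250000,
+		125000, 62500, 31250, 15625,
+	};
+
+	static unsigned int pon_dbc_round_down(unsigned int req_us)
+	{
+		unsigned int i;
+
+		for (i = 0; i < sizeof(pon_dbc_us) / sizeof(pon_dbc_us[0]); i++)
+			if (pon_dbc_us[i] <= req_us)
+				return pon_dbc_us[i];
+		/* Below-range requests assumed to clamp to 1/64 s. */
+		return 15625;
+	}
+
+	/* e.g. pon_dbc_round_down(100000) == 62500 (1/16 of a second) */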
+
+Example:
+ qcom,power-on@800 {
+ compatible = "qcom,qpnp-power-on";
+ reg = <0x800 0x100>;
+ interrupts = <0x0 0x8 0x1>;
+		qcom,pon-key-enable = <1>;
+		qcom,pon-key-pull-up = <1>;
+		qcom,pon-key-dbc-delay = <15625>;
+	};
diff --git a/Documentation/devicetree/bindings/rtc/qpnp-rtc.txt b/Documentation/devicetree/bindings/rtc/qpnp-rtc.txt
new file mode 100644
index 0000000..156141f
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/qpnp-rtc.txt
@@ -0,0 +1,64 @@
+* msm-qpnp-rtc
+
+msm-qpnp-rtc is an RTC driver that supports the 32-bit RTC housed inside the
+PMIC. The driver uses the MSM SPMI interface to communicate with the RTC
+module. The RTC device is divided into two sub-peripherals: one controls the
+basic RTC and the other controls the alarm.
+
+[PMIC RTC Device Declarations]
+
+-Root Node-
+
+Required properties :
+ - compatible: Must be "qcom,qpnp-rtc"
+ - #address-cells: The number of cells dedicated to represent an address
+ This must be set to '1'.
+ - #size-cells: The number of cells dedicated to represent address
+ space range of a peripheral. This must be set to '1'.
+ - spmi-dev-container: This specifies that all the device nodes specified
+ within this node should have their resources
+ coalesced into a single spmi_device.
+
+Optional properties:
+ - qcom,qpnp-rtc-write: This property enables/disables RTC write
+				operations. If not mentioned, the driver keeps
+				RTC writes disabled.
+				0 = Disable RTC writes.
+				1 = Enable RTC writes.
+ - qcom,qpnp-rtc-alarm-pwrup: This property enables/disables the feature of
+				powering up the phone (from the powered-down
+				state) through the alarm interrupt.
+				If not mentioned, the driver disables
+				powering up the phone through the alarm.
+				0 = Disable powering up of the phone through
+				the alarm interrupt.
+				1 = Enable powering up of the phone through
+				the alarm interrupt.
+
+-Child Nodes-
+
+Required properties :
+ - reg : Specifies the SPMI offset and size for the device.
+ - interrupts: Specifies the alarm interrupt; required only for the
+				rtc_alarm sub-peripheral.
+
+Example:
+ qcom,pm8941_rtc {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-rtc";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ qcom,qpnp-rtc-write = <0>;
+ qcom,qpnp-rtc-alarm-pwrup = <0>;
+
+ qcom,pm8941_rtc_rw@6000 {
+ reg = <0x6000 0x100>;
+ };
+
+ qcom,pm8941_rtc_alarm@6100 {
+ reg = <0x6100 0x100>;
+ interrupts = <0x0 0x61 0x1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/thermal/tsens.txt b/Documentation/devicetree/bindings/thermal/tsens.txt
new file mode 100644
index 0000000..c683f58
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/tsens.txt
@@ -0,0 +1,42 @@
+Qualcomm's TSENS driver
+
+The TSENS driver supports reading temperature from sensors across
+the MSM. The driver defaults to supporting a 10-bit ADC.
+
+The driver uses the Thermal sysfs framework to provide thermal
+clients the ability to enable/disable the sensors, read trip zones,
+read cool/warm temperature thresholds, set temperature thresholds
+for cool/warm notification and receive notification on temperature
+threshold events.
+
+TSENS node
+
+Required properties:
+- compatible : should be "qcom,msm-tsens" for MSM8974 TSENS driver.
+- reg : offset and length of the TSENS registers, followed by the offset and
+	length of the QFPROM registers used for storing the calibration data
+	for the individual sensors.
+- reg-names : resource names used for the physical address of the TSENS
+ registers and the QFPROM efuse calibration address.
+ Should be "tsens_physical" for physical address of the TSENS
+ and "tsens_eeprom_physical" for physical address where calibration
+ data is stored.
+- interrupts : TSENS interrupt for cool/warm temperature threshold.
+- qcom,sensors : Total number of available Temperature sensors for TSENS.
+- qcom,slope : One-point calibration characterized slope data for each
+		sensor, used to compute the offset. Slope is represented as
+		ADC code/DegC and the value is multiplied by a factor
+		of 1000.
+
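+Purely as an illustration of the units above (slope in ADC code/DegC scaled
+by 1000, with a one-point calibration reading used to derive the offset);
+this is not the driver's actual conversion code, and the calibration numbers
+are hypothetical:
+
+	/* Hypothetical: slope = 1134 (1.134 codes/DegC), calibration
+	 * reading of 600 codes taken at 30 DegC. */
+	static int tsens_offset(int slope, int cal_code, int cal_degc)
+	{
+		return cal_code - (slope * cal_degc) / 1000;
+	}
+
+	static int tsens_code_to_degc(int code, int slope, int offset)
+	{
+		return ((code - offset) * 1000) / slope;
+	}
+
+	/*
+	 * offset = 600 - (1134 * 30) / 1000 = 566
+	 * code 623 -> ((623 - 566) * 1000) / 1134 ~= 50 DegC
+	 */
+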
+Example:
+
+tsens@fc4a8000 {
+ compatible = "qcom,msm-tsens";
+ reg = <0xfc4a8000 0x2000>,
+ <0xfc4b80d0 0x5>;
+ reg-names = "tsens_physical", "tsens_eeprom_physical";
+ interrupts = <0 184 0>;
+ qcom,sensors = <11>;
+	qcom,slope = <1134 1122 1142 1123 1176 1176 1176 1186 1176
+			1176 1176>;
+};
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index 87864fd..f462a1e 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -22,6 +22,16 @@
#address-cells = <1>;
#size-cells = <1>;
+ qcom,power-on@800 {
+ compatible = "qcom,qpnp-power-on";
+ reg = <0x800 0x100>;
+ interrupts = <0x0 0x8 0x0>;
+ interrupt-names = "power-key";
+ qcom,pon-key-enable = <1>;
+ qcom,pon-key-dbc-delay = <15625>;
+ qcom,pon-key-pull-up = <1>;
+ };
+
pm8941_gpios {
spmi-dev-container;
compatible = "qcom,qpnp-pin";
diff --git a/arch/arm/boot/dts/msm8974-iommu.dtsi b/arch/arm/boot/dts/msm8974-iommu.dtsi
index e1a0a9b..a115fd8 100755
--- a/arch/arm/boot/dts/msm8974-iommu.dtsi
+++ b/arch/arm/boot/dts/msm8974-iommu.dtsi
@@ -18,6 +18,7 @@
ranges;
reg = <0xfda64000 0x10000>;
vdd-supply = <&gdsc_jpeg>;
+ qcom,iommu-smt-size = <16>;
qcom,iommu-ctx@fda6c000 {
reg = <0xfda6c000 0x1000>;
@@ -46,6 +47,7 @@
ranges;
reg = <0xfd928000 0x10000>;
vdd-supply = <&gdsc_mdss>;
+ qcom,iommu-smt-size = <16>;
qcom,iommu-ctx@fd930000 {
reg = <0xfd930000 0x1000>;
@@ -68,6 +70,7 @@
ranges;
reg = <0xfdc84000 0x10000>;
vdd-supply = <&gdsc_venus>;
+ qcom,iommu-smt-size = <16>;
qcom,iommu-ctx@fdc8c000 {
reg = <0xfdc8c000 0x1000>;
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index f0c635e..91894de 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -109,7 +109,7 @@
regulator-name = "8841_s2_corner";
qcom,set = <3>;
regulator-min-microvolt = <1>;
- regulator-max-microvolt = <6>;
+ regulator-max-microvolt = <7>;
qcom,use-voltage-corner;
compatible = "qcom,rpm-regulator-smd";
qcom,consumer-supplies = "vdd_dig", "";
@@ -118,7 +118,7 @@
regulator-name = "8841_s2_corner_ao";
qcom,set = <1>;
regulator-min-microvolt = <1>;
- regulator-max-microvolt = <6>;
+ regulator-max-microvolt = <7>;
qcom,use-voltage-corner;
compatible = "qcom,rpm-regulator-smd";
};
diff --git a/arch/arm/boot/dts/msm8974-rumi.dts b/arch/arm/boot/dts/msm8974-rumi.dts
index b179d94..2cf68b8 100644
--- a/arch/arm/boot/dts/msm8974-rumi.dts
+++ b/arch/arm/boot/dts/msm8974-rumi.dts
@@ -103,4 +103,8 @@
qcom,pronto@fb21b000 {
status = "disable";
};
+
+ qcom,mss@fc880000 {
+ status = "disable";
+ };
};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 54f6863..8426b52 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -420,7 +420,48 @@
};
qcom,ocmem@fdd00000 {
- compatible = "qcom,msm_ocmem";
+ compatible = "qcom,msm-ocmem";
+ reg = <0xfdd00000 0x2000>,
+ <0xfdd02000 0x2000>,
+ <0xfe039000 0x400>,
+ <0xfec00000 0x180000>;
+ reg-names = "ocmem_ctrl_physical", "dm_ctrl_physical", "br_ctrl_physical", "ocmem_physical";
+ interrupts = <0 76 0 0 77 0>;
+ interrupt-names = "ocmem_irq", "dm_irq";
+ qcom,ocmem-num-regions = <0x3>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xfec00000 0x180000>;
+
+ partition@0 {
+ reg = <0x0 0x100000>;
+ qcom,ocmem-part-name = "graphics";
+ qcom,ocmem-part-min = <0x80000>;
+ };
+
+ partition@80000 {
+ reg = <0x80000 0xA0000>;
+ qcom,ocmem-part-name = "lp_audio";
+ qcom,ocmem-part-min = <0xA0000>;
+ };
+
+		partition@120000 {
+ reg = <0x120000 0x20000>;
+ qcom,ocmem-part-name = "blast";
+ qcom,ocmem-part-min = <0x20000>;
+ };
+
+ partition@100000 {
+ reg = <0x100000 0x80000>;
+ qcom,ocmem-part-name = "video";
+ qcom,ocmem-part-min = <0x55000>;
+ };
+
+ partition@140000 {
+ reg = <0x140000 0x40000>;
+ qcom,ocmem-part-name = "sensors";
+ qcom,ocmem-part-min = <0x40000>;
+ };
};
rpm_bus: qcom,rpm-smd {
@@ -477,6 +518,17 @@
qcom,firmware-min-paddr = <0xF500000>;
qcom,firmware-max-paddr = <0xFA00000>;
};
+
+ tsens@fc4a8000 {
+ compatible = "qcom,msm-tsens";
+ reg = <0xfc4a8000 0x2000>,
+ <0xfc4b80d0 0x5>;
+ reg-names = "tsens_physical", "tsens_eeprom_physical";
+ interrupts = <0 184 0>;
+ qcom,sensors = <11>;
+ qcom,slope = <1134 1122 1142 1123 1176 1176 1176 1186 1176
+ 1176 1176>;
+ };
};
/include/ "msm-pm8x41-rpm-regulator.dtsi"
diff --git a/arch/arm/configs/msm7627a-perf_defconfig b/arch/arm/configs/msm7627a-perf_defconfig
index 913e084..a8abb30 100644
--- a/arch/arm/configs/msm7627a-perf_defconfig
+++ b/arch/arm/configs/msm7627a-perf_defconfig
@@ -17,7 +17,6 @@
CONFIG_KALLSYMS_ALL=y
CONFIG_ASHMEM=y
CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_KPROBES=y
@@ -313,6 +312,7 @@
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_MSM_PDM=y
CONFIG_LEDS_PMIC_MPP=y
+CONFIG_LEDS_MSM_TRICOLOR=y
CONFIG_SWITCH=y
CONFIG_SWITCH_GPIO=y
CONFIG_RTC_CLASS=y
diff --git a/arch/arm/configs/msm7627a_defconfig b/arch/arm/configs/msm7627a_defconfig
index 8cd091c..314f91b 100644
--- a/arch/arm/configs/msm7627a_defconfig
+++ b/arch/arm/configs/msm7627a_defconfig
@@ -17,7 +17,6 @@
CONFIG_KALLSYMS_ALL=y
CONFIG_ASHMEM=y
CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_KPROBES=y
@@ -313,6 +312,7 @@
CONFIG_MMC_MSM_SDC3_8_BIT_SUPPORT=y
CONFIG_LEDS_MSM_PDM=y
CONFIG_LEDS_PMIC_MPP=y
+CONFIG_LEDS_MSM_TRICOLOR=y
CONFIG_SWITCH=y
CONFIG_SWITCH_GPIO=y
CONFIG_RTC_CLASS=y
@@ -340,8 +340,6 @@
CONFIG_LOCKUP_DETECTOR=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_DEBUG_SLAB_LEAK=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index 6c213c3..0197f78 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -345,6 +345,7 @@
CONFIG_MSM_CAMERA_FLASH_SC628A=y
CONFIG_MSM_CAMERA_FLASH_TPS61310=y
CONFIG_OV2720=y
+CONFIG_MSM_CSI20_HEADER=y
CONFIG_MSM_CAMERA_SENSOR=y
CONFIG_MSM_ACTUATOR=y
CONFIG_MSM_EEPROM=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 9eea6f6..d2aaeb0 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -341,6 +341,7 @@
# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
CONFIG_VIDEOBUF2_MSM_MEM=y
CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+CONFIG_USB_VIDEO_CLASS=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_MSM_CAMERA_V4L2=y
CONFIG_IMX074=y
@@ -348,6 +349,7 @@
CONFIG_IMX074_ACT=y
CONFIG_MSM_CAMERA_FLASH_SC628A=y
CONFIG_OV2720=y
+CONFIG_MSM_CSI20_HEADER=y
CONFIG_MSM_CAMERA_SENSOR=y
CONFIG_MSM_ACTUATOR=y
CONFIG_MSM_EEPROM=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index 9bd967b..622d165 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -168,7 +168,6 @@
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_BACKLIGHT_GENERIC is not set
-# CONFIG_HID_SUPPORT is not set
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_STORAGE=y
@@ -251,3 +250,7 @@
CONFIG_SND=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSM8974=y
+CONFIG_CRYPTO_DEV_QCRYPTO=m
+CONFIG_CRYPTO_DEV_QCE=m
+CONFIG_CRYPTO_DEV_QCEDEV=m
+CONFIG_QSEECOM=y
diff --git a/arch/arm/configs/msm9615_defconfig b/arch/arm/configs/msm9615_defconfig
index c9bc610..5acfd24 100644
--- a/arch/arm/configs/msm9615_defconfig
+++ b/arch/arm/configs/msm9615_defconfig
@@ -79,6 +79,9 @@
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IP_MROUTE=y
CONFIG_IP_MULTIPLE_TABLES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 7ccdd0f..5cd93dc 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -295,6 +295,7 @@
obj-$(CONFIG_ARCH_MSM8974) += clock-local2.o clock-pll.o clock-8974.o clock-rpm.o clock-voter.o
obj-$(CONFIG_ARCH_MSM8974) += gdsc.o
obj-$(CONFIG_ARCH_MSM9625) += board-9625.o board-9625-gpiomux.o
+obj-$(CONFIG_ARCH_MSM8930) += acpuclock-krait.o acpuclock-8930.o acpuclock-8627.o
obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire.o board-sapphire-gpio.o
obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire-keypad.o board-sapphire-panel.o
diff --git a/arch/arm/mach-msm/acpuclock-8627.c b/arch/arm/mach-msm/acpuclock-8627.c
new file mode 100644
index 0000000..45f2096
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-8627.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <mach/rpm-regulator.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+
+#include "acpuclock.h"
+#include "acpuclock-krait.h"
+
+/* Corner type vreg VDD values */
+#define LVL_NONE RPM_VREG_CORNER_NONE
+#define LVL_LOW RPM_VREG_CORNER_LOW
+#define LVL_NOM RPM_VREG_CORNER_NOMINAL
+#define LVL_HIGH RPM_VREG_CORNER_HIGH
+
+static struct hfpll_data hfpll_data = {
+ .mode_offset = 0x00,
+ .l_offset = 0x08,
+ .m_offset = 0x0C,
+ .n_offset = 0x10,
+ .config_offset = 0x04,
+ .config_val = 0x7845C665,
+ .has_droop_ctl = true,
+ .droop_offset = 0x14,
+ .droop_val = 0x0108C000,
+ .low_vdd_l_max = 40,
+ .vdd[HFPLL_VDD_NONE] = LVL_NONE,
+ .vdd[HFPLL_VDD_LOW] = LVL_LOW,
+ .vdd[HFPLL_VDD_NOM] = LVL_NOM,
+};
+
+static struct scalable scalable[] = {
+ [CPU0] = {
+ .hfpll_phys_base = 0x00903200,
+ .hfpll_data = &hfpll_data,
+ .aux_clk_sel_phys = 0x02088014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x4501,
+ .vreg[VREG_CORE] = { "krait0", 1300000, 1740000 },
+ .vreg[VREG_MEM] = { "krait0_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait0_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
+ },
+ [CPU1] = {
+ .hfpll_phys_base = 0x00903300,
+ .hfpll_data = &hfpll_data,
+ .aux_clk_sel_phys = 0x02098014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x5501,
+ .vreg[VREG_CORE] = { "krait1", 1300000, 1740000 },
+ .vreg[VREG_MEM] = { "krait1_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait1_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
+ },
+ [L2] = {
+ .hfpll_phys_base = 0x00903400,
+ .hfpll_data = &hfpll_data,
+ .aux_clk_sel_phys = 0x02011028,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x0500,
+ .vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
+ },
+};
+
+static struct msm_bus_paths bw_level_tbl[] = {
+ [0] = BW_MBPS(640), /* At least 80 MHz on bus. */
+ [1] = BW_MBPS(1064), /* At least 133 MHz on bus. */
+ [2] = BW_MBPS(1600), /* At least 200 MHz on bus. */
+ [3] = BW_MBPS(2128), /* At least 266 MHz on bus. */
+ [4] = BW_MBPS(3200), /* At least 400 MHz on bus. */
+};
+
+static struct msm_bus_scale_pdata bus_scale_data = {
+ .usecase = bw_level_tbl,
+ .num_usecases = ARRAY_SIZE(bw_level_tbl),
+ .active_only = 1,
+ .name = "acpuclk-8627",
+};
+
+/* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
+#define L2(x) (&l2_freq_tbl[(x)])
+static struct l2_level l2_freq_tbl[] = {
+ [0] = { {STBY_KHZ, QSB, 0, 0, 0x00 }, LVL_NOM, 1050000, 0 },
+ [1] = { { 384000, PLL_8, 0, 2, 0x00 }, LVL_NOM, 1050000, 1 },
+ [2] = { { 432000, HFPLL, 2, 0, 0x20 }, LVL_NOM, 1050000, 1 },
+ [3] = { { 486000, HFPLL, 2, 0, 0x24 }, LVL_NOM, 1050000, 1 },
+ [4] = { { 540000, HFPLL, 2, 0, 0x28 }, LVL_NOM, 1050000, 2 },
+ [5] = { { 594000, HFPLL, 1, 0, 0x16 }, LVL_NOM, 1050000, 2 },
+ [6] = { { 648000, HFPLL, 1, 0, 0x18 }, LVL_NOM, 1050000, 2 },
+ [7] = { { 702000, HFPLL, 1, 0, 0x1A }, LVL_NOM, 1050000, 3 },
+ [8] = { { 756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 3 },
+ [9] = { { 810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 3 },
+ [10] = { { 864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
+ [11] = { { 918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 4 },
+ [12] = { { 972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 4 },
+};
+
+/* TODO: Update core voltages when data is available. */
+static struct acpu_level acpu_freq_tbl[] = {
+ { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 900000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 900000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 925000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 925000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 937500 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 962500 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(9), 987500 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(9), 1000000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(9), 1025000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(9), 1062500 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(12), 1062500 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(12), 1087500 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(12), 1100000 },
+ { 0, { 0 } }
+};
+
+static struct acpuclk_krait_params acpuclk_8627_params = {
+ .scalable = scalable,
+ .pvs_acpu_freq_tbl[PVS_SLOW] = acpu_freq_tbl,
+ .pvs_acpu_freq_tbl[PVS_NOMINAL] = acpu_freq_tbl,
+ .pvs_acpu_freq_tbl[PVS_FAST] = acpu_freq_tbl,
+ .l2_freq_tbl = l2_freq_tbl,
+ .l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl),
+ .bus_scale_data = &bus_scale_data,
+ .qfprom_phys_base = 0x00700000,
+};
+
+static int __init acpuclk_8627_probe(struct platform_device *pdev)
+{
+ return acpuclk_krait_init(&pdev->dev, &acpuclk_8627_params);
+}
+
+static struct platform_driver acpuclk_8627_driver = {
+ .driver = {
+ .name = "acpuclk-8627",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init acpuclk_8627_init(void)
+{
+ return platform_driver_probe(&acpuclk_8627_driver,
+ acpuclk_8627_probe);
+}
+device_initcall(acpuclk_8627_init);
diff --git a/arch/arm/mach-msm/acpuclock-8930.c b/arch/arm/mach-msm/acpuclock-8930.c
new file mode 100644
index 0000000..d60b4eb
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-8930.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <mach/rpm-regulator.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+
+#include "acpuclock.h"
+#include "acpuclock-krait.h"
+
+/* Corner type vreg VDD values */
+#define LVL_NONE RPM_VREG_CORNER_NONE
+#define LVL_LOW RPM_VREG_CORNER_LOW
+#define LVL_NOM RPM_VREG_CORNER_NOMINAL
+#define LVL_HIGH RPM_VREG_CORNER_HIGH
+
+static struct hfpll_data hfpll_data = {
+ .mode_offset = 0x00,
+ .l_offset = 0x08,
+ .m_offset = 0x0C,
+ .n_offset = 0x10,
+ .config_offset = 0x04,
+ .config_val = 0x7845C665,
+ .has_droop_ctl = true,
+ .droop_offset = 0x14,
+ .droop_val = 0x0108C000,
+ .low_vdd_l_max = 40,
+ .vdd[HFPLL_VDD_NONE] = LVL_NONE,
+ .vdd[HFPLL_VDD_LOW] = LVL_LOW,
+ .vdd[HFPLL_VDD_NOM] = LVL_NOM,
+};
+
+static struct scalable scalable[] = {
+ [CPU0] = {
+ .hfpll_phys_base = 0x00903200,
+ .hfpll_data = &hfpll_data,
+ .aux_clk_sel_phys = 0x02088014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x4501,
+ .vreg[VREG_CORE] = { "krait0", 1300000, 1740000 },
+ .vreg[VREG_MEM] = { "krait0_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait0_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
+ },
+ [CPU1] = {
+ .hfpll_phys_base = 0x00903300,
+ .hfpll_data = &hfpll_data,
+ .aux_clk_sel_phys = 0x02098014,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x5501,
+ .vreg[VREG_CORE] = { "krait1", 1300000, 1740000 },
+ .vreg[VREG_MEM] = { "krait1_mem", 1150000 },
+ .vreg[VREG_DIG] = { "krait1_dig", 1150000 },
+ .vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
+ },
+ [L2] = {
+ .hfpll_phys_base = 0x00903400,
+ .hfpll_data = &hfpll_data,
+ .aux_clk_sel_phys = 0x02011028,
+ .aux_clk_sel = 3,
+ .l2cpmr_iaddr = 0x0500,
+ .vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
+ },
+};
+
+static struct msm_bus_paths bw_level_tbl[] = {
+ [0] = BW_MBPS(640), /* At least 80 MHz on bus. */
+ [1] = BW_MBPS(1064), /* At least 133 MHz on bus. */
+ [2] = BW_MBPS(1600), /* At least 200 MHz on bus. */
+ [3] = BW_MBPS(2128), /* At least 266 MHz on bus. */
+ [4] = BW_MBPS(3200), /* At least 400 MHz on bus. */
+ [5] = BW_MBPS(3600), /* At least 450 MHz on bus. */
+ [6] = BW_MBPS(3936), /* At least 492 MHz on bus. */
+ [7] = BW_MBPS(4264), /* At least 533 MHz on bus. */
+};
+
+static struct msm_bus_scale_pdata bus_scale_data = {
+ .usecase = bw_level_tbl,
+ .num_usecases = ARRAY_SIZE(bw_level_tbl),
+ .active_only = 1,
+ .name = "acpuclk-8930",
+};
+
+/* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
+#define L2(x) (&l2_freq_tbl[(x)])
+static struct l2_level l2_freq_tbl[] = {
+ [0] = { {STBY_KHZ, QSB, 0, 0, 0x00 }, LVL_NOM, 1050000, 0 },
+ [1] = { { 384000, PLL_8, 0, 2, 0x00 }, LVL_NOM, 1050000, 1 },
+ [2] = { { 432000, HFPLL, 2, 0, 0x20 }, LVL_NOM, 1050000, 2 },
+ [3] = { { 486000, HFPLL, 2, 0, 0x24 }, LVL_NOM, 1050000, 2 },
+ [4] = { { 540000, HFPLL, 2, 0, 0x28 }, LVL_NOM, 1050000, 2 },
+ [5] = { { 594000, HFPLL, 1, 0, 0x16 }, LVL_NOM, 1050000, 2 },
+ [6] = { { 648000, HFPLL, 1, 0, 0x18 }, LVL_NOM, 1050000, 4 },
+ [7] = { { 702000, HFPLL, 1, 0, 0x1A }, LVL_NOM, 1050000, 4 },
+ [8] = { { 756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 4 },
+ [9] = { { 810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 4 },
+ [10] = { { 864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
+ [11] = { { 918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 7 },
+ [12] = { { 972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 7 },
+ [13] = { { 1026000, HFPLL, 1, 0, 0x26 }, LVL_HIGH, 1150000, 7 },
+ [14] = { { 1080000, HFPLL, 1, 0, 0x28 }, LVL_HIGH, 1150000, 7 },
+ [15] = { { 1134000, HFPLL, 1, 0, 0x2A }, LVL_HIGH, 1150000, 7 },
+ [16] = { { 1188000, HFPLL, 1, 0, 0x2C }, LVL_HIGH, 1150000, 7 },
+};
+
+static struct acpu_level acpu_freq_tbl_slow[] = {
+ { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 950000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 950000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 975000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 975000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 1000000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 1000000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 1025000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 1025000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1075000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1075000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1100000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1100000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(11), 1125000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1125000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1175000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1175000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1200000 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_nom[] = {
+ { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 925000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 925000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 950000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 950000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 975000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 975000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 1000000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 1000000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1050000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1050000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1075000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1075000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(11), 1100000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1100000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1150000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1150000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1175000 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_fast[] = {
+ { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 900000 },
+ { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 900000 },
+ { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 900000 },
+ { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 900000 },
+ { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 925000 },
+ { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 925000 },
+ { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 950000 },
+ { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 950000 },
+ { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1000000 },
+ { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1000000 },
+ { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1025000 },
+ { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1025000 },
+ { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(11), 1050000 },
+ { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1050000 },
+ { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1100000 },
+ { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1100000 },
+ { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1125000 },
+ { 0, { 0 } }
+};
+
+static struct acpuclk_krait_params acpuclk_8930_params = {
+ .scalable = scalable,
+ .pvs_acpu_freq_tbl[PVS_SLOW] = acpu_freq_tbl_slow,
+ .pvs_acpu_freq_tbl[PVS_NOMINAL] = acpu_freq_tbl_nom,
+ .pvs_acpu_freq_tbl[PVS_FAST] = acpu_freq_tbl_fast,
+ .l2_freq_tbl = l2_freq_tbl,
+ .l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl),
+ .bus_scale_data = &bus_scale_data,
+ .qfprom_phys_base = 0x00700000,
+};
+
+static int __init acpuclk_8930_probe(struct platform_device *pdev)
+{
+ return acpuclk_krait_init(&pdev->dev, &acpuclk_8930_params);
+}
+
+static struct platform_driver acpuclk_8930_driver = {
+ .driver = {
+ .name = "acpuclk-8930",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init acpuclk_8930_init(void)
+{
+ return platform_driver_probe(&acpuclk_8930_driver,
+ acpuclk_8930_probe);
+}
+device_initcall(acpuclk_8930_init);
diff --git a/arch/arm/mach-msm/acpuclock-8960.c b/arch/arm/mach-msm/acpuclock-8960.c
index 7f198d2..8947c9f 100644
--- a/arch/arm/mach-msm/acpuclock-8960.c
+++ b/arch/arm/mach-msm/acpuclock-8960.c
@@ -170,12 +170,6 @@
[HFPLL_VDD_NOM] = 1050000
};
-static unsigned int hfpll_vdd_dig_tbl_8930[] = {
- [HFPLL_VDD_NONE] = LVL_NONE,
- [HFPLL_VDD_LOW] = LVL_LOW,
- [HFPLL_VDD_NOM] = LVL_NOM
-};
-
static struct scalable scalable_8960[] = {
[CPU0] = {
.hfpll_base = MSM_HFPLL_BASE + 0x200,
@@ -302,95 +296,6 @@
},
};
-static struct scalable scalable_8930[] = {
- [CPU0] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x200,
- .aux_clk_sel = MSM_ACC0_BASE + 0x014,
- .l2cpmr_iaddr = L2CPUCPMR_IADDR,
- .vreg[VREG_CORE] = { "krait0", 1300000 },
- .vreg[VREG_MEM] = { "krait0_mem", 1150000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8038_L24 },
- .vreg[VREG_DIG] = { "krait0_dig", LVL_HIGH,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8038_VDD_DIG_CORNER
- },
- .vreg[VREG_HFPLL_B] = { "hfpll0", 1800000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8038_L23 },
- },
- [CPU1] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x300,
- .aux_clk_sel = MSM_ACC1_BASE + 0x014,
- .l2cpmr_iaddr = L2CPUCPMR_IADDR,
- .vreg[VREG_CORE] = { "krait1", 1300000 },
- .vreg[VREG_MEM] = { "krait1_mem", 1150000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8038_L24 },
- .vreg[VREG_DIG] = { "krait1_dig", LVL_HIGH,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8038_VDD_DIG_CORNER
- },
- .vreg[VREG_HFPLL_B] = { "hfpll1", 1800000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8038_L23 },
- },
- [L2] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x400,
- .hfpll_vdd_tbl = hfpll_vdd_dig_tbl_8930,
- .aux_clk_sel = MSM_APCS_GCC_BASE + 0x028,
- .l2cpmr_iaddr = L2CPMR_IADDR,
- .vreg[VREG_HFPLL_B] = { "hfpll_l2", 1800000,
- RPM_VREG_VOTER6,
- RPM_VREG_ID_PM8038_L23 },
- },
-};
-
-/*TODO: Update the rpm vreg id when the rpm driver is ready */
-static struct scalable scalable_8627[] = {
- [CPU0] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x200,
- .aux_clk_sel = MSM_ACC0_BASE + 0x014,
- .l2cpmr_iaddr = L2CPUCPMR_IADDR,
- .vreg[VREG_CORE] = { "krait0", 1300000 },
- .vreg[VREG_MEM] = { "krait0_mem", 1150000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8038_L24 },
- .vreg[VREG_DIG] = { "krait0_dig", LVL_HIGH,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8038_VDD_DIG_CORNER
- },
- .vreg[VREG_HFPLL_B] = { "hfpll0", 1800000,
- RPM_VREG_VOTER1,
- RPM_VREG_ID_PM8038_L23 },
- },
- [CPU1] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x300,
- .aux_clk_sel = MSM_ACC1_BASE + 0x014,
- .l2cpmr_iaddr = L2CPUCPMR_IADDR,
- .vreg[VREG_CORE] = { "krait1", 1300000 },
- .vreg[VREG_MEM] = { "krait1_mem", 1150000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8038_L24 },
- .vreg[VREG_DIG] = { "krait1_dig", LVL_HIGH,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8038_VDD_DIG_CORNER
- },
- .vreg[VREG_HFPLL_B] = { "hfpll1", 1800000,
- RPM_VREG_VOTER2,
- RPM_VREG_ID_PM8038_L23 },
- },
- [L2] = {
- .hfpll_base = MSM_HFPLL_BASE + 0x400,
- .hfpll_vdd_tbl = hfpll_vdd_dig_tbl_8930,
- .aux_clk_sel = MSM_APCS_GCC_BASE + 0x028,
- .l2cpmr_iaddr = L2CPMR_IADDR,
- .vreg[VREG_HFPLL_B] = { "hfpll_l2", 1800000,
- RPM_VREG_VOTER6,
- RPM_VREG_ID_PM8038_L23 },
- },
-};
-
static struct l2_level *l2_freq_tbl;
static struct acpu_level *acpu_freq_tbl;
static int l2_freq_tbl_size;
@@ -695,129 +600,6 @@
{ 0, { 0 } }
};
-/* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
-#undef L2
-#define L2(x) (&l2_freq_tbl_8930[(x)])
-static struct l2_level l2_freq_tbl_8930[] = {
- [0] = { {STBY_KHZ, QSB, 0, 0, 0x00 }, LVL_NOM, 1050000, 0 },
- [1] = { { 384000, PLL_8, 0, 2, 0x00 }, LVL_NOM, 1050000, 1 },
- [2] = { { 432000, HFPLL, 2, 0, 0x20 }, LVL_NOM, 1050000, 2 },
- [3] = { { 486000, HFPLL, 2, 0, 0x24 }, LVL_NOM, 1050000, 2 },
- [4] = { { 540000, HFPLL, 2, 0, 0x28 }, LVL_NOM, 1050000, 2 },
- [5] = { { 594000, HFPLL, 1, 0, 0x16 }, LVL_NOM, 1050000, 2 },
- [6] = { { 648000, HFPLL, 1, 0, 0x18 }, LVL_NOM, 1050000, 4 },
- [7] = { { 702000, HFPLL, 1, 0, 0x1A }, LVL_NOM, 1050000, 4 },
- [8] = { { 756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 4 },
- [9] = { { 810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 4 },
- [10] = { { 864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
- [11] = { { 918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 7 },
- [12] = { { 972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 7 },
- [13] = { { 1026000, HFPLL, 1, 0, 0x26 }, LVL_HIGH, 1150000, 7 },
- [14] = { { 1080000, HFPLL, 1, 0, 0x28 }, LVL_HIGH, 1150000, 7 },
- [15] = { { 1134000, HFPLL, 1, 0, 0x2A }, LVL_HIGH, 1150000, 7 },
- [16] = { { 1188000, HFPLL, 1, 0, 0x2C }, LVL_HIGH, 1150000, 7 },
-};
-
-/* TODO: Update core voltages when data is available. */
-static struct acpu_level acpu_freq_tbl_8930_slow[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 950000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 950000 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 975000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 975000 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 1000000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 1000000 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 1025000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 1025000 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1075000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1075000 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1100000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1100000 },
- { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(11), 1125000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1125000 },
- { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1175000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1175000 },
- { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1200000 },
- { 0, { 0 } }
-};
-
-static struct acpu_level acpu_freq_tbl_8930_nom[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 925000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 925000 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 950000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 950000 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 975000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 975000 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 1000000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 1000000 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1050000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1050000 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1075000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1075000 },
- { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(11), 1100000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1100000 },
- { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1150000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1150000 },
- { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1175000 },
- { 0, { 0 } }
-};
-
-static struct acpu_level acpu_freq_tbl_8930_fast[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 900000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 900000 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(6), 900000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(6), 900000 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(6), 925000 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(6), 925000 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(6), 950000 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(6), 950000 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(11), 1000000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(11), 1000000 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(11), 1025000 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(11), 1025000 },
- { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(11), 1050000 },
- { 1, { 1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1050000 },
- { 1, { 1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1100000 },
- { 1, { 1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1100000 },
- { 1, { 1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1125000 },
- { 0, { 0 } }
-};
-/* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
-#undef L2
-#define L2(x) (&l2_freq_tbl_8627[(x)])
-static struct l2_level l2_freq_tbl_8627[] = {
- [0] = { {STBY_KHZ, QSB, 0, 0, 0x00 }, LVL_NOM, 1050000, 0 },
- [1] = { { 384000, PLL_8, 0, 2, 0x00 }, LVL_NOM, 1050000, 1 },
- [2] = { { 432000, HFPLL, 2, 0, 0x20 }, LVL_NOM, 1050000, 1 },
- [3] = { { 486000, HFPLL, 2, 0, 0x24 }, LVL_NOM, 1050000, 1 },
- [4] = { { 540000, HFPLL, 2, 0, 0x28 }, LVL_NOM, 1050000, 2 },
- [5] = { { 594000, HFPLL, 1, 0, 0x16 }, LVL_NOM, 1050000, 2 },
- [6] = { { 648000, HFPLL, 1, 0, 0x18 }, LVL_NOM, 1050000, 2 },
- [7] = { { 702000, HFPLL, 1, 0, 0x1A }, LVL_NOM, 1050000, 3 },
- [8] = { { 756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 3 },
- [9] = { { 810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 3 },
- [10] = { { 864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
- [11] = { { 918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 4 },
- [12] = { { 972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 4 },
-};
-
-/* TODO: Update core voltages when data is available. */
-static struct acpu_level acpu_freq_tbl_8627[] = {
- { 0, { STBY_KHZ, QSB, 0, 0, 0x00 }, L2(0), 900000 },
- { 1, { 384000, PLL_8, 0, 2, 0x00 }, L2(1), 900000 },
- { 1, { 432000, HFPLL, 2, 0, 0x20 }, L2(5), 925000 },
- { 1, { 486000, HFPLL, 2, 0, 0x24 }, L2(5), 925000 },
- { 1, { 540000, HFPLL, 2, 0, 0x28 }, L2(5), 937500 },
- { 1, { 594000, HFPLL, 1, 0, 0x16 }, L2(5), 962500 },
- { 1, { 648000, HFPLL, 1, 0, 0x18 }, L2(9), 987500 },
- { 1, { 702000, HFPLL, 1, 0, 0x1A }, L2(9), 1000000 },
- { 1, { 756000, HFPLL, 1, 0, 0x1C }, L2(9), 1025000 },
- { 1, { 810000, HFPLL, 1, 0, 0x1E }, L2(9), 1062500 },
- { 1, { 864000, HFPLL, 1, 0, 0x20 }, L2(12), 1062500 },
- { 1, { 918000, HFPLL, 1, 0, 0x22 }, L2(12), 1087500 },
- { 1, { 972000, HFPLL, 1, 0, 0x24 }, L2(12), 1100000 },
- { 0, { 0 } }
-};
-
static struct acpu_level *acpu_freq_tbl_8960_v1[NUM_PVS] __initdata = {
[PVS_SLOW] = acpu_freq_tbl_8960_kraitv1_slow,
[PVS_NOM] = acpu_freq_tbl_8960_kraitv1_nom_fast,
@@ -838,12 +620,6 @@
[PVS_FASTER] = acpu_freq_tbl_8064_fast,
};
-static struct acpu_level *acpu_freq_tbl_8930_pvs[NUM_PVS] __initdata = {
- [PVS_SLOW] = acpu_freq_tbl_8930_slow,
- [PVS_NOM] = acpu_freq_tbl_8930_nom,
- [PVS_FAST] = acpu_freq_tbl_8930_fast,
-};
-
static struct acpu_level *max_acpu_level;
static unsigned long acpuclk_8960_get_rate(int cpu)
@@ -1608,18 +1384,6 @@
acpu_freq_tbl = acpu_freq_tbl_8064[pvs_id];
l2_freq_tbl = l2_freq_tbl_8064;
l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_8064);
- } else if (cpu_is_msm8627()) {
- scalable = scalable_8627;
- acpu_freq_tbl = acpu_freq_tbl_8627;
- l2_freq_tbl = l2_freq_tbl_8627;
- l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_8627);
- } else if (cpu_is_msm8930() || cpu_is_msm8930aa()) {
- enum pvs pvs_id = get_pvs();
-
- scalable = scalable_8930;
- acpu_freq_tbl = acpu_freq_tbl_8930_pvs[pvs_id];
- l2_freq_tbl = l2_freq_tbl_8930;
- l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_8930);
} else {
BUG();
}
diff --git a/arch/arm/mach-msm/acpuclock-8974.c b/arch/arm/mach-msm/acpuclock-8974.c
index c67109e..8cf9c2b 100644
--- a/arch/arm/mach-msm/acpuclock-8974.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -123,8 +123,8 @@
#define L2(x) (&l2_freq_tbl[(x)])
static struct l2_level l2_freq_tbl[] = {
- [0] = { {STBY_KHZ, QSB, 0, 0, 0 }, LVL_NOM, 1050000, 0 },
- [1] = { { 300000, PLL_0, 0, 2, 0 }, LVL_NOM, 1050000, 2 },
+ [0] = { {STBY_KHZ, QSB, 0, 0, 0 }, LVL_LOW, 1050000, 0 },
+ [1] = { { 300000, PLL_0, 0, 2, 0 }, LVL_LOW, 1050000, 2 },
[2] = { { 384000, HFPLL, 2, 0, 40 }, LVL_NOM, 1050000, 2 },
[3] = { { 460800, HFPLL, 2, 0, 48 }, LVL_NOM, 1050000, 2 },
[4] = { { 537600, HFPLL, 1, 0, 28 }, LVL_NOM, 1050000, 2 },
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index 813824e..5aea0ed 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -1912,7 +1912,7 @@
ul_powerdown_finish();
a2_pc_disabled = 0;
a2_pc_disabled_wakelock_skipped = 0;
- disconnect_ack = 0;
+ disconnect_ack = 1;
/* Cleanup Channel States */
mutex_lock(&bam_pdev_mutexlock);
diff --git a/arch/arm/mach-msm/board-8064-camera.c b/arch/arm/mach-msm/board-8064-camera.c
index c37491d..40995bb 100644
--- a/arch/arm/mach-msm/board-8064-camera.c
+++ b/arch/arm/mach-msm/board-8064-camera.c
@@ -243,7 +243,7 @@
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
- .ab = 140451840,
+ .ab = 274406400,
.ib = 561807360,
},
{
@@ -302,6 +302,27 @@
},
};
+static struct msm_bus_vectors cam_video_ls_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 348192000,
+ .ib = 617103360,
+ },
+ {
+ .src = MSM_BUS_MASTER_VPE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 206807040,
+ .ib = 488816640,
+ },
+ {
+ .src = MSM_BUS_MASTER_JPEG_ENC,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 540000000,
+ .ib = 1350000000,
+ },
+};
+
static struct msm_bus_paths cam_bus_client_config[] = {
{
ARRAY_SIZE(cam_init_vectors),
@@ -323,6 +344,10 @@
ARRAY_SIZE(cam_zsl_vectors),
cam_zsl_vectors,
},
+ {
+ ARRAY_SIZE(cam_video_ls_vectors),
+ cam_video_ls_vectors,
+ },
};
static struct msm_bus_scale_pdata cam_bus_client_pdata = {
diff --git a/arch/arm/mach-msm/board-8064-regulator.c b/arch/arm/mach-msm/board-8064-regulator.c
index f727852..29416de 100644
--- a/arch/arm/mach-msm/board-8064-regulator.c
+++ b/arch/arm/mach-msm/board-8064-regulator.c
@@ -221,7 +221,6 @@
REGULATOR_SUPPLY("8921_lvs7", NULL),
REGULATOR_SUPPLY("pll_vdd", "pil_riva"),
REGULATOR_SUPPLY("lvds_vdda", "lvds.0"),
- REGULATOR_SUPPLY("hdmi_pll_fs", "mdp.0"),
REGULATOR_SUPPLY("dsi1_vddio", "mipi_dsi.1"),
REGULATOR_SUPPLY("hdmi_vdda", "hdmi_msm.0"),
};
diff --git a/arch/arm/mach-msm/board-8930-camera.c b/arch/arm/mach-msm/board-8930-camera.c
index 6ee315c..d3e37cd 100644
--- a/arch/arm/mach-msm/board-8930-camera.c
+++ b/arch/arm/mach-msm/board-8930-camera.c
@@ -251,7 +251,7 @@
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
- .ab = 140451840,
+ .ab = 274406400,
.ib = 561807360,
},
{
@@ -310,6 +310,27 @@
},
};
+static struct msm_bus_vectors cam_video_ls_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 348192000,
+ .ib = 617103360,
+ },
+ {
+ .src = MSM_BUS_MASTER_VPE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 206807040,
+ .ib = 488816640,
+ },
+ {
+ .src = MSM_BUS_MASTER_JPEG_ENC,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 540000000,
+ .ib = 1350000000,
+ },
+};
+
static struct msm_bus_paths cam_bus_client_config[] = {
{
ARRAY_SIZE(cam_init_vectors),
@@ -331,6 +352,10 @@
ARRAY_SIZE(cam_zsl_vectors),
cam_zsl_vectors,
},
+ {
+ ARRAY_SIZE(cam_video_ls_vectors),
+ cam_video_ls_vectors,
+ },
};
static struct msm_bus_scale_pdata cam_bus_client_pdata = {
diff --git a/arch/arm/mach-msm/board-8930-regulator.c b/arch/arm/mach-msm/board-8930-regulator.c
index f06a1b7..af91089 100644
--- a/arch/arm/mach-msm/board-8930-regulator.c
+++ b/arch/arm/mach-msm/board-8930-regulator.c
@@ -172,11 +172,13 @@
};
VREG_CONSUMERS(S5) = {
REGULATOR_SUPPLY("8038_s5", NULL),
- REGULATOR_SUPPLY("krait0", NULL),
+ REGULATOR_SUPPLY("krait0", "acpuclk-8627"),
+ REGULATOR_SUPPLY("krait0", "acpuclk-8930"),
};
VREG_CONSUMERS(S6) = {
REGULATOR_SUPPLY("8038_s6", NULL),
- REGULATOR_SUPPLY("krait1", NULL),
+ REGULATOR_SUPPLY("krait1", "acpuclk-8627"),
+ REGULATOR_SUPPLY("krait1", "acpuclk-8930"),
};
VREG_CONSUMERS(LVS1) = {
REGULATOR_SUPPLY("8038_lvs1", NULL),
@@ -511,10 +513,39 @@
int msm8930_pm8038_regulator_pdata_len __devinitdata =
ARRAY_SIZE(msm8930_pm8038_regulator_pdata);
+#define RPM_REG_MAP(_id, _sleep_also, _voter, _supply, _dev_name) \
+ { \
+ .vreg_id = RPM_VREG_ID_PM8038_##_id, \
+ .sleep_also = _sleep_also, \
+ .voter = _voter, \
+ .supply = _supply, \
+ .dev_name = _dev_name, \
+ }
+static struct rpm_regulator_consumer_mapping
+ msm_rpm_regulator_consumer_mapping[] __devinitdata = {
+ RPM_REG_MAP(L23, 0, 1, "krait0_hfpll", "acpuclk-8930"),
+ RPM_REG_MAP(L23, 0, 2, "krait1_hfpll", "acpuclk-8930"),
+ RPM_REG_MAP(L23, 0, 6, "l2_hfpll", "acpuclk-8930"),
+ RPM_REG_MAP(L24, 0, 1, "krait0_mem", "acpuclk-8930"),
+ RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8930"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig", "acpuclk-8930"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig", "acpuclk-8930"),
+
+ RPM_REG_MAP(L23, 0, 1, "krait0_hfpll", "acpuclk-8627"),
+ RPM_REG_MAP(L23, 0, 2, "krait1_hfpll", "acpuclk-8627"),
+ RPM_REG_MAP(L23, 0, 6, "l2_hfpll", "acpuclk-8627"),
+ RPM_REG_MAP(L24, 0, 1, "krait0_mem", "acpuclk-8627"),
+ RPM_REG_MAP(L24, 0, 2, "krait1_mem", "acpuclk-8627"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig", "acpuclk-8627"),
+ RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig", "acpuclk-8627"),
+};
+
struct rpm_regulator_platform_data msm8930_rpm_regulator_pdata __devinitdata = {
.init_data = msm8930_rpm_regulator_init_data,
.num_regulators = ARRAY_SIZE(msm8930_rpm_regulator_init_data),
.version = RPM_VREG_VERSION_8930,
.vreg_id_vdd_mem = RPM_VREG_ID_PM8038_L24,
.vreg_id_vdd_dig = RPM_VREG_ID_PM8038_VDD_DIG_CORNER,
+ .consumer_map = msm_rpm_regulator_consumer_mapping,
+ .consumer_map_len = ARRAY_SIZE(msm_rpm_regulator_consumer_mapping),
};
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index ec218ac..88c0924 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -1460,6 +1460,9 @@
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &usb_bus_scale_pdata,
#endif
+#ifdef CONFIG_FB_MSM_HDMI_MHL_8334
+ .mhl_dev_name = "sii8334",
+#endif
};
#endif
@@ -2159,7 +2162,6 @@
};
static struct platform_device *common_devices[] __initdata = {
- &msm8960_device_acpuclk,
&msm8960_device_dmov,
&msm_device_smd,
&msm8960_device_uart_gsbi5,
@@ -2573,6 +2575,10 @@
msm_spm_l2_init(msm_spm_l2_data);
msm8930_init_buses();
platform_add_devices(msm8930_footswitch, msm8930_num_footswitch);
+ if (cpu_is_msm8627())
+ platform_device_register(&msm8627_device_acpuclk);
+ else if (cpu_is_msm8930() || cpu_is_msm8930aa())
+ platform_device_register(&msm8930_device_acpuclk);
platform_add_devices(common_devices, ARRAY_SIZE(common_devices));
msm8930_add_vidc_device();
/*
diff --git a/arch/arm/mach-msm/board-8960-camera.c b/arch/arm/mach-msm/board-8960-camera.c
index b6c03a4..a21c4c3 100644
--- a/arch/arm/mach-msm/board-8960-camera.c
+++ b/arch/arm/mach-msm/board-8960-camera.c
@@ -272,7 +272,7 @@
{
.src = MSM_BUS_MASTER_VFE,
.dst = MSM_BUS_SLAVE_EBI_CH0,
- .ab = 154275840,
+ .ab = 274406400,
.ib = 617103360,
},
{
@@ -367,6 +367,40 @@
},
};
+static struct msm_bus_vectors cam_video_ls_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 348192000,
+ .ib = 617103360,
+ },
+ {
+ .src = MSM_BUS_MASTER_VPE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 206807040,
+ .ib = 488816640,
+ },
+ {
+ .src = MSM_BUS_MASTER_JPEG_ENC,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 540000000,
+ .ib = 1350000000,
+ },
+ {
+ .src = MSM_BUS_MASTER_JPEG_ENC,
+ .dst = MSM_BUS_SLAVE_MM_IMEM,
+ .ab = 0,
+ .ib = 0,
+ },
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_MM_IMEM,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+
static struct msm_bus_paths cam_bus_client_config[] = {
{
ARRAY_SIZE(cam_init_vectors),
@@ -388,6 +422,10 @@
ARRAY_SIZE(cam_zsl_vectors),
cam_zsl_vectors,
},
+ {
+ ARRAY_SIZE(cam_video_ls_vectors),
+ cam_video_ls_vectors,
+ },
};
static struct msm_bus_scale_pdata cam_bus_client_pdata = {
diff --git a/arch/arm/mach-msm/board-8960-gpiomux.c b/arch/arm/mach-msm/board-8960-gpiomux.c
index fd326f1..53e7c9e 100644
--- a/arch/arm/mach-msm/board-8960-gpiomux.c
+++ b/arch/arm/mach-msm/board-8960-gpiomux.c
@@ -240,6 +240,12 @@
.pull = GPIOMUX_PULL_DOWN,
};
+static struct gpiomux_setting usbsw_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
static struct gpiomux_setting mdp_vsync_suspend_cfg = {
.func = GPIOMUX_FUNC_GPIO,
.drv = GPIOMUX_DRV_2MA,
@@ -760,6 +766,13 @@
[GPIOMUX_SUSPENDED] = &ap2mdm_cfg,
}
},
+ /* USB_SW */
+ {
+ .gpio = 25,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &usbsw_cfg,
+ }
+ }
};
static struct msm_gpiomux_config msm8960_mdp_vsync_configs[] __initdata = {
diff --git a/arch/arm/mach-msm/board-8960-regulator.c b/arch/arm/mach-msm/board-8960-regulator.c
index 2664d6b..650ac28 100644
--- a/arch/arm/mach-msm/board-8960-regulator.c
+++ b/arch/arm/mach-msm/board-8960-regulator.c
@@ -116,7 +116,6 @@
REGULATOR_SUPPLY("8921_l23", NULL),
REGULATOR_SUPPLY("dsi_vddio", "mipi_dsi.1"),
REGULATOR_SUPPLY("hdmi_avdd", "hdmi_msm.0"),
- REGULATOR_SUPPLY("hdmi_pll_fs", "mdp.0"),
REGULATOR_SUPPLY("pll_vdd", "pil_riva"),
REGULATOR_SUPPLY("pll_vdd", "pil_qdsp6v4.1"),
REGULATOR_SUPPLY("pll_vdd", "pil_qdsp6v4.2"),
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 925b7a1..0fcaf78 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -1293,6 +1293,7 @@
.peripheral_platform_device = NULL,
.ramdump_timeout_ms = 600000,
.no_powerdown_after_ramdumps = 1,
+ .image_upgrade_supported = 1,
};
#define MSM_TSIF0_PHYS (0x18200000)
@@ -1488,6 +1489,9 @@
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &usb_bus_scale_pdata,
#endif
+#ifdef CONFIG_FB_MSM_HDMI_MHL_8334
+ .mhl_dev_name = "sii8334",
+#endif
};
#endif
@@ -2467,7 +2471,6 @@
&msm8960_device_dmov,
&msm_device_smd,
&msm_device_uart_dm6,
- &msm_device_uart_dm9,
&msm_device_saw_core0,
&msm_device_saw_core1,
&msm8960_device_ext_5v_vreg,
@@ -3137,6 +3140,7 @@
if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) {
msm_uart_dm9_pdata.wakeup_irq = gpio_to_irq(94); /* GSBI9(2) */
msm_device_uart_dm9.dev.platform_data = &msm_uart_dm9_pdata;
+ platform_device_register(&msm_device_uart_dm9);
}
platform_add_devices(common_devices, ARRAY_SIZE(common_devices));
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index 557331a..7562c83 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -208,12 +208,12 @@
static struct resource smd_resource[] = {
{
.name = "modem_smd_in",
- .start = 32 + 17, /* mss_sw_to_kpss_ipc_irq0 */
+ .start = 32 + 25, /* mss_sw_to_kpss_ipc_irq0 */
.flags = IORESOURCE_IRQ,
},
{
.name = "modem_smsm_in",
- .start = 32 + 18, /* mss_sw_to_kpss_ipc_irq1 */
+ .start = 32 + 26, /* mss_sw_to_kpss_ipc_irq1 */
.flags = IORESOURCE_IRQ,
},
{
@@ -640,6 +640,8 @@
OF_DEV_AUXDATA("qcom,qseecom", 0xFE806000, \
"qseecom", NULL),
OF_DEV_AUXDATA("qcom,mdss_mdp", 0xFD900000, "mdp.0", NULL),
+ OF_DEV_AUXDATA("qcom,msm-tsens", 0xFC4A8000, \
+ "msm-tsens", NULL),
{}
};
diff --git a/arch/arm/mach-msm/board-msm7627a-display.c b/arch/arm/mach-msm/board-msm7627a-display.c
index 9259161..3726941 100644
--- a/arch/arm/mach-msm/board-msm7627a-display.c
+++ b/arch/arm/mach-msm/board-msm7627a-display.c
@@ -1188,9 +1188,9 @@
}
/*Toggle Backlight GPIO*/
gpio_set_value_cansleep(GPIO_QRD3_LCD_BACKLIGHT_EN, 1);
- udelay(190);
+ udelay(100);
gpio_set_value_cansleep(GPIO_QRD3_LCD_BACKLIGHT_EN, 0);
- udelay(286);
+ udelay(430);
gpio_set_value_cansleep(GPIO_QRD3_LCD_BACKLIGHT_EN, 1);
/* 1 wire mode starts from this low to high transition */
udelay(50);
diff --git a/arch/arm/mach-msm/board-msm7627a-io.c b/arch/arm/mach-msm/board-msm7627a-io.c
index 22095cd..47e8381 100644
--- a/arch/arm/mach-msm/board-msm7627a-io.c
+++ b/arch/arm/mach-msm/board-msm7627a-io.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/atmel_maxtouch.h>
#include <linux/input/ft5x06_ts.h>
+#include <linux/leds-msm-tricolor.h>
#include <asm/gpio.h>
#include <asm/mach-types.h>
#include <mach/rpc_server_handset.h>
@@ -162,32 +163,6 @@
};
#define LED_GPIO_PDM 96
-#define LED_RED_GPIO_8625 49
-#define LED_GREEN_GPIO_8625 34
-
-static struct gpio_led gpio_leds_config_8625[] = {
- {
- .name = "green",
- .gpio = LED_GREEN_GPIO_8625,
- },
- {
- .name = "red",
- .gpio = LED_RED_GPIO_8625,
- },
-};
-
-static struct gpio_led_platform_data gpio_leds_pdata_8625 = {
- .num_leds = ARRAY_SIZE(gpio_leds_config_8625),
- .leds = gpio_leds_config_8625,
-};
-
-static struct platform_device gpio_leds_8625 = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &gpio_leds_pdata_8625,
- },
-};
#define MXT_TS_IRQ_GPIO 48
#define MXT_TS_RESET_GPIO 26
@@ -772,6 +747,30 @@
},
};
+static struct led_info tricolor_led_info[] = {
+ [0] = {
+ .name = "red",
+ .flags = LED_COLOR_RED,
+ },
+ [1] = {
+ .name = "green",
+ .flags = LED_COLOR_GREEN,
+ },
+};
+
+static struct led_platform_data tricolor_led_pdata = {
+ .leds = tricolor_led_info,
+ .num_leds = ARRAY_SIZE(tricolor_led_info),
+};
+
+static struct platform_device tricolor_leds_pdev = {
+ .name = "msm-tricolor-leds",
+ .id = -1,
+ .dev = {
+ .platform_data = &tricolor_led_pdata,
+ },
+};
+
void __init msm7627a_add_io_devices(void)
{
/* touchscreen */
@@ -868,24 +867,9 @@
platform_device_register(&kp_pdev_sku3);
/* leds */
- if (machine_is_msm7627a_evb() || machine_is_msm8625_evb()) {
- rc = gpio_tlmm_config(GPIO_CFG(LED_RED_GPIO_8625, 0,
- GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP,
- GPIO_CFG_16MA), GPIO_CFG_ENABLE);
- if (rc) {
- pr_err("%s: gpio_tlmm_config for %d failed\n",
- __func__, LED_RED_GPIO_8625);
- }
-
- rc = gpio_tlmm_config(GPIO_CFG(LED_GREEN_GPIO_8625, 0,
- GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP,
- GPIO_CFG_16MA), GPIO_CFG_ENABLE);
- if (rc) {
- pr_err("%s: gpio_tlmm_config for %d failed\n",
- __func__, LED_GREEN_GPIO_8625);
- }
-
- platform_device_register(&gpio_leds_8625);
+ if (machine_is_msm7627a_evb() || machine_is_msm8625_evb() ||
+ machine_is_msm8625_evt()) {
platform_device_register(&pmic_mpp_leds_pdev);
+ platform_device_register(&tricolor_leds_pdev);
}
}
diff --git a/arch/arm/mach-msm/board-qrd7627a.c b/arch/arm/mach-msm/board-qrd7627a.c
index 5b9ea36..bd73b70 100644
--- a/arch/arm/mach-msm/board-qrd7627a.c
+++ b/arch/arm/mach-msm/board-qrd7627a.c
@@ -144,6 +144,7 @@
static struct android_usb_platform_data android_usb_pdata = {
.update_pid_and_serial_num = usb_diag_update_pid_and_serial_num,
+ .cdrom = 1,
};
static struct platform_device android_usb_device = {
@@ -592,9 +593,9 @@
static u32 msm_calculate_batt_capacity(u32 current_voltage);
static struct msm_psy_batt_pdata msm_psy_batt_data = {
- .voltage_min_design = 3200,
+ .voltage_min_design = 3500,
.voltage_max_design = 4200,
- .voltage_fail_safe = 3340,
+ .voltage_fail_safe = 3598,
.avail_chg_sources = AC_CHG | USB_CHG ,
.batt_technology = POWER_SUPPLY_TECHNOLOGY_LION,
.calculate_capacity = &msm_calculate_batt_capacity,
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index f99e5de8..b28552f 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -419,16 +419,16 @@
.fmax[VDD_DIG_##l2] = (f2), \
.fmax[VDD_DIG_##l3] = (f3)
-enum vdd_sr2_pll_levels {
- VDD_SR2_PLL_OFF,
- VDD_SR2_PLL_ON
+enum vdd_sr2_hdmi_pll_levels {
+ VDD_SR2_HDMI_PLL_OFF,
+ VDD_SR2_HDMI_PLL_ON
};
-static int set_vdd_sr2_pll_8960(struct clk_vdd_class *vdd_class, int level)
+static int set_vdd_sr2_hdmi_pll_8960(struct clk_vdd_class *vdd_class, int level)
{
int rc = 0;
- if (level == VDD_SR2_PLL_OFF) {
+ if (level == VDD_SR2_HDMI_PLL_OFF) {
rc = rpm_vreg_set_voltage(RPM_VREG_ID_PM8921_L23,
RPM_VREG_VOTER3, 0, 0, 1);
if (rc)
@@ -453,20 +453,20 @@
return rc;
}
-static DEFINE_VDD_CLASS(vdd_sr2_pll, set_vdd_sr2_pll_8960);
+static DEFINE_VDD_CLASS(vdd_sr2_hdmi_pll, set_vdd_sr2_hdmi_pll_8960);
static int sr2_lreg_uv[] = {
- [VDD_SR2_PLL_OFF] = 0,
- [VDD_SR2_PLL_ON] = 1800000,
+ [VDD_SR2_HDMI_PLL_OFF] = 0,
+ [VDD_SR2_HDMI_PLL_ON] = 1800000,
};
-static int set_vdd_sr2_pll_8064(struct clk_vdd_class *vdd_class, int level)
+static int set_vdd_sr2_hdmi_pll_8064(struct clk_vdd_class *vdd_class, int level)
{
return rpm_vreg_set_voltage(RPM_VREG_ID_PM8921_LVS7, RPM_VREG_VOTER3,
sr2_lreg_uv[level], sr2_lreg_uv[level], 1);
}
-static int set_vdd_sr2_pll_8930(struct clk_vdd_class *vdd_class, int level)
+static int set_vdd_sr2_hdmi_pll_8930(struct clk_vdd_class *vdd_class, int level)
{
return rpm_vreg_set_voltage(RPM_VREG_ID_PM8038_L23, RPM_VREG_VOTER3,
sr2_lreg_uv[level], sr2_lreg_uv[level], 1);
@@ -498,8 +498,8 @@
.dbg_name = "pll3_clk",
.rate = 1200000000,
.ops = &clk_ops_local_pll,
- .vdd_class = &vdd_sr2_pll,
- .fmax[VDD_SR2_PLL_ON] = ULONG_MAX,
+ .vdd_class = &vdd_sr2_hdmi_pll,
+ .fmax[VDD_SR2_HDMI_PLL_ON] = ULONG_MAX,
CLK_INIT(pll3_clk.c),
.warned = true,
},
@@ -3912,11 +3912,6 @@
spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
-static unsigned long hdmi_pll_clk_get_rate(struct clk *c)
-{
- return hdmi_pll_get_rate();
-}
-
static struct clk *hdmi_pll_clk_get_parent(struct clk *c)
{
return &pxo_clk.c;
@@ -3925,13 +3920,14 @@
static struct clk_ops clk_ops_hdmi_pll = {
.enable = hdmi_pll_clk_enable,
.disable = hdmi_pll_clk_disable,
- .get_rate = hdmi_pll_clk_get_rate,
.get_parent = hdmi_pll_clk_get_parent,
};
static struct clk hdmi_pll_clk = {
.dbg_name = "hdmi_pll_clk",
.ops = &clk_ops_hdmi_pll,
+ .vdd_class = &vdd_sr2_hdmi_pll,
+ .fmax[VDD_SR2_HDMI_PLL_ON] = ULONG_MAX,
CLK_INIT(hdmi_pll_clk),
};
@@ -3975,8 +3971,10 @@
void set_rate_tv(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
{
unsigned long pll_rate = (unsigned long)nf->extra_freq_data;
- if (pll_rate)
+ if (pll_rate) {
hdmi_pll_set_rate(pll_rate);
+ hdmi_pll_clk.rate = pll_rate;
+ }
set_rate_mnd(rcg, nf);
}
@@ -4338,7 +4336,23 @@
.md_val = MD8(8, m, 0, n), \
.ns_val = NS(31, 24, n, m, 5, 4, 3, d, 2, 0, s##_to_lpa_mux), \
}
-static struct clk_freq_tbl clk_tbl_aif_osr[] = {
+static struct clk_freq_tbl clk_tbl_aif_osr_492[] = {
+ F_AIF_OSR( 0, gnd, 1, 0, 0),
+ F_AIF_OSR( 512000, pll4, 4, 1, 240),
+ F_AIF_OSR( 768000, pll4, 4, 1, 160),
+ F_AIF_OSR( 1024000, pll4, 4, 1, 120),
+ F_AIF_OSR( 1536000, pll4, 4, 1, 80),
+ F_AIF_OSR( 2048000, pll4, 4, 1, 60),
+ F_AIF_OSR( 3072000, pll4, 4, 1, 40),
+ F_AIF_OSR( 4096000, pll4, 4, 1, 30),
+ F_AIF_OSR( 6144000, pll4, 4, 1, 20),
+ F_AIF_OSR( 8192000, pll4, 4, 1, 15),
+ F_AIF_OSR(12288000, pll4, 4, 1, 10),
+ F_AIF_OSR(24576000, pll4, 4, 1, 5),
+ F_END
+};
+
+static struct clk_freq_tbl clk_tbl_aif_osr_393[] = {
F_AIF_OSR( 0, gnd, 1, 0, 0),
F_AIF_OSR( 512000, pll4, 4, 1, 192),
F_AIF_OSR( 768000, pll4, 4, 1, 128),
@@ -4371,7 +4385,7 @@
.ns_mask = (BM(31, 24) | BM(6, 0)), \
.mnd_en_mask = BIT(8), \
.set_rate = set_rate_mnd, \
- .freq_tbl = clk_tbl_aif_osr, \
+ .freq_tbl = clk_tbl_aif_osr_393, \
.current_freq = &rcg_dummy_freq, \
.c = { \
.dbg_name = #i "_clk", \
@@ -4397,7 +4411,7 @@
.ns_mask = (BM(31, 24) | BM(6, 0)), \
.mnd_en_mask = BIT(8), \
.set_rate = set_rate_mnd, \
- .freq_tbl = clk_tbl_aif_osr, \
+ .freq_tbl = clk_tbl_aif_osr_393, \
.current_freq = &rcg_dummy_freq, \
.c = { \
.dbg_name = #i "_clk", \
@@ -4478,7 +4492,23 @@
.md_val = MD16(m, n), \
.ns_val = NS(31, 16, n, m, 5, 4, 3, d, 2, 0, s##_to_lpa_mux), \
}
-static struct clk_freq_tbl clk_tbl_pcm[] = {
+static struct clk_freq_tbl clk_tbl_pcm_492[] = {
+ { .ns_val = BIT(10) /* external input */ },
+ F_PCM( 512000, pll4, 4, 1, 240),
+ F_PCM( 768000, pll4, 4, 1, 160),
+ F_PCM( 1024000, pll4, 4, 1, 120),
+ F_PCM( 1536000, pll4, 4, 1, 80),
+ F_PCM( 2048000, pll4, 4, 1, 60),
+ F_PCM( 3072000, pll4, 4, 1, 40),
+ F_PCM( 4096000, pll4, 4, 1, 30),
+ F_PCM( 6144000, pll4, 4, 1, 20),
+ F_PCM( 8192000, pll4, 4, 1, 15),
+ F_PCM(12288000, pll4, 4, 1, 10),
+ F_PCM(24576000, pll4, 4, 1, 5),
+ F_END
+};
+
+static struct clk_freq_tbl clk_tbl_pcm_393[] = {
{ .ns_val = BIT(10) /* external input */ },
F_PCM( 512000, pll4, 4, 1, 192),
F_PCM( 768000, pll4, 4, 1, 128),
@@ -4510,7 +4540,7 @@
.ns_mask = BM(31, 16) | BIT(10) | BM(6, 0),
.mnd_en_mask = BIT(8),
.set_rate = set_rate_mnd,
- .freq_tbl = clk_tbl_pcm,
+ .freq_tbl = clk_tbl_pcm_393,
.current_freq = &rcg_dummy_freq,
.c = {
.dbg_name = "pcm_clk",
@@ -4537,7 +4567,7 @@
.ns_mask = (BM(31, 24) | BM(6, 0)),
.mnd_en_mask = BIT(8),
.set_rate = set_rate_mnd,
- .freq_tbl = clk_tbl_aif_osr,
+ .freq_tbl = clk_tbl_aif_osr_393,
.current_freq = &rcg_dummy_freq,
.c = {
.dbg_name = "audio_slimbus_clk",
@@ -5109,7 +5139,10 @@
CLK_LOOKUP("core_clk", sdc2_clk.c, "msm_sdcc.2"),
CLK_LOOKUP("core_clk", sdc3_clk.c, "msm_sdcc.3"),
CLK_LOOKUP("core_clk", sdc4_clk.c, "msm_sdcc.4"),
- CLK_LOOKUP("ref_clk", tsif_ref_clk.c, ""),
+ CLK_LOOKUP("ref_clk", tsif_ref_clk.c, "msm_tsif.0"),
+ CLK_LOOKUP("iface_clk", tsif_p_clk.c, "msm_tsif.0"),
+ CLK_LOOKUP("ref_clk", tsif_ref_clk.c, "msm_tsif.1"),
+ CLK_LOOKUP("iface_clk", tsif_p_clk.c, "msm_tsif.1"),
CLK_LOOKUP("core_clk", tssc_clk.c, ""),
CLK_LOOKUP("alt_core_clk", usb_hs1_xcvr_clk.c, "msm_otg"),
CLK_LOOKUP("alt_core_clk", usb_hs3_xcvr_clk.c, "msm_ehci_host.0"),
@@ -6001,7 +6034,7 @@
.mode_reg = LCC_PLL0_MODE_REG,
};
-static struct pll_config pll4_config __initdata = {
+static struct pll_config pll4_config_393 __initdata = {
.l = 0xE,
.m = 0x27A,
.n = 0x465,
@@ -6232,7 +6265,7 @@
is_pll_enabled = readl_relaxed(LCC_PLL0_STATUS_REG) & BIT(16);
if (!is_pll_enabled)
/* Ref clk = 27MHz and program pll4 to 393.2160MHz */
- configure_pll(&pll4_config, &pll4_regs, 1);
+ configure_pll(&pll4_config_393, &pll4_regs, 1);
/* Enable PLL4 source on the LPASS Primary PLL Mux */
writel_relaxed(0x1, LCC_PRI_PLL_CLK_CTL_REG);
@@ -6258,11 +6291,26 @@
static void __init msm8960_clock_pre_init(void)
{
+ /* Initialize clock registers. */
+ reg_init();
+
if (cpu_is_apq8064()) {
- vdd_sr2_pll.set_vdd = set_vdd_sr2_pll_8064;
+ vdd_sr2_hdmi_pll.set_vdd = set_vdd_sr2_hdmi_pll_8064;
} else if (cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627()) {
vdd_dig.set_vdd = set_vdd_dig_8930;
- vdd_sr2_pll.set_vdd = set_vdd_sr2_pll_8930;
+ vdd_sr2_hdmi_pll.set_vdd = set_vdd_sr2_hdmi_pll_8930;
+ }
+
+ /* Detect PLL4 programmed for alternate 491.52MHz clock plan. */
+ if (readl_relaxed(LCC_PLL0_L_VAL_REG) == 0x12) {
+ pll4_clk.c.rate = 491520000;
+ audio_slimbus_clk.freq_tbl = clk_tbl_aif_osr_492;
+ mi2s_osr_clk.freq_tbl = clk_tbl_aif_osr_492;
+ codec_i2s_mic_osr_clk.freq_tbl = clk_tbl_aif_osr_492;
+ spare_i2s_mic_osr_clk.freq_tbl = clk_tbl_aif_osr_492;
+ codec_i2s_spkr_osr_clk.freq_tbl = clk_tbl_aif_osr_492;
+ spare_i2s_spkr_osr_clk.freq_tbl = clk_tbl_aif_osr_492;
+ pcm_clk.freq_tbl = clk_tbl_pcm_492;
}
/*
@@ -6305,9 +6353,6 @@
vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
clk_ops_local_pll.enable = sr_pll_clk_enable;
-
- /* Initialize clock registers. */
- reg_init();
}
static void __init msm8960_clock_post_init(void)
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 6c9a566..48f16b0 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -593,10 +593,12 @@
#define RPM_MEM_CLK_TYPE 0x326b6c63
#define CXO_ID 0x0
+#define QDSS_ID 0x1
#define PNOC_ID 0x0
#define SNOC_ID 0x1
#define CNOC_ID 0x2
+#define MMSSNOC_AHB_ID 0x4
#define BIMC_ID 0x0
#define OCMEM_ID 0x1
@@ -604,6 +606,8 @@
DEFINE_CLK_RPM_SMD(pnoc_clk, pnoc_a_clk, RPM_BUS_CLK_TYPE, PNOC_ID, NULL);
DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_ID, NULL);
DEFINE_CLK_RPM_SMD(cnoc_clk, cnoc_a_clk, RPM_BUS_CLK_TYPE, CNOC_ID, NULL);
+DEFINE_CLK_RPM_SMD(mmssnoc_ahb_clk, mmssnoc_ahb_a_clk, RPM_BUS_CLK_TYPE,
+ MMSSNOC_AHB_ID, NULL);
DEFINE_CLK_RPM_SMD(bimc_clk, bimc_a_clk, RPM_MEM_CLK_TYPE, BIMC_ID, NULL);
DEFINE_CLK_RPM_SMD(ocmemgx_clk, ocmemgx_a_clk, RPM_MEM_CLK_TYPE, OCMEM_ID,
@@ -611,10 +615,10 @@
DEFINE_CLK_RPM_SMD_BRANCH(cxo_clk_src, cxo_a_clk_src,
RPM_MISC_CLK_TYPE, CXO_ID, 19200000);
+DEFINE_CLK_RPM_SMD_QDSS(qdss_clk, qdss_a_clk, RPM_MISC_CLK_TYPE, QDSS_ID);
static struct pll_vote_clk gpll0_clk_src = {
.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
- .en_mask = BIT(0),
.status_reg = (void __iomem *)GPLL0_STATUS_REG,
.status_mask = BIT(17),
.parent = &cxo_clk_src.c,
@@ -3963,6 +3967,20 @@
},
};
+struct rcg_clk audio_core_lpaif_pcmoe_clk_src = {
+ .cmd_rcgr_reg = LPAIF_PCMOE_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_audio_core_lpaif_clock,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[LPASS_BASE],
+ .c = {
+ .dbg_name = "audio_core_lpaif_pcmoe_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP1(LOW, 12290000),
+ CLK_INIT(audio_core_lpaif_pcmoe_clk_src.c),
+ },
+};
+
static struct branch_clk audio_core_lpaif_codec_spkr_osr_clk = {
.cbcr_reg = AUDIO_CORE_LPAIF_CODEC_SPKR_OSR_CBCR,
.parent = &audio_core_lpaif_codec_spkr_clk_src.c,
@@ -4190,6 +4208,17 @@
},
};
+struct branch_clk audio_core_lpaif_pcmoe_clk = {
+ .cbcr_reg = AUDIO_CORE_LPAIF_PCM_DATA_OE_CBCR,
+ .parent = &audio_core_lpaif_pcmoe_clk_src.c,
+ .base = &virt_bases[LPASS_BASE],
+ .c = {
+ .dbg_name = "audio_core_lpaif_pcmoe_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(audio_core_lpaif_pcmoe_clk.c),
+ },
+};
+
static struct branch_clk q6ss_ahb_lfabif_clk = {
.cbcr_reg = LPASS_Q6SS_AHB_LFABIF_CBCR,
.has_sibling = 1,
@@ -4386,6 +4415,7 @@
{&audio_core_lpaif_quad_clk_src.c, LPASS_BASE, 0x0014},
{&audio_core_lpaif_pcm0_clk_src.c, LPASS_BASE, 0x0013},
{&audio_core_lpaif_pcm1_clk_src.c, LPASS_BASE, 0x0012},
+ {&audio_core_lpaif_pcmoe_clk_src.c, LPASS_BASE, 0x000f},
{&audio_core_slimbus_core_clk.c, LPASS_BASE, 0x003d},
{&audio_core_slimbus_lfabif_clk.c, LPASS_BASE, 0x003e},
{&q6ss_xo_clk.c, LPASS_BASE, 0x002b},
@@ -4789,6 +4819,8 @@
CLK_LOOKUP("core_clk", audio_core_lpaif_pcm1_clk_src.c, ""),
CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm1_ebit_clk.c, ""),
CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm1_ibit_clk.c, ""),
+ CLK_LOOKUP("core_clk_src", audio_core_lpaif_pcmoe_clk_src.c, ""),
+ CLK_LOOKUP("core_clk", audio_core_lpaif_pcmoe_clk.c, ""),
CLK_LOOKUP("core_clk", mss_xo_q6_clk.c, "pil-q6v5-mss"),
CLK_LOOKUP("bus_clk", mss_bus_q6_clk.c, "pil-q6v5-mss"),
@@ -4830,6 +4862,36 @@
CLK_LOOKUP("bus_a_clk", ocmemnoc_clk.c, "msm_ocmem_noc"),
CLK_LOOKUP("bus_clk", mmss_mmssnoc_axi_clk.c, "msm_mmss_noc"),
CLK_LOOKUP("bus_a_clk", mmss_mmssnoc_axi_clk.c, "msm_mmss_noc"),
+
+ CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tmc-etr"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tpiu"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-replicator"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tmc-etf"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-merg"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-in0"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-in1"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-kpss"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-mmss"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-stm"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm0"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm1"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm2"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm3"),
+
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-tmc-etr"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-tpiu"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-replicator"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-tmc-etf"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-merg"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-in0"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-in1"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-kpss"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-mmss"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-stm"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm0"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm1"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm2"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm3"),
};
static struct pll_config_regs gpll0_regs __initdata = {
@@ -4986,7 +5048,7 @@
.main_output_mask = BIT(0),
};
-#define PLL_AUX_OUTPUT BIT(1)
+#define PLL_AUX_OUTPUT_BIT 1
static void __init reg_init(void)
{
@@ -5007,7 +5069,7 @@
/* Active GPLL0's aux output. This is needed by acpuclock. */
regval = readl_relaxed(GCC_REG_BASE(GPLL0_USER_CTL_REG));
- regval |= BIT(PLL_AUX_OUTPUT);
+ regval |= BIT(PLL_AUX_OUTPUT_BIT);
writel_relaxed(regval, GCC_REG_BASE(GPLL0_USER_CTL_REG));
/* Vote for GPLL0 to turn on. Needed by acpuclock. */
@@ -5028,6 +5090,13 @@
clk_set_rate(&ocmemnoc_clk_src.c, 333330000);
/*
+ * Hold an active set vote at a rate of 40MHz for the MMSS NOC AHB
+ * source. Sleep set vote is 0.
+ */
+ clk_set_rate(&mmssnoc_ahb_a_clk.c, 40000000);
+ clk_prepare_enable(&mmssnoc_ahb_a_clk.c);
+
+ /*
* Hold an active set vote for CXO; this is because CXO is expected
* to remain on whenever CPUs aren't power collapsed.
*/
diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c
index 4493ddc..1769f07 100644
--- a/arch/arm/mach-msm/clock-8x60.c
+++ b/arch/arm/mach-msm/clock-8x60.c
@@ -3522,6 +3522,7 @@
CLK_LOOKUP("mem_a_clk", ebi1_msmbus_a_clk.c, "msm_bus"),
CLK_LOOKUP("smi_clk", smi_clk.c, "msm_bus"),
CLK_LOOKUP("smi_a_clk", smi_a_clk.c, "msm_bus"),
+ CLK_LOOKUP("mmfpb_a_clk", mmfpb_a_clk.c, "clock-8x60"),
CLK_LOOKUP("core_clk", gp0_clk.c, ""),
CLK_LOOKUP("core_clk", gp1_clk.c, ""),
@@ -3873,7 +3874,7 @@
int rc;
/* Vote for MMFPB to be at least 64MHz when an Apps CPU is active. */
- struct clk *mmfpb_a_clk = clk_get(NULL, "mmfpb_a_clk");
+ struct clk *mmfpb_a_clk = clk_get_sys("clock-8x60", "mmfpb_a_clk");
if (WARN(IS_ERR(mmfpb_a_clk), "mmfpb_a_clk not found (%ld)\n",
PTR_ERR(mmfpb_a_clk)))
return PTR_ERR(mmfpb_a_clk);
diff --git a/arch/arm/mach-msm/clock-dss-8960.c b/arch/arm/mach-msm/clock-dss-8960.c
index 4e17b29..ca1a3e1 100644
--- a/arch/arm/mach-msm/clock-dss-8960.c
+++ b/arch/arm/mach-msm/clock-dss-8960.c
@@ -90,7 +90,6 @@
#define PLL_PWRDN_B BIT(3)
#define PD_PLL BIT(1)
-static unsigned current_rate;
static unsigned hdmi_pll_on;
int hdmi_pll_enable(void)
@@ -219,11 +218,6 @@
hdmi_pll_on = 0;
}
-unsigned hdmi_pll_get_rate(void)
-{
- return current_rate;
-}
-
int hdmi_pll_set_rate(unsigned rate)
{
unsigned int set_power_dwn = 0;
@@ -378,7 +372,6 @@
if (set_power_dwn)
hdmi_pll_enable();
- current_rate = rate;
if (!ahb_enabled)
writel_relaxed(ahb_en_reg & ~BIT(4), AHB_EN_REG);
diff --git a/arch/arm/mach-msm/clock-dss-8960.h b/arch/arm/mach-msm/clock-dss-8960.h
index 4734cde..72e70fc 100644
--- a/arch/arm/mach-msm/clock-dss-8960.h
+++ b/arch/arm/mach-msm/clock-dss-8960.h
@@ -15,7 +15,6 @@
int hdmi_pll_enable(void);
void hdmi_pll_disable(void);
-unsigned hdmi_pll_get_rate(void);
int hdmi_pll_set_rate(unsigned rate);
#endif
diff --git a/arch/arm/mach-msm/clock-rpm.c b/arch/arm/mach-msm/clock-rpm.c
index 8096c10..207dbef 100644
--- a/arch/arm/mach-msm/clock-rpm.c
+++ b/arch/arm/mach-msm/clock-rpm.c
@@ -54,15 +54,11 @@
return (rc < 0) ? rc : iv.value * r->factor;
}
-#define RPM_SMD_KEY_RATE 0x007A484B
-#define RPM_SMD_KEY_ENABLE 0x62616E45
-
static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
uint32_t context, int noirq)
{
- u32 rpm_key = r->branch ? RPM_SMD_KEY_ENABLE : RPM_SMD_KEY_RATE;
struct msm_rpm_kvp kvp = {
- .key = rpm_key,
+ .key = r->rpm_key,
.data = (void *)&value,
.length = sizeof(value),
};
diff --git a/arch/arm/mach-msm/clock-rpm.h b/arch/arm/mach-msm/clock-rpm.h
index ce878ce..22691c5 100644
--- a/arch/arm/mach-msm/clock-rpm.h
+++ b/arch/arm/mach-msm/clock-rpm.h
@@ -17,6 +17,10 @@
#include <mach/rpm.h>
#include <mach/rpm-smd.h>
+#define RPM_SMD_KEY_RATE 0x007A484B
+#define RPM_SMD_KEY_ENABLE 0x62616E45
+#define RPM_SMD_KEY_STATE 0x54415453
+
struct clk_ops;
struct clk_rpmrs_data;
extern struct clk_ops clk_ops_rpm;
@@ -24,6 +28,7 @@
struct rpm_clk {
const int rpm_res_type;
+ const int rpm_key;
const int rpm_clk_id;
const int rpm_status_id;
const bool active_only;
@@ -47,12 +52,14 @@
extern struct clk_rpmrs_data clk_rpmrs_data;
extern struct clk_rpmrs_data clk_rpmrs_data_smd;
-#define __DEFINE_CLK_RPM(name, active, type, r_id, stat_id, dep, rpmrsdata) \
+#define __DEFINE_CLK_RPM(name, active, type, r_id, stat_id, dep, key, \
+ rpmrsdata) \
static struct rpm_clk active; \
static struct rpm_clk name = { \
.rpm_res_type = (type), \
.rpm_clk_id = (r_id), \
.rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
.peer = &active, \
.factor = 1000, \
.rpmrs_data = (rpmrsdata),\
@@ -67,6 +74,7 @@
.rpm_res_type = (type), \
.rpm_clk_id = (r_id), \
.rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
.peer = &name, \
.active_only = true, \
.factor = 1000, \
@@ -80,12 +88,13 @@
};
#define __DEFINE_CLK_RPM_BRANCH(name, active, type, r_id, stat_id, r, \
- rpmrsdata) \
+ key, rpmrsdata) \
static struct rpm_clk active; \
static struct rpm_clk name = { \
.rpm_res_type = (type), \
.rpm_clk_id = (r_id), \
.rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
.peer = &active, \
.last_set_khz = ((r) / 1000), \
.last_set_sleep_khz = ((r) / 1000), \
@@ -104,6 +113,7 @@
.rpm_res_type = (type), \
.rpm_clk_id = (r_id), \
.rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
.peer = &name, \
.last_set_khz = ((r) / 1000), \
.active_only = true, \
@@ -119,12 +129,14 @@
}, \
};
-#define __DEFINE_CLK_RPM_QDSS(name, active, type, r_id, stat_id, rpmrsdata) \
+#define __DEFINE_CLK_RPM_QDSS(name, active, type, r_id, stat_id, \
+ key, rpmrsdata) \
static struct rpm_clk active; \
static struct rpm_clk name = { \
.rpm_res_type = (type), \
.rpm_clk_id = (r_id), \
.rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
.peer = &active, \
.factor = 1, \
.rpmrs_data = (rpmrsdata),\
@@ -139,6 +151,7 @@
.rpm_res_type = (type), \
.rpm_clk_id = (r_id), \
.rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
.peer = &name, \
.active_only = true, \
.factor = 1, \
@@ -153,21 +166,26 @@
#define DEFINE_CLK_RPM(name, active, r_id, dep) \
__DEFINE_CLK_RPM(name, active, 0, MSM_RPM_ID_##r_id##_CLK, \
- MSM_RPM_STATUS_ID_##r_id##_CLK, dep, &clk_rpmrs_data)
+ MSM_RPM_STATUS_ID_##r_id##_CLK, dep, 0, &clk_rpmrs_data)
#define DEFINE_CLK_RPM_QDSS(name, active) \
__DEFINE_CLK_RPM_QDSS(name, active, 0, MSM_RPM_ID_QDSS_CLK, \
- MSM_RPM_STATUS_ID_QDSS_CLK, &clk_rpmrs_data)
+ MSM_RPM_STATUS_ID_QDSS_CLK, 0, &clk_rpmrs_data)
#define DEFINE_CLK_RPM_BRANCH(name, active, r_id, r) \
__DEFINE_CLK_RPM_BRANCH(name, active, 0, MSM_RPM_ID_##r_id##_CLK, \
- MSM_RPM_STATUS_ID_##r_id##_CLK, r, &clk_rpmrs_data)
+ MSM_RPM_STATUS_ID_##r_id##_CLK, r, 0, &clk_rpmrs_data)
#define DEFINE_CLK_RPM_SMD(name, active, type, r_id, dep) \
- __DEFINE_CLK_RPM(name, active, type, r_id, 0, dep, &clk_rpmrs_data_smd)
+ __DEFINE_CLK_RPM(name, active, type, r_id, 0, dep, \
+ RPM_SMD_KEY_RATE, &clk_rpmrs_data_smd)
-#define DEFINE_CLK_RPM_SMD_BRANCH(name, active, type, r_id, dep) \
- __DEFINE_CLK_RPM_BRANCH(name, active, type, r_id, 0, dep, \
- &clk_rpmrs_data_smd)
+#define DEFINE_CLK_RPM_SMD_BRANCH(name, active, type, r_id, r) \
+ __DEFINE_CLK_RPM_BRANCH(name, active, type, r_id, 0, r, \
+ RPM_SMD_KEY_ENABLE, &clk_rpmrs_data_smd)
+
+#define DEFINE_CLK_RPM_SMD_QDSS(name, active, type, r_id) \
+ __DEFINE_CLK_RPM_QDSS(name, active, type, r_id, \
+ 0, RPM_SMD_KEY_STATE, &clk_rpmrs_data_smd)
#endif
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index a36e7d7..1f954c8 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -355,6 +355,16 @@
.id = MSM_BUS_FAB_CPSS_FPB,
};
+struct platform_device msm8627_device_acpuclk = {
+ .name = "acpuclk-8627",
+ .id = -1,
+};
+
+struct platform_device msm8930_device_acpuclk = {
+ .name = "acpuclk-8930",
+ .id = -1,
+};
+
static struct fs_driver_data gfx3d_fs_data = {
.clks = (struct fs_clk_data[]){
{ .name = "core_clk", .reset_rate = 27000000 },
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 79f8c88..d74d4aa 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -3812,6 +3812,7 @@
#define AP2MDM_PMIC_PWR_EN 22
#define AP2MDM_KPDPWR_N 79
#define AP2MDM_SOFT_RESET 78
+#define USB_SW 25
static struct resource sglte_resources[] = {
{
@@ -3856,6 +3857,12 @@
.name = "AP2MDM_SOFT_RESET",
.flags = IORESOURCE_IO,
},
+ {
+ .start = USB_SW,
+ .end = USB_SW,
+ .name = "USB_SW",
+ .flags = IORESOURCE_IO,
+ },
};
struct platform_device mdm_sglte_device = {
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index 045dfb9..2207e3c 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -420,7 +420,9 @@
extern struct platform_device msm7x27aa_device_acpuclk;
extern struct platform_device msm7x30_device_acpuclk;
extern struct platform_device msm8625_device_acpuclk;
+extern struct platform_device msm8627_device_acpuclk;
extern struct platform_device msm8x50_device_acpuclk;
extern struct platform_device msm8x60_device_acpuclk;
+extern struct platform_device msm8930_device_acpuclk;
extern struct platform_device msm8960_device_acpuclk;
extern struct platform_device msm9615_device_acpuclk;
diff --git a/arch/arm/mach-msm/dma.c b/arch/arm/mach-msm/dma.c
index e5dca00..12f5aa9 100644
--- a/arch/arm/mach-msm/dma.c
+++ b/arch/arm/mach-msm/dma.c
@@ -356,7 +356,7 @@
}
if (!dmov_conf[adm].channel_active) {
dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
- schedule_delayed_work(&dmov_conf[adm].work, HZ);
+ schedule_delayed_work(&dmov_conf[adm].work, (HZ/10));
}
spin_unlock_irqrestore(&dmov_conf[adm].list_lock, flags);
error:
@@ -573,7 +573,7 @@
if (!dmov_conf[adm].channel_active && valid) {
disable_irq_nosync(dmov_conf[adm].irq);
dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
- schedule_delayed_work(&dmov_conf[adm].work, HZ);
+ schedule_delayed_work(&dmov_conf[adm].work, (HZ/10));
}
mutex_unlock(&dmov_conf[adm].lock);
diff --git a/arch/arm/mach-msm/gss-8064.c b/arch/arm/mach-msm/gss-8064.c
index b9b877a..e528650 100644
--- a/arch/arm/mach-msm/gss-8064.c
+++ b/arch/arm/mach-msm/gss-8064.c
@@ -43,6 +43,8 @@
static int crash_shutdown;
+static struct subsys_device *gss_8064_dev;
+
#define MAX_SSR_REASON_LEN 81U
static void log_gss_sfr(void)
@@ -72,7 +74,7 @@
static void restart_gss(void)
{
log_gss_sfr();
- subsystem_restart("gss");
+ subsystem_restart_dev(gss_8064_dev);
}
static void smsm_state_cb(void *data, uint32_t old_state, uint32_t new_state)
@@ -91,7 +93,7 @@
#define Q6_FW_WDOG_ENABLE 0x08882024
#define Q6_SW_WDOG_ENABLE 0x08982024
-static int gss_shutdown(const struct subsys_data *subsys)
+static int gss_shutdown(const struct subsys_desc *desc)
{
pil_force_shutdown("gss");
disable_irq_nosync(GSS_A5_WDOG_EXPIRED);
@@ -99,14 +101,14 @@
return 0;
}
-static int gss_powerup(const struct subsys_data *subsys)
+static int gss_powerup(const struct subsys_desc *desc)
{
pil_force_boot("gss");
enable_irq(GSS_A5_WDOG_EXPIRED);
return 0;
}
-void gss_crash_shutdown(const struct subsys_data *subsys)
+void gss_crash_shutdown(const struct subsys_desc *desc)
{
crash_shutdown = 1;
smsm_reset_modem(SMSM_RESET);
@@ -122,7 +124,7 @@
};
static int gss_ramdump(int enable,
- const struct subsys_data *crashed_subsys)
+ const struct subsys_desc *crashed_subsys)
{
int ret = 0;
@@ -157,7 +159,7 @@
return IRQ_HANDLED;
}
-static struct subsys_data gss_8064 = {
+static struct subsys_desc gss_8064 = {
.name = "gss",
.shutdown = gss_shutdown,
.powerup = gss_powerup,
@@ -167,7 +169,10 @@
static int gss_subsystem_restart_init(void)
{
- return ssr_register_subsystem(&gss_8064);
+ gss_8064_dev = subsys_register(&gss_8064);
+ if (IS_ERR(gss_8064_dev))
+ return PTR_ERR(gss_8064_dev);
+ return 0;
}
static int gss_open(struct inode *inode, struct file *filep)
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 8607177..86720d8 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -201,8 +201,8 @@
};
struct msm_camera_csi_lane_params {
- uint8_t csi_lane_assign;
- uint8_t csi_lane_mask;
+ uint16_t csi_lane_assign;
+ uint16_t csi_lane_mask;
};
struct msm_camera_gpio_conf {
diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h
index 4d66d88..43d707e 100644
--- a/arch/arm/mach-msm/include/mach/camera.h
+++ b/arch/arm/mach-msm/include/mach/camera.h
@@ -135,14 +135,16 @@
struct msm_camera_csid_params {
uint8_t lane_cnt;
- uint8_t lane_assign;
+ uint16_t lane_assign;
+ uint8_t phy_sel;
struct msm_camera_csid_lut_params lut_params;
};
struct msm_camera_csiphy_params {
uint8_t lane_cnt;
uint8_t settle_cnt;
- uint8_t lane_mask;
+ uint16_t lane_mask;
+ uint8_t combo_mode;
};
struct msm_camera_csi2_params {
@@ -565,6 +567,7 @@
S_STEREO_VIDEO,
S_STEREO_CAPTURE,
S_DEFAULT,
+ S_LIVESHOT,
S_EXIT
};
diff --git a/arch/arm/mach-msm/include/mach/dma.h b/arch/arm/mach-msm/include/mach/dma.h
index ba621e6..381ea12 100644
--- a/arch/arm/mach-msm/include/mach/dma.h
+++ b/arch/arm/mach-msm/include/mach/dma.h
@@ -263,6 +263,9 @@
#define DMOV8064_CE_OUT_CHAN 1
#define DMOV8064_CE_OUT_CRCI 15
+#define DMOV8064_TSIF_CHAN 2
+#define DMOV8064_TSIF_CRCI 1
+
/* no client rate control ifc (eg, ram) */
#define DMOV_NONE_CRCI 0
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index 4bfbe61..28c53db 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -35,6 +35,9 @@
*/
#define MAX_NUM_MIDS 32
+/* Maximum number of SMT entries allowed by the system */
+#define MAX_NUM_SMR 128
+
/**
* struct msm_iommu_dev - a single IOMMU hardware instance
* name Human-readable name given to this IOMMU HW instance
@@ -69,6 +72,9 @@
* @irq: Interrupt number
* @clk: The bus clock for this IOMMU hardware instance
* @pclk: The clock for the IOMMU bus interconnect
+ * @name: Human-readable name of this IOMMU device
+ * @gdsc: Regulator needed to power this HW block (v2 only)
+ * @nsmr: Size of the SMT on this HW block (v2 only)
*
* A msm_iommu_drvdata holds the global driver data about a single piece
* of an IOMMU hardware instance.
@@ -81,6 +87,7 @@
struct clk *pclk;
const char *name;
struct regulator *gdsc;
+ unsigned int nsmr;
};
/**
@@ -89,6 +96,10 @@
* @pdev: Platform device associated wit this HW instance
* @attached_elm: List element for domains to track which devices are
* attached to them
+ * @attached_domain: Domain currently attached to this context (if any)
+ * @name: Human-readable name of this context device
+ * @sids: List of Stream IDs mapped to this context (v2 only)
+ * @nsid: Number of Stream IDs mapped to this context (v2 only)
*
* A msm_iommu_ctx_drvdata holds the driver data for a single context bank
* within each IOMMU hardware instance
@@ -99,6 +110,8 @@
struct list_head attached_elm;
struct iommu_domain *attached_domain;
const char *name;
+ u32 sids[MAX_NUM_SMR];
+ unsigned int nsid;
};
/*
diff --git a/arch/arm/mach-msm/include/mach/iommu_hw-v2.h b/arch/arm/mach-msm/include/mach/iommu_hw-v2.h
index fac13b3..b01dbd8 100644
--- a/arch/arm/mach-msm/include/mach/iommu_hw-v2.h
+++ b/arch/arm/mach-msm/include/mach/iommu_hw-v2.h
@@ -16,8 +16,6 @@
#define CTX_SHIFT 12
#define CTX_OFFSET 0x8000
-#define MAX_NUM_SMR 128
-
#define GET_GLOBAL_REG(reg, base) (readl_relaxed((base) + (reg)))
#define GET_CTX_REG(reg, base, ctx) \
(readl_relaxed((base) + CTX_OFFSET + (reg) + ((ctx) << CTX_SHIFT)))
diff --git a/arch/arm/mach-msm/include/mach/mdm2.h b/arch/arm/mach-msm/include/mach/mdm2.h
index 637a3cc..c4877cc 100644
--- a/arch/arm/mach-msm/include/mach/mdm2.h
+++ b/arch/arm/mach-msm/include/mach/mdm2.h
@@ -31,6 +31,7 @@
struct mdm_vddmin_resource *vddmin_resource;
struct platform_device *peripheral_platform_device;
const unsigned int ramdump_timeout_ms;
+ int image_upgrade_supported;
};
#endif
diff --git a/arch/arm/mach-msm/include/mach/ocmem.h b/arch/arm/mach-msm/include/mach/ocmem.h
index b0475ed..927624d 100644
--- a/arch/arm/mach-msm/include/mach/ocmem.h
+++ b/arch/arm/mach-msm/include/mach/ocmem.h
@@ -24,6 +24,8 @@
#define OCMEM_MAX_CHUNKS 32
#define MIN_CHUNK_SIZE (SZ_1K/8)
+struct ocmem_notifier;
+
struct ocmem_buf {
unsigned long addr;
unsigned long len;
@@ -80,9 +82,11 @@
/* APIS */
/* Notification APIs */
-void *ocmem_notifier_register(int client_id, struct notifier_block *nb);
+struct ocmem_notifier *ocmem_notifier_register(int client_id,
+ struct notifier_block *nb);
-int ocmem_notifier_unregister(void *notif_hndl, struct notifier_block *nb);
+int ocmem_notifier_unregister(struct ocmem_notifier *notif_hndl,
+ struct notifier_block *nb);
/* Obtain the maximum quota for the client */
unsigned long get_max_quota(int client_id);
diff --git a/arch/arm/mach-msm/include/mach/ocmem_priv.h b/arch/arm/mach-msm/include/mach/ocmem_priv.h
index dd976ea..64c9ffe 100644
--- a/arch/arm/mach-msm/include/mach/ocmem_priv.h
+++ b/arch/arm/mach-msm/include/mach/ocmem_priv.h
@@ -61,6 +61,23 @@
SCHED_DUMP,
};
+struct ocmem_plat_data {
+ void __iomem *vbase;
+ unsigned long size;
+ unsigned long base;
+ struct ocmem_partition *parts;
+ int nr_parts;
+ void __iomem *reg_base;
+ void __iomem *br_base;
+ void __iomem *dm_base;
+ unsigned nr_regions;
+ unsigned nr_macros;
+ unsigned nr_ports;
+ int ocmem_irq;
+ int dm_irq;
+ bool interleaved;
+};
+
struct ocmem_req {
struct rw_semaphore rw_sem;
/* Chain in sched queue */
diff --git a/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h b/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
index 319c2d8..a32e168 100644
--- a/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
+++ b/arch/arm/mach-msm/include/mach/rpm-regulator-smd.h
@@ -29,7 +29,8 @@
* in this enum correspond to MSM8974 for PMIC PM8841 SMPS 2 (VDD_Dig).
*/
enum rpm_regulator_voltage_corner {
- RPM_REGULATOR_CORNER_RETENTION = 1,
+ RPM_REGULATOR_CORNER_NONE = 1,
+ RPM_REGULATOR_CORNER_RETENTION,
RPM_REGULATOR_CORNER_SVS_KRAIT,
RPM_REGULATOR_CORNER_SVS_SOC,
RPM_REGULATOR_CORNER_NORMAL,
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 9110632..2c3d395 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -94,7 +94,9 @@
int __init socinfo_init(void) __must_check;
const int read_msm_cpu_type(void);
const int get_core_count(void);
+const int cpu_is_krait(void);
const int cpu_is_krait_v1(void);
+const int cpu_is_krait_v2(void);
static inline int cpu_is_msm7x01(void)
{
diff --git a/arch/arm/mach-msm/include/mach/subsystem_restart.h b/arch/arm/mach-msm/include/mach/subsystem_restart.h
index 51ace96..6d15f47 100644
--- a/arch/arm/mach-msm/include/mach/subsystem_restart.h
+++ b/arch/arm/mach-msm/include/mach/subsystem_restart.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,8 @@
#define SUBSYS_NAME_MAX_LENGTH 40
+struct subsys_device;
+
enum {
RESET_SOC = 1,
RESET_SUBSYS_COUPLED,
@@ -25,29 +27,23 @@
RESET_LEVEL_MAX
};
-struct subsys_data {
+struct subsys_desc {
const char *name;
- int (*shutdown) (const struct subsys_data *);
- int (*powerup) (const struct subsys_data *);
- void (*crash_shutdown) (const struct subsys_data *);
- int (*ramdump) (int, const struct subsys_data *);
- /* Internal use only */
- struct list_head list;
- void *notif_handle;
-
- struct mutex shutdown_lock;
- struct mutex powerup_lock;
-
- void *restart_order;
- struct subsys_data *single_restart_list[1];
+ int (*shutdown)(const struct subsys_desc *desc);
+ int (*powerup)(const struct subsys_desc *desc);
+ void (*crash_shutdown)(const struct subsys_desc *desc);
+ int (*ramdump)(int, const struct subsys_desc *desc);
};
#if defined(CONFIG_MSM_SUBSYSTEM_RESTART)
-int get_restart_level(void);
-int subsystem_restart(const char *subsys_name);
-int ssr_register_subsystem(struct subsys_data *subsys);
+extern int get_restart_level(void);
+extern int subsystem_restart_dev(struct subsys_device *dev);
+extern int subsystem_restart(const char *name);
+
+extern struct subsys_device *subsys_register(struct subsys_desc *desc);
+extern void subsys_unregister(struct subsys_device *dev);
#else
@@ -56,16 +52,24 @@
return 0;
}
-static inline int subsystem_restart(const char *subsystem_name)
+static inline int subsystem_restart_dev(struct subsys_device *dev)
{
return 0;
}
-static inline int ssr_register_subsystem(struct subsys_data *subsys)
+static inline int subsystem_restart(const char *name)
{
return 0;
}
+static inline
+struct subsys_device *subsys_register(struct subsys_desc *desc)
+{
+ return NULL;
+}
+
+static inline void subsys_unregister(struct subsys_device *dev) { }
+
#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
#endif
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index 42f0438..8f9464c 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -41,6 +41,11 @@
DEFINE_MUTEX(domain_mutex);
static atomic_t domain_nums = ATOMIC_INIT(-1);
+int msm_use_iommu()
+{
+ return iommu_present(&platform_bus_type);
+}
+
int msm_iommu_map_extra(struct iommu_domain *domain,
unsigned long start_iova,
unsigned long size,
@@ -165,6 +170,11 @@
if (size & (align - 1))
return -EINVAL;
+ if (!msm_use_iommu()) {
+ *iova_val = phys;
+ return 0;
+ }
+
ret = msm_allocate_iova_address(domain_no, partition_no, size, align,
&iova);
@@ -187,6 +197,9 @@
unsigned int partition_no,
unsigned long size)
{
+ if (!msm_use_iommu())
+ return;
+
iommu_unmap_range(msm_get_iommu_domain(domain_no), iova, size);
msm_free_iova_address(iova, domain_no, partition_no, size);
}
@@ -390,11 +403,6 @@
return -EINVAL;
}
-int msm_use_iommu()
-{
- return iommu_present(&platform_bus_type);
-}
-
static int __init iommu_domain_probe(struct platform_device *pdev)
{
struct iommu_domains_pdata *p = pdev->dev.platform_data;
diff --git a/arch/arm/mach-msm/lpass-8660.c b/arch/arm/mach-msm/lpass-8660.c
index 1018360..be18b68 100644
--- a/arch/arm/mach-msm/lpass-8660.c
+++ b/arch/arm/mach-msm/lpass-8660.c
@@ -19,6 +19,7 @@
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/err.h>
#include <mach/irqs.h>
#include <mach/scm.h>
@@ -35,6 +36,8 @@
#define MODULE_NAME "lpass_8x60"
#define SCM_Q6_NMI_CMD 0x1
+static struct subsys_device *subsys_8x60_q6_dev;
+
/* Subsystem restart: QDSP6 data, functions */
static void *q6_ramdump_dev;
static void q6_fatal_fn(struct work_struct *);
@@ -44,7 +47,7 @@
static void q6_fatal_fn(struct work_struct *work)
{
pr_err("%s: Watchdog bite received from Q6!\n", MODULE_NAME);
- subsystem_restart("lpass");
+ subsystem_restart_dev(subsys_8x60_q6_dev);
enable_irq(LPASS_Q6SS_WDOG_EXPIRED);
}
@@ -65,7 +68,7 @@
pr_info("subsystem-fatal-8x60: Q6 NMI was sent.\n");
}
-int subsys_q6_shutdown(const struct subsys_data *crashed_subsys)
+int subsys_q6_shutdown(const struct subsys_desc *crashed_subsys)
{
void __iomem *q6_wdog_addr =
ioremap_nocache(Q6SS_WDOG_ENABLE, 8);
@@ -82,7 +85,7 @@
return 0;
}
-int subsys_q6_powerup(const struct subsys_data *crashed_subsys)
+int subsys_q6_powerup(const struct subsys_desc *crashed_subsys)
{
int ret = pil_force_boot("q6");
enable_irq(LPASS_Q6SS_WDOG_EXPIRED);
@@ -93,7 +96,7 @@
static struct ramdump_segment q6_segments[] = { {0x46700000, 0x47F00000 -
0x46700000}, {0x28400000, 0x12800} };
static int subsys_q6_ramdump(int enable,
- const struct subsys_data *crashed_subsys)
+ const struct subsys_desc *crashed_subsys)
{
if (enable)
return do_ramdump(q6_ramdump_dev, q6_segments,
@@ -102,7 +105,7 @@
return 0;
}
-void subsys_q6_crash_shutdown(const struct subsys_data *crashed_subsys)
+void subsys_q6_crash_shutdown(const struct subsys_desc *crashed_subsys)
{
send_q6_nmi();
}
@@ -117,7 +120,7 @@
return IRQ_HANDLED;
}
-static struct subsys_data subsys_8x60_q6 = {
+static struct subsys_desc subsys_8x60_q6 = {
.name = "lpass",
.shutdown = subsys_q6_shutdown,
.powerup = subsys_q6_powerup,
@@ -127,6 +130,7 @@
static void __exit lpass_fatal_exit(void)
{
+ subsys_unregister(subsys_8x60_q6_dev);
iounmap(q6_wakeup_intr);
free_irq(LPASS_Q6SS_WDOG_EXPIRED, NULL);
}
@@ -156,7 +160,9 @@
if (!q6_wakeup_intr)
pr_warn("lpass-8660: Unable to ioremap q6 wakeup address.");
- ret = ssr_register_subsystem(&subsys_8x60_q6);
+ subsys_8x60_q6_dev = subsys_register(&subsys_8x60_q6);
+ if (IS_ERR(subsys_8x60_q6_dev))
+ ret = PTR_ERR(subsys_8x60_q6_dev);
out:
return ret;
}
diff --git a/arch/arm/mach-msm/lpass-8960.c b/arch/arm/mach-msm/lpass-8960.c
index c58b0e1..b714a7f 100644
--- a/arch/arm/mach-msm/lpass-8960.c
+++ b/arch/arm/mach-msm/lpass-8960.c
@@ -152,7 +152,7 @@
pr_debug("%s: Q6 NMI was sent.\n", __func__);
}
-static int lpass_shutdown(const struct subsys_data *subsys)
+static int lpass_shutdown(const struct subsys_desc *subsys)
{
send_q6_nmi();
pil_force_shutdown("q6");
@@ -161,7 +161,7 @@
return 0;
}
-static int lpass_powerup(const struct subsys_data *subsys)
+static int lpass_powerup(const struct subsys_desc *subsys)
{
int ret = pil_force_boot("q6");
enable_irq(LPASS_Q6SS_WDOG_EXPIRED);
@@ -170,7 +170,7 @@
/* RAM segments - address and size for 8960 */
static struct ramdump_segment q6_segments[] = { {0x8da00000, 0x8f200000 -
0x8da00000}, {0x28400000, 0x20000} };
-static int lpass_ramdump(int enable, const struct subsys_data *subsys)
+static int lpass_ramdump(int enable, const struct subsys_desc *subsys)
{
pr_debug("%s: enable[%d]\n", __func__, enable);
if (enable)
@@ -181,7 +181,7 @@
return 0;
}
-static void lpass_crash_shutdown(const struct subsys_data *subsys)
+static void lpass_crash_shutdown(const struct subsys_desc *subsys)
{
q6_crash_shutdown = 1;
send_q6_nmi();
@@ -198,7 +198,9 @@
return IRQ_HANDLED;
}
-static struct subsys_data lpass_8960 = {
+static struct subsys_device *lpass_8960_dev;
+
+static struct subsys_desc lpass_8960 = {
.name = "lpass",
.shutdown = lpass_shutdown,
.powerup = lpass_powerup,
@@ -208,7 +210,10 @@
static int __init lpass_restart_init(void)
{
- return ssr_register_subsystem(&lpass_8960);
+ lpass_8960_dev = subsys_register(&lpass_8960);
+ if (IS_ERR(lpass_8960_dev))
+ return PTR_ERR(lpass_8960_dev);
+ return 0;
}
static int __init lpass_fatal_init(void)
@@ -275,6 +280,7 @@
{
subsys_notif_unregister_notifier(ssr_notif_hdle, &rnb);
subsys_notif_unregister_notifier(ssr_modem_notif_hdle, &mnb);
+ subsys_unregister(lpass_8960_dev);
free_irq(LPASS_Q6SS_WDOG_EXPIRED, NULL);
}
diff --git a/arch/arm/mach-msm/mdm.c b/arch/arm/mach-msm/mdm.c
index cbdc92a..4280fb4 100644
--- a/arch/arm/mach-msm/mdm.c
+++ b/arch/arm/mach-msm/mdm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -73,14 +73,14 @@
}
-static int charm_subsys_shutdown(const struct subsys_data *crashed_subsys)
+static int charm_subsys_shutdown(const struct subsys_desc *crashed_subsys)
{
charm_ready = 0;
power_down_charm();
return 0;
}
-static int charm_subsys_powerup(const struct subsys_data *crashed_subsys)
+static int charm_subsys_powerup(const struct subsys_desc *crashed_subsys)
{
power_on_charm();
boot_type = CHARM_NORMAL_BOOT;
@@ -92,7 +92,7 @@
}
static int charm_subsys_ramdumps(int want_dumps,
- const struct subsys_data *crashed_subsys)
+ const struct subsys_desc *crashed_subsys)
{
charm_ram_dump_status = 0;
if (want_dumps) {
@@ -105,7 +105,9 @@
return charm_ram_dump_status;
}
-static struct subsys_data charm_subsystem = {
+static struct subsys_device *charm_subsys;
+
+static struct subsys_desc charm_subsystem = {
.shutdown = charm_subsys_shutdown,
.ramdump = charm_subsys_ramdumps,
.powerup = charm_subsys_powerup,
@@ -229,7 +231,7 @@
static void charm_status_fn(struct work_struct *work)
{
pr_info("Reseting the charm because status changed\n");
- subsystem_restart("external_modem");
+ subsystem_restart_dev(charm_subsys);
}
static DECLARE_WORK(charm_status_work, charm_status_fn);
@@ -239,7 +241,7 @@
pr_info("Reseting the charm due to an errfatal\n");
if (get_restart_level() == RESET_SOC)
pm8xxx_stay_on();
- subsystem_restart("external_modem");
+ subsystem_restart_dev(charm_subsys);
}
static DECLARE_WORK(charm_fatal_work, charm_fatal_fn);
@@ -349,7 +351,11 @@
atomic_notifier_chain_register(&panic_notifier_list, &charm_panic_blk);
charm_debugfs_init();
- ssr_register_subsystem(&charm_subsystem);
+ charm_subsys = subsys_register(&charm_subsystem);
+ if (IS_ERR(charm_subsys)) {
+ ret = PTR_ERR(charm_subsys);
+ goto fatal_err;
+ }
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
diff --git a/arch/arm/mach-msm/mdm2.c b/arch/arm/mach-msm/mdm2.c
index 6e7086e..e74af2e 100644
--- a/arch/arm/mach-msm/mdm2.c
+++ b/arch/arm/mach-msm/mdm2.c
@@ -249,6 +249,33 @@
}
}
+static void mdm_image_upgrade(struct mdm_modem_drv *mdm_drv, int type)
+{
+ switch (type) {
+ case APQ_CONTROLLED_UPGRADE:
+ pr_debug("%s APQ controlled modem image upgrade\n", __func__);
+ mdm_drv->mdm_ready = 0;
+ mdm_toggle_soft_reset(mdm_drv);
+ break;
+ case MDM_CONTROLLED_UPGRADE:
+ pr_debug("%s MDM controlled modem image upgrade\n", __func__);
+ mdm_drv->mdm_ready = 0;
+ /*
+ * If we have no image currently present on the modem, then we
+ * would be in PBL, in which case the status gpio would not go
+ * high.
+ */
+ mdm_drv->disable_status_check = 1;
+ if (mdm_drv->usb_switch_gpio > 0) {
+ pr_info("%s Switching usb control to MDM\n", __func__);
+ gpio_direction_output(mdm_drv->usb_switch_gpio, 1);
+ } else
+ pr_err("%s usb switch gpio unavailable\n", __func__);
+ break;
+ default:
+ pr_err("%s invalid upgrade type\n", __func__);
+ }
+}
static struct mdm_ops mdm_cb = {
.power_on_mdm_cb = mdm_power_on_common,
.reset_mdm_cb = mdm_power_on_common,
@@ -256,6 +283,7 @@
.power_down_mdm_cb = mdm_power_down_common,
.debug_state_changed_cb = debug_state_changed,
.status_cb = mdm_status_changed,
+ .image_upgrade_cb = mdm_image_upgrade,
};
static int __init mdm_modem_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/mdm_common.c b/arch/arm/mach-msm/mdm_common.c
index 5b181e1..6b40cda 100644
--- a/arch/arm/mach-msm/mdm_common.c
+++ b/arch/arm/mach-msm/mdm_common.c
@@ -56,6 +56,7 @@
#define EXTERNAL_MODEM "external_modem"
static struct mdm_modem_drv *mdm_drv;
+static struct subsys_device *mdm_subsys_dev;
DECLARE_COMPLETION(mdm_needs_reload);
DECLARE_COMPLETION(mdm_boot);
@@ -150,11 +151,13 @@
* If the mdm modem did not pull the MDM2AP_STATUS gpio
* high then call subsystem_restart.
*/
- if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0) {
- pr_err("%s: MDM2AP_STATUS gpio did not go high\n",
- __func__);
- mdm_drv->mdm_ready = 0;
- subsystem_restart(EXTERNAL_MODEM);
+ if (!mdm_drv->disable_status_check) {
+ if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0) {
+ pr_err("%s: MDM2AP_STATUS gpio did not go high\n",
+ __func__);
+ mdm_drv->mdm_ready = 0;
+ subsystem_restart_dev(mdm_subsys_dev);
+ }
}
}
@@ -238,6 +241,15 @@
else
put_user(0, (unsigned long __user *) arg);
break;
+ case IMAGE_UPGRADE:
+ pr_debug("%s Image upgrade ioctl recieved\n", __func__);
+ if (mdm_drv->pdata->image_upgrade_supported &&
+ mdm_drv->ops->image_upgrade_cb) {
+ get_user(status, (unsigned long __user *) arg);
+ mdm_drv->ops->image_upgrade_cb(mdm_drv, status);
+ } else
+ pr_debug("%s Image upgrade not supported\n", __func__);
+ break;
default:
pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
ret = -EINVAL;
@@ -271,7 +283,7 @@
(gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 1)) {
pr_info("%s: Reseting the mdm due to an errfatal\n", __func__);
mdm_drv->mdm_ready = 0;
- subsystem_restart(EXTERNAL_MODEM);
+ subsystem_restart_dev(mdm_subsys_dev);
}
return IRQ_HANDLED;
}
@@ -332,7 +344,7 @@
pr_info("%s: unexpected reset external modem\n", __func__);
mdm_drv->mdm_unexpected_reset_occurred = 1;
mdm_drv->mdm_ready = 0;
- subsystem_restart(EXTERNAL_MODEM);
+ subsystem_restart_dev(mdm_subsys_dev);
} else if (value == 1) {
cancel_delayed_work(&mdm2ap_status_check_work);
pr_info("%s: status = 1: mdm is now ready\n", __func__);
@@ -349,7 +361,7 @@
return IRQ_HANDLED;
}
-static int mdm_subsys_shutdown(const struct subsys_data *crashed_subsys)
+static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys)
{
mdm_drv->mdm_ready = 0;
gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
@@ -363,11 +375,10 @@
mdm_drv->ops->reset_mdm_cb(mdm_drv);
else
mdm_drv->mdm_unexpected_reset_occurred = 0;
-
return 0;
}
-static int mdm_subsys_powerup(const struct subsys_data *crashed_subsys)
+static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
{
gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 0);
gpio_direction_output(mdm_drv->ap2mdm_status_gpio, 1);
@@ -390,7 +401,7 @@
}
static int mdm_subsys_ramdumps(int want_dumps,
- const struct subsys_data *crashed_subsys)
+ const struct subsys_desc *crashed_subsys)
{
mdm_drv->mdm_ram_dump_status = 0;
if (want_dumps) {
@@ -411,7 +422,7 @@
return mdm_drv->mdm_ram_dump_status;
}
-static struct subsys_data mdm_subsystem = {
+static struct subsys_desc mdm_subsystem = {
.shutdown = mdm_subsys_shutdown,
.ramdump = mdm_subsys_ramdumps,
.powerup = mdm_subsys_powerup,
@@ -514,6 +525,12 @@
if (pres)
mdm_drv->mdm2ap_pblrdy = pres->start;
+ /* USB_SW */
+ pres = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "USB_SW");
+ if (pres)
+ mdm_drv->usb_switch_gpio = pres->start;
+
mdm_drv->boot_type = CHARM_NORMAL_BOOT;
mdm_drv->ops = mdm_ops;
@@ -556,6 +573,13 @@
if (mdm_drv->ap2mdm_wakeup_gpio > 0)
gpio_request(mdm_drv->ap2mdm_wakeup_gpio, "AP2MDM_WAKEUP");
+ if (mdm_drv->usb_switch_gpio > 0) {
+ if (gpio_request(mdm_drv->usb_switch_gpio, "USB_SW")) {
+ pr_err("%s Failed to get usb switch gpio\n", __func__);
+ mdm_drv->usb_switch_gpio = -1;
+ }
+ }
+
gpio_direction_output(mdm_drv->ap2mdm_status_gpio, 1);
gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 0);
@@ -588,7 +612,11 @@
mdm_debugfs_init();
/* Register subsystem handlers */
- ssr_register_subsystem(&mdm_subsystem);
+ mdm_subsys_dev = subsys_register(&mdm_subsystem);
+ if (IS_ERR(mdm_subsys_dev)) {
+ ret = PTR_ERR(mdm_subsys_dev);
+ goto fatal_err;
+ }
/* ERR_FATAL irq. */
irq = MSM_GPIO_TO_INT(mdm_drv->mdm2ap_errfatal_gpio);
diff --git a/arch/arm/mach-msm/mdm_private.h b/arch/arm/mach-msm/mdm_private.h
index 7ac3727..7aba83d 100644
--- a/arch/arm/mach-msm/mdm_private.h
+++ b/arch/arm/mach-msm/mdm_private.h
@@ -23,6 +23,7 @@
void (*power_down_mdm_cb)(struct mdm_modem_drv *mdm_drv);
void (*debug_state_changed_cb)(int value);
void (*status_cb)(struct mdm_modem_drv *mdm_drv, int value);
+ void (*image_upgrade_cb)(struct mdm_modem_drv *mdm_drv, int type);
};
/* Private mdm2 data structure */
@@ -37,6 +38,7 @@
unsigned ap2mdm_soft_reset_gpio;
unsigned ap2mdm_pmic_pwr_en_gpio;
unsigned mdm2ap_pblrdy;
+ unsigned usb_switch_gpio;
int mdm_errfatal_irq;
int mdm_status_irq;
@@ -46,6 +48,7 @@
enum charm_boot_type boot_type;
int mdm_debug_on;
int mdm_unexpected_reset_occurred;
+ int disable_status_check;
struct mdm_ops *ops;
struct mdm_platform_data *pdata;
diff --git a/arch/arm/mach-msm/modem-8660.c b/arch/arm/mach-msm/modem-8660.c
index 9c558e4..096ed9c 100644
--- a/arch/arm/mach-msm/modem-8660.c
+++ b/arch/arm/mach-msm/modem-8660.c
@@ -19,6 +19,7 @@
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/err.h>
#include <mach/irqs.h>
#include <mach/scm.h>
@@ -48,6 +49,8 @@
module_param(reset_modem, int, 0644);
#endif
+static struct subsys_device *modem_8660_dev;
+
/* Subsystem restart: Modem data, functions */
static void *modem_ramdump_dev;
static void modem_fatal_fn(struct work_struct *);
@@ -75,7 +78,7 @@
mb();
iounmap(hwio_modem_reset_addr);
- subsystem_restart("modem");
+ subsystem_restart_dev(modem_8660_dev);
enable_irq(MARM_WDOG_EXPIRED);
}
@@ -93,7 +96,7 @@
if (modem_state == 0 || modem_state & panic_smsm_states) {
- subsystem_restart("modem");
+ subsystem_restart_dev(modem_8660_dev);
enable_irq(MARM_WDOG_EXPIRED);
} else if (modem_state & reset_smsm_states) {
@@ -135,13 +138,13 @@
goto out;
}
pr_err("%s: Modem error fatal'ed.", MODULE_NAME);
- subsystem_restart("modem");
+ subsystem_restart_dev(modem_8660_dev);
}
out:
return NOTIFY_DONE;
}
-static int modem_shutdown(const struct subsys_data *crashed_subsys)
+static int modem_shutdown(const struct subsys_desc *crashed_subsys)
{
void __iomem *modem_wdog_addr;
@@ -178,7 +181,7 @@
return 0;
}
-static int modem_powerup(const struct subsys_data *crashed_subsys)
+static int modem_powerup(const struct subsys_desc *crashed_subsys)
{
int ret;
@@ -192,8 +195,7 @@
static struct ramdump_segment modem_segments[] = {
{0x42F00000, 0x46000000 - 0x42F00000} };
-static int modem_ramdump(int enable,
- const struct subsys_data *crashed_subsys)
+static int modem_ramdump(int enable, const struct subsys_desc *crashed_subsys)
{
if (enable)
return do_ramdump(modem_ramdump_dev, modem_segments,
@@ -202,8 +204,7 @@
return 0;
}
-static void modem_crash_shutdown(
- const struct subsys_data *crashed_subsys)
+static void modem_crash_shutdown(const struct subsys_desc *crashed_subsys)
{
/* If modem hasn't already crashed, send SMSM_RESET. */
if (!(smsm_get_state(SMSM_MODEM_STATE) & SMSM_RESET)) {
@@ -225,7 +226,7 @@
return IRQ_HANDLED;
}
-static struct subsys_data subsys_8660_modem = {
+static struct subsys_desc subsys_8660_modem = {
.name = "modem",
.shutdown = modem_shutdown,
.powerup = modem_powerup,
@@ -260,13 +261,16 @@
goto out;
}
- ret = ssr_register_subsystem(&subsys_8660_modem);
+ modem_8660_dev = subsys_register(&subsys_8660_modem);
+ if (IS_ERR(modem_8660_dev))
+ ret = PTR_ERR(modem_8660_dev);
out:
return ret;
}
static void __exit modem_8660_exit(void)
{
+ subsys_unregister(modem_8660_dev);
free_irq(MARM_WDOG_EXPIRED, NULL);
}
diff --git a/arch/arm/mach-msm/modem-8960.c b/arch/arm/mach-msm/modem-8960.c
index fd7b7b5..73b9b1f 100644
--- a/arch/arm/mach-msm/modem-8960.c
+++ b/arch/arm/mach-msm/modem-8960.c
@@ -35,6 +35,8 @@
static int crash_shutdown;
+static struct subsys_device *modem_8960_dev;
+
#define MAX_SSR_REASON_LEN 81U
#define Q6_FW_WDOG_ENABLE 0x08882024
#define Q6_SW_WDOG_ENABLE 0x08982024
@@ -66,7 +68,7 @@
static void restart_modem(void)
{
log_modem_sfr();
- subsystem_restart("modem");
+ subsystem_restart_dev(modem_8960_dev);
}
static void modem_wdog_check(struct work_struct *work)
@@ -101,7 +103,7 @@
}
}
-static int modem_shutdown(const struct subsys_data *subsys)
+static int modem_shutdown(const struct subsys_desc *subsys)
{
void __iomem *q6_fw_wdog_addr;
void __iomem *q6_sw_wdog_addr;
@@ -142,7 +144,7 @@
#define MODEM_WDOG_CHECK_TIMEOUT_MS 10000
-static int modem_powerup(const struct subsys_data *subsys)
+static int modem_powerup(const struct subsys_desc *subsys)
{
pil_force_boot("modem_fw");
pil_force_boot("modem");
@@ -153,7 +155,7 @@
return 0;
}
-void modem_crash_shutdown(const struct subsys_data *subsys)
+void modem_crash_shutdown(const struct subsys_desc *subsys)
{
crash_shutdown = 1;
smsm_reset_modem(SMSM_RESET);
@@ -176,8 +178,7 @@
static void *modemsw_ramdump_dev;
static void *smem_ramdump_dev;
-static int modem_ramdump(int enable,
- const struct subsys_data *crashed_subsys)
+static int modem_ramdump(int enable, const struct subsys_desc *crashed_subsys)
{
int ret = 0;
@@ -234,7 +235,7 @@
return IRQ_HANDLED;
}
-static struct subsys_data modem_8960 = {
+static struct subsys_desc modem_8960 = {
.name = "modem",
.shutdown = modem_shutdown,
.powerup = modem_powerup,
@@ -244,13 +245,16 @@
static int modem_subsystem_restart_init(void)
{
- return ssr_register_subsystem(&modem_8960);
+ modem_8960_dev = subsys_register(&modem_8960);
+ if (IS_ERR(modem_8960_dev))
+ return PTR_ERR(modem_8960_dev);
+ return 0;
}
static int modem_debug_set(void *data, u64 val)
{
if (val == 1)
- subsystem_restart("modem");
+ subsystem_restart_dev(modem_8960_dev);
return 0;
}
diff --git a/arch/arm/mach-msm/msm_bus/Makefile b/arch/arm/mach-msm/msm_bus/Makefile
index ab62c20..924577f 100644
--- a/arch/arm/mach-msm/msm_bus/Makefile
+++ b/arch/arm/mach-msm/msm_bus/Makefile
@@ -2,7 +2,9 @@
# Makefile for msm-bus driver specific files
#
obj-y += msm_bus_core.o msm_bus_fabric.o msm_bus_config.o msm_bus_arb.o
-obj-y += msm_bus_rpm.o msm_bus_bimc.o msm_bus_noc.o
+obj-y += msm_bus_bimc.o msm_bus_noc.o
+obj-$(CONFIG_MSM_RPM) += msm_bus_rpm.o
+obj-$(CONFIG_MSM_RPM_SMD) += msm_bus_rpm_smd.o
obj-$(CONFIG_ARCH_MSM8X60) += msm_bus_board_8660.o
obj-$(CONFIG_ARCH_MSM8960) += msm_bus_board_8960.o
obj-$(CONFIG_ARCH_MSM9615) += msm_bus_board_9615.o
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
index 823f14d..2072cb1 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_bimc.c
@@ -1817,44 +1817,45 @@
info->node_info->id, info->node_info->priv_id, add_bw);
binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data;
- if (!info->node_info->qport) {
- MSM_BUS_DBG("No qos ports to update!\n");
- return;
- }
if (info->node_info->num_mports == 0) {
MSM_BUS_DBG("BIMC: Skip Master BW\n");
goto skip_mas_bw;
}
+ ports = info->node_info->num_mports;
bw = INTERLEAVED_BW(fab_pdata, add_bw, ports);
- ports = INTERLEAVED_VAL(fab_pdata, ports);
for (i = 0; i < ports; i++) {
- MSM_BUS_DBG("qport: %d\n", info->node_info->qport[i]);
sel_cd->mas[info->node_info->masterp[i]].bw += bw;
sel_cd->mas[info->node_info->masterp[i]].hw_id =
info->node_info->mas_hw_id;
- qbw.bw = sel_cd->mas[info->node_info->masterp[i]].bw;
- qbw.ws = info->node_info->ws;
- /* Threshold low = 90% of bw */
- qbw.thl = (90 * bw) / 100;
- /* Threshold medium = bw */
- qbw.thm = bw;
- /* Threshold high = 10% more than bw */
- qbw.thh = (110 * bw) / 100;
- /* Check if info is a shared master.
- * If it is, mark it dirty
- * If it isn't, then set QOS Bandwidth
- **/
- MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %ld\n",
+ MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
info->node_info->priv_id,
sel_cd->mas[info->node_info->masterp[i]].bw);
if (info->node_info->hw_sel == MSM_BUS_RPM)
sel_cd->mas[info->node_info->masterp[i]].dirty = 1;
- else
+ else {
+ if (!info->node_info->qport) {
+ MSM_BUS_DBG("No qos ports to update!\n");
+ break;
+ }
+ MSM_BUS_DBG("qport: %d\n", info->node_info->qport[i]);
+ qbw.bw = sel_cd->mas[info->node_info->masterp[i]].bw;
+ qbw.ws = info->node_info->ws;
+ /* Threshold low = 90% of bw */
+ qbw.thl = (90 * bw) / 100;
+ /* Threshold medium = bw */
+ qbw.thm = bw;
+ /* Threshold high = 10% more than bw */
+ qbw.thh = (110 * bw) / 100;
+ /* Check if info is a shared master.
+ * If it is, mark it dirty
+ * If it isn't, then set QOS Bandwidth
+ **/
msm_bus_bimc_set_qos_bw(binfo,
info->node_info->qport[i], &qbw);
+ }
}
skip_mas_bw:
@@ -1870,7 +1871,7 @@
sel_cd->slv[hop->node_info->slavep[i]].bw += bw;
sel_cd->slv[hop->node_info->slavep[i]].hw_id =
hop->node_info->slv_hw_id;
- MSM_BUS_DBG("BIMC: Update slave_bw: ID: %d -> %ld\n",
+ MSM_BUS_DBG("BIMC: Update slave_bw: ID: %d -> %llu\n",
hop->node_info->priv_id,
sel_cd->slv[hop->node_info->slavep[i]].bw);
MSM_BUS_DBG("BIMC: Update slave_bw: index: %d\n",
@@ -1893,6 +1894,7 @@
*fab_pdata, void *hw_data, void **cdata)
{
MSM_BUS_DBG("\nReached BIMC Commit\n");
+ msm_bus_remote_hw_commit(fab_pdata, hw_data, cdata);
return 0;
}
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_core.h b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
index 264afbd..333fe4b 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_core.h
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_core.h
@@ -123,7 +123,7 @@
struct msm_bus_node_hw_info {
bool dirty;
unsigned int hw_id;
- unsigned long bw;
+ uint64_t bw;
};
struct msm_bus_hw_algorithm {
@@ -202,6 +202,8 @@
int curr;
};
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+ *fab_pdata, void *hw_data, void **cdata);
int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric);
void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric);
struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid);
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_noc.c b/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
index 5179d2a..2597e27 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_noc.c
@@ -518,14 +518,12 @@
return;
}
- if (!info->node_info->qport) {
- MSM_BUS_DBG("NOC: No QoS Ports to update bw\n");
- return;
+ if (info->node_info->num_mports == 0) {
+ MSM_BUS_DBG("NOC: Skip Master BW\n");
+ goto skip_mas_bw;
}
ports = info->node_info->num_mports;
- qos_bw.ws = info->node_info->ws;
-
bw = INTERLEAVED_BW(fab_pdata, add_bw, ports);
MSM_BUS_DBG("NOC: Update bw for: %d: %ld\n",
@@ -534,26 +532,36 @@
sel_cd->mas[info->node_info->masterp[i]].bw += bw;
sel_cd->mas[info->node_info->masterp[i]].hw_id =
info->node_info->mas_hw_id;
- qos_bw.bw = sel_cd->mas[info->node_info->masterp[i]].bw;
- MSM_BUS_DBG("NOC: Update mas_bw for ID: %d, BW: %ld, QoS: %u\n",
+ MSM_BUS_DBG("NOC: Update mas_bw: ID: %d, BW: %llu ports:%d\n",
info->node_info->priv_id,
sel_cd->mas[info->node_info->masterp[i]].bw,
- qos_bw.ws);
+ ports);
/* Check if info is a shared master.
* If it is, mark it dirty
* If it isn't, then set QOS Bandwidth
**/
if (info->node_info->hw_sel == MSM_BUS_RPM)
sel_cd->mas[info->node_info->masterp[i]].dirty = 1;
- else
+ else {
+ if (!info->node_info->qport) {
+ MSM_BUS_DBG("No qos ports to update!\n");
+ break;
+ }
+ qos_bw.bw = sel_cd->mas[info->node_info->masterp[i]].
+ bw;
+ qos_bw.ws = info->node_info->ws;
msm_bus_noc_set_qos_bw(ninfo,
info->node_info->qport[i],
info->node_info->perm_mode, &qos_bw);
+ MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n",
+ qos_bw.ws);
+ }
}
+skip_mas_bw:
ports = hop->node_info->num_sports;
if (ports == 0) {
- MSM_BUS_ERR("\nDIVIDE BY 0, hop: %d\n",
+ MSM_BUS_DBG("\nDIVIDE BY 0, hop: %d\n",
hop->node_info->priv_id);
return;
}
@@ -562,7 +570,7 @@
sel_cd->slv[hop->node_info->slavep[i]].bw += bw;
sel_cd->slv[hop->node_info->slavep[i]].hw_id =
hop->node_info->slv_hw_id;
- MSM_BUS_DBG("NOC: Update slave_bw for ID: %d -> %ld\n",
+ MSM_BUS_DBG("NOC: Update slave_bw for ID: %d -> %llu\n",
hop->node_info->priv_id,
sel_cd->slv[hop->node_info->slavep[i]].bw);
MSM_BUS_DBG("NOC: Update slave_bw for hw_id: %d, index: %d\n",
@@ -581,6 +589,7 @@
*fab_pdata, void *hw_data, void **cdata)
{
MSM_BUS_DBG("\nReached NOC Commit\n");
+ msm_bus_remote_hw_commit(fab_pdata, hw_data, cdata);
return 0;
}
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c b/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
index 4653431..2213132 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
@@ -946,6 +946,12 @@
return status;
}
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+ *fab_pdata, void *hw_data, void **cdata)
+{
+ return 0;
+}
+
int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
struct msm_bus_hw_algorithm *hw_algo)
{
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_rpm_smd.c b/arch/arm/mach-msm/msm_bus/msm_bus_rpm_smd.c
new file mode 100644
index 0000000..88fab96
--- /dev/null
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_rpm_smd.c
@@ -0,0 +1,238 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include "msm_bus_core.h"
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/rpm-smd.h>
+
+/* Stubs for backward compatibility */
+void msm_bus_rpm_set_mt_mask(void)
+{
+}
+
+bool msm_bus_rpm_is_mem_interleaved(void)
+{
+ return true;
+}
+
+struct commit_data {
+ struct msm_bus_node_hw_info *mas_arb;
+ struct msm_bus_node_hw_info *slv_arb;
+};
+
+#ifdef CONFIG_DEBUG_FS
+void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
+ void *cdata, int nmasters, int nslaves, int ntslaves)
+{
+ int c;
+ struct commit_data *cd = (struct commit_data *)cdata;
+
+ *curr += scnprintf(buf + *curr, max_size - *curr, "\nMas BW:\n");
+ for (c = 0; c < nmasters; c++)
+ *curr += scnprintf(buf + *curr, max_size - *curr,
+ "%d: %llu\t", cd->mas_arb[c].hw_id,
+ cd->mas_arb[c].bw);
+ *curr += scnprintf(buf + *curr, max_size - *curr, "\nSlave BW:\n");
+ for (c = 0; c < nslaves; c++) {
+ *curr += scnprintf(buf + *curr, max_size - *curr,
+ "%d: %llu\t", cd->slv_arb[c].hw_id,
+ cd->slv_arb[c].bw);
+ }
+}
+#endif
+
+static int msm_bus_rpm_compare_cdata(
+ struct msm_bus_fabric_registration *fab_pdata,
+ struct commit_data *cd1, struct commit_data *cd2)
+{
+ size_t n;
+ int ret;
+
+ n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nmasters * 2;
+ ret = memcmp(cd1->mas_arb, cd2->mas_arb, n);
+ if (ret) {
+ MSM_BUS_DBG("Master Arb Data not equal\n");
+ return ret;
+ }
+
+ n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nslaves * 2;
+ ret = memcmp(cd1->slv_arb, cd2->slv_arb, n);
+ if (ret) {
+ MSM_BUS_DBG("Master Arb Data not equal\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int msm_bus_rpm_req(int ctx, uint32_t rsc_type, uint32_t key,
+ struct msm_bus_node_hw_info *hw_info, bool valid)
+{
+ struct msm_rpm_request *rpm_req;
+ int ret = 0, msg_id;
+
+ if (ctx == ACTIVE_CTX)
+ ctx = MSM_RPM_CTX_ACTIVE_SET;
+ else if (ctx == DUAL_CTX)
+ ctx = MSM_RPM_CTX_SLEEP_SET;
+
+ rpm_req = msm_rpm_create_request(ctx, rsc_type, hw_info->hw_id, 1);
+ if (rpm_req == NULL) {
+ MSM_BUS_WARN("RPM: Couldn't create RPM Request\n");
+ return -ENXIO;
+ }
+
+ if (valid) {
+ ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)
+ &hw_info->bw, (int)(sizeof(uint64_t)));
+ if (ret) {
+ MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n",
+ rsc_type);
+ return ret;
+ }
+
+ MSM_BUS_DBG("Added Key: %d, Val: %llu, size: %d\n", key,
+ hw_info->bw, sizeof(uint64_t));
+ } else {
+ /* Invalidate RPM requests */
+ ret = msm_rpm_add_kvp_data(rpm_req, 0, NULL, 0);
+ if (ret) {
+ MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n",
+ rsc_type);
+ return ret;
+ }
+ }
+
+ msg_id = msm_rpm_send_request(rpm_req);
+ if (!msg_id) {
+ MSM_BUS_WARN("RPM: No message ID for req\n");
+ return -ENXIO;
+ }
+
+ ret = msm_rpm_wait_for_ack(msg_id);
+ if (ret) {
+ MSM_BUS_WARN("RPM: Ack failed\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration
+ *fab_pdata, int ctx, void *rpm_data,
+ struct commit_data *cd, bool valid)
+{
+ int i, status = 0, rsc_type, key;
+
+ MSM_BUS_DBG("Context: %d\n", ctx);
+ rsc_type = RPM_BUS_MASTER_REQ;
+ key = RPM_MASTER_FIELD_BW;
+ for (i = 0; i < fab_pdata->nmasters; i++) {
+ if (cd->mas_arb[i].dirty) {
+ MSM_BUS_DBG("MAS HWID: %d, BW: %llu DIRTY: %d\n",
+ cd->mas_arb[i].hw_id,
+ cd->mas_arb[i].bw,
+ cd->mas_arb[i].dirty);
+ status = msm_bus_rpm_req(ctx, rsc_type, key,
+ &cd->mas_arb[i], valid);
+ if (status) {
+ MSM_BUS_ERR("RPM: Req fail: mas:%d, bw:%llu\n",
+ cd->mas_arb[i].hw_id,
+ cd->mas_arb[i].bw);
+ break;
+ } else {
+ cd->mas_arb[i].dirty = false;
+ }
+ }
+ }
+
+ rsc_type = RPM_BUS_SLAVE_REQ;
+ key = RPM_SLAVE_FIELD_BW;
+ for (i = 0; i < fab_pdata->nslaves; i++) {
+ if (cd->slv_arb[i].dirty) {
+ MSM_BUS_DBG("SLV HWID: %d, BW: %llu DIRTY: %d\n",
+ cd->slv_arb[i].hw_id,
+ cd->slv_arb[i].bw,
+ cd->slv_arb[i].dirty);
+ status = msm_bus_rpm_req(ctx, rsc_type, key,
+ &cd->slv_arb[i], valid);
+ if (status) {
+ MSM_BUS_ERR("RPM: Req fail: slv:%d, bw:%llu\n",
+ cd->slv_arb[i].hw_id,
+ cd->slv_arb[i].bw);
+ break;
+ } else {
+ cd->slv_arb[i].dirty = false;
+ }
+ }
+ }
+
+ return status;
+}
+
+/**
+* msm_bus_remote_hw_commit() - Commit the arbitration data to RPM
+* @fab_pdata: Fabric for which the data should be committed
+**/
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+ *fab_pdata, void *hw_data, void **cdata)
+{
+
+ int ret;
+ bool valid;
+ struct commit_data *dual_cd, *act_cd;
+ void *rpm_data = hw_data;
+
+ MSM_BUS_DBG("\nReached RPM Commit\n");
+ dual_cd = (struct commit_data *)cdata[DUAL_CTX];
+ act_cd = (struct commit_data *)cdata[ACTIVE_CTX];
+
+ /*
+ * If the arb data for active set and sleep set is
+ * different, commit both sets.
+ * If the arb data for active set and sleep set is
+ * the same, invalidate the sleep set.
+ */
+ ret = msm_bus_rpm_compare_cdata(fab_pdata, act_cd, dual_cd);
+ if (!ret)
+ /* Invalidate sleep set.*/
+ valid = false;
+ else
+ valid = true;
+
+ ret = msm_bus_rpm_commit_arb(fab_pdata, DUAL_CTX, rpm_data,
+ dual_cd, valid);
+ if (ret)
+ MSM_BUS_ERR("Error comiting fabric:%d in %d ctx\n",
+ fab_pdata->id, DUAL_CTX);
+
+ valid = true;
+ ret = msm_bus_rpm_commit_arb(fab_pdata, ACTIVE_CTX, rpm_data, act_cd,
+ valid);
+ if (ret)
+ MSM_BUS_ERR("Error comiting fabric:%d in %d ctx\n",
+ fab_pdata->id, ACTIVE_CTX);
+
+ return ret;
+}
+
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo)
+{
+ if (!pdata->ahb)
+ pdata->rpm_enabled = 1;
+ return 0;
+}
diff --git a/arch/arm/mach-msm/msm_dsps.c b/arch/arm/mach-msm/msm_dsps.c
index 200d717..6dde576 100644
--- a/arch/arm/mach-msm/msm_dsps.c
+++ b/arch/arm/mach-msm/msm_dsps.c
@@ -722,6 +722,8 @@
.unlocked_ioctl = dsps_ioctl,
};
+static struct subsys_device *dsps_dev;
+
/**
* Fatal error handler
* Resets DSPS.
@@ -735,7 +737,7 @@
pr_err("%s: DSPS already resetting. Count %d\n", __func__,
atomic_read(&drv->crash_in_progress));
} else {
- subsystem_restart("dsps");
+ subsystem_restart_dev(dsps_dev);
}
}
@@ -765,7 +767,7 @@
* called by the restart notifier
*
*/
-static int dsps_shutdown(const struct subsys_data *subsys)
+static int dsps_shutdown(const struct subsys_desc *subsys)
{
pr_debug("%s\n", __func__);
disable_irq_nosync(drv->wdog_irq);
@@ -779,7 +781,7 @@
* called by the restart notifier
*
*/
-static int dsps_powerup(const struct subsys_data *subsys)
+static int dsps_powerup(const struct subsys_desc *subsys)
{
pr_debug("%s\n", __func__);
dsps_power_on_handler();
@@ -794,7 +796,7 @@
* called by the restart notifier
*
*/
-static void dsps_crash_shutdown(const struct subsys_data *subsys)
+static void dsps_crash_shutdown(const struct subsys_desc *subsys)
{
pr_debug("%s\n", __func__);
dsps_crash_shutdown_g = 1;
@@ -806,7 +808,7 @@
* called by the restart notifier
*
*/
-static int dsps_ramdump(int enable, const struct subsys_data *subsys)
+static int dsps_ramdump(int enable, const struct subsys_desc *subsys)
{
int ret = 0;
pr_debug("%s\n", __func__);
@@ -838,7 +840,7 @@
return ret;
}
-static struct subsys_data dsps_ssrops = {
+static struct subsys_desc dsps_ssrops = {
.name = "dsps",
.shutdown = dsps_shutdown,
.powerup = dsps_powerup,
@@ -919,9 +921,10 @@
goto smsm_register_err;
}
- ret = ssr_register_subsystem(&dsps_ssrops);
- if (ret) {
- pr_err("%s: ssr_register_subsystem fail %d\n", __func__,
+ dsps_dev = subsys_register(&dsps_ssrops);
+ if (IS_ERR(dsps_dev)) {
+ ret = PTR_ERR(dsps_dev);
+ pr_err("%s: subsys_register fail %d\n", __func__,
ret);
goto ssr_register_err;
}
@@ -953,6 +956,7 @@
{
pr_debug("%s.\n", __func__);
+ subsys_unregister(dsps_dev);
dsps_power_off_handler();
dsps_free_resources();
diff --git a/arch/arm/mach-msm/ocmem.c b/arch/arm/mach-msm/ocmem.c
index 43c7fc8..a233080 100644
--- a/arch/arm/mach-msm/ocmem.c
+++ b/arch/arm/mach-msm/ocmem.c
@@ -10,7 +10,6 @@
* GNU General Public License for more details.
*/
-#include <mach/ocmem_priv.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -18,10 +17,12 @@
#include <linux/rbtree.h>
#include <linux/genalloc.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <mach/ocmem_priv.h>
/* This code is to temporarily work around the default state of OCMEM
regions in Virtio. These registers will be read from DT in a subsequent
@@ -41,14 +42,6 @@
unsigned int p_tail;
};
-struct ocmem_plat_data {
- void __iomem *vbase;
- unsigned long size;
- unsigned long base;
- struct ocmem_partition *parts;
- unsigned nr_parts;
-};
-
struct ocmem_zone zones[OCMEM_CLIENT_MAX];
struct ocmem_zone *get_zone(unsigned id)
@@ -183,8 +176,216 @@
return pdata;
}
+int __devinit of_ocmem_parse_regions(struct device *dev,
+ struct ocmem_partition **part)
+{
+ const char *name;
+ struct device_node *child = NULL;
+ int nr_parts = 0;
+ int i = 0;
+ int rc = 0;
+ int id = -1;
+
+ /* Compute total partitions */
+ for_each_child_of_node(dev->of_node, child)
+ nr_parts++;
+
+ if (nr_parts == 0)
+ return 0;
+
+ *part = devm_kzalloc(dev, nr_parts * sizeof(**part),
+ GFP_KERNEL);
+
+ if (!*part)
+ return -ENOMEM;
+
+ for_each_child_of_node(dev->of_node, child)
+ {
+ const u32 *addr;
+ u32 min;
+ u64 size;
+ u64 p_start;
+
+ addr = of_get_address(child, 0, &size, NULL);
+
+ if (!addr) {
+ dev_err(dev, "Invalid addr for partition %d, ignored\n",
+ i);
+ continue;
+ }
+
+ rc = of_property_read_u32(child, "qcom,ocmem-part-min", &min);
+
+ if (rc) {
+ dev_err(dev, "No min for partition %d, ignored\n", i);
+ continue;
+ }
+
+ rc = of_property_read_string(child, "qcom,ocmem-part-name",
+ &name);
+
+ if (rc) {
+ dev_err(dev, "No name for partition %d, ignored\n", i);
+ continue;
+ }
+
+ id = get_id(name);
+
+ if (id < 0) {
+ dev_err(dev, "Ignoring invalid partition %s\n", name);
+ continue;
+ }
+
+ p_start = of_translate_address(child, addr);
+
+ if (p_start == OF_BAD_ADDR) {
+ dev_err(dev, "Invalid offset for partition %d\n", i);
+ continue;
+ }
+
+ (*part)[i].p_start = p_start;
+ (*part)[i].p_size = size;
+ (*part)[i].id = id;
+ (*part)[i].name = name;
+ (*part)[i].p_min = min;
+ (*part)[i].p_tail = of_property_read_bool(child, "tail");
+ i++;
+ }
+
+ return i;
+}
+
static struct ocmem_plat_data *parse_dt_config(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct ocmem_plat_data *pdata = NULL;
+ struct ocmem_partition *parts = NULL;
+ struct resource *ocmem_irq;
+ struct resource *dm_irq;
+ struct resource *ocmem_mem;
+ struct resource *reg_base;
+ struct resource *br_base;
+ struct resource *dm_base;
+ struct resource *ocmem_mem_io;
+ unsigned nr_parts = 0;
+ unsigned nr_regions = 0;
+
+ pdata = devm_kzalloc(dev, sizeof(struct ocmem_plat_data),
+ GFP_KERNEL);
+
+ if (!pdata) {
+ dev_err(dev, "Unable to allocate memory for platform data\n");
+ return NULL;
+ }
+
+ ocmem_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ocmem_physical");
+ if (!ocmem_mem) {
+ dev_err(dev, "No OCMEM memory resource\n");
+ return NULL;
+ }
+
+ ocmem_mem_io = request_mem_region(ocmem_mem->start,
+ resource_size(ocmem_mem), pdev->name);
+
+ if (!ocmem_mem_io) {
+ dev_err(dev, "Could not claim OCMEM memory\n");
+ return NULL;
+ }
+
+ pdata->base = ocmem_mem->start;
+ pdata->size = resource_size(ocmem_mem);
+ pdata->vbase = devm_ioremap_nocache(dev, ocmem_mem->start,
+ resource_size(ocmem_mem));
+ if (!pdata->vbase) {
+ dev_err(dev, "Could not ioremap ocmem memory\n");
+ return NULL;
+ }
+
+ reg_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ocmem_ctrl_physical");
+ if (!reg_base) {
+ dev_err(dev, "No OCMEM register resource\n");
+ return NULL;
+ }
+
+ pdata->reg_base = devm_ioremap_nocache(dev, reg_base->start,
+ resource_size(reg_base));
+ if (!pdata->reg_base) {
+ dev_err(dev, "Could not ioremap register map\n");
+ return NULL;
+ }
+
+ br_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "br_ctrl_physical");
+ if (!br_base) {
+ dev_err(dev, "No OCMEM BR resource\n");
+ return NULL;
+ }
+
+ pdata->br_base = devm_ioremap_nocache(dev, br_base->start,
+ resource_size(br_base));
+ if (!pdata->br_base) {
+ dev_err(dev, "Could not ioremap BR resource\n");
+ return NULL;
+ }
+
+ dm_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "dm_ctrl_physical");
+ if (!dm_base) {
+ dev_err(dev, "No OCMEM DM resource\n");
+ return NULL;
+ }
+
+ pdata->dm_base = devm_ioremap_nocache(dev, dm_base->start,
+ resource_size(dm_base));
+ if (!pdata->dm_base) {
+ dev_err(dev, "Could not ioremap DM resource\n");
+ return NULL;
+ }
+
+ ocmem_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "ocmem_irq");
+
+ if (!ocmem_irq) {
+ dev_err(dev, "No OCMEM IRQ resource\n");
+ return NULL;
+ }
+
+ dm_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "dm_irq");
+
+ if (!dm_irq) {
+ dev_err(dev, "No DM IRQ resource\n");
+ return NULL;
+ }
+
+ if (of_property_read_u32(node, "qcom,ocmem-num-regions",
+ &nr_regions)) {
+ dev_err(dev, "No OCMEM memory regions specified\n");
+ }
+
+ if (nr_regions == 0) {
+ dev_err(dev, "No hardware memory regions found\n");
+ return NULL;
+ }
+
+ /* Figure out the number of partitions */
+ nr_parts = of_ocmem_parse_regions(dev, &parts);
+ if (nr_parts <= 0) {
+ dev_err(dev, "No valid OCMEM partitions found\n");
+ goto pdata_error;
+ } else
+ dev_dbg(dev, "Found %d ocmem partitions\n", nr_parts);
+
+ pdata->nr_parts = nr_parts;
+ pdata->parts = parts;
+ pdata->nr_regions = nr_regions;
+ pdata->ocmem_irq = ocmem_irq->start;
+ pdata->dm_irq = dm_irq->start;
+ return pdata;
+pdata_error:
return NULL;
}
@@ -225,7 +426,7 @@
return -EBUSY;
}
- start = pdata->base + part->p_start;
+ start = part->p_start;
ret = gen_pool_add(zone->z_pool, start,
part->p_size, -1);
@@ -273,7 +474,7 @@
zone->z_end, part->p_size/SZ_1K);
}
- dev_info(dev, "Total active zones = %d\n", active_zones);
+ dev_dbg(dev, "Total active zones = %d\n", active_zones);
return 0;
}
@@ -282,7 +483,7 @@
struct device *dev = &pdev->dev;
void *ocmem_region_vbase = NULL;
- if (!pdev->dev.of_node->child) {
+ if (!pdev->dev.of_node) {
dev_info(dev, "Missing Configuration in Device Tree\n");
ocmem_pdata = parse_static_config(pdev);
} else {
@@ -297,6 +498,8 @@
BUG_ON(!IS_ALIGNED(ocmem_pdata->size, PAGE_SIZE));
BUG_ON(!IS_ALIGNED(ocmem_pdata->base, PAGE_SIZE));
+ dev_info(dev, "OCMEM Virtual addr %p\n", ocmem_pdata->vbase);
+
platform_set_drvdata(pdev, ocmem_pdata);
if (ocmem_zone_init(pdev))
@@ -316,7 +519,7 @@
writel_relaxed(REGION_ENABLE, ocmem_region_vbase);
writel_relaxed(REGION_ENABLE, ocmem_region_vbase + 4);
writel_relaxed(REGION_ENABLE, ocmem_region_vbase + 8);
- dev_info(dev, "initialized successfully\n");
+ dev_dbg(dev, "initialized successfully\n");
return 0;
}
@@ -326,7 +529,7 @@
}
static struct of_device_id msm_ocmem_dt_match[] = {
- { .compatible = "qcom,msm_ocmem",
+ { .compatible = "qcom,msm-ocmem",
},
{}
};
diff --git a/arch/arm/mach-msm/ocmem_notifier.c b/arch/arm/mach-msm/ocmem_notifier.c
index 58ad3d9..4754f44 100644
--- a/arch/arm/mach-msm/ocmem_notifier.c
+++ b/arch/arm/mach-msm/ocmem_notifier.c
@@ -75,7 +75,8 @@
return ret;
}
-void *ocmem_notifier_register(int client_id, struct notifier_block *nb)
+struct ocmem_notifier *ocmem_notifier_register(int client_id,
+ struct notifier_block *nb)
{
int ret = 0;
@@ -115,13 +116,12 @@
}
EXPORT_SYMBOL(ocmem_notifier_register);
-int ocmem_notifier_unregister(void *hndl, struct notifier_block *nb)
+int ocmem_notifier_unregister(struct ocmem_notifier *nc_hndl,
+ struct notifier_block *nb)
{
int ret = 0;
- struct ocmem_notifier *nc_hndl = (struct ocmem_notifier *) hndl;
-
if (!nc_hndl) {
pr_err("ocmem: Invalid notification handle\n");
return -EINVAL;
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index 3f6eb95..540ffbb 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -565,18 +565,6 @@
static void msm_pil_debugfs_remove(struct pil_device *pil) { }
#endif
-static int __msm_pil_shutdown(struct device *dev, void *data)
-{
- pil_shutdown(to_pil_device(dev));
- return 0;
-}
-
-static int msm_pil_shutdown_at_boot(void)
-{
- return bus_for_each_dev(&pil_bus_type, NULL, NULL, __msm_pil_shutdown);
-}
-late_initcall(msm_pil_shutdown_at_boot);
-
static void pil_device_release(struct device *dev)
{
struct pil_device *pil = to_pil_device(dev);
diff --git a/arch/arm/mach-msm/qdsp5/audio_voicememo.c b/arch/arm/mach-msm/qdsp5/audio_voicememo.c
index 03dd295..34e5b81 100644
--- a/arch/arm/mach-msm/qdsp5/audio_voicememo.c
+++ b/arch/arm/mach-msm/qdsp5/audio_voicememo.c
@@ -459,7 +459,7 @@
if (datacb_data->pkt.fw_data.fw_ptr_status &&
be32_to_cpu(datacb_data->pkt.fw_data.rec_length) &&
be32_to_cpu(datacb_data->pkt.fw_data.rec_length)
- <= MAX_FRAME_SIZE) {
+ <= MAX_REC_BUF_SIZE) {
MM_DBG("Copy FW link:rec_buf_size \
= 0x%08x, rec_length=0x%08x\n",
@@ -484,7 +484,7 @@
} else if (datacb_data->pkt.rw_data.rw_ptr_status &&
be32_to_cpu(datacb_data->pkt.rw_data.rec_length) &&
be32_to_cpu(datacb_data->pkt.rw_data.rec_length)
- <= MAX_FRAME_SIZE) {
+ <= MAX_REC_BUF_SIZE) {
MM_DBG("Copy RW link:rec_buf_size \
=0x%08x, rec_length=0x%08x\n",
@@ -509,12 +509,12 @@
} else {
MM_ERR("FW: ptr_status %d, rec_length=0x%08x,"
"RW: ptr_status %d, rec_length=0x%08x\n",
- datacb_data->pkt.rw_data.fw_ptr_status, \
+ datacb_data->pkt.fw_data.fw_ptr_status, \
be32_to_cpu( \
datacb_data->pkt.fw_data.rec_length), \
- datacb_data->pkt.rw_data.fw_ptr_status, \
+ datacb_data->pkt.rw_data.rw_ptr_status, \
be32_to_cpu( \
- datacb_data->pkt.fw_data.rec_length));
+ datacb_data->pkt.rw_data.rec_length));
}
if (rec_status != RPC_VOC_REC_STAT_DONE) {
/* Not end of record */
diff --git a/arch/arm/mach-msm/ramdump.c b/arch/arm/mach-msm/ramdump.c
index a18acd6..21e81dd 100644
--- a/arch/arm/mach-msm/ramdump.c
+++ b/arch/arm/mach-msm/ramdump.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -222,6 +222,17 @@
return (void *)rd_dev;
}
+void destroy_ramdump_device(void *dev)
+{
+ struct ramdump_device *rd_dev = dev;
+
+ if (IS_ERR_OR_NULL(rd_dev))
+ return;
+
+ misc_deregister(&rd_dev->device);
+ kfree(rd_dev);
+}
+
int do_ramdump(void *handle, struct ramdump_segment *segments,
int nsegments)
{
diff --git a/arch/arm/mach-msm/ramdump.h b/arch/arm/mach-msm/ramdump.h
index 0b60a44..9006010 100644
--- a/arch/arm/mach-msm/ramdump.h
+++ b/arch/arm/mach-msm/ramdump.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
};
void *create_ramdump_device(const char *dev_name);
+void destroy_ramdump_device(void *dev);
int do_ramdump(void *handle, struct ramdump_segment *segments,
int nsegments);
diff --git a/arch/arm/mach-msm/rpm-regulator-smd.c b/arch/arm/mach-msm/rpm-regulator-smd.c
index fdff231..a8af9e7 100644
--- a/arch/arm/mach-msm/rpm-regulator-smd.c
+++ b/arch/arm/mach-msm/rpm-regulator-smd.c
@@ -68,6 +68,7 @@
RPM_REGULATOR_PARAM_QUIET_MODE,
RPM_REGULATOR_PARAM_FREQ_REASON,
RPM_REGULATOR_PARAM_CORNER,
+ RPM_REGULATOR_PARAM_BYPASS,
RPM_REGULATOR_PARAM_MAX,
};
@@ -111,7 +112,8 @@
PARAM(HEAD_ROOM, 1, 0, 0, 1, "hr", 0, 0x7FFFFFFF, "qcom,init-head-room"),
PARAM(QUIET_MODE, 0, 1, 0, 0, "qm", 0, 2, "qcom,init-quiet-mode"),
PARAM(FREQ_REASON, 0, 1, 0, 1, "resn", 0, 8, "qcom,init-freq-reason"),
- PARAM(CORNER, 0, 1, 0, 0, "corn", 0, 5, "qcom,init-voltage-corner"),
+ PARAM(CORNER, 0, 1, 0, 0, "corn", 0, 6, "qcom,init-voltage-corner"),
+ PARAM(BYPASS, 1, 0, 0, 0, "bypa", 0, 1, "qcom,init-disallow-bypass"),
};
struct rpm_vreg_request {
@@ -440,6 +442,7 @@
RPM_VREG_AGGR_MAX(QUIET_MODE, param_aggr, param_reg);
RPM_VREG_AGGR_MAX(FREQ_REASON, param_aggr, param_reg);
RPM_VREG_AGGR_MAX(CORNER, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(BYPASS, param_aggr, param_reg);
}
static int rpm_vreg_aggregate_requests(struct rpm_regulator *regulator)
@@ -682,7 +685,7 @@
* regulator_set_voltage function to the actual corner values
* sent to the RPM.
*/
- corner = min_uV - RPM_REGULATOR_CORNER_RETENTION;
+ corner = min_uV - RPM_REGULATOR_CORNER_NONE;
if (corner < params[RPM_REGULATOR_PARAM_CORNER].min
|| corner > params[RPM_REGULATOR_PARAM_CORNER].max) {
@@ -716,7 +719,7 @@
struct rpm_regulator *reg = rdev_get_drvdata(rdev);
return reg->req.param[RPM_REGULATOR_PARAM_CORNER]
- + RPM_REGULATOR_CORNER_RETENTION;
+ + RPM_REGULATOR_CORNER_NONE;
}
static int rpm_vreg_set_mode(struct regulator_dev *rdev, unsigned int mode)
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index b8bb27b..697d504 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -24,6 +24,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/slab.h>
@@ -36,6 +37,19 @@
#include <mach/rpm-smd.h>
#include "rpm-notifier.h"
+/* Debug Definitions */
+
+enum {
+ MSM_RPM_LOG_REQUEST_PRETTY = BIT(0),
+ MSM_RPM_LOG_REQUEST_RAW = BIT(1),
+ MSM_RPM_LOG_REQUEST_SHOW_MSG_ID = BIT(2),
+};
+
+static int msm_rpm_debug_mask;
+module_param_named(
+ debug_mask, msm_rpm_debug_mask, int, S_IRUGO | S_IWUSR
+);
+
struct msm_rpm_driver_data {
const char *ch_name;
uint32_t ch_type;
@@ -492,6 +506,140 @@
}
}
+#define DEBUG_PRINT_BUFFER_SIZE 512
+
+static void msm_rpm_log_request(struct msm_rpm_request *cdata)
+{
+ char buf[DEBUG_PRINT_BUFFER_SIZE];
+ size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+ char name[5];
+ u32 value;
+ int i, j, prev_valid;
+ int valid_count = 0;
+ int pos = 0;
+
+ name[4] = 0;
+
+ for (i = 0; i < cdata->write_idx; i++)
+ if (cdata->kvp[i].valid)
+ valid_count++;
+
+ pos += scnprintf(buf + pos, buflen - pos, "%sRPM req: ", KERN_INFO);
+ if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_SHOW_MSG_ID)
+ pos += scnprintf(buf + pos, buflen - pos, "msg_id=%u, ",
+ cdata->msg_hdr.msg_id);
+ pos += scnprintf(buf + pos, buflen - pos, "s=%s",
+ (cdata->msg_hdr.set == MSM_RPM_CTX_ACTIVE_SET ? "act" : "slp"));
+
+ if ((msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY)
+ && (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_RAW)) {
+ /* Both pretty and raw formatting */
+ memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t));
+ pos += scnprintf(buf + pos, buflen - pos,
+ ", rsc_type=0x%08X (%s), rsc_id=%u; ",
+ cdata->msg_hdr.resource_type, name,
+ cdata->msg_hdr.resource_id);
+
+ for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+ if (!cdata->kvp[i].valid)
+ continue;
+
+ memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
+ pos += scnprintf(buf + pos, buflen - pos,
+ "[key=0x%08X (%s), value=%s",
+ cdata->kvp[i].key, name,
+ (cdata->kvp[i].nbytes ? "0x" : "null"));
+
+ for (j = 0; j < cdata->kvp[i].nbytes; j++)
+ pos += scnprintf(buf + pos, buflen - pos,
+ "%02X ",
+ cdata->kvp[i].value[j]);
+
+ if (cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos, buflen - pos, "(");
+
+ for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
+ value = 0;
+ memcpy(&value, &cdata->kvp[i].value[j],
+ min(sizeof(uint32_t),
+ cdata->kvp[i].nbytes - j));
+ pos += scnprintf(buf + pos, buflen - pos, "%u",
+ value);
+ if (j + 4 < cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos,
+ buflen - pos, " ");
+ }
+ if (cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos, buflen - pos, ")");
+ pos += scnprintf(buf + pos, buflen - pos, "]");
+ if (prev_valid + 1 < valid_count)
+ pos += scnprintf(buf + pos, buflen - pos, ", ");
+ prev_valid++;
+ }
+ } else if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) {
+ /* Pretty formatting only */
+ memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t));
+ pos += scnprintf(buf + pos, buflen - pos, " %s %u; ", name,
+ cdata->msg_hdr.resource_id);
+
+ for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+ if (!cdata->kvp[i].valid)
+ continue;
+
+ memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
+ pos += scnprintf(buf + pos, buflen - pos, "%s=%s",
+ name, (cdata->kvp[i].nbytes ? "" : "null"));
+
+ for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
+ value = 0;
+ memcpy(&value, &cdata->kvp[i].value[j],
+ min(sizeof(uint32_t),
+ cdata->kvp[i].nbytes - j));
+ pos += scnprintf(buf + pos, buflen - pos, "%u",
+ value);
+
+ if (j + 4 < cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos,
+ buflen - pos, " ");
+ }
+ if (prev_valid + 1 < valid_count)
+ pos += scnprintf(buf + pos, buflen - pos, ", ");
+ prev_valid++;
+ }
+ } else {
+ /* Raw formatting only */
+ pos += scnprintf(buf + pos, buflen - pos,
+ ", rsc_type=0x%08X, rsc_id=%u; ",
+ cdata->msg_hdr.resource_type,
+ cdata->msg_hdr.resource_id);
+
+ for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+ if (!cdata->kvp[i].valid)
+ continue;
+
+ pos += scnprintf(buf + pos, buflen - pos,
+ "[key=0x%08X, value=%s",
+ cdata->kvp[i].key,
+ (cdata->kvp[i].nbytes ? "0x" : "null"));
+ for (j = 0; j < cdata->kvp[i].nbytes; j++) {
+ pos += scnprintf(buf + pos, buflen - pos,
+ "%02X",
+ cdata->kvp[i].value[j]);
+ if (j + 1 < cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos,
+ buflen - pos, " ");
+ }
+ pos += scnprintf(buf + pos, buflen - pos, "]");
+ if (prev_valid + 1 < valid_count)
+ pos += scnprintf(buf + pos, buflen - pos, ", ");
+ prev_valid++;
+ }
+ }
+
+ pos += scnprintf(buf + pos, buflen - pos, "\n");
+ printk(buf);
+}
+
static int msm_rpm_send_data(struct msm_rpm_request *cdata,
int msg_type, bool noirq)
{
@@ -546,6 +694,10 @@
tmpbuff += cdata->kvp[i].nbytes;
}
+ if (msm_rpm_debug_mask
+ & (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW))
+ msm_rpm_log_request(cdata);
+
if (standalone) {
for (i = 0; (i < cdata->write_idx); i++)
cdata->kvp[i].valid = false;
diff --git a/arch/arm/mach-msm/smd_pkt.c b/arch/arm/mach-msm/smd_pkt.c
index fdbc387..b9fe341 100644
--- a/arch/arm/mach-msm/smd_pkt.c
+++ b/arch/arm/mach-msm/smd_pkt.c
@@ -725,6 +725,8 @@
SMD_APPS_MODEM,
};
#endif
+module_param_named(loopback_edge, smd_ch_edge[LOOPBACK_INX],
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
static int smd_pkt_dummy_probe(struct platform_device *pdev)
{
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 281e7b8..817c2dc 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -798,6 +798,11 @@
};
}
+const int cpu_is_krait(void)
+{
+ return ((read_cpuid_id() & 0xFF00FC00) == 0x51000400);
+}
+
const int cpu_is_krait_v1(void)
{
switch (read_cpuid_id()) {
@@ -810,3 +815,22 @@
return 0;
};
}
+
+const int cpu_is_krait_v2(void)
+{
+ switch (read_cpuid_id()) {
+ case 0x511F04D0:
+ case 0x511F04D1:
+ case 0x511F04D2:
+ case 0x511F04D3:
+ case 0x511F04D4:
+
+ case 0x510F06F0:
+ case 0x510F06F1:
+ case 0x510F06F2:
+ return 1;
+
+ default:
+ return 0;
+ };
+}
diff --git a/arch/arm/mach-msm/subsystem_restart.c b/arch/arm/mach-msm/subsystem_restart.c
index 0b6f225..65da903 100644
--- a/arch/arm/mach-msm/subsystem_restart.c
+++ b/arch/arm/mach-msm/subsystem_restart.c
@@ -17,7 +17,6 @@
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/fs.h>
-#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/io.h>
@@ -25,11 +24,13 @@
#include <linux/time.h>
#include <linux/wakelock.h>
#include <linux/suspend.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <asm/current.h>
#include <mach/peripheral-loader.h>
-#include <mach/scm.h>
#include <mach/socinfo.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
@@ -42,30 +43,40 @@
struct mutex shutdown_lock;
struct mutex powerup_lock;
- struct subsys_data *subsys_ptrs[];
-};
-
-struct restart_wq_data {
- struct subsys_data *subsys;
- struct wake_lock ssr_wake_lock;
- char wlname[64];
- int use_restart_order;
- struct work_struct work;
+ struct subsys_device *subsys_ptrs[];
};
struct restart_log {
struct timeval time;
- struct subsys_data *subsys;
+ struct subsys_device *dev;
struct list_head list;
};
-static int restart_level;
+struct subsys_device {
+ struct subsys_desc *desc;
+ struct list_head list;
+ struct wake_lock wake_lock;
+ char wlname[64];
+ struct work_struct work;
+ spinlock_t restart_lock;
+ bool restarting;
+
+ void *notify;
+
+ struct mutex shutdown_lock;
+ struct mutex powerup_lock;
+
+ void *restart_order;
+};
+
static int enable_ramdumps;
+module_param(enable_ramdumps, int, S_IRUGO | S_IWUSR);
+
struct workqueue_struct *ssr_wq;
static LIST_HEAD(restart_log_list);
static LIST_HEAD(subsystem_list);
-static DEFINE_SPINLOCK(subsystem_list_lock);
+static DEFINE_MUTEX(subsystem_list_lock);
static DEFINE_MUTEX(soc_order_reg_lock);
static DEFINE_MUTEX(restart_log_mutex);
@@ -122,10 +133,7 @@
static struct subsys_soc_restart_order **restart_orders;
static int n_restart_orders;
-module_param(enable_ramdumps, int, S_IRUGO | S_IWUSR);
-
-static struct subsys_soc_restart_order *_update_restart_order(
- struct subsys_data *subsys);
+static int restart_level = RESET_SOC;
int get_restart_level()
{
@@ -148,18 +156,14 @@
return ret;
switch (restart_level) {
-
case RESET_SOC:
case RESET_SUBSYS_COUPLED:
case RESET_SUBSYS_INDEPENDENT:
pr_info("Phase %d behavior activated.\n", restart_level);
- break;
-
+ break;
default:
restart_level = old_val;
return -EINVAL;
- break;
-
}
return 0;
}
@@ -167,62 +171,29 @@
module_param_call(restart_level, restart_level_set, param_get_int,
&restart_level, 0644);
-static struct subsys_data *_find_subsystem(const char *subsys_name)
-{
- struct subsys_data *subsys;
- unsigned long flags;
-
- spin_lock_irqsave(&subsystem_list_lock, flags);
- list_for_each_entry(subsys, &subsystem_list, list)
- if (!strncmp(subsys->name, subsys_name,
- SUBSYS_NAME_MAX_LENGTH)) {
- spin_unlock_irqrestore(&subsystem_list_lock, flags);
- return subsys;
- }
- spin_unlock_irqrestore(&subsystem_list_lock, flags);
-
- return NULL;
-}
-
-static struct subsys_soc_restart_order *_update_restart_order(
- struct subsys_data *subsys)
+static struct subsys_soc_restart_order *
+update_restart_order(struct subsys_device *dev)
{
int i, j;
-
- if (!subsys)
- return NULL;
-
- if (!subsys->name)
- return NULL;
+ struct subsys_soc_restart_order *order;
+ const char *name = dev->desc->name;
+ int len = SUBSYS_NAME_MAX_LENGTH;
mutex_lock(&soc_order_reg_lock);
for (j = 0; j < n_restart_orders; j++) {
- for (i = 0; i < restart_orders[j]->count; i++)
- if (!strncmp(restart_orders[j]->subsystem_list[i],
- subsys->name, SUBSYS_NAME_MAX_LENGTH)) {
-
- restart_orders[j]->subsys_ptrs[i] =
- subsys;
- mutex_unlock(&soc_order_reg_lock);
- return restart_orders[j];
+ order = restart_orders[j];
+ for (i = 0; i < order->count; i++) {
+ if (!strncmp(order->subsystem_list[i], name, len)) {
+ order->subsys_ptrs[i] = dev;
+ goto found;
}
+ }
}
-
+ order = NULL;
+found:
mutex_unlock(&soc_order_reg_lock);
- return NULL;
-}
-
-static void _send_notification_to_order(struct subsys_data
- **restart_list, int count,
- enum subsys_notif_type notif_type)
-{
- int i;
-
- for (i = 0; i < count; i++)
- if (restart_list[i])
- subsys_notif_queue_notification(
- restart_list[i]->notif_handle, notif_type);
+ return order;
}
static int max_restarts;
@@ -231,7 +202,7 @@
static long max_history_time = 3600;
module_param(max_history_time, long, 0644);
-static void do_epoch_check(struct subsys_data *subsys)
+static void do_epoch_check(struct subsys_device *dev)
{
int n = 0;
struct timeval *time_first = NULL, *curr_time;
@@ -251,7 +222,7 @@
r_log = kmalloc(sizeof(struct restart_log), GFP_KERNEL);
if (!r_log)
goto out;
- r_log->subsys = subsys;
+ r_log->dev = dev;
do_gettimeofday(&r_log->time);
curr_time = &r_log->time;
INIT_LIST_HEAD(&r_log->list);
@@ -289,42 +260,94 @@
mutex_unlock(&restart_log_mutex);
}
+static void for_each_subsys_device(struct subsys_device **list, unsigned count,
+ void *data, void (*fn)(struct subsys_device *, void *))
+{
+ while (count--) {
+ struct subsys_device *dev = *list++;
+ if (!dev)
+ continue;
+ fn(dev, data);
+ }
+}
+
+static void __send_notification_to_order(struct subsys_device *dev, void *data)
+{
+ enum subsys_notif_type type = (enum subsys_notif_type)data;
+
+ subsys_notif_queue_notification(dev->notify, type);
+}
+
+static void send_notification_to_order(struct subsys_device **l, unsigned n,
+ enum subsys_notif_type t)
+{
+ for_each_subsys_device(l, n, (void *)t, __send_notification_to_order);
+}
+
+static void subsystem_shutdown(struct subsys_device *dev, void *data)
+{
+ const char *name = dev->desc->name;
+
+ pr_info("[%p]: Shutting down %s\n", current, name);
+ if (dev->desc->shutdown(dev->desc) < 0)
+ panic("subsys-restart: [%p]: Failed to shutdown %s!",
+ current, name);
+}
+
+static void subsystem_ramdump(struct subsys_device *dev, void *data)
+{
+ const char *name = dev->desc->name;
+
+ if (dev->desc->ramdump)
+ if (dev->desc->ramdump(enable_ramdumps, dev->desc) < 0)
+ pr_warn("%s[%p]: Ramdump failed.\n", name, current);
+}
+
+static void subsystem_powerup(struct subsys_device *dev, void *data)
+{
+ const char *name = dev->desc->name;
+
+ pr_info("[%p]: Powering up %s\n", current, name);
+ if (dev->desc->powerup(dev->desc) < 0)
+ panic("[%p]: Failed to powerup %s!", current, name);
+}
+
static void subsystem_restart_wq_func(struct work_struct *work)
{
- struct restart_wq_data *r_work = container_of(work,
- struct restart_wq_data, work);
- struct subsys_data **restart_list;
- struct subsys_data *subsys = r_work->subsys;
+ struct subsys_device *dev = container_of(work,
+ struct subsys_device, work);
+ struct subsys_device **list;
+ struct subsys_desc *desc = dev->desc;
struct subsys_soc_restart_order *soc_restart_order = NULL;
-
struct mutex *powerup_lock;
struct mutex *shutdown_lock;
+ unsigned count;
+ unsigned long flags;
- int i;
- int restart_list_count = 0;
+ if (restart_level != RESET_SUBSYS_INDEPENDENT)
+ soc_restart_order = dev->restart_order;
- if (r_work->use_restart_order)
- soc_restart_order = subsys->restart_order;
-
- /* It's OK to not take the registration lock at this point.
+ /*
+ * It's OK to not take the registration lock at this point.
* This is because the subsystem list inside the relevant
* restart order is not being traversed.
*/
if (!soc_restart_order) {
- restart_list = subsys->single_restart_list;
- restart_list_count = 1;
- powerup_lock = &subsys->powerup_lock;
- shutdown_lock = &subsys->shutdown_lock;
+ list = &dev;
+ count = 1;
+ powerup_lock = &dev->powerup_lock;
+ shutdown_lock = &dev->shutdown_lock;
} else {
- restart_list = soc_restart_order->subsys_ptrs;
- restart_list_count = soc_restart_order->count;
+ list = soc_restart_order->subsys_ptrs;
+ count = soc_restart_order->count;
powerup_lock = &soc_restart_order->powerup_lock;
shutdown_lock = &soc_restart_order->shutdown_lock;
}
pr_debug("[%p]: Attempting to get shutdown lock!\n", current);
- /* Try to acquire shutdown_lock. If this fails, these subsystems are
+ /*
+ * Try to acquire shutdown_lock. If this fails, these subsystems are
* already being restarted - return.
*/
if (!mutex_trylock(shutdown_lock))
@@ -332,7 +355,8 @@
pr_debug("[%p]: Attempting to get powerup lock!\n", current);
- /* Now that we've acquired the shutdown lock, either we're the first to
+ /*
+ * Now that we've acquired the shutdown lock, either we're the first to
* restart these subsystems or some other thread is doing the powerup
* sequence for these subsystems. In the latter case, panic and bail
* out, since a subsystem died in its powerup sequence.
@@ -341,38 +365,23 @@
panic("%s[%p]: Subsystem died during powerup!",
__func__, current);
- do_epoch_check(subsys);
+ do_epoch_check(dev);
- /* Now it is necessary to take the registration lock. This is because
- * the subsystem list in the SoC restart order will be traversed
- * and it shouldn't be changed until _this_ restart sequence completes.
+ /*
+ * It's necessary to take the registration lock because the subsystem
+ * list in the SoC restart order will be traversed and it shouldn't be
+ * changed until _this_ restart sequence completes.
*/
mutex_lock(&soc_order_reg_lock);
pr_debug("[%p]: Starting restart sequence for %s\n", current,
- r_work->subsys->name);
+ desc->name);
+ send_notification_to_order(list, count, SUBSYS_BEFORE_SHUTDOWN);
+ for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+ send_notification_to_order(list, count, SUBSYS_AFTER_SHUTDOWN);
- _send_notification_to_order(restart_list,
- restart_list_count,
- SUBSYS_BEFORE_SHUTDOWN);
-
- for (i = 0; i < restart_list_count; i++) {
-
- if (!restart_list[i])
- continue;
-
- pr_info("[%p]: Shutting down %s\n", current,
- restart_list[i]->name);
-
- if (restart_list[i]->shutdown(subsys) < 0)
- panic("subsys-restart: %s[%p]: Failed to shutdown %s!",
- __func__, current, restart_list[i]->name);
- }
-
- _send_notification_to_order(restart_list, restart_list_count,
- SUBSYS_AFTER_SHUTDOWN);
-
- /* Now that we've finished shutting down these subsystems, release the
+ /*
+ * Now that we've finished shutting down these subsystems, release the
* shutdown lock. If a subsystem restart request comes in for a
* subsystem in _this_ restart order after the unlock below, and
* before the powerup lock is released, panic and bail out.
@@ -380,40 +389,14 @@
mutex_unlock(shutdown_lock);
/* Collect ram dumps for all subsystems in order here */
- for (i = 0; i < restart_list_count; i++) {
- if (!restart_list[i])
- continue;
+ for_each_subsys_device(list, count, NULL, subsystem_ramdump);
- if (restart_list[i]->ramdump)
- if (restart_list[i]->ramdump(enable_ramdumps,
- subsys) < 0)
- pr_warn("%s[%p]: Ramdump failed.\n",
- restart_list[i]->name, current);
- }
-
- _send_notification_to_order(restart_list,
- restart_list_count,
- SUBSYS_BEFORE_POWERUP);
-
- for (i = restart_list_count - 1; i >= 0; i--) {
-
- if (!restart_list[i])
- continue;
-
- pr_info("[%p]: Powering up %s\n", current,
- restart_list[i]->name);
-
- if (restart_list[i]->powerup(subsys) < 0)
- panic("%s[%p]: Failed to powerup %s!", __func__,
- current, restart_list[i]->name);
- }
-
- _send_notification_to_order(restart_list,
- restart_list_count,
- SUBSYS_AFTER_POWERUP);
+ send_notification_to_order(list, count, SUBSYS_BEFORE_POWERUP);
+ for_each_subsys_device(list, count, NULL, subsystem_powerup);
+ send_notification_to_order(list, count, SUBSYS_AFTER_POWERUP);
pr_info("[%p]: Restart sequence for %s completed.\n",
- current, r_work->subsys->name);
+ current, desc->name);
mutex_unlock(powerup_lock);
@@ -422,123 +405,119 @@
pr_debug("[%p]: Released powerup lock!\n", current);
out:
- wake_unlock(&r_work->ssr_wake_lock);
- wake_lock_destroy(&r_work->ssr_wake_lock);
- kfree(r_work);
+ spin_lock_irqsave(&dev->restart_lock, flags);
+ wake_unlock(&dev->wake_lock);
+ dev->restarting = false;
+ spin_unlock_irqrestore(&dev->restart_lock, flags);
}
-static void __subsystem_restart(struct subsys_data *subsys)
+static void __subsystem_restart_dev(struct subsys_device *dev)
{
- struct restart_wq_data *data = NULL;
- int rc;
+ struct subsys_desc *desc = dev->desc;
+ unsigned long flags;
- pr_debug("Restarting %s [level=%d]!\n", subsys->name,
+ spin_lock_irqsave(&dev->restart_lock, flags);
+ if (!dev->restarting) {
+ pr_debug("Restarting %s [level=%d]!\n", desc->name,
restart_level);
- data = kzalloc(sizeof(struct restart_wq_data), GFP_ATOMIC);
- if (!data)
- panic("%s: Unable to allocate memory to restart %s.",
- __func__, subsys->name);
-
- data->subsys = subsys;
-
- if (restart_level != RESET_SUBSYS_INDEPENDENT)
- data->use_restart_order = 1;
-
- snprintf(data->wlname, sizeof(data->wlname), "ssr(%s)", subsys->name);
- wake_lock_init(&data->ssr_wake_lock, WAKE_LOCK_SUSPEND, data->wlname);
- wake_lock(&data->ssr_wake_lock);
-
- INIT_WORK(&data->work, subsystem_restart_wq_func);
- rc = queue_work(ssr_wq, &data->work);
- if (rc < 0)
- panic("%s: Unable to schedule work to restart %s (%d).",
- __func__, subsys->name, rc);
+ dev->restarting = true;
+ wake_lock(&dev->wake_lock);
+ queue_work(ssr_wq, &dev->work);
+ }
+ spin_unlock_irqrestore(&dev->restart_lock, flags);
}
-int subsystem_restart(const char *subsys_name)
+int subsystem_restart_dev(struct subsys_device *dev)
{
- struct subsys_data *subsys;
-
- if (!subsys_name) {
- pr_err("Invalid subsystem name.\n");
- return -EINVAL;
- }
+ const char *name = dev->desc->name;
pr_info("Restart sequence requested for %s, restart_level = %d.\n",
- subsys_name, restart_level);
-
- /* List of subsystems is protected by a lock. New subsystems can
- * still come in.
- */
- subsys = _find_subsystem(subsys_name);
-
- if (!subsys) {
- pr_warn("Unregistered subsystem %s!\n", subsys_name);
- return -EINVAL;
- }
+ name, restart_level);
switch (restart_level) {
case RESET_SUBSYS_COUPLED:
case RESET_SUBSYS_INDEPENDENT:
- __subsystem_restart(subsys);
+ __subsystem_restart_dev(dev);
break;
-
case RESET_SOC:
- panic("subsys-restart: Resetting the SoC - %s crashed.",
- subsys->name);
+ panic("subsys-restart: Resetting the SoC - %s crashed.", name);
break;
-
default:
panic("subsys-restart: Unknown restart level!\n");
- break;
-
+ break;
}
return 0;
}
+EXPORT_SYMBOL(subsystem_restart_dev);
+
+int subsystem_restart(const char *name)
+{
+ struct subsys_device *dev;
+
+ mutex_lock(&subsystem_list_lock);
+ list_for_each_entry(dev, &subsystem_list, list)
+ if (!strncmp(dev->desc->name, name, SUBSYS_NAME_MAX_LENGTH))
+ goto found;
+ dev = NULL;
+found:
+ mutex_unlock(&subsystem_list_lock);
+ if (dev)
+ return subsystem_restart_dev(dev);
+ return -ENODEV;
+}
EXPORT_SYMBOL(subsystem_restart);
-int ssr_register_subsystem(struct subsys_data *subsys)
+struct subsys_device *subsys_register(struct subsys_desc *desc)
{
- unsigned long flags;
+ struct subsys_device *dev;
- if (!subsys)
- goto err;
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
- if (!subsys->name)
- goto err;
+ dev->desc = desc;
+ dev->notify = subsys_notif_add_subsys(desc->name);
+ dev->restart_order = update_restart_order(dev);
- if (!subsys->powerup || !subsys->shutdown)
- goto err;
+ snprintf(dev->wlname, sizeof(dev->wlname), "ssr(%s)", desc->name);
+ wake_lock_init(&dev->wake_lock, WAKE_LOCK_SUSPEND, dev->wlname);
+ INIT_WORK(&dev->work, subsystem_restart_wq_func);
+ spin_lock_init(&dev->restart_lock);
- subsys->notif_handle = subsys_notif_add_subsys(subsys->name);
- subsys->restart_order = _update_restart_order(subsys);
- subsys->single_restart_list[0] = subsys;
+ mutex_init(&dev->shutdown_lock);
+ mutex_init(&dev->powerup_lock);
- mutex_init(&subsys->shutdown_lock);
- mutex_init(&subsys->powerup_lock);
+ mutex_lock(&subsystem_list_lock);
+ list_add(&dev->list, &subsystem_list);
+ mutex_unlock(&subsystem_list_lock);
- spin_lock_irqsave(&subsystem_list_lock, flags);
- list_add(&subsys->list, &subsystem_list);
- spin_unlock_irqrestore(&subsystem_list_lock, flags);
-
- return 0;
-
-err:
- return -EINVAL;
+ return dev;
}
-EXPORT_SYMBOL(ssr_register_subsystem);
+EXPORT_SYMBOL(subsys_register);
+
+void subsys_unregister(struct subsys_device *dev)
+{
+ if (IS_ERR_OR_NULL(dev))
+ return;
+ mutex_lock(&subsystem_list_lock);
+ list_del(&dev->list);
+ mutex_unlock(&subsystem_list_lock);
+ wake_lock_destroy(&dev->wake_lock);
+ kfree(dev);
+}
+EXPORT_SYMBOL(subsys_unregister);
static int ssr_panic_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
- struct subsys_data *subsys;
+ struct subsys_device *dev;
- list_for_each_entry(subsys, &subsystem_list, list)
- if (subsys->crash_shutdown)
- subsys->crash_shutdown(subsys);
+ list_for_each_entry(dev, &subsystem_list, list)
+ if (dev->desc->crash_shutdown)
+ dev->desc->crash_shutdown(dev->desc);
return NOTIFY_DONE;
}
@@ -595,20 +574,12 @@
static int __init subsys_restart_init(void)
{
- int ret = 0;
-
- restart_level = RESET_SOC;
-
ssr_wq = alloc_workqueue("ssr_wq", 0, 0);
-
if (!ssr_wq)
panic("Couldn't allocate workqueue for subsystem restart.\n");
- ret = ssr_init_soc_restart_orders();
-
- return ret;
+ return ssr_init_soc_restart_orders();
}
-
arch_initcall(subsys_restart_init);
MODULE_DESCRIPTION("Subsystem Restart Driver");
diff --git a/arch/arm/mach-msm/wcnss-ssr-8960.c b/arch/arm/mach-msm/wcnss-ssr-8960.c
index 6e8d57c..4295d9b 100644
--- a/arch/arm/mach-msm/wcnss-ssr-8960.c
+++ b/arch/arm/mach-msm/wcnss-ssr-8960.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/wcnss_wlan.h>
+#include <linux/err.h>
#include <mach/irqs.h>
#include <mach/scm.h>
#include <mach/subsystem_restart.h>
@@ -37,6 +38,7 @@
static int riva_crash;
static int ss_restart_inprogress;
static int enable_riva_ssr;
+static struct subsys_device *riva_8960_dev;
static void smsm_state_cb_hdlr(void *data, uint32_t old_state,
uint32_t new_state)
@@ -83,7 +85,7 @@
}
ss_restart_inprogress = true;
- subsystem_restart("riva");
+ subsystem_restart_dev(riva_8960_dev);
}
static irqreturn_t riva_wdog_bite_irq_hdlr(int irq, void *dev_id)
@@ -100,7 +102,7 @@
panic(MODULE_NAME ": Watchdog bite received from Riva");
ss_restart_inprogress = true;
- subsystem_restart("riva");
+ subsystem_restart_dev(riva_8960_dev);
return IRQ_HANDLED;
}
@@ -126,7 +128,7 @@
}
/* Subsystem handlers */
-static int riva_shutdown(const struct subsys_data *subsys)
+static int riva_shutdown(const struct subsys_desc *subsys)
{
pil_force_shutdown("wcnss");
flush_delayed_work(&cancel_vote_work);
@@ -135,7 +137,7 @@
return 0;
}
-static int riva_powerup(const struct subsys_data *subsys)
+static int riva_powerup(const struct subsys_desc *subsys)
{
struct platform_device *pdev = wcnss_get_platform_device();
struct wcnss_wlan_config *pwlanconfig = wcnss_get_wlan_config();
@@ -162,7 +164,7 @@
static struct ramdump_segment riva_segments[] = {{0x8f200000,
0x8f700000 - 0x8f200000} };
-static int riva_ramdump(int enable, const struct subsys_data *subsys)
+static int riva_ramdump(int enable, const struct subsys_desc *subsys)
{
pr_debug("%s: enable[%d]\n", MODULE_NAME, enable);
if (enable)
@@ -174,14 +176,14 @@
}
/* Riva crash handler */
-static void riva_crash_shutdown(const struct subsys_data *subsys)
+static void riva_crash_shutdown(const struct subsys_desc *subsys)
{
pr_err("%s: crash shutdown : %d\n", MODULE_NAME, riva_crash);
if (riva_crash != true)
smsm_riva_reset();
}
-static struct subsys_data riva_8960 = {
+static struct subsys_desc riva_8960 = {
.name = "riva",
.shutdown = riva_shutdown,
.powerup = riva_powerup,
@@ -208,7 +210,10 @@
static int __init riva_restart_init(void)
{
- return ssr_register_subsystem(&riva_8960);
+ riva_8960_dev = subsys_register(&riva_8960);
+ if (IS_ERR(riva_8960_dev))
+ return PTR_ERR(riva_8960_dev);
+ return 0;
}
static int __init riva_ssr_module_init(void)
@@ -253,6 +258,7 @@
static void __exit riva_ssr_module_exit(void)
{
+ subsys_unregister(riva_8960_dev);
free_irq(RIVA_APSS_WDOG_BITE_RESET_RDY_IRQ, NULL);
}
diff --git a/block/test-iosched.c b/block/test-iosched.c
index 3c38734..0a033dc 100644
--- a/block/test-iosched.c
+++ b/block/test-iosched.c
@@ -127,7 +127,7 @@
test_pr_info("%s: request %d completed, err=%d",
__func__, test_rq->req_id, err);
- test_rq->req_completed = 1;
+ test_rq->req_completed = true;
test_rq->req_result = err;
check_test_completion();
@@ -173,6 +173,9 @@
bio->bi_size = nr_sects << 9;
bio->bi_sector = start_sec;
break;
+ case REQ_UNIQUE_SANITIZE:
+ bio->bi_rw = REQ_WRITE | REQ_SANITIZE;
+ break;
default:
test_pr_err("%s: Invalid request type %d", __func__,
req_unique);
@@ -204,8 +207,8 @@
blk_put_request(rq);
return -ENODEV;
}
- test_rq->req_completed = 0;
- test_rq->req_result = -1;
+ test_rq->req_completed = false;
+ test_rq->req_result = -EINVAL;
test_rq->rq = rq;
test_rq->is_err_expected = is_err_expcted;
rq->elv.priv[0] = (void *)test_rq;
@@ -347,8 +350,8 @@
ptd->num_of_write_bios += num_bios;
test_rq->req_id = ptd->wr_rd_next_req_id++;
- test_rq->req_completed = 0;
- test_rq->req_result = -1;
+ test_rq->req_completed = false;
+ test_rq->req_result = -EINVAL;
test_rq->rq = rq;
test_rq->is_err_expected = is_err_expcted;
rq->elv.priv[0] = (void *)test_rq;
@@ -519,10 +522,26 @@
static void free_test_requests(struct test_data *td)
{
struct test_request *test_rq;
+ struct bio *bio;
+
while (!list_empty(&td->test_queue)) {
test_rq = list_entry(td->test_queue.next, struct test_request,
queuelist);
list_del_init(&test_rq->queuelist);
+ /*
+ * If the request was not completed we need to free its BIOs
+ * and remove it from the packed list
+ */
+ if (!test_rq->req_completed) {
+ test_pr_info(
+ "%s: Freeing memory of an uncompleted request",
+ __func__);
+ list_del_init(&test_rq->rq->queuelist);
+ while ((bio = test_rq->rq->bio) != NULL) {
+ test_rq->rq->bio = bio->bi_next;
+ bio_put(bio);
+ }
+ }
blk_put_request(test_rq->rq);
kfree(test_rq->bios_buffer);
kfree(test_rq);
@@ -606,7 +625,8 @@
test_pr_info(
"%s: Another test is running, try again later",
__func__);
- return -EINVAL;
+ spin_unlock(&ptd->lock);
+ return -EBUSY;
}
if (ptd->start_sector == 0) {
@@ -698,9 +718,8 @@
return -EINVAL;
error:
- ptd->test_result = TEST_FAILED;
- ptd->test_info.testcase = 0;
post_test(ptd);
+ ptd->test_result = TEST_FAILED;
return ret;
}
EXPORT_SYMBOL(test_iosched_start_test);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e9d654b..be71347 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -142,7 +142,7 @@
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
-struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, int sysfs)
{
struct cpufreq_policy *data;
unsigned long flags;
@@ -166,7 +166,7 @@
if (!data)
goto err_out_put_module;
- if (!kobject_get(&data->kobj))
+ if (!sysfs && !kobject_get(&data->kobj))
goto err_out_put_module;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -179,16 +179,35 @@
err_out:
return NULL;
}
+
+struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+{
+ return __cpufreq_cpu_get(cpu, 0);
+}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
+static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
+{
+ return __cpufreq_cpu_get(cpu, 1);
+}
+
+static void __cpufreq_cpu_put(struct cpufreq_policy *data, int sysfs)
+{
+ if (!sysfs)
+ kobject_put(&data->kobj);
+ module_put(cpufreq_driver->owner);
+}
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
- kobject_put(&data->kobj);
- module_put(cpufreq_driver->owner);
+ __cpufreq_cpu_put(data, 0);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
+static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
+{
+ __cpufreq_cpu_put(data, 1);
+}
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
@@ -643,7 +662,7 @@
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
- policy = cpufreq_cpu_get(policy->cpu);
+ policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -657,7 +676,7 @@
unlock_policy_rwsem_read(policy->cpu);
fail:
- cpufreq_cpu_put(policy);
+ cpufreq_cpu_put_sysfs(policy);
no_policy:
return ret;
}
@@ -668,7 +687,7 @@
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
- policy = cpufreq_cpu_get(policy->cpu);
+ policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -682,7 +701,7 @@
unlock_policy_rwsem_write(policy->cpu);
fail:
- cpufreq_cpu_put(policy);
+ cpufreq_cpu_put_sysfs(policy);
no_policy:
return ret;
}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 5798c94..785ba6c 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -994,7 +994,6 @@
rc = input_register_handler(&dbs_input_handler);
mutex_unlock(&dbs_mutex);
- mutex_init(&this_dbs_info->timer_mutex);
if (!ondemand_powersave_bias_setspeed(
this_dbs_info->cur_policy,
@@ -1071,6 +1070,9 @@
return -EFAULT;
}
for_each_possible_cpu(i) {
+ struct cpu_dbs_info_s *this_dbs_info =
+ &per_cpu(od_cpu_dbs_info, i);
+ mutex_init(&this_dbs_info->timer_mutex);
INIT_WORK(&per_cpu(dbs_refresh_work, i), dbs_refresh_callback);
}
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 2a191d5..fecce3f 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -572,6 +572,7 @@
if (podev->ce_support.sha_hmac) {
sreq.alg = QCE_HASH_SHA1_HMAC;
sreq.authkey = &handle->sha_ctxt.authkey[0];
+ sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
} else {
sreq.alg = QCE_HASH_SHA1;
@@ -582,7 +583,7 @@
if (podev->ce_support.sha_hmac) {
sreq.alg = QCE_HASH_SHA256_HMAC;
sreq.authkey = &handle->sha_ctxt.authkey[0];
-
+ sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
} else {
sreq.alg = QCE_HASH_SHA256;
sreq.authkey = NULL;
@@ -959,7 +960,6 @@
uint8_t *k_buf_src = NULL;
uint8_t *k_align_src = NULL;
- handle->sha_ctxt.first_blk = 0;
handle->sha_ctxt.last_blk = 1;
total = handle->sha_ctxt.trailing_buf_len;
@@ -977,9 +977,6 @@
CACHE_LINE_SIZE);
memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
}
- handle->sha_ctxt.last_blk = 1;
- handle->sha_ctxt.first_blk = 0;
-
qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
sg_mark_end(qcedev_areq->sha_req.sreq.src);
@@ -1071,6 +1068,7 @@
int err = 0;
if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
+ qcedev_sha_init(areq, handle);
/* Verify Source Address */
if (!access_ok(VERIFY_READ,
(void __user *)areq->sha_op_req.authkey,
@@ -1082,6 +1080,7 @@
return -EFAULT;
} else {
struct qcedev_async_req authkey_areq;
+ uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
init_completion(&authkey_areq.complete);
@@ -1091,6 +1090,8 @@
authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
authkey_areq.sha_op_req.diglen = 0;
+ authkey_areq.handle = handle;
+
memset(&authkey_areq.sha_op_req.digest[0], 0,
QCEDEV_MAX_SHA_DIGEST);
if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
@@ -1106,8 +1107,11 @@
err = qcedev_sha_final(&authkey_areq, handle);
else
return err;
- memcpy(&handle->sha_ctxt.authkey[0],
- &handle->sha_ctxt.digest[0],
+ memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
+ handle->sha_ctxt.diglen);
+ qcedev_sha_init(areq, handle);
+
+ memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
handle->sha_ctxt.diglen);
}
return err;
@@ -1209,7 +1213,6 @@
int err;
struct qcedev_control *podev = handle->cntl;
- qcedev_sha_init(areq, handle);
err = qcedev_set_hmac_auth_key(areq, handle);
if (err)
return err;
@@ -2098,12 +2101,19 @@
return 0;
};
+static struct of_device_id qcedev_match[] = {
+ { .compatible = "qcom,qcedev",
+ },
+ {}
+};
+
static struct platform_driver qcedev_plat_driver = {
.probe = qcedev_probe,
.remove = qcedev_remove,
.driver = {
.name = "qce",
.owner = THIS_MODULE,
+ .of_match_table = qcedev_match,
},
};
@@ -2222,7 +2232,7 @@
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
-MODULE_VERSION("1.26");
+MODULE_VERSION("1.27");
module_init(qcedev_init);
module_exit(qcedev_exit);
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index a41a64b..c11c36e 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -3260,12 +3260,20 @@
return rc;
};
+
+static struct of_device_id qcrypto_match[] = {
+ { .compatible = "qcom,qcrypto",
+ },
+ {}
+};
+
static struct platform_driver _qualcomm_crypto = {
.probe = _qcrypto_probe,
.remove = _qcrypto_remove,
.driver = {
.owner = THIS_MODULE,
.name = "qcrypto",
+ .of_match_table = qcrypto_match,
},
};
@@ -3364,4 +3372,4 @@
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm Crypto driver");
-MODULE_VERSION("1.21");
+MODULE_VERSION("1.22");
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
index 2e8964d..8ec9431 100644
--- a/drivers/gpu/msm/a3xx_reg.h
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -290,6 +290,8 @@
#define RB_CLEAR_MODE_RESOLVE 1
#define RB_TILINGMODE_LINEAR 0
#define RB_REF_NEVER 0
+#define RB_FRAG_LESS 1
+#define RB_REF_ALWAYS 7
#define RB_STENCIL_KEEP 0
#define RB_RENDERING_PASS 0
#define RB_TILINGMODE_32X32 2
@@ -327,6 +329,7 @@
#define GRAS_SC_SCREEN_SCISSOR_BR_BR_Y 16
#define GRAS_SC_WINDOW_SCISSOR_BR_BR_X 0
#define GRAS_SC_WINDOW_SCISSOR_BR_BR_Y 16
+#define GRAS_SU_CTRLMODE_LINEHALFWIDTH 03
#define HLSQ_CONSTFSPRESERVEDRANGEREG_ENDENTRY 16
#define HLSQ_CONSTFSPRESERVEDRANGEREG_STARTENTRY 0
#define HLSQ_CTRL0REG_CHUNKDISABLE 26
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 7720df0..b72c847 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -289,6 +289,10 @@
KGSL_IOMMU_CONTEXT_USER))
goto done;
+ cmds += __adreno_add_idle_indirect_cmds(cmds,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
if (cpu_is_msm8960())
cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
device->mmu.setstate_memory.gpuaddr +
@@ -357,10 +361,9 @@
*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
*cmds++ = 0x7fff;
- if (flags & KGSL_MMUFLAGS_TLBFLUSH)
- cmds += __adreno_add_idle_indirect_cmds(cmds,
- device->mmu.setstate_memory.gpuaddr +
- KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ cmds += __adreno_add_idle_indirect_cmds(cmds,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
}
if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
/*
@@ -793,133 +796,290 @@
return 0;
}
+static void adreno_mark_context_status(struct kgsl_device *device,
+ int recovery_status)
+{
+ struct kgsl_context *context;
+ int next = 0;
+ /*
+ * Set the reset status of all contexts to
+ * INNOCENT_CONTEXT_RESET_EXT except for the bad context
+	 * since that's the guilty party; if recovery failed then
+ * mark all as guilty
+ */
+ while ((context = idr_get_next(&device->context_idr, &next))) {
+ struct adreno_context *adreno_context = context->devctxt;
+ if (recovery_status) {
+ context->reset_status =
+ KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
+ adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
+ } else if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
+ context->reset_status) {
+			if (adreno_context->flags & (CTXT_FLAGS_GPU_HANG |
+				CTXT_FLAGS_GPU_HANG_RECOVERED))
+ context->reset_status =
+ KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
+ else
+ context->reset_status =
+ KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT;
+ }
+ next = next + 1;
+ }
+}
+
+static void adreno_set_max_ts_for_bad_ctxs(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ struct kgsl_context *context;
+ struct adreno_context *temp_adreno_context;
+ int next = 0;
+
+ while ((context = idr_get_next(&device->context_idr, &next))) {
+ temp_adreno_context = context->devctxt;
+ if (temp_adreno_context->flags & CTXT_FLAGS_GPU_HANG) {
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id,
+ soptimestamp),
+ rb->timestamp[context->id]);
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id,
+ eoptimestamp),
+ rb->timestamp[context->id]);
+ }
+ next = next + 1;
+ }
+}
+
+static void adreno_destroy_recovery_data(struct adreno_recovery_data *rec_data)
+{
+ vfree(rec_data->rb_buffer);
+ vfree(rec_data->bad_rb_buffer);
+}
+
+static int adreno_setup_recovery_data(struct kgsl_device *device,
+ struct adreno_recovery_data *rec_data)
+{
+ int ret = 0;
+ unsigned int ib1_sz, ib2_sz;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ memset(rec_data, 0, sizeof(*rec_data));
+
+ adreno_regread(device, REG_CP_IB1_BUFSZ, &ib1_sz);
+ adreno_regread(device, REG_CP_IB2_BUFSZ, &ib2_sz);
+ if (ib1_sz || ib2_sz)
+ adreno_regread(device, REG_CP_IB1_BASE, &rec_data->ib1);
+
+ kgsl_sharedmem_readl(&device->memstore, &rec_data->context_id,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ current_context));
+
+ kgsl_sharedmem_readl(&device->memstore,
+ &rec_data->global_eop,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp));
+
+ rec_data->rb_buffer = vmalloc(rb->buffer_desc.size);
+ if (!rec_data->rb_buffer) {
+ KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
+ rb->buffer_desc.size);
+ return -ENOMEM;
+ }
+
+ rec_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size);
+ if (!rec_data->bad_rb_buffer) {
+ KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
+ rb->buffer_desc.size);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+done:
+ if (ret) {
+ vfree(rec_data->rb_buffer);
+ vfree(rec_data->bad_rb_buffer);
+ }
+ return ret;
+}
+
static int
-adreno_recover_hang(struct kgsl_device *device)
+_adreno_recover_hang(struct kgsl_device *device,
+ struct adreno_recovery_data *rec_data,
+ bool try_bad_commands)
{
int ret;
- unsigned int *rb_buffer;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ struct kgsl_context *context;
+ struct adreno_context *adreno_context = NULL;
+ struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
+
+ context = idr_find(&device->context_idr, rec_data->context_id);
+ if (context == NULL) {
+ KGSL_DRV_ERR(device, "Last context unknown id:%d\n",
+ rec_data->context_id);
+ } else {
+ adreno_context = context->devctxt;
+ adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
+ }
+
+ /* Extract valid contents from rb which can still be executed after
+ * hang */
+ ret = adreno_ringbuffer_extract(rb, rec_data);
+ if (ret)
+ goto done;
+
+ /* restart device */
+ ret = adreno_stop(device);
+ if (ret) {
+ KGSL_DRV_ERR(device, "Device stop failed in recovery\n");
+ goto done;
+ }
+
+ ret = adreno_start(device, true);
+ if (ret) {
+ KGSL_DRV_ERR(device, "Device start failed in recovery\n");
+ goto done;
+ }
+
+ if (context)
+ kgsl_mmu_setstate(&device->mmu, adreno_context->pagetable,
+ KGSL_MEMSTORE_GLOBAL);
+
+	/* Do not try the bad commands if recovery has already failed
+	 * on them once */
+ if (!try_bad_commands)
+ rec_data->bad_rb_size = 0;
+
+ if (rec_data->bad_rb_size) {
+ int idle_ret;
+ /* submit the bad and good context commands and wait for
+ * them to pass */
+ adreno_ringbuffer_restore(rb, rec_data->bad_rb_buffer,
+ rec_data->bad_rb_size);
+ idle_ret = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+ if (idle_ret) {
+ ret = adreno_stop(device);
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Device stop failed in recovery\n");
+ goto done;
+ }
+ ret = adreno_start(device, true);
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Device start failed in recovery\n");
+ goto done;
+ }
+ ret = idle_ret;
+ KGSL_DRV_ERR(device,
+ "Bad context commands hung in recovery\n");
+ } else {
+ KGSL_DRV_ERR(device,
+ "Bad context commands succeeded in recovery\n");
+ if (adreno_context)
+ adreno_context->flags = (adreno_context->flags &
+ ~CTXT_FLAGS_GPU_HANG) |
+ CTXT_FLAGS_GPU_HANG_RECOVERED;
+ adreno_dev->drawctxt_active = last_active_ctx;
+ }
+ }
+ /* If either the bad command sequence failed or we did not play it */
+ if (ret || !rec_data->bad_rb_size) {
+ adreno_ringbuffer_restore(rb, rec_data->rb_buffer,
+ rec_data->rb_size);
+ ret = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+ if (ret) {
+ /* If we fail here we can try to invalidate another
+ * context and try recovering again */
+ ret = -EAGAIN;
+ goto done;
+ }
+ /* ringbuffer now has data from the last valid context id,
+ * so restore the active_ctx to the last valid context */
+ if (rec_data->last_valid_ctx_id) {
+ struct kgsl_context *last_ctx =
+ idr_find(&device->context_idr,
+ rec_data->last_valid_ctx_id);
+ if (last_ctx)
+ adreno_dev->drawctxt_active = last_ctx->devctxt;
+ }
+ }
+done:
+ return ret;
+}
+
+static int
+adreno_recover_hang(struct kgsl_device *device,
+ struct adreno_recovery_data *rec_data)
+{
+ int ret = 0;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
unsigned int timestamp;
- unsigned int num_rb_contents;
- unsigned int reftimestamp;
- unsigned int enable_ts;
- unsigned int soptimestamp;
- unsigned int eoptimestamp;
- unsigned int context_id;
- struct kgsl_context *context;
- struct adreno_context *adreno_context;
- int next = 0;
- KGSL_DRV_ERR(device, "Starting recovery from 3D GPU hang....\n");
- rb_buffer = vmalloc(rb->buffer_desc.size);
- if (!rb_buffer) {
- KGSL_MEM_ERR(device,
- "Failed to allocate memory for recovery: %x\n",
- rb->buffer_desc.size);
- return -ENOMEM;
- }
- /* Extract valid contents from rb which can stil be executed after
- * hang */
- ret = adreno_ringbuffer_extract(rb, rb_buffer, &num_rb_contents);
- if (ret)
- goto done;
- kgsl_sharedmem_readl(&device->memstore, &context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
- context = idr_find(&device->context_idr, context_id);
- if (context == NULL) {
- KGSL_DRV_ERR(device, "Last context unknown id:%d\n",
- context_id);
- context_id = KGSL_MEMSTORE_GLOBAL;
- }
+ KGSL_DRV_ERR(device,
+ "Starting recovery from 3D GPU hang. Recovery parameters: IB1: 0x%X, "
+ "Bad context_id: %u, global_eop: 0x%x\n",
+ rec_data->ib1, rec_data->context_id, rec_data->global_eop);
timestamp = rb->timestamp[KGSL_MEMSTORE_GLOBAL];
KGSL_DRV_ERR(device, "Last issued global timestamp: %x\n", timestamp);
- kgsl_sharedmem_readl(&device->memstore, &reftimestamp,
- KGSL_MEMSTORE_OFFSET(context_id,
- ref_wait_ts));
- kgsl_sharedmem_readl(&device->memstore, &enable_ts,
- KGSL_MEMSTORE_OFFSET(context_id,
- ts_cmp_enable));
- kgsl_sharedmem_readl(&device->memstore, &soptimestamp,
- KGSL_MEMSTORE_OFFSET(context_id,
- soptimestamp));
- kgsl_sharedmem_readl(&device->memstore, &eoptimestamp,
- KGSL_MEMSTORE_OFFSET(context_id,
- eoptimestamp));
- /* Make sure memory is synchronized before restarting the GPU */
- mb();
- KGSL_CTXT_ERR(device,
- "Context id that caused a GPU hang: %d\n", context_id);
- /* restart device */
- ret = adreno_stop(device);
- if (ret)
- goto done;
- ret = adreno_start(device, true);
- if (ret)
- goto done;
- KGSL_DRV_ERR(device, "Device has been restarted after hang\n");
- /* Restore timestamp states */
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id, soptimestamp),
- soptimestamp);
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp),
- eoptimestamp);
+ /* We may need to replay commands multiple times based on whether
+ * multiple contexts hang the GPU */
+ while (true) {
+ if (!ret)
+ ret = _adreno_recover_hang(device, rec_data, true);
+ else
+ ret = _adreno_recover_hang(device, rec_data, false);
- if (num_rb_contents) {
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id, ref_wait_ts),
- reftimestamp);
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable),
- enable_ts);
- }
- /* Make sure all writes are posted before the GPU reads them */
- wmb();
- /* Mark the invalid context so no more commands are accepted from
- * that context */
-
- adreno_context = context->devctxt;
-
- KGSL_CTXT_ERR(device,
- "Context that caused a GPU hang: %d\n", adreno_context->id);
-
- adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
-
- /*
- * Set the reset status of all contexts to
- * INNOCENT_CONTEXT_RESET_EXT except for the bad context
- * since thats the guilty party
- */
- while ((context = idr_get_next(&device->context_idr, &next))) {
- if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
- context->reset_status) {
- if (context->id != context_id)
- context->reset_status =
- KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT;
- else
- context->reset_status =
- KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
+ if (-EAGAIN == ret) {
+			/* set up new recovery parameters and retry; this
+			 * means more than one context is causing the hang */
+ adreno_destroy_recovery_data(rec_data);
+ adreno_setup_recovery_data(device, rec_data);
+ KGSL_DRV_ERR(device,
+ "Retry recovery from 3D GPU hang. Recovery parameters: "
+ "IB1: 0x%X, Bad context_id: %u, global_eop: 0x%x\n",
+ rec_data->ib1, rec_data->context_id,
+ rec_data->global_eop);
+ } else {
+ break;
}
- next = next + 1;
}
- /* Restore valid commands in ringbuffer */
- adreno_ringbuffer_restore(rb, rb_buffer, num_rb_contents);
+ if (ret)
+ goto done;
+
+ /* Restore correct states after recovery */
+ if (adreno_dev->drawctxt_active)
+ device->mmu.hwpagetable =
+ adreno_dev->drawctxt_active->pagetable;
+ else
+ device->mmu.hwpagetable = device->mmu.defaultpagetable;
rb->timestamp[KGSL_MEMSTORE_GLOBAL] = timestamp;
- /* wait for idle */
- ret = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp),
+ rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
done:
- vfree(rb_buffer);
+ adreno_set_max_ts_for_bad_ctxs(device);
+ adreno_mark_context_status(device, ret);
+ if (!ret)
+ KGSL_DRV_ERR(device, "Recovery succeeded\n");
+ else
+ KGSL_DRV_ERR(device, "Recovery failed\n");
return ret;
}
-int adreno_dump_and_recover(struct kgsl_device *device)
+int
+adreno_dump_and_recover(struct kgsl_device *device)
{
int result = -ETIMEDOUT;
+ struct adreno_recovery_data rec_data;
if (device->state == KGSL_STATE_HUNG)
goto done;
@@ -934,7 +1094,8 @@
INIT_COMPLETION(device->recovery_gate);
/* Detected a hang */
-
+ /* Get the recovery data as soon as hang is detected */
+ result = adreno_setup_recovery_data(device, &rec_data);
/*
* Trigger an automatic dump of the state to
* the console
@@ -947,11 +1108,14 @@
*/
kgsl_device_snapshot(device, 1);
- result = adreno_recover_hang(device);
- if (result)
+ result = adreno_recover_hang(device, &rec_data);
+ adreno_destroy_recovery_data(&rec_data);
+ if (result) {
kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG);
- else
+ } else {
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+ mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
+ }
complete_all(&device->recovery_gate);
}
done:
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 04dc3d6..57f4859 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -33,7 +33,6 @@
#define KGSL_CMD_FLAGS_NONE 0x00000000
#define KGSL_CMD_FLAGS_PMODE 0x00000001
#define KGSL_CMD_FLAGS_NO_TS_CMP 0x00000002
-#define KGSL_CMD_FLAGS_NOT_KERNEL_CMD 0x00000004
/* Command identifiers */
#define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0x2EADBEEF
@@ -115,6 +114,30 @@
unsigned int (*busy_cycles)(struct adreno_device *);
};
+/*
+ * struct adreno_recovery_data - Structure that contains all information to
+ * perform gpu recovery from hangs
+ * @ib1 - IB1 that the GPU was executing when hang happened
+ * @context_id - Context which caused the hang
+ * @global_eop - eoptimestamp at time of hang
+ * @rb_buffer - Buffer that holds the commands from good contexts
+ * @rb_size - Number of valid dwords in rb_buffer
+ * @bad_rb_buffer - Buffer that holds commands from the hanging context
+ * @bad_rb_size - Number of valid dwords in bad_rb_buffer
+ * @last_valid_ctx_id - The last context from which commands were placed in
+ * ringbuffer before the GPU hung
+ */
+struct adreno_recovery_data {
+ unsigned int ib1;
+ unsigned int context_id;
+ unsigned int global_eop;
+ unsigned int *rb_buffer;
+ unsigned int rb_size;
+ unsigned int *bad_rb_buffer;
+ unsigned int bad_rb_size;
+ unsigned int last_valid_ctx_id;
+};
+
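
The structure above is only the container: the patch fills it in adreno_setup_recovery_data(), walks it in adreno_ringbuffer_extract() and _adreno_recover_hang(), and releases it in adreno_destroy_recovery_data(), all earlier in this diff. For orientation, a compressed, userspace-buildable sketch of that lifecycle follows; the local struct copy, the stubbed try_recover() and its hard-coded return values are invented stand-ins for illustration, not driver code.

/* recovery_flow_sketch.c -- schematic model of the recovery retry loop */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct recovery_data {			/* stand-in for adreno_recovery_data */
	unsigned int ib1;
	unsigned int context_id;
	unsigned int global_eop;
	unsigned int *rb_buffer;	/* commands from good contexts */
	unsigned int rb_size;
	unsigned int *bad_rb_buffer;	/* commands from the hanging context */
	unsigned int bad_rb_size;
	unsigned int last_valid_ctx_id;
};

static int setup_recovery_data(struct recovery_data *rec, size_t dwords)
{
	memset(rec, 0, sizeof(*rec));
	rec->rb_buffer = calloc(dwords, sizeof(unsigned int));
	rec->bad_rb_buffer = calloc(dwords, sizeof(unsigned int));
	if (!rec->rb_buffer || !rec->bad_rb_buffer) {
		free(rec->rb_buffer);
		free(rec->bad_rb_buffer);
		return -ENOMEM;
	}
	return 0;
}

static void destroy_recovery_data(struct recovery_data *rec)
{
	free(rec->rb_buffer);
	free(rec->bad_rb_buffer);
}

/* Pretend the first attempt uncovers a second hanging context (-EAGAIN)
 * and the retry succeeds. */
static int try_recover(int attempt, int try_bad_commands)
{
	(void)try_bad_commands;
	return attempt == 0 ? -EAGAIN : 0;
}

int main(void)
{
	struct recovery_data rec;
	int attempt = 0, ret;

	if (setup_recovery_data(&rec, 1024))
		return 1;

	for (;;) {
		/* bad-context commands are only replayed on the first try */
		ret = try_recover(attempt, attempt == 0);
		if (ret != -EAGAIN)
			break;
		/* more than one context hung: rebuild the snapshot, retry */
		destroy_recovery_data(&rec);
		if (setup_recovery_data(&rec, 1024))
			return 1;
		attempt++;
	}

	printf("recovery %s, retries: %d\n",
	       ret ? "failed" : "succeeded", attempt);
	destroy_recovery_data(&rec);
	return ret ? 1 : 0;
}
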
extern struct adreno_gpudev adreno_a2xx_gpudev;
extern struct adreno_gpudev adreno_a3xx_gpudev;
@@ -266,7 +289,6 @@
{
unsigned int *start = cmds;
- cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
*cmds++ = cp_type0_packet(MH_MMU_MPU_END, 1);
*cmds++ = new_phys_limit;
cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
@@ -279,7 +301,6 @@
{
unsigned int *start = cmds;
- cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
*cmds++ = cp_type0_packet(REG_CP_STATE_DEBUG_INDEX, 1);
*cmds++ = (cur_ctx_bank ? 0 : 0x20);
cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 152fc76..bb89067 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -1236,12 +1236,13 @@
*cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
/* HLSQ_CONTROL_0_REG */
*cmds++ = _SET(HLSQ_CTRL0REG_FSTHREADSIZE, HLSQ_FOUR_PIX_QUADS) |
+ _SET(HLSQ_CTRL0REG_FSSUPERTHREADENABLE, 1) |
_SET(HLSQ_CTRL0REG_SPSHADERRESTART, 1) |
_SET(HLSQ_CTRL0REG_CHUNKDISABLE, 1) |
- _SET(HLSQ_CTRL0REG_SPCONSTFULLUPDATE, 1) |
- _SET(HLSQ_CTRL0REG_TPFULLUPDATE, 1);
+ _SET(HLSQ_CTRL0REG_SPCONSTFULLUPDATE, 1);
/* HLSQ_CONTROL_1_REG */
- *cmds++ = _SET(HLSQ_CTRL1REG_VSTHREADSIZE, HLSQ_TWO_VTX_QUADS);
+ *cmds++ = _SET(HLSQ_CTRL1REG_VSTHREADSIZE, HLSQ_TWO_VTX_QUADS) |
+ _SET(HLSQ_CTRL1REG_VSSUPERTHREADENABLE, 1);
/* HLSQ_CONTROL_2_REG */
*cmds++ = _SET(HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD, 31);
/* HLSQ_CONTROL3_REG */
@@ -1268,6 +1269,9 @@
*cmds++ = 0x00000240;
*cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
/* Texture memobjs */
*cmds++ = cp_type3_packet(CP_LOAD_STATE, 6);
*cmds++ = (16 << CP_LOADSTATE_DSTOFFSET_SHIFT)
@@ -1281,6 +1285,9 @@
*cmds++ = (shadow->pitch*4*8) << 9;
*cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
/* Mipmap bases */
*cmds++ = cp_type3_packet(CP_LOAD_STATE, 16);
*cmds++ = (224 << CP_LOADSTATE_DSTOFFSET_SHIFT)
@@ -1304,6 +1311,9 @@
*cmds++ = 0x00000000;
*cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
*cmds++ = CP_REG(A3XX_HLSQ_VS_CONTROL_REG);
/* HLSQ_VS_CONTROL_REG */
@@ -1377,9 +1387,11 @@
*cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
_SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
_SET(SP_FSCTRLREG0_FSICACHEINVALID, 1) |
- _SET(SP_FSCTRLREG0_FSFULLREGFOOTPRINT, 2) |
+ _SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSFULLREGFOOTPRINT, 1) |
_SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
_SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
+ _SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
_SET(SP_FSCTRLREG0_PIXLODENABLE, 1) |
_SET(SP_FSCTRLREG0_FSLENGTH, 2);
/* SP_FS_CTRL_REG1 */
@@ -1388,7 +1400,7 @@
_SET(SP_FSCTRLREG1_HALFPRECVAROFFSET, 63);
/* SP_FS_OBJ_OFFSET_REG */
*cmds++ = _SET(SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET, 128) |
- _SET(SP_OBJOFFSETREG_SHADEROBJOFFSETINIC, 1);
+ _SET(SP_OBJOFFSETREG_SHADEROBJOFFSETINIC, 126);
/* SP_FS_OBJ_START_REG */
*cmds++ = 0x00000000;
@@ -1407,7 +1419,7 @@
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
*cmds++ = CP_REG(A3XX_SP_FS_MRT_REG_0);
/* SP_FS_MRT_REG0 */
- *cmds++ = _SET(SP_FSMRTREG_REGID, 4);
+ *cmds++ = _SET(SP_FSMRTREG_PRECISION, 1);
/* SP_FS_MRT_REG1 */
*cmds++ = 0;
/* SP_FS_MRT_REG2 */
@@ -1504,7 +1516,8 @@
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(A3XX_SP_SP_CTRL_REG);
/* SP_SP_CTRL_REG */
- *cmds++ = _SET(SP_SPCTRLREG_SLEEPMODE, 1);
+ *cmds++ = _SET(SP_SPCTRLREG_SLEEPMODE, 1) |
+ _SET(SP_SPCTRLREG_LOMODE, 1);
/* Load vertex shader */
*cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
@@ -1515,7 +1528,7 @@
*cmds++ = (HLSQ_SP_VS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
/* (sy)end; */
- *cmds++ = 0x00000000; *cmds++ = 0x13000000;
+ *cmds++ = 0x00000000; *cmds++ = 0x13001000;
/* nop; */
*cmds++ = 0x00000000; *cmds++ = 0x00000000;
/* nop; */
@@ -1523,6 +1536,13 @@
/* nop; */
*cmds++ = 0x00000000; *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+
/* Load fragment shader */
*cmds++ = cp_type3_packet(CP_LOAD_STATE, 18);
*cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
@@ -1532,21 +1552,27 @@
*cmds++ = (HLSQ_SP_FS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
/* (sy)(rpt1)bary.f (ei)r0.z, (r)0, r0.x; */
- *cmds++ = 0x00002000; *cmds++ = 0x57368902;
+ *cmds++ = 0x00002000; *cmds++ = 0x57309902;
/* (rpt5)nop; */
*cmds++ = 0x00000000; *cmds++ = 0x00000500;
/* sam (f32)r0.xyzw, r0.z, s#0, t#0; */
*cmds++ = 0x00000005; *cmds++ = 0xa0c01f00;
/* (sy)mov.f32f32 r1.x, r0.x; */
- *cmds++ = 0x00000000; *cmds++ = 0x30044004;
+ *cmds++ = 0x00000000; *cmds++ = 0x30040b00;
/* mov.f32f32 r1.y, r0.y; */
- *cmds++ = 0x00000001; *cmds++ = 0x20044005;
- /* mov.f32f32 r1.z, r0.z; */
- *cmds++ = 0x00000002; *cmds++ = 0x20044006;
- /* mov.f32f32 r1.w, r0.w; */
- *cmds++ = 0x00000003; *cmds++ = 0x20044007;
- /* end; */
*cmds++ = 0x00000000; *cmds++ = 0x03000000;
+ /* mov.f32f32 r1.z, r0.z; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+ /* mov.f32f32 r1.w, r0.w; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+ /* end; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(A3XX_VFD_CONTROL_0);
@@ -1599,16 +1625,16 @@
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(A3XX_RB_DEPTH_CONTROL);
/* RB_DEPTH_CONTROL */
- *cmds++ = _SET(RB_DEPTHCONTROL_Z_TEST_FUNC, RB_FRAG_NEVER);
+ *cmds++ = _SET(RB_DEPTHCONTROL_Z_TEST_FUNC, RB_FRAG_LESS);
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(A3XX_RB_STENCIL_CONTROL);
/* RB_STENCIL_CONTROL */
- *cmds++ = _SET(RB_STENCILCONTROL_STENCIL_FUNC, RB_REF_NEVER) |
+ *cmds++ = _SET(RB_STENCILCONTROL_STENCIL_FUNC, RB_REF_ALWAYS) |
_SET(RB_STENCILCONTROL_STENCIL_FAIL, RB_STENCIL_KEEP) |
_SET(RB_STENCILCONTROL_STENCIL_ZPASS, RB_STENCIL_KEEP) |
_SET(RB_STENCILCONTROL_STENCIL_ZFAIL, RB_STENCIL_KEEP) |
- _SET(RB_STENCILCONTROL_STENCIL_FUNC_BF, RB_REF_NEVER) |
+ _SET(RB_STENCILCONTROL_STENCIL_FUNC_BF, RB_REF_ALWAYS) |
_SET(RB_STENCILCONTROL_STENCIL_FAIL_BF, RB_STENCIL_KEEP) |
_SET(RB_STENCILCONTROL_STENCIL_ZPASS_BF, RB_STENCIL_KEEP) |
_SET(RB_STENCILCONTROL_STENCIL_ZFAIL_BF, RB_STENCIL_KEEP);
@@ -1634,9 +1660,8 @@
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(A3XX_RB_MRT_CONTROL0);
/* RB_MRT_CONTROL0 */
- *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
- _SET(RB_MRTCONTROL_ROP_CODE, 12) |
- _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
+ *cmds++ = _SET(RB_MRTCONTROL_ROP_CODE, 12) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
@@ -1651,7 +1676,8 @@
_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
/* RB_MRT_CONTROL1 */
*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
- _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+ _SET(RB_MRTCONTROL_ROP_CODE, 12) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
@@ -1666,7 +1692,8 @@
_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
/* RB_MRT_CONTROL2 */
*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
- _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+ _SET(RB_MRTCONTROL_ROP_CODE, 12) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
@@ -1681,7 +1708,8 @@
_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
/* RB_MRT_CONTROL3 */
*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
- _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+ _SET(RB_MRTCONTROL_ROP_CODE, 12) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
@@ -1700,7 +1728,7 @@
/* VFD_INDEX_MIN */
*cmds++ = 0x00000000;
/* VFD_INDEX_MAX */
- *cmds++ = 0xFFFFFFFF;
+ *cmds++ = 340;
/* VFD_INDEX_OFFSET */
*cmds++ = 0x00000000;
/* TPL1_TP_VS_TEX_OFFSET */
@@ -1709,7 +1737,7 @@
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(A3XX_VFD_VS_THREADING_THRESHOLD);
/* VFD_VS_THREADING_THRESHOLD */
- *cmds++ = _SET(VFD_THREADINGTHRESHOLD_RESERVED6, 12) |
+ *cmds++ = _SET(VFD_THREADINGTHRESHOLD_REGID_THRESHOLD, 15) |
_SET(VFD_THREADINGTHRESHOLD_REGID_VTXCNT, 252);
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
@@ -1734,7 +1762,7 @@
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(A3XX_GRAS_SU_MODE_CONTROL);
/* GRAS_SU_MODE_CONTROL */
- *cmds++ = 0x00000000;
+ *cmds++ = _SET(GRAS_SU_CTRLMODE_LINEHALFWIDTH, 2);
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(A3XX_GRAS_SC_WINDOW_SCISSOR_TL);
@@ -1790,6 +1818,46 @@
PC_DRAW_TRIANGLES) |
_SET(PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST, 1);
+
+ /* oxili_generate_context_roll_packets */
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
+ *cmds++ = 0x00000400;
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
+ *cmds++ = 0x00000400;
+
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00008000; /* SP_VS_MEM_SIZE_REG */
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00008000; /* SP_FS_MEM_SIZE_REG */
+
+ /* Clear cache invalidate bit when re-loading the shader control regs */
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
+ *cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
+ _SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 2) |
+ _SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
+ _SET(SP_VSCTRLREG0_VSLENGTH, 1);
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
+ *cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
+ _SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSFULLREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
+ _SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
+ _SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
+ _SET(SP_FSCTRLREG0_FSLENGTH, 2);
+
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00000000; /* SP_VS_MEM_SIZE_REG */
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00000000; /* SP_FS_MEM_SIZE_REG */
+
+ /* end oxili_generate_context_roll_packets */
+
*cmds++ = cp_type3_packet(CP_DRAW_INDX, 3);
*cmds++ = 0x00000000; /* Viz query info */
*cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_RECTLIST,
@@ -1804,6 +1872,7 @@
return cmds;
}
+
static void build_regrestore_cmds(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt)
{
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 3eb1aba..5b14a69 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -44,6 +44,8 @@
#define CTXT_FLAGS_TRASHSTATE 0x00020000
/* per context timestamps enabled */
#define CTXT_FLAGS_PER_CONTEXT_TS 0x00040000
+/* Context has caused a GPU hang and recovered properly */
+#define CTXT_FLAGS_GPU_HANG_RECOVERED 0x00008000
struct kgsl_device;
struct adreno_device;
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index 7bb65ca..3cc4bcf 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -699,6 +699,10 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_memdesc **reg_map;
+ void *reg_map_array;
+ int num_iommu_units = 0;
+
mb();
if (adreno_is_a2xx(adreno_dev))
@@ -780,6 +784,10 @@
/* extract the latest ib commands from the buffer */
ib_list.count = 0;
i = 0;
+ /* get the register mapped array in case we are using IOMMU */
+ num_iommu_units = kgsl_mmu_get_reg_map_desc(&device->mmu,
+						&reg_map_array);
+ reg_map = reg_map_array;
for (read_idx = 0; read_idx < num_item; ) {
uint32_t this_cmd = rb_copy[read_idx++];
if (adreno_cmd_is_ib(this_cmd)) {
@@ -792,7 +800,10 @@
ib_list.offsets[i],
ib_list.bases[i],
ib_list.sizes[i], 0);
- } else if (this_cmd == cp_type0_packet(MH_MMU_PT_BASE, 1)) {
+ } else if (this_cmd == cp_type0_packet(MH_MMU_PT_BASE, 1) ||
+ (num_iommu_units && this_cmd == (reg_map[0]->gpuaddr +
+ (KGSL_IOMMU_CONTEXT_USER << KGSL_IOMMU_CTX_SHIFT) +
+ KGSL_IOMMU_TTBR0))) {
KGSL_LOG_DUMP(device, "Current pagetable: %x\t"
"pagetable base: %x\n",
@@ -808,6 +819,8 @@
cur_pt_base);
}
}
+ if (num_iommu_units)
+ kfree(reg_map_array);
/* Restore cur_pt_base back to the pt_base of
the process in whose context the GPU hung */
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index afcceee..d54ce6b 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -493,7 +493,8 @@
*/
total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
- total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;
+ /* 2 dwords to store the start of command sequence */
+ total_sizedwords += 2;
if (adreno_is_a3xx(adreno_dev))
total_sizedwords += 7;
@@ -521,10 +522,9 @@
rcmd_gpu = rb->buffer_desc.gpuaddr
+ sizeof(uint)*(rb->wptr-total_sizedwords);
- if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
- GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
- GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
- }
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
+
if (flags & KGSL_CMD_FLAGS_PMODE) {
/* disable protected mode error checking */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
@@ -926,8 +926,7 @@
adreno_drawctxt_switch(adreno_dev, drawctxt, flags);
*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
- drawctxt,
- KGSL_CMD_FLAGS_NOT_KERNEL_CMD,
+ drawctxt, 0,
&link[0], (cmds - link));
KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
@@ -943,187 +942,347 @@
*/
adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
#endif
+ /* If context hung and recovered then return error so that the
+ * application may handle it */
+ if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_RECOVERED)
+ return -EDEADLK;
+ else
+ return 0;
- return 0;
}
-int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
- unsigned int *temp_rb_buffer,
- int *rb_size)
+static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb,
+ unsigned int *ptr,
+ bool inc)
{
- struct kgsl_device *device = rb->device;
- unsigned int rb_rptr;
- unsigned int retired_timestamp;
- unsigned int temp_idx = 0;
- unsigned int value;
+ int status = -EINVAL;
unsigned int val1;
- unsigned int val2;
- unsigned int val3;
- unsigned int copy_rb_contents = 0;
- struct kgsl_context *context;
- unsigned int context_id;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int start_ptr = *ptr;
- GSL_RB_GET_READPTR(rb, &rb->rptr);
-
- /* current_context is the context that is presently active in the
- * GPU, i.e the context in which the hang is caused */
- kgsl_sharedmem_readl(&device->memstore, &context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
- KGSL_DRV_ERR(device, "Last context id: %d\n", context_id);
- context = idr_find(&device->context_idr, context_id);
- if (context == NULL) {
- KGSL_DRV_ERR(device,
- "GPU recovery from hang not possible because last"
- " context id is invalid.\n");
- return -EINVAL;
- }
- retired_timestamp = kgsl_readtimestamp(device, context,
- KGSL_TIMESTAMP_RETIRED);
- KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
- retired_timestamp);
- /*
- * We need to go back in history by 4 dwords from the current location
- * of read pointer as 4 dwords are read to match the end of a command.
- * Also, take care of wrap around when moving back
- */
- if (rb->rptr >= 4)
- rb_rptr = (rb->rptr - 4) * sizeof(unsigned int);
- else
- rb_rptr = rb->buffer_desc.size -
- ((4 - rb->rptr) * sizeof(unsigned int));
- /* Read the rb contents going backwards to locate end of last
- * sucessfully executed command */
- while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
- kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
- if (value == retired_timestamp) {
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val2, rb_rptr);
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
- /* match the pattern found at the end of a command */
- if ((val1 == 2 &&
- val2 == cp_type3_packet(CP_INTERRUPT, 1)
- && val3 == CP_INT_CNTL__RB_INT_MASK) ||
- (val1 == cp_type3_packet(CP_EVENT_WRITE, 3)
- && val2 == CACHE_FLUSH_TS &&
- val3 == (rb->device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(context_id,
- eoptimestamp)))) {
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
- KGSL_DRV_ERR(device,
- "Found end of last executed "
- "command at offset: %x\n",
- rb_rptr / sizeof(unsigned int));
+ while ((start_ptr / sizeof(unsigned int)) != rb->wptr) {
+ if (inc)
+ start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr,
+ size);
+ else
+ start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
+ size);
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
+ if (KGSL_CMD_IDENTIFIER == val1) {
+ if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
+ start_ptr = adreno_ringbuffer_dec_wrapped(
+ start_ptr, size);
+ *ptr = start_ptr;
+ status = 0;
break;
- } else {
- if (rb_rptr < (3 * sizeof(unsigned int)))
- rb_rptr = rb->buffer_desc.size -
- (3 * sizeof(unsigned int))
- + rb_rptr;
- else
- rb_rptr -= (3 * sizeof(unsigned int));
+ }
+ }
+ return status;
+}
+
+static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb,
+ unsigned int *rb_rptr,
+ unsigned int global_eop,
+ bool inc)
+{
+ int status = -EINVAL;
+ unsigned int temp_rb_rptr = *rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int val[3];
+ int i = 0;
+ bool check = false;
+
+ if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr)
+ return status;
+
+ do {
+ /* when decrementing we need to decrement first and
+		 * then read, so that we cover all the data */
+ if (!inc)
+ temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
+ temp_rb_rptr, size);
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
+ temp_rb_rptr);
+
+ if (check && ((inc && val[i] == global_eop) ||
+ (!inc && (val[i] ==
+ cp_type3_packet(CP_MEM_WRITE, 2) ||
+ val[i] == CACHE_FLUSH_TS)))) {
+			/* step i back one slot, i.e. i = (i - 1 + 3) % 3, to
+			 * look at the previously read dword */
+ i = (i + 2) % 3;
+ if (val[i] == rb->device->memstore.gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp)) {
+ int j = ((i + 2) % 3);
+ if ((inc && (val[j] == CACHE_FLUSH_TS ||
+ val[j] == cp_type3_packet(
+ CP_MEM_WRITE, 2))) ||
+ (!inc && val[j] == global_eop)) {
+ /* Found the global eop */
+ status = 0;
+ break;
+ }
}
+ /* if no match found then increment i again
+ * since we decremented before matching */
+ i = (i + 1) % 3;
+ }
+ if (inc)
+ temp_rb_rptr = adreno_ringbuffer_inc_wrapped(
+ temp_rb_rptr, size);
+
+ i = (i + 1) % 3;
+ if (2 == i)
+ check = true;
+ } while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr);
+ /* temp_rb_rptr points to the command stream after global eop,
+ * move backward till the start of command sequence */
+ if (!status) {
+ status = _find_start_of_cmd_seq(rb, &temp_rb_rptr, false);
+ if (!status) {
+ *rb_rptr = temp_rb_rptr;
+ KGSL_DRV_ERR(rb->device,
+ "Offset of cmd sequence after eop timestamp: 0x%x\n",
+ temp_rb_rptr / sizeof(unsigned int));
+ }
+ }
+ if (status)
+ KGSL_DRV_ERR(rb->device,
+ "Failed to find the command sequence after eop timestamp\n");
+ return status;
+}
+
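
The index arithmetic in _find_cmd_seq_after_eop_ts() above can be hard to follow: val[] is a three-slot circular window over the most recently read dwords, i always points at the current one, and (i + 2) % 3 / (i + 1) % 3 address the one and two reads before it. Below is a minimal standalone sketch of the same look-back idiom, scanning forward for a three-dword pattern; the constants are made up, not real CP packet values or memstore addresses.

/* circular_window_sketch.c -- the three-slot look-back idiom in isolation */
#include <stdio.h>

/* Made-up dword values; the driver matches CP packet headers and the
 * memstore eoptimestamp address instead. */
#define PATTERN_HDR	0xC0DE0001u
#define PATTERN_ADDR	0x00001000u
#define PATTERN_VAL	0x0000ABCDu

int main(void)
{
	unsigned int stream[] = {
		0x11111111, 0x22222222,
		PATTERN_HDR, PATTERN_ADDR, PATTERN_VAL,
		0x33333333,
	};
	unsigned int val[3];
	size_t n = sizeof(stream) / sizeof(stream[0]);
	int i = 0;
	int check = 0;	/* becomes nonzero before the third read, once
			 * looking back at val[] is safe */

	for (size_t pos = 0; pos < n; pos++) {
		val[i] = stream[pos];

		/* val[i] is the current dword, val[(i + 2) % 3] the previous
		 * read and val[(i + 1) % 3] the read before that */
		if (check && val[i] == PATTERN_VAL &&
		    val[(i + 2) % 3] == PATTERN_ADDR &&
		    val[(i + 1) % 3] == PATTERN_HDR) {
			printf("pattern ends at dword %zu\n", pos);
			break;
		}

		i = (i + 1) % 3;
		if (i == 2)
			check = 1;
	}
	return 0;
}
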
+static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb,
+ unsigned int *rb_rptr,
+ unsigned int ib1)
+{
+ int status = -EINVAL;
+ unsigned int temp_rb_rptr = *rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int val[2];
+ int i = 0;
+ bool check = false;
+ bool ctx_switch = false;
+
+ while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
+
+ if (check && val[i] == ib1) {
+ /* decrement i, i.e i = (i - 1 + 2) % 2 */
+ i = (i + 1) % 2;
+ if (adreno_cmd_is_ib(val[i])) {
+ /* go till start of command sequence */
+ status = _find_start_of_cmd_seq(rb,
+ &temp_rb_rptr, false);
+ KGSL_DRV_ERR(rb->device,
+ "Found the hanging IB at offset 0x%x\n",
+ temp_rb_rptr / sizeof(unsigned int));
+ break;
+ }
+			/* if no match then increment i since we decremented
+ * before checking */
+ i = (i + 1) % 2;
+ }
+		/* Make sure we do not encounter a context switch twice; we can
+		 * encounter it once for the bad context, since the start of
+		 * the search can point to the context switch */
+ if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
+ if (ctx_switch) {
+ KGSL_DRV_ERR(rb->device,
+ "Context switch encountered before bad "
+ "IB found\n");
+ break;
+ }
+ ctx_switch = true;
+ }
+ i = (i + 1) % 2;
+ if (1 == i)
+ check = true;
+ temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
+ size);
+ }
+ if (!status)
+ *rb_rptr = temp_rb_rptr;
+ return status;
+}
+
+static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,
+ unsigned int rb_rptr)
+{
+ unsigned int temp_rb_rptr = rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int val[2];
+ int i = 0;
+ bool check = false;
+ bool cmd_start = false;
+
+ /* Go till the start of the ib sequence and turn on preamble */
+ while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
+ if (check && KGSL_START_OF_IB_IDENTIFIER == val[i]) {
+ /* decrement i */
+ i = (i + 1) % 2;
+ if (val[i] == cp_nop_packet(4)) {
+ temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
+ temp_rb_rptr, size);
+ kgsl_sharedmem_writel(&rb->buffer_desc,
+ temp_rb_rptr, cp_nop_packet(1));
+ }
+ KGSL_DRV_ERR(rb->device,
+ "Turned preamble on at offset 0x%x\n",
+ temp_rb_rptr / 4);
+ break;
+ }
+		/* If we reach the beginning of the next command sequence then
+		 * exit. The first command encountered is the current one, so
+		 * don't break on that. */
+ if (KGSL_CMD_IDENTIFIER == val[i]) {
+ if (cmd_start)
+ break;
+ cmd_start = true;
}
- if (rb_rptr == 0)
- rb_rptr = rb->buffer_desc.size - sizeof(unsigned int);
- else
- rb_rptr -= sizeof(unsigned int);
+ i = (i + 1) % 2;
+ if (1 == i)
+ check = true;
+ temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
+ size);
}
+}
- if ((rb_rptr / sizeof(unsigned int)) == rb->wptr) {
- KGSL_DRV_ERR(device,
- "GPU recovery from hang not possible because last"
- " successful timestamp is overwritten\n");
- return -EINVAL;
- }
- /* rb_rptr is now pointing to the first dword of the command following
- * the last sucessfully executed command sequence. Assumption is that
- * GPU is hung in the command sequence pointed by rb_rptr */
- /* make sure the GPU is not hung in a command submitted by kgsl
- * itself */
- kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
- kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
- adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size));
- if (val1 == cp_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
- KGSL_DRV_ERR(device,
- "GPU recovery from hang not possible because "
- "of hang in kgsl command\n");
- return -EINVAL;
- }
+static void _copy_valid_rb_content(struct adreno_ringbuffer *rb,
+ unsigned int rb_rptr, unsigned int *temp_rb_buffer,
+ int *rb_size, unsigned int *bad_rb_buffer,
+ int *bad_rb_size,
+ int *last_valid_ctx_id)
+{
+ unsigned int good_rb_idx = 0, cmd_start_idx = 0;
+ unsigned int val1 = 0;
+ struct kgsl_context *k_ctxt;
+ struct adreno_context *a_ctxt;
+ unsigned int bad_rb_idx = 0;
+ int copy_rb_contents = 0;
+ unsigned int temp_rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int good_cmd_start_idx = 0;
+ /* Walk the rb from the context switch. Omit any commands
+ * for an invalid context. */
while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
- kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
+
+ if (KGSL_CMD_IDENTIFIER == val1) {
+ /* Start is the NOP dword that comes before
+ * KGSL_CMD_IDENTIFIER */
+ cmd_start_idx = bad_rb_idx - 1;
+ if (copy_rb_contents)
+ good_cmd_start_idx = good_rb_idx - 1;
+ }
+
/* check for context switch indicator */
- if (value == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
- kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
- BUG_ON(value != cp_type3_packet(CP_MEM_WRITE, 2));
- kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
- BUG_ON(val1 != (device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context)));
- kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
- rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
- rb->buffer_desc.size);
+ if (val1 == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
+ unsigned int temp_idx, val2;
+ /* increment by 3 to get to the context_id */
+			temp_rb_rptr = (rb_rptr + 3 * sizeof(unsigned int)) %
+ size;
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
+ temp_rb_rptr);
- /*
- * If other context switches were already lost and
- * and the current context is the one that is hanging,
- * then we cannot recover. Print an error message
- * and leave.
- */
-
- if ((copy_rb_contents == 0) && (value == context_id)) {
- KGSL_DRV_ERR(device, "GPU recovery could not "
- "find the previous context\n");
- return -EINVAL;
- }
-
- /*
- * If we were copying the commands and got to this point
- * then we need to remove the 3 commands that appear
- * before KGSL_CONTEXT_TO_MEM_IDENTIFIER
- */
- if (temp_idx)
- temp_idx -= 3;
/* if context switches to a context that did not cause
* hang then start saving the rb contents as those
* commands can be executed */
- if (value != context_id) {
+ k_ctxt = idr_find(&rb->device->context_idr, val2);
+ if (k_ctxt) {
+ a_ctxt = k_ctxt->devctxt;
+
+ /* If we are changing to a good context and were not
+ * copying commands then copy over commands to the good
+ * context */
+ if (!copy_rb_contents && ((k_ctxt &&
+ !(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) ||
+ !k_ctxt)) {
+ for (temp_idx = cmd_start_idx;
+ temp_idx < bad_rb_idx;
+ temp_idx++)
+ temp_rb_buffer[good_rb_idx++] =
+ bad_rb_buffer[temp_idx];
+ *last_valid_ctx_id = val2;
copy_rb_contents = 1;
- temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
- temp_rb_buffer[temp_idx++] =
- KGSL_CMD_IDENTIFIER;
- temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
- temp_rb_buffer[temp_idx++] =
- KGSL_CONTEXT_TO_MEM_IDENTIFIER;
- temp_rb_buffer[temp_idx++] =
- cp_type3_packet(CP_MEM_WRITE, 2);
- temp_rb_buffer[temp_idx++] = val1;
- temp_rb_buffer[temp_idx++] = value;
- } else {
+ } else if (copy_rb_contents && k_ctxt &&
+ (a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) {
+ /* If we are changing to bad context then remove
+ * the dwords we copied for this sequence from
+ * the good buffer */
+ good_rb_idx = good_cmd_start_idx;
copy_rb_contents = 0;
}
- } else if (copy_rb_contents)
- temp_rb_buffer[temp_idx++] = value;
+ }
+ }
+
+ if (copy_rb_contents)
+ temp_rb_buffer[good_rb_idx++] = val1;
+ /* Copy both good and bad commands for replay to the bad
+ * buffer */
+ bad_rb_buffer[bad_rb_idx++] = val1;
+
+ rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, size);
+ }
+ *rb_size = good_rb_idx;
+ *bad_rb_size = bad_rb_idx;
+}
+
+int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+ struct adreno_recovery_data *rec_data)
+{
+ int status;
+ struct kgsl_device *device = rb->device;
+ unsigned int rb_rptr = rb->wptr * sizeof(unsigned int);
+ struct kgsl_context *context;
+ struct adreno_context *adreno_context;
+
+ context = idr_find(&device->context_idr, rec_data->context_id);
+
+ /* Look for the command stream that is right after the global eop */
+ status = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
+ rec_data->global_eop + 1, false);
+ if (status)
+ goto done;
+
+ if (context) {
+ adreno_context = context->devctxt;
+
+ if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
+ if (rec_data->ib1) {
+ status = _find_hanging_ib_sequence(rb, &rb_rptr,
+ rec_data->ib1);
+ if (status)
+ goto copy_rb_contents;
+ }
+ _turn_preamble_on_for_ib_seq(rb, rb_rptr);
+ } else {
+ status = -EINVAL;
+ }
}
- *rb_size = temp_idx;
- return 0;
+copy_rb_contents:
+ _copy_valid_rb_content(rb, rb_rptr, rec_data->rb_buffer,
+ &rec_data->rb_size,
+ rec_data->bad_rb_buffer,
+ &rec_data->bad_rb_size,
+ &rec_data->last_valid_ctx_id);
+	/* If we failed to get the hanging IB sequence, or preambles are not
+	 * supported, then we cannot execute commands from the bad context */
+ if (status) {
+ rec_data->bad_rb_size = 0;
+ status = 0;
+ }
+	/* If there is no context then there are no commands for the
+	 * good case */
+ if (!context)
+ rec_data->rb_size = 0;
+done:
+ return status;
}
void
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 6429f46..4cc57c2 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -27,6 +27,7 @@
struct kgsl_device;
struct kgsl_device_private;
+struct adreno_recovery_data;
#define GSL_RB_MEMPTRS_SCRATCH_COUNT 8
struct kgsl_rbmemptrs {
@@ -114,8 +115,7 @@
void kgsl_cp_intrcallback(struct kgsl_device *device);
int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
- unsigned int *temp_rb_buffer,
- int *rb_size);
+ struct adreno_recovery_data *rec_data);
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
@@ -139,4 +139,11 @@
return (val + sizeof(unsigned int)) % size;
}
+/* Decrement a value by 4 bytes with wrap-around based on size */
+static inline unsigned int adreno_ringbuffer_dec_wrapped(unsigned int val,
+ unsigned int size)
+{
+ return (val + size - sizeof(unsigned int)) % size;
+}
+
#endif /* __ADRENO_RINGBUFFER_H */
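
The new adreno_ringbuffer_dec_wrapped() and the existing adreno_ringbuffer_inc_wrapped() are plain modular arithmetic on byte offsets, moving one dword (4 bytes) at a time; adding size before subtracting keeps the expression from underflowing an unsigned offset of 0. A small standalone check follows; the buffer size and offsets are arbitrary.

/* rb_wrap_sketch.c -- wrap-around offset helpers in isolation */
#include <stdio.h>

/* Mirrors the two ringbuffer offset helpers from adreno_ringbuffer.h */
static unsigned int inc_wrapped(unsigned int val, unsigned int size)
{
	return (val + sizeof(unsigned int)) % size;
}

static unsigned int dec_wrapped(unsigned int val, unsigned int size)
{
	return (val + size - sizeof(unsigned int)) % size;
}

int main(void)
{
	unsigned int size = 32;	/* a hypothetical 8-dword ringbuffer */

	printf("inc(28, 32) = %u\n", inc_wrapped(28, size));	/* 0: wraps */
	printf("inc(8, 32)  = %u\n", inc_wrapped(8, size));	/* 12 */
	printf("dec(0, 32)  = %u\n", dec_wrapped(0, size));	/* 28: wraps */
	printf("dec(8, 32)  = %u\n", dec_wrapped(8, size));	/* 4 */
	return 0;
}
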
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 6df073a..fbf3bb4 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -121,7 +121,7 @@
return count;
mutex_lock(&device->mutex);
- for (i = 0; i < pwr->num_pwrlevels; i++) {
+ for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
if (max)
pwr->thermal_pwrlevel = i;
@@ -129,7 +129,7 @@
}
}
- if (i == pwr->num_pwrlevels)
+ if (i == (pwr->num_pwrlevels - 1))
goto done;
/*
diff --git a/drivers/iommu/msm_iommu-v2.c b/drivers/iommu/msm_iommu-v2.c
index 6e62e60..26e967d 100644
--- a/drivers/iommu/msm_iommu-v2.c
+++ b/drivers/iommu/msm_iommu-v2.c
@@ -128,7 +128,7 @@
return ret;
}
-static void __reset_iommu(void __iomem *base)
+static void __reset_iommu(void __iomem *base, int smt_size)
{
int i;
@@ -143,15 +143,15 @@
SET_SCR1(base, 0);
SET_SSDR_N(base, 0, 0);
- for (i = 0; i < MAX_NUM_SMR; i++)
+ for (i = 0; i < smt_size; i++)
SET_SMR_VALID(base, i, 0);
mb();
}
-static void __program_iommu(void __iomem *base)
+static void __program_iommu(void __iomem *base, int smt_size)
{
- __reset_iommu(base);
+ __reset_iommu(base, smt_size);
SET_CR0_SMCFCFG(base, 1);
SET_CR0_USFCFG(base, 1);
@@ -182,7 +182,7 @@
static void __program_context(void __iomem *base, int ctx, int ncb,
phys_addr_t pgtable, int redirect,
- u32 *sids, int len)
+ u32 *sids, int len, int smt_size)
{
unsigned int prrr, nmrr;
unsigned int pn;
@@ -227,10 +227,10 @@
/* Program the M2V tables for this context */
for (i = 0; i < len / sizeof(*sids); i++) {
- for (; num < MAX_NUM_SMR; num++)
+ for (; num < smt_size; num++)
if (GET_SMR_VALID(base, num) == 0)
break;
- BUG_ON(num >= MAX_NUM_SMR);
+ BUG_ON(num >= smt_size);
SET_SMR_VALID(base, num, 1);
SET_SMR_MASK(base, num, 0);
@@ -346,8 +346,7 @@
struct msm_iommu_drvdata *iommu_drvdata;
struct msm_iommu_ctx_drvdata *ctx_drvdata;
struct msm_iommu_ctx_drvdata *tmp_drvdata;
- u32 sids[MAX_NUM_SMR];
- int len = 0, ret;
+ int ret;
mutex_lock(&msm_iommu_lock);
@@ -375,14 +374,6 @@
goto fail;
}
- of_get_property(dev->of_node, "qcom,iommu-ctx-sids", &len);
- BUG_ON(len >= sizeof(sids));
- if (of_property_read_u32_array(dev->of_node, "qcom,iommu-ctx-sids",
- sids, len / sizeof(*sids))) {
- ret = -EINVAL;
- goto fail;
- }
-
ret = regulator_enable(iommu_drvdata->gdsc);
if (ret)
goto fail;
@@ -394,11 +385,12 @@
}
if (!msm_iommu_ctx_attached(dev->parent))
- __program_iommu(iommu_drvdata->base);
+ __program_iommu(iommu_drvdata->base, iommu_drvdata->nsmr);
__program_context(iommu_drvdata->base, ctx_drvdata->num,
iommu_drvdata->ncb, __pa(priv->pt.fl_table),
- priv->pt.redirect, sids, len);
+ priv->pt.redirect, ctx_drvdata->sids, ctx_drvdata->nsid,
+ iommu_drvdata->nsmr);
__disable_clocks(iommu_drvdata);
list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
diff --git a/drivers/iommu/msm_iommu_dev-v2.c b/drivers/iommu/msm_iommu_dev-v2.c
index 87e1a46..d3a088a 100644
--- a/drivers/iommu/msm_iommu_dev-v2.c
+++ b/drivers/iommu/msm_iommu_dev-v2.c
@@ -33,12 +33,25 @@
struct msm_iommu_drvdata *drvdata)
{
struct device_node *child;
- int ret;
+ int ret = 0;
+ u32 nsmr;
ret = device_move(&pdev->dev, &msm_iommu_root_dev->dev, DPM_ORDER_NONE);
if (ret)
- return ret;
+ goto fail;
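+
+	/*
+	 * nsmr is the number of SMR (stream matching register) entries this
+	 * IOMMU instance provides; it is read from the device tree since the
+	 * stream match table size can differ between targets.
+	 */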
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,iommu-smt-size",
+ &nsmr);
+ if (ret)
+ goto fail;
+
+ if (nsmr > MAX_NUM_SMR) {
+ pr_err("Invalid SMT size: %d\n", nsmr);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ drvdata->nsmr = nsmr;
for_each_child_of_node(pdev->dev.of_node, child) {
drvdata->ncb++;
if (!of_platform_device_create(child, NULL, &pdev->dev))
@@ -46,7 +59,8 @@
}
drvdata->name = dev_name(&pdev->dev);
- return 0;
+fail:
+ return ret;
}
static atomic_t msm_iommu_next_id = ATOMIC_INIT(-1);
@@ -149,6 +163,7 @@
{
struct resource *r, rp;
int irq, ret;
+ u32 nsid;
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
@@ -181,6 +196,19 @@
&ctx_drvdata->name))
ctx_drvdata->name = dev_name(&pdev->dev);
+ if (!of_get_property(pdev->dev.of_node, "qcom,iommu-ctx-sids", &nsid))
+ return -EINVAL;
+
+ if (nsid >= sizeof(ctx_drvdata->sids))
+ return -EINVAL;
+
+ if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-ctx-sids",
+ ctx_drvdata->sids,
+ nsid / sizeof(*ctx_drvdata->sids))) {
+ return -EINVAL;
+ }
+ ctx_drvdata->nsid = nsid;
+
return 0;
}
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index d49bfa6..9240605 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -181,6 +181,20 @@
To compile this driver as a module, choose M here: the
module will be called leds-pmic-mpp.
+config LEDS_MSM_TRICOLOR
+ tristate "LED Support for Qualcomm tricolor LEDs"
+ depends on LEDS_CLASS && MSM_SMD
+ help
+	  This option enables support for the tricolor LEDs found on
+	  Qualcomm reference boards. Red, green and blue LEDs are
+	  supported. The LEDs are turned on/off and blinked by the
+	  modem upon receiving commands over RPC from this driver.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called leds-msm-tricolor.
+
config LEDS_GPIO_PLATFORM
bool "Platform device bindings for GPIO LEDs"
depends on LEDS_GPIO
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index aa518d4..8edd465 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -52,6 +52,7 @@
obj-$(CONFIG_LEDS_PMIC_MPP) += leds-pmic-mpp.o
obj-$(CONFIG_LEDS_QCIBL) += leds-qci-backlight.o
obj-$(CONFIG_LEDS_MSM_PDM) += leds-msm-pdm.o
+obj-$(CONFIG_LEDS_MSM_TRICOLOR) += leds-msm-tricolor.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/leds-msm-tricolor.c b/drivers/leds/leds-msm-tricolor.c
new file mode 100644
index 0000000..d0715ce
--- /dev/null
+++ b/drivers/leds/leds-msm-tricolor.c
@@ -0,0 +1,410 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+
+#include <linux/leds-msm-tricolor.h>
+#include <mach/msm_rpcrouter.h>
+
+#define LED_RPC_PROG 0x30000091
+#define LED_RPC_VER 0x00030001
+
+#define LED_SUBSCRIBE_PROC 0x03
+#define LED_SUBS_RCV_EVNT 0x01
+#define LED_SUBS_REGISTER 0x00
+#define LED_EVNT_CLASS_ALL 0x00
+#define LINUX_HOST 0x04
+#define LED_CMD_PROC 0x02
+#define TRICOLOR_LED_ID 0x0A
+
+enum tricolor_led_status {
+ ALL_OFF,
+ ALL_ON,
+ BLUE_ON,
+ BLUE_OFF,
+ RED_ON,
+ RED_OFF,
+ GREEN_ON,
+ GREEN_OFF,
+ BLUE_BLINK,
+ RED_BLINK,
+ GREEN_BLINK,
+ BLUE_BLINK_OFF,
+ RED_BLINK_OFF,
+ GREEN_BLINK_OFF,
+ LED_MAX,
+};
+
+struct led_cmd_data_type {
+ u32 cmd_data_type_ptr; /* cmd_data_type ptr */
+ u32 ver; /* version */
+ u32 id; /* command id */
+ u32 handle; /* handle returned from subscribe proc */
+ u32 disc_id1; /* discriminator id */
+	u32 input_ptr;	/* input ptr */
+ u32 input_val; /* command specific data */
+ u32 input_len; /* length of command input */
+ u32 disc_id2; /* discriminator id */
+ u32 output_len; /* length of output data */
+ u32 delayed; /* execution context for modem */
+};
+
+struct led_subscribe_req {
+ u32 subs_ptr; /* subscribe ptr */
+ u32 ver; /* version */
+ u32 srvc; /* command or event */
+ u32 req; /* subscribe or unsubscribe */
+ u32 host_os; /* host operating system */
+ u32 disc_id; /* discriminator id */
+ u32 event; /* event */
+ u32 cb_id; /* callback id */
+ u32 handle_ptr; /* handle ptr */
+ u32 handle_data; /* handle data */
+};
+
+struct tricolor_led_data {
+ struct led_classdev cdev;
+ struct msm_rpc_client *rpc_client;
+ bool blink_status;
+ struct mutex lock;
+ u8 color;
+};
+
+static struct led_subscribe_req *led_subs_req;
+
+static int led_send_cmd_arg(struct msm_rpc_client *client,
+ void *buffer, void *data)
+{
+ struct led_cmd_data_type *led_cmd = buffer;
+ enum tricolor_led_status status = *(enum tricolor_led_status *) data;
+
+ led_cmd->cmd_data_type_ptr = cpu_to_be32(0x01);
+ led_cmd->ver = cpu_to_be32(0x03);
+ led_cmd->id = cpu_to_be32(TRICOLOR_LED_ID);
+ led_cmd->handle = cpu_to_be32(led_subs_req->handle_data);
+ led_cmd->disc_id1 = cpu_to_be32(TRICOLOR_LED_ID);
+ led_cmd->input_ptr = cpu_to_be32(0x01);
+ led_cmd->input_val = cpu_to_be32(status);
+ led_cmd->input_len = cpu_to_be32(0x01);
+ led_cmd->disc_id2 = cpu_to_be32(TRICOLOR_LED_ID);
+ led_cmd->output_len = cpu_to_be32(0x00);
+ led_cmd->delayed = cpu_to_be32(0x00);
+
+ return sizeof(*led_cmd);
+}
+
+static int led_rpc_res(struct msm_rpc_client *client,
+ void *buffer, void *data)
+{
+ uint32_t result;
+
+ result = be32_to_cpu(*((uint32_t *)buffer));
+ pr_debug("%s: request completed: 0x%x\n", __func__, result);
+
+ return 0;
+}
+
+static void led_rpc_set_status(struct msm_rpc_client *client,
+ enum tricolor_led_status status)
+{
+ int rc;
+
+ rc = msm_rpc_client_req(client, LED_CMD_PROC,
+ led_send_cmd_arg, &status, led_rpc_res, NULL, -1);
+ if (rc)
+ pr_err("%s: RPC client request for led failed", __func__);
+
+}
+
+static ssize_t led_blink_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tricolor_led_data *led = dev_get_drvdata(dev);
+
+	return snprintf(buf, 3, "%d\n", led->blink_status);
+}
+
+static ssize_t led_blink_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct tricolor_led_data *led = dev_get_drvdata(dev);
+ enum tricolor_led_status status;
+ unsigned long value;
+ int rc;
+
+ if (size > 2)
+ return -EINVAL;
+
+ rc = kstrtoul(buf, 10, &value);
+ if (rc)
+ return rc;
+
+
+ if (value < LED_OFF || value > led->cdev.max_brightness) {
+ dev_err(dev, "invalid brightness\n");
+ return -EINVAL;
+ }
+
+ switch (led->color) {
+ case LED_COLOR_RED:
+ status = value ? RED_BLINK : RED_BLINK_OFF;
+ break;
+ case LED_COLOR_GREEN:
+ status = value ? GREEN_BLINK : GREEN_BLINK_OFF;
+ break;
+ case LED_COLOR_BLUE:
+ status = value ? BLUE_BLINK : BLUE_BLINK_OFF;
+ break;
+ default:
+ dev_err(dev, "unknown led device\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&led->lock);
+ led->blink_status = !!value;
+ led->cdev.brightness = 0;
+
+ /* program the led blink */
+ led_rpc_set_status(led->rpc_client, status);
+ mutex_unlock(&led->lock);
+
+ return size;
+}
+
+static DEVICE_ATTR(blink, 0644, led_blink_show, led_blink_store);
+
+static void tricolor_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct tricolor_led_data *led;
+ enum tricolor_led_status status;
+
+ led = container_of(led_cdev, struct tricolor_led_data, cdev);
+
+ if (value < LED_OFF || value > led->cdev.max_brightness) {
+ dev_err(led->cdev.dev, "invalid brightness\n");
+ return;
+ }
+
+ switch (led->color) {
+ case LED_COLOR_RED:
+ status = value ? RED_ON : RED_OFF;
+ break;
+ case LED_COLOR_GREEN:
+ status = value ? GREEN_ON : GREEN_OFF;
+ break;
+ case LED_COLOR_BLUE:
+ status = value ? BLUE_ON : BLUE_OFF;
+ break;
+ default:
+ dev_err(led->cdev.dev, "unknown led device\n");
+ return;
+ }
+
+ mutex_lock(&led->lock);
+ led->blink_status = 0;
+ led->cdev.brightness = value;
+
+ /* program the led brightness */
+ led_rpc_set_status(led->rpc_client, status);
+ mutex_unlock(&led->lock);
+}
+
+static enum led_brightness tricolor_led_get(struct led_classdev *led_cdev)
+{
+ struct tricolor_led_data *led;
+
+ led = container_of(led_cdev, struct tricolor_led_data, cdev);
+
+ return led->cdev.brightness;
+}
+
+static int led_rpc_register_subs_arg(struct msm_rpc_client *client,
+ void *buffer, void *data)
+{
+ led_subs_req = buffer;
+
+ led_subs_req->subs_ptr = cpu_to_be32(0x1);
+ led_subs_req->ver = cpu_to_be32(0x1);
+ led_subs_req->srvc = cpu_to_be32(LED_SUBS_RCV_EVNT);
+ led_subs_req->req = cpu_to_be32(LED_SUBS_REGISTER);
+ led_subs_req->host_os = cpu_to_be32(LINUX_HOST);
+ led_subs_req->disc_id = cpu_to_be32(LED_SUBS_RCV_EVNT);
+ led_subs_req->event = cpu_to_be32(LED_EVNT_CLASS_ALL);
+ led_subs_req->cb_id = cpu_to_be32(0x1);
+ led_subs_req->handle_ptr = cpu_to_be32(0x1);
+ led_subs_req->handle_data = cpu_to_be32(0x0);
+
+ return sizeof(*led_subs_req);
+}
+
+static int led_cb_func(struct msm_rpc_client *client, void *buffer, int in_size)
+{
+ struct rpc_request_hdr *hdr = buffer;
+ int rc;
+
+ hdr->type = be32_to_cpu(hdr->type);
+ hdr->xid = be32_to_cpu(hdr->xid);
+ hdr->rpc_vers = be32_to_cpu(hdr->rpc_vers);
+ hdr->prog = be32_to_cpu(hdr->prog);
+ hdr->vers = be32_to_cpu(hdr->vers);
+ hdr->procedure = be32_to_cpu(hdr->procedure);
+
+ msm_rpc_start_accepted_reply(client, hdr->xid,
+ RPC_ACCEPTSTAT_SUCCESS);
+ rc = msm_rpc_send_accepted_reply(client, 0);
+ if (rc)
+ pr_err("%s: sending reply failed: %d\n", __func__, rc);
+
+ return rc;
+}
+
+static int __devinit tricolor_led_probe(struct platform_device *pdev)
+{
+ const struct led_platform_data *pdata = pdev->dev.platform_data;
+ struct msm_rpc_client *rpc_client;
+ struct led_info *curr_led;
+ struct tricolor_led_data *led, *tmp_led;
+ int rc, i, j;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform data not supplied\n");
+ return -EINVAL;
+ }
+
+ /* initialize rpc client */
+ rpc_client = msm_rpc_register_client("led", LED_RPC_PROG,
+ LED_RPC_VER, 0, led_cb_func);
+ rc = IS_ERR(rpc_client);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to initialize rpc_client\n");
+ return -EINVAL;
+ }
+
+ /* subscribe */
+ rc = msm_rpc_client_req(rpc_client, LED_SUBSCRIBE_PROC,
+ led_rpc_register_subs_arg, NULL,
+ led_rpc_res, NULL, -1);
+ if (rc) {
+ pr_err("%s: RPC client request failed for subscribe services\n",
+ __func__);
+ goto fail_mem_alloc;
+ }
+
+ led = devm_kzalloc(&pdev->dev, pdata->num_leds * sizeof(*led),
+ GFP_KERNEL);
+ if (!led) {
+ dev_err(&pdev->dev, "failed to alloc memory\n");
+ rc = -ENOMEM;
+ goto fail_mem_alloc;
+ }
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ curr_led = &pdata->leds[i];
+ tmp_led = &led[i];
+
+ tmp_led->cdev.name = curr_led->name;
+ tmp_led->cdev.default_trigger = curr_led->default_trigger;
+ tmp_led->cdev.brightness_set = tricolor_led_set;
+ tmp_led->cdev.brightness_get = tricolor_led_get;
+ tmp_led->cdev.brightness = LED_OFF;
+ tmp_led->cdev.max_brightness = LED_FULL;
+ tmp_led->color = curr_led->flags;
+ tmp_led->rpc_client = rpc_client;
+ tmp_led->blink_status = false;
+
+ mutex_init(&tmp_led->lock);
+
+ rc = led_classdev_register(&pdev->dev, &tmp_led->cdev);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to register led %s(%d)\n",
+ tmp_led->cdev.name, rc);
+ goto fail_led_reg;
+ }
+
+ /* Add blink attributes */
+ rc = device_create_file(tmp_led->cdev.dev, &dev_attr_blink);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to create blink attr\n");
+ goto fail_blink_attr;
+ }
+ dev_set_drvdata(tmp_led->cdev.dev, tmp_led);
+ }
+
+ platform_set_drvdata(pdev, led);
+
+ return 0;
+
+fail_blink_attr:
+ j = i;
+ while (j)
+ device_remove_file(led[--j].cdev.dev, &dev_attr_blink);
+ i++;
+fail_led_reg:
+ while (i) {
+ led_classdev_unregister(&led[--i].cdev);
+ mutex_destroy(&led[i].lock);
+ }
+fail_mem_alloc:
+ msm_rpc_unregister_client(rpc_client);
+ return rc;
+}
+
+static int __devexit tricolor_led_remove(struct platform_device *pdev)
+{
+ const struct led_platform_data *pdata = pdev->dev.platform_data;
+ struct tricolor_led_data *led = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ led_classdev_unregister(&led[i].cdev);
+ device_remove_file(led[i].cdev.dev, &dev_attr_blink);
+ mutex_destroy(&led[i].lock);
+ }
+
+ msm_rpc_unregister_client(led->rpc_client);
+
+ return 0;
+}
+
+static struct platform_driver tricolor_led_driver = {
+ .probe = tricolor_led_probe,
+ .remove = __devexit_p(tricolor_led_remove),
+ .driver = {
+ .name = "msm-tricolor-leds",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tricolor_led_init(void)
+{
+ return platform_driver_register(&tricolor_led_driver);
+}
+late_initcall(tricolor_led_init);
+
+static void __exit tricolor_led_exit(void)
+{
+ platform_driver_unregister(&tricolor_led_driver);
+}
+module_exit(tricolor_led_exit);
+
+MODULE_DESCRIPTION("MSM Tri-color LEDs driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:tricolor-led");
diff --git a/drivers/media/dvb/dvb-core/demux.h b/drivers/media/dvb/dvb-core/demux.h
index e22bc64..9ad79e9 100644
--- a/drivers/media/dvb/dvb-core/demux.h
+++ b/drivers/media/dvb/dvb-core/demux.h
@@ -63,15 +63,52 @@
*/
enum dmx_success {
- DMX_OK = 0, /* Received Ok */
- DMX_LENGTH_ERROR, /* Incorrect length */
- DMX_OVERRUN_ERROR, /* Receiver ring buffer overrun */
- DMX_CRC_ERROR, /* Incorrect CRC */
- DMX_FRAME_ERROR, /* Frame alignment error */
- DMX_FIFO_ERROR, /* Receiver FIFO overrun */
- DMX_MISSED_ERROR /* Receiver missed packet */
+ DMX_OK = 0, /* Received Ok */
+ DMX_OK_PES_END, /* Received ok, data reached end of PES packet */
+ DMX_OK_PCR, /* Received OK, data with new PCR/STC pair */
+ DMX_LENGTH_ERROR, /* Incorrect length */
+ DMX_OVERRUN_ERROR, /* Receiver ring buffer overrun */
+ DMX_CRC_ERROR, /* Incorrect CRC */
+ DMX_FRAME_ERROR, /* Frame alignment error */
+ DMX_FIFO_ERROR, /* Receiver FIFO overrun */
+ DMX_MISSED_ERROR /* Receiver missed packet */
} ;
+
+/*
+ * struct dmx_data_ready: Parameters for event notification callback.
+ * Event notification notifies demux device that data is written
+ * and available in the device's output buffer or provides
+ * notification on errors and other events. In the latter case
+ * data_length is zero.
+ */
+struct dmx_data_ready {
+ enum dmx_success status;
+
+ /*
+ * data_length may be 0 in case of DMX_OK_PES_END
+ * and in non-DMX_OK_XXX events. In DMX_OK_PES_END,
+ * data_length is for data coming after the end of PES.
+ */
+ int data_length;
+
+ union {
+ struct {
+ int start_gap;
+ int actual_length;
+ int disc_indicator_set;
+ int pes_length_mismatch;
+ u64 stc;
+ } pes_end;
+
+ struct {
+ u64 pcr;
+ u64 stc;
+ int disc_indicator_set;
+ } pcr;
+ };
+};
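+
+/*
+ * Example (sketch): a demux implementation reporting a new PCR/STC pair
+ * through the registered data-ready callback could fill the structure
+ * roughly as follows ('callback', 'ts_feed', 'pcr', 'stc' and 'disc' are
+ * illustrative local names):
+ *
+ *	struct dmx_data_ready data;
+ *
+ *	data.status = DMX_OK_PCR;
+ *	data.data_length = 0;
+ *	data.pcr.pcr = pcr;
+ *	data.pcr.stc = stc;
+ *	data.pcr.disc_indicator_set = disc;
+ *	callback(ts_feed, &data);
+ */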
+
/*--------------------------------------------------------------------------*/
/* TS packet reception */
/*--------------------------------------------------------------------------*/
@@ -123,6 +160,10 @@
#define DMX_TS_PES_SUBTITLE DMX_TS_PES_SUBTITLE0
#define DMX_TS_PES_PCR DMX_TS_PES_PCR0
+struct dmx_ts_feed;
+typedef int (*dmx_ts_data_ready_cb)(
+ struct dmx_ts_feed *source,
+ struct dmx_data_ready *dmx_data_ready);
struct dmx_ts_feed {
int is_filtering; /* Set to non-zero when filtering in progress */
@@ -141,6 +182,8 @@
int (*get_decoder_buff_status)(
struct dmx_ts_feed *feed,
struct dmx_buffer_status *dmx_buffer_status);
+ int (*data_ready_cb)(struct dmx_ts_feed *feed,
+ dmx_ts_data_ready_cb callback);
};
/*--------------------------------------------------------------------------*/
@@ -155,6 +198,11 @@
void* priv; /* Pointer to private data of the API client */
};
+struct dmx_section_feed;
+typedef int (*dmx_section_data_ready_cb)(
+ struct dmx_section_filter *source,
+ struct dmx_data_ready *dmx_data_ready);
+
struct dmx_section_feed {
int is_filtering; /* Set to non-zero when filtering in progress */
struct dmx_demux* parent; /* Back-pointer */
@@ -177,6 +225,8 @@
struct dmx_section_filter* filter);
int (*start_filtering) (struct dmx_section_feed* feed);
int (*stop_filtering) (struct dmx_section_feed* feed);
+ int (*data_ready_cb)(struct dmx_section_feed *feed,
+ dmx_section_data_ready_cb callback);
};
/*--------------------------------------------------------------------------*/
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 8e5127a..433e796 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -60,9 +60,302 @@
return dvb_ringbuffer_write(buf, src, len);
}
+static inline u32 dvb_dmxdev_advance_event_idx(u32 index)
+{
+ index++;
+ if (index >= DMX_EVENT_QUEUE_SIZE)
+ index = 0;
+
+ return index;
+}
+
+static inline void dvb_dmxdev_flush_events(struct dmxdev_events_queue *events)
+{
+ events->read_index = 0;
+ events->write_index = 0;
+ events->notified_index = 0;
+ events->bytes_read_no_event = 0;
+ events->current_event_data_size = 0;
+}
+
+static inline void dvb_dmxdev_flush_output(struct dvb_ringbuffer *buffer,
+ struct dmxdev_events_queue *events)
+{
+ dvb_dmxdev_flush_events(events);
+ dvb_ringbuffer_flush(buffer);
+}
+
+static int dvb_dmxdev_update_pes_event(struct dmx_filter_event *event,
+ int bytes_read)
+{
+ int start_delta;
+
+ if (event->params.pes.total_length <= bytes_read)
+ return event->params.pes.total_length;
+
+ /*
+ * only part of the data relevant to this event was read.
+ * Update the event's information to reflect the new state.
+ */
+ event->params.pes.total_length -= bytes_read;
+
+ start_delta = event->params.pes.start_offset -
+ event->params.pes.base_offset;
+
+ if (bytes_read <= start_delta) {
+ event->params.pes.base_offset +=
+ bytes_read;
+ } else {
+ start_delta =
+ bytes_read - start_delta;
+
+ event->params.pes.start_offset += start_delta;
+ event->params.pes.actual_length -= start_delta;
+
+ event->params.pes.base_offset =
+ event->params.pes.start_offset;
+ }
+
+ return 0;
+}
+
+static int dvb_dmxdev_update_section_event(struct dmx_filter_event *event,
+ int bytes_read)
+{
+ int start_delta;
+
+ if (event->params.section.total_length <= bytes_read)
+ return event->params.section.total_length;
+
+ /*
+ * only part of the data relevant to this event was read.
+ * Update the event's information to reflect the new state.
+ */
+
+ event->params.section.total_length -= bytes_read;
+
+ start_delta = event->params.section.start_offset -
+ event->params.section.base_offset;
+
+ if (bytes_read <= start_delta) {
+ event->params.section.base_offset +=
+ bytes_read;
+ } else {
+ start_delta =
+ bytes_read - start_delta;
+
+ event->params.section.start_offset += start_delta;
+ event->params.section.actual_length -= start_delta;
+
+ event->params.section.base_offset =
+ event->params.section.start_offset;
+ }
+
+ return 0;
+}
+
+static int dvb_dmxdev_update_rec_event(struct dmx_filter_event *event,
+ int bytes_read)
+{
+ if (event->params.recording_chunk.size <= bytes_read)
+ return event->params.recording_chunk.size;
+
+ /*
+ * only part of the data relevant to this event was read.
+ * Update the event's information to reflect the new state.
+ */
+ event->params.recording_chunk.size -= bytes_read;
+ event->params.recording_chunk.offset += bytes_read;
+
+ return 0;
+}
+
+static int dvb_dmxdev_add_event(struct dmxdev_events_queue *events,
+ struct dmx_filter_event *event)
+{
+ int res;
+ int new_write_index;
+ int data_event;
+
+	/* Check if we are adding an event whose data the user already read */
+ if (events->bytes_read_no_event) {
+ data_event = 1;
+
+ if (event->type == DMX_EVENT_NEW_PES)
+ res = dvb_dmxdev_update_pes_event(event,
+ events->bytes_read_no_event);
+ else if (event->type == DMX_EVENT_NEW_SECTION)
+ res = dvb_dmxdev_update_section_event(event,
+ events->bytes_read_no_event);
+ else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+ res = dvb_dmxdev_update_rec_event(event,
+ events->bytes_read_no_event);
+ else
+ data_event = 0;
+
+ if (data_event) {
+ if (res) {
+ /*
+				 * Data relevant to this event was fully
+ * consumed already, discard event.
+ */
+ events->bytes_read_no_event -= res;
+ return 0;
+ }
+ events->bytes_read_no_event = 0;
+ } else {
+ /*
+ * data was read beyond the non-data event,
+ * making it not relevant anymore
+ */
+ return 0;
+ }
+ }
+
+ new_write_index = dvb_dmxdev_advance_event_idx(events->write_index);
+ if (new_write_index == events->read_index) {
+ printk(KERN_ERR "dmxdev: events overflow\n");
+ return -EOVERFLOW;
+ }
+
+ events->queue[events->write_index] = *event;
+ events->write_index = new_write_index;
+
+ return 0;
+}
+
+static int dvb_dmxdev_remove_event(struct dmxdev_events_queue *events,
+ struct dmx_filter_event *event)
+{
+ if (events->notified_index == events->write_index)
+ return -ENODATA;
+
+ *event = events->queue[events->notified_index];
+
+ events->notified_index =
+ dvb_dmxdev_advance_event_idx(events->notified_index);
+
+ return 0;
+}
+
+static int dvb_dmxdev_update_events(struct dmxdev_events_queue *events,
+ int bytes_read)
+{
+ struct dmx_filter_event *event;
+ int res;
+ int data_event;
+
+ /*
+ * Go through all events that were notified and
+ * remove them from the events queue if their respective
+ * data was read.
+ */
+ while ((events->read_index != events->notified_index) &&
+ (bytes_read)) {
+ event = events->queue + events->read_index;
+
+ data_event = 1;
+
+ if (event->type == DMX_EVENT_NEW_PES)
+ res = dvb_dmxdev_update_pes_event(event, bytes_read);
+ else if (event->type == DMX_EVENT_NEW_SECTION)
+ res = dvb_dmxdev_update_section_event(event,
+ bytes_read);
+ else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+ res = dvb_dmxdev_update_rec_event(event, bytes_read);
+ else
+ data_event = 0;
+
+ if (data_event) {
+ if (res) {
+ /*
+				 * Data relevant to this event was
+ * fully consumed, remove it from the queue.
+ */
+ bytes_read -= res;
+ events->read_index =
+ dvb_dmxdev_advance_event_idx(
+ events->read_index);
+ } else {
+ bytes_read = 0;
+ }
+ } else {
+ /*
+ * non-data event was already notified,
+ * no need to keep it
+ */
+ events->read_index = dvb_dmxdev_advance_event_idx(
+ events->read_index);
+ }
+ }
+
+ if (!bytes_read)
+ return 0;
+
+ /*
+ * If we reached here it means:
+ * bytes_read != 0
+ * events->read_index == events->notified_index
+	 * Check if there are pending events in the queue whose
+	 * data was already read but which the user has not yet
+	 * fetched.
+ */
+ while ((events->notified_index != events->write_index) &&
+ (bytes_read)) {
+ event = events->queue + events->notified_index;
+
+ data_event = 1;
+
+ if (event->type == DMX_EVENT_NEW_PES)
+ res = dvb_dmxdev_update_pes_event(event, bytes_read);
+ else if (event->type == DMX_EVENT_NEW_SECTION)
+ res = dvb_dmxdev_update_section_event(event,
+ bytes_read);
+ else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+ res = dvb_dmxdev_update_rec_event(event, bytes_read);
+ else
+ data_event = 0;
+
+ if (data_event) {
+ if (res) {
+ /*
+				 * Data relevant to this event was
+ * fully consumed, remove it from the queue.
+ */
+ bytes_read -= res;
+ events->notified_index =
+ dvb_dmxdev_advance_event_idx(
+ events->notified_index);
+ } else {
+ bytes_read = 0;
+ }
+ } else {
+ if (bytes_read)
+ /*
+ * data was read beyond the non-data event,
+ * making it not relevant anymore
+ */
+ events->notified_index =
+ dvb_dmxdev_advance_event_idx(
+ events->notified_index);
+ }
+
+ events->read_index = events->notified_index;
+ }
+
+ /*
+ * Check if data was read without having a respective
+ * event in the events-queue
+ */
+ if (bytes_read)
+ events->bytes_read_no_event += bytes_read;
+
+ return 0;
+}
+
static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
- int non_blocking, char __user *buf,
- size_t count, loff_t *ppos)
+ int non_blocking, char __user *buf,
+ size_t count, loff_t *ppos)
{
size_t todo;
ssize_t avail;
@@ -73,7 +366,7 @@
if (src->error) {
ret = src->error;
- dvb_ringbuffer_flush(src);
+ src->error = 0;
return ret;
}
@@ -94,7 +387,7 @@
if (src->error) {
ret = src->error;
- dvb_ringbuffer_flush(src);
+ src->error = 0;
break;
}
@@ -166,6 +459,8 @@
return -ENOMEM;
}
dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
+ dvb_dmxdev_flush_events(&dmxdev->dvr_output_events);
+
dvbdev->readers--;
} else if (!dvbdev->writers) {
dmxdev->dvr_in_exit = 0;
@@ -412,15 +707,24 @@
static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
+ ssize_t res;
struct dvb_device *dvbdev = file->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
if (dmxdev->exit)
return -ENODEV;
- return dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
+ res = dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
file->f_flags & O_NONBLOCK,
buf, count, ppos);
+
+ if (res > 0) {
+ spin_lock_irq(&dmxdev->lock);
+ dvb_dmxdev_update_events(&dmxdev->dvr_output_events, res);
+ spin_unlock_irq(&dmxdev->lock);
+ }
+
+ return res;
}
static void dvr_input_work_func(struct work_struct *worker)
@@ -556,6 +860,27 @@
return 0;
}
+static int dvb_dvr_get_event(struct dmxdev *dmxdev,
+ unsigned int f_flags,
+ struct dmx_filter_event *event)
+{
+ int res;
+
+ if (!((f_flags & O_ACCMODE) == O_RDONLY))
+ return -EINVAL;
+
+ spin_lock_irq(&dmxdev->lock);
+
+ res = dvb_dmxdev_remove_event(&dmxdev->dvr_output_events, event);
+
+ if (event->type == DMX_EVENT_BUFFER_OVERFLOW)
+ dmxdev->dvr_buffer.error = 0;
+
+ spin_unlock_irq(&dmxdev->lock);
+
+ return res;
+}
+
static int dvb_dvr_get_buffer_status(struct dmxdev *dmxdev,
unsigned int f_flags,
struct dmx_buffer_status *dmx_buffer_status)
@@ -574,8 +899,7 @@
spin_lock_irq(lock);
dmx_buffer_status->error = buf->error;
- if (buf->error)
- dvb_ringbuffer_flush(buf);
+ buf->error = 0;
dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
@@ -594,7 +918,7 @@
{
ssize_t buff_fullness;
- if (!(f_flags & O_ACCMODE) == O_RDONLY)
+ if (!((f_flags & O_ACCMODE) == O_RDONLY))
return -EINVAL;
if (!bytes_count)
@@ -606,6 +930,11 @@
return -EINVAL;
DVB_RINGBUFFER_SKIP(&dmxdev->dvr_buffer, bytes_count);
+
+ spin_lock_irq(&dmxdev->lock);
+ dvb_dmxdev_update_events(&dmxdev->dvr_output_events, bytes_count);
+ spin_unlock_irq(&dmxdev->lock);
+
wake_up_all(&dmxdev->dvr_buffer.queue);
return 0;
}
@@ -854,40 +1183,27 @@
struct dmxdev_feed *feed;
int ret;
- /*
- * Ask for status of decoder's buffer from underlying HW.
- * In case of PCR/STC extraction, the filter's ring-buffer
- * is used to gather the PCR/STC data and not using
- * an internal decoder buffer.
- */
- if (!(dmxdevfilter->dev->capabilities &
- DMXDEV_CAP_PCR_EXTRACTION) ||
- ((dmxdevfilter->params.pes.pes_type != DMX_PES_PCR0) &&
- (dmxdevfilter->params.pes.pes_type != DMX_PES_PCR1) &&
- (dmxdevfilter->params.pes.pes_type != DMX_PES_PCR2) &&
- (dmxdevfilter->params.pes.pes_type != DMX_PES_PCR3))) {
- list_for_each_entry(feed, &dmxdevfilter->feed.ts,
- next) {
- if (feed->ts->get_decoder_buff_status)
- ret = feed->ts->get_decoder_buff_status(
- feed->ts,
- dmx_buffer_status);
- else
- ret = -ENODEV;
+ /* Ask for status of decoder's buffer from underlying HW */
+ list_for_each_entry(feed, &dmxdevfilter->feed.ts,
+ next) {
+ if (feed->ts->get_decoder_buff_status)
+ ret = feed->ts->get_decoder_buff_status(
+ feed->ts,
+ dmx_buffer_status);
+ else
+ ret = -ENODEV;
- /*
- * There should not be more than one ts feed
- * in the list as this is DECODER feed.
- */
- spin_unlock_irq(&dmxdevfilter->dev->lock);
- return ret;
- }
+ /*
+ * There should not be more than one ts feed
+ * in the list as this is DECODER feed.
+ */
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+ return ret;
}
}
dmx_buffer_status->error = buf->error;
- if (buf->error)
- dvb_ringbuffer_flush(buf);
+ buf->error = 0;
dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
@@ -918,11 +1234,33 @@
DVB_RINGBUFFER_SKIP(&dmxdevfilter->buffer, bytes_count);
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ dvb_dmxdev_update_events(&dmxdevfilter->events, bytes_count);
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
wake_up_all(&dmxdevfilter->buffer.queue);
return 0;
}
+static int dvb_dmxdev_get_event(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_filter_event *event)
+{
+ int res;
+
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+
+ res = dvb_dmxdev_remove_event(&dmxdevfilter->events, event);
+
+ if (event->type == DMX_EVENT_BUFFER_OVERFLOW)
+ dmxdevfilter->buffer.error = 0;
+
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ return res;
+
+}
+
static void dvb_dmxdev_filter_timeout(unsigned long data)
{
struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data;
@@ -954,6 +1292,7 @@
enum dmx_success success)
{
struct dmxdev_filter *dmxdevfilter = filter->priv;
+ struct dmx_filter_event event;
int ret;
if (dmxdevfilter->buffer.error) {
@@ -965,20 +1304,57 @@
spin_unlock(&dmxdevfilter->dev->lock);
return 0;
}
+
+ if ((buffer1_len + buffer2_len) == 0) {
+ if (DMX_CRC_ERROR == success) {
+ /* Section was dropped due to CRC error */
+ event.type = DMX_EVENT_SECTION_CRC_ERROR;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ }
+
+ return 0;
+ }
+
+ event.params.section.base_offset = dmxdevfilter->buffer.pwrite;
+ event.params.section.start_offset = dmxdevfilter->buffer.pwrite;
+
del_timer(&dmxdevfilter->timer);
dprintk("dmxdev: section callback %02x %02x %02x %02x %02x %02x\n",
buffer1[0], buffer1[1],
buffer1[2], buffer1[3], buffer1[4], buffer1[5]);
ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1,
buffer1_len);
- if (ret == buffer1_len) {
+ if (ret == buffer1_len)
ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2,
buffer2_len);
- }
+
if (ret < 0) {
- dvb_ringbuffer_flush(&dmxdevfilter->buffer);
+ dvb_dmxdev_flush_output(&dmxdevfilter->buffer,
+ &dmxdevfilter->events);
dmxdevfilter->buffer.error = ret;
+
+ event.type = DMX_EVENT_BUFFER_OVERFLOW;
+ } else {
+ event.type = DMX_EVENT_NEW_SECTION;
+ event.params.section.total_length =
+ buffer1_len + buffer2_len;
+ event.params.section.actual_length =
+ event.params.section.total_length;
+
+ if (success == DMX_MISSED_ERROR)
+ event.params.section.flags =
+ DMX_FILTER_CC_ERROR;
+ else
+ event.params.section.flags = 0;
}
+
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
dmxdevfilter->state = DMXDEV_STATE_DONE;
spin_unlock(&dmxdevfilter->dev->lock);
@@ -993,46 +1369,298 @@
{
struct dmxdev_filter *dmxdevfilter = feed->priv;
struct dvb_ringbuffer *buffer;
+ struct dmxdev_events_queue *events;
+ struct dmx_filter_event event;
int ret;
spin_lock(&dmxdevfilter->dev->lock);
if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
- if ((dmxdevfilter->dev->capabilities &
- DMXDEV_CAP_PCR_EXTRACTION) &&
- ((dmxdevfilter->params.pes.pes_type == DMX_PES_PCR0) ||
- (dmxdevfilter->params.pes.pes_type == DMX_PES_PCR1) ||
- (dmxdevfilter->params.pes.pes_type == DMX_PES_PCR2) ||
- (dmxdevfilter->params.pes.pes_type == DMX_PES_PCR3))) {
- /*
- * Support for reporting PCR and STC pairs to user.
- * Reported data should have the following format:
- * <8 bit flags><64 bits of STC> <64bits of PCR>
- * STC and PCR values are in 27MHz.
- * The current flags that are defined:
- * 0x00000001: discontinuity_indicator
- */
- buffer = &dmxdevfilter->buffer;
- } else {
- spin_unlock(&dmxdevfilter->dev->lock);
- return 0;
- }
- } else if (dmxdevfilter->params.pes.output == DMX_OUT_TAP
- || dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ if (dmxdevfilter->params.pes.output == DMX_OUT_TAP
+ || dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) {
buffer = &dmxdevfilter->buffer;
- else
+ events = &dmxdevfilter->events;
+ } else {
buffer = &dmxdevfilter->dev->dvr_buffer;
+ events = &dmxdevfilter->dev->dvr_output_events;
+ }
if (buffer->error) {
spin_unlock(&dmxdevfilter->dev->lock);
wake_up_all(&buffer->queue);
return 0;
}
- ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
- if (ret == buffer1_len)
- ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
- if (ret < 0) {
- dvb_ringbuffer_flush(buffer);
- buffer->error = ret;
+
+ if (dmxdevfilter->state != DMXDEV_STATE_GO) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ if (dmxdevfilter->params.pes.output == DMX_OUT_TAP) {
+ if ((success == DMX_OK) &&
+ (!events->current_event_data_size)) {
+ events->current_event_start_offset = buffer->pwrite;
+ } else if (success == DMX_OK_PES_END) {
+ event.type = DMX_EVENT_NEW_PES;
+
+ event.params.pes.actual_length =
+ events->current_event_data_size;
+ event.params.pes.total_length =
+ events->current_event_data_size;
+
+ event.params.pes.base_offset =
+ events->current_event_start_offset;
+ event.params.pes.start_offset =
+ events->current_event_start_offset;
+
+ event.params.pes.flags = 0;
+ event.params.pes.stc = 0;
+
+ dvb_dmxdev_add_event(events, &event);
+ events->current_event_data_size = 0;
+ }
+ } else {
+ if (!events->current_event_data_size) {
+ events->current_event_start_offset =
+ buffer->pwrite;
+ }
+ }
+
+ if (buffer1_len + buffer2_len) {
+ ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
+ if (ret == buffer1_len)
+ ret = dvb_dmxdev_buffer_write(buffer, buffer2,
+ buffer2_len);
+ if (ret < 0) {
+ dvb_dmxdev_flush_output(buffer, events);
+ buffer->error = ret;
+
+ event.type = DMX_EVENT_BUFFER_OVERFLOW;
+ dvb_dmxdev_add_event(events, &event);
+ } else {
+ events->current_event_data_size +=
+ (buffer1_len + buffer2_len);
+
+ if (((dmxdevfilter->params.pes.output ==
+ DMX_OUT_TS_TAP) ||
+ (dmxdevfilter->params.pes.output ==
+ DMX_OUT_TSDEMUX_TAP)) &&
+ (events->current_event_data_size >=
+ dmxdevfilter->params.pes.rec_chunk_size)) {
+
+ event.type = DMX_EVENT_NEW_REC_CHUNK;
+ event.params.recording_chunk.offset =
+ events->current_event_start_offset;
+
+ event.params.recording_chunk.size =
+ events->current_event_data_size;
+
+ dvb_dmxdev_add_event(events, &event);
+ events->current_event_data_size = 0;
+ }
+ }
+ }
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+}
+
+static int dvb_dmxdev_section_event_cb(struct dmx_section_filter *filter,
+ struct dmx_data_ready *dmx_data_ready)
+{
+ int res;
+ struct dmxdev_filter *dmxdevfilter = filter->priv;
+ struct dmx_filter_event event;
+ int free;
+
+ if (dmxdevfilter->buffer.error) {
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ return 0;
+ }
+
+ spin_lock(&dmxdevfilter->dev->lock);
+
+ if (dmxdevfilter->state != DMXDEV_STATE_GO) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ if (dmx_data_ready->data_length == 0) {
+ if (DMX_CRC_ERROR == dmx_data_ready->status) {
+ /* Section was dropped due to CRC error */
+ event.type = DMX_EVENT_SECTION_CRC_ERROR;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ }
+ return 0;
+ }
+
+ free = dvb_ringbuffer_free(&dmxdevfilter->buffer);
+
+ if ((DMX_OVERRUN_ERROR == dmx_data_ready->status) ||
+ (dmx_data_ready->data_length > free)) {
+ dvb_dmxdev_flush_output(&dmxdevfilter->buffer,
+ &dmxdevfilter->events);
+
+ dprintk("dmxdev: buffer overflow\n");
+
+ dmxdevfilter->buffer.error = -EOVERFLOW;
+
+ event.type = DMX_EVENT_BUFFER_OVERFLOW;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ return 0;
+ }
+
+ event.type = DMX_EVENT_NEW_SECTION;
+ event.params.section.base_offset = dmxdevfilter->buffer.pwrite;
+ event.params.section.start_offset = dmxdevfilter->buffer.pwrite;
+ event.params.section.total_length = dmx_data_ready->data_length;
+ event.params.section.actual_length = dmx_data_ready->data_length;
+
+ if (dmx_data_ready->status == DMX_MISSED_ERROR)
+ event.params.section.flags = DMX_FILTER_CC_ERROR;
+ else
+ event.params.section.flags = 0;
+
+ res = dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ DVB_RINGBUFFER_PUSH(&dmxdevfilter->buffer, dmx_data_ready->data_length);
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+
+ return res;
+}
+
+static int dvb_dmxdev_ts_event_cb(struct dmx_ts_feed *feed,
+ struct dmx_data_ready *dmx_data_ready)
+{
+ struct dmxdev_filter *dmxdevfilter = feed->priv;
+ struct dvb_ringbuffer *buffer;
+ struct dmxdev_events_queue *events;
+ struct dmx_filter_event event;
+ int free;
+
+ spin_lock(&dmxdevfilter->dev->lock);
+
+ if (dmxdevfilter->state != DMXDEV_STATE_GO) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
+ buffer = &dmxdevfilter->buffer;
+ events = &dmxdevfilter->events;
+ } else {
+ buffer = &dmxdevfilter->dev->dvr_buffer;
+ events = &dmxdevfilter->dev->dvr_output_events;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_PCR) {
+ event.type = DMX_EVENT_NEW_PCR;
+ event.params.pcr.pcr = dmx_data_ready->pcr.pcr;
+ event.params.pcr.stc = dmx_data_ready->pcr.stc;
+ if (dmx_data_ready->pcr.disc_indicator_set)
+ event.params.pcr.flags =
+ DMX_FILTER_DISCONTINUITY_INDEICATOR;
+ else
+ event.params.pcr.flags = 0;
+
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if ((dmxdevfilter->params.pes.output == DMX_OUT_DECODER) ||
+ (buffer->error)) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ free = dvb_ringbuffer_free(&dmxdevfilter->buffer);
+
+ if ((DMX_OVERRUN_ERROR == dmx_data_ready->status) ||
+ (dmx_data_ready->data_length > free)) {
+ dvb_dmxdev_flush_output(buffer, events);
+
+ dprintk("dmxdev: buffer overflow\n");
+
+ buffer->error = -EOVERFLOW;
+
+ event.type = DMX_EVENT_BUFFER_OVERFLOW;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ if (dmxdevfilter->params.pes.output == DMX_OUT_TAP) {
+ if ((dmx_data_ready->status == DMX_OK) &&
+ (!events->current_event_data_size)) {
+ events->current_event_start_offset =
+ dmxdevfilter->buffer.pwrite;
+ } else if (dmx_data_ready->status == DMX_OK_PES_END) {
+ event.type = DMX_EVENT_NEW_PES;
+
+ event.params.pes.base_offset =
+ events->current_event_start_offset;
+ event.params.pes.start_offset =
+ events->current_event_start_offset +
+ dmx_data_ready->pes_end.start_gap;
+
+ event.params.pes.actual_length =
+ dmx_data_ready->pes_end.actual_length;
+ event.params.pes.total_length =
+ events->current_event_data_size;
+
+ event.params.pes.flags = 0;
+ if (dmx_data_ready->pes_end.disc_indicator_set)
+ event.params.pes.flags |=
+ DMX_FILTER_DISCONTINUITY_INDEICATOR;
+ if (dmx_data_ready->pes_end.pes_length_mismatch)
+ event.params.pes.flags |=
+ DMX_FILTER_PES_LENGTH_ERROR;
+
+ event.params.pes.stc = dmx_data_ready->pes_end.stc;
+ dvb_dmxdev_add_event(events, &event);
+
+ events->current_event_data_size = 0;
+ }
+ } else {
+ if (!events->current_event_data_size)
+ events->current_event_start_offset =
+ dmxdevfilter->buffer.pwrite;
+ }
+
+ events->current_event_data_size += dmx_data_ready->data_length;
+ DVB_RINGBUFFER_PUSH(&dmxdevfilter->buffer, dmx_data_ready->data_length);
+
+ if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) ||
+ (dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)) {
+ if (events->current_event_data_size >=
+ dmxdevfilter->params.pes.rec_chunk_size) {
+ event.type = DMX_EVENT_NEW_REC_CHUNK;
+ event.params.recording_chunk.offset =
+ events->current_event_start_offset;
+
+ event.params.recording_chunk.size =
+ events->current_event_data_size;
+
+ dvb_dmxdev_add_event(events, &event);
+
+ events->current_event_data_size = 0;
+ }
}
spin_unlock(&dmxdevfilter->dev->lock);
wake_up_all(&buffer->queue);
@@ -1144,7 +1772,10 @@
return -EINVAL;
}
- dvb_ringbuffer_flush(&dmxdevfilter->buffer);
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ dvb_dmxdev_flush_output(&dmxdevfilter->buffer, &dmxdevfilter->events);
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
wake_up_all(&dmxdevfilter->buffer.queue);
return 0;
@@ -1213,6 +1844,15 @@
tsfeed = feed->ts;
tsfeed->priv = filter;
+ if (tsfeed->data_ready_cb) {
+ ret = tsfeed->data_ready_cb(tsfeed, dvb_dmxdev_ts_event_cb);
+
+ if (ret < 0) {
+ dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
+ return ret;
+ }
+ }
+
ret = tsfeed->set(tsfeed, feed->pid,
ts_type, ts_pes,
filter->pes_buffer_size, timeout);
@@ -1270,7 +1910,9 @@
spin_unlock_irq(&filter->dev->lock);
}
- dvb_ringbuffer_flush(&filter->buffer);
+ spin_lock_irq(&filter->dev->lock);
+ dvb_dmxdev_flush_output(&filter->buffer, &filter->events);
+ spin_unlock_irq(&filter->dev->lock);
switch (filter->type) {
case DMXDEV_TYPE_SEC:
@@ -1295,14 +1937,27 @@
/* if no feed found, try to allocate new one */
if (!*secfeed) {
ret = dmxdev->demux->allocate_section_feed(dmxdev->demux,
- secfeed,
- dvb_dmxdev_section_callback);
+ secfeed,
+ dvb_dmxdev_section_callback);
if (ret < 0) {
printk("DVB (%s): could not alloc feed\n",
__func__);
return ret;
}
+ if ((*secfeed)->data_ready_cb) {
+ ret = (*secfeed)->data_ready_cb(
+ *secfeed,
+ dvb_dmxdev_section_event_cb);
+
+ if (ret < 0) {
+ printk(KERN_ERR "DVB (%s): could not set event cb\n",
+ __func__);
+ dvb_dmxdev_feed_restart(filter);
+ return ret;
+ }
+ }
+
ret = (*secfeed)->set(*secfeed, para->pid, 32768,
(para->flags & DMX_CHECK_CRC) ? 1 : 0);
if (ret < 0) {
@@ -1348,6 +2003,16 @@
break;
}
case DMXDEV_TYPE_PES:
+ if (filter->params.pes.rec_chunk_size <
+ DMX_REC_BUFF_CHUNK_MIN_SIZE)
+ filter->params.pes.rec_chunk_size =
+ DMX_REC_BUFF_CHUNK_MIN_SIZE;
+
+ if (filter->params.pes.rec_chunk_size >=
+ filter->buffer.size)
+ filter->params.pes.rec_chunk_size =
+ filter->buffer.size >> 2;
+
list_for_each_entry(feed, &filter->feed.ts, next) {
ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
if (ret < 0) {
@@ -1391,6 +2056,8 @@
file->private_data = dmxdevfilter;
dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
+ dvb_dmxdev_flush_events(&dmxdevfilter->events);
+
dmxdevfilter->type = DMXDEV_TYPE_NONE;
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
init_timer(&dmxdevfilter->timer);
@@ -1607,8 +2274,14 @@
ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
else
ret = dvb_dmxdev_buffer_read(&dmxdevfilter->buffer,
- file->f_flags & O_NONBLOCK,
- buf, count, ppos);
+ file->f_flags & O_NONBLOCK,
+ buf, count, ppos);
+
+ if (ret > 0) {
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ dvb_dmxdev_update_events(&dmxdevfilter->events, ret);
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+ }
mutex_unlock(&dmxdevfilter->mutex);
return ret;
@@ -1764,6 +2437,15 @@
*(enum dmx_playback_mode_t *)parg);
break;
+ case DMX_GET_EVENT:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_get_event(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
case DMX_GET_STC:
if (!dmxdev->demux->get_stc) {
ret = -EINVAL;
@@ -1823,10 +2505,15 @@
return 0;
if (dmxdevfilter->buffer.error)
- mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);
+ mask |= (POLLIN | POLLRDNORM | POLLERR);
if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
- mask |= (POLLIN | POLLRDNORM | POLLPRI);
+ mask |= (POLLIN | POLLRDNORM);
+
+ if (dmxdevfilter->events.notified_index !=
+ dmxdevfilter->events.write_index) {
+ mask |= POLLPRI;
+ }
return mask;
}
@@ -1952,6 +2639,10 @@
ret = dvb_dvr_feed_data(dmxdev, file->f_flags, arg);
break;
+ case DMX_GET_EVENT:
+ ret = dvb_dvr_get_event(dmxdev, file->f_flags, parg);
+ break;
+
default:
ret = -EINVAL;
break;
@@ -1978,10 +2669,14 @@
poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
if (dmxdev->dvr_buffer.error)
- mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);
+ mask |= (POLLIN | POLLRDNORM | POLLERR);
if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
- mask |= (POLLIN | POLLRDNORM | POLLPRI);
+ mask |= (POLLIN | POLLRDNORM);
+
+ if (dmxdev->dvr_output_events.notified_index !=
+ dmxdev->dvr_output_events.write_index)
+ mask |= POLLPRI;
} else {
poll_wait(file, &dmxdev->dvr_input_buffer.queue, wait);
if (dmxdev->dvr_input_buffer.error)
diff --git a/drivers/media/dvb/dvb-core/dmxdev.h b/drivers/media/dvb/dvb-core/dmxdev.h
index 4c52e84..6fa7054 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.h
+++ b/drivers/media/dvb/dvb-core/dmxdev.h
@@ -62,6 +62,35 @@
struct list_head next;
};
+struct dmxdev_events_queue {
+#define DMX_EVENT_QUEUE_SIZE 500 /* number of events */
+ /*
+ * indices used to manage events queue.
+	 * read_index is advanced when the relevant data is read
+ * from the buffer.
+ * notified_index is the index from which next events
+ * are returned.
+ * read_index <= notified_index <= write_index
+ *
+	 * If the user reads the data without fetching the respective
+	 * event first, the read/notified indices are updated
+	 * automatically to reflect the data that actually exists
+	 * in the buffer.
+ */
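+	/*
+	 * For example: with two events queued (write_index = 2), one of
+	 * them already returned to the user (notified_index = 1) and its
+	 * data not yet read (read_index = 0), the invariant above still
+	 * holds; once the data belonging to that first event is consumed,
+	 * read_index advances to 1.
+	 */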
+ u32 read_index;
+ u32 write_index;
+ u32 notified_index;
+
+ /* Bytes read by user without having respective event in the queue */
+ u32 bytes_read_no_event;
+
+ /* internal tracking of PES and recording events */
+ u32 current_event_data_size;
+ u32 current_event_start_offset;
+
+ struct dmx_filter_event queue[DMX_EVENT_QUEUE_SIZE];
+};
+
struct dmxdev_filter {
union {
struct dmx_section_filter *sec;
@@ -78,6 +107,8 @@
struct dmx_pes_filter_params pes;
} params;
+ struct dmxdev_events_queue events;
+
enum dmxdev_type type;
enum dmxdev_state state;
struct dmxdev *dev;
@@ -88,6 +119,8 @@
/* relevent for decoder PES */
unsigned long pes_buffer_size;
+ u32 rec_chunk_size;
+
/* only for sections */
struct timer_list timer;
int todo;
@@ -105,10 +138,9 @@
int filternum;
int capabilities;
-#define DMXDEV_CAP_DUPLEX 0x1
-#define DMXDEV_CAP_PULL_MODE 0x2
-#define DMXDEV_CAP_PCR_EXTRACTION 0x4
-#define DMXDEV_CAP_INDEXING 0x8
+#define DMXDEV_CAP_DUPLEX 0x1
+#define DMXDEV_CAP_PULL_MODE 0x2
+#define DMXDEV_CAP_INDEXING 0x4
enum dmx_playback_mode_t playback_mode;
dmx_source_t source;
@@ -120,6 +152,8 @@
struct dmx_frontend *dvr_orig_fe;
struct dvb_ringbuffer dvr_buffer;
+ struct dmxdev_events_queue dvr_output_events;
+
struct dvb_ringbuffer dvr_input_buffer;
struct workqueue_struct *dvr_input_workqueue;
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index bc72fee..0be6a22 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -145,8 +145,15 @@
/* PUSI ? */
if (buf[1] & 0x40) {
- feed->peslen = 0xfffa;
+ if (feed->pusi_seen)
+ /* We had seen PUSI before, this means
+ * that previous PES can be closed now.
+ */
+ feed->cb.ts(NULL, 0, NULL, 0,
+ &feed->feed.ts, DMX_OK_PES_END);
+
feed->pusi_seen = 1;
+ feed->peslen = 0;
}
if (feed->pusi_seen == 0)
@@ -204,6 +211,11 @@
if (dvb_demux_performancecheck)
demux->total_crc_time +=
dvb_dmx_calc_time_delta(pre_crc_time);
+
+ /* Notify on CRC error */
+ feed->cb.sec(NULL, 0, NULL, 0,
+ &f->filter, DMX_CRC_ERROR);
+
return -1;
}
@@ -1053,6 +1065,25 @@
return ret;
}
+static int dmx_ts_feed_data_ready_cb(struct dmx_ts_feed *feed,
+ dmx_ts_data_ready_cb callback)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (dvbdmxfeed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ dvbdmxfeed->data_ready_cb.ts = callback;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
static int dmx_ts_set_indexing_params(
struct dmx_ts_feed *ts_feed,
struct dmx_indexing_video_params *params)
@@ -1084,7 +1115,7 @@
feed->cb.ts = callback;
feed->demux = demux;
feed->pid = 0xffff;
- feed->peslen = 0xfffa;
+ feed->peslen = 0;
feed->buffer = NULL;
memset(&feed->indexing_params, 0,
sizeof(struct dmx_indexing_video_params));
@@ -1105,6 +1136,7 @@
(*ts_feed)->set = dmx_ts_feed_set;
(*ts_feed)->set_indexing_params = dmx_ts_set_indexing_params;
(*ts_feed)->get_decoder_buff_status = dmx_ts_feed_decoder_buff_status;
+ (*ts_feed)->data_ready_cb = dmx_ts_feed_data_ready_cb;
if (!(feed->filter = dvb_dmx_filter_alloc(demux))) {
feed->state = DMX_STATE_FREE;
@@ -1312,6 +1344,26 @@
return ret;
}
+
+static int dmx_section_feed_data_ready_cb(struct dmx_section_feed *feed,
+ dmx_section_data_ready_cb callback)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (dvbdmxfeed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ dvbdmxfeed->data_ready_cb.sec = callback;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
static int dmx_section_feed_release_filter(struct dmx_section_feed *feed,
struct dmx_section_filter *filter)
{
@@ -1381,6 +1433,7 @@
(*feed)->start_filtering = dmx_section_feed_start_filtering;
(*feed)->stop_filtering = dmx_section_feed_stop_filtering;
(*feed)->release_filter = dmx_section_feed_release_filter;
+ (*feed)->data_ready_cb = dmx_section_feed_data_ready_cb;
mutex_unlock(&dvbdmx->mutex);
return 0;
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index ebe34ad..3970a6c 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -78,6 +78,11 @@
dmx_section_cb sec;
} cb;
+ union {
+ dmx_ts_data_ready_cb ts;
+ dmx_section_data_ready_cb sec;
+ } data_ready_cb;
+
struct dvb_demux *demux;
void *priv;
int type;
diff --git a/drivers/media/dvb/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb/dvb-core/dvb_ringbuffer.h
index 8b591a6..4093fa5 100644
--- a/drivers/media/dvb/dvb-core/dvb_ringbuffer.h
+++ b/drivers/media/dvb/dvb-core/dvb_ringbuffer.h
@@ -111,6 +111,10 @@
#define DVB_RINGBUFFER_SKIP(rbuf,num) \
(rbuf)->pread=((rbuf)->pread+(num))%(rbuf)->size
+/* advance write ptr by <num> bytes */
+#define DVB_RINGBUFFER_PUSH(rbuf, num) \
+ ((rbuf)->pwrite = (((rbuf)->pwrite+(num))%(rbuf)->size))
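+
+/*
+ * Typically used when data has been placed in the buffer memory directly
+ * (e.g. by demux hardware) rather than through dvb_ringbuffer_write(),
+ * for instance: DVB_RINGBUFFER_PUSH(&filter->buffer, data_length);
+ */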
+
/*
** read <len> bytes from ring buffer into <buf>
** <usermem> specifies whether <buf> resides in user space
diff --git a/drivers/media/dvb/mpq/adapter/mpq_stream_buffer.c b/drivers/media/dvb/mpq/adapter/mpq_stream_buffer.c
index 738d730..4b0e7be 100644
--- a/drivers/media/dvb/mpq/adapter/mpq_stream_buffer.c
+++ b/drivers/media/dvb/mpq/adapter/mpq_stream_buffer.c
@@ -192,18 +192,18 @@
struct mpq_streambuffer *sbuff,
u8 *buf, size_t len)
{
- int actual_len;
+ ssize_t actual_len;
actual_len = dvb_ringbuffer_avail(&sbuff->raw_data);
if (actual_len < len)
len = actual_len;
- if (actual_len)
- dvb_ringbuffer_read(&sbuff->raw_data, buf, actual_len);
+ if (len)
+ dvb_ringbuffer_read(&sbuff->raw_data, buf, len);
wake_up_all(&sbuff->raw_data.queue);
- return actual_len;
+ return len;
}
EXPORT_SYMBOL(mpq_streambuffer_data_read);
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
index 7223377..a01cf5b 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
@@ -33,16 +33,6 @@
sizeof(struct mpq_streambuffer_packet_header) + \
sizeof(struct mpq_adapter_video_meta_data)))
-/*
- * PCR/STC information length saved in ring-buffer.
- * PCR / STC are saved in ring-buffer in the following form:
- * <8 bit flags><64 bits of STC> <64bits of PCR>
- * STC and PCR values are in 27MHz.
- * The current flags that are defined:
- * 0x00000001: discontinuity_indicator
- */
-#define PCR_STC_LEN 17
-
/* Number of demux devices, has default of linux configuration */
static int mpq_demux_device_num = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
@@ -839,7 +829,7 @@
ion_alloc(mpq_demux->ion_client,
actual_buffer_size,
SZ_4K,
- ION_HEAP(ION_CP_MM_HEAP_ID));
+ (ION_HEAP(ION_CP_MM_HEAP_ID) | CACHED));
if (IS_ERR_OR_NULL(feed_data->payload_buff_handle)) {
ret = PTR_ERR(feed_data->payload_buff_handle);
@@ -1906,10 +1896,9 @@
struct dvb_demux_feed *feed,
const u8 *buf)
{
- int i;
u64 pcr;
u64 stc;
- u8 output[PCR_STC_LEN];
+ struct dmx_data_ready data;
struct mpq_demux *mpq_demux = feed->demux->priv;
const struct ts_packet_header *ts_header;
const struct ts_adaptation_field *adaptation_field;
@@ -1960,17 +1949,13 @@
stc += buf[188];
stc *= 256; /* convert from 105.47 KHZ to 27MHz */
- output[0] = adaptation_field->discontinuity_indicator;
+ data.data_length = 0;
+ data.pcr.pcr = pcr;
+ data.pcr.stc = stc;
+ data.pcr.disc_indicator_set = adaptation_field->discontinuity_indicator;
+ data.status = DMX_OK_PCR;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
- for (i = 1; i <= 8; i++)
- output[i] = (stc >> ((8-i) << 3)) & 0xFF;
-
- for (i = 9; i <= 16; i++)
- output[i] = (pcr >> ((16-i) << 3)) & 0xFF;
-
- feed->cb.ts(output, PCR_STC_LEN,
- NULL, 0,
- &feed->feed.ts, DMX_OK);
return 0;
}
EXPORT_SYMBOL(mpq_dmx_process_pcr_packet);
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
index e7d6b74..bd1ecfe 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tsif.c
@@ -641,7 +641,6 @@
mpq_demux->dmxdev.capabilities =
DMXDEV_CAP_DUPLEX |
DMXDEV_CAP_PULL_MODE |
- DMXDEV_CAP_PCR_EXTRACTION |
DMXDEV_CAP_INDEXING;
mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
index f374d91..fd94e80 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
@@ -752,7 +752,6 @@
mpq_demux->dmxdev.capabilities =
DMXDEV_CAP_DUPLEX |
DMXDEV_CAP_PULL_MODE |
- DMXDEV_CAP_PCR_EXTRACTION |
DMXDEV_CAP_INDEXING;
mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c
index 74b7c22..e4858fa 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v2.c
@@ -139,7 +139,6 @@
mpq_demux->dmxdev.capabilities =
DMXDEV_CAP_DUPLEX |
DMXDEV_CAP_PULL_MODE |
- DMXDEV_CAP_PCR_EXTRACTION |
DMXDEV_CAP_INDEXING;
mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
diff --git a/drivers/media/video/msm/Kconfig b/drivers/media/video/msm/Kconfig
index 9c791e4..fb5aea0 100644
--- a/drivers/media/video/msm/Kconfig
+++ b/drivers/media/video/msm/Kconfig
@@ -291,6 +291,54 @@
by QUP in the board file as QUP is used by
applications other than camera.
+config MSM_CSI20_HEADER
+ bool "Qualcomm MSM CSI 2.0 Header"
+ depends on MSM_CAMERA
+ ---help---
+	  Enable support for CSI drivers to include the 2.0
+	  header. This header has register macros, their
+	  values and bit masks for register configuration bits.
+	  This config option is required for targets based on the
+	  8960, 8930 and 8064 platforms.
+
+config MSM_CSI30_HEADER
+ bool "Qualcomm MSM CSI 3.0 Header"
+ depends on MSM_CAMERA
+ ---help---
+	  Enable support for CSI drivers to include the 3.0
+	  header. This header has register macros, their
+	  values and bit masks for register configuration bits.
+	  This config option is required for targets based on
+	  8064 platforms.
+
+config MSM_CSIPHY
+ bool "Qualcomm MSM Camera Serial Interface Physical receiver support"
+ depends on MSM_CAMERA
+ ---help---
+ Enable support for Camera Serial Interface
+ Physical receiver. It deserializes packets and
+ supports detection of packet start and stop
+ signalling.
+
+config MSM_CSID
+ bool "Qualcomm MSM Camera Serial Interface decoder support"
+ depends on MSM_CAMERA
+ ---help---
+ Enable support for the Camera Serial Interface decoder.
+ It supports lane merging and decoding of packets
+ based on the CID, which is mapped to a virtual channel
+ and data type.
+
+config MSM_CSI2_REGISTER
+ bool "Qualcomm MSM CSI2 Register"
+ depends on MSM_CAMERA
+ ---help---
+ Register the CSIPHY, CSID and ISPIF subdevices during
+ msm_open. Different CSI components are registered
+ depending on the platform. This option enables
+ registration of the CSIPHY, CSID and ISPIF subdevices
+ so that data can be received from the sensor.
+
config S5K3L1YX
bool "Sensor S5K3L1YX (BAYER 12M)"
depends on MSM_CAMERA
diff --git a/drivers/media/video/msm/Makefile b/drivers/media/video/msm/Makefile
index 5703d88..eb45271 100644
--- a/drivers/media/video/msm/Makefile
+++ b/drivers/media/video/msm/Makefile
@@ -12,10 +12,11 @@
EXTRA_CFLAGS += -Idrivers/media/video/msm/actuators
EXTRA_CFLAGS += -Idrivers/media/video/msm/server
obj-$(CONFIG_MSM_CAMERA) += msm_isp.o msm.o msm_mem.o msm_mctl.o msm_mctl_buf.o msm_mctl_pp.o msm_vfe_stats_buf.o
- obj-$(CONFIG_MSM_CAMERA) += server/ eeprom/ sensors/ actuators/ csi/
+ obj-$(CONFIG_MSM_CAMERA) += server/
+ obj-$(CONFIG_MSM_CAM_IRQ_ROUTER) += msm_camirq_router.o
+ obj-$(CONFIG_MSM_CAMERA) += eeprom/ sensors/ actuators/ csi/
obj-$(CONFIG_MSM_CPP) += cpp/
obj-$(CONFIG_MSM_CAMERA) += msm_gesture.o
- obj-$(CONFIG_MSM_CAM_IRQ_ROUTER) += msm_camirq_router.o
else
obj-$(CONFIG_MSM_CAMERA) += msm_camera.o
endif
diff --git a/drivers/media/video/msm/csi/Makefile b/drivers/media/video/msm/csi/Makefile
index f7cb408..d11e2d2 100644
--- a/drivers/media/video/msm/csi/Makefile
+++ b/drivers/media/video/msm/csi/Makefile
@@ -1,7 +1,14 @@
GCC_VERSION := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
-EXTRA_CFLAGS += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/video/msm
+ifeq ($(CONFIG_MSM_CSI20_HEADER),y)
+ ccflags-y += -Idrivers/media/video/msm/csi/include/csi2.0
+else ifeq ($(CONFIG_MSM_CSI30_HEADER),y)
+ ccflags-y += -Idrivers/media/video/msm/csi/include/csi3.0
+endif
+obj-$(CONFIG_MSM_CSI2_REGISTER) += msm_csi2_register.o
+obj-$(CONFIG_MSM_CSIPHY) += msm_csiphy.o
+obj-$(CONFIG_MSM_CSID) += msm_csid.o
obj-$(CONFIG_ARCH_MSM8960) += msm_csi2_register.o msm_csiphy.o msm_csid.o msm_ispif.o
obj-$(CONFIG_ARCH_MSM7X27A) += msm_csic_register.o msm_csic.o
obj-$(CONFIG_ARCH_MSM8X60) += msm_csic_register.o msm_csic.o
obj-$(CONFIG_ARCH_MSM7X30) += msm_csic_register.o msm_csic.o
-
diff --git a/drivers/media/video/msm/csi/include/csi2.0/msm_csid_hwreg.h b/drivers/media/video/msm/csi/include/csi2.0/msm_csid_hwreg.h
new file mode 100644
index 0000000..c79748c
--- /dev/null
+++ b/drivers/media/video/msm/csi/include/csi2.0/msm_csid_hwreg.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_HWREG_H
+#define MSM_CSID_HWREG_H
+
+/* MIPI CSID registers */
+#define CSID_HW_VERSION_ADDR 0x0
+#define CSID_CORE_CTRL_0_ADDR 0x4
+#define CSID_CORE_CTRL_1_ADDR 0x4
+#define CSID_RST_CMD_ADDR 0x8
+#define CSID_CID_LUT_VC_0_ADDR 0xc
+#define CSID_CID_LUT_VC_1_ADDR 0x10
+#define CSID_CID_LUT_VC_2_ADDR 0x14
+#define CSID_CID_LUT_VC_3_ADDR 0x18
+#define CSID_CID_n_CFG_ADDR 0x1C
+#define CSID_IRQ_CLEAR_CMD_ADDR 0x5c
+#define CSID_IRQ_MASK_ADDR 0x60
+#define CSID_IRQ_STATUS_ADDR 0x64
+#define CSID_CAPTURED_UNMAPPED_LONG_PKT_HDR_ADDR 0x68
+#define CSID_CAPTURED_MMAPPED_LONG_PKT_HDR_ADDR 0x6c
+#define CSID_CAPTURED_SHORT_PKT_ADDR 0x70
+#define CSID_CAPTURED_LONG_PKT_HDR_ADDR 0x74
+#define CSID_CAPTURED_LONG_PKT_FTR_ADDR 0x78
+#define CSID_PIF_MISR_DL0_ADDR 0x7C
+#define CSID_PIF_MISR_DL1_ADDR 0x80
+#define CSID_PIF_MISR_DL2_ADDR 0x84
+#define CSID_PIF_MISR_DL3_ADDR 0x88
+#define CSID_STATS_TOTAL_PKTS_RCVD_ADDR 0x8C
+#define CSID_STATS_ECC_ADDR 0x90
+#define CSID_STATS_CRC_ADDR 0x94
+#define CSID_TG_CTRL_ADDR 0x9C
+#define CSID_TG_VC_CFG_ADDR 0xA0
+#define CSID_TG_DT_n_CFG_0_ADDR 0xA8
+#define CSID_TG_DT_n_CFG_1_ADDR 0xAC
+#define CSID_TG_DT_n_CFG_2_ADDR 0xB0
+#define CSID_RST_DONE_IRQ_BITSHIFT 11
+#define CSID_RST_STB_ALL 0x7FFF
+#define CSID_DL_INPUT_SEL_SHIFT 0x2
+#define CSID_PHY_SEL_SHIFT 0x17
+
+#endif
diff --git a/drivers/media/video/msm/csi/include/csi2.0/msm_csiphy_hwreg.h b/drivers/media/video/msm/csi/include/csi2.0/msm_csiphy_hwreg.h
new file mode 100644
index 0000000..93d1fc4
--- /dev/null
+++ b/drivers/media/video/msm/csi/include/csi2.0/msm_csiphy_hwreg.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_HWREG_H
+#define MSM_CSIPHY_HWREG_H
+
+/*MIPI CSI PHY registers*/
+#define MIPI_CSIPHY_HW_VERSION_ADDR 0x180
+#define MIPI_CSIPHY_LNn_CFG1_ADDR 0x0
+#define MIPI_CSIPHY_LNn_CFG2_ADDR 0x4
+#define MIPI_CSIPHY_LNn_CFG3_ADDR 0x8
+#define MIPI_CSIPHY_LNn_CFG4_ADDR 0xC
+#define MIPI_CSIPHY_LNn_CFG5_ADDR 0x10
+#define MIPI_CSIPHY_LNCK_CFG1_ADDR 0x100
+#define MIPI_CSIPHY_LNCK_CFG2_ADDR 0x104
+#define MIPI_CSIPHY_LNCK_CFG3_ADDR 0x108
+#define MIPI_CSIPHY_LNCK_CFG4_ADDR 0x10C
+#define MIPI_CSIPHY_LNCK_CFG5_ADDR 0x110
+#define MIPI_CSIPHY_LNCK_MISC1_ADDR 0x128
+#define MIPI_CSIPHY_GLBL_RESET_ADDR 0x140
+#define MIPI_CSIPHY_GLBL_PWR_CFG_ADDR 0x144
+#define MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR 0x180
+#define MIPI_CSIPHY_INTERRUPT_MASK0_ADDR 0x1A0
+#define MIPI_CSIPHY_INTERRUPT_MASK_VAL 0x6F
+#define MIPI_CSIPHY_INTERRUPT_MASK_ADDR 0x1A4
+#define MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR 0x1C0
+#define MIPI_CSIPHY_INTERRUPT_CLEAR_ADDR 0x1C4
+#define MIPI_CSIPHY_MODE_CONFIG_SHIFT 0x4
+#define MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR 0x1E0
+#define MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR 0x1E8
+
+#endif
diff --git a/drivers/media/video/msm/csi/include/csi3.0/msm_csid_hwreg.h b/drivers/media/video/msm/csi/include/csi3.0/msm_csid_hwreg.h
new file mode 100644
index 0000000..27d5a06
--- /dev/null
+++ b/drivers/media/video/msm/csi/include/csi3.0/msm_csid_hwreg.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_HWREG_H
+#define MSM_CSID_HWREG_H
+
+/* MIPI CSID registers */
+#define CSID_HW_VERSION_ADDR 0x0
+#define CSID_CORE_CTRL_0_ADDR 0x4
+#define CSID_CORE_CTRL_1_ADDR 0x8
+#define CSID_RST_CMD_ADDR 0xC
+#define CSID_CID_LUT_VC_0_ADDR 0x10
+#define CSID_CID_LUT_VC_1_ADDR 0x14
+#define CSID_CID_LUT_VC_2_ADDR 0x18
+#define CSID_CID_LUT_VC_3_ADDR 0x1C
+#define CSID_CID_n_CFG_ADDR 0x20
+#define CSID_IRQ_CLEAR_CMD_ADDR 0x60
+#define CSID_IRQ_MASK_ADDR 0x64
+#define CSID_IRQ_STATUS_ADDR 0x68
+#define CSID_CAPTURED_UNMAPPED_LONG_PKT_HDR_ADDR 0x6C
+#define CSID_CAPTURED_MMAPPED_LONG_PKT_HDR_ADDR 0x70
+#define CSID_CAPTURED_SHORT_PKT_ADDR 0x74
+#define CSID_CAPTURED_LONG_PKT_HDR_ADDR 0x78
+#define CSID_CAPTURED_LONG_PKT_FTR_ADDR 0x7C
+#define CSID_PIF_MISR_DL0_ADDR 0x80
+#define CSID_PIF_MISR_DL1_ADDR 0x84
+#define CSID_PIF_MISR_DL2_ADDR 0x88
+#define CSID_PIF_MISR_DL3_ADDR 0x8C
+#define CSID_STATS_TOTAL_PKTS_RCVD_ADDR 0x90
+#define CSID_STATS_ECC_ADDR 0x94
+#define CSID_STATS_CRC_ADDR 0x98
+#define CSID_TG_CTRL_ADDR 0xA0
+#define CSID_TG_VC_CFG_ADDR 0xA4
+#define CSID_TG_DT_n_CFG_0_ADDR 0xAC
+#define CSID_TG_DT_n_CFG_1_ADDR 0xB0
+#define CSID_TG_DT_n_CFG_2_ADDR 0xB4
+#define CSID_RST_DONE_IRQ_BITSHIFT 11
+#define CSID_RST_STB_ALL 0x7FFF
+#define CSID_DL_INPUT_SEL_SHIFT 0x4
+#define CSID_PHY_SEL_SHIFT 0x17
+
+#endif
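Both per-version hwreg headers intentionally reuse the same macro names with different offsets, and the csi Makefile above selects exactly one include directory per target, so shared CSID code compiles unchanged against either layout. A minimal sketch of the idea (the helper name is illustrative; msm_camera_io_w() and the macros are the ones used elsewhere in this patch):

/* Resolves to offset 0x8 when built against the 2.0 header and 0xC
 * when built against the 3.0 header shown above. */
static inline void example_csid_soft_reset(void __iomem *csidbase)
{
	msm_camera_io_w(CSID_RST_STB_ALL, csidbase + CSID_RST_CMD_ADDR);
}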
diff --git a/drivers/media/video/msm/csi/include/csi3.0/msm_csiphy_hwreg.h b/drivers/media/video/msm/csi/include/csi3.0/msm_csiphy_hwreg.h
new file mode 100644
index 0000000..79791bd
--- /dev/null
+++ b/drivers/media/video/msm/csi/include/csi3.0/msm_csiphy_hwreg.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_HWREG_H
+#define MSM_CSIPHY_HWREG_H
+
+/*MIPI CSI PHY registers*/
+#define MIPI_CSIPHY_LNn_CFG1_ADDR 0x0
+#define MIPI_CSIPHY_LNn_CFG2_ADDR 0x4
+#define MIPI_CSIPHY_LNn_CFG3_ADDR 0x8
+#define MIPI_CSIPHY_LNn_CFG4_ADDR 0xC
+#define MIPI_CSIPHY_LNn_CFG5_ADDR 0x10
+#define MIPI_CSIPHY_LNCK_CFG1_ADDR 0x100
+#define MIPI_CSIPHY_LNCK_CFG2_ADDR 0x104
+#define MIPI_CSIPHY_LNCK_CFG3_ADDR 0x108
+#define MIPI_CSIPHY_LNCK_CFG4_ADDR 0x10C
+#define MIPI_CSIPHY_LNCK_CFG5_ADDR 0x110
+#define MIPI_CSIPHY_LNCK_MISC1_ADDR 0x128
+#define MIPI_CSIPHY_GLBL_RESET_ADDR 0x140
+#define MIPI_CSIPHY_GLBL_PWR_CFG_ADDR 0x144
+#define MIPI_CSIPHY_HW_VERSION_ADDR 0x188
+#define MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR 0x18C
+#define MIPI_CSIPHY_INTERRUPT_MASK0_ADDR 0x1AC
+#define MIPI_CSIPHY_INTERRUPT_MASK_VAL 0x3F
+#define MIPI_CSIPHY_INTERRUPT_MASK_ADDR 0x1B0
+#define MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR 0x1CC
+#define MIPI_CSIPHY_INTERRUPT_CLEAR_ADDR 0x1D0
+#define MIPI_CSIPHY_MODE_CONFIG_SHIFT 0x4
+#define MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR 0x1EC
+#define MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR 0x1F4
+
+#endif
diff --git a/drivers/media/video/msm/csi/msm_csid.c b/drivers/media/video/msm/csi/msm_csid.c
index 1ab4e66..962c60e 100644
--- a/drivers/media/video/msm/csi/msm_csid.c
+++ b/drivers/media/video/msm/csi/msm_csid.c
@@ -14,47 +14,16 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <mach/board.h>
#include <mach/camera.h>
#include <media/msm_isp.h>
#include "msm_csid.h"
+#include "msm_csid_hwreg.h"
#include "msm.h"
#define V4L2_IDENT_CSID 50002
-/* MIPI CSID registers */
-#define CSID_HW_VERSION_ADDR 0x0
-#define CSID_CORE_CTRL_ADDR 0x4
-#define CSID_RST_CMD_ADDR 0x8
-#define CSID_CID_LUT_VC_0_ADDR 0xc
-#define CSID_CID_LUT_VC_1_ADDR 0x10
-#define CSID_CID_LUT_VC_2_ADDR 0x14
-#define CSID_CID_LUT_VC_3_ADDR 0x18
-#define CSID_CID_n_CFG_ADDR 0x1C
-#define CSID_IRQ_CLEAR_CMD_ADDR 0x5c
-#define CSID_IRQ_MASK_ADDR 0x60
-#define CSID_IRQ_STATUS_ADDR 0x64
-#define CSID_CAPTURED_UNMAPPED_LONG_PKT_HDR_ADDR 0x68
-#define CSID_CAPTURED_MMAPPED_LONG_PKT_HDR_ADDR 0x6c
-#define CSID_CAPTURED_SHORT_PKT_ADDR 0x70
-#define CSID_CAPTURED_LONG_PKT_HDR_ADDR 0x74
-#define CSID_CAPTURED_LONG_PKT_FTR_ADDR 0x78
-#define CSID_PIF_MISR_DL0_ADDR 0x7C
-#define CSID_PIF_MISR_DL1_ADDR 0x80
-#define CSID_PIF_MISR_DL2_ADDR 0x84
-#define CSID_PIF_MISR_DL3_ADDR 0x88
-#define CSID_STATS_TOTAL_PKTS_RCVD_ADDR 0x8C
-#define CSID_STATS_ECC_ADDR 0x90
-#define CSID_STATS_CRC_ADDR 0x94
-#define CSID_TG_CTRL_ADDR 0x9C
-#define CSID_TG_VC_CFG_ADDR 0xA0
-#define CSID_TG_DT_n_CFG_0_ADDR 0xA8
-#define CSID_TG_DT_n_CFG_1_ADDR 0xAC
-#define CSID_TG_DT_n_CFG_2_ADDR 0xB0
-#define CSID_TG_DT_n_CFG_3_ADDR 0xD8
-#define CSID_RST_DONE_IRQ_BITSHIFT 11
-#define CSID_RST_STB_ALL 0x7FFF
-
#define DBG_CSID 0
static int msm_csid_cid_lut(
@@ -64,7 +33,7 @@
int rc = 0, i = 0;
uint32_t val = 0;
- for (i = 0; i < csid_lut_params->num_cid && i < 4; i++) {
+ for (i = 0; i < csid_lut_params->num_cid && i < 16; i++) {
if (csid_lut_params->vc_cfg[i].dt < 0x12 ||
csid_lut_params->vc_cfg[i].dt > 0x37) {
CDBG("%s: unsupported data type 0x%x\n",
@@ -72,13 +41,13 @@
return rc;
}
val = msm_camera_io_r(csidbase + CSID_CID_LUT_VC_0_ADDR +
- (csid_lut_params->vc_cfg[i].cid >> 2) * 4)
- & ~(0xFF << csid_lut_params->vc_cfg[i].cid * 8);
- val |= csid_lut_params->vc_cfg[i].dt <<
- csid_lut_params->vc_cfg[i].cid * 8;
+ (csid_lut_params->vc_cfg[i].cid >> 2) * 4)
+ & ~(0xFF << ((csid_lut_params->vc_cfg[i].cid % 4) * 8));
+ val |= (csid_lut_params->vc_cfg[i].dt <<
+ ((csid_lut_params->vc_cfg[i].cid % 4) * 8));
msm_camera_io_w(val, csidbase + CSID_CID_LUT_VC_0_ADDR +
(csid_lut_params->vc_cfg[i].cid >> 2) * 4);
- val = csid_lut_params->vc_cfg[i].decode_format << 4 | 0x3;
+ val = (csid_lut_params->vc_cfg[i].decode_format << 4) | 0x3;
msm_camera_io_w(val, csidbase + CSID_CID_n_CFG_ADDR +
(csid_lut_params->vc_cfg[i].cid * 4));
}
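The widened loop above packs one 8-bit data type per CID into 32-bit LUT words: the word index is cid / 4 and the byte lane is cid % 4, which is what the new % 4 masking fixes. A small illustration of the indexing (the cid value is chosen arbitrarily; the macros are the ones defined in the hwreg headers):

/* Illustration only: where a given CID's data type lands. */
static void example_cid_slot(uint8_t cid)
{
	uint32_t lut_reg = CSID_CID_LUT_VC_0_ADDR + (cid >> 2) * 4;
	uint32_t shift   = (cid % 4) * 8;
	uint32_t cfg_reg = CSID_CID_n_CFG_ADDR + cid * 4;

	pr_debug("cid %u: LUT reg 0x%x, dt in bits %u:%u, CFG reg 0x%x\n",
		 cid, lut_reg, shift + 7, shift, cfg_reg);
}

For cid 6, for example, the data type lands in bits 23:16 of the second LUT word, a slot only reachable now that the loop bound has grown from 4 to 16 CIDs.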
@@ -113,13 +82,16 @@
csid_params = cfg_params->parms;
val = csid_params->lane_cnt - 1;
- val |= csid_params->lane_assign << 2;
- val |= 0x1 << 10;
- val |= 0x1 << 11;
- val |= 0x1 << 12;
- val |= 0x1 << 13;
- val |= 0x1 << 28;
- msm_camera_io_w(val, csidbase + CSID_CORE_CTRL_ADDR);
+ val |= csid_params->lane_assign << CSID_DL_INPUT_SEL_SHIFT;
+ if (csid_dev->hw_version < 0x30000000) {
+ val |= (0xF << 10);
+ msm_camera_io_w(val, csidbase + CSID_CORE_CTRL_0_ADDR);
+ } else {
+ msm_camera_io_w(val, csidbase + CSID_CORE_CTRL_0_ADDR);
+ val = csid_params->phy_sel << CSID_PHY_SEL_SHIFT;
+ val |= 0xF;
+ msm_camera_io_w(val, csidbase + CSID_CORE_CTRL_1_ADDR);
+ }
rc = msm_csid_cid_lut(&csid_params->lut_params, csidbase);
if (rc < 0)
@@ -317,6 +289,10 @@
platform_set_drvdata(pdev, &new_csid_dev->subdev);
mutex_init(&new_csid_dev->mutex);
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+
new_csid_dev->mem = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "csid");
if (!new_csid_dev->mem) {
@@ -352,11 +328,19 @@
return 0;
}
+static const struct of_device_id msm_csid_dt_match[] = {
+ {.compatible = "qcom,csid"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_csid_dt_match);
+
static struct platform_driver csid_driver = {
.probe = csid_probe,
.driver = {
.name = MSM_CSID_DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = msm_csid_dt_match,
},
};
diff --git a/drivers/media/video/msm/csi/msm_csiphy.c b/drivers/media/video/msm/csi/msm_csiphy.c
index 4693a8a..c524884 100644
--- a/drivers/media/video/msm/csi/msm_csiphy.c
+++ b/drivers/media/video/msm/csi/msm_csiphy.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/module.h>
#include <mach/board.h>
#include <mach/camera.h>
@@ -20,50 +21,19 @@
#include <media/msm_isp.h>
#include "msm_csiphy.h"
#include "msm.h"
-
+#include "msm_csiphy_hwreg.h"
#define DBG_CSIPHY 0
#define V4L2_IDENT_CSIPHY 50003
-
-/*MIPI CSI PHY registers*/
-#define MIPI_CSIPHY_LNn_CFG1_ADDR 0x0
-#define MIPI_CSIPHY_LNn_CFG2_ADDR 0x4
-#define MIPI_CSIPHY_LNn_CFG3_ADDR 0x8
-#define MIPI_CSIPHY_LNn_CFG4_ADDR 0xC
-#define MIPI_CSIPHY_LNn_CFG5_ADDR 0x10
-#define MIPI_CSIPHY_LNCK_CFG1_ADDR 0x100
-#define MIPI_CSIPHY_LNCK_CFG2_ADDR 0x104
-#define MIPI_CSIPHY_LNCK_CFG3_ADDR 0x108
-#define MIPI_CSIPHY_LNCK_CFG4_ADDR 0x10C
-#define MIPI_CSIPHY_LNCK_CFG5_ADDR 0x110
-#define MIPI_CSIPHY_LNCK_MISC1_ADDR 0x128
-#define MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR 0x1E0
-#define MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR 0x1E8
-#define MIPI_CSIPHY_T_WAKEUP_CFG1_ADDR 0x1EC
-#define MIPI_CSIPHY_GLBL_RESET_ADDR 0x0140
-#define MIPI_CSIPHY_GLBL_PWR_CFG_ADDR 0x0144
-#define MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR 0x0180
-#define MIPI_CSIPHY_INTERRUPT_STATUS1_ADDR 0x0184
-#define MIPI_CSIPHY_INTERRUPT_STATUS2_ADDR 0x0188
-#define MIPI_CSIPHY_INTERRUPT_STATUS3_ADDR 0x018C
-#define MIPI_CSIPHY_INTERRUPT_STATUS4_ADDR 0x0190
-#define MIPI_CSIPHY_INTERRUPT_MASK0_ADDR 0x01A0
-#define MIPI_CSIPHY_INTERRUPT_MASK1_ADDR 0x01A4
-#define MIPI_CSIPHY_INTERRUPT_MASK2_ADDR 0x01A8
-#define MIPI_CSIPHY_INTERRUPT_MASK3_ADDR 0x01AC
-#define MIPI_CSIPHY_INTERRUPT_MASK4_ADDR 0x01B0
-#define MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR 0x01C0
-#define MIPI_CSIPHY_INTERRUPT_CLEAR1_ADDR 0x01C4
-#define MIPI_CSIPHY_INTERRUPT_CLEAR2_ADDR 0x01C8
-#define MIPI_CSIPHY_INTERRUPT_CLEAR3_ADDR 0x01CC
-#define MIPI_CSIPHY_INTERRUPT_CLEAR4_ADDR 0x01D0
+#define CSIPHY_VERSION_V3 0x10
int msm_csiphy_config(struct csiphy_cfg_params *cfg_params)
{
int rc = 0;
int j = 0;
uint32_t val = 0;
- uint8_t lane_cnt = 0, lane_mask = 0;
+ uint8_t lane_cnt = 0;
+ uint16_t lane_mask = 0;
struct csiphy_device *csiphy_dev;
struct msm_camera_csiphy_params *csiphy_params;
void __iomem *csiphybase;
@@ -73,7 +43,8 @@
return -ENOMEM;
csiphy_params = cfg_params->parms;
- lane_mask = csiphy_params->lane_mask;
+ csiphy_dev->lane_mask[csiphy_dev->pdev->id] |= csiphy_params->lane_mask;
+ lane_mask = csiphy_dev->lane_mask[csiphy_dev->pdev->id];
lane_cnt = csiphy_params->lane_cnt;
if (csiphy_params->lane_cnt < 1 || csiphy_params->lane_cnt > 4) {
CDBG("%s: unsupported lane cnt %d\n",
@@ -81,13 +52,30 @@
return rc;
}
- val = 0x3;
- msm_camera_io_w((csiphy_params->lane_mask << 2) | val,
- csiphybase + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR);
msm_camera_io_w(0x1, csiphybase + MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR);
msm_camera_io_w(0x1, csiphybase + MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR);
- while (lane_mask & 0xf) {
+ if (csiphy_dev->hw_version != CSIPHY_VERSION_V3) {
+ val = 0x3;
+ msm_camera_io_w((lane_mask << 2) | val,
+ csiphybase + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR);
+ msm_camera_io_w(0x10, csiphybase + MIPI_CSIPHY_LNCK_CFG2_ADDR);
+ msm_camera_io_w(csiphy_params->settle_cnt,
+ csiphybase + MIPI_CSIPHY_LNCK_CFG3_ADDR);
+ msm_camera_io_w(0x24,
+ csiphybase + MIPI_CSIPHY_INTERRUPT_MASK0_ADDR);
+ msm_camera_io_w(0x24,
+ csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR);
+ } else {
+ val = 0x1;
+ msm_camera_io_w((lane_mask << 1) | val,
+ csiphybase + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR);
+ msm_camera_io_w(csiphy_params->combo_mode <<
+ MIPI_CSIPHY_MODE_CONFIG_SHIFT,
+ csiphybase + MIPI_CSIPHY_GLBL_RESET_ADDR);
+ }
+
+ while (lane_mask & 0x1f) {
if (!(lane_mask & 0x1)) {
j++;
lane_mask >>= 1;
@@ -97,66 +85,33 @@
csiphybase + MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*j);
msm_camera_io_w(csiphy_params->settle_cnt,
csiphybase + MIPI_CSIPHY_LNn_CFG3_ADDR + 0x40*j);
- msm_camera_io_w(0x6F,
- csiphybase + MIPI_CSIPHY_INTERRUPT_MASK0_ADDR +
- 0x4*(j+1));
- msm_camera_io_w(0x6F,
- csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR +
- 0x4*(j+1));
+ msm_camera_io_w(MIPI_CSIPHY_INTERRUPT_MASK_VAL, csiphybase +
+ MIPI_CSIPHY_INTERRUPT_MASK_ADDR + 0x4*j);
+ msm_camera_io_w(MIPI_CSIPHY_INTERRUPT_MASK_VAL, csiphybase +
+ MIPI_CSIPHY_INTERRUPT_CLEAR_ADDR + 0x4*j);
j++;
lane_mask >>= 1;
}
- msm_camera_io_w(0x10, csiphybase + MIPI_CSIPHY_LNCK_CFG2_ADDR);
- msm_camera_io_w(csiphy_params->settle_cnt,
- csiphybase + MIPI_CSIPHY_LNCK_CFG3_ADDR);
-
- msm_camera_io_w(0x24,
- csiphybase + MIPI_CSIPHY_INTERRUPT_MASK0_ADDR);
- msm_camera_io_w(0x24,
- csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR);
return rc;
}
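The per-lane programming now walks five mask bits of the accumulated per-PHY lane_mask, with a 0x40 stride for the lane CFG registers and a 0x4 stride for the interrupt mask/clear registers. A sketch of which offsets get touched for a given mask (the mask value and helper are illustrative only):

/* For lane_mask = 0x15 (bits 0, 2 and 4 set) this reports j = 0, 2, 4,
 * i.e. CFG registers at +0x00/+0x80/+0x100 and interrupt mask/clear
 * registers at +0x0/+0x8/+0x10, mirroring the loop above. */
static void example_lane_offsets(uint16_t lane_mask)
{
	int j = 0;

	while (lane_mask & 0x1f) {
		if (lane_mask & 0x1)
			pr_debug("lane %d: CFG at +0x%x, irq regs at +0x%x\n",
				 j, 0x40 * j, 0x4 * j);
		lane_mask >>= 1;
		j++;
	}
}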
static irqreturn_t msm_csiphy_irq(int irq_num, void *data)
{
uint32_t irq;
+ int i;
struct csiphy_device *csiphy_dev = data;
- irq = msm_camera_io_r(
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR);
- msm_camera_io_w(irq,
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR);
- CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS0 = 0x%x\n",
- __func__, csiphy_dev->pdev->id, irq);
-
- irq = msm_camera_io_r(
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS1_ADDR);
- msm_camera_io_w(irq,
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR1_ADDR);
- CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS1 = 0x%x\n",
- __func__, csiphy_dev->pdev->id, irq);
-
- irq = msm_camera_io_r(
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS2_ADDR);
- msm_camera_io_w(irq,
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR2_ADDR);
- CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS2 = 0x%x\n",
- __func__, csiphy_dev->pdev->id, irq);
-
- irq = msm_camera_io_r(
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS3_ADDR);
- msm_camera_io_w(irq,
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR3_ADDR);
- CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS3 = 0x%x\n",
- __func__, csiphy_dev->pdev->id, irq);
-
- irq = msm_camera_io_r(
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_STATUS4_ADDR);
- msm_camera_io_w(irq,
- csiphy_dev->base + MIPI_CSIPHY_INTERRUPT_CLEAR4_ADDR);
- CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS4 = 0x%x\n",
- __func__, csiphy_dev->pdev->id, irq);
+ for (i = 0; i < 5; i++) {
+ irq = msm_camera_io_r(
+ csiphy_dev->base +
+ MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR + 0x4*i);
+ msm_camera_io_w(irq,
+ csiphy_dev->base +
+ MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR + 0x4*i);
+ CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS%d = 0x%x\n",
+ __func__, csiphy_dev->pdev->id, i, irq);
+ }
msm_camera_io_w(0x1, csiphy_dev->base + 0x164);
msm_camera_io_w(0x0, csiphy_dev->base + 0x164);
return IRQ_HANDLED;
@@ -193,9 +148,16 @@
return rc;
}
+ if (csiphy_dev->ref_count++) {
+ CDBG("%s csiphy refcount = %d\n", __func__,
+ csiphy_dev->ref_count);
+ return rc;
+ }
+
csiphy_dev->base = ioremap(csiphy_dev->mem->start,
resource_size(csiphy_dev->mem));
if (!csiphy_dev->base) {
+ csiphy_dev->ref_count--;
rc = -ENOMEM;
return rc;
}
@@ -204,6 +166,7 @@
csiphy_dev->csiphy_clk, ARRAY_SIZE(csiphy_clk_info), 1);
if (rc < 0) {
+ csiphy_dev->ref_count--;
iounmap(csiphy_dev->base);
csiphy_dev->base = NULL;
return rc;
@@ -214,17 +177,51 @@
#endif
msm_csiphy_reset(csiphy_dev);
+ csiphy_dev->hw_version =
+ msm_camera_io_r(csiphy_dev->base + MIPI_CSIPHY_HW_VERSION_ADDR);
+
return 0;
}
-static int msm_csiphy_release(struct v4l2_subdev *sd)
+static int msm_csiphy_release(struct v4l2_subdev *sd, void *arg)
{
struct csiphy_device *csiphy_dev;
- int i;
+ int i = 0;
+ struct msm_camera_csi_lane_params *csi_lane_params;
+ uint16_t csi_lane_mask;
csiphy_dev = v4l2_get_subdevdata(sd);
- for (i = 0; i < 4; i++)
- msm_camera_io_w(0x0, csiphy_dev->base +
- MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*i);
+ csi_lane_params = (struct msm_camera_csi_lane_params *)arg;
+ csi_lane_mask = csi_lane_params->csi_lane_mask;
+
+ if (!csiphy_dev || !csiphy_dev->ref_count) {
+ pr_err("%s csiphy dev NULL / ref_count ZERO\n", __func__);
+ return 0;
+ }
+
+ if (csiphy_dev->hw_version != CSIPHY_VERSION_V3) {
+ csiphy_dev->lane_mask[csiphy_dev->pdev->id] = 0;
+ for (i = 0; i < 4; i++)
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*i);
+ } else {
+ csiphy_dev->lane_mask[csiphy_dev->pdev->id] &=
+ ~(csi_lane_params->csi_lane_mask);
+ i = 0;
+ while (csi_lane_mask & 0x1F) {
+ if (csi_lane_mask & 0x1) {
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*i);
+ }
+ csi_lane_mask >>= 1;
+ i++;
+ }
+ }
+
+ if (--csiphy_dev->ref_count) {
+ CDBG("%s csiphy refcount = %d\n", __func__,
+ csiphy_dev->ref_count);
+ return 0;
+ }
msm_camera_io_w(0x0, csiphy_dev->base + MIPI_CSIPHY_LNCK_CFG2_ADDR);
msm_camera_io_w(0x0, csiphy_dev->base + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR);
@@ -258,7 +255,7 @@
rc = msm_csiphy_init(sd);
break;
case VIDIOC_MSM_CSIPHY_RELEASE:
- rc = msm_csiphy_release(sd);
+ rc = msm_csiphy_release(sd, arg);
break;
default:
pr_err("%s: command not found\n", __func__);
@@ -301,6 +298,10 @@
mutex_init(&new_csiphy_dev->mutex);
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+
new_csiphy_dev->mem = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "csiphy");
if (!new_csiphy_dev->mem) {
@@ -348,11 +349,19 @@
return 0;
}
+static const struct of_device_id msm_csiphy_dt_match[] = {
+ {.compatible = "qcom,csiphy"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_csiphy_dt_match);
+
static struct platform_driver csiphy_driver = {
.probe = csiphy_probe,
.driver = {
.name = MSM_CSIPHY_DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = msm_csiphy_dt_match,
},
};
diff --git a/drivers/media/video/msm/csi/msm_csiphy.h b/drivers/media/video/msm/csi/msm_csiphy.h
index 522a1c1..1fab9c1 100644
--- a/drivers/media/video/msm/csi/msm_csiphy.h
+++ b/drivers/media/video/msm/csi/msm_csiphy.h
@@ -17,6 +17,8 @@
#include <linux/io.h>
#include <media/v4l2-subdev.h>
+#define MAX_CSIPHY 3
+
struct csiphy_device {
struct platform_device *pdev;
struct v4l2_subdev subdev;
@@ -25,8 +27,11 @@
struct resource *io;
void __iomem *base;
struct mutex mutex;
+ uint32_t hw_version;
struct clk *csiphy_clk[2];
+ uint8_t ref_count;
+ uint16_t lane_mask[MAX_CSIPHY];
};
struct csiphy_cfg_params {
@@ -35,12 +40,12 @@
};
#define VIDIOC_MSM_CSIPHY_CFG \
- _IOWR('V', BASE_VIDIOC_PRIVATE + 7, struct csiphy_cfg_params)
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 7, void *)
#define VIDIOC_MSM_CSIPHY_INIT \
_IOWR('V', BASE_VIDIOC_PRIVATE + 8, struct v4l2_subdev*)
#define VIDIOC_MSM_CSIPHY_RELEASE \
- _IOWR('V', BASE_VIDIOC_PRIVATE + 9, struct v4l2_subdev*)
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 9, void *)
#endif
diff --git a/drivers/media/video/msm/gemini/msm_gemini_sync.c b/drivers/media/video/msm/gemini/msm_gemini_sync.c
index ae3de13..e878329 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_sync.c
+++ b/drivers/media/video/msm/gemini/msm_gemini_sync.c
@@ -485,10 +485,12 @@
} else {
buf_p->y_buffer_addr = msm_gemini_platform_v2p(buf_cmd.fd,
buf_cmd.y_len + buf_cmd.cbcr_len, &buf_p->file,
- &buf_p->handle) + buf_cmd.offset;
+ &buf_p->handle) + buf_cmd.offset + buf_cmd.y_off;
}
buf_p->y_len = buf_cmd.y_len;
- buf_p->cbcr_buffer_addr = buf_p->y_buffer_addr + buf_cmd.y_len;
+
+ buf_p->cbcr_buffer_addr = buf_p->y_buffer_addr + buf_cmd.y_len +
+ buf_cmd.cbcr_off;
buf_p->cbcr_len = buf_cmd.cbcr_len;
buf_p->num_of_mcu_rows = buf_cmd.num_of_mcu_rows;
GMN_DBG("%s: y_addr=%x,y_len=%x,cbcr_addr=%x,cbcr_len=%x\n", __func__,
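The gemini buffer setup now folds the caller-supplied y_off and cbcr_off plane offsets into the physical addresses. A sketch of the arithmetic (the helper and sample values are illustrative):

/* With phys = 0x10000000, offset = 0x1000, y_off = 0x100,
 * y_len = 0x20000 and cbcr_off = 0x40 this yields
 * y_addr = 0x10001100 and a chroma start of 0x10021140. */
static unsigned long example_cbcr_addr(unsigned long phys, unsigned long offset,
				       unsigned long y_off, unsigned long y_len,
				       unsigned long cbcr_off)
{
	unsigned long y_addr = phys + offset + y_off;	/* as in the else branch above */

	return y_addr + y_len + cbcr_off;		/* chroma plane start */
}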
diff --git a/drivers/media/video/msm/io/msm_camera_i2c.h b/drivers/media/video/msm/io/msm_camera_i2c.h
index a0cdd77..188a176 100644
--- a/drivers/media/video/msm/io/msm_camera_i2c.h
+++ b/drivers/media/video/msm/io/msm_camera_i2c.h
@@ -53,9 +53,9 @@
struct msm_camera_i2c_reg_conf {
uint16_t reg_addr;
uint16_t reg_data;
- int16_t mask;
enum msm_camera_i2c_data_type dt;
enum msm_camera_i2c_cmd_type cmd_type;
+ int16_t mask;
};
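Moving mask behind dt and cmd_type is why the ov7692 tables further down grow from three to five positional initializers. A hedged sketch of the new initializer order (register values are taken from those tables purely as examples; how the driver applies the mask is not shown in this hunk):

/* Field order is now: reg_addr, reg_data, dt, cmd_type, mask. */
static struct msm_camera_i2c_reg_conf example_conf[] = {
	{0x81, 0x33, 0x00, 0x00, 0xCC},	/* masked write: dt/cmd_type left at 0 */
	{0xd2, 0x02},			/* plain write: remaining fields default to 0 */
};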
struct msm_camera_i2c_reg_tbl {
diff --git a/drivers/media/video/msm/io/msm_io_8960.c b/drivers/media/video/msm/io/msm_io_8960.c
index f9c454a..699425a 100644
--- a/drivers/media/video/msm/io/msm_io_8960.c
+++ b/drivers/media/video/msm/io/msm_io_8960.c
@@ -87,6 +87,14 @@
} else
CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
break;
+ case S_LIVESHOT:
+ if (bus_perf_client) {
+ rc = msm_bus_scale_client_update_request(
+ bus_perf_client, 5);
+ CDBG("%s: S_LIVESHOT rc = %d\n", __func__, rc);
+ } else
+ CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
+ break;
case S_DEFAULT:
break;
default:
diff --git a/drivers/media/video/msm/msm_camirq_router.c b/drivers/media/video/msm/msm_camirq_router.c
index 52dd175..25a561f 100644
--- a/drivers/media/video/msm/msm_camirq_router.c
+++ b/drivers/media/video/msm/msm_camirq_router.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <mach/irqs.h>
#include <media/msm_isp.h>
#include <media/v4l2-device.h>
@@ -207,6 +208,10 @@
v4l2_set_subdevdata(&irqrouter_ctrl->subdev, irqrouter_ctrl);
irqrouter_ctrl->pdev = pdev;
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+
msm_irqrouter_send_default_irqmap(irqrouter_ctrl);
media_entity_init(&irqrouter_ctrl->subdev.entity, 0, NULL, 0);
@@ -237,16 +242,27 @@
static int __exit irqrouter_exit(struct platform_device *pdev)
{
+ struct v4l2_subdev *subdev = dev_get_drvdata(&pdev->dev);
+ struct irqrouter_ctrl_type *irqrouter_ctrl =
+ v4l2_get_subdevdata(subdev);
kfree(irqrouter_ctrl);
return 0;
}
+static const struct of_device_id msm_irqrouter_dt_match[] = {
+ {.compatible = "qcom,irqrouter"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_irqrouter_dt_match);
+
static struct platform_driver msm_irqrouter_driver = {
.probe = irqrouter_probe,
.remove = irqrouter_exit,
.driver = {
.name = MSM_IRQ_ROUTER_DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = msm_irqrouter_dt_match,
},
};
diff --git a/drivers/media/video/msm/msm_mctl.c b/drivers/media/video/msm/msm_mctl.c
index be6c543..be4cb41 100644
--- a/drivers/media/video/msm/msm_mctl.c
+++ b/drivers/media/video/msm/msm_mctl.c
@@ -660,7 +660,8 @@
csid_init_failed:
if (p_mctl->csiphy_sdev)
if (v4l2_subdev_call(p_mctl->csiphy_sdev, core, ioctl,
- VIDIOC_MSM_CSIPHY_RELEASE, NULL) < 0)
+ VIDIOC_MSM_CSIPHY_RELEASE,
+ sinfo->sensor_platform_info->csi_lane_params) < 0)
pr_err("%s: csiphy release failed %d\n", __func__, rc);
csiphy_init_failed:
if (p_mctl->act_sdev)
@@ -714,7 +715,8 @@
if (p_mctl->csiphy_sdev) {
v4l2_subdev_call(p_mctl->csiphy_sdev, core, ioctl,
- VIDIOC_MSM_CSIPHY_RELEASE, NULL);
+ VIDIOC_MSM_CSIPHY_RELEASE,
+ sinfo->sensor_platform_info->csi_lane_params);
}
if (p_mctl->act_sdev) {
diff --git a/drivers/media/video/msm/msm_vfe31.h b/drivers/media/video/msm/msm_vfe31.h
index 1d66621..bec5f58 100644
--- a/drivers/media/video/msm/msm_vfe31.h
+++ b/drivers/media/video/msm/msm_vfe31.h
@@ -325,8 +325,8 @@
#define V31_OPERATION_CFG_LEN 32
#define V31_AXI_OUT_OFF 0x00000038
-#define V31_AXI_OUT_LEN 212
-#define V31_AXI_CH_INF_LEN 24
+#define V31_AXI_OUT_LEN 220
+#define V31_AXI_CH_INF_LEN 32
#define V31_AXI_CFG_LEN 47
#define V31_FRAME_SKIP_OFF 0x00000504
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/msm_vfe32.c
index c6dd143..9343c21 100644
--- a/drivers/media/video/msm/msm_vfe32.c
+++ b/drivers/media/video/msm/msm_vfe32.c
@@ -956,6 +956,8 @@
vfe32_ctrl->share_ctrl->vfe_capture_count =
vfe32_ctrl->share_ctrl->outpath.out0.capture_cnt;
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_LIVESHOT);
vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_START_REQUESTED;
msm_camera_io_w_mb(1, vfe32_ctrl->
share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
@@ -968,6 +970,8 @@
vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_STOP_REQUESTED;
msm_camera_io_w_mb(1,
vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
}
static int vfe32_zsl(
@@ -3762,6 +3766,8 @@
/* spin_lock_irqsave(&ctrl->state_lock, flags); */
struct isp_msg_stats msgStats;
msgStats.frameCounter = vfe32_ctrl->share_ctrl->vfeFrameId;
+ if (vfe32_ctrl->simultaneous_sof_stat)
+ msgStats.frameCounter--;
msgStats.buffer = bufAddress;
switch (statsNum) {
case statsAeNum:{
@@ -3844,6 +3850,9 @@
uint32_t temp;
msgStats.frame_id = vfe32_ctrl->share_ctrl->vfeFrameId;
+ if (vfe32_ctrl->simultaneous_sof_stat)
+ msgStats.frame_id--;
+
msgStats.status_bits = status_bits;
msgStats.aec.buff = vfe32_ctrl->aecStatsControl.bufToRender;
@@ -4204,7 +4213,9 @@
{
unsigned long flags;
struct axi_ctrl_t *axi_ctrl = (struct axi_ctrl_t *)data;
+ struct vfe32_ctrl_type *vfe32_ctrl = axi_ctrl->share_ctrl->vfe32_ctrl;
struct vfe32_isr_queue_cmd *qcmd = NULL;
+ int stat_interrupt;
CDBG("=== axi32_do_tasklet start ===\n");
@@ -4224,11 +4235,32 @@
spin_unlock_irqrestore(&axi_ctrl->tasklet_lock,
flags);
+ if (axi_ctrl->share_ctrl->stats_comp) {
+ stat_interrupt = (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK);
+ } else {
+ stat_interrupt =
+ (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_AEC) |
+ (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_AWB) |
+ (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_AF) |
+ (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_IHIST) |
+ (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_RS) |
+ (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_CS);
+ }
if (qcmd->vfeInterruptStatus0 &
- VFE_IRQ_STATUS0_CAMIF_SOF_MASK)
+ VFE_IRQ_STATUS0_CAMIF_SOF_MASK) {
+ if (stat_interrupt)
+ vfe32_ctrl->simultaneous_sof_stat = 1;
v4l2_subdev_notify(&axi_ctrl->subdev,
NOTIFY_VFE_IRQ,
(void *)VFE_IRQ_STATUS0_CAMIF_SOF_MASK);
+ }
/* interrupt to be processed, *qcmd has the payload. */
if (qcmd->vfeInterruptStatus0 &
@@ -4335,6 +4367,7 @@
(void *)VFE_IRQ_STATUS0_SYNC_TIMER2);
}
}
+ vfe32_ctrl->simultaneous_sof_stat = 0;
kfree(qcmd);
}
CDBG("=== axi32_do_tasklet end ===\n");
@@ -5297,6 +5330,13 @@
axi_ctrl->fs_vfe = NULL;
}
+ /* Register subdev node before requesting irq since
+ * irq_num is needed by msm_cam_server */
+ sd_info.sdev_type = VFE_DEV;
+ sd_info.sd_index = 0;
+ sd_info.irq_num = axi_ctrl->vfeirq->start;
+ msm_cam_register_subdev_node(&vfe32_ctrl->subdev, &sd_info);
+
/* Request for this device irq from the camera server. If the
* IRQ Router is present on this target, the interrupt will be
* handled by the camera server and the interrupt service
@@ -5336,10 +5376,6 @@
axi32_do_tasklet, (unsigned long)axi_ctrl);
vfe32_ctrl->pdev = pdev;
- sd_info.sdev_type = VFE_DEV;
- sd_info.sd_index = 0;
- sd_info.irq_num = axi_ctrl->vfeirq->start;
- msm_cam_register_subdev_node(&vfe32_ctrl->subdev, &sd_info);
return 0;
vfe32_no_resource:
diff --git a/drivers/media/video/msm/msm_vfe32.h b/drivers/media/video/msm/msm_vfe32.h
index 542bbf8..2c528da 100644
--- a/drivers/media/video/msm/msm_vfe32.h
+++ b/drivers/media/video/msm/msm_vfe32.h
@@ -1006,6 +1006,8 @@
uint32_t snapshot_frame_cnt;
struct msm_stats_bufq_ctrl stats_ctrl;
struct msm_stats_ops stats_ops;
+
+ uint32_t simultaneous_sof_stat;
};
#define statsAeNum 0
diff --git a/drivers/media/video/msm/sensors/ov7692_v4l2.c b/drivers/media/video/msm/sensors/ov7692_v4l2.c
index 6fc1da1..71d436e 100644
--- a/drivers/media/video/msm/sensors/ov7692_v4l2.c
+++ b/drivers/media/video/msm/sensors/ov7692_v4l2.c
@@ -173,24 +173,33 @@
};
static struct msm_camera_i2c_reg_conf ov7692_saturation[][4] = {
- {{0x81, 0x33, 0xCC}, {0xd8, 0x00, 0x00}, {0xd9, 0x00, 0x00},
- {0xd2, 0x02, 0x00},}, /* SATURATION LEVEL0*/
- {{0x81, 0x33, 0xCC}, {0xd8, 0x10, 0x00}, {0xd9, 0x10, 0x00},
- {0xd2, 0x02, 0x00},}, /* SATURATION LEVEL1*/
- {{0x81, 0x33, 0xCC}, {0xd8, 0x20, 0x00}, {0xd9, 0x20, 0x00},
- {0xd2, 0x02, 0x00},}, /* SATURATION LEVEL2*/
- {{0x81, 0x33, 0xCC}, {0xd8, 0x30, 0x00}, {0xd9, 0x30, 0x00},
- {0xd2, 0x02, 0x00},}, /* SATURATION LEVEL3*/
- {{0x81, 0x33, 0xCC}, {0xd8, 0x40, 0x00}, {0xd9, 0x40, 0x00},
- {0xd2, 0x02, 0x00},}, /* SATURATION LEVEL4*/
- {{0x81, 0x33, 0xCC}, {0xd8, 0x50, 0x00}, {0xd9, 0x50, 0x00},
- {0xd2, 0x02, 0x00},}, /* SATURATION LEVEL5*/
- {{0x81, 0x33, 0xCC}, {0xd8, 0x60, 0x00}, {0xd9, 0x60, 0x00},
- {0xd2, 0x02, 0x00},}, /* SATURATION LEVEL6*/
- {{0x81, 0x33, 0xCC}, {0xd8, 0x70, 0x00}, {0xd9, 0x70, 0x00},
- {0xd2, 0x02, 0x00},}, /* SATURATION LEVEL7*/
- {{0x81, 0x33, 0xCC}, {0xd8, 0x80, 0x00}, {0xd9, 0x80, 0x00},
- {0xd2, 0x02, 0x00},}, /* SATURATION LEVEL8*/
+ {{0x81, 0x33, 0x00, 0x00, 0xCC}, {0xd8, 0x00, 0x00, 0x00, 0x00},
+ {0xd9, 0x00, 0x00, 0x00, 0x00},
+ {0xd2, 0x02, 0x00, 0x00, 0x00},},/* SATURATION LEVEL0*/
+ {{0x81, 0x33, 0x00, 0x00, 0xCC}, {0xd8, 0x10, 0x00, 0x00, 0x00},
+ {0xd9, 0x10, 0x00, 0x00, 0x00},
+ {0xd2, 0x02, 0x00, 0x00, 0x00},}, /* SATURATION LEVEL1*/
+ {{0x81, 0x33, 0x00, 0x00, 0xCC}, {0xd8, 0x20, 0x00, 0x00, 0x00},
+ {0xd9, 0x20, 0x00, 0x00, 0x00},
+ {0xd2, 0x02, 0x00, 0x00, 0x00},}, /* SATURATION LEVEL2*/
+ {{0x81, 0x33, 0x00, 0x00, 0xCC}, {0xd8, 0x30, 0x00, 0x00, 0x00},
+ {0xd9, 0x30, 0x00, 0x00, 0x00},
+ {0xd2, 0x02, 0x00, 0x00, 0x00},}, /* SATURATION LEVEL3*/
+ {{0x81, 0x33, 0x00, 0x00, 0xCC}, {0xd8, 0x40, 0x00, 0x00, 0x00},
+ {0xd9, 0x40, 0x00, 0x00, 0x00},
+ {0xd2, 0x02, 0x00, 0x00, 0x00},}, /* SATURATION LEVEL4*/
+ {{0x81, 0x33, 0x00, 0x00, 0xCC}, {0xd8, 0x50, 0x00, 0x00, 0x00},
+ {0xd9, 0x50, 0x00, 0x00, 0x00},
+ {0xd2, 0x02, 0x00, 0x00, 0x00},}, /* SATURATION LEVEL5*/
+ {{0x81, 0x33, 0x00, 0x00, 0xCC}, {0xd8, 0x60, 0x00, 0x00, 0x00},
+ {0xd9, 0x60, 0x00, 0x00, 0x00},
+ {0xd2, 0x02, 0x00, 0x00, 0x00},}, /* SATURATION LEVEL6*/
+ {{0x81, 0x33, 0x00, 0x00, 0xCC}, {0xd8, 0x70, 0x00, 0x00, 0x00},
+ {0xd9, 0x70, 0x00, 0x00, 0x00},
+ {0xd2, 0x02, 0x00, 0x00, 0x00},}, /* SATURATION LEVEL7*/
+ {{0x81, 0x33, 0x00, 0x00, 0xCC}, {0xd8, 0x80, 0x00, 0x00, 0x00},
+ {0xd9, 0x80, 0x00, 0x00, 0x00},
+ {0xd2, 0x02, 0x00, 0x00, 0x00},}, /* SATURATION LEVEL8*/
};
static struct msm_camera_i2c_conf_array ov7692_saturation_confs[][1] = {
{{ov7692_saturation[0], ARRAY_SIZE(ov7692_saturation[0]), 0,
@@ -327,12 +336,18 @@
.data_type = MSM_CAMERA_I2C_BYTE_DATA,
};
static struct msm_camera_i2c_reg_conf ov7692_sharpness[][2] = {
- {{0xb4, 0x20, 0xDF}, {0xb6, 0x00, 0xE0},}, /* SHARPNESS LEVEL 0*/
- {{0xb4, 0x20, 0xDF}, {0xb6, 0x01, 0xE0},}, /* SHARPNESS LEVEL 1*/
- {{0xb4, 0x00, 0xDF}, {0xb6, 0x00, 0xE0},}, /* SHARPNESS LEVEL 2*/
- {{0xb4, 0x20, 0xDF}, {0xb6, 0x66, 0xE0},}, /* SHARPNESS LEVEL 3*/
- {{0xb4, 0x20, 0xDF}, {0xb6, 0x99, 0xE0},}, /* SHARPNESS LEVEL 4*/
- {{0xb4, 0x20, 0xDF}, {0xb6, 0xcc, 0xE0},}, /* SHARPNESS LEVEL 5*/
+ {{0xb4, 0x20, 0x00, 0x00, 0xDF},
+ {0xb6, 0x00, 0x00, 0x00, 0xE0},}, /* SHARPNESS LEVEL 0*/
+ {{0xb4, 0x20, 0x00, 0x00, 0xDF},
+ {0xb6, 0x01, 0x00, 0x00, 0xE0},}, /* SHARPNESS LEVEL 1*/
+ {{0xb4, 0x00, 0x00, 0x00, 0xDF},
+ {0xb6, 0x00, 0x00, 0x00, 0xE0},}, /* SHARPNESS LEVEL 2*/
+ {{0xb4, 0x20, 0x00, 0x00, 0xDF},
+ {0xb6, 0x66, 0x00, 0x00, 0xE0},}, /* SHARPNESS LEVEL 3*/
+ {{0xb4, 0x20, 0x00, 0x00, 0xDF},
+ {0xb6, 0x99, 0x00, 0x00, 0xE0},}, /* SHARPNESS LEVEL 4*/
+ {{0xb4, 0x20, 0x00, 0x00, 0xDF},
+ {0xb6, 0xcc, 0x00, 0x00, 0xE0},}, /* SHARPNESS LEVEL 5*/
};
static struct msm_camera_i2c_conf_array ov7692_sharpness_confs[][1] = {
@@ -407,13 +422,13 @@
};
static struct msm_camera_i2c_reg_conf ov7692_iso[][1] = {
- {{0x14, 0x20, 0x8F},}, /*ISO_AUTO*/
- {{0x14, 0x20, 0x8F},}, /*ISO_DEBLUR*/
- {{0x14, 0x00, 0x8F},}, /*ISO_100*/
- {{0x14, 0x10, 0x8F},}, /*ISO_200*/
- {{0x14, 0x20, 0x8F},}, /*ISO_400*/
- {{0x14, 0x30, 0x8F},}, /*ISO_800*/
- {{0x14, 0x40, 0x8F},}, /*ISO_1600*/
+ {{0x14, 0x20, 0x00, 0x00, 0x8F},}, /*ISO_AUTO*/
+ {{0x14, 0x20, 0x00, 0x00, 0x8F},}, /*ISO_DEBLUR*/
+ {{0x14, 0x00, 0x00, 0x00, 0x8F},}, /*ISO_100*/
+ {{0x14, 0x10, 0x00, 0x00, 0x8F},}, /*ISO_200*/
+ {{0x14, 0x20, 0x00, 0x00, 0x8F},}, /*ISO_400*/
+ {{0x14, 0x30, 0x00, 0x00, 0x8F},}, /*ISO_800*/
+ {{0x14, 0x40, 0x00, 0x00, 0x8F},}, /*ISO_1600*/
};
@@ -453,7 +468,7 @@
};
static struct msm_camera_i2c_reg_conf ov7692_no_effect[] = {
- {0x81, 0x00, 0xDF},
+ {0x81, 0x00, 0x00, 0x00, 0xDF},
{0x28, 0x00,},
{0xd2, 0x00,},
{0xda, 0x80,},
@@ -467,32 +482,41 @@
};
static struct msm_camera_i2c_reg_conf ov7692_special_effect[][5] = {
- {{0x81, 0x20, 0xDF}, {0x28, 0x00,}, {0xd2, 0x18,}, {0xda, 0x80,},
- {0xdb, 0x80,},}, /*for special effect OFF*/
- {{0x81, 0x20, 0xDF}, {0x28, 0x00,}, {0xd2, 0x18,}, {0xda, 0x80,},
- {0xdb, 0x80,},}, /*for special effect MONO*/
- {{0x81, 0x20, 0xDF}, {0x28, 0x80,}, {0xd2, 0x40,}, {0xda, 0x80,},
- {0xdb, 0x80,},}, /*for special efefct Negative*/
- {{-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1},
- {-1, -1, -1},}, /*Solarize is not supported by sensor*/
- {{0x81, 0x20, 0xDF}, {0x28, 0x00,}, {0xd2, 0x18,}, {0xda, 0x40,},
- {0xdb, 0xa0,},}, /*for sepia*/
- {{-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1},
- {-1, -1, -1},}, /* Posteraize not supported */
- {{-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1},
- {-1, -1, -1},}, /* White board not supported*/
- {{-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1},
- {-1, -1, -1},}, /*Blackboard not supported*/
- {{-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1},
- {-1, -1, -1},}, /*Aqua not supported*/
- {{-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1},
- {-1, -1, -1},}, /*Emboss not supported */
- {{-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1},
- {-1, -1, -1},}, /*sketch not supported*/
- {{-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1},
- {-1, -1, -1},}, /*Neon not supported*/
- {{-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1},
- {-1, -1, -1},}, /*MAX value*/
+ {{0x81, 0x20, 0x00, 0x00, 0xDF}, {0x28, 0x00,}, {0xd2, 0x18,},
+ {0xda, 0x80,}, {0xdb, 0x80,},}, /*for special effect OFF*/
+ {{0x81, 0x20, 0x00, 0x00, 0xDF}, {0x28, 0x00,}, {0xd2, 0x18,},
+ {0xda, 0x80,}, {0xdb, 0x80,},}, /*for special effect MONO*/
+ {{0x81, 0x20, 0x00, 0x00, 0xDF}, {0x28, 0x80,}, {0xd2, 0x40,},
+ {0xda, 0x80,}, {0xdb, 0x80,},}, /*for special effect Negative*/
+ {{-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},},/*Solarize is not supported by sensor*/
+ {{0x81, 0x20, 0x00, 0x00, 0xDF}, {0x28, 0x00,}, {0xd2, 0x18,},
+ {0xda, 0x40,}, {0xdb, 0xa0,},}, /*for sepia*/
+ {{-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},}, /* Posterize not supported */
+ {{-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},}, /* White board not supported*/
+ {{-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},}, /*Blackboard not supported*/
+ {{-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},}, /*Aqua not supported*/
+ {{-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},}, /*Emboss not supported */
+ {{-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},}, /*sketch not supported*/
+ {{-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},}, /*Neon not supported*/
+ {{-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},}, /*MAX value*/
};
static struct msm_camera_i2c_conf_array ov7692_special_effect_confs[][1] = {
@@ -551,9 +575,12 @@
};
static struct msm_camera_i2c_reg_conf ov7692_antibanding[][2] = {
- {{0x13, 0x20, 0xDF}, {0x14, 0x16, 0xE8},}, /*ANTIBANDING 60HZ*/
- {{0x13, 0x20, 0xDF}, {0x14, 0x17, 0xE8},}, /*ANTIBANDING 50HZ*/
- {{0x13, 0x20, 0xDF}, {0x14, 0x14, 0xE8},}, /* ANTIBANDING AUTO*/
+ {{0x13, 0x20, 0x00, 0x00, 0xDF},
+ {0x14, 0x16, 0x00, 0x00, 0xE8},}, /*ANTIBANDING 60HZ*/
+ {{0x13, 0x20, 0x00, 0x00, 0xDF},
+ {0x14, 0x17, 0x00, 0x00, 0xE8},}, /*ANTIBANDING 50HZ*/
+ {{0x13, 0x20, 0x00, 0x00, 0xDF},
+ {0x14, 0x14, 0x00, 0x00, 0xE8},}, /* ANTIBANDING AUTO*/
};
@@ -583,16 +610,16 @@
};
static struct msm_camera_i2c_reg_conf ov7692_wb_oem[][4] = {
- {{-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1},
- {-1, -1, -1},},/*WHITEBALNACE OFF*/
- {{0x13, 0xf7}, {0x15, 0x00}, {-1, -1, -1},
- {-1, -1, -1},}, /*WHITEBALNACE AUTO*/
+ {{-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},},/*WHITEBALANCE OFF*/
+ {{0x13, 0xf7}, {0x15, 0x00}, {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},}, /*WHITEBALANCE AUTO*/
{{0x13, 0xf5}, {0x01, 0x56}, {0x02, 0x50},
{0x15, 0x00},}, /*WHITEBALNACE CUSTOM*/
{{0x13, 0xf5}, {0x01, 0x66}, {0x02, 0x40},
{0x15, 0x00},}, /*INCANDISCENT*/
- {{-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1},
- {-1, -1, -1},}, /*FLOURESECT NOT SUPPORTED */
+ {{-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1}, {-1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1},}, /*FLUORESCENT NOT SUPPORTED */
{{0x13, 0xf5}, {0x01, 0x43}, {0x02, 0x5d},
{0x15, 0x00},}, /*DAYLIGHT*/
{{0x13, 0xf5}, {0x01, 0x48}, {0x02, 0x63},
diff --git a/drivers/media/video/msm/server/msm_cam_server.c b/drivers/media/video/msm/server/msm_cam_server.c
index 05f3c4a..2d3022f 100644
--- a/drivers/media/video/msm/server/msm_cam_server.c
+++ b/drivers/media/video/msm/server/msm_cam_server.c
@@ -1703,10 +1703,10 @@
sizeof(struct intr_table_entry));
D("%s Saving Entry %d %d %d %p",
__func__,
- ind_irq_tbl[irq_req->cam_hw_idx].irq_num,
- ind_irq_tbl[irq_req->cam_hw_idx].cam_hw_idx,
- ind_irq_tbl[irq_req->cam_hw_idx].is_composite,
- ind_irq_tbl[irq_req->cam_hw_idx].subdev_list[0]);
+ ind_irq_tbl[irq_req->irq_idx].irq_num,
+ ind_irq_tbl[irq_req->irq_idx].cam_hw_idx,
+ ind_irq_tbl[irq_req->irq_idx].is_composite,
+ ind_irq_tbl[irq_req->irq_idx].subdev_list[0]);
spin_unlock_irqrestore(&g_server_dev.intr_table_lock,
flags);
@@ -1859,10 +1859,16 @@
break;
case ISPIF_DEV:
+ if (index >= MAX_NUM_ISPIF_DEV) {
+ pr_err("%s Invalid ISPIF idx %d", __func__, index);
+ err = -EINVAL;
+ break;
+ }
+ cam_hw_idx = MSM_CAM_HW_ISPIF + index;
g_server_dev.ispif_device = sd;
if (g_server_dev.irqr_device) {
g_server_dev.subdev_table[cam_hw_idx] = sd;
- err = msm_cam_server_fill_sdev_irqnum(MSM_CAM_HW_ISPIF,
+ err = msm_cam_server_fill_sdev_irqnum(cam_hw_idx,
sd_info->irq_num);
}
break;
diff --git a/drivers/media/video/msm_vidc/msm_vdec.c b/drivers/media/video/msm_vidc/msm_vdec.c
index 09c7215..6fe196e 100644
--- a/drivers/media/video/msm_vidc/msm_vdec.c
+++ b/drivers/media/video/msm_vidc/msm_vdec.c
@@ -154,7 +154,11 @@
static u32 get_frame_size_nv12(int plane,
u32 height, u32 width)
{
- return (ALIGN(height, 32) * ALIGN(width, 32) * 3) / 2;
+ int luma_stride = ALIGN(width, 32);
+ int luma_slice = ALIGN(height, 32);
+ int chroma_stride = ALIGN(roundup(width, 2)/2, 32);
+ int chroma_slice = ALIGN(roundup(height, 2)/2, 32);
+ return (luma_stride * luma_slice) + (chroma_stride * chroma_slice) * 2;
}
static u32 get_frame_size_nv21(int plane,
u32 height, u32 width)
@@ -165,7 +169,7 @@
static u32 get_frame_size_compressed(int plane,
u32 height, u32 width)
{
- return 0x500000;
+ return height * width * 3/2;
}
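A worked example of the new sizing, assuming a 1920x1080 frame: the luma plane is 1920 x 1088 = 2,088,960 bytes and the chroma plane is ALIGN(960, 32) x ALIGN(540, 32) x 2 = 960 x 544 x 2 = 1,044,480 bytes, 3,133,440 bytes total, while the compressed-port estimate becomes 1920 * 1080 * 3/2 = 3,110,400 bytes instead of the fixed 0x500000. A sketch mirroring the arithmetic (ALIGN and roundup as in the kernel headers):

/* Illustration only: same arithmetic as get_frame_size_nv12() above. */
static uint32_t example_nv12_size(uint32_t width, uint32_t height)
{
	uint32_t luma   = ALIGN(width, 32) * ALIGN(height, 32);
	uint32_t chroma = ALIGN(roundup(width, 2) / 2, 32) *
			  ALIGN(roundup(height, 2) / 2, 32) * 2;

	return luma + chroma;	/* 1920x1080 -> 2088960 + 1044480 = 3133440 */
}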
static const struct msm_vidc_format vdec_formats[] = {
@@ -218,6 +222,14 @@
.type = OUTPUT_PORT,
},
{
+ .name = "VP8",
+ .description = "VP8 compressed format",
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .num_planes = 1,
+ .get_frame_size = get_frame_size_compressed,
+ .type = OUTPUT_PORT,
+ },
+ {
.name = "YCrCb Semiplanar 4:2:0",
.description = "Y/CrCb 4:2:0",
.fourcc = V4L2_PIX_FMT_NV21,
@@ -607,96 +619,20 @@
unsigned long flags;
struct vb2_buf_entry *temp;
struct list_head *ptr, *next;
- struct v4l2_control control;
- struct hal_nal_stream_format_supported stream_format;
- struct hal_enable_picture enable_picture;
- struct hal_enable hal_property;
- u32 control_idx = 0;
- enum hal_property property_id = 0;
- u32 property_val = 0;
- void *pdata;
+ rc = msm_comm_try_get_bufreqs(inst);
+ if (rc) {
+ pr_err("Failed to get buffer requirements : %d\n", rc);
+ goto fail_start;
+ }
rc = msm_comm_set_scratch_buffers(inst);
if (rc) {
pr_err("Failed to set scratch buffers: %d\n", rc);
goto fail_start;
}
- for (; control_idx < NUM_CTRLS; control_idx++) {
- control.id = msm_vdec_ctrls[control_idx].id;
- rc = v4l2_g_ctrl(&inst->ctrl_handler, &control);
- if (rc) {
- pr_err("Failed to get control value for ID=%d\n",
- msm_vdec_ctrls[control_idx].id);
- } else {
- property_id = 0;
- switch (control.id) {
- case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT:
- property_id =
- HAL_PARAM_NAL_STREAM_FORMAT_SELECT;
- stream_format.nal_stream_format_supported =
- (0x00000001 << control.value);
- pdata = &stream_format;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_OUTPUT_ORDER:
- property_id = HAL_PARAM_VDEC_OUTPUT_ORDER;
- property_val = control.value;
- pdata = &property_val;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_PICTURE_TYPE:
- property_id =
- HAL_PARAM_VDEC_PICTURE_TYPE_DECODE;
- enable_picture.picture_type = control.value;
- pdata = &enable_picture;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_KEEP_ASPECT_RATIO:
- property_id =
- HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO;
- hal_property.enable = control.value;
- pdata = &hal_property;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_POST_LOOP_DEBLOCKER_MODE:
- property_id =
- HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
- hal_property.enable = control.value;
- pdata = &hal_property;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_DIVX_FORMAT:
- property_id = HAL_PARAM_DIVX_FORMAT;
- property_val = control.value;
- pdata = &property_val;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_MB_ERROR_MAP_REPORTING:
- property_id =
- HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING;
- hal_property.enable = control.value;
- pdata = &hal_property;
- break;
- case V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER:
- property_id =
- HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
- hal_property.enable = control.value;
- pdata = &hal_property;
- break;
- default:
- break;
- }
- if (property_id) {
- pr_err("Control: HAL property=%x,ctrl_id=%x,ctrl_value=%d\n",
- property_id,
- msm_vdec_ctrls[control_idx].id,
- control.value);
- rc = vidc_hal_session_set_property((void *)
- inst->session, property_id,
- pdata);
- }
- if (rc)
- pr_err("Failed to set hal property for framesize\n");
- }
- }
-
rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
if (rc) {
pr_err("Failed to move inst: %p to start done state\n",
- inst);
+ inst);
goto fail_start;
}
spin_lock_irqsave(&inst->lock, flags);
@@ -812,7 +748,94 @@
static int msm_vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
{
- return 0;
+ int rc = 0;
+ struct v4l2_control control;
+ struct hal_nal_stream_format_supported stream_format;
+ struct hal_enable_picture enable_picture;
+ struct hal_enable hal_property;
+ u32 control_idx = 0;
+ enum hal_property property_id = 0;
+ u32 property_val = 0;
+ void *pdata;
+ struct msm_vidc_inst *inst = container_of(ctrl->handler,
+ struct msm_vidc_inst, ctrl_handler);
+ rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
+
+ if (rc) {
+ pr_err("Failed to move inst: %p to start done state\n",
+ inst);
+ goto failed_open_done;
+ }
+
+ control.id = ctrl->id;
+ control.value = ctrl->val;
+
+ switch (control.id) {
+ case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_FORMAT:
+ property_id =
+ HAL_PARAM_NAL_STREAM_FORMAT_SELECT;
+ stream_format.nal_stream_format_supported =
+ (0x00000001 << control.value);
+ pdata = &stream_format;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_OUTPUT_ORDER:
+ property_id = HAL_PARAM_VDEC_OUTPUT_ORDER;
+ property_val = control.value;
+ pdata = &property_val;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_ENABLE_PICTURE_TYPE:
+ property_id =
+ HAL_PARAM_VDEC_PICTURE_TYPE_DECODE;
+ enable_picture.picture_type = control.value;
+ pdata = &enable_picture;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_KEEP_ASPECT_RATIO:
+ property_id =
+ HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO;
+ hal_property.enable = control.value;
+ pdata = &hal_property;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_POST_LOOP_DEBLOCKER_MODE:
+ property_id =
+ HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
+ hal_property.enable = control.value;
+ pdata = &hal_property;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_DIVX_FORMAT:
+ property_id = HAL_PARAM_DIVX_FORMAT;
+ property_val = control.value;
+ pdata = &property_val;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_MB_ERROR_MAP_REPORTING:
+ property_id =
+ HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING;
+ hal_property.enable = control.value;
+ pdata = &hal_property;
+ break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER:
+ property_id =
+ HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
+ hal_property.enable = control.value;
+ pdata = &hal_property;
+ break;
+ default:
+ break;
+ }
+ if (property_id) {
+ pr_debug("Control: HAL property=%d,ctrl_id=%d,ctrl_value=%d\n",
+ property_id,
+ msm_vdec_ctrls[control_idx].id,
+ control.value);
+ rc = vidc_hal_session_set_property((void *)
+ inst->session, property_id,
+ pdata);
+ }
+ if (rc)
+ pr_err("Failed to set hal property for framesize\n");
+
+failed_open_done:
+
+ return rc;
}
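With control handling moved into the s_ctrl op, each control is translated to its HAL property as soon as userspace sets it (after nudging the session to OPEN_DONE), instead of being replayed at stream start. A rough userspace-side sketch (standard V4L2 ioctl; the fd, the value and the header providing the msm-specific control ID are assumptions):

/* Hypothetical userspace snippet: set the decoder output order, one of
 * the controls handled in the switch above. */
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int example_set_output_order(int fd)
{
	struct v4l2_control ctrl = {
		.id	= V4L2_CID_MPEG_VIDC_VIDEO_OUTPUT_ORDER,
		.value	= 1,	/* exact enum value is driver-defined */
	};

	return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}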
static int msm_vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
diff --git a/drivers/media/video/msm_vidc/msm_venc.c b/drivers/media/video/msm_vidc/msm_venc.c
index e835aaa..e3833ce 100644
--- a/drivers/media/video/msm_vidc/msm_venc.c
+++ b/drivers/media/video/msm_vidc/msm_venc.c
@@ -29,6 +29,7 @@
#define MIN_FRAME_RATE 1
#define MAX_FRAME_RATE 120
#define DEFAULT_FRAME_RATE 30
+#define DEFAULT_IR_MBS 30
#define MAX_SLICE_BYTE_SIZE 1024
#define MIN_SLICE_BYTE_SIZE 1024
#define MAX_SLICE_MB_SIZE 300
@@ -417,6 +418,9 @@
static struct hal_multi_slice_control
venc_multi_slice_control = {HAL_MULTI_SLICE_OFF ,
0};
+static struct hal_intra_refresh
+ venc_intra_refresh = {HAL_INTRA_REFRESH_NONE,
+ DEFAULT_IR_MBS, DEFAULT_IR_MBS, DEFAULT_IR_MBS};
static const struct msm_vidc_format venc_formats[] = {
{
@@ -452,6 +456,14 @@
.type = CAPTURE_PORT,
},
{
+ .name = "VP8",
+ .description = "VP8 compressed format",
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .num_planes = 1,
+ .get_frame_size = get_frame_size_compressed,
+ .type = CAPTURE_PORT,
+ },
+ {
.name = "YCrCb Semiplanar 4:2:0",
.description = "Y/CrCb 4:2:0",
.fourcc = V4L2_PIX_FMT_NV21,
@@ -948,24 +960,40 @@
property_id =
HAL_PARAM_VENC_INTRA_REFRESH;
intra_refresh.mode = control.value;
+ intra_refresh.air_mbs = venc_intra_refresh.air_mbs;
+ intra_refresh.air_ref = venc_intra_refresh.air_ref;
+ intra_refresh.cir_mbs = venc_intra_refresh.cir_mbs;
+ venc_intra_refresh.mode = intra_refresh.mode;
pdata = &intra_refresh;
break;
case V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS:
property_id =
HAL_PARAM_VENC_INTRA_REFRESH;
intra_refresh.air_mbs = control.value;
+ intra_refresh.mode = venc_intra_refresh.mode;
+ intra_refresh.air_ref = venc_intra_refresh.air_ref;
+ intra_refresh.cir_mbs = venc_intra_refresh.cir_mbs;
+ venc_intra_refresh.air_mbs = control.value;
pdata = &intra_refresh;
break;
case V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF:
property_id =
HAL_PARAM_VENC_INTRA_REFRESH;
intra_refresh.air_ref = control.value;
+ intra_refresh.air_mbs = venc_intra_refresh.air_mbs;
+ intra_refresh.mode = venc_intra_refresh.mode;
+ intra_refresh.cir_mbs = venc_intra_refresh.cir_mbs;
+ venc_intra_refresh.air_ref = control.value;
pdata = &intra_refresh;
break;
case V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS:
property_id =
HAL_PARAM_VENC_INTRA_REFRESH;
intra_refresh.cir_mbs = control.value;
+ intra_refresh.air_mbs = venc_intra_refresh.air_mbs;
+ intra_refresh.air_ref = venc_intra_refresh.air_ref;
+ intra_refresh.mode = venc_intra_refresh.mode;
+ venc_intra_refresh.cir_mbs = control.value;
pdata = &intra_refresh;
break;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
diff --git a/drivers/media/video/msm_vidc/msm_vidc.c b/drivers/media/video/msm_vidc/msm_vidc.c
index 11fbcf4..28fe2b8 100644
--- a/drivers/media/video/msm_vidc/msm_vidc.c
+++ b/drivers/media/video/msm_vidc/msm_vidc.c
@@ -29,19 +29,16 @@
struct vb2_buffer *out_vb = NULL;
struct vb2_buffer *cap_vb = NULL;
unsigned long flags;
- poll_wait(filp, &inst->event_handler.wait, wait);
- if (v4l2_event_pending(&inst->event_handler))
- return POLLPRI;
if (!outq->streaming && !capq->streaming) {
pr_err("Returning POLLERR from here: %d, %d\n",
outq->streaming, capq->streaming);
return POLLERR;
}
poll_wait(filp, &inst->event_handler.wait, wait);
- if (v4l2_event_pending(&inst->event_handler))
- return POLLPRI;
poll_wait(filp, &capq->done_wq, wait);
poll_wait(filp, &outq->done_wq, wait);
+ if (v4l2_event_pending(&inst->event_handler))
+ rc |= POLLPRI;
spin_lock_irqsave(&capq->done_lock, flags);
if (!list_empty(&capq->done_list))
cap_vb = list_first_entry(&capq->done_list, struct vb2_buffer,
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index a5cff9c..261879d 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -587,6 +587,9 @@
case V4L2_PIX_FMT_VC1_ANNEX_L:
codec = HAL_VIDEO_CODEC_VC1;
break;
+ case V4L2_PIX_FMT_VP8:
+ codec = HAL_VIDEO_CODEC_VP8;
+ break;
case V4L2_PIX_FMT_DIVX_311:
codec = HAL_VIDEO_CODEC_DIVX_311;
break;
@@ -596,8 +599,7 @@
/*HAL_VIDEO_CODEC_MVC
HAL_VIDEO_CODEC_SPARK
HAL_VIDEO_CODEC_VP6
- HAL_VIDEO_CODEC_VP7
- HAL_VIDEO_CODEC_VP8*/
+ HAL_VIDEO_CODEC_VP7*/
default:
pr_err("Wrong codec: %d\n", fourcc);
codec = HAL_UNUSED_CODEC;
@@ -933,12 +935,56 @@
{
int rc = 0;
struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
+ bool ip_flush = false,
+ op_flush = false;
+
mutex_lock(&inst->sync_lock);
- if (dec->cmd != V4L2_DEC_CMD_STOP)
- return -EINVAL;
- rc = vidc_hal_session_flush((void *)inst->session, HAL_FLUSH_OUTPUT);
+
+ switch (dec->cmd) {
+ case V4L2_DEC_QCOM_CMD_FLUSH:
+ ip_flush = dec->flags & V4L2_DEC_QCOM_CMD_FLUSH_OUTPUT;
+ op_flush = dec->flags & V4L2_DEC_QCOM_CMD_FLUSH_CAPTURE;
+ /* Only support flush on decoder (for now)*/
+ if (inst->session_type == MSM_VIDC_ENCODER) {
+ pr_err("Buffer flushing not supported for encoder\n");
+ rc = -ENOTSUPP;
+ break;
+ }
+
+ /* Certain types of flushes aren't supported such as: */
+ /* 1) Input only flush */
+ if (ip_flush && !op_flush) {
+ pr_err("Input only flush not supported\n");
+ rc = -ENOTSUPP;
+ break;
+ }
+
+ /* 2) Output only flush when in reconfig */
+ if (!ip_flush && op_flush && !inst->in_reconfig) {
+ pr_err("Output only flush only supported when reconfiguring\n");
+ rc = -ENOTSUPP;
+ break;
+ }
+
+ /* Finally flush */
+ if (op_flush && ip_flush)
+ rc = vidc_hal_session_flush(inst->session,
+ HAL_FLUSH_ALL);
+ else if (ip_flush)
+ rc = vidc_hal_session_flush(inst->session,
+ HAL_FLUSH_INPUT);
+ else if (op_flush)
+ rc = vidc_hal_session_flush(inst->session,
+ HAL_FLUSH_OUTPUT);
+
+ break;
+ default:
+ rc = -ENOTSUPP;
+ goto exit;
+ }
+
if (rc) {
- pr_err("Failed to get property\n");
+ pr_err("Failed to exec decoder cmd %d\n", dec->cmd);
goto exit;
}
exit:
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index 85e984d..11b1ed0 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -665,6 +665,8 @@
}
write_register(dev->hal_data->register_base_addr,
VIDC_CPU_CS_SCIACMDARG3, 0, 0);
+ disable_irq_nosync(dev->hal_data->irq);
+ vidc_hal_interface_queues_release(dev);
HAL_MSG_INFO("\nHAL exited\n");
return 0;
}
@@ -1272,18 +1274,6 @@
sizeof(struct hfi_h264_db_control);
break;
}
- case HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF:
- {
- struct hfi_temporal_spatial_tradeoff *hfi;
- pkt->rg_property_data[0] =
- HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF;
- hfi = (struct hfi_temporal_spatial_tradeoff *)
- &pkt->rg_property_data[1];
- hfi->ts_factor = ((struct hfi_temporal_spatial_tradeoff *)
- pdata)->ts_factor;
- pkt->size += sizeof(u32) * 2;
- break;
- }
case HAL_PARAM_VENC_SESSION_QP:
{
struct hfi_quantization *hfi;
@@ -1505,8 +1495,6 @@
break;
case HAL_PARAM_VENC_H264_DEBLOCK_CONTROL:
break;
- case HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF:
- break;
case HAL_PARAM_VENC_SESSION_QP:
break;
case HAL_CONFIG_VENC_INTRA_PERIOD:
diff --git a/drivers/media/video/msm_vidc/vidc_hal.h b/drivers/media/video/msm_vidc/vidc_hal.h
index a36d7f3..6c7e5df 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.h
+++ b/drivers/media/video/msm_vidc/vidc_hal.h
@@ -155,6 +155,7 @@
#define HFI_EXTRADATA_VC1_SEQDISP 0x00000004
#define HFI_EXTRADATA_TIMESTAMP 0x00000005
#define HFI_EXTRADATA_S3D_FRAME_PACKING 0x00000006
+#define HFI_EXTRADATA_EOSNAL_DETECTED 0x00000007
#define HFI_EXTRADATA_MULTISLICE_INFO 0x7F100000
#define HFI_EXTRADATA_NUM_CONCEALED_MB 0x7F100001
#define HFI_EXTRADATA_INDEX 0x7F100002
@@ -164,6 +165,11 @@
#define HFI_INDEX_EXTRADATA_DIGITAL_ZOOM 0x07000010
#define HFI_INDEX_EXTRADATA_ASPECT_RATIO 0x7F100003
+struct HFI_INDEX_EXTRADATA_CONFIG_TYPE {
+ int enable;
+ u32 index_extra_data_id;
+};
+
struct hfi_extradata_header {
u32 size;
u32 version;
@@ -196,7 +202,7 @@
(HFI_PROPERTY_PARAM_OX_START + 0x004)
#define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG \
(HFI_PROPERTY_PARAM_OX_START + 0x005)
-#define HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE \
+#define HFI_PROPERTY_PARAM_INDEX_EXTRADATA \
(HFI_PROPERTY_PARAM_OX_START + 0x006)
#define HFI_PROPERTY_PARAM_DIVX_FORMAT \
(HFI_PROPERTY_PARAM_OX_START + 0x007)
@@ -244,6 +250,10 @@
#define HFI_PROPERTY_PARAM_VENC_OX_START \
(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x5000)
+#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO \
+ (HFI_PROPERTY_PARAM_VENC_OX_START + 0x001)
+#define HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL \
+ (HFI_PROPERTY_PARAM_VENC_OX_START + 0x002)
#define HFI_PROPERTY_CONFIG_VENC_OX_START \
(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000)
@@ -285,10 +295,6 @@
u8 rg_data[1];
};
-struct hfi_seq_header_info {
- u32 max_header_len;
-};
-
struct hfi_enable_picture {
u32 picture_type;
};
@@ -861,6 +867,14 @@
int dev_count;
};
+struct hfi_index_extradata_aspect_ratio_payload {
+ u32 size;
+ u32 version;
+ u32 port_index;
+ u32 saspect_width;
+ u32 saspect_height;
+};
+
extern struct hal_device_data hal_ctxt;
int vidc_hal_iface_msgq_read(struct hal_device *device, void *pkt);
diff --git a/drivers/media/video/msm_vidc/vidc_hal_helper.h b/drivers/media/video/msm_vidc/vidc_hal_helper.h
index d4e2619..43995eb 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_helper.h
+++ b/drivers/media/video/msm_vidc/vidc_hal_helper.h
@@ -68,8 +68,8 @@
#define HFI_VIDEO_DOMAIN_ENCODER (HFI_COMMON_BASE + 0x1)
#define HFI_VIDEO_DOMAIN_DECODER (HFI_COMMON_BASE + 0x2)
-#define HFI_VIDEO_DOMAIN_VPE (HFI_COMMON_BASE + 0x3)
-#define HFI_VIDEO_DOMAIN_MBI (HFI_COMMON_BASE + 0x4)
+#define HFI_VIDEO_DOMAIN_VPE (HFI_COMMON_BASE + 0x4)
+#define HFI_VIDEO_DOMAIN_MBI (HFI_COMMON_BASE + 0x8)
#define HFI_DOMAIN_BASE_COMMON (HFI_COMMON_BASE + 0)
#define HFI_DOMAIN_BASE_VDEC (HFI_COMMON_BASE + 0x01000000)
@@ -131,6 +131,7 @@
#define HFI_H264_PROFILE_STEREO_HIGH 0x00000008
#define HFI_H264_PROFILE_MULTIVIEW_HIGH 0x00000010
#define HFI_H264_PROFILE_CONSTRAINED_HIGH 0x00000020
+#define HFI_H264_PROFILE_CONSTRAINED_BASE 0x00000040
#define HFI_H264_LEVEL_1 0x00000001
#define HFI_H264_LEVEL_1b 0x00000002
@@ -261,6 +262,10 @@
(HFI_PROPERTY_PARAM_COMMON_START + 0x00B)
#define HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT \
(HFI_PROPERTY_PARAM_COMMON_START + 0x00C)
+#define HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x00D)
+#define HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x00E)
#define HFI_PROPERTY_CONFIG_COMMON_START \
(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000)
@@ -271,6 +276,8 @@
(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000)
#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM \
(HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x001)
+#define HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR \
+ (HFI_PROPERTY_PARAM_VDEC_COMMON_START + 0x002)
#define HFI_PROPERTY_CONFIG_VDEC_COMMON_START \
(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x4000)
@@ -285,15 +292,13 @@
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x003)
#define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x004)
-#define HFI_PROPERTY_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF \
+#define HFI_PROPERTY_PARAM_VENC_H264_PICORDER_CNT_TYPE \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x005)
-#define HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED \
- (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x010)
#define HFI_PROPERTY_PARAM_VENC_SESSION_QP \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x006)
#define HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x007)
-#define HFI_PROPERTY_PARAM_VENC_MPEG4_DATA_PARTITIONING \
+#define HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x008)
#define HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x009)
@@ -301,22 +306,26 @@
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00A)
#define HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00B)
-#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO \
+#define HFI_PROPERTY_PARAM_VENC_OPEN_GOP \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00C)
#define HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00D)
#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00E)
-#define HFI_PROPERTY_PARAM_VENC_VBVBUFFER_SIZE \
+#define HFI_PROPERTY_PARAM_VENC_VBV_HRD_BUF_SIZE \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00F)
+#define HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x010)
#define HFI_PROPERTY_PARAM_VENC_MPEG4_QPEL \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x011)
#define HFI_PROPERTY_PARAM_VENC_ADVANCED \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x012)
#define HFI_PROPERTY_PARAM_VENC_SYNC_FRAME_SEQUENCE_HEADER \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x013)
-#define HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL \
+#define HFI_PROPERTY_PARAM_VENC_H264_SPS_ID \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x014)
+#define HFI_PROPERTY_PARAM_VENC_H264_PPS_ID \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x015)
#define HFI_PROPERTY_CONFIG_VENC_COMMON_START \
(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
@@ -328,7 +337,7 @@
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x003)
#define HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME \
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x004)
-#define HFI_PROPERTY_CONFIG_VENC_TIMESTAMP_SCALE \
+#define HFI_PROPERTY_CONFIG_VENC_SLICE_SIZE \
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x005)
#define HFI_PROPERTY_CONFIG_VENC_FRAME_QP \
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x006)
@@ -357,6 +366,8 @@
#define HFI_CAPABILITY_SCALE_X (HFI_COMMON_BASE + 0x6)
#define HFI_CAPABILITY_SCALE_Y (HFI_COMMON_BASE + 0x7)
#define HFI_CAPABILITY_BITRATE (HFI_COMMON_BASE + 0x8)
+#define HFI_CAPABILITY_BFRAME (HFI_COMMON_BASE + 0x9)
+#define HFI_CAPABILITY_HIERARCHICAL_P_LAYERS (HFI_COMMON_BASE + 0x10)
struct hfi_capability_supported {
u32 capability_type;
@@ -433,10 +444,6 @@
u32 bframes;
};
-struct hfi_timestamp_scale {
- u32 time_stamp_scale;
-};
-
struct hfi_mpeg4_header_extension {
u32 header_extension;
};
@@ -492,6 +499,10 @@
struct hfi_profile_level rg_profile_level[1];
};
+struct hfi_quality_vs_speed {
+ u32 quality_vs_speed;
+};
+
struct hfi_quantization {
u32 qp_i;
u32 qp_p;
@@ -499,8 +510,10 @@
u32 layer_id;
};
-struct hfi_temporal_spatial_tradeoff {
- u32 ts_factor;
+struct hfi_quantization_range {
+ u32 min_qp;
+ u32 max_qp;
+ u32 layer_id;
};
struct hfi_frame_size {
@@ -605,6 +618,8 @@
u8 pipe2d;
u8 hw_mode;
u8 low_delay_enforce;
+ u8 worker_vppsg_delay;
+ int close_gop;
int h264_constrain_intra_pred;
int h264_transform_8x8_flag;
int mpeg4_qpel_enable;
@@ -613,6 +628,9 @@
u8 vpp_info_packet_mode;
u8 ref_tile_mode;
u8 bitstream_flush_mode;
+ u32 vppsg_vspap_fb_sync_delay;
+ u32 rc_initial_delay;
+ u32 peak_bitrate_constraint;
u32 ds_display_frame_width;
u32 ds_display_frame_height;
u32 perf_tune_param_ptr;
@@ -624,6 +642,19 @@
u32 h264_num_ref_frames;
};
+struct hfi_vbv_hrd_bufsize {
+ u32 buffer_size;
+};
+
+struct hfi_codec_mask_supported {
+ u32 codecs;
+ u32 video_domains;
+};
+
+struct hfi_seq_header_info {
+	u32 max_header_len;
+};
+
#define HFI_CMD_SYS_COMMON_START \
(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
#define HFI_CMD_SYS_INIT (HFI_CMD_SYS_COMMON_START + 0x001)
diff --git a/drivers/media/video/vcap_v4l2.c b/drivers/media/video/vcap_v4l2.c
index 0a033ae..98e2fd9 100644
--- a/drivers/media/video/vcap_v4l2.c
+++ b/drivers/media/video/vcap_v4l2.c
@@ -303,6 +303,105 @@
cd->set_vp_o = false;
}
+/* VCAP Internal QBUF and DQBUF for VC + VP */
+int vcvp_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ struct vb2_buffer *vb;
+
+ if (q->fileio) {
+ dprintk(1, "%s: file io in progress\n", __func__);
+ return -EBUSY;
+ }
+
+ if (b->type != q->type) {
+ dprintk(1, "%s: invalid buffer type\n", __func__);
+ return -EINVAL;
+ }
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "%s: buffer index out of range\n", __func__);
+ return -EINVAL;
+ }
+
+ vb = q->bufs[b->index];
+ if (NULL == vb) {
+ dprintk(1, "%s: buffer is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (b->memory != q->memory) {
+ dprintk(1, "%s: invalid memory type\n", __func__);
+ return -EINVAL;
+ }
+
+ if (vb->state != VB2_BUF_STATE_DEQUEUED &&
+ vb->state != VB2_BUF_STATE_PREPARED) {
+ dprintk(1, "%s: buffer already in use\n", __func__);
+ return -EINVAL;
+ }
+
+ list_add_tail(&vb->queued_entry, &q->queued_list);
+ vb->state = VB2_BUF_STATE_QUEUED;
+
+ if (q->streaming) {
+ vb->state = VB2_BUF_STATE_ACTIVE;
+ atomic_inc(&q->queued_count);
+ q->ops->buf_queue(vb);
+ }
+ return 0;
+}
+
+int vcvp_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ struct vb2_buffer *vb = NULL;
+ unsigned long flags;
+
+ if (q->fileio) {
+ dprintk(1, "%s: file io in progress\n", __func__);
+ return -EBUSY;
+ }
+
+ if (b->type != q->type) {
+ dprintk(1, "%s: invalid buffer type\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!q->streaming) {
+ dprintk(1, "Streaming off, will not wait for buffers\n");
+ return -EINVAL;
+ }
+
+ if (!list_empty(&q->done_list)) {
+ spin_lock_irqsave(&q->done_lock, flags);
+ vb = list_first_entry(&q->done_list, struct vb2_buffer,
+ done_entry);
+ list_del(&vb->done_entry);
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ switch (vb->state) {
+ case VB2_BUF_STATE_DONE:
+ dprintk(3, "%s: Returning done buffer\n", __func__);
+ break;
+ case VB2_BUF_STATE_ERROR:
+ dprintk(3, "%s: Ret done buf with err\n", __func__);
+ break;
+ default:
+ dprintk(1, "%s: Invalid buffer state\n", __func__);
+ return -EINVAL;
+ }
+
+ memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
+
+ list_del(&vb->queued_entry);
+
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+ return 0;
+ }
+
+ dprintk(1, "No buffers to dequeue\n");
+ return -EAGAIN;
+}
+
int get_phys_addr(struct vcap_dev *dev, struct vb2_queue *q,
struct v4l2_buffer *b)
{
@@ -889,6 +988,14 @@
dprintk(1, "qbuf: buffer already in use\n");
return -EINVAL;
}
+ rc = get_phys_addr(c_data->dev, &c_data->vc_vidq, p);
+ if (rc < 0)
+ return rc;
+ rc = vcvp_qbuf(&c_data->vc_vidq, p);
+ if (rc < 0)
+ free_ion_handle(c_data->dev,
+ &c_data->vc_vidq, p);
+ return rc;
}
rc = get_phys_addr(c_data->dev, &c_data->vc_vidq, p);
if (rc < 0)
@@ -1009,6 +1116,7 @@
unsigned long flags;
int rc;
unsigned long rate;
+ long rate_rc;
dprintk(3, "In Stream ON\n");
if (determine_mode(c_data) != c_data->op_mode) {
@@ -1035,12 +1143,13 @@
}
rate = c_data->vc_format.clk_freq;
- rate = clk_round_rate(dev->vcap_clk, rate);
- if (rate <= 0) {
+ rate_rc = clk_round_rate(dev->vcap_clk, rate);
+ if (rate_rc <= 0) {
pr_err("%s: Failed core rnd_rate\n", __func__);
rc = -EINVAL;
goto free_res;
}
+ rate = (unsigned long)rate_rc;
rc = clk_set_rate(dev->vcap_clk, rate);
if (rc < 0)
goto free_res;
@@ -1064,6 +1173,7 @@
goto free_res;
config_vc_format(c_data);
+ c_data->streaming = 1;
rc = vb2_streamon(&c_data->vc_vidq, i);
if (rc < 0)
goto free_res;
@@ -1080,12 +1190,13 @@
c_data->dev->vp_client = c_data;
rate = 160000000;
- rate = clk_round_rate(dev->vcap_clk, rate);
- if (rate <= 0) {
+ rate_rc = clk_round_rate(dev->vcap_clk, rate);
+ if (rate_rc <= 0) {
pr_err("%s: Failed core rnd_rate\n", __func__);
rc = -EINVAL;
goto free_res;
}
+ rate = (unsigned long)rate_rc;
rc = clk_set_rate(dev->vcap_clk, rate);
if (rc < 0)
goto free_res;
@@ -1148,12 +1259,13 @@
}
rate = c_data->vc_format.clk_freq;
- rate = clk_round_rate(dev->vcap_clk, rate);
- if (rate <= 0) {
+ rate_rc = clk_round_rate(dev->vcap_clk, rate);
+ if (rate_rc <= 0) {
pr_err("%s: Failed core rnd_rate\n", __func__);
rc = -EINVAL;
goto free_res;
}
+ rate = (unsigned long)rate_rc;
rc = clk_set_rate(dev->vcap_clk, rate);
if (rc < 0)
goto free_res;
@@ -1266,13 +1378,11 @@
return 0;
}
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+int streamoff_work(struct vcap_client_data *c_data)
{
- struct vcap_client_data *c_data = to_client_data(file->private_data);
struct vcap_dev *dev = c_data->dev;
unsigned long flags;
int rc;
-
switch (c_data->op_mode) {
case VC_VCAP_OP:
if (c_data != dev->vc_client) {
@@ -1288,9 +1398,12 @@
}
dev->vc_resource = 0;
spin_unlock_irqrestore(&dev->dev_slock, flags);
- rc = vb2_streamoff(&c_data->vc_vidq, i);
- if (rc >= 0)
+ rc = vb2_streamoff(&c_data->vc_vidq,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (rc >= 0) {
+ c_data->streaming = 0;
atomic_set(&c_data->dev->vc_enabled, 0);
+ }
return rc;
case VP_VCAP_OP:
if (c_data != dev->vp_client) {
@@ -1383,7 +1496,12 @@
pr_err("VCAP Error: %s: Unknown Operation mode", __func__);
return -ENOTRECOVERABLE;
}
- return 0;
+}
+
+static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct vcap_client_data *c_data = to_client_data(file->private_data);
+ return streamoff_work(c_data);
}
static int vidioc_subscribe_event(struct v4l2_fh *fh,
@@ -1539,6 +1657,9 @@
if (c_data == NULL)
return 0;
+ if (c_data->streaming)
+ streamoff_work(c_data);
+
spin_lock_irqsave(&dev->dev_slock, flags);
atomic_dec(&dev->open_clients);
ret = atomic_read(&dev->open_clients);
diff --git a/drivers/media/video/vcap_vc.c b/drivers/media/video/vcap_vc.c
index ad0718e..62cc306 100644
--- a/drivers/media/video/vcap_vc.c
+++ b/drivers/media/video/vcap_vc.c
@@ -62,19 +62,19 @@
struct vcap_buffer *buf_vp;
int rc;
- p.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
p.memory = V4L2_MEMORY_USERPTR;
while (1) {
+ p.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (!vp_work->cd->streaming)
return;
- rc = vb2_dqbuf(&vp_work->cd->vc_vidq, &p, O_NONBLOCK);
+ rc = vcvp_dqbuf(&vp_work->cd->vc_vidq, &p);
if (rc < 0)
return;
vb_vc = vp_work->cd->vc_vidq.bufs[p.index];
if (NULL == vb_vc) {
dprintk(1, "%s: buffer is NULL\n", __func__);
- vb2_qbuf(&vp_work->cd->vc_vidq, &p);
+ vcvp_qbuf(&vp_work->cd->vc_vidq, &p);
return;
}
buf_vc = container_of(vb_vc, struct vcap_buffer, vb);
@@ -82,7 +82,7 @@
vb_vp = vp_work->cd->vp_in_vidq.bufs[p.index];
if (NULL == vb_vp) {
dprintk(1, "%s: buffer is NULL\n", __func__);
- vb2_qbuf(&vp_work->cd->vc_vidq, &p);
+ vcvp_qbuf(&vp_work->cd->vc_vidq, &p);
return;
}
buf_vp = container_of(vb_vp, struct vcap_buffer, vb);
@@ -94,7 +94,7 @@
p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
/* This call should not fail */
- rc = vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
+ rc = vcvp_qbuf(&vp_work->cd->vp_in_vidq, &p);
if (rc < 0) {
pr_err("%s: qbuf to vp_in failed\n", __func__);
buf_vc->ion_handle = buf_vp->ion_handle;
@@ -102,7 +102,7 @@
buf_vp->ion_handle = NULL;
buf_vp->paddr = 0;
p.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- vb2_qbuf(&vp_work->cd->vc_vidq, &p);
+ vcvp_qbuf(&vp_work->cd->vc_vidq, &p);
}
}
}
diff --git a/drivers/media/video/vcap_vc.h b/drivers/media/video/vcap_vc.h
index 57d13cd..792fb14 100644
--- a/drivers/media/video/vcap_vc.h
+++ b/drivers/media/video/vcap_vc.h
@@ -69,11 +69,6 @@
#define VC_BUFFER_WRITTEN (0x3 << 1)
-struct vc_reg_data {
- unsigned data;
- unsigned addr;
-};
-
int vc_start_capture(struct vcap_client_data *c_data);
int vc_hw_kick_off(struct vcap_client_data *c_data);
void vc_stop_capture(struct vcap_client_data *c_data);
diff --git a/drivers/media/video/vcap_vp.c b/drivers/media/video/vcap_vp.c
index 1b503ba..be1b4ff 100644
--- a/drivers/media/video/vcap_vp.c
+++ b/drivers/media/video/vcap_vp.c
@@ -117,21 +117,21 @@
struct vcap_buffer *buf_vp;
int rc;
- p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
p.memory = V4L2_MEMORY_USERPTR;
/* This loop exits when there is no more buffers left */
while (1) {
+ p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
if (!vp_work->cd->streaming)
return;
- rc = vb2_dqbuf(&vp_work->cd->vp_in_vidq, &p, O_NONBLOCK);
+ rc = vcvp_dqbuf(&vp_work->cd->vp_in_vidq, &p);
if (rc < 0)
return;
vb_vc = vp_work->cd->vc_vidq.bufs[p.index];
if (NULL == vb_vc) {
dprintk(1, "%s: buffer is NULL\n", __func__);
- vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
+ vcvp_qbuf(&vp_work->cd->vp_in_vidq, &p);
return;
}
buf_vc = container_of(vb_vc, struct vcap_buffer, vb);
@@ -139,7 +139,7 @@
vb_vp = vp_work->cd->vp_in_vidq.bufs[p.index];
if (NULL == vb_vp) {
dprintk(1, "%s: buffer is NULL\n", __func__);
- vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
+ vcvp_qbuf(&vp_work->cd->vp_in_vidq, &p);
return;
}
buf_vp = container_of(vb_vp, struct vcap_buffer, vb);
@@ -150,7 +150,7 @@
p.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
/* This call should not fail */
- rc = vb2_qbuf(&vp_work->cd->vc_vidq, &p);
+ rc = vcvp_qbuf(&vp_work->cd->vc_vidq, &p);
if (rc < 0) {
dprintk(1, "%s: qbuf to vc failed\n", __func__);
buf_vp->ion_handle = buf_vc->ion_handle;
@@ -158,7 +158,7 @@
buf_vc->ion_handle = NULL;
buf_vc->paddr = 0;
p.type = V4L2_BUF_TYPE_INTERLACED_IN_DECODER;
- vb2_qbuf(&vp_work->cd->vp_in_vidq, &p);
+ vcvp_qbuf(&vp_work->cd->vp_in_vidq, &p);
}
}
}
@@ -180,12 +180,13 @@
return;
vp_act = &dev->vp_client->vid_vp_action;
- irq = vp_work->irq;
rc = readl_relaxed(VCAP_OFFSET(0x048));
while (!(rc & 0x00000100))
rc = readl_relaxed(VCAP_OFFSET(0x048));
+ irq = readl_relaxed(VCAP_VP_INT_STATUS);
+
writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
@@ -283,7 +284,7 @@
}
dprintk(1, "%s: irq=0x%08x\n", __func__, irq);
- if (!(irq & VP_PIC_DONE)) {
+	if (!(irq & (VP_PIC_DONE | VP_MODE_CHANGE))) {
writel_relaxed(irq, VCAP_VP_INT_CLEAR);
pr_err("VP IRQ shows some error\n");
return IRQ_HANDLED;
@@ -307,7 +308,6 @@
INIT_WORK(&dev->vp_work.work, vp_wq_fnc);
dev->vp_work.cd = c_data;
- dev->vp_work.irq = irq;
rc = queue_work(dev->vcap_wq, &dev->vp_work.work);
disable_irq_nosync(dev->vpirq->start);
@@ -411,7 +411,7 @@
void *buf;
if (!c_data->vid_vp_action.bufMotion) {
- dprintk(1, "Motion buffer has not been created");
+ pr_err("Motion buffer has not been created");
return;
}
@@ -556,7 +556,7 @@
if (c_data->vp_out_fmt.pixfmt == V4L2_PIX_FMT_NV16)
chroma_fmt = 1;
- writel_relaxed((c_data->vp_in_fmt.width / 16) << 20 |
+ writel_relaxed((c_data->vp_out_fmt.width / 16) << 20 |
chroma_fmt << 11 | 0x1 << 4, VCAP_VP_OUT_CONFIG);
/* Enable Interrupt */
diff --git a/drivers/media/video/vcap_vp.h b/drivers/media/video/vcap_vp.h
index 47ad8d4..5415e54 100644
--- a/drivers/media/video/vcap_vp.h
+++ b/drivers/media/video/vcap_vp.h
@@ -89,6 +89,7 @@
#define VCAP_VP_NR_T2_C_BASE_ADDR (VCAP_BASE + 0x4B8)
#define VP_PIC_DONE (0x1 << 0)
+#define VP_MODE_CHANGE (0x1 << 8)
irqreturn_t vp_handler(struct vcap_dev *dev);
int config_vp_format(struct vcap_client_data *c_data);
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 021dcf1..2256f67 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -297,7 +297,6 @@
static int wcd9xxx_device_init(struct wcd9xxx *wcd9xxx, int irq)
{
int ret;
- u8 idbyte_0, idbyte_1, idbyte_2, idbyte_3;
struct mfd_cell *wcd9xxx_dev = NULL;
int wcd9xxx_dev_size = 0;
@@ -321,49 +320,26 @@
goto err;
}
- idbyte_0 = wcd9xxx_reg_read(wcd9xxx, WCD9XXX_A_CHIP_ID_BYTE_0);
- idbyte_1 = wcd9xxx_reg_read(wcd9xxx, WCD9XXX_A_CHIP_ID_BYTE_1);
- idbyte_2 = wcd9xxx_reg_read(wcd9xxx, WCD9XXX_A_CHIP_ID_BYTE_2);
- idbyte_3 = wcd9xxx_reg_read(wcd9xxx, WCD9XXX_A_CHIP_ID_BYTE_3);
+ wcd9xxx->idbyte_0 = wcd9xxx_reg_read(wcd9xxx, WCD9XXX_A_CHIP_ID_BYTE_0);
+ wcd9xxx->idbyte_1 = wcd9xxx_reg_read(wcd9xxx, WCD9XXX_A_CHIP_ID_BYTE_1);
+ wcd9xxx->idbyte_2 = wcd9xxx_reg_read(wcd9xxx, WCD9XXX_A_CHIP_ID_BYTE_2);
+ wcd9xxx->idbyte_3 = wcd9xxx_reg_read(wcd9xxx, WCD9XXX_A_CHIP_ID_BYTE_3);
wcd9xxx->version = wcd9xxx_reg_read(wcd9xxx,
WCD9XXX_A_CHIP_VERSION) & 0x1F;
pr_info("%s : Codec version %u initialized\n",
__func__, wcd9xxx->version);
- pr_info("idbyte_0[%08x] idbyte_1[%08x] idbyte_2[%08x] idbyte_3[%08x]\n",
- idbyte_0, idbyte_1, idbyte_2, idbyte_3);
- if (wcd9xxx->slim != NULL) {
- if (!strncmp(wcd9xxx->slim->name, "tabla", 5)) {
- if (TABLA_IS_1_X(wcd9xxx->version)) {
- wcd9xxx_dev = tabla1x_devs;
- wcd9xxx_dev_size = ARRAY_SIZE(tabla1x_devs);
- } else {
- wcd9xxx_dev = tabla_devs;
- wcd9xxx_dev_size = ARRAY_SIZE(tabla_devs);
- }
- } else {
- wcd9xxx_dev = sitar_devs;
- wcd9xxx_dev_size = ARRAY_SIZE(sitar_devs);
- }
- } else {
- /* Need to add here check for Tabla.
- * For now the read of version takes
- * care of now only tabla.
- */
- pr_debug("%s : Read codec version using I2C\n", __func__);
- if (!strncmp(wcd9xxx_modules[0].client->name, "sitar", 5)) {
- wcd9xxx_dev = sitar_devs;
- wcd9xxx_dev_size = ARRAY_SIZE(sitar_devs);
- } else if (TABLA_IS_1_X(wcd9xxx->version)) {
- wcd9xxx_dev = tabla1x_devs;
- wcd9xxx_dev_size = ARRAY_SIZE(tabla1x_devs);
- } else if (TABLA_IS_2_0(wcd9xxx->version)) {
- wcd9xxx_dev = tabla_devs;
- wcd9xxx_dev_size = ARRAY_SIZE(tabla_devs);
- }
+ if (wcd9xxx->idbyte_0 == 0x2) {
+ wcd9xxx_dev = tabla_devs;
+ wcd9xxx_dev_size = ARRAY_SIZE(tabla_devs);
+ } else if (wcd9xxx->idbyte_0 == 0x1) {
+ wcd9xxx_dev = tabla1x_devs;
+ wcd9xxx_dev_size = ARRAY_SIZE(tabla1x_devs);
+ } else if (wcd9xxx->idbyte_0 == 0x0) {
+ wcd9xxx_dev = sitar_devs;
+ wcd9xxx_dev_size = ARRAY_SIZE(sitar_devs);
}
-
ret = mfd_add_devices(wcd9xxx->dev, -1,
wcd9xxx_dev, wcd9xxx_dev_size,
NULL, 0);
@@ -766,21 +742,22 @@
wcd9xxx->irq = pdata->irq;
wcd9xxx->irq_base = pdata->irq_base;
- /*read the tabla status before initializing the device type*/
- ret = wcd9xxx_read(wcd9xxx, WCD9XXX_A_CHIP_STATUS, 1, &val, 0);
- if (!strncmp(wcd9xxx_modules[0].client->name, "sitar", 5))
- i2c_mode = SITAR_I2C_MODE;
- else if (!strncmp(wcd9xxx_modules[0].client->name, "tabla", 5))
- i2c_mode = TABLA_I2C_MODE;
-
- if ((ret < 0) || (val != i2c_mode))
- pr_err("failed to read the wcd9xxx status ret = %d\n", ret);
-
ret = wcd9xxx_device_init(wcd9xxx, wcd9xxx->irq);
if (ret) {
pr_err("%s: error, initializing device failed\n", __func__);
goto err_device_init;
}
+
+ if ((wcd9xxx->idbyte_0 == 0x2) || (wcd9xxx->idbyte_0 == 0x1))
+ i2c_mode = TABLA_I2C_MODE;
+ else if (wcd9xxx->idbyte_0 == 0x0)
+ i2c_mode = SITAR_I2C_MODE;
+
+ ret = wcd9xxx_read(wcd9xxx, WCD9XXX_A_CHIP_STATUS, 1, &val, 0);
+
+ if ((ret < 0) || (val != i2c_mode))
+ pr_err("failed to read the wcd9xxx status ret = %d\n", ret);
+
wcd9xxx_intf = WCD9XXX_INTERFACE_TYPE_I2C;
return ret;
@@ -1127,15 +1104,6 @@
};
MODULE_DEVICE_TABLE(i2c, tabla_id_table);
-static struct i2c_device_id sitar_id_table[] = {
- {"sitar top level", WCD9XXX_I2C_TOP_LEVEL},
- {"sitar analog", WCD9XXX_I2C_ANALOG},
- {"sitar digital1", WCD9XXX_I2C_DIGITAL_1},
- {"sitar digital2", WCD9XXX_I2C_DIGITAL_2},
- {}
-};
-MODULE_DEVICE_TABLE(i2c, tabla_id_table);
-
static struct i2c_driver tabla_i2c_driver = {
.driver = {
.owner = THIS_MODULE,
@@ -1148,21 +1116,9 @@
.suspend = wcd9xxx_i2c_suspend,
};
-static struct i2c_driver sitar_i2c_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "sitar-i2c-core",
- },
- .id_table = sitar_id_table,
- .probe = wcd9xxx_i2c_probe,
- .remove = __devexit_p(wcd9xxx_i2c_remove),
- .resume = wcd9xxx_i2c_resume,
- .suspend = wcd9xxx_i2c_suspend,
-};
-
static int __init wcd9xxx_init(void)
{
- int ret1, ret2, ret3, ret4, ret5, ret6;
+ int ret1, ret2, ret3, ret4, ret5;
ret1 = slim_driver_register(&tabla_slim_driver);
if (ret1 != 0)
@@ -1184,11 +1140,7 @@
if (ret5 != 0)
pr_err("Failed to register sitar SB driver: %d\n", ret5);
- ret6 = i2c_add_driver(&sitar_i2c_driver);
- if (ret6 != 0)
- pr_err("failed to add the I2C driver\n");
-
- return (ret1 && ret2 && ret3 && ret4 && ret5 && ret6) ? -1 : 0;
+ return (ret1 && ret2 && ret3 && ret4 && ret5) ? -1 : 0;
}
module_init(wcd9xxx_init);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index d4eb6e0..8354aa8 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -609,10 +609,24 @@
return -EINVAL;
}
- if (resp.result == QSEOS_RESULT_FAILURE)
- return 0;
- else
- return resp.data;
+ if (resp.result == QSEOS_RESULT_FAILURE) {
+ return 0;
+ } else {
+ switch (resp.resp_type) {
+ /*qsee returned listener type response */
+ case QSEOS_LISTENER_ID:
+ pr_err("resp type is of listener type instead of app");
+ return -EINVAL;
+ break;
+ case QSEOS_APP_ID:
+ return resp.data;
+ default:
+ pr_err("invalid resp type (%d) from qsee",
+ resp.resp_type);
+ return -ENODEV;
+ break;
+ }
+ }
}
static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
@@ -627,6 +641,8 @@
uint32_t len;
struct qseecom_command_scm_resp resp;
struct qseecom_check_app_ireq req;
+ struct qseecom_load_app_ireq load_req;
+
/* Copy the relevant information needed for loading the image */
if (__copy_from_user(&load_img_req,
(void __user *)argp,
@@ -642,108 +658,86 @@
req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
memcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
- ret = __qseecom_check_app_exists(req);
- if (ret < 0)
- return ret;
- else
- app_id = ret;
-
- if (app_id) {
- pr_warn("App id %d (%s) already exists\n", app_id,
+ pr_warn("App (%s) does not exist, loading apps for first time\n",
(char *)(req.app_name));
- spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
- list_for_each_entry(entry,
- &qseecom.registered_app_list_head, list){
- if (entry->app_id == app_id) {
- entry->ref_cnt++;
- break;
- }
- }
- spin_unlock_irqrestore(
- &qseecom.registered_app_list_lock, flags);
- } else {
- struct qseecom_load_app_ireq load_req;
-
- pr_warn("App (%s) does not exist, loading apps for first time\n",
- (char *)(req.app_name));
- /* Get the handle of the shared fd */
- ihandle = ion_import_dma_buf(qseecom.ion_clnt,
+ /* Get the handle of the shared fd */
+ ihandle = ion_import_dma_buf(qseecom.ion_clnt,
load_img_req.ifd_data_fd);
- if (IS_ERR_OR_NULL(ihandle)) {
- pr_err("Ion client could not retrieve the handle\n");
- qsee_disable_clock_vote(CLK_SFPB);
- return -ENOMEM;
- }
+ if (IS_ERR_OR_NULL(ihandle)) {
+ pr_err("Ion client could not retrieve the handle\n");
+ qsee_disable_clock_vote(CLK_SFPB);
+ return -ENOMEM;
+ }
- /* Get the physical address of the ION BUF */
- ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+ /* Get the physical address of the ION BUF */
+ ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
- /* Populate the structure for sending scm call to load image */
- load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
- load_req.mdt_len = load_img_req.mdt_len;
- load_req.img_len = load_img_req.img_len;
- load_req.phy_addr = pa;
+ /* Populate the structure for sending scm call to load image */
+ load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+ load_req.mdt_len = load_img_req.mdt_len;
+ load_req.img_len = load_img_req.img_len;
+ load_req.phy_addr = pa;
- /* SCM_CALL to load the app and get the app_id back */
- ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &load_req,
- sizeof(struct qseecom_load_app_ireq),
- &resp, sizeof(resp));
- if (ret) {
- pr_err("scm_call to load app failed\n");
- return -EINVAL;
- }
+ /* SCM_CALL to load the app and get the app_id back */
+ ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &load_req,
+ sizeof(struct qseecom_load_app_ireq),
+ &resp, sizeof(resp));
+ if (ret) {
+ pr_err("scm_call to load app failed\n");
+ return -EINVAL;
+ }
- if (resp.result == QSEOS_RESULT_FAILURE) {
- pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
- if (!IS_ERR_OR_NULL(ihandle))
- ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(CLK_SFPB);
- return -EFAULT;
- }
-
- if (resp.result == QSEOS_RESULT_INCOMPLETE) {
- ret = __qseecom_process_incomplete_cmd(data, &resp);
- if (ret) {
- pr_err("process_incomplete_cmd failed err: %d\n",
- ret);
- if (!IS_ERR_OR_NULL(ihandle))
- ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(CLK_SFPB);
- return ret;
- }
- }
- if (resp.result != QSEOS_RESULT_SUCCESS) {
- pr_err("scm_call failed resp.result unknown, %d\n",
- resp.result);
- if (!IS_ERR_OR_NULL(ihandle))
- ion_free(qseecom.ion_clnt, ihandle);
- qsee_disable_clock_vote(CLK_SFPB);
- return -EFAULT;
- }
-
- app_id = resp.data;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- pr_err("kmalloc failed\n");
- qsee_disable_clock_vote(CLK_SFPB);
- return -ENOMEM;
- }
- entry->app_id = app_id;
- entry->ref_cnt = 1;
-
- /* Deallocate the handle */
+ if (resp.result == QSEOS_RESULT_FAILURE) {
+ pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
if (!IS_ERR_OR_NULL(ihandle))
ion_free(qseecom.ion_clnt, ihandle);
-
- spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
- list_add_tail(&entry->list, &qseecom.registered_app_list_head);
- spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
- flags);
-
- pr_warn("App with id %d (%s) now loaded\n", app_id,
- (char *)(req.app_name));
+ qsee_disable_clock_vote(CLK_SFPB);
+ return -EFAULT;
}
+
+ if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+ ret = __qseecom_process_incomplete_cmd(data, &resp);
+ if (ret) {
+ pr_err("process_incomplete_cmd failed err: %d\n",
+ ret);
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ qsee_disable_clock_vote(CLK_SFPB);
+ return ret;
+ }
+ }
+
+ if (resp.result != QSEOS_RESULT_SUCCESS) {
+ pr_err("scm_call failed resp.result unknown, %d\n",
+ resp.result);
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ qsee_disable_clock_vote(CLK_SFPB);
+ return -EFAULT;
+ }
+
+ app_id = resp.data;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ pr_err("kmalloc failed\n");
+ qsee_disable_clock_vote(CLK_SFPB);
+ return -ENOMEM;
+ }
+ entry->app_id = app_id;
+ entry->ref_cnt = 1;
+
+ /* Deallocate the handle */
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+ spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+
+ pr_warn("App with id %d (%s) now loaded\n", app_id,
+ (char *)(req.app_name));
+
data->client.app_id = app_id;
load_img_req.app_id = app_id;
if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
@@ -1432,6 +1426,8 @@
int32_t ret;
struct qseecom_qseos_app_load_query query_req;
struct qseecom_check_app_ireq req;
+ struct qseecom_registered_app_list *entry = NULL;
+ unsigned long flags = 0;
/* Copy the relevant information needed for loading the image */
if (__copy_from_user(&query_req,
@@ -1445,11 +1441,30 @@
memcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
ret = __qseecom_check_app_exists(req);
- if (ret == -EINVAL) {
+
+ if ((ret == -EINVAL) || (ret == -ENODEV)) {
pr_err(" scm call to check if app is loaded failed");
return ret; /* scm call failed */
} else if (ret > 0) {
- pr_err("app is already loaded in QSEE");
+ pr_warn("App id %d (%s) already exists\n", ret,
+ (char *)(req.app_name));
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_for_each_entry(entry,
+ &qseecom.registered_app_list_head, list){
+ if (entry->app_id == ret) {
+ entry->ref_cnt++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(
+ &qseecom.registered_app_list_lock, flags);
+ data->client.app_id = ret;
+ query_req.app_id = ret;
+
+ if (copy_to_user(argp, &query_req, sizeof(query_req))) {
+ pr_err("copy_to_user failed\n");
+ return -EFAULT;
+ }
return -EEXIST; /* app already loaded */
} else {
return 0; /* app not loaded */
diff --git a/drivers/misc/tspp.c b/drivers/misc/tspp.c
index 4d7553e..cc65929 100644
--- a/drivers/misc/tspp.c
+++ b/drivers/misc/tspp.c
@@ -1818,11 +1818,12 @@
goto err_bam;
}
- if (device->tsif_pclk && clk_enable(device->tsif_pclk) != 0) {
+ if (device->tsif_pclk && clk_prepare_enable(device->tsif_pclk) != 0) {
dev_err(&pdev->dev, "Can't start pclk");
goto err_pclk;
}
- if (device->tsif_ref_clk && clk_enable(device->tsif_ref_clk) != 0) {
+ if (device->tsif_ref_clk &&
+ clk_prepare_enable(device->tsif_ref_clk) != 0) {
dev_err(&pdev->dev, "Can't start ref clk");
goto err_refclk;
}
@@ -1849,7 +1850,7 @@
err_refclk:
if (device->tsif_pclk)
- clk_disable(device->tsif_pclk);
+ clk_disable_unprepare(device->tsif_pclk);
err_pclk:
sps_deregister_bam_device(device->bam_handle);
err_bam:
diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c
index 0ace608..871675c 100644
--- a/drivers/mmc/card/mmc_block_test.c
+++ b/drivers/mmc/card/mmc_block_test.c
@@ -33,11 +33,16 @@
#define PACKED_HDR_RW_MASK 0x0000FF00
#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
+#define SECTOR_SIZE 512
+#define NUM_OF_SECTORS_PER_BIO ((BIO_U32_SIZE * 4) / SECTOR_SIZE)
+#define BIO_TO_SECTOR(x) ((x) * NUM_OF_SECTORS_PER_BIO)
#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
+#define SANITIZE_TEST_TIMEOUT 240000
+
enum is_random {
NON_RANDOM_TEST,
RANDOM_TEST,
@@ -83,6 +88,27 @@
TEST_CMD23_BITS_16TO29_SET,
TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
+
+ /*
+ * Start of packing control test group.
+	 * In these next testcases the abbreviation FB = followed by
+ */
+ PACKING_CONTROL_MIN_TESTCASE,
+ TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ =
+ PACKING_CONTROL_MIN_TESTCASE,
+ TEST_PACKING_EXP_N_OVER_TRIGGER,
+ TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ,
+ TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N,
+ TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER,
+ TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS,
+ TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS,
+ TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER,
+ TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER,
+ TEST_PACK_MIX_PACKED_NO_PACKED_PACKED,
+ TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
+ PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
+
+ TEST_WRITE_DISCARD_SANITIZE_READ,
};
enum mmc_block_test_group {
@@ -91,6 +117,7 @@
TEST_SEND_WRITE_PACKING_GROUP,
TEST_ERR_CHECK_GROUP,
TEST_SEND_INVALID_GROUP,
+ TEST_PACKING_CONTROL_GROUP,
};
struct mmc_block_test_debug {
@@ -98,6 +125,8 @@
struct dentry *err_check_test;
struct dentry *send_invalid_packed_test;
struct dentry *random_test_seed;
+ struct dentry *packing_control_test;
+ struct dentry *discard_sanitize_test;
};
struct mmc_block_test_data {
@@ -454,6 +483,30 @@
return "Test invalid - cmd23 bits [16-29] set";
case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
return "Test invalid - cmd23 header block not in count";
+ case TEST_PACKING_EXP_N_OVER_TRIGGER:
+ return "\nTest packing control - pack n";
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
+ return "\nTest packing control - pack n followed by read";
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
+ return "\nTest packing control - pack n followed by flush";
+ case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
+ return "\nTest packing control - pack one followed by read";
+ case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
+ return "\nTest packing control - pack threshold";
+ case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
+ return "\nTest packing control - no packing";
+ case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
+ return "\nTest packing control - no packing, trigger requests";
+ case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
+ return "\nTest packing control - no pack, trigger-read-trigger";
+ case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
+ return "\nTest packing control- no pack, trigger-flush-trigger";
+ case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
+ return "\nTest packing control - mix: pack -> no pack -> pack";
+ case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
+ return "\nTest packing control - mix: no pack->pack->no pack";
+ case TEST_WRITE_DISCARD_SANITIZE_READ:
+ return "\nTest write, discard, sanitize";
default:
return "Unknown testcase";
}
@@ -505,7 +558,7 @@
stop_reason = mmc_packed_stats->pack_stop_reason;
- for (i = 1 ; i <= max_packed_reqs ; ++i) {
+ for (i = 1; i <= max_packed_reqs; ++i) {
if (mmc_packed_stats->packing_events[i] !=
expected_stats.packing_events[i]) {
test_pr_err(
@@ -710,7 +763,7 @@
test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
num_requests, td->wr_rd_next_req_id);
- for (i = 1 ; i <= num_requests ; i++) {
+ for (i = 1; i <= num_requests; i++) {
start_sec = td->start_sector + 4096 * td->num_of_write_bios;
if (is_random)
pseudo_rnd_num_of_bios(bio_seed, &num_bios);
@@ -826,6 +879,185 @@
}
/*
+ * Prepare the write, read and flush requests for the packing control
+ * testcases
+ */
+static int prepare_packed_control_tests_requests(struct test_data *td,
+ int is_err_expected, int num_requests, int is_random)
+{
+ int ret = 0;
+ struct mmc_queue *mq;
+ int max_packed_reqs;
+ int temp_num_req = num_requests;
+ struct request_queue *req_q;
+ int test_packed_trigger;
+ int num_packed_reqs;
+
+ if (!td) {
+ test_pr_err("%s: NULL td\n", __func__);
+ return -EINVAL;
+ }
+
+ req_q = td->req_q;
+
+ if (!req_q) {
+ test_pr_err("%s: NULL request queue\n", __func__);
+ return -EINVAL;
+ }
+
+ mq = req_q->queuedata;
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+ test_packed_trigger = mq->num_wr_reqs_to_start_packing;
+ num_packed_reqs = num_requests - test_packed_trigger;
+
+ if (mbtd->random_test_seed == 0) {
+ mbtd->random_test_seed =
+ (unsigned int)(get_jiffies_64() & 0xFFFF);
+ test_pr_info("%s: got seed from jiffies %d",
+ __func__, mbtd->random_test_seed);
+ }
+
+ mmc_blk_init_packed_statistics(mq->card);
+
+ if (td->test_info.testcase ==
+ TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) {
+ temp_num_req = num_requests;
+ num_requests = test_packed_trigger - 1;
+ }
+
+ /* Verify that the packing is disabled before starting the test */
+ mq->wr_packing_enabled = false;
+ mq->num_of_potential_packed_wr_reqs = 0;
+
+ if (td->test_info.testcase == TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
+ mq->num_of_potential_packed_wr_reqs = test_packed_trigger + 1;
+ mq->wr_packing_enabled = true;
+ num_requests = test_packed_trigger + 2;
+ }
+
+ ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
+ is_random);
+ if (ret)
+ goto exit;
+
+ if (td->test_info.testcase == TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED)
+ num_requests = temp_num_req;
+
+ memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
+ sizeof(mbtd->exp_packed_stats.pack_stop_reason));
+ memset(mbtd->exp_packed_stats.packing_events, 0,
+ (max_packed_reqs + 1) * sizeof(u32));
+
+ switch (td->test_info.testcase) {
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
+ case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
+ ret = prepare_request_add_flush(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, num_packed_reqs,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 2;
+ break;
+ case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, test_packed_trigger,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
+ ret = prepare_request_add_flush(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, test_packed_trigger,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, num_requests,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+ mbtd->exp_packed_stats.packing_events[num_requests-1] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+ break;
+ case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, num_requests,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
+ case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ default:
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ }
+ mbtd->num_requests = num_requests;
+
+exit:
+ return ret;
+}
+
+/*
* Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
* In this testcase we have mixed error expectations from different
* write requests, hence the special prepare function.
@@ -848,13 +1080,14 @@
mmc_blk_init_packed_statistics(mq->card);
- for (i = 1 ; i <= num_requests ; i++) {
+ for (i = 1; i <= num_requests; i++) {
if (i > (num_requests / 2))
is_err_expected = 1;
- start_address = td->start_sector + 4096*td->num_of_write_bios;
+ start_address = td->start_sector + 4096 * td->num_of_write_bios;
ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
- start_address, (i%5)+1, TEST_PATTERN_5A, NULL);
+ start_address, (i % 5) + 1, TEST_PATTERN_5A,
+ NULL);
if (ret) {
test_pr_err("%s: failed to add a write request",
__func__);
@@ -888,6 +1121,8 @@
int num_requests;
int min_num_requests = 2;
int is_random = mbtd->is_random;
+ int max_for_double;
+ int test_packed_trigger;
req_q = test_iosched_get_req_queue();
if (req_q)
@@ -904,16 +1139,52 @@
max_num_requests = mq->card->ext_csd.max_packed_writes;
num_requests = max_num_requests - 2;
+ test_packed_trigger = mq->num_wr_reqs_to_start_packing;
+
+ /*
+ * Here max_for_double is intended for packed control testcases
+	 * in which we issue many write requests. Its purpose is to prevent
+	 * exceeding the max number of req_queue requests.
+ */
+ max_for_double = max_num_requests - 10;
+
+ if (td->test_info.testcase ==
+ TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
+ /* Don't expect packing, so issue up to trigger-1 reqs */
+ num_requests = test_packed_trigger - 1;
if (is_random) {
if (td->test_info.testcase ==
TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
+ /*
+ * Here we don't want num_requests to be less than 1
+ * as a consequence of division by 2.
+ */
min_num_requests = 3;
+ if (td->test_info.testcase ==
+ TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
+ /* Don't expect packing, so issue up to trigger reqs */
+ max_num_requests = test_packed_trigger;
+
num_requests = pseudo_random_seed(seed, min_num_requests,
max_num_requests - 1);
}
+ if (td->test_info.testcase ==
+ TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
+ num_requests -= test_packed_trigger;
+
+ if (td->test_info.testcase == TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N)
+ num_requests =
+ num_requests > max_for_double ? max_for_double : num_requests;
+
+ if (mbtd->test_group == TEST_PACKING_CONTROL_GROUP)
+ num_requests += test_packed_trigger;
+
+ if (td->test_info.testcase == TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS)
+ num_requests = test_packed_trigger;
+
return num_requests;
}
@@ -929,6 +1200,7 @@
int num_requests = 0;
int ret = 0;
int is_random = mbtd->is_random;
+ int test_packed_trigger = mq->num_wr_reqs_to_start_packing;
if (!mq) {
test_pr_err("%s: NULL mq", __func__);
@@ -997,6 +1269,33 @@
case TEST_HDR_CMD23_PACKED_BIT_SET:
ret = prepare_packed_requests(td, 1, num_requests, is_random);
break;
+ case TEST_PACKING_EXP_N_OVER_TRIGGER:
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
+ case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
+ case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
+ case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
+ case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
+ ret = prepare_packed_control_tests_requests(td, 0, num_requests,
+ is_random);
+ break;
+ case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
+ ret = prepare_packed_control_tests_requests(td, 0,
+ max_num_requests, is_random);
+ break;
+ case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
+ ret = prepare_packed_control_tests_requests(td, 0,
+ test_packed_trigger + 1,
+ is_random);
+ break;
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
+ ret = prepare_packed_control_tests_requests(td, 0, num_requests,
+ is_random);
+ break;
+ case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
+ case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
+ ret = prepare_packed_control_tests_requests(td, 0,
+ test_packed_trigger, is_random);
+ break;
default:
test_pr_info("%s: Invalid test case...", __func__);
return -EINVAL;
@@ -1084,6 +1383,9 @@
/* disable the packing control */
host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
break;
+ case TEST_PACKING_CONTROL_GROUP:
+ host->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
+ break;
default:
break;
}
@@ -1091,6 +1393,63 @@
return 0;
}
+static void pseudo_rnd_sector_and_size(unsigned int *seed,
+ unsigned int min_start_sector,
+ unsigned int *start_sector,
+ unsigned int *num_of_bios)
+{
+ unsigned int max_sec = min_start_sector + TEST_MAX_SECTOR_RANGE;
+ do {
+ *start_sector = pseudo_random_seed(seed,
+ 1, max_sec);
+ *num_of_bios = pseudo_random_seed(seed,
+ 1, TEST_MAX_BIOS_PER_REQ);
+ if (!(*num_of_bios))
+ *num_of_bios = 1;
+ } while ((*start_sector < min_start_sector) ||
+ (*start_sector + (*num_of_bios * BIO_U32_SIZE * 4)) > max_sec);
+}
+
+/* sanitize test functions */
+static int prepare_write_discard_sanitize_read(struct test_data *td)
+{
+ unsigned int start_sector;
+ unsigned int num_of_bios = 0;
+ static unsigned int total_bios;
+ unsigned int *num_bios_seed;
+ int i = 0;
+
+ if (mbtd->random_test_seed == 0) {
+ mbtd->random_test_seed =
+ (unsigned int)(get_jiffies_64() & 0xFFFF);
+ test_pr_info("%s: got seed from jiffies %d",
+ __func__, mbtd->random_test_seed);
+ }
+ num_bios_seed = &mbtd->random_test_seed;
+
+ do {
+ pseudo_rnd_sector_and_size(num_bios_seed, td->start_sector,
+ &start_sector, &num_of_bios);
+
+ /* DISCARD */
+ total_bios += num_of_bios;
+ test_pr_info("%s: discard req: id=%d, startSec=%d, NumBios=%d",
+ __func__, td->unique_next_req_id, start_sector,
+ num_of_bios);
+ test_iosched_add_unique_test_req(0, REQ_UNIQUE_DISCARD,
+ start_sector, BIO_TO_SECTOR(num_of_bios),
+ NULL);
+
+ } while (++i < (BLKDEV_MAX_RQ-10));
+
+ test_pr_info("%s: total discard bios = %d", __func__, total_bios);
+
+ test_pr_info("%s: add sanitize req", __func__);
+ test_iosched_add_unique_test_req(0, REQ_UNIQUE_SANITIZE, 0, 0, NULL);
+
+ return 0;
+}
+
static bool message_repeat;
static int test_open(struct inode *inode, struct file *file)
{
@@ -1135,12 +1494,12 @@
mbtd->test_info.get_test_case_str_fn = get_test_case_str;
mbtd->test_info.post_test_fn = post_test;
- for (i = 0 ; i < number ; ++i) {
+ for (i = 0; i < number; ++i) {
test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
test_pr_info("%s: ====================", __func__);
- for (j = SEND_WRITE_PACKING_MIN_TESTCASE ;
- j <= SEND_WRITE_PACKING_MAX_TESTCASE ; j++) {
+ for (j = SEND_WRITE_PACKING_MIN_TESTCASE;
+ j <= SEND_WRITE_PACKING_MAX_TESTCASE; j++) {
mbtd->test_info.testcase = j;
mbtd->is_random = RANDOM_TEST;
@@ -1233,7 +1592,7 @@
mbtd->test_info.get_test_case_str_fn = get_test_case_str;
mbtd->test_info.post_test_fn = post_test;
- for (i = 0 ; i < number ; ++i) {
+ for (i = 0; i < number; ++i) {
test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
test_pr_info("%s: ====================", __func__);
@@ -1332,7 +1691,7 @@
mbtd->test_info.get_test_case_str_fn = get_test_case_str;
mbtd->test_info.post_test_fn = post_test;
- for (i = 0 ; i < number ; ++i) {
+ for (i = 0; i < number; ++i) {
test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
test_pr_info("%s: ====================", __func__);
@@ -1408,12 +1767,167 @@
.read = send_invalid_packed_test_read,
};
+/* packing_control TEST */
+static ssize_t write_packing_control_test_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret = 0;
+ int i = 0;
+ int number = -1;
+ int j = 0;
+ struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
+ int max_num_requests = mq->card->ext_csd.max_packed_writes;
+ int test_successful = 1;
+
+ test_pr_info("%s: -- write_packing_control TEST --", __func__);
+
+ sscanf(buf, "%d", &number);
+
+ if (number <= 0)
+ number = 1;
+
+ test_pr_info("%s: max_num_requests = %d ", __func__,
+ max_num_requests);
+
+ memset(&mbtd->test_info, 0, sizeof(struct test_info));
+ mbtd->test_group = TEST_PACKING_CONTROL_GROUP;
+
+ if (validate_packed_commands_settings())
+ return count;
+
+ mbtd->test_info.data = mbtd;
+ mbtd->test_info.prepare_test_fn = prepare_test;
+ mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+ mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+
+ for (i = 0; i < number; ++i) {
+ test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+ test_pr_info("%s: ====================", __func__);
+
+ for (j = PACKING_CONTROL_MIN_TESTCASE;
+ j <= PACKING_CONTROL_MAX_TESTCASE; j++) {
+
+ test_successful = 1;
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret) {
+ test_successful = 0;
+ break;
+ }
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = NON_RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret) {
+ test_successful = 0;
+ break;
+ }
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+ }
+
+ if (!test_successful)
+ break;
+ }
+
+ test_pr_info("%s: Completed all the test cases.", __func__);
+
+ return count;
+}
+
+static ssize_t write_packing_control_test_read(struct file *file,
+ char __user *buffer,
+ size_t count,
+ loff_t *offset)
+{
+ memset((void *)buffer, 0, count);
+
+ snprintf(buffer, count,
+ "\nwrite_packing_control_test\n"
+ "=========\n"
+ "Description:\n"
+ "This test checks the following scenarios\n"
+ "- Packing expected - one over trigger\n"
+ "- Packing expected - N over trigger\n"
+ "- Packing expected - N over trigger followed by read\n"
+ "- Packing expected - N over trigger followed by flush\n"
+ "- Packing expected - threshold over trigger FB by flush\n"
+ "- Packing not expected - less than trigger\n"
+ "- Packing not expected - trigger requests\n"
+ "- Packing not expected - trigger, read, trigger\n"
+ "- Mixed state - packing -> no packing -> packing\n"
+ "- Mixed state - no packing -> packing -> no packing\n");
+
+ if (message_repeat == 1) {
+ message_repeat = 0;
+ return strnlen(buffer, count);
+ } else {
+ return 0;
+ }
+}
+
+const struct file_operations write_packing_control_test_ops = {
+ .open = test_open,
+ .write = write_packing_control_test_write,
+ .read = write_packing_control_test_read,
+};
+
+static ssize_t write_discard_sanitize_test_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret = 0;
+ int i = 0;
+ int number = -1;
+
+ sscanf(buf, "%d", &number);
+ if (number <= 0)
+ number = 1;
+
+ test_pr_info("%s: -- write_discard_sanitize TEST --\n", __func__);
+
+ memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+ mbtd->test_group = TEST_GENERAL_GROUP;
+
+ mbtd->test_info.data = mbtd;
+ mbtd->test_info.prepare_test_fn = prepare_write_discard_sanitize_read;
+ mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+ mbtd->test_info.timeout_msec = SANITIZE_TEST_TIMEOUT;
+
+ for (i = 0 ; i < number ; ++i) {
+ test_pr_info("%s: Cycle # %d / %d\n", __func__, i+1, number);
+ test_pr_info("%s: ===================", __func__);
+
+ mbtd->test_info.testcase = TEST_WRITE_DISCARD_SANITIZE_READ;
+ ret = test_iosched_start_test(&mbtd->test_info);
+
+ if (ret)
+ break;
+ }
+
+ return count;
+}
+
+const struct file_operations write_discard_sanitize_test_ops = {
+ .open = test_open,
+ .write = write_discard_sanitize_test_write,
+};
+
static void mmc_block_test_debugfs_cleanup(void)
{
debugfs_remove(mbtd->debug.random_test_seed);
debugfs_remove(mbtd->debug.send_write_packing_test);
debugfs_remove(mbtd->debug.err_check_test);
debugfs_remove(mbtd->debug.send_invalid_packed_test);
+ debugfs_remove(mbtd->debug.packing_control_test);
+ debugfs_remove(mbtd->debug.discard_sanitize_test);
}
static int mmc_block_test_debugfs_init(void)
@@ -1465,6 +1979,27 @@
if (!mbtd->debug.send_invalid_packed_test)
goto err_nomem;
+ mbtd->debug.packing_control_test = debugfs_create_file(
+ "packing_control_test",
+ S_IRUGO | S_IWUGO,
+ tests_root,
+ NULL,
+ &write_packing_control_test_ops);
+
+ if (!mbtd->debug.packing_control_test)
+ goto err_nomem;
+
+ mbtd->debug.discard_sanitize_test =
+ debugfs_create_file("write_discard_sanitize_test",
+ S_IRUGO | S_IWUGO,
+ tests_root,
+ NULL,
+ &write_discard_sanitize_test_ops);
+ if (!mbtd->debug.discard_sanitize_test) {
+ mmc_block_test_debugfs_cleanup();
+ return -ENOMEM;
+ }
+
return 0;
err_nomem:
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index a4af6c9..d833707 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -236,6 +236,14 @@
pr_err("%s:msmsdcc_sps_reset_ep(cons) error=%d\n",
mmc_hostname(host->mmc), rc);
+ if (host->sps.reset_device) {
+ rc = sps_device_reset(host->sps.bam_handle);
+ if (rc)
+ pr_err("%s: sps_device_reset error=%d\n",
+ mmc_hostname(host->mmc), rc);
+ host->sps.reset_device = false;
+ }
+
/* Restore all BAM pipes connections */
rc = msmsdcc_sps_restore_ep(host, &host->sps.prod);
if (rc)
@@ -4323,6 +4331,96 @@
}
/**
+ * Handle BAM device's global error condition
+ *
+ * This is an error handler for the SDCC bam device
+ *
+ * This function is registered as a callback with SPS-BAM
+ * driver and will be called in case there are any errors for
+ * the SDCC BAM device. Any error conditions in the BAM
+ * device are global and will result in this function
+ * being called once per device.
+ *
+ * This function will be called from the sps driver's
+ * interrupt context.
+ *
+ * @sps_cb_case - indicates what error it is
+ * @user - Pointer to sdcc host structure
+ */
+static void
+msmsdcc_sps_bam_global_irq_cb(enum sps_callback_case sps_cb_case, void *user)
+{
+ struct msmsdcc_host *host = (struct msmsdcc_host *)user;
+ struct mmc_request *mrq;
+ unsigned long flags;
+ int32_t error = 0;
+
+ BUG_ON(!host);
+ BUG_ON(!is_sps_mode(host));
+
+ if (sps_cb_case == SPS_CALLBACK_BAM_ERROR_IRQ) {
+ /**
+ * Reset all the endpoints along with resetting the SPS device.
+ */
+ host->sps.pipe_reset_pending = true;
+ host->sps.reset_device = true;
+
+ pr_err("%s: BAM Global ERROR IRQ happened\n",
+ mmc_hostname(host->mmc));
+ error = EAGAIN;
+ } else if (sps_cb_case == SPS_CALLBACK_BAM_HRESP_ERR_IRQ) {
+ /**
+ * This means that there was an AHB access error and
+ * we do not have the privileges to access the address
+ * we are trying to read/write.
+ */
+ pr_err("%s: BAM HRESP_ERR_IRQ happened\n",
+ mmc_hostname(host->mmc));
+ error = EACCES;
+ } else {
+ /**
+ * Ideally this should not have happened. If it does,
+ * something is seriously wrong.
+ */
+ pr_err("%s: BAM global IRQ callback received, type:%d\n",
+ mmc_hostname(host->mmc), (u32) sps_cb_case);
+ error = EIO;
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ mrq = host->curr.mrq;
+
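+ /* Fail any request in flight with the error chosen above and abort the active SPS transfer */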
+ if (mrq && mrq->cmd) {
+ msmsdcc_dump_sdcc_state(host);
+
+ if (!mrq->cmd->error)
+ mrq->cmd->error = -error;
+ if (host->curr.data) {
+ if (mrq->data && !mrq->data->error)
+ mrq->data->error = -error;
+ host->curr.data_xfered = 0;
+ if (host->sps.sg && is_sps_mode(host)) {
+ /* Stop current SPS transfer */
+ msmsdcc_sps_exit_curr_xfer(host);
+ } else {
+ /* this condition should not have happened */
+ pr_err("%s: something is seriously wrong. "\
+ "Funtion: %s, line: %d\n",
+ mmc_hostname(host->mmc),
+ __func__, __LINE__);
+ }
+ } else {
+ /* this condition should not have happened */
+ pr_err("%s: something is seriously wrong. Funtion: "\
+ "%s, line: %d\n", mmc_hostname(host->mmc),
+ __func__, __LINE__);
+ }
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/**
* Initialize SPS HW connected with SDCC core
*
* This function register BAM HW resources with
@@ -4371,6 +4469,8 @@
/* SPS driver wll handle the SDCC BAM IRQ */
bam.irq = (u32)host->bam_irqres->start;
bam.manage = SPS_BAM_MGR_LOCAL;
+ bam.callback = msmsdcc_sps_bam_global_irq_cb;
+ bam.user = (void *)host;
pr_info("%s: bam physical base=0x%x\n", mmc_hostname(host->mmc),
(u32)bam.phys_addr);
@@ -4505,6 +4605,35 @@
return count;
}
+static ssize_t
+show_idle_timeout(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msmsdcc_host *host = mmc_priv(mmc);
+
+ return snprintf(buf, PAGE_SIZE, "%u (Min 5 sec)\n",
+ host->idle_tout_ms / 1000);
+}
+
+static ssize_t
+store_idle_timeout(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct msmsdcc_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ unsigned int timeout; /* in secs */
+
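+ /* Accept only values larger than the 5 second default; anything else is silently ignored */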
+ if (!kstrtou32(buf, 0, &timeout)
+ && (timeout > MSM_MMC_DEFAULT_IDLE_TIMEOUT / 1000)) {
+ spin_lock_irqsave(&host->lock, flags);
+ host->idle_tout_ms = timeout * 1000;
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+ return count;
+}
+
#ifdef CONFIG_HAS_EARLYSUSPEND
static void msmsdcc_early_suspend(struct early_suspend *h)
{
@@ -5321,6 +5450,7 @@
pm_runtime_enable(&(pdev)->dev);
}
#endif
+ host->idle_tout_ms = MSM_MMC_DEFAULT_IDLE_TIMEOUT;
setup_timer(&host->req_tout_timer, msmsdcc_req_tout_timer_hdlr,
(unsigned long)host);
@@ -5392,8 +5522,19 @@
if (ret)
goto remove_max_bus_bw_file;
}
+ host->idle_timeout.show = show_idle_timeout;
+ host->idle_timeout.store = store_idle_timeout;
+ sysfs_attr_init(&host->idle_timeout.attr);
+ host->idle_timeout.attr.name = "idle_timeout";
+ host->idle_timeout.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(&pdev->dev, &host->idle_timeout);
+ if (ret)
+ goto remove_polling_file;
return 0;
+ remove_polling_file:
+ if (!plat->status_irq)
+ device_remove_file(&pdev->dev, &host->polling);
remove_max_bus_bw_file:
device_remove_file(&pdev->dev, &host->max_bus_bw);
platform_irq_free:
@@ -5474,6 +5615,7 @@
device_remove_file(&pdev->dev, &host->max_bus_bw);
if (!plat->status_irq)
device_remove_file(&pdev->dev, &host->polling);
+ device_remove_file(&pdev->dev, &host->idle_timeout);
del_timer_sync(&host->req_tout_timer);
tasklet_kill(&host->dma_tlet);
@@ -5771,7 +5913,7 @@
return 0;
/* Idle timeout is not configurable for now */
- pm_schedule_suspend(dev, MSM_MMC_IDLE_TIMEOUT);
+ pm_schedule_suspend(dev, host->idle_tout_ms);
return -EAGAIN;
}
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index baeabd2..cc41c46 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -213,7 +213,7 @@
#define NR_SG 128
-#define MSM_MMC_IDLE_TIMEOUT 5000 /* msecs */
+#define MSM_MMC_DEFAULT_IDLE_TIMEOUT 5000 /* msecs */
#define MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
/* Set the request timeout to 10secs */
@@ -321,6 +321,7 @@
unsigned int busy;
unsigned int xfer_req_cnt;
bool pipe_reset_pending;
+ bool reset_device;
struct tasklet_struct tlet;
};
@@ -410,9 +411,11 @@
bool sdio_wakeupirq_disabled;
struct mutex clk_mutex;
bool pending_resume;
+ unsigned int idle_tout_ms; /* Timeout in msecs */
struct msmsdcc_msm_bus_vote msm_bus_vote;
struct device_attribute max_bus_bw;
struct device_attribute polling;
+ struct device_attribute idle_timeout;
};
#define MSMSDCC_VERSION_MASK 0xFFFF
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 0c3d4ad..d75cac4 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -60,4 +60,11 @@
devices support Pulse Width Modulation output with user generated
patterns. They share a lookup table with size of 64 entries.
+config QPNP_POWER_ON
+ tristate "QPNP PMIC POWER-ON Driver"
+ depends on OF_SPMI && SPMI && MSM_QPNP_INT
+ help
+ This driver supports the power-on functionality of the Qualcomm
+ QPNP PMICs. It currently supports reporting the change in status
+ of the KPDPWR_N line (connected to the power key).
endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 6deb6ee..2b6b806 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_USB_BAM) += usb_bam.o
obj-$(CONFIG_SPS) += sps/
obj-$(CONFIG_QPNP_PWM) += qpnp-pwm.o
+obj-$(CONFIG_QPNP_POWER_ON) += qpnp-power-on.o
diff --git a/drivers/platform/msm/qpnp-power-on.c b/drivers/platform/msm/qpnp-power-on.c
new file mode 100644
index 0000000..d8bb884
--- /dev/null
+++ b/drivers/platform/msm/qpnp-power-on.c
@@ -0,0 +1,241 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+
+#define QPNP_PON_RT_STS(base) (base + 0x10)
+#define QPNP_PON_PULL_CTL(base) (base + 0x70)
+#define QPNP_PON_DBC_CTL(base) (base + 0x71)
+
+#define QPNP_PON_CNTL_PULL_UP BIT(1)
+#define QPNP_PON_CNTL_TRIG_DELAY_MASK (0x7)
+#define QPNP_PON_KPDPWR_N_SET BIT(0)
+
+struct qpnp_pon {
+ struct spmi_device *spmi;
+ struct input_dev *pon_input;
+ int key_status_irq;
+ u16 base;
+};
+
+static irqreturn_t qpnp_pon_key_irq(int irq, void *_pon)
+{
+ u8 pon_rt_sts;
+ int rc;
+ struct qpnp_pon *pon = _pon;
+
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_RT_STS(pon->base), &pon_rt_sts, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to read PON RT status\n");
+ return IRQ_HANDLED;
+ }
+
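+ /* Report the power key as pressed while the KPDPWR_N status bit is clear */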
+ input_report_key(pon->pon_input, KEY_POWER,
+ !(pon_rt_sts & QPNP_PON_KPDPWR_N_SET));
+ input_sync(pon->pon_input);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit qpnp_pon_key_init(struct qpnp_pon *pon)
+{
+ int rc = 0;
+ u32 pullup, delay;
+ u8 pon_cntl;
+
+ pon->key_status_irq = spmi_get_irq_byname(pon->spmi,
+ NULL, "power-key");
+ if (pon->key_status_irq < 0) {
+ dev_err(&pon->spmi->dev, "Unable to get pon key irq\n");
+ return -ENXIO;
+ }
+
+ rc = of_property_read_u32(pon->spmi->dev.of_node,
+ "qcom,pon-key-dbc-delay", &delay);
+ if (!rc) {
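+ /* Scale the requested debounce delay into the 3-bit TRIG_DELAY field; the shift by 6 suggests 1/64 s steps */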
+ delay = (delay << 6) / USEC_PER_SEC;
+ delay = ilog2(delay);
+
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_DBC_CTL(pon->base), &pon_cntl, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "spmi read addr=%x failed\n",
+ QPNP_PON_DBC_CTL(pon->base));
+ return rc;
+ }
+ pon_cntl &= ~QPNP_PON_CNTL_TRIG_DELAY_MASK;
+ pon_cntl |= (delay & QPNP_PON_CNTL_TRIG_DELAY_MASK);
+ rc = spmi_ext_register_writel(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_DBC_CTL(pon->base), &pon_cntl, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "spmi write addre=%x failed\n",
+ QPNP_PON_DBC_CTL(pon->base));
+ return rc;
+ }
+ }
+
+ rc = of_property_read_u32(pon->spmi->dev.of_node,
+ "qcom,pon-key-pull-up", &pullup);
+ if (!rc) {
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_PULL_CTL(pon->base), &pon_cntl, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "spmi read addr=%x failed\n",
+ QPNP_PON_PULL_CTL(pon->base));
+ return rc;
+ }
+ if (pullup)
+ pon_cntl |= QPNP_PON_CNTL_PULL_UP;
+ else
+ pon_cntl &= ~QPNP_PON_CNTL_PULL_UP;
+
+ rc = spmi_ext_register_writel(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_PULL_CTL(pon->base), &pon_cntl, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "spmi write addr=%x failed\n",
+ QPNP_PON_PULL_CTL(pon->base));
+ return rc;
+ }
+ }
+
+ pon->pon_input = input_allocate_device();
+ if (!pon->pon_input) {
+ dev_err(&pon->spmi->dev, "Can't allocate pon button\n");
+ return -ENOMEM;
+ }
+
+ input_set_capability(pon->pon_input, EV_KEY, KEY_POWER);
+ pon->pon_input->name = "qpnp_pon_key";
+ pon->pon_input->phys = "qpnp_pon_key/input0";
+
+ rc = input_register_device(pon->pon_input);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Can't register pon key: %d\n", rc);
+ goto free_input_dev;
+ }
+
+ rc = request_any_context_irq(pon->key_status_irq, qpnp_pon_key_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "qpnp_pon_key_status", pon);
+ if (rc < 0) {
+ dev_err(&pon->spmi->dev, "Can't request %d IRQ for pon: %d\n",
+ pon->key_status_irq, rc);
+ goto unreg_input_dev;
+ }
+
+ device_init_wakeup(&pon->spmi->dev, 1);
+ enable_irq_wake(pon->key_status_irq);
+
+ return rc;
+
+unreg_input_dev:
+ input_unregister_device(pon->pon_input);
+free_input_dev:
+ input_free_device(pon->pon_input);
+ return rc;
+}
+
+static int __devinit qpnp_pon_probe(struct spmi_device *spmi)
+{
+ struct qpnp_pon *pon;
+ struct resource *pon_resource;
+ u32 pon_key_enable = 0;
+ int rc = 0;
+
+ pon = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_pon),
+ GFP_KERNEL);
+ if (!pon) {
+ dev_err(&spmi->dev, "Can't allocate qpnp_pon\n");
+ return -ENOMEM;
+ }
+
+ pon->spmi = spmi;
+
+ pon_resource = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+ if (!pon_resource) {
+ dev_err(&spmi->dev, "Unable to get PON base address\n");
+ return -ENXIO;
+ }
+ pon->base = pon_resource->start;
+
+ dev_set_drvdata(&spmi->dev, pon);
+
+ /* pon-key-enable property must be set to register pon key */
+ rc = of_property_read_u32(spmi->dev.of_node, "qcom,pon-key-enable",
+ &pon_key_enable);
+ if (rc && rc != -EINVAL) {
+ dev_err(&spmi->dev,
+ "Error reading 'pon-key-enable' property (%d)", rc);
+ return rc;
+ }
+
+ if (pon_key_enable) {
+ rc = qpnp_pon_key_init(pon);
+ if (rc < 0) {
+ dev_err(&spmi->dev, "Failed to register pon-key\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int qpnp_pon_remove(struct spmi_device *spmi)
+{
+ struct qpnp_pon *pon = dev_get_drvdata(&spmi->dev);
+
+ if (pon->pon_input) {
+ free_irq(pon->key_status_irq, pon);
+ input_unregister_device(pon->pon_input);
+ }
+
+ return 0;
+}
+
+static struct of_device_id spmi_match_table[] = {
+ {
+ .compatible = "qcom,qpnp-power-on",
+ },
+ {}
+};
+
+static struct spmi_driver qpnp_pon_driver = {
+ .driver = {
+ .name = "qcom,qpnp-power-on",
+ .of_match_table = spmi_match_table,
+ },
+ .probe = qpnp_pon_probe,
+ .remove = __devexit_p(qpnp_pon_remove),
+};
+
+static int __init qpnp_pon_init(void)
+{
+ return spmi_driver_register(&qpnp_pon_driver);
+}
+module_init(qpnp_pon_init);
+
+static void __exit qpnp_pon_exit(void)
+{
+ spmi_driver_unregister(&qpnp_pon_driver);
+}
+module_exit(qpnp_pon_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC POWER-ON driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index 1329f6c..0371f5a 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -2028,30 +2028,30 @@
goto device_create_err;
}
- if (!d_type) {
- sps->dfab_clk = clk_get(sps->dev, "dfab_clk");
- if (IS_ERR(sps->dfab_clk)) {
- SPS_ERR("sps:fail to get dfab_clk.");
+ sps->dfab_clk = clk_get(sps->dev, "dfab_clk");
+ if (IS_ERR(sps->dfab_clk)) {
+ SPS_ERR("sps:fail to get dfab_clk.");
+ goto clk_err;
+ } else {
+ ret = clk_set_rate(sps->dfab_clk, 64000000);
+ if (ret) {
+ SPS_ERR("sps:failed to set dfab_clk rate.");
+ clk_put(sps->dfab_clk);
goto clk_err;
- } else {
- ret = clk_set_rate(sps->dfab_clk, 64000000);
- if (ret) {
- SPS_ERR("sps:failed to set dfab_clk rate.");
- clk_put(sps->dfab_clk);
- goto clk_err;
- }
}
}
- sps->pmem_clk = clk_get(sps->dev, "mem_clk");
- if (IS_ERR(sps->pmem_clk)) {
- SPS_ERR("sps:fail to get pmem_clk.");
- goto clk_err;
- } else {
- ret = clk_prepare_enable(sps->pmem_clk);
- if (ret) {
- SPS_ERR("sps:failed to enable pmem_clk. ret=%d", ret);
+ if (!d_type) {
+ sps->pmem_clk = clk_get(sps->dev, "mem_clk");
+ if (IS_ERR(sps->pmem_clk)) {
+ SPS_ERR("sps:fail to get pmem_clk.");
goto clk_err;
+ } else {
+ ret = clk_prepare_enable(sps->pmem_clk);
+ if (ret) {
+ SPS_ERR("sps:failed to enable pmem_clk.");
+ goto clk_err;
+ }
}
}
@@ -2068,26 +2068,22 @@
}
}
- if (!d_type) {
- ret = clk_prepare_enable(sps->dfab_clk);
- if (ret) {
- SPS_ERR("sps:failed to enable dfab_clk. ret=%d", ret);
- goto clk_err;
- }
+ ret = clk_prepare_enable(sps->dfab_clk);
+ if (ret) {
+ SPS_ERR("sps:failed to enable dfab_clk. ret=%d", ret);
+ goto clk_err;
}
#endif
ret = sps_device_init();
if (ret) {
SPS_ERR("sps:sps_device_init err.");
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
- if (!d_type)
- clk_disable_unprepare(sps->dfab_clk);
+ clk_disable_unprepare(sps->dfab_clk);
#endif
goto sps_device_init_err;
}
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
- if (!d_type)
- clk_disable_unprepare(sps->dfab_clk);
+ clk_disable_unprepare(sps->dfab_clk);
#endif
sps->is_ready = true;
@@ -2114,9 +2110,9 @@
class_destroy(sps->dev_class);
sps_device_de_init();
+ clk_put(sps->dfab_clk);
if (!d_type)
- clk_put(sps->dfab_clk);
- clk_put(sps->pmem_clk);
+ clk_put(sps->pmem_clk);
clk_put(sps->bamdma_clk);
return 0;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index fc0d02a..1849118 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1169,4 +1169,13 @@
This driver can also be built as a module. If so, the module
will be called rtc-ls1x.
+config RTC_DRV_QPNP
+ tristate "Qualcomm QPNP PMIC RTC"
+ depends on SPMI && OF_SPMI && MSM_QPNP_INT
+ help
+ Say Y here if you want to support the Qualcomm QPNP PMIC RTC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qpnp-rtc.
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 8a3cecd..295f927 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -88,6 +88,7 @@
obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
obj-$(CONFIG_RTC_DRV_PUV3) += rtc-puv3.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
+obj-$(CONFIG_RTC_DRV_QPNP) += qpnp-rtc.o
obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o
obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
diff --git a/drivers/rtc/qpnp-rtc.c b/drivers/rtc/qpnp-rtc.c
new file mode 100644
index 0000000..5650e74
--- /dev/null
+++ b/drivers/rtc/qpnp-rtc.c
@@ -0,0 +1,641 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/of_device.h>
+#include <linux/spmi.h>
+#include <linux/spinlock.h>
+#include <linux/spmi.h>
+
+/* RTC/ALARM Register offsets */
+#define REG_OFFSET_ALARM_RW 0x40
+#define REG_OFFSET_ALARM_CTRL1 0x46
+#define REG_OFFSET_ALARM_CTRL2 0x48
+#define REG_OFFSET_RTC_WRITE 0x40
+#define REG_OFFSET_RTC_CTRL 0x46
+#define REG_OFFSET_RTC_READ 0x48
+#define REG_OFFSET_PERP_SUBTYPE 0x05
+
+/* RTC_CTRL register bit fields */
+#define BIT_RTC_ENABLE BIT(7)
+#define BIT_RTC_ALARM_ENABLE BIT(7)
+#define BIT_RTC_ABORT_ENABLE BIT(0)
+#define BIT_RTC_ALARM_CLEAR BIT(0)
+
+/* RTC/ALARM peripheral subtype values */
+#define RTC_PERPH_SUBTYPE 0x1
+#define ALARM_PERPH_SUBTYPE 0x3
+
+#define NUM_8_BIT_RTC_REGS 0x4
+
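+/* Assemble the 32-bit seconds count from the four 8-bit RTC registers, LSB first */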
+#define TO_SECS(arr) (arr[0] | (arr[1] << 8) | (arr[2] << 16) | \
+ (arr[3] << 24))
+
+/* rtc driver internal structure */
+struct qpnp_rtc {
+ u8 rtc_ctrl_reg;
+ u8 alarm_ctrl_reg1;
+ u16 rtc_base;
+ u16 alarm_base;
+ u32 rtc_write_enable;
+ u32 rtc_alarm_powerup;
+ int rtc_alarm_irq;
+ struct device *rtc_dev;
+ struct rtc_device *rtc;
+ struct spmi_device *spmi;
+ spinlock_t alarm_ctrl_lock;
+};
+
+static int qpnp_read_wrapper(struct qpnp_rtc *rtc_dd, u8 *rtc_val,
+ u16 base, int count)
+{
+ int rc;
+ struct spmi_device *spmi = rtc_dd->spmi;
+
+ rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, base, rtc_val,
+ count);
+ if (rc) {
+ dev_err(rtc_dd->rtc_dev, "SPMI read failed\n");
+ return rc;
+ }
+ return 0;
+}
+
+static int qpnp_write_wrapper(struct qpnp_rtc *rtc_dd, u8 *rtc_val,
+ u16 base, int count)
+{
+ int rc;
+ struct spmi_device *spmi = rtc_dd->spmi;
+
+ rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, base, rtc_val,
+ count);
+ if (rc) {
+ dev_err(rtc_dd->rtc_dev, "SPMI write failed\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+qpnp_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ int rc;
+ unsigned long secs, irq_flags;
+ u8 value[4], reg = 0, alarm_enabled = 0, ctrl_reg;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ rtc_tm_to_time(tm, &secs);
+
+ value[0] = secs & 0xFF;
+ value[1] = (secs >> 8) & 0xFF;
+ value[2] = (secs >> 16) & 0xFF;
+ value[3] = (secs >> 24) & 0xFF;
+
+ dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
+
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ ctrl_reg = rtc_dd->alarm_ctrl_reg1;
+
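+ /* Temporarily disable the alarm (if armed) while the time registers are updated; it is re-armed after the write */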
+ if (ctrl_reg & BIT_RTC_ALARM_ENABLE) {
+ alarm_enabled = 1;
+ ctrl_reg &= ~BIT_RTC_ALARM_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(dev, "Write to ALARM ctrl reg failed\n");
+ goto rtc_rw_fail;
+ }
+ } else
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ /*
+ * The 32 bit seconds value is converted to four 8 bit values
+ * |<------ 32 bit time value in seconds ------>|
+ * <- 8 bit ->|<- 8 bit ->|<- 8 bit ->|<- 8 bit ->|
+ * ----------------------------------------------
+ * | BYTE[3] | BYTE[2] | BYTE[1] | BYTE[0] |
+ * ----------------------------------------------
+ *
+ * The RTC has four 8 bit registers for writing the time in seconds:
+ * WDATA[3], WDATA[2], WDATA[1], WDATA[0]
+ *
+ * Writes to the RTC registers should be done in the following order:
+ * Clear WDATA[0] register
+ *
+ * Write BYTE[1], BYTE[2] and BYTE[3] of time to
+ * RTC WDATA[3], WDATA[2], WDATA[1] registers
+ *
+ * Write BYTE[0] of time to RTC WDATA[0] register
+ *
+ * Clearing BYTE[0] first and writing it at the end prevents any
+ * unintentional carry from WDATA[0] into the higher bytes during
+ * the write operation.
+ */
+
+ /* Clear WDATA[0] */
+ reg = 0x0;
+ rc = qpnp_write_wrapper(rtc_dd, &reg,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_WRITE, 1);
+ if (rc) {
+ dev_err(dev, "Write to RTC reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ /* Write to WDATA[3], WDATA[2] and WDATA[1] */
+ rc = qpnp_write_wrapper(rtc_dd, &value[1],
+ rtc_dd->rtc_base + REG_OFFSET_RTC_WRITE + 1, 3);
+ if (rc) {
+ dev_err(dev, "Write to RTC reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ /* Write to WDATA[0] */
+ rc = qpnp_write_wrapper(rtc_dd, value,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_WRITE, 1);
+ if (rc) {
+ dev_err(dev, "Write to RTC reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ if (alarm_enabled) {
+ ctrl_reg |= BIT_RTC_ALARM_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(dev, "Write to ALARM ctrl reg failed\n");
+ goto rtc_rw_fail;
+ }
+ }
+
+ rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+
+rtc_rw_fail:
+ if (alarm_enabled)
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ return rc;
+}
+
+static int
+qpnp_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ int rc;
+ u8 value[4], reg;
+ unsigned long secs;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ rc = qpnp_read_wrapper(rtc_dd, value,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_READ,
+ NUM_8_BIT_RTC_REGS);
+ if (rc) {
+ dev_err(dev, "Read from RTC reg failed\n");
+ return rc;
+ }
+
+ /*
+ * Read the LSB again and check if there has been a carry over
+ * If there is, redo the read operation
+ */
+ rc = qpnp_read_wrapper(rtc_dd, &reg,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_READ, 1);
+ if (rc) {
+ dev_err(dev, "Read from RTC reg failed\n");
+ return rc;
+ }
+
+ if (reg < value[0]) {
+ rc = qpnp_read_wrapper(rtc_dd, value,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_READ,
+ NUM_8_BIT_RTC_REGS);
+ if (rc) {
+ dev_err(dev, "Read from RTC reg failed\n");
+ return rc;
+ }
+ }
+
+ secs = TO_SECS(value);
+
+ rtc_time_to_tm(secs, tm);
+
+ rc = rtc_valid_tm(tm);
+ if (rc) {
+ dev_err(dev, "Invalid time read from RTC\n");
+ return rc;
+ }
+
+ dev_dbg(dev, "secs = %lu, h:m:s == %d:%d:%d, d/m/y = %d/%d/%d\n",
+ secs, tm->tm_hour, tm->tm_min, tm->tm_sec,
+ tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+ return 0;
+}
+
+static int
+qpnp_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ int rc;
+ u8 value[4], ctrl_reg;
+ unsigned long secs, secs_rtc, irq_flags;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+ struct rtc_time rtc_tm;
+
+ rtc_tm_to_time(&alarm->time, &secs);
+
+ /*
+ * Read the current RTC time and verify if the alarm time is in the
+ * past. If yes, return invalid
+ */
+ rc = qpnp_rtc_read_time(dev, &rtc_tm);
+ if (rc) {
+ dev_err(dev, "Unable to read RTC time\n");
+ return -EINVAL;
+ }
+
+ rtc_tm_to_time(&rtc_tm, &secs_rtc);
+ if (secs < secs_rtc) {
+ dev_err(dev, "Trying to set alarm in the past\n");
+ return -EINVAL;
+ }
+
+ value[0] = secs & 0xFF;
+ value[1] = (secs >> 8) & 0xFF;
+ value[2] = (secs >> 16) & 0xFF;
+ value[3] = (secs >> 24) & 0xFF;
+
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ rc = qpnp_write_wrapper(rtc_dd, value,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_RW,
+ NUM_8_BIT_RTC_REGS);
+ if (rc) {
+ dev_err(dev, "Write to ALARM reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ ctrl_reg = (alarm->enabled) ?
+ (rtc_dd->alarm_ctrl_reg1 | BIT_RTC_ALARM_ENABLE) :
+ (rtc_dd->alarm_ctrl_reg1 & ~BIT_RTC_ALARM_ENABLE);
+
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(dev, "Write to ALARM cntrol reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+
+ dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
+ alarm->time.tm_hour, alarm->time.tm_min,
+ alarm->time.tm_sec, alarm->time.tm_mday,
+ alarm->time.tm_mon, alarm->time.tm_year);
+rtc_rw_fail:
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ return rc;
+}
+
+static int
+qpnp_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ int rc;
+ u8 value[4];
+ unsigned long secs;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ rc = qpnp_read_wrapper(rtc_dd, value,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_RW,
+ NUM_8_BIT_RTC_REGS);
+ if (rc) {
+ dev_err(dev, "Read from ALARM reg failed\n");
+ return rc;
+ }
+
+ secs = TO_SECS(value);
+ rtc_time_to_tm(secs, &alarm->time);
+
+ rc = rtc_valid_tm(&alarm->time);
+ if (rc) {
+ dev_err(dev, "Invalid time read from RTC\n");
+ return rc;
+ }
+
+ dev_dbg(dev, "Alarm set for - h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
+ alarm->time.tm_hour, alarm->time.tm_min,
+ alarm->time.tm_sec, alarm->time.tm_mday,
+ alarm->time.tm_mon, alarm->time.tm_year);
+
+ return 0;
+}
+
+
+static int
+qpnp_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ int rc;
+ unsigned long irq_flags;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+ u8 ctrl_reg;
+
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ ctrl_reg = rtc_dd->alarm_ctrl_reg1;
+ ctrl_reg = enabled ? (ctrl_reg | BIT_RTC_ALARM_ENABLE) :
+ (ctrl_reg & ~BIT_RTC_ALARM_ENABLE);
+
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(dev, "Write to ALARM control reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+
+rtc_rw_fail:
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ return rc;
+}
+
+static struct rtc_class_ops qpnp_rtc_ops = {
+ .read_time = qpnp_rtc_read_time,
+ .set_alarm = qpnp_rtc_set_alarm,
+ .read_alarm = qpnp_rtc_read_alarm,
+ .alarm_irq_enable = qpnp_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t qpnp_alarm_trigger(int irq, void *dev_id)
+{
+ struct qpnp_rtc *rtc_dd = dev_id;
+ u8 ctrl_reg;
+ int rc;
+ unsigned long irq_flags;
+
+ rtc_update_irq(rtc_dd->rtc, 1, RTC_IRQF | RTC_AF);
+
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ /* Clear the alarm enable bit */
+ ctrl_reg = rtc_dd->alarm_ctrl_reg1;
+ ctrl_reg &= ~BIT_RTC_ALARM_ENABLE;
+
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ dev_err(rtc_dd->rtc_dev,
+ "Write to ALARM control reg failed\n");
+ goto rtc_alarm_handled;
+ }
+
+ rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ /* Set ALARM_CLR bit */
+ ctrl_reg = 0x1;
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL2, 1);
+ if (rc)
+ dev_err(rtc_dd->rtc_dev,
+ "Write to ALARM control reg failed\n");
+
+rtc_alarm_handled:
+ return IRQ_HANDLED;
+}
+
+static int __devinit qpnp_rtc_probe(struct spmi_device *spmi)
+{
+ int rc;
+ u8 subtype;
+ struct qpnp_rtc *rtc_dd;
+ struct resource *resource;
+ struct spmi_resource *spmi_resource;
+
+ rtc_dd = devm_kzalloc(&spmi->dev, sizeof(*rtc_dd), GFP_KERNEL);
+ if (rtc_dd == NULL) {
+ dev_err(&spmi->dev, "Unable to allocate memory!\n");
+ return -ENOMEM;
+ }
+
+ /* Get the rtc write property */
+ rc = of_property_read_u32(spmi->dev.of_node, "qcom,qpnp-rtc-write",
+ &rtc_dd->rtc_write_enable);
+ if (rc && rc != -EINVAL) {
+ dev_err(&spmi->dev,
+ "Error reading rtc_write_enable property %d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,qpnp-rtc-alarm-pwrup",
+ &rtc_dd->rtc_alarm_powerup);
+ if (rc && rc != -EINVAL) {
+ dev_err(&spmi->dev,
+ "Error reading rtc_alarm_powerup property %d\n", rc);
+ return rc;
+ }
+
+ /* Initialise spinlock to protect RTC control register */
+ spin_lock_init(&rtc_dd->alarm_ctrl_lock);
+
+ rtc_dd->rtc_dev = &(spmi->dev);
+ rtc_dd->spmi = spmi;
+
+ /* Get RTC/ALARM resources */
+ spmi_for_each_container_dev(spmi_resource, spmi) {
+ if (!spmi_resource) {
+ dev_err(&spmi->dev,
+ "%s: rtc_alarm: spmi resource absent!\n",
+ __func__);
+ rc = -ENXIO;
+ goto fail_rtc_enable;
+ }
+
+ resource = spmi_get_resource(spmi, spmi_resource,
+ IORESOURCE_MEM, 0);
+ if (!(resource && resource->start)) {
+ dev_err(&spmi->dev,
+ "%s: node %s IO resource absent!\n",
+ __func__, spmi->dev.of_node->full_name);
+ rc = -ENXIO;
+ goto fail_rtc_enable;
+ }
+
+ rc = qpnp_read_wrapper(rtc_dd, &subtype,
+ resource->start + REG_OFFSET_PERP_SUBTYPE, 1);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "Peripheral subtype read failed\n");
+ goto fail_rtc_enable;
+ }
+
+ switch (subtype) {
+ case RTC_PERPH_SUBTYPE:
+ rtc_dd->rtc_base = resource->start;
+ break;
+ case ALARM_PERPH_SUBTYPE:
+ rtc_dd->alarm_base = resource->start;
+ rtc_dd->rtc_alarm_irq =
+ spmi_get_irq(spmi, spmi_resource, 0);
+ if (rtc_dd->rtc_alarm_irq < 0) {
+ dev_err(&spmi->dev, "ALARM IRQ absent\n");
+ rc = -ENXIO;
+ goto fail_rtc_enable;
+ }
+ break;
+ default:
+ dev_err(&spmi->dev, "Invalid peripheral subtype\n");
+ rc = -EINVAL;
+ goto fail_rtc_enable;
+ }
+ }
+
+ rtc_dd->rtc_ctrl_reg = BIT_RTC_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &rtc_dd->rtc_ctrl_reg,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_CTRL, 1);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "Write to RTC control reg failed\n");
+ goto fail_rtc_enable;
+ }
+
+ /* Enable abort enable feature */
+ rtc_dd->alarm_ctrl_reg1 = BIT_RTC_ABORT_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &rtc_dd->alarm_ctrl_reg1,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(&spmi->dev, "SPMI write failed!\n");
+ goto fail_rtc_enable;
+ }
+
+ if (rtc_dd->rtc_write_enable)
+ qpnp_rtc_ops.set_time = qpnp_rtc_set_time;
+
+ dev_set_drvdata(&spmi->dev, rtc_dd);
+
+ /* Register the RTC device */
+ rtc_dd->rtc = rtc_device_register("qpnp_rtc", &spmi->dev,
+ &qpnp_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc_dd->rtc)) {
+ dev_err(&spmi->dev, "%s: RTC registration failed (%ld)\n",
+ __func__, PTR_ERR(rtc_dd->rtc));
+ rc = PTR_ERR(rtc_dd->rtc);
+ goto fail_rtc_enable;
+ }
+
+ /* Request the alarm IRQ */
+ rc = request_any_context_irq(rtc_dd->rtc_alarm_irq,
+ qpnp_alarm_trigger, IRQF_TRIGGER_RISING,
+ "qpnp_rtc_alarm", rtc_dd);
+ if (rc) {
+ dev_err(&spmi->dev, "Request IRQ failed (%d)\n", rc);
+ goto fail_req_irq;
+ }
+
+ device_init_wakeup(&spmi->dev, 1);
+ enable_irq_wake(rtc_dd->rtc_alarm_irq);
+
+ dev_dbg(&spmi->dev, "Probe success !!\n");
+
+ return 0;
+
+fail_req_irq:
+ rtc_device_unregister(rtc_dd->rtc);
+fail_rtc_enable:
+ dev_set_drvdata(&spmi->dev, NULL);
+
+ return rc;
+}
+
+static int __devexit qpnp_rtc_remove(struct spmi_device *spmi)
+{
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(&spmi->dev);
+
+ device_init_wakeup(&spmi->dev, 0);
+ free_irq(rtc_dd->rtc_alarm_irq, rtc_dd);
+ rtc_device_unregister(rtc_dd->rtc);
+ dev_set_drvdata(&spmi->dev, NULL);
+
+ return 0;
+}
+
+static void qpnp_rtc_shutdown(struct spmi_device *spmi)
+{
+ u8 value[4] = {0};
+ u8 reg;
+ int rc;
+ unsigned long irq_flags;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(&spmi->dev);
+ bool rtc_alarm_powerup = rtc_dd->rtc_alarm_powerup;
+
+ if (!rtc_alarm_powerup) {
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ dev_dbg(&spmi->dev, "Disabling alarm interrupts\n");
+
+ /* Disable RTC alarms */
+ reg = rtc_dd->alarm_ctrl_reg1;
+ reg &= ~BIT_RTC_ALARM_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(rtc_dd->rtc_dev, "SPMI write failed\n");
+ goto fail_alarm_disable;
+ }
+
+ /* Clear Alarm register */
+ rc = qpnp_write_wrapper(rtc_dd, value,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_RW,
+ NUM_8_BIT_RTC_REGS);
+ if (rc)
+ dev_err(rtc_dd->rtc_dev, "SPMI write failed\n");
+
+fail_alarm_disable:
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ }
+}
+
+static struct of_device_id spmi_match_table[] = {
+ {
+ .compatible = "qcom,qpnp-rtc",
+ },
+ {}
+};
+
+static struct spmi_driver qpnp_rtc_driver = {
+ .probe = qpnp_rtc_probe,
+ .remove = __devexit_p(qpnp_rtc_remove),
+ .shutdown = qpnp_rtc_shutdown,
+ .driver = {
+ .name = "qcom,qpnp-rtc",
+ .owner = THIS_MODULE,
+ .of_match_table = spmi_match_table,
+ },
+};
+
+static int __init qpnp_rtc_init(void)
+{
+ return spmi_driver_register(&qpnp_rtc_driver);
+}
+module_init(qpnp_rtc_init);
+
+static void __exit qpnp_rtc_exit(void)
+{
+ spmi_driver_unregister(&qpnp_rtc_driver);
+}
+module_exit(qpnp_rtc_exit);
+
+MODULE_DESCRIPTION("SMPI PMIC RTC driver");
+MODULE_LICENSE("GPL V2");
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index d99a02a..b2d2f74 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -52,6 +52,16 @@
Also able to set threshold temperature for both hot and cold and update
when a threshold is reached.
+config THERMAL_TSENS8974
+ tristate "Qualcomm 8974 TSENS Temperature driver"
+ depends on THERMAL
+ help
+ This enables the thermal sysfs driver for the TSENS device. It
+ shows up in sysfs as a thermal zone with multiple trip points.
+ It can also set warm and cool threshold temperatures and notify
+ the userspace thermal client when a threshold is crossed. Warm
+ and cool temperature thresholds can be set independently for
+ each sensor.
+
config THERMAL_PM8XXX
tristate "Qualcomm PMIC PM8xxx Temperature Alarm"
depends on THERMAL
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 275a692..f7e7cc6 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -8,4 +8,5 @@
obj-$(CONFIG_THERMAL_TSENS8960) += msm8960_tsens.o
obj-$(CONFIG_THERMAL_PM8XXX) += pm8xxx-tm.o
obj-$(CONFIG_THERMAL_MONITOR) += msm_thermal.o
-obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
\ No newline at end of file
+obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
+obj-$(CONFIG_THERMAL_TSENS8974) += msm8974-tsens.o
diff --git a/drivers/thermal/msm8974-tsens.c b/drivers/thermal/msm8974-tsens.c
new file mode 100644
index 0000000..6628b79
--- /dev/null
+++ b/drivers/thermal/msm8974-tsens.c
@@ -0,0 +1,870 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/msm_tsens.h>
+#include <linux/err.h>
+#include <linux/of.h>
+
+#include <mach/msm_iomap.h>
+
+#define TSENS_DRIVER_NAME "msm-tsens"
+/* TSENS register info */
+#define TSENS_UPPER_LOWER_INTERRUPT_CTRL(n) ((n) + 0x1000)
+#define TSENS_INTERRUPT_EN BIT(0)
+
+#define TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(n) ((n) + 0x1004)
+#define TSENS_UPPER_STATUS_CLR BIT(21)
+#define TSENS_LOWER_STATUS_CLR BIT(20)
+#define TSENS_UPPER_THRESHOLD_MASK 0xffc00
+#define TSENS_LOWER_THRESHOLD_MASK 0x3ff
+#define TSENS_UPPER_THRESHOLD_SHIFT 10
+
+#define TSENS_S0_STATUS_ADDR(n) ((n) + 0x1030)
+#define TSENS_SN_ADDR_OFFSET 0x4
+#define TSENS_SN_STATUS_TEMP_MASK 0x3ff
+#define TSENS_SN_STATUS_LOWER_STATUS BIT(11)
+#define TSENS_SN_STATUS_UPPER_STATUS BIT(12)
+#define TSENS_STATUS_ADDR_OFFSET 2
+
+#define TSENS_TRDY_ADDR(n) ((n) + 0x105c)
+#define TSENS_TRDY_MASK BIT(0)
+
+#define TSENS_CTRL_ADDR(n) (n)
+#define TSENS_SW_RST BIT(1)
+#define TSENS_SN_MIN_MAX_STATUS_CTRL(n) ((n) + 4)
+#define TSENS_GLOBAL_CONFIG(n) ((n) + 0x34)
+#define TSENS_S0_MAIN_CONFIG(n) ((n) + 0x38)
+#define TSENS_SN_REMOTE_CONFIG(n) ((n) + 0x3c)
+
+/* TSENS calibration Mask data */
+#define TSENS_BASE1_MASK 0xff
+#define TSENS0_POINT1_MASK 0x3f00
+#define TSENS1_POINT1_MASK 0xfc000
+#define TSENS2_POINT1_MASK 0x3f00000
+#define TSENS3_POINT1_MASK 0xfc000000
+#define TSENS4_POINT1_MASK 0x3f
+#define TSENS5_POINT1_MASK 0xfc00
+#define TSENS6_POINT1_MASK 0x3f000
+#define TSENS7_POINT1_MASK 0xfc0000
+#define TSENS8_POINT1_MASK 0x3f000000
+#define TSENS9_POINT1_MASK 0x3f
+#define TSENS10_POINT1_MASK 0xfc00
+#define TSENS_CAL_SEL_0_1 0xc0000000
+#define TSENS_CAL_SEL_2 BIT(30)
+#define TSENS_CAL_SEL_SHIFT 30
+#define TSENS_CAL_SEL_SHIFT_2 28
+#define TSENS_ONE_POINT_CALIB 0x3
+#define TSENS_TWO_POINT_CALIB 0x2
+
+#define TSENS_BASE2_MASK 0xff000
+#define TSENS0_POINT2_MASK 0x3f00000
+#define TSENS1_POINT2_MASK 0xfc000000
+#define TSENS2_POINT2_MASK 0x3f
+#define TSENS3_POINT2_MASK 0xfc00
+#define TSENS4_POINT2_MASK 0x3f000
+#define TSENS5_POINT2_MASK 0xfc0000
+#define TSENS6_POINT2_MASK 0x3f000000
+#define TSENS7_POINT2_MASK 0x3f
+#define TSENS8_POINT2_MASK 0xfc00
+#define TSENS9_POINT2_MASK 0x3f000
+#define TSENS10_POINT2_MASK 0xfc0000
+
+#define TSENS_BIT_APPEND 0x3
+#define TSENS_CAL_DEGC_POINT1 30
+#define TSENS_CAL_DEGC_POINT2 120
+#define TSENS_SLOPE_FACTOR 1000
+
+/* TSENS register data */
+#define TSENS_TRDY_RDY_MIN_TIME 2000
+#define TSENS_TRDY_RDY_MAX_TIME 2100
+#define TSENS_THRESHOLD_MAX_CODE 0x3ff
+#define TSENS_THRESHOLD_MIN_CODE 0x0
+
+#define TSENS_CTRL_INIT_DATA1 0x3fffff9
+#define TSENS_GLOBAL_INIT_DATA 0x20013
+#define TSENS_S0_MAIN_CFG_INIT_DATA 0x1ba
+#define TSENS_SN_MIN_MAX_STATUS_CTRL_DATA 0x3ffc00
+#define TSENS_SN_REMOTE_CFG_DATA 0xdba
+
+/* Trips: warm and cool */
+enum tsens_trip_type {
+ TSENS_TRIP_WARM = 0,
+ TSENS_TRIP_COOL,
+ TSENS_TRIP_NUM,
+};
+
+struct tsens_tm_device_sensor {
+ struct thermal_zone_device *tz_dev;
+ enum thermal_device_mode mode;
+ unsigned int sensor_num;
+ struct work_struct work;
+ int offset;
+ int calib_data_point1;
+ int calib_data_point2;
+ uint32_t slope_mul_tsens_factor;
+};
+
+struct tsens_tm_device {
+ struct platform_device *pdev;
+ bool prev_reading_avail;
+ int tsens_factor;
+ uint32_t tsens_num_sensor;
+ int tsens_irq;
+ void *tsens_addr;
+ void *tsens_calib_addr;
+ int tsens_len;
+ int calib_len;
+ struct resource *res_tsens_mem;
+ struct resource *res_calib_mem;
+ struct tsens_tm_device_sensor sensor[0];
+};
+
+struct tsens_tm_device *tmdev;
+
+static int tsens_tz_code_to_degc(int adc_code, int sensor_num)
+{
+ int degcbeforefactor, degc;
+ degcbeforefactor = (adc_code *
+ tmdev->sensor[sensor_num].slope_mul_tsens_factor
+ + tmdev->sensor[sensor_num].offset);
+
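+ /* Round to the nearest degree by biasing with half the scale factor before dividing */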
+ if (degcbeforefactor == 0)
+ degc = degcbeforefactor;
+ else if (degcbeforefactor > 0)
+ degc = (degcbeforefactor + tmdev->tsens_factor/2)
+ / tmdev->tsens_factor;
+ else
+ degc = (degcbeforefactor - tmdev->tsens_factor/2)
+ / tmdev->tsens_factor;
+ return degc;
+}
+
+static int tsens_tz_degc_to_code(int degc, int sensor_num)
+{
+ int code = (degc * tmdev->tsens_factor -
+ tmdev->sensor[sensor_num].offset
+ + tmdev->sensor[sensor_num].slope_mul_tsens_factor/2)
+ / tmdev->sensor[sensor_num].slope_mul_tsens_factor;
+
+ if (code > TSENS_THRESHOLD_MAX_CODE)
+ code = TSENS_THRESHOLD_MAX_CODE;
+ else if (code < TSENS_THRESHOLD_MIN_CODE)
+ code = TSENS_THRESHOLD_MIN_CODE;
+ return code;
+}
+
+static void msm_tsens_get_temp(int sensor_num, unsigned long *temp)
+{
+ unsigned int code, sensor_addr;
+
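+ /* Wait for the first conversion to complete (TRDY set) before reading the status register */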
+ if (!tmdev->prev_reading_avail) {
+ while (!(readl_relaxed(TSENS_TRDY_ADDR(tmdev->tsens_addr))
+ & TSENS_TRDY_MASK))
+ usleep_range(TSENS_TRDY_RDY_MIN_TIME,
+ TSENS_TRDY_RDY_MAX_TIME);
+ tmdev->prev_reading_avail = true;
+ }
+
+ sensor_addr =
+ (unsigned int)TSENS_S0_STATUS_ADDR(tmdev->tsens_addr);
+ code = readl_relaxed(sensor_addr +
+ (sensor_num << TSENS_STATUS_ADDR_OFFSET));
+ *temp = tsens_tz_code_to_degc((code & TSENS_SN_STATUS_TEMP_MASK),
+ sensor_num);
+}
+
+static int tsens_tz_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+
+ if (!tm_sensor || tm_sensor->mode != THERMAL_DEVICE_ENABLED || !temp)
+ return -EINVAL;
+
+ msm_tsens_get_temp(tm_sensor->sensor_num, temp);
+
+ return 0;
+}
+
+int tsens_get_temp(struct tsens_device *device, unsigned long *temp)
+{
+ if (!tmdev)
+ return -ENODEV;
+
+ msm_tsens_get_temp(device->sensor_num, temp);
+
+ return 0;
+}
+EXPORT_SYMBOL(tsens_get_temp);
+
+static int tsens_tz_get_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode *mode)
+{
+ struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+
+ if (!tm_sensor || !mode)
+ return -EINVAL;
+
+ *mode = tm_sensor->mode;
+
+ return 0;
+}
+
+static int tsens_tz_get_trip_type(struct thermal_zone_device *thermal,
+ int trip, enum thermal_trip_type *type)
+{
+ struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+
+ if (!tm_sensor || trip < 0 || !type)
+ return -EINVAL;
+
+ switch (trip) {
+ case TSENS_TRIP_WARM:
+ *type = THERMAL_TRIP_CONFIGURABLE_HI;
+ break;
+ case TSENS_TRIP_COOL:
+ *type = THERMAL_TRIP_CONFIGURABLE_LOW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tsens_tz_activate_trip_type(struct thermal_zone_device *thermal,
+ int trip, enum thermal_trip_activation_mode mode)
+{
+ struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+ unsigned int reg_cntl, code, hi_code, lo_code, mask;
+
+ if (!tm_sensor || trip < 0)
+ return -EINVAL;
+
+ lo_code = TSENS_THRESHOLD_MIN_CODE;
+ hi_code = TSENS_THRESHOLD_MAX_CODE;
+
+ reg_cntl = readl_relaxed((TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
+ (tmdev->tsens_addr) +
+ (tm_sensor->sensor_num * 4)));
+ switch (trip) {
+ case TSENS_TRIP_WARM:
+ code = (reg_cntl & TSENS_UPPER_THRESHOLD_MASK)
+ >> TSENS_UPPER_THRESHOLD_SHIFT;
+ mask = TSENS_UPPER_STATUS_CLR;
+
+ if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
+ lo_code = (reg_cntl & TSENS_LOWER_THRESHOLD_MASK);
+ break;
+ case TSENS_TRIP_COOL:
+ code = (reg_cntl & TSENS_LOWER_THRESHOLD_MASK);
+ mask = TSENS_LOWER_STATUS_CLR;
+
+ if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
+ hi_code = (reg_cntl & TSENS_UPPER_THRESHOLD_MASK)
+ >> TSENS_UPPER_THRESHOLD_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
+ writel_relaxed(reg_cntl | mask,
+ (TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
+ (tmdev->tsens_addr) +
+ (tm_sensor->sensor_num * 4)));
+ else {
+ if (code < lo_code || code > hi_code) {
+ pr_err("%s with invalid code %x\n", __func__, code);
+ return -EINVAL;
+ }
+ writel_relaxed(reg_cntl & ~mask,
+ (TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR(tmdev->tsens_addr) +
+ (tm_sensor->sensor_num * 4)));
+ }
+ mb();
+ return 0;
+}
+
+static int tsens_tz_get_trip_temp(struct thermal_zone_device *thermal,
+ int trip, unsigned long *temp)
+{
+ struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+ unsigned int reg;
+
+ if (!tm_sensor || trip < 0 || !temp)
+ return -EINVAL;
+
+ reg = readl_relaxed(TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
+ (tmdev->tsens_addr) +
+ (tm_sensor->sensor_num * TSENS_SN_ADDR_OFFSET));
+ switch (trip) {
+ case TSENS_TRIP_WARM:
+ reg = (reg & TSENS_UPPER_THRESHOLD_MASK) >>
+ TSENS_UPPER_THRESHOLD_SHIFT;
+ break;
+ case TSENS_TRIP_COOL:
+ reg = (reg & TSENS_LOWER_THRESHOLD_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *temp = tsens_tz_code_to_degc(reg, tm_sensor->sensor_num);
+
+ return 0;
+}
+
+static int tsens_tz_notify(struct thermal_zone_device *thermal,
+ int count, enum thermal_trip_type type)
+{
+ /*
+ * The TSENS driver does not shut down the device.
+ * All thermal notifications are sent to the thermal
+ * daemon, which takes the appropriate action.
+ */
+ pr_debug("%s debug\n", __func__);
+ return 1;
+}
+
+static int tsens_tz_set_trip_temp(struct thermal_zone_device *thermal,
+ int trip, long temp)
+{
+ struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
+ unsigned int reg_cntl;
+ int code, hi_code, lo_code, code_err_chk;
+
+ code_err_chk = code = tsens_tz_degc_to_code(temp,
+ tm_sensor->sensor_num);
+ if (!tm_sensor || trip < 0)
+ return -EINVAL;
+
+ lo_code = TSENS_THRESHOLD_MIN_CODE;
+ hi_code = TSENS_THRESHOLD_MAX_CODE;
+
+ reg_cntl = readl_relaxed(TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
+ (tmdev->tsens_addr) +
+ (tm_sensor->sensor_num * TSENS_SN_ADDR_OFFSET));
+ switch (trip) {
+ case TSENS_TRIP_WARM:
+ code <<= TSENS_UPPER_THRESHOLD_SHIFT;
+ reg_cntl &= ~TSENS_UPPER_THRESHOLD_MASK;
+ if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
+ lo_code = (reg_cntl & TSENS_LOWER_THRESHOLD_MASK);
+ break;
+ case TSENS_TRIP_COOL:
+ reg_cntl &= ~TSENS_LOWER_THRESHOLD_MASK;
+ if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
+ hi_code = (reg_cntl & TSENS_UPPER_THRESHOLD_MASK)
+ >> TSENS_UPPER_THRESHOLD_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (code_err_chk < lo_code || code_err_chk > hi_code)
+ return -EINVAL;
+
+ writel_relaxed(reg_cntl | code, (TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
+ (tmdev->tsens_addr) +
+ (tm_sensor->sensor_num *
+ TSENS_SN_ADDR_OFFSET)));
+ mb();
+ return 0;
+}
+
+static struct thermal_zone_device_ops tsens_thermal_zone_ops = {
+ .get_temp = tsens_tz_get_temp,
+ .get_mode = tsens_tz_get_mode,
+ .get_trip_type = tsens_tz_get_trip_type,
+ .activate_trip_type = tsens_tz_activate_trip_type,
+ .get_trip_temp = tsens_tz_get_trip_temp,
+ .set_trip_temp = tsens_tz_set_trip_temp,
+ .notify = tsens_tz_notify,
+};
+
+static void notify_uspace_tsens_fn(struct work_struct *work)
+{
+ struct tsens_tm_device_sensor *tm = container_of(work,
+ struct tsens_tm_device_sensor, work);
+
+ sysfs_notify(&tm->tz_dev->device.kobj,
+ NULL, "type");
+}
+
+static irqreturn_t tsens_isr(int irq, void *data)
+{
+ struct tsens_tm_device *tm = data;
+ unsigned int i, status, threshold;
+ unsigned int sensor_status_addr, sensor_status_ctrl_addr;
+
+ sensor_status_addr =
+ (unsigned int)TSENS_S0_STATUS_ADDR(tmdev->tsens_addr);
+ sensor_status_ctrl_addr =
+ (unsigned int)TSENS_S0_UPPER_LOWER_STATUS_CTRL_ADDR
+ (tmdev->tsens_addr);
+ for (i = 0; i < tmdev->tsens_num_sensor; i++) {
+ bool upper_thr = false, lower_thr = false;
+ status = readl_relaxed(sensor_status_addr);
+ threshold = readl_relaxed(sensor_status_ctrl_addr);
+ if (status & TSENS_SN_STATUS_UPPER_STATUS) {
+ writel_relaxed(threshold | TSENS_UPPER_STATUS_CLR,
+ sensor_status_ctrl_addr);
+ upper_thr = true;
+ }
+ if (status & TSENS_SN_STATUS_LOWER_STATUS) {
+ writel_relaxed(threshold | TSENS_LOWER_STATUS_CLR,
+ sensor_status_ctrl_addr);
+ lower_thr = true;
+ }
+ if (upper_thr || lower_thr) {
+ /* Notify user space */
+ schedule_work(&tm->sensor[i].work);
+ pr_debug("sensor:%d trigger temp (%d degC)\n", i,
+ tsens_tz_code_to_degc((status &
+ TSENS_SN_STATUS_TEMP_MASK), i));
+ }
+ sensor_status_addr += TSENS_SN_ADDR_OFFSET;
+ sensor_status_ctrl_addr += TSENS_SN_ADDR_OFFSET;
+ }
+ mb();
+ return IRQ_HANDLED;
+}
+
+static void tsens_hw_init(void)
+{
+ unsigned int reg_cntl = 0;
+ unsigned int i;
+
+ reg_cntl = readl_relaxed(TSENS_CTRL_ADDR(tmdev->tsens_addr));
+ writel_relaxed(reg_cntl | TSENS_SW_RST,
+ TSENS_CTRL_ADDR(tmdev->tsens_addr));
+ writel_relaxed(TSENS_CTRL_INIT_DATA1,
+ TSENS_CTRL_ADDR(tmdev->tsens_addr));
+ writel_relaxed(TSENS_GLOBAL_INIT_DATA,
+ TSENS_GLOBAL_CONFIG(tmdev->tsens_addr));
+ writel_relaxed(TSENS_S0_MAIN_CFG_INIT_DATA,
+ TSENS_S0_MAIN_CONFIG(tmdev->tsens_addr));
+ for (i = 0; i < tmdev->tsens_num_sensor; i++) {
+ writel_relaxed(TSENS_SN_MIN_MAX_STATUS_CTRL_DATA,
+ TSENS_SN_MIN_MAX_STATUS_CTRL(tmdev->tsens_addr)
+ + (i * TSENS_SN_ADDR_OFFSET));
+ writel_relaxed(TSENS_SN_REMOTE_CFG_DATA,
+ TSENS_SN_REMOTE_CONFIG(tmdev->tsens_addr)
+ + (i * TSENS_SN_ADDR_OFFSET));
+ }
+ writel_relaxed(TSENS_INTERRUPT_EN,
+ TSENS_UPPER_LOWER_INTERRUPT_CTRL(tmdev->tsens_addr));
+}
+
+static int tsens_calib_sensors(void)
+{
+ int i, tsens_base1_data, tsens0_point1, tsens1_point1;
+ int tsens2_point1, tsens3_point1, tsens4_point1, tsens5_point1;
+ int tsens6_point1, tsens7_point1, tsens8_point1, tsens9_point1;
+ int tsens10_point1, tsens0_point2, tsens1_point2, tsens2_point2;
+ int tsens3_point2, tsens4_point2, tsens5_point2, tsens6_point2;
+ int tsens7_point2, tsens8_point2, tsens9_point2, tsens10_point2;
+ int tsens_base2_data, tsens_calibration_mode, temp;
+ uint32_t calib_data[5];
+
+ for (i = 0; i < 5; i++)
+ calib_data[i] = readl_relaxed(tmdev->tsens_calib_addr
+ + (i * TSENS_SN_ADDR_OFFSET));
+
+ tsens_calibration_mode = (calib_data[1] & TSENS_CAL_SEL_0_1)
+ >> TSENS_CAL_SEL_SHIFT;
+ temp = (calib_data[3] & TSENS_CAL_SEL_2)
+ >> TSENS_CAL_SEL_SHIFT_2;
+ tsens_calibration_mode |= temp;
+ /* Remove this after bringup */
+ tsens_calibration_mode = TSENS_ONE_POINT_CALIB;
+
+ if (!tsens_calibration_mode) {
+ pr_err("TSENS not calibrated\n");
+ return -ENODEV;
+ }
+
+ if (tsens_calibration_mode == TSENS_ONE_POINT_CALIB ||
+ tsens_calibration_mode == TSENS_TWO_POINT_CALIB) {
+ tsens_base1_data = calib_data[0] & TSENS_BASE1_MASK;
+ tsens0_point1 = calib_data[0] & TSENS0_POINT1_MASK;
+ tsens1_point1 = calib_data[0] & TSENS1_POINT1_MASK;
+ tsens2_point1 = calib_data[0] & TSENS2_POINT1_MASK;
+ tsens3_point1 = calib_data[0] & TSENS3_POINT1_MASK;
+ tsens4_point1 = calib_data[1] & TSENS4_POINT1_MASK;
+ tsens5_point1 = calib_data[1] & TSENS5_POINT1_MASK;
+ tsens6_point1 = calib_data[1] & TSENS6_POINT1_MASK;
+ tsens7_point1 = calib_data[1] & TSENS7_POINT1_MASK;
+ tsens8_point1 = calib_data[1] & TSENS8_POINT1_MASK;
+ tsens9_point1 = calib_data[2] & TSENS9_POINT1_MASK;
+ tsens10_point1 = calib_data[2] & TSENS10_POINT1_MASK;
+ }
+
+ if (tsens_calibration_mode == TSENS_TWO_POINT_CALIB) {
+ tsens_base2_data = calib_data[2] & TSENS_BASE2_MASK;
+ tsens0_point2 = calib_data[2] & TSENS0_POINT2_MASK;
+ tsens1_point2 = calib_data[2] & TSENS1_POINT2_MASK;
+ tsens2_point2 = calib_data[3] & TSENS2_POINT2_MASK;
+ tsens3_point2 = calib_data[3] & TSENS3_POINT2_MASK;
+ tsens4_point2 = calib_data[3] & TSENS4_POINT2_MASK;
+ tsens5_point2 = calib_data[3] & TSENS5_POINT2_MASK;
+ tsens6_point2 = calib_data[3] & TSENS6_POINT2_MASK;
+ tsens7_point2 = calib_data[4] & TSENS7_POINT2_MASK;
+ tsens8_point2 = calib_data[4] & TSENS8_POINT2_MASK;
+ tsens9_point2 = calib_data[4] & TSENS9_POINT2_MASK;
+ tsens10_point2 = calib_data[4] & TSENS10_POINT2_MASK;
+ } else if (tsens_calibration_mode != TSENS_ONE_POINT_CALIB)
+ pr_debug("Calibration mode is unknown: %d\n",
+ tsens_calibration_mode);
+
+ tmdev->sensor[0].calib_data_point1 =
+ (((tsens_base1_data + tsens0_point1) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[0].calib_data_point2 =
+ (((tsens_base2_data + tsens0_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[1].calib_data_point1 =
+ (((tsens_base1_data + tsens1_point1) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[1].calib_data_point2 =
+ (((tsens_base2_data + tsens1_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[2].calib_data_point1 =
+ (((tsens_base1_data + tsens2_point1) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[2].calib_data_point2 =
+ (((tsens_base2_data + tsens2_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[3].calib_data_point1 =
+ (((tsens_base1_data + tsens3_point1) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[3].calib_data_point2 =
+ (((tsens_base2_data + tsens3_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[4].calib_data_point1 =
+ (((tsens_base1_data + tsens4_point1) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[4].calib_data_point2 =
+ (((tsens_base2_data + tsens4_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[5].calib_data_point1 =
+ (((tsens_base1_data + tsens5_point1) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[5].calib_data_point2 =
+ (((tsens_base2_data + tsens5_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[6].calib_data_point1 =
+ (((tsens_base1_data + tsens6_point1) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[6].calib_data_point2 =
+ (((tsens_base2_data + tsens6_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[7].calib_data_point1 =
+ (((tsens_base1_data + tsens7_point1) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[7].calib_data_point2 =
+ (((tsens_base2_data + tsens7_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[8].calib_data_point1 =
+ (((tsens_base1_data + tsens8_point1) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[8].calib_data_point2 =
+ (((tsens_base2_data + tsens8_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[9].calib_data_point1 =
+ (((tsens_base1_data + tsens9_point1) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[9].calib_data_point2 =
+ (((tsens_base2_data + tsens9_point2) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[10].calib_data_point1 =
+ (((tsens_base1_data + tsens10_point1) << 2) | TSENS_BIT_APPEND);
+ tmdev->sensor[10].calib_data_point2 =
+ (((tsens_base2_data + tsens10_point2) << 2) | TSENS_BIT_APPEND);
+
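+ /* Derive each sensor's slope (ADC code per degC, scaled by tsens_factor) and offset from the calibration points */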
+ for (i = 0; i < tmdev->tsens_num_sensor; i++) {
+ int32_t num = 0, den = 0;
+ num = TSENS_CAL_DEGC_POINT2 - TSENS_CAL_DEGC_POINT1;
+ den = tmdev->sensor[i].calib_data_point2 -
+ tmdev->sensor[i].calib_data_point1;
+ num *= tmdev->tsens_factor;
+ if (tsens_calibration_mode == TSENS_TWO_POINT_CALIB)
+ tmdev->sensor[i].slope_mul_tsens_factor = num/den;
+ tmdev->sensor[i].offset = (TSENS_CAL_DEGC_POINT1 *
+ tmdev->tsens_factor)
+ - (tmdev->sensor[i].calib_data_point1 *
+ tmdev->sensor[i].slope_mul_tsens_factor);
+ INIT_WORK(&tmdev->sensor[i].work, notify_uspace_tsens_fn);
+ tmdev->prev_reading_avail = false;
+ }
+
+ return 0;
+}
+
+static int get_device_tree_data(struct platform_device *pdev)
+{
+ const struct device_node *of_node = pdev->dev.of_node;
+ struct resource *res_mem = NULL;
+ u32 *tsens_slope_data;
+ int rc = 0;
+ u32 i, tsens_num_sensors;
+
+ rc = of_property_read_u32(of_node,
+ "qcom,sensors", &tsens_num_sensors);
+ if (rc) {
+ dev_err(&pdev->dev, "missing sensor number\n");
+ return -ENODEV;
+ }
+
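+ /* Per-sensor slope values come from the "qcom,slope" devicetree property, scaled by TSENS_SLOPE_FACTOR */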
+ tsens_slope_data = devm_kzalloc(&pdev->dev,
+ tsens_num_sensors * sizeof(u32), GFP_KERNEL);
+ if (!tsens_slope_data) {
+ dev_err(&pdev->dev, "can not allocate slope data\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,slope", tsens_slope_data, tsens_num_sensors/sizeof(u32));
+ if (rc) {
+ dev_err(&pdev->dev, "invalid or missing property: tsens-slope\n");
+ return rc;
+ };
+
+ tmdev = devm_kzalloc(&pdev->dev,
+ sizeof(struct tsens_tm_device) +
+ tsens_num_sensors *
+ sizeof(struct tsens_tm_device_sensor),
+ GFP_KERNEL);
+ if (tmdev == NULL) {
+ pr_err("%s: kzalloc() failed.\n", __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < tsens_num_sensors; i++)
+ tmdev->sensor[i].slope_mul_tsens_factor = tsens_slope_data[i];
+ tmdev->tsens_factor = TSENS_SLOPE_FACTOR;
+ tmdev->tsens_num_sensor = tsens_num_sensors;
+
+ tmdev->tsens_irq = platform_get_irq(pdev, 0);
+ if (tmdev->tsens_irq < 0) {
+ pr_err("Invalid get irq\n");
+ return tmdev->tsens_irq;
+ }
+
+ tmdev->res_tsens_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "tsens_physical");
+ if (!tmdev->res_tsens_mem) {
+ pr_err("Could not get tsens physical address resource\n");
+ rc = -EINVAL;
+ goto fail_free_irq;
+ }
+
+ tmdev->tsens_len = tmdev->res_tsens_mem->end -
+ tmdev->res_tsens_mem->start + 1;
+
+ res_mem = request_mem_region(tmdev->res_tsens_mem->start,
+ tmdev->tsens_len, tmdev->res_tsens_mem->name);
+ if (!res_mem) {
+ pr_err("Request tsens physical memory region failed\n");
+ rc = -EINVAL;
+ goto fail_free_irq;
+ }
+
+ tmdev->tsens_addr = ioremap(res_mem->start, tmdev->tsens_len);
+ if (!tmdev->tsens_addr) {
+ pr_err("Failed to IO map TSENS registers.\n");
+ rc = -EINVAL;
+ goto fail_unmap_tsens_region;
+ }
+
+ tmdev->res_calib_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "tsens_eeprom_physical");
+ if (!tmdev->res_calib_mem) {
+ pr_err("Could not get qfprom physical address resource\n");
+ rc = -EINVAL;
+ goto fail_unmap_tsens;
+ }
+
+ tmdev->calib_len = tmdev->res_calib_mem->end -
+ tmdev->res_calib_mem->start + 1;
+
+ res_mem = request_mem_region(tmdev->res_calib_mem->start,
+ tmdev->calib_len, tmdev->res_calib_mem->name);
+ if (!res_mem) {
+ pr_err("Request calibration memory region failed\n");
+ rc = -EINVAL;
+ goto fail_unmap_tsens;
+ }
+
+ tmdev->tsens_calib_addr = ioremap(res_mem->start,
+ tmdev->calib_len);
+ if (!tmdev->tsens_calib_addr) {
+ pr_err("Failed to IO map EEPROM registers.\n");
+ rc = -EINVAL;
+ goto fail_unmap_calib_region;
+ }
+
+ return 0;
+
+fail_unmap_calib_region:
+ if (tmdev->res_calib_mem)
+ release_mem_region(tmdev->res_calib_mem->start,
+ tmdev->calib_len);
+fail_unmap_tsens:
+ if (tmdev->tsens_addr)
+ iounmap(tmdev->tsens_addr);
+fail_unmap_tsens_region:
+ if (tmdev->res_tsens_mem)
+ release_mem_region(tmdev->res_tsens_mem->start,
+ tmdev->tsens_len);
+fail_free_irq:
+ free_irq(tmdev->tsens_irq, tmdev);
+
+ return rc;
+}
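+
+/*
+ * Sketch of a devicetree node matching the properties parsed above. The
+ * property names and resource names are the ones used in this function;
+ * the unit address and all values are made up for illustration only:
+ *
+ *	tsens@fc4a8000 {
+ *		compatible = "qcom,msm-tsens";
+ *		reg = <0xfc4a8000 0x2000>, <0xfc4b8000 0x1000>;
+ *		reg-names = "tsens_physical", "tsens_eeprom_physical";
+ *		interrupts = <0 184 0>;
+ *		qcom,sensors = <5>;
+ *		qcom,slope = <1176 1176 1154 1176 1111>;
+ *	};
+ */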
+
+static int __devinit tsens_tm_probe(struct platform_device *pdev)
+{
+ int rc;
+
+ if (tmdev) {
+ pr_err("TSENS device already in use\n");
+ return -EBUSY;
+ }
+
+	if (pdev->dev.of_node) {
+		rc = get_device_tree_data(pdev);
+		if (rc)
+			return rc;
+	} else {
+		return -ENODEV;
+	}
+
+ tmdev->pdev = pdev;
+ rc = tsens_calib_sensors();
+ if (rc < 0)
+ goto fail;
+
+ tsens_hw_init();
+ tmdev->prev_reading_avail = true;
+
+ platform_set_drvdata(pdev, tmdev);
+
+ return 0;
+fail:
+ if (tmdev->tsens_calib_addr)
+ iounmap(tmdev->tsens_calib_addr);
+ if (tmdev->res_calib_mem)
+ release_mem_region(tmdev->res_calib_mem->start,
+ tmdev->calib_len);
+ if (tmdev->tsens_addr)
+ iounmap(tmdev->tsens_addr);
+ if (tmdev->res_tsens_mem)
+ release_mem_region(tmdev->res_tsens_mem->start,
+ tmdev->tsens_len);
+ free_irq(tmdev->tsens_irq, tmdev);
+ kfree(tmdev);
+
+ return rc;
+}
+
+static int __devinit _tsens_register_thermal(void)
+{
+	struct platform_device *pdev;
+	int rc, i;
+
+	if (!tmdev) {
+		pr_err("%s: TSENS early init not done\n", __func__);
+		return -ENODEV;
+	}
+
+	pdev = tmdev->pdev;
+
+ for (i = 0; i < tmdev->tsens_num_sensor; i++) {
+ char name[18];
+ snprintf(name, sizeof(name), "tsens_tz_sensor%d", i);
+ tmdev->sensor[i].mode = THERMAL_DEVICE_ENABLED;
+ tmdev->sensor[i].sensor_num = i;
+ tmdev->sensor[i].tz_dev = thermal_zone_device_register(name,
+ TSENS_TRIP_NUM, &tmdev->sensor[i],
+ &tsens_thermal_zone_ops, 0, 0, 0, 0);
+ if (IS_ERR(tmdev->sensor[i].tz_dev)) {
+ pr_err("%s: thermal_zone_device_register() failed.\n",
+ __func__);
+ rc = -ENODEV;
+ goto fail;
+ }
+ }
+ rc = request_irq(tmdev->tsens_irq, tsens_isr,
+ IRQF_TRIGGER_RISING, "tsens_interrupt", tmdev);
+ if (rc < 0) {
+ pr_err("%s: request_irq FAIL: %d\n", __func__, rc);
+ for (i = 0; i < tmdev->tsens_num_sensor; i++)
+ thermal_zone_device_unregister(tmdev->sensor[i].tz_dev);
+ goto fail;
+ }
+ platform_set_drvdata(pdev, tmdev);
+
+ return 0;
+fail:
+ if (tmdev->tsens_calib_addr)
+ iounmap(tmdev->tsens_calib_addr);
+ if (tmdev->res_calib_mem)
+ release_mem_region(tmdev->res_calib_mem->start,
+ tmdev->calib_len);
+ if (tmdev->tsens_addr)
+ iounmap(tmdev->tsens_addr);
+ if (tmdev->res_tsens_mem)
+ release_mem_region(tmdev->res_tsens_mem->start,
+ tmdev->tsens_len);
+ kfree(tmdev);
+
+ return rc;
+}
+
+static int __devexit tsens_tm_remove(struct platform_device *pdev)
+{
+ struct tsens_tm_device *tmdev = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < tmdev->tsens_num_sensor; i++)
+ thermal_zone_device_unregister(tmdev->sensor[i].tz_dev);
+ if (tmdev->tsens_calib_addr)
+ iounmap(tmdev->tsens_calib_addr);
+ if (tmdev->res_calib_mem)
+ release_mem_region(tmdev->res_calib_mem->start,
+ tmdev->calib_len);
+ if (tmdev->tsens_addr)
+ iounmap(tmdev->tsens_addr);
+ if (tmdev->res_tsens_mem)
+ release_mem_region(tmdev->res_tsens_mem->start,
+ tmdev->tsens_len);
+ free_irq(tmdev->tsens_irq, tmdev);
+ platform_set_drvdata(pdev, NULL);
+ kfree(tmdev);
+
+ return 0;
+}
+
+static struct of_device_id tsens_match[] = {
+ { .compatible = "qcom,msm-tsens",
+ },
+ {}
+};
+
+static struct platform_driver tsens_tm_driver = {
+ .probe = tsens_tm_probe,
+	.remove = __devexit_p(tsens_tm_remove),
+ .driver = {
+ .name = "msm-tsens",
+ .owner = THIS_MODULE,
+ .of_match_table = tsens_match,
+ },
+};
+
+static int __init tsens_tm_init_driver(void)
+{
+ return platform_driver_register(&tsens_tm_driver);
+}
+arch_initcall(tsens_tm_init_driver);
+
+static int __init tsens_thermal_register(void)
+{
+ return _tsens_register_thermal();
+}
+module_init(tsens_thermal_register);
+
+static void __exit _tsens_tm_remove(void)
+{
+ platform_driver_unregister(&tsens_tm_driver);
+}
+module_exit(_tsens_tm_remove);
+
+MODULE_ALIAS("platform:" TSENS_DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 1fbfdd8..54ea85d 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -373,6 +373,17 @@
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
/*
+ * The default value of GUCTL[31:22] should be 0x8. But on cores
+ * revision < 2.30a, the default value is mistakenly overridden
+ * with 0x0. Restore the correct default value.
+ */
+ if (dwc->revision < DWC3_REVISION_230A) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
+ reg &= ~DWC3_GUCTL_REFCLKPER;
+ reg |= 0x8 << __ffs(DWC3_GUCTL_REFCLKPER);
+ dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
+ }
+ /*
* Currently, the default and the recommended value for GUSB3PIPECTL
* [21:19] in the RTL is 3'b100 or 32 consecutive errors. Based on
* analysis and experiments in the lab, it is found that there is a
@@ -395,6 +406,11 @@
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
reg &= ~DWC3_GUSB3PIPECTL_DELAY_P1P2P3;
reg |= 1 << __ffs(DWC3_GUSB3PIPECTL_DELAY_P1P2P3);
+ /*
+ * Receiver Detection in U3/Rx.Det is mistakenly disabled in
+ * cores < 2.30a. Fix it here.
+ */
+ reg &= ~DWC3_GUSB3PIPECTL_DIS_RXDET_U3_RXDET;
dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
}
@@ -506,9 +522,9 @@
if (of_get_property(node, "tx-fifo-resize", NULL))
dwc->needs_fifo_resize = true;
+ pm_runtime_no_callbacks(dev);
+ pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
- pm_runtime_forbid(dev);
ret = dwc3_core_init(dwc);
if (ret) {
@@ -570,8 +586,6 @@
goto err2;
}
- pm_runtime_allow(dev);
-
return 0;
err2:
@@ -605,7 +619,6 @@
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
dwc3_debugfs_exit(dwc);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 98adff7..92e28f5 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -180,6 +180,9 @@
#define DWC3_GCTL_DISSCRAMBLE (1 << 3)
#define DWC3_GCTL_DSBLCLKGTNG (1 << 0)
+/* Global User Control Register */
+#define DWC3_GUCTL_REFCLKPER (0x3FF << 22)
+
/* Global USB2 PHY Configuration Register */
#define DWC3_GUSB2PHYCFG_PHYSOFTRST (1 << 31)
#define DWC3_GUSB2PHYCFG_SUSPHY (1 << 6)
@@ -188,6 +191,7 @@
#define DWC3_GUSB3PIPECTL_PHYSOFTRST (1 << 31)
#define DWC3_GUSB3PIPECTL_SUSPHY (1 << 17)
#define DWC3_GUSB3PIPECTL_DELAY_P1P2P3 (7 << 19)
+#define DWC3_GUSB3PIPECTL_DIS_RXDET_U3_RXDET (1 << 22)
/* Global TX Fifo Size Register */
#define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff)
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 05f1a60..136c6d9 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -16,6 +16,8 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -24,6 +26,8 @@
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/msm_hsusb.h>
@@ -124,6 +128,12 @@
struct regulator *ssusb_vddcx;
enum usb_vdd_type ss_vdd_type;
enum usb_vdd_type hs_vdd_type;
+ struct dwc3_ext_xceiv ext_xceiv;
+ bool resume_pending;
+ atomic_t pm_suspended;
+ atomic_t in_lpm;
+ struct delayed_work resume_work;
+ struct wake_lock wlock;
struct dwc3_charger charger;
struct usb_phy *otg_xceiv;
struct delayed_work chg_work;
@@ -1213,6 +1223,159 @@
queue_delayed_work(system_nrt_wq, &mdwc->chg_work, 0);
}
+static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
+{
+ dev_dbg(mdwc->dev, "%s: entering lpm\n", __func__);
+
+ if (atomic_read(&mdwc->in_lpm)) {
+ dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
+ return 0;
+ }
+
+ clk_disable_unprepare(mdwc->core_clk);
+ dwc3_hsusb_ldo_enable(0);
+ dwc3_ssusb_ldo_enable(0);
+ wake_unlock(&mdwc->wlock);
+
+ atomic_set(&mdwc->in_lpm, 1);
+ dev_info(mdwc->dev, "DWC3 in low power mode\n");
+
+ return 0;
+}
+
+static int dwc3_msm_resume(struct dwc3_msm *mdwc)
+{
+ dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
+
+ if (!atomic_read(&mdwc->in_lpm)) {
+ dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
+ return 0;
+ }
+
+ wake_lock(&mdwc->wlock);
+ clk_prepare_enable(mdwc->core_clk);
+ dwc3_hsusb_ldo_enable(1);
+ dwc3_ssusb_ldo_enable(1);
+
+ atomic_set(&mdwc->in_lpm, 0);
+ dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
+
+ return 0;
+}
+
+static void dwc3_resume_work(struct work_struct *w)
+{
+ struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
+ resume_work.work);
+
+ dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
+ /* handle any event that was queued while work was already running */
+ if (!atomic_read(&mdwc->in_lpm)) {
+ dev_dbg(mdwc->dev, "%s: notifying xceiv event\n", __func__);
+ if (mdwc->otg_xceiv)
+ mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
+ DWC3_EVENT_XCEIV_STATE);
+ return;
+ }
+
+ /* bail out if system resume in process, else initiate RESUME */
+ if (atomic_read(&mdwc->pm_suspended)) {
+ mdwc->resume_pending = true;
+ } else {
+ pm_runtime_get_sync(mdwc->dev);
+ if (mdwc->otg_xceiv)
+ mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
+ DWC3_EVENT_PHY_RESUME);
+ pm_runtime_put_sync(mdwc->dev);
+ }
+}
+
+/* u32, so the addresses can be handed to debugfs_create_bool() directly */
+static u32 debug_id, debug_bsv, debug_connect;
+
+static int dwc3_connect_show(struct seq_file *s, void *unused)
+{
+ if (debug_connect)
+ seq_printf(s, "true\n");
+ else
+ seq_printf(s, "false\n");
+
+ return 0;
+}
+
+static int dwc3_connect_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_connect_show, inode->i_private);
+}
+
+static ssize_t dwc3_connect_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc3_msm *mdwc = s->private;
+ char buf[8];
+
+ memset(buf, 0x00, sizeof(buf));
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "enable", 6) || !strncmp(buf, "true", 4)) {
+ debug_connect = true;
+ } else {
+ debug_connect = debug_bsv = false;
+ debug_id = true;
+ }
+
+ mdwc->ext_xceiv.bsv = debug_bsv;
+ mdwc->ext_xceiv.id = debug_id ? DWC3_ID_FLOAT : DWC3_ID_GROUND;
+
+ if (atomic_read(&mdwc->in_lpm)) {
+ dev_dbg(mdwc->dev, "%s: calling resume_work\n", __func__);
+ dwc3_resume_work(&mdwc->resume_work.work);
+ } else {
+ dev_dbg(mdwc->dev, "%s: notifying xceiv event\n", __func__);
+ if (mdwc->otg_xceiv)
+ mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
+ DWC3_EVENT_XCEIV_STATE);
+ }
+
+ return count;
+}
+
+const struct file_operations dwc3_connect_fops = {
+ .open = dwc3_connect_open,
+ .read = seq_read,
+ .write = dwc3_connect_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *dwc3_debugfs_root;
+
+static void dwc3_debugfs_init(struct dwc3_msm *mdwc)
+{
+ dwc3_debugfs_root = debugfs_create_dir("msm_dwc3", NULL);
+
+ if (!dwc3_debugfs_root || IS_ERR(dwc3_debugfs_root))
+ return;
+
+	if (!debugfs_create_bool("id", S_IRUGO | S_IWUSR, dwc3_debugfs_root,
+				 &debug_id))
+		goto error;
+
+	if (!debugfs_create_bool("bsv", S_IRUGO | S_IWUSR, dwc3_debugfs_root,
+				 &debug_bsv))
+ goto error;
+
+ if (!debugfs_create_file("connect", S_IRUGO | S_IWUSR,
+ dwc3_debugfs_root, mdwc, &dwc3_connect_fops))
+ goto error;
+
+ return;
+
+error:
+ debugfs_remove_recursive(dwc3_debugfs_root);
+}
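+
+/*
+ * The debugfs files created above allow cable events to be simulated from
+ * userspace. A minimal sketch, assuming debugfs is mounted at
+ * /sys/kernel/debug (the mount point is an assumption; the file names come
+ * from dwc3_debugfs_init() above):
+ *
+ *	echo Y > /sys/kernel/debug/msm_dwc3/bsv           (B-session valid)
+ *	echo enable > /sys/kernel/debug/msm_dwc3/connect  (push state to OTG)
+ *	echo disable > /sys/kernel/debug/msm_dwc3/connect (clear bsv, float ID)
+ *
+ * Any write to "connect" lands in dwc3_connect_write(), which copies the
+ * debug id/bsv values into ext_xceiv and notifies the OTG state machine.
+ */
+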
static int __devinit dwc3_msm_probe(struct platform_device *pdev)
{
@@ -1234,6 +1397,7 @@
INIT_LIST_HEAD(&msm->req_complete_list);
INIT_DELAYED_WORK(&msm->chg_work, dwc3_chg_detect_work);
+ INIT_DELAYED_WORK(&msm->resume_work, dwc3_resume_work);
/*
* DWC3 Core requires its CORE CLK (aka master / bus clk) to
@@ -1354,6 +1518,9 @@
msm->resource_size = resource_size(res);
msm->dwc3 = dwc3;
+ pm_runtime_set_active(msm->dev);
+ pm_runtime_enable(msm->dev);
+
if (of_property_read_u32(node, "qcom,dwc-usb3-msm-dbm-eps",
&msm->dbm_num_eps)) {
dev_err(&pdev->dev,
@@ -1395,10 +1562,21 @@
ret);
goto put_xcvr;
}
+
+ ret = dwc3_set_ext_xceiv(msm->otg_xceiv->otg, &msm->ext_xceiv);
+ if (ret || !msm->ext_xceiv.notify_ext_events) {
+ dev_err(&pdev->dev, "failed to register xceiver: %d\n",
+ ret);
+ goto put_xcvr;
+ }
} else {
dev_err(&pdev->dev, "%s: No OTG transceiver found\n", __func__);
}
+ wake_lock_init(&msm->wlock, WAKE_LOCK_SUSPEND, "msm_dwc3");
+ wake_lock(&msm->wlock);
+ dwc3_debugfs_init(msm);
+
return 0;
put_xcvr:
@@ -1432,11 +1610,15 @@
{
struct dwc3_msm *msm = platform_get_drvdata(pdev);
+ if (dwc3_debugfs_root)
+ debugfs_remove_recursive(dwc3_debugfs_root);
if (msm->otg_xceiv) {
dwc3_start_chg_det(&msm->charger, false);
usb_put_transceiver(msm->otg_xceiv);
}
+ pm_runtime_disable(msm->dev);
platform_device_unregister(msm->dwc3);
+ wake_lock_destroy(&msm->wlock);
dwc3_hsusb_ldo_enable(0);
dwc3_hsusb_ldo_init(0);
@@ -1451,6 +1633,77 @@
return 0;
}
+static int dwc3_msm_pm_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3-msm PM suspend\n");
+
+ ret = dwc3_msm_suspend(mdwc);
+ if (!ret)
+ atomic_set(&mdwc->pm_suspended, 1);
+
+ return ret;
+}
+
+static int dwc3_msm_pm_resume(struct device *dev)
+{
+ int ret = 0;
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3-msm PM resume\n");
+
+ atomic_set(&mdwc->pm_suspended, 0);
+ if (mdwc->resume_pending) {
+ mdwc->resume_pending = false;
+
+ ret = dwc3_msm_resume(mdwc);
+ /* Update runtime PM status */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ /* Let OTG know about resume event and update pm_count */
+ if (mdwc->otg_xceiv)
+ mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
+ DWC3_EVENT_PHY_RESUME);
+ }
+
+ return ret;
+}
+
+static int dwc3_msm_runtime_idle(struct device *dev)
+{
+ dev_dbg(dev, "DWC3-msm runtime idle\n");
+
+ return 0;
+}
+
+static int dwc3_msm_runtime_suspend(struct device *dev)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "DWC3-msm runtime suspend\n");
+
+ return dwc3_msm_suspend(mdwc);
+}
+
+static int dwc3_msm_runtime_resume(struct device *dev)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "DWC3-msm runtime resume\n");
+
+ return dwc3_msm_resume(mdwc);
+}
+
+static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
+ SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
+ dwc3_msm_runtime_idle)
+};
+
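+/*
+ * Rough sketch of the runtime-PM flow wired up above (a description, not
+ * additional code): when the OTG state machine has nothing to do it calls
+ * pm_runtime_put_sync() on its device; once the dwc3 core device idles,
+ * runtime PM eventually reaches this driver's dwc3_msm_runtime_suspend(),
+ * which calls dwc3_msm_suspend() (clock off, LDOs off, wakelock released).
+ * A wakeup reported by the external transceiver goes through
+ * dwc3_resume_work(), whose pm_runtime_get_sync() ends up in
+ * dwc3_msm_runtime_resume() -> dwc3_msm_resume().
+ */
+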
static const struct of_device_id of_dwc3_matach[] = {
{
.compatible = "qcom,dwc-usb3-msm",
@@ -1464,6 +1717,7 @@
.remove = __devexit_p(dwc3_msm_remove),
.driver = {
.name = "msm-dwc3",
+ .pm = &dwc3_msm_dev_pm_ops,
.of_match_table = of_dwc3_matach,
},
};
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index 23b582d..4a37f03 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -271,6 +271,61 @@
return 0;
}
+/**
+ * dwc3_ext_event_notify - callback to handle events from external transceiver
+ * @otg: Pointer to the otg transceiver structure
+ * @event: Event reported by transceiver
+ */
+static void dwc3_ext_event_notify(struct usb_otg *otg,
+ enum dwc3_ext_events event)
+{
+ struct dwc3_otg *dotg = container_of(otg, struct dwc3_otg, otg);
+ struct dwc3_ext_xceiv *ext_xceiv = dotg->ext_xceiv;
+ struct usb_phy *phy = dotg->otg.phy;
+
+ if (event == DWC3_EVENT_PHY_RESUME) {
+ if (!pm_runtime_status_suspended(phy->dev)) {
+			dev_warn(phy->dev, "PHY_RESUME event received while not in LPM\n");
+ } else {
+ dev_dbg(phy->dev, "ext PHY_RESUME event received\n");
+ /* ext_xceiver would have taken h/w out of LPM by now */
+ pm_runtime_get(phy->dev);
+ }
+ }
+
+ if (ext_xceiv->id == DWC3_ID_FLOAT)
+ set_bit(ID, &dotg->inputs);
+ else
+ clear_bit(ID, &dotg->inputs);
+
+ if (ext_xceiv->bsv)
+ set_bit(B_SESS_VLD, &dotg->inputs);
+ else
+ clear_bit(B_SESS_VLD, &dotg->inputs);
+
+ schedule_work(&dotg->sm_work);
+}
+
+/**
+ * dwc3_set_ext_xceiv - bind/unbind external transceiver driver
+ * @otg: Pointer to the otg transceiver structure
+ * @ext_xceiv: Pointer to the external transceiver structure
+ *
+ * Returns 0 on success
+ */
+int dwc3_set_ext_xceiv(struct usb_otg *otg, struct dwc3_ext_xceiv *ext_xceiv)
+{
+ struct dwc3_otg *dotg = container_of(otg, struct dwc3_otg, otg);
+
+ dotg->ext_xceiv = ext_xceiv;
+ if (ext_xceiv)
+ ext_xceiv->notify_ext_events = dwc3_ext_event_notify;
+
+ return 0;
+}
+
/* IRQs which OTG driver is interested in handling */
#define DWC3_OEVT_MASK (DWC3_OEVTEN_OTGCONIDSTSCHNGEVNT | \
DWC3_OEVTEN_OTGBDEVVBUSCHNGEVNT)
@@ -284,10 +339,21 @@
static irqreturn_t dwc3_otg_interrupt(int irq, void *_dotg)
{
struct dwc3_otg *dotg = (struct dwc3_otg *)_dotg;
+ struct usb_phy *phy = dotg->otg.phy;
u32 osts, oevt_reg;
int ret = IRQ_NONE;
int handled_irqs = 0;
+ /*
+	 * If the PHY is in retention mode then this interrupt will not fire.
+	 * ext_xcvr (parent) is responsible for bringing h/w out of LPM.
+	 * The OTG driver just needs to increment the power count and can
+	 * safely assume that h/w is out of low power state now.
+ * TODO: explicitly disable OEVTEN interrupts if ext_xceiv is present
+ */
+ if (pm_runtime_status_suspended(phy->dev))
+ pm_runtime_get(phy->dev);
+
oevt_reg = dwc3_readl(dotg->regs, DWC3_OEVT);
if (!(oevt_reg & DWC3_OEVT_MASK))
@@ -371,6 +437,7 @@
struct dwc3_charger *charger = dotg->charger;
bool work = 0;
+ pm_runtime_resume(phy->dev);
dev_dbg(phy->dev, "%s state\n", otg_state_string(phy->state));
/* Check OTG state */
@@ -388,7 +455,8 @@
work = 1;
} else {
phy->state = OTG_STATE_B_IDLE;
- /* TODO: Enter low power state */
+ dev_dbg(phy->dev, "No device, trying to suspend\n");
+ pm_runtime_put_sync(phy->dev);
}
break;
@@ -411,7 +479,8 @@
/* Has charger been detected? If no detect it */
switch (charger->chg_type) {
case DWC3_DCP_CHARGER:
- /* TODO: initiate LPM */
+ dev_dbg(phy->dev, "lpm, DCP charger\n");
+ pm_runtime_put_sync(phy->dev);
break;
case DWC3_CDP_CHARGER:
dwc3_otg_start_peripheral(&dotg->otg,
@@ -438,9 +507,10 @@
* yet. We will re-try as soon as it
* will be called
*/
- dev_err(phy->dev,
+				dev_err(phy->dev, "entering lpm: "
"unable to start B-device\n");
phy->state = OTG_STATE_UNDEFINED;
+ pm_runtime_put_sync(phy->dev);
return;
}
}
@@ -453,7 +523,8 @@
charger->chg_type =
DWC3_INVALID_CHARGER;
}
- /* TODO: Enter low power state */
+ dev_dbg(phy->dev, "No device, trying to suspend\n");
+ pm_runtime_put_sync(phy->dev);
}
break;
@@ -481,9 +552,10 @@
* Probably set_host was not called yet.
* We will re-try as soon as it will be called
*/
- dev_dbg(phy->dev,
+			dev_dbg(phy->dev, "entering lpm: "
"unable to start A-device\n");
phy->state = OTG_STATE_UNDEFINED;
+ pm_runtime_put_sync(phy->dev);
return;
}
phy->state = OTG_STATE_A_HOST;
@@ -628,6 +700,8 @@
goto err3;
}
+ pm_runtime_get(dwc->dev);
+
return 0;
err3:
@@ -658,6 +732,7 @@
dotg->charger->start_detection(dotg->charger, false);
cancel_work_sync(&dotg->sm_work);
usb_set_transceiver(NULL);
+ pm_runtime_put(dwc->dev);
free_irq(dotg->irq, dotg);
kfree(dotg->otg.phy);
kfree(dotg);
diff --git a/drivers/usb/dwc3/dwc3_otg.h b/drivers/usb/dwc3/dwc3_otg.h
index 0d8b61b..b60b42a 100644
--- a/drivers/usb/dwc3/dwc3_otg.h
+++ b/drivers/usb/dwc3/dwc3_otg.h
@@ -35,8 +35,9 @@
struct usb_otg otg;
int irq;
void __iomem *regs;
- struct work_struct sm_work;
- struct dwc3_charger *charger;
+ struct work_struct sm_work;
+ struct dwc3_charger *charger;
+ struct dwc3_ext_xceiv *ext_xceiv;
#define ID 0
#define B_SESS_VLD 1
unsigned long inputs;
@@ -73,4 +74,29 @@
/* for external charger driver */
extern int dwc3_set_charger(struct usb_otg *otg, struct dwc3_charger *charger);
+enum dwc3_ext_events {
+ DWC3_EVENT_NONE = 0, /* no change event */
+ DWC3_EVENT_PHY_RESUME, /* PHY has come out of LPM */
+ DWC3_EVENT_XCEIV_STATE, /* XCEIV state (id/bsv) has changed */
+};
+
+enum dwc3_id_state {
+ DWC3_ID_GROUND = 0,
+ DWC3_ID_FLOAT,
+};
+
+/* external transceiver that can perform connect/disconnect monitoring in LPM */
+struct dwc3_ext_xceiv {
+ enum dwc3_id_state id;
+ bool bsv;
+
+ /* to notify OTG about LPM exit event, provided by OTG */
+ void (*notify_ext_events)(struct usb_otg *otg,
+ enum dwc3_ext_events ext_event);
+};
+
+/* for external transceiver driver */
+extern int dwc3_set_ext_xceiv(struct usb_otg *otg,
+ struct dwc3_ext_xceiv *ext_xceiv);
+
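+/*
+ * Typical usage from an external transceiver driver (a minimal sketch, not
+ * part of this patch; "my_xceiv" and the calling driver are hypothetical):
+ *
+ *	static struct dwc3_ext_xceiv my_xceiv;
+ *
+ *	At bind time, after obtaining the OTG transceiver:
+ *		ret = dwc3_set_ext_xceiv(otg, &my_xceiv);
+ *
+ *	Later, when the external hardware reports a cable change:
+ *		my_xceiv.id = DWC3_ID_GROUND;	(host cable inserted)
+ *		my_xceiv.bsv = false;
+ *		if (my_xceiv.notify_ext_events)
+ *			my_xceiv.notify_ext_events(otg, DWC3_EVENT_XCEIV_STATE);
+ */
+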
#endif /* __LINUX_USB_DWC3_OTG_H */
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 060144f..a3f6e58 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2425,6 +2425,11 @@
dev_err(dwc->dev, "failed to set peripheral to otg\n");
goto err7;
}
+ } else {
+ pm_runtime_no_callbacks(&dwc->gadget.dev);
+ pm_runtime_set_active(&dwc->gadget.dev);
+ pm_runtime_enable(&dwc->gadget.dev);
+ pm_runtime_get(&dwc->gadget.dev);
}
return 0;
@@ -2462,6 +2467,11 @@
{
int irq;
+ if (dwc->dotg) {
+ pm_runtime_put(&dwc->gadget.dev);
+ pm_runtime_disable(&dwc->gadget.dev);
+ }
+
usb_del_gadget_udc(&dwc->gadget);
irq = platform_get_irq(to_platform_device(dwc->dev), 0);
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index a13b5da..154d523 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -1065,9 +1065,12 @@
static int mass_storage_function_init(struct android_usb_function *f,
struct usb_composite_dev *cdev)
{
+ struct android_dev *dev = _android_dev;
struct mass_storage_function_config *config;
struct fsg_common *common;
int err;
+ int i;
+ const char *name[2];
config = kzalloc(sizeof(struct mass_storage_function_config),
GFP_KERNEL);
@@ -1075,6 +1078,15 @@
return -ENOMEM;
config->fsg.nluns = 1;
+ name[0] = "lun";
+ if (dev->pdata && dev->pdata->cdrom) {
+ config->fsg.nluns = 2;
+ config->fsg.luns[1].cdrom = 1;
+ config->fsg.luns[1].ro = 1;
+ config->fsg.luns[1].removable = 1;
+ name[1] = "lun0";
+ }
+
config->fsg.luns[0].removable = 1;
common = fsg_common_init(NULL, cdev, &config->fsg);
@@ -1083,18 +1095,24 @@
return PTR_ERR(common);
}
- err = sysfs_create_link(&f->dev->kobj,
- &common->luns[0].dev.kobj,
- "lun");
- if (err) {
- fsg_common_release(&common->ref);
- kfree(config);
- return err;
+ for (i = 0; i < config->fsg.nluns; i++) {
+ err = sysfs_create_link(&f->dev->kobj,
+ &common->luns[i].dev.kobj,
+ name[i]);
+ if (err)
+ goto error;
}
config->common = common;
f->config = config;
return 0;
+error:
+ for (; i > 0 ; i--)
+ sysfs_remove_link(&f->dev->kobj, name[i-1]);
+
+ fsg_common_release(&common->ref);
+ kfree(config);
+ return err;
}
static void mass_storage_function_cleanup(struct android_usb_function *f)
diff --git a/drivers/usb/gadget/f_tcm.c b/drivers/usb/gadget/f_tcm.c
new file mode 100644
index 0000000..d944745
--- /dev/null
+++ b/drivers/usb/gadget/f_tcm.c
@@ -0,0 +1,2434 @@
+/* Target based USB-Gadget Function
+ *
+ * UAS protocol handling, target callbacks, configfs handling,
+ * BBB (USB Mass Storage Class Bulk-Only) transport protocol handling.
+ *
+ * Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
+ * License: GPLv2 as published by FSF.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/storage.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+#include <asm/unaligned.h>
+
+#include "f_tcm.h"
+
+static struct target_fabric_configfs *usbg_fabric_configfs;
+static int (*usbg_connect_cb) (bool connect);
+
+static inline struct f_uas *to_f_uas(struct usb_function *f)
+{
+ return container_of(f, struct f_uas, function);
+}
+
+static void usbg_cmd_release(struct kref *);
+
+static inline void usbg_cleanup_cmd(struct usbg_cmd *cmd)
+{
+ kref_put(&cmd->ref, usbg_cmd_release);
+}
+
+/* Start bot.c code */
+
+static int bot_enqueue_cmd_cbw(struct f_uas *fu)
+{
+ int ret;
+
+ if (fu->flags & USBG_BOT_CMD_PEND)
+ return 0;
+
+ ret = usb_ep_queue(fu->ep_out, fu->cmd.req, GFP_ATOMIC);
+ if (!ret)
+ fu->flags |= USBG_BOT_CMD_PEND;
+ return ret;
+}
+
+static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct usbg_cmd *cmd = req->context;
+ struct f_uas *fu = cmd->fu;
+
+ usbg_cleanup_cmd(cmd);
+ if (req->status < 0) {
+ pr_err("ERR %s(%d)\n", __func__, __LINE__);
+ return;
+ }
+
+ /* CSW completed, wait for next CBW */
+ bot_enqueue_cmd_cbw(fu);
+}
+
+static void bot_enqueue_sense_code(struct f_uas *fu, struct usbg_cmd *cmd)
+{
+ struct bulk_cs_wrap *csw = &fu->bot_status.csw;
+ int ret;
+ u8 *sense;
+ unsigned int csw_stat;
+
+ csw_stat = cmd->csw_code;
+
+ /*
+ * We can't send SENSE as a response. So we take ASC & ASCQ from our
+ * sense buffer and queue it and hope the host sends a REQUEST_SENSE
+ * command where it learns why we failed.
+ */
+ sense = cmd->sense_iu.sense;
+
+ csw->Tag = cmd->bot_tag;
+ csw->Status = csw_stat;
+ fu->bot_status.req->context = cmd;
+ ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_ATOMIC);
+ if (ret)
+ pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
+}
+
+static void bot_err_compl(struct usb_ep *ep, struct usb_request *req)
+{
+ struct usbg_cmd *cmd = req->context;
+ struct f_uas *fu = cmd->fu;
+
+ if (req->status < 0)
+ pr_err("ERR %s(%d)\n", __func__, __LINE__);
+
+ if (cmd->data_len) {
+ if (cmd->data_len > ep->maxpacket) {
+ req->length = ep->maxpacket;
+ cmd->data_len -= ep->maxpacket;
+ } else {
+ req->length = cmd->data_len;
+ cmd->data_len = 0;
+ }
+
+ usb_ep_queue(ep, req, GFP_ATOMIC);
+		return;
+ }
+ bot_enqueue_sense_code(fu, cmd);
+}
+
+static void bot_send_bad_status(struct usbg_cmd *cmd)
+{
+ struct f_uas *fu = cmd->fu;
+ struct bulk_cs_wrap *csw = &fu->bot_status.csw;
+ struct usb_request *req;
+ struct usb_ep *ep;
+
+ csw->Residue = cpu_to_le32(cmd->data_len);
+
+ if (cmd->data_len) {
+ if (cmd->is_read) {
+ ep = fu->ep_in;
+ req = fu->bot_req_in;
+ } else {
+ ep = fu->ep_out;
+ req = fu->bot_req_out;
+ }
+
+ if (cmd->data_len > fu->ep_in->maxpacket) {
+ req->length = ep->maxpacket;
+ cmd->data_len -= ep->maxpacket;
+ } else {
+ req->length = cmd->data_len;
+ cmd->data_len = 0;
+ }
+ req->complete = bot_err_compl;
+ req->context = cmd;
+ req->buf = fu->cmd.buf;
+ usb_ep_queue(ep, req, GFP_KERNEL);
+ } else {
+ bot_enqueue_sense_code(fu, cmd);
+ }
+}
+
+static int bot_send_status(struct usbg_cmd *cmd, bool moved_data)
+{
+ struct f_uas *fu = cmd->fu;
+ struct bulk_cs_wrap *csw = &fu->bot_status.csw;
+ int ret;
+
+ if (cmd->se_cmd.scsi_status == SAM_STAT_GOOD) {
+ if (!moved_data && cmd->data_len) {
+ /*
+			 * The host wants to move data but we don't. Fill / empty
+			 * the pipe and then send the CSW with the residue set.
+ */
+ cmd->csw_code = US_BULK_STAT_OK;
+ bot_send_bad_status(cmd);
+ return 0;
+ }
+
+ csw->Tag = cmd->bot_tag;
+ csw->Residue = cpu_to_le32(0);
+ csw->Status = US_BULK_STAT_OK;
+ fu->bot_status.req->context = cmd;
+
+ ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_KERNEL);
+ if (ret)
+ pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
+ } else {
+ cmd->csw_code = US_BULK_STAT_FAIL;
+ bot_send_bad_status(cmd);
+ }
+ return 0;
+}
+
+/*
+ * Called after command (no data transfer) or after the write (to device)
+ * operation is completed
+ */
+static int bot_send_status_response(struct usbg_cmd *cmd)
+{
+ bool moved_data = false;
+
+ if (!cmd->is_read)
+ moved_data = true;
+ return bot_send_status(cmd, moved_data);
+}
+
+/* Read request completed, now we have to send the CSW */
+static void bot_read_compl(struct usb_ep *ep, struct usb_request *req)
+{
+ struct usbg_cmd *cmd = req->context;
+
+ if (req->status < 0)
+ pr_err("ERR %s(%d)\n", __func__, __LINE__);
+
+ bot_send_status(cmd, true);
+}
+
+static int bot_send_read_response(struct usbg_cmd *cmd)
+{
+ struct f_uas *fu = cmd->fu;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct usb_gadget *gadget = fuas_to_gadget(fu);
+ int ret;
+
+ if (!cmd->data_len) {
+ cmd->csw_code = US_BULK_STAT_PHASE;
+ bot_send_bad_status(cmd);
+ return 0;
+ }
+
+ if (!gadget->sg_supported) {
+ cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
+ if (!cmd->data_buf)
+ return -ENOMEM;
+
+ sg_copy_to_buffer(se_cmd->t_data_sg,
+ se_cmd->t_data_nents,
+ cmd->data_buf,
+ se_cmd->data_length);
+
+ fu->bot_req_in->buf = cmd->data_buf;
+ } else {
+ fu->bot_req_in->buf = NULL;
+ fu->bot_req_in->num_sgs = se_cmd->t_data_nents;
+ fu->bot_req_in->sg = se_cmd->t_data_sg;
+ }
+
+ fu->bot_req_in->complete = bot_read_compl;
+ fu->bot_req_in->length = se_cmd->data_length;
+ fu->bot_req_in->context = cmd;
+ ret = usb_ep_queue(fu->ep_in, fu->bot_req_in, GFP_ATOMIC);
+ if (ret)
+ pr_err("%s(%d)\n", __func__, __LINE__);
+ return 0;
+}
+
+static void usbg_data_write_cmpl(struct usb_ep *, struct usb_request *);
+static int usbg_prepare_w_request(struct usbg_cmd *, struct usb_request *);
+
+static int bot_send_write_request(struct usbg_cmd *cmd)
+{
+ struct f_uas *fu = cmd->fu;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct usb_gadget *gadget = fuas_to_gadget(fu);
+ int ret;
+
+ init_completion(&cmd->write_complete);
+ cmd->fu = fu;
+
+ if (!cmd->data_len) {
+ cmd->csw_code = US_BULK_STAT_PHASE;
+ return -EINVAL;
+ }
+
+ if (!gadget->sg_supported) {
+ cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
+ if (!cmd->data_buf)
+ return -ENOMEM;
+
+ fu->bot_req_out->buf = cmd->data_buf;
+ } else {
+ fu->bot_req_out->buf = NULL;
+ fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
+ fu->bot_req_out->sg = se_cmd->t_data_sg;
+ }
+
+ fu->bot_req_out->complete = usbg_data_write_cmpl;
+ fu->bot_req_out->length = se_cmd->data_length;
+ fu->bot_req_out->context = cmd;
+
+ ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
+ if (ret)
+ goto cleanup;
+ ret = usb_ep_queue(fu->ep_out, fu->bot_req_out, GFP_KERNEL);
+ if (ret)
+ pr_err("%s(%d)\n", __func__, __LINE__);
+
+ wait_for_completion(&cmd->write_complete);
+ transport_generic_process_write(se_cmd);
+cleanup:
+ return ret;
+}
+
+static int bot_submit_command(struct f_uas *, void *, unsigned int);
+
+static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_uas *fu = req->context;
+ int ret;
+
+ fu->flags &= ~USBG_BOT_CMD_PEND;
+
+ if (req->status < 0)
+ return;
+
+ ret = bot_submit_command(fu, req->buf, req->actual);
+ if (ret)
+ pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
+}
+
+static int bot_prepare_reqs(struct f_uas *fu)
+{
+ int ret;
+
+ fu->bot_req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
+ if (!fu->bot_req_in)
+ goto err;
+
+ fu->bot_req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
+ if (!fu->bot_req_out)
+ goto err_out;
+
+ fu->cmd.req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
+ if (!fu->cmd.req)
+ goto err_cmd;
+
+ fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
+ if (!fu->bot_status.req)
+ goto err_sts;
+
+ fu->bot_status.req->buf = &fu->bot_status.csw;
+ fu->bot_status.req->length = US_BULK_CS_WRAP_LEN;
+ fu->bot_status.req->complete = bot_status_complete;
+ fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
+
+ fu->cmd.buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
+ if (!fu->cmd.buf)
+ goto err_buf;
+
+ fu->cmd.req->complete = bot_cmd_complete;
+ fu->cmd.req->buf = fu->cmd.buf;
+ fu->cmd.req->length = fu->ep_out->maxpacket;
+ fu->cmd.req->context = fu;
+
+ ret = bot_enqueue_cmd_cbw(fu);
+ if (ret)
+ goto err_queue;
+ return 0;
+err_queue:
+ kfree(fu->cmd.buf);
+ fu->cmd.buf = NULL;
+err_buf:
+ usb_ep_free_request(fu->ep_in, fu->bot_status.req);
+err_sts:
+ usb_ep_free_request(fu->ep_out, fu->cmd.req);
+ fu->cmd.req = NULL;
+err_cmd:
+ usb_ep_free_request(fu->ep_out, fu->bot_req_out);
+ fu->bot_req_out = NULL;
+err_out:
+ usb_ep_free_request(fu->ep_in, fu->bot_req_in);
+ fu->bot_req_in = NULL;
+err:
+ pr_err("BOT: endpoint setup failed\n");
+ return -ENOMEM;
+}
+
+void bot_cleanup_old_alt(struct f_uas *fu)
+{
+ if (!(fu->flags & USBG_ENABLED))
+ return;
+
+ usb_ep_disable(fu->ep_in);
+ usb_ep_disable(fu->ep_out);
+
+ if (!fu->bot_req_in)
+ return;
+
+ usb_ep_free_request(fu->ep_in, fu->bot_req_in);
+ usb_ep_free_request(fu->ep_out, fu->bot_req_out);
+ usb_ep_free_request(fu->ep_out, fu->cmd.req);
+ usb_ep_free_request(fu->ep_out, fu->bot_status.req);
+
+ kfree(fu->cmd.buf);
+
+ fu->bot_req_in = NULL;
+ fu->bot_req_out = NULL;
+ fu->cmd.req = NULL;
+ fu->bot_status.req = NULL;
+ fu->cmd.buf = NULL;
+}
+
+static void bot_set_alt(struct f_uas *fu)
+{
+ struct usb_function *f = &fu->function;
+ struct usb_gadget *gadget = f->config->cdev->gadget;
+ int ret;
+
+ fu->flags = USBG_IS_BOT;
+
+ config_ep_by_speed(gadget, f, fu->ep_in);
+ ret = usb_ep_enable(fu->ep_in);
+ if (ret)
+ goto err_b_in;
+
+ config_ep_by_speed(gadget, f, fu->ep_out);
+ ret = usb_ep_enable(fu->ep_out);
+ if (ret)
+ goto err_b_out;
+
+ ret = bot_prepare_reqs(fu);
+ if (ret)
+ goto err_wq;
+ fu->flags |= USBG_ENABLED;
+ pr_info("Using the BOT protocol\n");
+ return;
+err_wq:
+ usb_ep_disable(fu->ep_out);
+err_b_out:
+ usb_ep_disable(fu->ep_in);
+err_b_in:
+ fu->flags = USBG_IS_BOT;
+}
+
+static int usbg_bot_setup(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct f_uas *fu = to_f_uas(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ int luns;
+ u8 *ret_lun;
+
+ switch (ctrl->bRequest) {
+ case US_BULK_GET_MAX_LUN:
+ if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_CLASS |
+ USB_RECIP_INTERFACE))
+ return -ENOTSUPP;
+
+ if (w_length < 1)
+ return -EINVAL;
+ if (w_value != 0)
+ return -EINVAL;
+ luns = atomic_read(&fu->tpg->tpg_port_count);
+ if (!luns) {
+ pr_err("No LUNs configured?\n");
+ return -EINVAL;
+ }
+ /*
+ * If 4 LUNs are present we return 3 i.e. LUN 0..3 can be
+ * accessed. The upper limit is 0xf
+ */
+ luns--;
+ if (luns > 0xf) {
+ pr_info_once("Limiting the number of luns to 16\n");
+ luns = 0xf;
+ }
+ ret_lun = cdev->req->buf;
+ *ret_lun = luns;
+ cdev->req->length = 1;
+ return usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+ break;
+
+ case US_BULK_RESET_REQUEST:
+ /* XXX maybe we should remove previous requests for IN + OUT */
+ bot_enqueue_cmd_cbw(fu);
+ return 0;
+ break;
+	}
+ return -ENOTSUPP;
+}
+
+/* Start uas.c code */
+
+static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
+{
+ /* We have either all three allocated or none */
+ if (!stream->req_in)
+ return;
+
+ usb_ep_free_request(fu->ep_in, stream->req_in);
+ usb_ep_free_request(fu->ep_out, stream->req_out);
+ usb_ep_free_request(fu->ep_status, stream->req_status);
+
+ stream->req_in = NULL;
+ stream->req_out = NULL;
+ stream->req_status = NULL;
+}
+
+static void uasp_free_cmdreq(struct f_uas *fu)
+{
+ usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
+ kfree(fu->cmd.buf);
+ fu->cmd.req = NULL;
+ fu->cmd.buf = NULL;
+}
+
+static void uasp_cleanup_old_alt(struct f_uas *fu)
+{
+ int i;
+
+ if (!(fu->flags & USBG_ENABLED))
+ return;
+
+ usb_ep_disable(fu->ep_in);
+ usb_ep_disable(fu->ep_out);
+ usb_ep_disable(fu->ep_status);
+ usb_ep_disable(fu->ep_cmd);
+
+ for (i = 0; i < UASP_SS_EP_COMP_NUM_STREAMS; i++)
+ uasp_cleanup_one_stream(fu, &fu->stream[i]);
+ uasp_free_cmdreq(fu);
+}
+
+static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
+
+static int uasp_prepare_r_request(struct usbg_cmd *cmd)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct f_uas *fu = cmd->fu;
+ struct usb_gadget *gadget = fuas_to_gadget(fu);
+ struct uas_stream *stream = cmd->stream;
+
+ if (!gadget->sg_supported) {
+ cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
+ if (!cmd->data_buf)
+ return -ENOMEM;
+
+ sg_copy_to_buffer(se_cmd->t_data_sg,
+ se_cmd->t_data_nents,
+ cmd->data_buf,
+ se_cmd->data_length);
+
+ stream->req_in->buf = cmd->data_buf;
+ } else {
+ stream->req_in->buf = NULL;
+ stream->req_in->num_sgs = se_cmd->t_data_nents;
+ stream->req_in->sg = se_cmd->t_data_sg;
+ }
+
+ stream->req_in->complete = uasp_status_data_cmpl;
+ stream->req_in->length = se_cmd->data_length;
+ stream->req_in->context = cmd;
+
+ cmd->state = UASP_SEND_STATUS;
+ return 0;
+}
+
+static void uasp_prepare_status(struct usbg_cmd *cmd)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct sense_iu *iu = &cmd->sense_iu;
+ struct uas_stream *stream = cmd->stream;
+
+ cmd->state = UASP_QUEUE_COMMAND;
+ iu->iu_id = IU_ID_STATUS;
+ iu->tag = cpu_to_be16(cmd->tag);
+
+ /*
+ * iu->status_qual = cpu_to_be16(STATUS QUALIFIER SAM-4. Where R U?);
+ */
+ iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
+ iu->status = se_cmd->scsi_status;
+ stream->req_status->context = cmd;
+ stream->req_status->length = se_cmd->scsi_sense_length + 16;
+ stream->req_status->buf = iu;
+ stream->req_status->complete = uasp_status_data_cmpl;
+}
+
+static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
+{
+ struct usbg_cmd *cmd = req->context;
+ struct uas_stream *stream = cmd->stream;
+ struct f_uas *fu = cmd->fu;
+ int ret;
+
+ if (req->status < 0)
+ goto cleanup;
+
+ switch (cmd->state) {
+ case UASP_SEND_DATA:
+ ret = uasp_prepare_r_request(cmd);
+ if (ret)
+ goto cleanup;
+ ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
+ if (ret)
+ pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+ break;
+
+ case UASP_RECEIVE_DATA:
+ ret = usbg_prepare_w_request(cmd, stream->req_out);
+ if (ret)
+ goto cleanup;
+ ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
+ if (ret)
+ pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+ break;
+
+ case UASP_SEND_STATUS:
+ uasp_prepare_status(cmd);
+ ret = usb_ep_queue(fu->ep_status, stream->req_status,
+ GFP_ATOMIC);
+ if (ret)
+ pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+ break;
+
+ case UASP_QUEUE_COMMAND:
+ usbg_cleanup_cmd(cmd);
+ usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+ break;
+
+ default:
+ BUG();
+	}
+ return;
+
+cleanup:
+ usbg_cleanup_cmd(cmd);
+}
+
+static int uasp_send_status_response(struct usbg_cmd *cmd)
+{
+ struct f_uas *fu = cmd->fu;
+ struct uas_stream *stream = cmd->stream;
+ struct sense_iu *iu = &cmd->sense_iu;
+
+ iu->tag = cpu_to_be16(cmd->tag);
+ stream->req_status->complete = uasp_status_data_cmpl;
+ stream->req_status->context = cmd;
+ cmd->fu = fu;
+ uasp_prepare_status(cmd);
+ return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
+}
+
+static int uasp_send_read_response(struct usbg_cmd *cmd)
+{
+ struct f_uas *fu = cmd->fu;
+ struct uas_stream *stream = cmd->stream;
+ struct sense_iu *iu = &cmd->sense_iu;
+ int ret;
+
+ cmd->fu = fu;
+
+ iu->tag = cpu_to_be16(cmd->tag);
+ if (fu->flags & USBG_USE_STREAMS) {
+
+ ret = uasp_prepare_r_request(cmd);
+ if (ret)
+ goto out;
+ ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
+ if (ret) {
+ pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+ kfree(cmd->data_buf);
+ cmd->data_buf = NULL;
+ }
+
+ } else {
+
+ iu->iu_id = IU_ID_READ_READY;
+ iu->tag = cpu_to_be16(cmd->tag);
+
+ stream->req_status->complete = uasp_status_data_cmpl;
+ stream->req_status->context = cmd;
+
+ cmd->state = UASP_SEND_DATA;
+ stream->req_status->buf = iu;
+ stream->req_status->length = sizeof(struct iu);
+
+ ret = usb_ep_queue(fu->ep_status, stream->req_status,
+ GFP_ATOMIC);
+ if (ret)
+ pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+ }
+out:
+ return ret;
+}
+
+static int uasp_send_write_request(struct usbg_cmd *cmd)
+{
+ struct f_uas *fu = cmd->fu;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct uas_stream *stream = cmd->stream;
+ struct sense_iu *iu = &cmd->sense_iu;
+ int ret;
+
+ init_completion(&cmd->write_complete);
+ cmd->fu = fu;
+
+ iu->tag = cpu_to_be16(cmd->tag);
+
+ if (fu->flags & USBG_USE_STREAMS) {
+
+ ret = usbg_prepare_w_request(cmd, stream->req_out);
+ if (ret)
+ goto cleanup;
+ ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
+ if (ret)
+ pr_err("%s(%d)\n", __func__, __LINE__);
+
+ } else {
+
+ iu->iu_id = IU_ID_WRITE_READY;
+ iu->tag = cpu_to_be16(cmd->tag);
+
+ stream->req_status->complete = uasp_status_data_cmpl;
+ stream->req_status->context = cmd;
+
+ cmd->state = UASP_RECEIVE_DATA;
+ stream->req_status->buf = iu;
+ stream->req_status->length = sizeof(struct iu);
+
+ ret = usb_ep_queue(fu->ep_status, stream->req_status,
+ GFP_ATOMIC);
+ if (ret)
+ pr_err("%s(%d)\n", __func__, __LINE__);
+ }
+
+ wait_for_completion(&cmd->write_complete);
+ transport_generic_process_write(se_cmd);
+cleanup:
+ return ret;
+}
+
+static int usbg_submit_command(struct f_uas *, void *, unsigned int);
+
+static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_uas *fu = req->context;
+ int ret;
+
+ if (req->status < 0)
+ return;
+
+ ret = usbg_submit_command(fu, req->buf, req->actual);
+ /*
+	 * Once we tune for performance, enqueue the command req here again so
+	 * we can receive a second command while we are processing this one.
+	 * Pay attention to properly sync the STATUS endpoint with DATA IN +
+	 * OUT so you don't break HS.
+ */
+ if (!ret)
+ return;
+ usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+}
+
+static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
+{
+ stream->req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
+ if (!stream->req_in)
+ goto out;
+
+ stream->req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
+ if (!stream->req_out)
+ goto err_out;
+
+ stream->req_status = usb_ep_alloc_request(fu->ep_status, GFP_KERNEL);
+ if (!stream->req_status)
+ goto err_sts;
+
+ return 0;
+err_sts:
+ usb_ep_free_request(fu->ep_status, stream->req_status);
+ stream->req_status = NULL;
+err_out:
+ usb_ep_free_request(fu->ep_out, stream->req_out);
+ stream->req_out = NULL;
+out:
+ return -ENOMEM;
+}
+
+static int uasp_alloc_cmd(struct f_uas *fu)
+{
+ fu->cmd.req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
+ if (!fu->cmd.req)
+ goto err;
+
+ fu->cmd.buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
+ if (!fu->cmd.buf)
+ goto err_buf;
+
+ fu->cmd.req->complete = uasp_cmd_complete;
+ fu->cmd.req->buf = fu->cmd.buf;
+ fu->cmd.req->length = fu->ep_cmd->maxpacket;
+ fu->cmd.req->context = fu;
+ return 0;
+
+err_buf:
+ usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
+err:
+ return -ENOMEM;
+}
+
+static void uasp_setup_stream_res(struct f_uas *fu, int max_streams)
+{
+ int i;
+
+ for (i = 0; i < max_streams; i++) {
+ struct uas_stream *s = &fu->stream[i];
+
+ s->req_in->stream_id = i + 1;
+ s->req_out->stream_id = i + 1;
+ s->req_status->stream_id = i + 1;
+ }
+}
+
+static int uasp_prepare_reqs(struct f_uas *fu)
+{
+ int ret;
+ int i;
+ int max_streams;
+
+ if (fu->flags & USBG_USE_STREAMS)
+ max_streams = UASP_SS_EP_COMP_NUM_STREAMS;
+ else
+ max_streams = 1;
+
+ for (i = 0; i < max_streams; i++) {
+ ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
+ if (ret)
+ goto err_cleanup;
+ }
+
+ ret = uasp_alloc_cmd(fu);
+ if (ret)
+ goto err_free_stream;
+ uasp_setup_stream_res(fu, max_streams);
+
+ ret = usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+ if (ret)
+ goto err_free_stream;
+
+ return 0;
+
+err_free_stream:
+ uasp_free_cmdreq(fu);
+
+err_cleanup:
+ if (i) {
+ do {
+ uasp_cleanup_one_stream(fu, &fu->stream[i - 1]);
+ i--;
+ } while (i);
+ }
+ pr_err("UASP: endpoint setup failed\n");
+ return ret;
+}
+
+static void uasp_set_alt(struct f_uas *fu)
+{
+ struct usb_function *f = &fu->function;
+ struct usb_gadget *gadget = f->config->cdev->gadget;
+ int ret;
+
+ fu->flags = USBG_IS_UAS;
+
+ if (gadget->speed == USB_SPEED_SUPER)
+ fu->flags |= USBG_USE_STREAMS;
+
+ config_ep_by_speed(gadget, f, fu->ep_in);
+ ret = usb_ep_enable(fu->ep_in);
+ if (ret)
+ goto err_b_in;
+
+ config_ep_by_speed(gadget, f, fu->ep_out);
+ ret = usb_ep_enable(fu->ep_out);
+ if (ret)
+ goto err_b_out;
+
+ config_ep_by_speed(gadget, f, fu->ep_cmd);
+ ret = usb_ep_enable(fu->ep_cmd);
+ if (ret)
+ goto err_cmd;
+ config_ep_by_speed(gadget, f, fu->ep_status);
+ ret = usb_ep_enable(fu->ep_status);
+ if (ret)
+ goto err_status;
+
+ ret = uasp_prepare_reqs(fu);
+ if (ret)
+ goto err_wq;
+ fu->flags |= USBG_ENABLED;
+
+ pr_info("Using the UAS protocol\n");
+ return;
+err_wq:
+ usb_ep_disable(fu->ep_status);
+err_status:
+ usb_ep_disable(fu->ep_cmd);
+err_cmd:
+ usb_ep_disable(fu->ep_out);
+err_b_out:
+ usb_ep_disable(fu->ep_in);
+err_b_in:
+ fu->flags = 0;
+}
+
+static int get_cmd_dir(const unsigned char *cdb)
+{
+ int ret;
+
+ switch (cdb[0]) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case INQUIRY:
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ case SERVICE_ACTION_IN:
+ case MAINTENANCE_IN:
+ case PERSISTENT_RESERVE_IN:
+ case SECURITY_PROTOCOL_IN:
+ case ACCESS_CONTROL_IN:
+ case REPORT_LUNS:
+ case READ_BLOCK_LIMITS:
+ case READ_POSITION:
+ case READ_CAPACITY:
+ case READ_TOC:
+ case READ_FORMAT_CAPACITIES:
+ case REQUEST_SENSE:
+ ret = DMA_FROM_DEVICE;
+ break;
+
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ case MODE_SELECT:
+ case MODE_SELECT_10:
+ case WRITE_VERIFY:
+ case WRITE_VERIFY_12:
+ case PERSISTENT_RESERVE_OUT:
+ case MAINTENANCE_OUT:
+ case SECURITY_PROTOCOL_OUT:
+ case ACCESS_CONTROL_OUT:
+ ret = DMA_TO_DEVICE;
+ break;
+ case ALLOW_MEDIUM_REMOVAL:
+ case TEST_UNIT_READY:
+ case SYNCHRONIZE_CACHE:
+ case START_STOP:
+ case ERASE:
+ case REZERO_UNIT:
+ case SEEK_10:
+ case SPACE:
+ case VERIFY:
+ case WRITE_FILEMARKS:
+ ret = DMA_NONE;
+ break;
+ default:
+ pr_warn("target: Unknown data direction for SCSI Opcode "
+ "0x%02x\n", cdb[0]);
+ ret = -EINVAL;
+ }
+ return ret;
+}
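+
+/*
+ * Example: for a READ(10) CDB the opcode byte cdb[0] is 0x28, which hits the
+ * READ_10 case above and yields DMA_FROM_DEVICE (data flows device-to-host);
+ * WRITE(10) is 0x2a and yields DMA_TO_DEVICE, while TEST UNIT READY (0x00)
+ * has no data phase and yields DMA_NONE.
+ */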
+
+static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
+{
+ struct usbg_cmd *cmd = req->context;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (req->status < 0) {
+ pr_err("%s() state %d transfer failed\n", __func__, cmd->state);
+ goto cleanup;
+ }
+
+ if (req->num_sgs == 0) {
+ sg_copy_from_buffer(se_cmd->t_data_sg,
+ se_cmd->t_data_nents,
+ cmd->data_buf,
+ se_cmd->data_length);
+ }
+
+ complete(&cmd->write_complete);
+ return;
+
+cleanup:
+ usbg_cleanup_cmd(cmd);
+}
+
+static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct f_uas *fu = cmd->fu;
+ struct usb_gadget *gadget = fuas_to_gadget(fu);
+
+ if (!gadget->sg_supported) {
+ cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
+ if (!cmd->data_buf)
+ return -ENOMEM;
+
+ req->buf = cmd->data_buf;
+ } else {
+ req->buf = NULL;
+ req->num_sgs = se_cmd->t_data_nents;
+ req->sg = se_cmd->t_data_sg;
+ }
+
+ req->complete = usbg_data_write_cmpl;
+ req->length = se_cmd->data_length;
+ req->context = cmd;
+ return 0;
+}
+
+static int usbg_send_status_response(struct se_cmd *se_cmd)
+{
+ struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+ se_cmd);
+ struct f_uas *fu = cmd->fu;
+
+ if (fu->flags & USBG_IS_BOT)
+ return bot_send_status_response(cmd);
+ else
+ return uasp_send_status_response(cmd);
+}
+
+static int usbg_send_write_request(struct se_cmd *se_cmd)
+{
+ struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+ se_cmd);
+ struct f_uas *fu = cmd->fu;
+
+ if (fu->flags & USBG_IS_BOT)
+ return bot_send_write_request(cmd);
+ else
+ return uasp_send_write_request(cmd);
+}
+
+static int usbg_send_read_response(struct se_cmd *se_cmd)
+{
+ struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+ se_cmd);
+ struct f_uas *fu = cmd->fu;
+
+ if (fu->flags & USBG_IS_BOT)
+ return bot_send_read_response(cmd);
+ else
+ return uasp_send_read_response(cmd);
+}
+
+static void usbg_cmd_work(struct work_struct *work)
+{
+ struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
+ struct se_cmd *se_cmd;
+ struct tcm_usbg_nexus *tv_nexus;
+ struct usbg_tpg *tpg;
+ int dir;
+
+ se_cmd = &cmd->se_cmd;
+ tpg = cmd->fu->tpg;
+ tv_nexus = tpg->tpg_nexus;
+ dir = get_cmd_dir(cmd->cmd_buf);
+ if (dir < 0) {
+ transport_init_se_cmd(se_cmd,
+ tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+ tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+ cmd->prio_attr, cmd->sense_iu.sense);
+
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_UNSUPPORTED_SCSI_OPCODE, 1);
+ usbg_cleanup_cmd(cmd);
+ return;
+ }
+
+ target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
+ cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
+ 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE);
+}
+
+static int usbg_submit_command(struct f_uas *fu,
+ void *cmdbuf, unsigned int len)
+{
+ struct command_iu *cmd_iu = cmdbuf;
+ struct usbg_cmd *cmd;
+ struct usbg_tpg *tpg;
+ struct se_cmd *se_cmd;
+ struct tcm_usbg_nexus *tv_nexus;
+ u32 cmd_len;
+ int ret;
+
+ if (cmd_iu->iu_id != IU_ID_COMMAND) {
+ pr_err("Unsupported type %d\n", cmd_iu->iu_id);
+ return -EINVAL;
+ }
+
+ cmd = kzalloc(sizeof *cmd, GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->fu = fu;
+
+	/* XXX until I figure out why I can't free it on complete */
+ kref_init(&cmd->ref);
+ kref_get(&cmd->ref);
+
+ tpg = fu->tpg;
+ cmd_len = (cmd_iu->len & ~0x3) + 16;
+ if (cmd_len > USBG_MAX_CMD)
+ goto err;
+
+ memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
+
+ cmd->tag = be16_to_cpup(&cmd_iu->tag);
+ if (fu->flags & USBG_USE_STREAMS) {
+ if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
+ goto err;
+ if (!cmd->tag)
+ cmd->stream = &fu->stream[0];
+ else
+ cmd->stream = &fu->stream[cmd->tag - 1];
+ } else {
+ cmd->stream = &fu->stream[0];
+ }
+
+ tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ pr_err("Missing nexus, ignoring command\n");
+ goto err;
+ }
+
+ switch (cmd_iu->prio_attr & 0x7) {
+ case UAS_HEAD_TAG:
+ cmd->prio_attr = MSG_HEAD_TAG;
+ break;
+ case UAS_ORDERED_TAG:
+ cmd->prio_attr = MSG_ORDERED_TAG;
+ break;
+ case UAS_ACA:
+ cmd->prio_attr = MSG_ACA_TAG;
+ break;
+ default:
+ pr_debug_once("Unsupported prio_attr: %02x.\n",
+ cmd_iu->prio_attr);
+ case UAS_SIMPLE_TAG:
+ cmd->prio_attr = MSG_SIMPLE_TAG;
+ break;
+ }
+
+ se_cmd = &cmd->se_cmd;
+ cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
+
+ INIT_WORK(&cmd->work, usbg_cmd_work);
+ ret = queue_work(tpg->workqueue, &cmd->work);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+err:
+ kfree(cmd);
+ return -EINVAL;
+}
+
+static void bot_cmd_work(struct work_struct *work)
+{
+ struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
+ struct se_cmd *se_cmd;
+ struct tcm_usbg_nexus *tv_nexus;
+ struct usbg_tpg *tpg;
+ int dir;
+
+ se_cmd = &cmd->se_cmd;
+ tpg = cmd->fu->tpg;
+ tv_nexus = tpg->tpg_nexus;
+ dir = get_cmd_dir(cmd->cmd_buf);
+ if (dir < 0) {
+ transport_init_se_cmd(se_cmd,
+ tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+ tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+ cmd->prio_attr, cmd->sense_iu.sense);
+
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_UNSUPPORTED_SCSI_OPCODE, 1);
+ usbg_cleanup_cmd(cmd);
+ return;
+ }
+
+ target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
+ cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
+ cmd->data_len, cmd->prio_attr, dir, 0);
+}
+
+static int bot_submit_command(struct f_uas *fu,
+ void *cmdbuf, unsigned int len)
+{
+ struct bulk_cb_wrap *cbw = cmdbuf;
+ struct usbg_cmd *cmd;
+ struct usbg_tpg *tpg;
+ struct se_cmd *se_cmd;
+ struct tcm_usbg_nexus *tv_nexus;
+ u32 cmd_len;
+ int ret;
+
+ if (cbw->Signature != cpu_to_le32(US_BULK_CB_SIGN)) {
+ pr_err("Wrong signature on CBW\n");
+ return -EINVAL;
+ }
+ if (len != 31) {
+ pr_err("Wrong length for CBW\n");
+ return -EINVAL;
+ }
+
+ cmd_len = cbw->Length;
+ if (cmd_len < 1 || cmd_len > 16)
+ return -EINVAL;
+
+ cmd = kzalloc(sizeof *cmd, GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->fu = fu;
+
+	/* XXX until I figure out why I can't free it on complete */
+ kref_init(&cmd->ref);
+ kref_get(&cmd->ref);
+
+ tpg = fu->tpg;
+
+ memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
+
+ cmd->bot_tag = cbw->Tag;
+
+ tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ pr_err("Missing nexus, ignoring command\n");
+ goto err;
+ }
+
+ cmd->prio_attr = MSG_SIMPLE_TAG;
+ se_cmd = &cmd->se_cmd;
+ cmd->unpacked_lun = cbw->Lun;
+ cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
+ cmd->data_len = le32_to_cpu(cbw->DataTransferLength);
+
+ INIT_WORK(&cmd->work, bot_cmd_work);
+ ret = queue_work(tpg->workqueue, &cmd->work);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+err:
+ kfree(cmd);
+ return -EINVAL;
+}
+
+/* Start fabric.c code */
+
+static int usbg_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int usbg_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+static char *usbg_get_fabric_name(void)
+{
+ return "usb_gadget";
+}
+
+static u8 usbg_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ struct usbg_tpg *tpg = container_of(se_tpg,
+ struct usbg_tpg, se_tpg);
+ struct usbg_tport *tport = tpg->tport;
+ u8 proto_id;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ default:
+ proto_id = sas_get_fabric_proto_ident(se_tpg);
+ break;
+ }
+
+ return proto_id;
+}
+
+static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct usbg_tpg *tpg = container_of(se_tpg,
+ struct usbg_tpg, se_tpg);
+ struct usbg_tport *tport = tpg->tport;
+
+ return &tport->tport_name[0];
+}
+
+static u16 usbg_get_tag(struct se_portal_group *se_tpg)
+{
+ struct usbg_tpg *tpg = container_of(se_tpg,
+ struct usbg_tpg, se_tpg);
+ return tpg->tport_tpgt;
+}
+
+static u32 usbg_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static u32 usbg_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ struct usbg_tpg *tpg = container_of(se_tpg,
+ struct usbg_tpg, se_tpg);
+ struct usbg_tport *tport = tpg->tport;
+ int ret = 0;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ default:
+ ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ break;
+ }
+
+ return ret;
+}
+
+static u32 usbg_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ struct usbg_tpg *tpg = container_of(se_tpg,
+ struct usbg_tpg, se_tpg);
+ struct usbg_tport *tport = tpg->tport;
+ int ret = 0;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ default:
+ ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ break;
+ }
+
+ return ret;
+}
+
+static char *usbg_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ struct usbg_tpg *tpg = container_of(se_tpg,
+ struct usbg_tpg, se_tpg);
+ struct usbg_tport *tport = tpg->tport;
+ char *tid = NULL;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ default:
+ tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ }
+
+ return tid;
+}
+
+static struct se_node_acl *usbg_alloc_fabric_acl(struct se_portal_group *se_tpg)
+{
+ struct usbg_nacl *nacl;
+
+ nacl = kzalloc(sizeof(struct usbg_nacl), GFP_KERNEL);
+ if (!nacl) {
+		printk(KERN_ERR "Unable to allocate struct usbg_nacl\n");
+ return NULL;
+ }
+
+ return &nacl->se_node_acl;
+}
+
+static void usbg_release_fabric_acl(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl)
+{
+ struct usbg_nacl *nacl = container_of(se_nacl,
+ struct usbg_nacl, se_node_acl);
+ kfree(nacl);
+}
+
+static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int usbg_new_cmd(struct se_cmd *se_cmd)
+{
+ struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+ se_cmd);
+ int ret;
+
+ ret = target_setup_cmd_from_cdb(se_cmd, cmd->cmd_buf);
+ if (ret)
+ return ret;
+
+ return transport_generic_map_mem_to_cmd(se_cmd, NULL, 0, NULL, 0);
+}
+
+static void usbg_cmd_release(struct kref *ref)
+{
+ struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
+ ref);
+
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+}
+
+static void usbg_release_cmd(struct se_cmd *se_cmd)
+{
+ struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+ se_cmd);
+ kfree(cmd->data_buf);
+ kfree(cmd);
+ return;
+}
+
+static int usbg_shutdown_session(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void usbg_close_session(struct se_session *se_sess)
+{
+ return;
+}
+
+static u32 usbg_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+/*
+ * XXX Error recovery: return != 0 if we expect writes. Dunno when that could be
+ */
+static int usbg_write_pending_status(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static void usbg_set_default_node_attrs(struct se_node_acl *nacl)
+{
+ return;
+}
+
+static u32 usbg_get_task_tag(struct se_cmd *se_cmd)
+{
+ struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+ se_cmd);
+ struct f_uas *fu = cmd->fu;
+
+ if (fu->flags & USBG_IS_BOT)
+ return le32_to_cpu(cmd->bot_tag);
+ else
+ return cmd->tag;
+}
+
+static int usbg_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int usbg_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static u16 usbg_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
+{
+ return 0;
+}
+
+static u16 usbg_get_fabric_sense_len(void)
+{
+ return 0;
+}
+
+static const char *usbg_check_wwn(const char *name)
+{
+ const char *n;
+ unsigned int len;
+
+ n = strstr(name, "naa.");
+ if (!n)
+ return NULL;
+ n += 4;
+ len = strlen(n);
+ if (len == 0 || len > USBG_NAMELEN - 1)
+ return NULL;
+ return n;
+}
+
+static struct se_node_acl *usbg_make_nodeacl(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct se_node_acl *se_nacl, *se_nacl_new;
+ struct usbg_nacl *nacl;
+ u64 wwpn = 0;
+ u32 nexus_depth;
+ const char *wnn_name;
+
+ wnn_name = usbg_check_wwn(name);
+ if (!wnn_name)
+ return ERR_PTR(-EINVAL);
+ se_nacl_new = usbg_alloc_fabric_acl(se_tpg);
+ if (!(se_nacl_new))
+ return ERR_PTR(-ENOMEM);
+
+ nexus_depth = 1;
+ /*
+ * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+	 * when converting a NodeACL from demo mode -> explicit
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+ name, nexus_depth);
+ if (IS_ERR(se_nacl)) {
+ usbg_release_fabric_acl(se_tpg, se_nacl_new);
+ return se_nacl;
+ }
+ /*
+ * Locate our struct usbg_nacl and set the FC Nport WWPN
+ */
+ nacl = container_of(se_nacl, struct usbg_nacl, se_node_acl);
+ nacl->iport_wwpn = wwpn;
+ snprintf(nacl->iport_name, sizeof(nacl->iport_name), "%s", name);
+ return se_nacl;
+}
+
+static void usbg_drop_nodeacl(struct se_node_acl *se_acl)
+{
+ struct usbg_nacl *nacl = container_of(se_acl,
+ struct usbg_nacl, se_node_acl);
+ core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
+ kfree(nacl);
+}
+
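+/* The gadget framework currently supports a single instance, so only one TPG can exist at a time. */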
+struct usbg_tpg *the_only_tpg_I_currently_have;
+
+static struct se_portal_group *usbg_make_tpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
+ tport_wwn);
+ struct usbg_tpg *tpg;
+ unsigned long tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX)
+ return ERR_PTR(-EINVAL);
+ if (the_only_tpg_I_currently_have) {
+		pr_err("Until the gadget framework can handle multiple\n");
+ pr_err("gadgets, you can't do this here.\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ tpg = kzalloc(sizeof(struct usbg_tpg), GFP_KERNEL);
+ if (!tpg) {
+		printk(KERN_ERR "Unable to allocate struct usbg_tpg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ mutex_init(&tpg->tpg_mutex);
+ atomic_set(&tpg->tpg_port_count, 0);
+ tpg->workqueue = alloc_workqueue("tcm_usb_gadget", 0, 1);
+ if (!tpg->workqueue) {
+ kfree(tpg);
+ return NULL;
+ }
+
+ tpg->tport = tport;
+ tpg->tport_tpgt = tpgt;
+
+ ret = core_tpg_register(&usbg_fabric_configfs->tf_ops, wwn,
+ &tpg->se_tpg, tpg,
+ TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ destroy_workqueue(tpg->workqueue);
+ kfree(tpg);
+ return NULL;
+ }
+ the_only_tpg_I_currently_have = tpg;
+ return &tpg->se_tpg;
+}
+
+static void usbg_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct usbg_tpg *tpg = container_of(se_tpg,
+ struct usbg_tpg, se_tpg);
+
+ core_tpg_deregister(se_tpg);
+ destroy_workqueue(tpg->workqueue);
+ kfree(tpg);
+ the_only_tpg_I_currently_have = NULL;
+}
+
+static struct se_wwn *usbg_make_tport(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct usbg_tport *tport;
+ const char *wnn_name;
+ u64 wwpn = 0;
+
+ wnn_name = usbg_check_wwn(name);
+ if (!wnn_name)
+ return ERR_PTR(-EINVAL);
+
+ tport = kzalloc(sizeof(struct usbg_tport), GFP_KERNEL);
+	if (!tport) {
+		printk(KERN_ERR "Unable to allocate struct usbg_tport\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ tport->tport_wwpn = wwpn;
+	snprintf(tport->tport_name, sizeof(tport->tport_name), "%s", wnn_name);
+ return &tport->tport_wwn;
+}
+
+static void usbg_drop_tport(struct se_wwn *wwn)
+{
+ struct usbg_tport *tport = container_of(wwn,
+ struct usbg_tport, tport_wwn);
+ kfree(tport);
+}
+
+/*
+ * If somebody feels like dropping the version property, go ahead.
+ */
+static ssize_t usbg_wwn_show_attr_version(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ return sprintf(page, "usb-gadget fabric module\n");
+}
+TF_WWN_ATTR_RO(usbg, version);
+
+static struct configfs_attribute *usbg_wwn_attrs[] = {
+ &usbg_wwn_version.attr,
+ NULL,
+};
+
+static ssize_t tcm_usbg_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tpg->gadget_connect);
+}
+
+static int usbg_attach(struct usbg_tpg *tpg)
+{
+ return usbg_connect_cb(true);
+}
+
+static void usbg_detach(struct usbg_tpg *tpg)
+{
+ usbg_connect_cb(false);
+}
+
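+/*
+ * Writing 1/0 to the configfs "enable" attribute connects or disconnects the
+ * gadget through the connect callback supplied to f_tcm_init().
+ */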
+static ssize_t tcm_usbg_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+ unsigned long op;
+ ssize_t ret;
+
+ ret = kstrtoul(page, 0, &op);
+ if (ret < 0)
+ return -EINVAL;
+ if (op > 1)
+ return -EINVAL;
+
+ if (op && tpg->gadget_connect)
+ goto out;
+ if (!op && !tpg->gadget_connect)
+ goto out;
+
+ if (op) {
+ ret = usbg_attach(tpg);
+ if (ret)
+ goto out;
+ } else {
+ usbg_detach(tpg);
+ }
+ tpg->gadget_connect = op;
+out:
+ return count;
+}
+TF_TPG_BASE_ATTR(tcm_usbg, enable, S_IRUGO | S_IWUSR);
+
+static ssize_t tcm_usbg_tpg_show_nexus(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+ struct tcm_usbg_nexus *tv_nexus;
+ ssize_t ret;
+
+ mutex_lock(&tpg->tpg_mutex);
+ tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ ret = -ENODEV;
+ goto out;
+ }
+ ret = snprintf(page, PAGE_SIZE, "%s\n",
+ tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+out:
+ mutex_unlock(&tpg->tpg_mutex);
+ return ret;
+}
+
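+/*
+ * Create the I_T nexus for this TPG: allocate a session, look up the
+ * demo-mode initiator node ACL and register the session with the target core.
+ */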
+static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
+{
+ struct se_portal_group *se_tpg;
+ struct tcm_usbg_nexus *tv_nexus;
+ int ret;
+
+ mutex_lock(&tpg->tpg_mutex);
+ if (tpg->tpg_nexus) {
+ ret = -EEXIST;
+ pr_debug("tpg->tpg_nexus already exists\n");
+ goto err_unlock;
+ }
+ se_tpg = &tpg->se_tpg;
+
+ ret = -ENOMEM;
+ tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
+ if (!tv_nexus) {
+		pr_err("Unable to allocate struct tcm_usbg_nexus\n");
+ goto err_unlock;
+ }
+ tv_nexus->tvn_se_sess = transport_init_session();
+ if (IS_ERR(tv_nexus->tvn_se_sess))
+ goto err_free;
+
+ /*
+	 * Since we are running in 'demo mode' this call will generate a
+	 * struct se_node_acl for the usb-gadget struct se_portal_group with
+ * the SCSI Initiator port name of the passed configfs group 'name'.
+ */
+ tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+ se_tpg, name);
+ if (!tv_nexus->tvn_se_sess->se_node_acl) {
+ pr_debug("core_tpg_check_initiator_node_acl() failed"
+ " for %s\n", name);
+ goto err_session;
+ }
+ /*
+	 * Now register the usb-gadget virtual I_T Nexus as active with the
+ * call to __transport_register_session()
+ */
+ __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
+ tv_nexus->tvn_se_sess, tv_nexus);
+ tpg->tpg_nexus = tv_nexus;
+ mutex_unlock(&tpg->tpg_mutex);
+ return 0;
+
+err_session:
+ transport_free_session(tv_nexus->tvn_se_sess);
+err_free:
+ kfree(tv_nexus);
+err_unlock:
+ mutex_unlock(&tpg->tpg_mutex);
+ return ret;
+}
+
+static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
+{
+ struct se_session *se_sess;
+ struct tcm_usbg_nexus *tv_nexus;
+ int ret = -ENODEV;
+
+ mutex_lock(&tpg->tpg_mutex);
+ tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus)
+ goto out;
+
+ se_sess = tv_nexus->tvn_se_sess;
+ if (!se_sess)
+ goto out;
+
+ if (atomic_read(&tpg->tpg_port_count)) {
+ ret = -EPERM;
+ pr_err("Unable to remove Host I_T Nexus with"
+ " active TPG port count: %d\n",
+ atomic_read(&tpg->tpg_port_count));
+ goto out;
+ }
+
+ pr_debug("Removing I_T Nexus to Initiator Port: %s\n",
+ tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+ /*
+	 * Release the SCSI I_T Nexus to the emulated usb-gadget Target Port
+ */
+ transport_deregister_session(tv_nexus->tvn_se_sess);
+ tpg->tpg_nexus = NULL;
+
+	kfree(tv_nexus);
+	ret = 0;
+out:
+	mutex_unlock(&tpg->tpg_mutex);
+	return ret;
+}
+
+static ssize_t tcm_usbg_tpg_store_nexus(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+ unsigned char i_port[USBG_NAMELEN], *ptr;
+ int ret;
+
+ if (!strncmp(page, "NULL", 4)) {
+ ret = tcm_usbg_drop_nexus(tpg);
+ return (!ret) ? count : ret;
+ }
+ if (strlen(page) > USBG_NAMELEN) {
+		pr_err("Emulated NAA SAS Address: %s, exceeds"
+ " max: %d\n", page, USBG_NAMELEN);
+ return -EINVAL;
+ }
+ snprintf(i_port, USBG_NAMELEN, "%s", page);
+
+ ptr = strstr(i_port, "naa.");
+ if (!ptr) {
+ pr_err("Missing 'naa.' prefix\n");
+ return -EINVAL;
+ }
+
+ if (i_port[strlen(i_port) - 1] == '\n')
+ i_port[strlen(i_port) - 1] = '\0';
+
+ ret = tcm_usbg_make_nexus(tpg, &i_port[4]);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+TF_TPG_BASE_ATTR(tcm_usbg, nexus, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *usbg_base_attrs[] = {
+ &tcm_usbg_tpg_enable.attr,
+ &tcm_usbg_tpg_nexus.attr,
+ NULL,
+};
+
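+/*
+ * Track how many LUNs are linked to this TPG; tcm_usbg_drop_nexus() refuses
+ * to tear down the nexus while this count is non-zero.
+ */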
+static int usbg_port_link(struct se_portal_group *se_tpg, struct se_lun *lun)
+{
+ struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+
+ atomic_inc(&tpg->tpg_port_count);
+ smp_mb__after_atomic_inc();
+ return 0;
+}
+
+static void usbg_port_unlink(struct se_portal_group *se_tpg,
+ struct se_lun *se_lun)
+{
+ struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+
+ atomic_dec(&tpg->tpg_port_count);
+ smp_mb__after_atomic_dec();
+}
+
+static int usbg_check_stop_free(struct se_cmd *se_cmd)
+{
+ struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+ se_cmd);
+
+ kref_put(&cmd->ref, usbg_cmd_release);
+ return 1;
+}
+
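+/* Callbacks the target core invokes for the "usb_gadget" fabric module. */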
+static struct target_core_fabric_ops usbg_ops = {
+ .get_fabric_name = usbg_get_fabric_name,
+ .get_fabric_proto_ident = usbg_get_fabric_proto_ident,
+ .tpg_get_wwn = usbg_get_fabric_wwn,
+ .tpg_get_tag = usbg_get_tag,
+ .tpg_get_default_depth = usbg_get_default_depth,
+ .tpg_get_pr_transport_id = usbg_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = usbg_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = usbg_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = usbg_check_true,
+ .tpg_check_demo_mode_cache = usbg_check_false,
+ .tpg_check_demo_mode_write_protect = usbg_check_false,
+ .tpg_check_prod_mode_write_protect = usbg_check_false,
+ .tpg_alloc_fabric_acl = usbg_alloc_fabric_acl,
+ .tpg_release_fabric_acl = usbg_release_fabric_acl,
+ .tpg_get_inst_index = usbg_tpg_get_inst_index,
+ .new_cmd_map = usbg_new_cmd,
+ .release_cmd = usbg_release_cmd,
+ .shutdown_session = usbg_shutdown_session,
+ .close_session = usbg_close_session,
+ .sess_get_index = usbg_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = usbg_send_write_request,
+ .write_pending_status = usbg_write_pending_status,
+ .set_default_node_attributes = usbg_set_default_node_attrs,
+ .get_task_tag = usbg_get_task_tag,
+ .get_cmd_state = usbg_get_cmd_state,
+ .queue_data_in = usbg_send_read_response,
+ .queue_status = usbg_send_status_response,
+ .queue_tm_rsp = usbg_queue_tm_rsp,
+ .get_fabric_sense_len = usbg_get_fabric_sense_len,
+ .set_fabric_sense_len = usbg_set_fabric_sense_len,
+ .check_stop_free = usbg_check_stop_free,
+
+ .fabric_make_wwn = usbg_make_tport,
+ .fabric_drop_wwn = usbg_drop_tport,
+ .fabric_make_tpg = usbg_make_tpg,
+ .fabric_drop_tpg = usbg_drop_tpg,
+ .fabric_post_link = usbg_port_link,
+ .fabric_pre_unlink = usbg_port_unlink,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = usbg_make_nodeacl,
+ .fabric_drop_nodeacl = usbg_drop_nodeacl,
+};
+
+static int usbg_register_configfs(void)
+{
+ struct target_fabric_configfs *fabric;
+ int ret;
+
+ fabric = target_fabric_configfs_init(THIS_MODULE, "usb_gadget");
+ if (IS_ERR(fabric)) {
+ printk(KERN_ERR "target_fabric_configfs_init() failed\n");
+ return PTR_ERR(fabric);
+ }
+
+ fabric->tf_ops = usbg_ops;
+ TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ ret = target_fabric_configfs_register(fabric);
+ if (ret < 0) {
+ printk(KERN_ERR "target_fabric_configfs_register() failed"
+ " for usb-gadget\n");
+ return ret;
+ }
+ usbg_fabric_configfs = fabric;
+ return 0;
+}
+
+static void usbg_deregister_configfs(void)
+{
+	if (!usbg_fabric_configfs)
+ return;
+
+ target_fabric_configfs_deregister(usbg_fabric_configfs);
+ usbg_fabric_configfs = NULL;
+}
+
+/* Start gadget.c code */
+
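+/*
+ * Interface and endpoint descriptors: alternate setting 0 is BOT (Bulk-Only
+ * Transport), alternate setting 1 is UAS; separate full-, high- and
+ * super-speed endpoint variants follow.
+ */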
+static struct usb_interface_descriptor bot_intf_desc = {
+ .bLength = sizeof(bot_intf_desc),
+ .bDescriptorType = USB_DT_INTERFACE,
+	.bNumEndpoints = 2,
+	.bAlternateSetting = USB_G_ALT_INT_BBB,
+ .bInterfaceClass = USB_CLASS_MASS_STORAGE,
+ .bInterfaceSubClass = USB_SC_SCSI,
+ .bInterfaceProtocol = USB_PR_BULK,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_interface_descriptor uasp_intf_desc = {
+ .bLength = sizeof(uasp_intf_desc),
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 4,
+ .bAlternateSetting = USB_G_ALT_INT_UAS,
+ .bInterfaceClass = USB_CLASS_MASS_STORAGE,
+ .bInterfaceSubClass = USB_SC_SCSI,
+ .bInterfaceProtocol = USB_PR_UAS,
+ /* .iInterface = DYNAMIC */
+};
+
+static struct usb_endpoint_descriptor uasp_bi_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor uasp_fs_bi_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_pipe_usage_descriptor uasp_bi_pipe_desc = {
+ .bLength = sizeof(uasp_bi_pipe_desc),
+ .bDescriptorType = USB_DT_PIPE_USAGE,
+ .bPipeID = DATA_IN_PIPE_ID,
+};
+
+static struct usb_endpoint_descriptor uasp_ss_bi_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
+ .bLength = sizeof(uasp_bi_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_ss_ep_comp_descriptor bot_bi_ep_comp_desc = {
+ .bLength = sizeof(bot_bi_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+};
+
+static struct usb_endpoint_descriptor uasp_bo_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor uasp_fs_bo_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_pipe_usage_descriptor uasp_bo_pipe_desc = {
+ .bLength = sizeof(uasp_bo_pipe_desc),
+ .bDescriptorType = USB_DT_PIPE_USAGE,
+ .bPipeID = DATA_OUT_PIPE_ID,
+};
+
+static struct usb_endpoint_descriptor uasp_ss_bo_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor uasp_bo_ep_comp_desc = {
+ .bLength = sizeof(uasp_bo_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
+};
+
+static struct usb_ss_ep_comp_descriptor bot_bo_ep_comp_desc = {
+ .bLength = sizeof(bot_bo_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_endpoint_descriptor uasp_status_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor uasp_fs_status_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_pipe_usage_descriptor uasp_status_pipe_desc = {
+ .bLength = sizeof(uasp_status_pipe_desc),
+ .bDescriptorType = USB_DT_PIPE_USAGE,
+ .bPipeID = STATUS_PIPE_ID,
+};
+
+static struct usb_endpoint_descriptor uasp_ss_status_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor uasp_status_in_ep_comp_desc = {
+ .bLength = sizeof(uasp_status_in_ep_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
+};
+
+static struct usb_endpoint_descriptor uasp_cmd_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor uasp_fs_cmd_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_pipe_usage_descriptor uasp_cmd_pipe_desc = {
+ .bLength = sizeof(uasp_cmd_pipe_desc),
+ .bDescriptorType = USB_DT_PIPE_USAGE,
+ .bPipeID = CMD_PIPE_ID,
+};
+
+static struct usb_endpoint_descriptor uasp_ss_cmd_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor uasp_cmd_comp_desc = {
+ .bLength = sizeof(uasp_cmd_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_descriptor_header *uasp_fs_function_desc[] = {
+ (struct usb_descriptor_header *) &bot_intf_desc,
+ (struct usb_descriptor_header *) &uasp_fs_bi_desc,
+ (struct usb_descriptor_header *) &uasp_fs_bo_desc,
+
+ (struct usb_descriptor_header *) &uasp_intf_desc,
+ (struct usb_descriptor_header *) &uasp_fs_bi_desc,
+ (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
+ (struct usb_descriptor_header *) &uasp_fs_bo_desc,
+ (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
+ (struct usb_descriptor_header *) &uasp_fs_status_desc,
+ (struct usb_descriptor_header *) &uasp_status_pipe_desc,
+ (struct usb_descriptor_header *) &uasp_fs_cmd_desc,
+ (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
+};
+
+static struct usb_descriptor_header *uasp_hs_function_desc[] = {
+ (struct usb_descriptor_header *) &bot_intf_desc,
+ (struct usb_descriptor_header *) &uasp_bi_desc,
+ (struct usb_descriptor_header *) &uasp_bo_desc,
+
+ (struct usb_descriptor_header *) &uasp_intf_desc,
+ (struct usb_descriptor_header *) &uasp_bi_desc,
+ (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
+ (struct usb_descriptor_header *) &uasp_bo_desc,
+ (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
+ (struct usb_descriptor_header *) &uasp_status_desc,
+ (struct usb_descriptor_header *) &uasp_status_pipe_desc,
+ (struct usb_descriptor_header *) &uasp_cmd_desc,
+ (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *uasp_ss_function_desc[] = {
+ (struct usb_descriptor_header *) &bot_intf_desc,
+ (struct usb_descriptor_header *) &uasp_ss_bi_desc,
+ (struct usb_descriptor_header *) &bot_bi_ep_comp_desc,
+ (struct usb_descriptor_header *) &uasp_ss_bo_desc,
+ (struct usb_descriptor_header *) &bot_bo_ep_comp_desc,
+
+ (struct usb_descriptor_header *) &uasp_intf_desc,
+ (struct usb_descriptor_header *) &uasp_ss_bi_desc,
+ (struct usb_descriptor_header *) &uasp_bi_ep_comp_desc,
+ (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
+ (struct usb_descriptor_header *) &uasp_ss_bo_desc,
+ (struct usb_descriptor_header *) &uasp_bo_ep_comp_desc,
+ (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
+ (struct usb_descriptor_header *) &uasp_ss_status_desc,
+ (struct usb_descriptor_header *) &uasp_status_in_ep_comp_desc,
+ (struct usb_descriptor_header *) &uasp_status_pipe_desc,
+ (struct usb_descriptor_header *) &uasp_ss_cmd_desc,
+ (struct usb_descriptor_header *) &uasp_cmd_comp_desc,
+ (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
+ NULL,
+};
+
+static struct usb_string tcm_us_strings[] = {
+ [0].s = "Bulk Only Transport",
+ [1].s = "USB Attached SCSI",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings tcm_stringtab = {
+ .language = 0x0409,
+ .strings = tcm_us_strings,
+};
+
+static struct usb_gadget_strings *tcm_strings[] = {
+ &tcm_stringtab,
+ NULL,
+};
+
+static void give_back_ep(struct usb_ep **pep)
+{
+ struct usb_ep *ep = *pep;
+ if (!ep)
+ return;
+ ep->driver_data = NULL;
+}
+
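+/*
+ * Bind: grab an interface number, auto-configure the four endpoints against
+ * the SuperSpeed descriptors, then copy the assigned endpoint addresses into
+ * the high-speed and full-speed descriptor variants.
+ */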
+static int usbg_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_uas *fu = to_f_uas(f);
+ struct usb_gadget *gadget = c->cdev->gadget;
+ struct usb_ep *ep;
+ int iface;
+
+ iface = usb_interface_id(c, f);
+ if (iface < 0)
+ return iface;
+
+ bot_intf_desc.bInterfaceNumber = iface;
+ uasp_intf_desc.bInterfaceNumber = iface;
+ fu->iface = iface;
+ ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bi_desc,
+ &uasp_bi_ep_comp_desc);
+ if (!ep)
+ goto ep_fail;
+
+ ep->driver_data = fu;
+ fu->ep_in = ep;
+
+ ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bo_desc,
+ &uasp_bo_ep_comp_desc);
+ if (!ep)
+ goto ep_fail;
+ ep->driver_data = fu;
+ fu->ep_out = ep;
+
+ ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_status_desc,
+ &uasp_status_in_ep_comp_desc);
+ if (!ep)
+ goto ep_fail;
+ ep->driver_data = fu;
+ fu->ep_status = ep;
+
+ ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_cmd_desc,
+ &uasp_cmd_comp_desc);
+ if (!ep)
+ goto ep_fail;
+ ep->driver_data = fu;
+ fu->ep_cmd = ep;
+
+	/* Assume the endpoint addresses are the same for all speeds */
+ uasp_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
+ uasp_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
+ uasp_status_desc.bEndpointAddress =
+ uasp_ss_status_desc.bEndpointAddress;
+ uasp_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
+
+ uasp_fs_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
+ uasp_fs_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
+ uasp_fs_status_desc.bEndpointAddress =
+ uasp_ss_status_desc.bEndpointAddress;
+ uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
+
+ return 0;
+ep_fail:
+ pr_err("Can't claim all required eps\n");
+
+ give_back_ep(&fu->ep_in);
+ give_back_ep(&fu->ep_out);
+ give_back_ep(&fu->ep_status);
+ give_back_ep(&fu->ep_cmd);
+ return -ENOTSUPP;
+}
+
+static void usbg_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_uas *fu = to_f_uas(f);
+
+ kfree(fu);
+}
+
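+/*
+ * Switching alternate settings may sleep, so usbg_set_alt() defers the work
+ * to a work item and returns USB_GADGET_DELAYED_STATUS; the work item then
+ * completes the control transfer via usb_composite_setup_continue().
+ */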
+struct guas_setup_wq {
+ struct work_struct work;
+ struct f_uas *fu;
+ unsigned int alt;
+};
+
+static void usbg_delayed_set_alt(struct work_struct *wq)
+{
+ struct guas_setup_wq *work = container_of(wq, struct guas_setup_wq,
+ work);
+ struct f_uas *fu = work->fu;
+ int alt = work->alt;
+
+ kfree(work);
+
+ if (fu->flags & USBG_IS_BOT)
+ bot_cleanup_old_alt(fu);
+ if (fu->flags & USBG_IS_UAS)
+ uasp_cleanup_old_alt(fu);
+
+ if (alt == USB_G_ALT_INT_BBB)
+ bot_set_alt(fu);
+ else if (alt == USB_G_ALT_INT_UAS)
+ uasp_set_alt(fu);
+ usb_composite_setup_continue(fu->function.config->cdev);
+}
+
+static int usbg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct f_uas *fu = to_f_uas(f);
+
+ if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
+ struct guas_setup_wq *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
+ INIT_WORK(&work->work, usbg_delayed_set_alt);
+ work->fu = fu;
+ work->alt = alt;
+ schedule_work(&work->work);
+ return USB_GADGET_DELAYED_STATUS;
+ }
+ return -EOPNOTSUPP;
+}
+
+static void usbg_disable(struct usb_function *f)
+{
+ struct f_uas *fu = to_f_uas(f);
+
+ if (fu->flags & USBG_IS_UAS)
+ uasp_cleanup_old_alt(fu);
+ else if (fu->flags & USBG_IS_BOT)
+ bot_cleanup_old_alt(fu);
+ fu->flags = 0;
+}
+
+static int usbg_setup(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct f_uas *fu = to_f_uas(f);
+
+ if (!(fu->flags & USBG_IS_BOT))
+ return -EOPNOTSUPP;
+
+ return usbg_bot_setup(f, ctrl);
+}
+
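+/*
+ * Add the target function to a USB configuration: allocate struct f_uas,
+ * assign string IDs for the BOT and UAS interface descriptors and register
+ * the function with the composite layer.
+ */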
+static int tcm_bind_config(struct usb_configuration *c)
+{
+ struct f_uas *fu;
+ int ret;
+
+ fu = kzalloc(sizeof(*fu), GFP_KERNEL);
+ if (!fu)
+ return -ENOMEM;
+ fu->function.name = "Target Function";
+ fu->function.descriptors = uasp_fs_function_desc;
+ fu->function.hs_descriptors = uasp_hs_function_desc;
+ fu->function.ss_descriptors = uasp_ss_function_desc;
+ fu->function.strings = tcm_strings;
+ fu->function.bind = usbg_bind;
+ fu->function.unbind = usbg_unbind;
+ fu->function.set_alt = usbg_set_alt;
+ fu->function.setup = usbg_setup;
+ fu->function.disable = usbg_disable;
+ fu->tpg = the_only_tpg_I_currently_have;
+
+ /* BOT interface string */
+ ret = usb_string_id(c->cdev);
+ if (ret < 0)
+ goto err;
+ tcm_us_strings[0].id = ret;
+ bot_intf_desc.iInterface = ret;
+
+	/* UAS interface string */
+ ret = usb_string_id(c->cdev);
+ if (ret < 0)
+ goto err;
+ tcm_us_strings[1].id = ret;
+ uasp_intf_desc.iInterface = ret;
+
+ ret = usb_add_function(c, &fu->function);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ kfree(fu);
+ return ret;
+}
+
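+/*
+ * Gadget-facing entry points: f_tcm_init() stores the connect callback and
+ * registers the configfs fabric; f_tcm_exit() undoes both.
+ */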
+static int f_tcm_init(int (*connect_cb)(bool connect))
+{
+ int ret;
+
+ usbg_connect_cb = connect_cb;
+ ret = usbg_register_configfs();
+ return ret;
+}
+
+static void f_tcm_exit(void)
+{
+ usbg_deregister_configfs();
+ usbg_connect_cb = NULL;
+}
diff --git a/drivers/usb/gadget/tcm_usb_gadget.h b/drivers/usb/gadget/f_tcm.h
similarity index 93%
rename from drivers/usb/gadget/tcm_usb_gadget.h
rename to drivers/usb/gadget/f_tcm.h
index bb18999..bed8435 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.h
+++ b/drivers/usb/gadget/f_tcm.h
@@ -16,13 +16,6 @@
#define UASP_SS_EP_COMP_LOG_STREAMS 4
#define UASP_SS_EP_COMP_NUM_STREAMS (1 << UASP_SS_EP_COMP_LOG_STREAMS)
-#define USB_G_STR_MANUFACTOR 1
-#define USB_G_STR_PRODUCT 2
-#define USB_G_STR_SERIAL 3
-#define USB_G_STR_CONFIG 4
-#define USB_G_STR_INT_UAS 5
-#define USB_G_STR_INT_BBB 6
-
#define USB_G_ALT_INT_BBB 0
#define USB_G_ALT_INT_UAS 1
diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c
index 3e40552..297c183 100644
--- a/drivers/usb/gadget/msm72k_udc.c
+++ b/drivers/usb/gadget/msm72k_udc.c
@@ -190,6 +190,8 @@
/* max power requested by selected configuration */
unsigned b_max_pow;
unsigned chg_current;
+ unsigned chg_type_retry_cnt;
+ bool proprietary_chg;
struct delayed_work chg_det;
struct delayed_work chg_stop;
struct msm_hsusb_gadget_platform_data *pdata;
@@ -294,11 +296,18 @@
{
if ((readl(USB_PORTSC) & PORTSC_LS) == PORTSC_LS)
return USB_CHG_TYPE__WALLCHARGER;
- else
- return USB_CHG_TYPE__SDP;
+ else {
+ if (ui->gadget.speed == USB_SPEED_LOW ||
+ ui->gadget.speed == USB_SPEED_FULL ||
+ ui->gadget.speed == USB_SPEED_HIGH)
+ return USB_CHG_TYPE__SDP;
+ else
+ return USB_CHG_TYPE__INVALID;
+ }
}
#define USB_WALLCHARGER_CHG_CURRENT 1800
+#define USB_PROPRIETARY_CHG_CURRENT 500
static int usb_get_max_power(struct usb_info *ui)
{
struct msm_otg *otg = to_msm_otg(ui->xceiv);
@@ -321,8 +330,10 @@
if (temp == USB_CHG_TYPE__INVALID)
return -ENODEV;
- if (temp == USB_CHG_TYPE__WALLCHARGER)
+ if (temp == USB_CHG_TYPE__WALLCHARGER && !ui->proprietary_chg)
return USB_WALLCHARGER_CHG_CURRENT;
+	else if (temp == USB_CHG_TYPE__WALLCHARGER)
+ return USB_PROPRIETARY_CHG_CURRENT;
if (suspended || !configured)
return 0;
@@ -428,6 +439,17 @@
}
temp = usb_get_chg_type(ui);
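+	/*
+	 * Unknown charger type: retry detection once, then fall back to
+	 * treating it as a proprietary wall charger.
+	 */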
+ if (temp != USB_CHG_TYPE__WALLCHARGER && temp != USB_CHG_TYPE__SDP
+ && !ui->chg_type_retry_cnt) {
+ schedule_delayed_work(&ui->chg_det, USB_CHG_DET_DELAY);
+ ui->chg_type_retry_cnt++;
+ spin_unlock_irqrestore(&ui->lock, flags);
+ return;
+ }
+ if (temp == USB_CHG_TYPE__INVALID) {
+ temp = USB_CHG_TYPE__WALLCHARGER;
+ ui->proprietary_chg = true;
+ }
spin_unlock_irqrestore(&ui->lock, flags);
atomic_set(&otg->chg_type, temp);
@@ -1737,6 +1759,8 @@
ui->gadget.speed = USB_SPEED_UNKNOWN;
ui->usb_state = USB_STATE_NOTATTACHED;
ui->flags |= USB_FLAG_VBUS_OFFLINE;
+ ui->chg_type_retry_cnt = 0;
+ ui->proprietary_chg = false;
}
if (in_interrupt()) {
schedule_work(&ui->work);
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index c46439c..8787768 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -1,4 +1,4 @@
-/* Target based USB-Gadget
+/* Target based USB-Gadget Function
*
* UAS protocol handling, target callbacks, configfs handling,
* BBB (USB Mass Storage Class Bulk-Only (BBB) and Transport protocol handling.
@@ -6,2208 +6,28 @@
* Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
* License: GPLv2 as published by FSF.
*/
-#include <linux/kernel.h>
+
+#include <linux/init.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/configfs.h>
-#include <linux/ctype.h>
-#include <linux/usb/ch9.h>
+
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
-#include <linux/usb/storage.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_tcq.h>
-#include <target/target_core_base.h>
-#include <target/target_core_fabric.h>
-#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
-#include <target/configfs_macros.h>
-#include <asm/unaligned.h>
#include "usbstring.c"
#include "epautoconf.c"
#include "config.c"
#include "composite.c"
-#include "tcm_usb_gadget.h"
-
-static struct target_fabric_configfs *usbg_fabric_configfs;
-
-static inline struct f_uas *to_f_uas(struct usb_function *f)
-{
- return container_of(f, struct f_uas, function);
-}
-
-static void usbg_cmd_release(struct kref *);
-
-static inline void usbg_cleanup_cmd(struct usbg_cmd *cmd)
-{
- kref_put(&cmd->ref, usbg_cmd_release);
-}
-
-/* Start bot.c code */
-
-static int bot_enqueue_cmd_cbw(struct f_uas *fu)
-{
- int ret;
-
- if (fu->flags & USBG_BOT_CMD_PEND)
- return 0;
-
- ret = usb_ep_queue(fu->ep_out, fu->cmd.req, GFP_ATOMIC);
- if (!ret)
- fu->flags |= USBG_BOT_CMD_PEND;
- return ret;
-}
-
-static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
-{
- struct usbg_cmd *cmd = req->context;
- struct f_uas *fu = cmd->fu;
-
- usbg_cleanup_cmd(cmd);
- if (req->status < 0) {
- pr_err("ERR %s(%d)\n", __func__, __LINE__);
- return;
- }
-
- /* CSW completed, wait for next CBW */
- bot_enqueue_cmd_cbw(fu);
-}
-
-static void bot_enqueue_sense_code(struct f_uas *fu, struct usbg_cmd *cmd)
-{
- struct bulk_cs_wrap *csw = &fu->bot_status.csw;
- int ret;
- u8 *sense;
- unsigned int csw_stat;
-
- csw_stat = cmd->csw_code;
-
- /*
- * We can't send SENSE as a response. So we take ASC & ASCQ from our
- * sense buffer and queue it and hope the host sends a REQUEST_SENSE
- * command where it learns why we failed.
- */
- sense = cmd->sense_iu.sense;
-
- csw->Tag = cmd->bot_tag;
- csw->Status = csw_stat;
- fu->bot_status.req->context = cmd;
- ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_ATOMIC);
- if (ret)
- pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
-}
-
-static void bot_err_compl(struct usb_ep *ep, struct usb_request *req)
-{
- struct usbg_cmd *cmd = req->context;
- struct f_uas *fu = cmd->fu;
-
- if (req->status < 0)
- pr_err("ERR %s(%d)\n", __func__, __LINE__);
-
- if (cmd->data_len) {
- if (cmd->data_len > ep->maxpacket) {
- req->length = ep->maxpacket;
- cmd->data_len -= ep->maxpacket;
- } else {
- req->length = cmd->data_len;
- cmd->data_len = 0;
- }
-
- usb_ep_queue(ep, req, GFP_ATOMIC);
- return ;
- }
- bot_enqueue_sense_code(fu, cmd);
-}
-
-static void bot_send_bad_status(struct usbg_cmd *cmd)
-{
- struct f_uas *fu = cmd->fu;
- struct bulk_cs_wrap *csw = &fu->bot_status.csw;
- struct usb_request *req;
- struct usb_ep *ep;
-
- csw->Residue = cpu_to_le32(cmd->data_len);
-
- if (cmd->data_len) {
- if (cmd->is_read) {
- ep = fu->ep_in;
- req = fu->bot_req_in;
- } else {
- ep = fu->ep_out;
- req = fu->bot_req_out;
- }
-
- if (cmd->data_len > fu->ep_in->maxpacket) {
- req->length = ep->maxpacket;
- cmd->data_len -= ep->maxpacket;
- } else {
- req->length = cmd->data_len;
- cmd->data_len = 0;
- }
- req->complete = bot_err_compl;
- req->context = cmd;
- req->buf = fu->cmd.buf;
- usb_ep_queue(ep, req, GFP_KERNEL);
- } else {
- bot_enqueue_sense_code(fu, cmd);
- }
-}
-
-static int bot_send_status(struct usbg_cmd *cmd, bool moved_data)
-{
- struct f_uas *fu = cmd->fu;
- struct bulk_cs_wrap *csw = &fu->bot_status.csw;
- int ret;
-
- if (cmd->se_cmd.scsi_status == SAM_STAT_GOOD) {
- if (!moved_data && cmd->data_len) {
- /*
- * the host wants to move data, we don't. Fill / empty
- * the pipe and then send the csw with reside set.
- */
- cmd->csw_code = US_BULK_STAT_OK;
- bot_send_bad_status(cmd);
- return 0;
- }
-
- csw->Tag = cmd->bot_tag;
- csw->Residue = cpu_to_le32(0);
- csw->Status = US_BULK_STAT_OK;
- fu->bot_status.req->context = cmd;
-
- ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_KERNEL);
- if (ret)
- pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
- } else {
- cmd->csw_code = US_BULK_STAT_FAIL;
- bot_send_bad_status(cmd);
- }
- return 0;
-}
-
-/*
- * Called after command (no data transfer) or after the write (to device)
- * operation is completed
- */
-static int bot_send_status_response(struct usbg_cmd *cmd)
-{
- bool moved_data = false;
-
- if (!cmd->is_read)
- moved_data = true;
- return bot_send_status(cmd, moved_data);
-}
-
-/* Read request completed, now we have to send the CSW */
-static void bot_read_compl(struct usb_ep *ep, struct usb_request *req)
-{
- struct usbg_cmd *cmd = req->context;
-
- if (req->status < 0)
- pr_err("ERR %s(%d)\n", __func__, __LINE__);
-
- bot_send_status(cmd, true);
-}
-
-static int bot_send_read_response(struct usbg_cmd *cmd)
-{
- struct f_uas *fu = cmd->fu;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct usb_gadget *gadget = fuas_to_gadget(fu);
- int ret;
-
- if (!cmd->data_len) {
- cmd->csw_code = US_BULK_STAT_PHASE;
- bot_send_bad_status(cmd);
- return 0;
- }
-
- if (!gadget->sg_supported) {
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
- if (!cmd->data_buf)
- return -ENOMEM;
-
- sg_copy_to_buffer(se_cmd->t_data_sg,
- se_cmd->t_data_nents,
- cmd->data_buf,
- se_cmd->data_length);
-
- fu->bot_req_in->buf = cmd->data_buf;
- } else {
- fu->bot_req_in->buf = NULL;
- fu->bot_req_in->num_sgs = se_cmd->t_data_nents;
- fu->bot_req_in->sg = se_cmd->t_data_sg;
- }
-
- fu->bot_req_in->complete = bot_read_compl;
- fu->bot_req_in->length = se_cmd->data_length;
- fu->bot_req_in->context = cmd;
- ret = usb_ep_queue(fu->ep_in, fu->bot_req_in, GFP_ATOMIC);
- if (ret)
- pr_err("%s(%d)\n", __func__, __LINE__);
- return 0;
-}
-
-static void usbg_data_write_cmpl(struct usb_ep *, struct usb_request *);
-static int usbg_prepare_w_request(struct usbg_cmd *, struct usb_request *);
-
-static int bot_send_write_request(struct usbg_cmd *cmd)
-{
- struct f_uas *fu = cmd->fu;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct usb_gadget *gadget = fuas_to_gadget(fu);
- int ret;
-
- init_completion(&cmd->write_complete);
- cmd->fu = fu;
-
- if (!cmd->data_len) {
- cmd->csw_code = US_BULK_STAT_PHASE;
- return -EINVAL;
- }
-
- if (!gadget->sg_supported) {
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
- if (!cmd->data_buf)
- return -ENOMEM;
-
- fu->bot_req_out->buf = cmd->data_buf;
- } else {
- fu->bot_req_out->buf = NULL;
- fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
- fu->bot_req_out->sg = se_cmd->t_data_sg;
- }
-
- fu->bot_req_out->complete = usbg_data_write_cmpl;
- fu->bot_req_out->length = se_cmd->data_length;
- fu->bot_req_out->context = cmd;
-
- ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
- if (ret)
- goto cleanup;
- ret = usb_ep_queue(fu->ep_out, fu->bot_req_out, GFP_KERNEL);
- if (ret)
- pr_err("%s(%d)\n", __func__, __LINE__);
-
- wait_for_completion(&cmd->write_complete);
- transport_generic_process_write(se_cmd);
-cleanup:
- return ret;
-}
-
-static int bot_submit_command(struct f_uas *, void *, unsigned int);
-
-static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
-{
- struct f_uas *fu = req->context;
- int ret;
-
- fu->flags &= ~USBG_BOT_CMD_PEND;
-
- if (req->status < 0)
- return;
-
- ret = bot_submit_command(fu, req->buf, req->actual);
- if (ret)
- pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
-}
-
-static int bot_prepare_reqs(struct f_uas *fu)
-{
- int ret;
-
- fu->bot_req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
- if (!fu->bot_req_in)
- goto err;
-
- fu->bot_req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
- if (!fu->bot_req_out)
- goto err_out;
-
- fu->cmd.req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
- if (!fu->cmd.req)
- goto err_cmd;
-
- fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
- if (!fu->bot_status.req)
- goto err_sts;
-
- fu->bot_status.req->buf = &fu->bot_status.csw;
- fu->bot_status.req->length = US_BULK_CS_WRAP_LEN;
- fu->bot_status.req->complete = bot_status_complete;
- fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
-
- fu->cmd.buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
- if (!fu->cmd.buf)
- goto err_buf;
-
- fu->cmd.req->complete = bot_cmd_complete;
- fu->cmd.req->buf = fu->cmd.buf;
- fu->cmd.req->length = fu->ep_out->maxpacket;
- fu->cmd.req->context = fu;
-
- ret = bot_enqueue_cmd_cbw(fu);
- if (ret)
- goto err_queue;
- return 0;
-err_queue:
- kfree(fu->cmd.buf);
- fu->cmd.buf = NULL;
-err_buf:
- usb_ep_free_request(fu->ep_in, fu->bot_status.req);
-err_sts:
- usb_ep_free_request(fu->ep_out, fu->cmd.req);
- fu->cmd.req = NULL;
-err_cmd:
- usb_ep_free_request(fu->ep_out, fu->bot_req_out);
- fu->bot_req_out = NULL;
-err_out:
- usb_ep_free_request(fu->ep_in, fu->bot_req_in);
- fu->bot_req_in = NULL;
-err:
- pr_err("BOT: endpoint setup failed\n");
- return -ENOMEM;
-}
-
-void bot_cleanup_old_alt(struct f_uas *fu)
-{
- if (!(fu->flags & USBG_ENABLED))
- return;
-
- usb_ep_disable(fu->ep_in);
- usb_ep_disable(fu->ep_out);
-
- if (!fu->bot_req_in)
- return;
-
- usb_ep_free_request(fu->ep_in, fu->bot_req_in);
- usb_ep_free_request(fu->ep_out, fu->bot_req_out);
- usb_ep_free_request(fu->ep_out, fu->cmd.req);
- usb_ep_free_request(fu->ep_out, fu->bot_status.req);
-
- kfree(fu->cmd.buf);
-
- fu->bot_req_in = NULL;
- fu->bot_req_out = NULL;
- fu->cmd.req = NULL;
- fu->bot_status.req = NULL;
- fu->cmd.buf = NULL;
-}
-
-static void bot_set_alt(struct f_uas *fu)
-{
- struct usb_function *f = &fu->function;
- struct usb_gadget *gadget = f->config->cdev->gadget;
- int ret;
-
- fu->flags = USBG_IS_BOT;
-
- config_ep_by_speed(gadget, f, fu->ep_in);
- ret = usb_ep_enable(fu->ep_in);
- if (ret)
- goto err_b_in;
-
- config_ep_by_speed(gadget, f, fu->ep_out);
- ret = usb_ep_enable(fu->ep_out);
- if (ret)
- goto err_b_out;
-
- ret = bot_prepare_reqs(fu);
- if (ret)
- goto err_wq;
- fu->flags |= USBG_ENABLED;
- pr_info("Using the BOT protocol\n");
- return;
-err_wq:
- usb_ep_disable(fu->ep_out);
-err_b_out:
- usb_ep_disable(fu->ep_in);
-err_b_in:
- fu->flags = USBG_IS_BOT;
-}
-
-static int usbg_bot_setup(struct usb_function *f,
- const struct usb_ctrlrequest *ctrl)
-{
- struct f_uas *fu = to_f_uas(f);
- struct usb_composite_dev *cdev = f->config->cdev;
- u16 w_value = le16_to_cpu(ctrl->wValue);
- u16 w_length = le16_to_cpu(ctrl->wLength);
- int luns;
- u8 *ret_lun;
-
- switch (ctrl->bRequest) {
- case US_BULK_GET_MAX_LUN:
- if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_CLASS |
- USB_RECIP_INTERFACE))
- return -ENOTSUPP;
-
- if (w_length < 1)
- return -EINVAL;
- if (w_value != 0)
- return -EINVAL;
- luns = atomic_read(&fu->tpg->tpg_port_count);
- if (!luns) {
- pr_err("No LUNs configured?\n");
- return -EINVAL;
- }
- /*
- * If 4 LUNs are present we return 3 i.e. LUN 0..3 can be
- * accessed. The upper limit is 0xf
- */
- luns--;
- if (luns > 0xf) {
- pr_info_once("Limiting the number of luns to 16\n");
- luns = 0xf;
- }
- ret_lun = cdev->req->buf;
- *ret_lun = luns;
- cdev->req->length = 1;
- return usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
- break;
-
- case US_BULK_RESET_REQUEST:
- /* XXX maybe we should remove previous requests for IN + OUT */
- bot_enqueue_cmd_cbw(fu);
- return 0;
- break;
- };
- return -ENOTSUPP;
-}
-
-/* Start uas.c code */
-
-static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
-{
- /* We have either all three allocated or none */
- if (!stream->req_in)
- return;
-
- usb_ep_free_request(fu->ep_in, stream->req_in);
- usb_ep_free_request(fu->ep_out, stream->req_out);
- usb_ep_free_request(fu->ep_status, stream->req_status);
-
- stream->req_in = NULL;
- stream->req_out = NULL;
- stream->req_status = NULL;
-}
-
-static void uasp_free_cmdreq(struct f_uas *fu)
-{
- usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
- kfree(fu->cmd.buf);
- fu->cmd.req = NULL;
- fu->cmd.buf = NULL;
-}
-
-static void uasp_cleanup_old_alt(struct f_uas *fu)
-{
- int i;
-
- if (!(fu->flags & USBG_ENABLED))
- return;
-
- usb_ep_disable(fu->ep_in);
- usb_ep_disable(fu->ep_out);
- usb_ep_disable(fu->ep_status);
- usb_ep_disable(fu->ep_cmd);
-
- for (i = 0; i < UASP_SS_EP_COMP_NUM_STREAMS; i++)
- uasp_cleanup_one_stream(fu, &fu->stream[i]);
- uasp_free_cmdreq(fu);
-}
-
-static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
-
-static int uasp_prepare_r_request(struct usbg_cmd *cmd)
-{
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct f_uas *fu = cmd->fu;
- struct usb_gadget *gadget = fuas_to_gadget(fu);
- struct uas_stream *stream = cmd->stream;
-
- if (!gadget->sg_supported) {
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
- if (!cmd->data_buf)
- return -ENOMEM;
-
- sg_copy_to_buffer(se_cmd->t_data_sg,
- se_cmd->t_data_nents,
- cmd->data_buf,
- se_cmd->data_length);
-
- stream->req_in->buf = cmd->data_buf;
- } else {
- stream->req_in->buf = NULL;
- stream->req_in->num_sgs = se_cmd->t_data_nents;
- stream->req_in->sg = se_cmd->t_data_sg;
- }
-
- stream->req_in->complete = uasp_status_data_cmpl;
- stream->req_in->length = se_cmd->data_length;
- stream->req_in->context = cmd;
-
- cmd->state = UASP_SEND_STATUS;
- return 0;
-}
-
-static void uasp_prepare_status(struct usbg_cmd *cmd)
-{
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct sense_iu *iu = &cmd->sense_iu;
- struct uas_stream *stream = cmd->stream;
-
- cmd->state = UASP_QUEUE_COMMAND;
- iu->iu_id = IU_ID_STATUS;
- iu->tag = cpu_to_be16(cmd->tag);
-
- /*
- * iu->status_qual = cpu_to_be16(STATUS QUALIFIER SAM-4. Where R U?);
- */
- iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
- iu->status = se_cmd->scsi_status;
- stream->req_status->context = cmd;
- stream->req_status->length = se_cmd->scsi_sense_length + 16;
- stream->req_status->buf = iu;
- stream->req_status->complete = uasp_status_data_cmpl;
-}
-
-static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
-{
- struct usbg_cmd *cmd = req->context;
- struct uas_stream *stream = cmd->stream;
- struct f_uas *fu = cmd->fu;
- int ret;
-
- if (req->status < 0)
- goto cleanup;
-
- switch (cmd->state) {
- case UASP_SEND_DATA:
- ret = uasp_prepare_r_request(cmd);
- if (ret)
- goto cleanup;
- ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
- if (ret)
- pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
- break;
-
- case UASP_RECEIVE_DATA:
- ret = usbg_prepare_w_request(cmd, stream->req_out);
- if (ret)
- goto cleanup;
- ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
- if (ret)
- pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
- break;
-
- case UASP_SEND_STATUS:
- uasp_prepare_status(cmd);
- ret = usb_ep_queue(fu->ep_status, stream->req_status,
- GFP_ATOMIC);
- if (ret)
- pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
- break;
-
- case UASP_QUEUE_COMMAND:
- usbg_cleanup_cmd(cmd);
- usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
- break;
-
- default:
- BUG();
- };
- return;
-
-cleanup:
- usbg_cleanup_cmd(cmd);
-}
-
-static int uasp_send_status_response(struct usbg_cmd *cmd)
-{
- struct f_uas *fu = cmd->fu;
- struct uas_stream *stream = cmd->stream;
- struct sense_iu *iu = &cmd->sense_iu;
-
- iu->tag = cpu_to_be16(cmd->tag);
- stream->req_status->complete = uasp_status_data_cmpl;
- stream->req_status->context = cmd;
- cmd->fu = fu;
- uasp_prepare_status(cmd);
- return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
-}
-
-static int uasp_send_read_response(struct usbg_cmd *cmd)
-{
- struct f_uas *fu = cmd->fu;
- struct uas_stream *stream = cmd->stream;
- struct sense_iu *iu = &cmd->sense_iu;
- int ret;
-
- cmd->fu = fu;
-
- iu->tag = cpu_to_be16(cmd->tag);
- if (fu->flags & USBG_USE_STREAMS) {
-
- ret = uasp_prepare_r_request(cmd);
- if (ret)
- goto out;
- ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
- if (ret) {
- pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
- kfree(cmd->data_buf);
- cmd->data_buf = NULL;
- }
-
- } else {
-
- iu->iu_id = IU_ID_READ_READY;
- iu->tag = cpu_to_be16(cmd->tag);
-
- stream->req_status->complete = uasp_status_data_cmpl;
- stream->req_status->context = cmd;
-
- cmd->state = UASP_SEND_DATA;
- stream->req_status->buf = iu;
- stream->req_status->length = sizeof(struct iu);
-
- ret = usb_ep_queue(fu->ep_status, stream->req_status,
- GFP_ATOMIC);
- if (ret)
- pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
- }
-out:
- return ret;
-}
-
-static int uasp_send_write_request(struct usbg_cmd *cmd)
-{
- struct f_uas *fu = cmd->fu;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct uas_stream *stream = cmd->stream;
- struct sense_iu *iu = &cmd->sense_iu;
- int ret;
-
- init_completion(&cmd->write_complete);
- cmd->fu = fu;
-
- iu->tag = cpu_to_be16(cmd->tag);
-
- if (fu->flags & USBG_USE_STREAMS) {
-
- ret = usbg_prepare_w_request(cmd, stream->req_out);
- if (ret)
- goto cleanup;
- ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
- if (ret)
- pr_err("%s(%d)\n", __func__, __LINE__);
-
- } else {
-
- iu->iu_id = IU_ID_WRITE_READY;
- iu->tag = cpu_to_be16(cmd->tag);
-
- stream->req_status->complete = uasp_status_data_cmpl;
- stream->req_status->context = cmd;
-
- cmd->state = UASP_RECEIVE_DATA;
- stream->req_status->buf = iu;
- stream->req_status->length = sizeof(struct iu);
-
- ret = usb_ep_queue(fu->ep_status, stream->req_status,
- GFP_ATOMIC);
- if (ret)
- pr_err("%s(%d)\n", __func__, __LINE__);
- }
-
- wait_for_completion(&cmd->write_complete);
- transport_generic_process_write(se_cmd);
-cleanup:
- return ret;
-}
-
-static int usbg_submit_command(struct f_uas *, void *, unsigned int);
-
-static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
-{
- struct f_uas *fu = req->context;
- int ret;
-
- if (req->status < 0)
- return;
-
- ret = usbg_submit_command(fu, req->buf, req->actual);
- /*
- * Once we tune for performance enqueue the command req here again so
- * we can receive a second command while we processing this one. Pay
- * attention to properly sync STAUS endpoint with DATA IN + OUT so you
- * don't break HS.
- */
- if (!ret)
- return;
- usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
-}
-
-static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
-{
- stream->req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
- if (!stream->req_in)
- goto out;
-
- stream->req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
- if (!stream->req_out)
- goto err_out;
-
- stream->req_status = usb_ep_alloc_request(fu->ep_status, GFP_KERNEL);
- if (!stream->req_status)
- goto err_sts;
-
- return 0;
-err_sts:
- usb_ep_free_request(fu->ep_status, stream->req_status);
- stream->req_status = NULL;
-err_out:
- usb_ep_free_request(fu->ep_out, stream->req_out);
- stream->req_out = NULL;
-out:
- return -ENOMEM;
-}
-
-static int uasp_alloc_cmd(struct f_uas *fu)
-{
- fu->cmd.req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
- if (!fu->cmd.req)
- goto err;
-
- fu->cmd.buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
- if (!fu->cmd.buf)
- goto err_buf;
-
- fu->cmd.req->complete = uasp_cmd_complete;
- fu->cmd.req->buf = fu->cmd.buf;
- fu->cmd.req->length = fu->ep_cmd->maxpacket;
- fu->cmd.req->context = fu;
- return 0;
-
-err_buf:
- usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
-err:
- return -ENOMEM;
-}
-
-static void uasp_setup_stream_res(struct f_uas *fu, int max_streams)
-{
- int i;
-
- for (i = 0; i < max_streams; i++) {
- struct uas_stream *s = &fu->stream[i];
-
- s->req_in->stream_id = i + 1;
- s->req_out->stream_id = i + 1;
- s->req_status->stream_id = i + 1;
- }
-}
-
-static int uasp_prepare_reqs(struct f_uas *fu)
-{
- int ret;
- int i;
- int max_streams;
-
- if (fu->flags & USBG_USE_STREAMS)
- max_streams = UASP_SS_EP_COMP_NUM_STREAMS;
- else
- max_streams = 1;
-
- for (i = 0; i < max_streams; i++) {
- ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
- if (ret)
- goto err_cleanup;
- }
-
- ret = uasp_alloc_cmd(fu);
- if (ret)
- goto err_free_stream;
- uasp_setup_stream_res(fu, max_streams);
-
- ret = usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
- if (ret)
- goto err_free_stream;
-
- return 0;
-
-err_free_stream:
- uasp_free_cmdreq(fu);
-
-err_cleanup:
- if (i) {
- do {
- uasp_cleanup_one_stream(fu, &fu->stream[i - 1]);
- i--;
- } while (i);
- }
- pr_err("UASP: endpoint setup failed\n");
- return ret;
-}
-
-static void uasp_set_alt(struct f_uas *fu)
-{
- struct usb_function *f = &fu->function;
- struct usb_gadget *gadget = f->config->cdev->gadget;
- int ret;
-
- fu->flags = USBG_IS_UAS;
-
- if (gadget->speed == USB_SPEED_SUPER)
- fu->flags |= USBG_USE_STREAMS;
-
- config_ep_by_speed(gadget, f, fu->ep_in);
- ret = usb_ep_enable(fu->ep_in);
- if (ret)
- goto err_b_in;
-
- config_ep_by_speed(gadget, f, fu->ep_out);
- ret = usb_ep_enable(fu->ep_out);
- if (ret)
- goto err_b_out;
-
- config_ep_by_speed(gadget, f, fu->ep_cmd);
- ret = usb_ep_enable(fu->ep_cmd);
- if (ret)
- goto err_cmd;
- config_ep_by_speed(gadget, f, fu->ep_status);
- ret = usb_ep_enable(fu->ep_status);
- if (ret)
- goto err_status;
-
- ret = uasp_prepare_reqs(fu);
- if (ret)
- goto err_wq;
- fu->flags |= USBG_ENABLED;
-
- pr_info("Using the UAS protocol\n");
- return;
-err_wq:
- usb_ep_disable(fu->ep_status);
-err_status:
- usb_ep_disable(fu->ep_cmd);
-err_cmd:
- usb_ep_disable(fu->ep_out);
-err_b_out:
- usb_ep_disable(fu->ep_in);
-err_b_in:
- fu->flags = 0;
-}
-
-static int get_cmd_dir(const unsigned char *cdb)
-{
- int ret;
-
- switch (cdb[0]) {
- case READ_6:
- case READ_10:
- case READ_12:
- case READ_16:
- case INQUIRY:
- case MODE_SENSE:
- case MODE_SENSE_10:
- case SERVICE_ACTION_IN:
- case MAINTENANCE_IN:
- case PERSISTENT_RESERVE_IN:
- case SECURITY_PROTOCOL_IN:
- case ACCESS_CONTROL_IN:
- case REPORT_LUNS:
- case READ_BLOCK_LIMITS:
- case READ_POSITION:
- case READ_CAPACITY:
- case READ_TOC:
- case READ_FORMAT_CAPACITIES:
- case REQUEST_SENSE:
- ret = DMA_FROM_DEVICE;
- break;
-
- case WRITE_6:
- case WRITE_10:
- case WRITE_12:
- case WRITE_16:
- case MODE_SELECT:
- case MODE_SELECT_10:
- case WRITE_VERIFY:
- case WRITE_VERIFY_12:
- case PERSISTENT_RESERVE_OUT:
- case MAINTENANCE_OUT:
- case SECURITY_PROTOCOL_OUT:
- case ACCESS_CONTROL_OUT:
- ret = DMA_TO_DEVICE;
- break;
- case ALLOW_MEDIUM_REMOVAL:
- case TEST_UNIT_READY:
- case SYNCHRONIZE_CACHE:
- case START_STOP:
- case ERASE:
- case REZERO_UNIT:
- case SEEK_10:
- case SPACE:
- case VERIFY:
- case WRITE_FILEMARKS:
- ret = DMA_NONE;
- break;
- default:
- pr_warn("target: Unknown data direction for SCSI Opcode "
- "0x%02x\n", cdb[0]);
- ret = -EINVAL;
- }
- return ret;
-}
-
-static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
-{
- struct usbg_cmd *cmd = req->context;
- struct se_cmd *se_cmd = &cmd->se_cmd;
-
- if (req->status < 0) {
- pr_err("%s() state %d transfer failed\n", __func__, cmd->state);
- goto cleanup;
- }
-
- if (req->num_sgs == 0) {
- sg_copy_from_buffer(se_cmd->t_data_sg,
- se_cmd->t_data_nents,
- cmd->data_buf,
- se_cmd->data_length);
- }
-
- complete(&cmd->write_complete);
- return;
-
-cleanup:
- usbg_cleanup_cmd(cmd);
-}
-
-static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
-{
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct f_uas *fu = cmd->fu;
- struct usb_gadget *gadget = fuas_to_gadget(fu);
-
- if (!gadget->sg_supported) {
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
- if (!cmd->data_buf)
- return -ENOMEM;
-
- req->buf = cmd->data_buf;
- } else {
- req->buf = NULL;
- req->num_sgs = se_cmd->t_data_nents;
- req->sg = se_cmd->t_data_sg;
- }
-
- req->complete = usbg_data_write_cmpl;
- req->length = se_cmd->data_length;
- req->context = cmd;
- return 0;
-}
-
-static int usbg_send_status_response(struct se_cmd *se_cmd)
-{
- struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
- se_cmd);
- struct f_uas *fu = cmd->fu;
-
- if (fu->flags & USBG_IS_BOT)
- return bot_send_status_response(cmd);
- else
- return uasp_send_status_response(cmd);
-}
-
-static int usbg_send_write_request(struct se_cmd *se_cmd)
-{
- struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
- se_cmd);
- struct f_uas *fu = cmd->fu;
-
- if (fu->flags & USBG_IS_BOT)
- return bot_send_write_request(cmd);
- else
- return uasp_send_write_request(cmd);
-}
-
-static int usbg_send_read_response(struct se_cmd *se_cmd)
-{
- struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
- se_cmd);
- struct f_uas *fu = cmd->fu;
-
- if (fu->flags & USBG_IS_BOT)
- return bot_send_read_response(cmd);
- else
- return uasp_send_read_response(cmd);
-}
-
-static void usbg_cmd_work(struct work_struct *work)
-{
- struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
- struct se_cmd *se_cmd;
- struct tcm_usbg_nexus *tv_nexus;
- struct usbg_tpg *tpg;
- int dir;
-
- se_cmd = &cmd->se_cmd;
- tpg = cmd->fu->tpg;
- tv_nexus = tpg->tpg_nexus;
- dir = get_cmd_dir(cmd->cmd_buf);
- if (dir < 0) {
- transport_init_se_cmd(se_cmd,
- tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
- tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
- cmd->prio_attr, cmd->sense_iu.sense);
-
- transport_send_check_condition_and_sense(se_cmd,
- TCM_UNSUPPORTED_SCSI_OPCODE, 1);
- usbg_cleanup_cmd(cmd);
- return;
- }
-
- target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
- cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
- 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE);
-}
-
-static int usbg_submit_command(struct f_uas *fu,
- void *cmdbuf, unsigned int len)
-{
- struct command_iu *cmd_iu = cmdbuf;
- struct usbg_cmd *cmd;
- struct usbg_tpg *tpg;
- struct se_cmd *se_cmd;
- struct tcm_usbg_nexus *tv_nexus;
- u32 cmd_len;
- int ret;
-
- if (cmd_iu->iu_id != IU_ID_COMMAND) {
- pr_err("Unsupported type %d\n", cmd_iu->iu_id);
- return -EINVAL;
- }
-
- cmd = kzalloc(sizeof *cmd, GFP_ATOMIC);
- if (!cmd)
- return -ENOMEM;
-
- cmd->fu = fu;
-
- /* XXX until I figure out why I can't free in on complete */
- kref_init(&cmd->ref);
- kref_get(&cmd->ref);
-
- tpg = fu->tpg;
- cmd_len = (cmd_iu->len & ~0x3) + 16;
- if (cmd_len > USBG_MAX_CMD)
- goto err;
-
- memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
-
- cmd->tag = be16_to_cpup(&cmd_iu->tag);
- if (fu->flags & USBG_USE_STREAMS) {
- if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
- goto err;
- if (!cmd->tag)
- cmd->stream = &fu->stream[0];
- else
- cmd->stream = &fu->stream[cmd->tag - 1];
- } else {
- cmd->stream = &fu->stream[0];
- }
-
- tv_nexus = tpg->tpg_nexus;
- if (!tv_nexus) {
- pr_err("Missing nexus, ignoring command\n");
- goto err;
- }
-
- switch (cmd_iu->prio_attr & 0x7) {
- case UAS_HEAD_TAG:
- cmd->prio_attr = MSG_HEAD_TAG;
- break;
- case UAS_ORDERED_TAG:
- cmd->prio_attr = MSG_ORDERED_TAG;
- break;
- case UAS_ACA:
- cmd->prio_attr = MSG_ACA_TAG;
- break;
- default:
- pr_debug_once("Unsupported prio_attr: %02x.\n",
- cmd_iu->prio_attr);
- case UAS_SIMPLE_TAG:
- cmd->prio_attr = MSG_SIMPLE_TAG;
- break;
- }
-
- se_cmd = &cmd->se_cmd;
- cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
-
- INIT_WORK(&cmd->work, usbg_cmd_work);
- ret = queue_work(tpg->workqueue, &cmd->work);
- if (ret < 0)
- goto err;
-
- return 0;
-err:
- kfree(cmd);
- return -EINVAL;
-}
-
-static void bot_cmd_work(struct work_struct *work)
-{
- struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
- struct se_cmd *se_cmd;
- struct tcm_usbg_nexus *tv_nexus;
- struct usbg_tpg *tpg;
- int dir;
-
- se_cmd = &cmd->se_cmd;
- tpg = cmd->fu->tpg;
- tv_nexus = tpg->tpg_nexus;
- dir = get_cmd_dir(cmd->cmd_buf);
- if (dir < 0) {
- transport_init_se_cmd(se_cmd,
- tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
- tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
- cmd->prio_attr, cmd->sense_iu.sense);
-
- transport_send_check_condition_and_sense(se_cmd,
- TCM_UNSUPPORTED_SCSI_OPCODE, 1);
- usbg_cleanup_cmd(cmd);
- return;
- }
-
- target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
- cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
- cmd->data_len, cmd->prio_attr, dir, 0);
-}
-
-static int bot_submit_command(struct f_uas *fu,
- void *cmdbuf, unsigned int len)
-{
- struct bulk_cb_wrap *cbw = cmdbuf;
- struct usbg_cmd *cmd;
- struct usbg_tpg *tpg;
- struct se_cmd *se_cmd;
- struct tcm_usbg_nexus *tv_nexus;
- u32 cmd_len;
- int ret;
-
- if (cbw->Signature != cpu_to_le32(US_BULK_CB_SIGN)) {
- pr_err("Wrong signature on CBW\n");
- return -EINVAL;
- }
- if (len != 31) {
- pr_err("Wrong length for CBW\n");
- return -EINVAL;
- }
-
- cmd_len = cbw->Length;
- if (cmd_len < 1 || cmd_len > 16)
- return -EINVAL;
-
- cmd = kzalloc(sizeof *cmd, GFP_ATOMIC);
- if (!cmd)
- return -ENOMEM;
-
- cmd->fu = fu;
-
- /* XXX until I figure out why I can't free in on complete */
- kref_init(&cmd->ref);
- kref_get(&cmd->ref);
-
- tpg = fu->tpg;
-
- memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
-
- cmd->bot_tag = cbw->Tag;
-
- tv_nexus = tpg->tpg_nexus;
- if (!tv_nexus) {
- pr_err("Missing nexus, ignoring command\n");
- goto err;
- }
-
- cmd->prio_attr = MSG_SIMPLE_TAG;
- se_cmd = &cmd->se_cmd;
- cmd->unpacked_lun = cbw->Lun;
- cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
- cmd->data_len = le32_to_cpu(cbw->DataTransferLength);
-
- INIT_WORK(&cmd->work, bot_cmd_work);
- ret = queue_work(tpg->workqueue, &cmd->work);
- if (ret < 0)
- goto err;
-
- return 0;
-err:
- kfree(cmd);
- return -EINVAL;
-}
-
-/* Start fabric.c code */
-
-static int usbg_check_true(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static int usbg_check_false(struct se_portal_group *se_tpg)
-{
- return 0;
-}
-
-static char *usbg_get_fabric_name(void)
-{
- return "usb_gadget";
-}
-
-static u8 usbg_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- struct usbg_tpg *tpg = container_of(se_tpg,
- struct usbg_tpg, se_tpg);
- struct usbg_tport *tport = tpg->tport;
- u8 proto_id;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- default:
- proto_id = sas_get_fabric_proto_ident(se_tpg);
- break;
- }
-
- return proto_id;
-}
-
-static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg)
-{
- struct usbg_tpg *tpg = container_of(se_tpg,
- struct usbg_tpg, se_tpg);
- struct usbg_tport *tport = tpg->tport;
-
- return &tport->tport_name[0];
-}
-
-static u16 usbg_get_tag(struct se_portal_group *se_tpg)
-{
- struct usbg_tpg *tpg = container_of(se_tpg,
- struct usbg_tpg, se_tpg);
- return tpg->tport_tpgt;
-}
-
-static u32 usbg_get_default_depth(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static u32 usbg_get_pr_transport_id(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code,
- unsigned char *buf)
-{
- struct usbg_tpg *tpg = container_of(se_tpg,
- struct usbg_tpg, se_tpg);
- struct usbg_tport *tport = tpg->tport;
- int ret = 0;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- default:
- ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- break;
- }
-
- return ret;
-}
-
-static u32 usbg_get_pr_transport_id_len(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
-{
- struct usbg_tpg *tpg = container_of(se_tpg,
- struct usbg_tpg, se_tpg);
- struct usbg_tport *tport = tpg->tport;
- int ret = 0;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- default:
- ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- break;
- }
-
- return ret;
-}
-
-static char *usbg_parse_pr_out_transport_id(
- struct se_portal_group *se_tpg,
- const char *buf,
- u32 *out_tid_len,
- char **port_nexus_ptr)
-{
- struct usbg_tpg *tpg = container_of(se_tpg,
- struct usbg_tpg, se_tpg);
- struct usbg_tport *tport = tpg->tport;
- char *tid = NULL;
-
- switch (tport->tport_proto_id) {
- case SCSI_PROTOCOL_SAS:
- default:
- tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- }
-
- return tid;
-}
-
-static struct se_node_acl *usbg_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
- struct usbg_nacl *nacl;
-
- nacl = kzalloc(sizeof(struct usbg_nacl), GFP_KERNEL);
- if (!nacl) {
- printk(KERN_ERR "Unable to alocate struct usbg_nacl\n");
- return NULL;
- }
-
- return &nacl->se_node_acl;
-}
-
-static void usbg_release_fabric_acl(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl)
-{
- struct usbg_nacl *nacl = container_of(se_nacl,
- struct usbg_nacl, se_node_acl);
- kfree(nacl);
-}
-
-static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static int usbg_new_cmd(struct se_cmd *se_cmd)
-{
- struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
- se_cmd);
- int ret;
-
- ret = target_setup_cmd_from_cdb(se_cmd, cmd->cmd_buf);
- if (ret)
- return ret;
-
- return transport_generic_map_mem_to_cmd(se_cmd, NULL, 0, NULL, 0);
-}
-
-static void usbg_cmd_release(struct kref *ref)
-{
- struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
- ref);
-
- transport_generic_free_cmd(&cmd->se_cmd, 0);
-}
-
-static void usbg_release_cmd(struct se_cmd *se_cmd)
-{
- struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
- se_cmd);
- kfree(cmd->data_buf);
- kfree(cmd);
- return;
-}
-
-static int usbg_shutdown_session(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void usbg_close_session(struct se_session *se_sess)
-{
- return;
-}
-
-static u32 usbg_sess_get_index(struct se_session *se_sess)
-{
- return 0;
-}
-
-/*
- * XXX Error recovery: return != 0 if we expect writes. Dunno when that could be
- */
-static int usbg_write_pending_status(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
-static void usbg_set_default_node_attrs(struct se_node_acl *nacl)
-{
- return;
-}
-
-static u32 usbg_get_task_tag(struct se_cmd *se_cmd)
-{
- struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
- se_cmd);
- struct f_uas *fu = cmd->fu;
-
- if (fu->flags & USBG_IS_BOT)
- return le32_to_cpu(cmd->bot_tag);
- else
- return cmd->tag;
-}
-
-static int usbg_get_cmd_state(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
-static int usbg_queue_tm_rsp(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
-static u16 usbg_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
-{
- return 0;
-}
-
-static u16 usbg_get_fabric_sense_len(void)
-{
- return 0;
-}
-
-static const char *usbg_check_wwn(const char *name)
-{
- const char *n;
- unsigned int len;
-
- n = strstr(name, "naa.");
- if (!n)
- return NULL;
- n += 4;
- len = strlen(n);
- if (len == 0 || len > USBG_NAMELEN - 1)
- return NULL;
- return n;
-}
-
-static struct se_node_acl *usbg_make_nodeacl(
- struct se_portal_group *se_tpg,
- struct config_group *group,
- const char *name)
-{
- struct se_node_acl *se_nacl, *se_nacl_new;
- struct usbg_nacl *nacl;
- u64 wwpn = 0;
- u32 nexus_depth;
- const char *wnn_name;
-
- wnn_name = usbg_check_wwn(name);
- if (!wnn_name)
- return ERR_PTR(-EINVAL);
- se_nacl_new = usbg_alloc_fabric_acl(se_tpg);
- if (!(se_nacl_new))
- return ERR_PTR(-ENOMEM);
-
- nexus_depth = 1;
- /*
- * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
- * when converting a NodeACL from demo mode -> explict
- */
- se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
- name, nexus_depth);
- if (IS_ERR(se_nacl)) {
- usbg_release_fabric_acl(se_tpg, se_nacl_new);
- return se_nacl;
- }
- /*
- * Locate our struct usbg_nacl and set the FC Nport WWPN
- */
- nacl = container_of(se_nacl, struct usbg_nacl, se_node_acl);
- nacl->iport_wwpn = wwpn;
- snprintf(nacl->iport_name, sizeof(nacl->iport_name), "%s", name);
- return se_nacl;
-}
-
-static void usbg_drop_nodeacl(struct se_node_acl *se_acl)
-{
- struct usbg_nacl *nacl = container_of(se_acl,
- struct usbg_nacl, se_node_acl);
- core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
- kfree(nacl);
-}
-
-struct usbg_tpg *the_only_tpg_I_currently_have;
-
-static struct se_portal_group *usbg_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
-{
- struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
- tport_wwn);
- struct usbg_tpg *tpg;
- unsigned long tpgt;
- int ret;
-
- if (strstr(name, "tpgt_") != name)
- return ERR_PTR(-EINVAL);
- if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX)
- return ERR_PTR(-EINVAL);
- if (the_only_tpg_I_currently_have) {
- pr_err("Until the gadget framework can't handle multiple\n");
- pr_err("gadgets, you can't do this here.\n");
- return ERR_PTR(-EBUSY);
- }
-
- tpg = kzalloc(sizeof(struct usbg_tpg), GFP_KERNEL);
- if (!tpg) {
- printk(KERN_ERR "Unable to allocate struct usbg_tpg");
- return ERR_PTR(-ENOMEM);
- }
- mutex_init(&tpg->tpg_mutex);
- atomic_set(&tpg->tpg_port_count, 0);
- tpg->workqueue = alloc_workqueue("tcm_usb_gadget", 0, 1);
- if (!tpg->workqueue) {
- kfree(tpg);
- return NULL;
- }
-
- tpg->tport = tport;
- tpg->tport_tpgt = tpgt;
-
- ret = core_tpg_register(&usbg_fabric_configfs->tf_ops, wwn,
- &tpg->se_tpg, tpg,
- TRANSPORT_TPG_TYPE_NORMAL);
- if (ret < 0) {
- destroy_workqueue(tpg->workqueue);
- kfree(tpg);
- return NULL;
- }
- the_only_tpg_I_currently_have = tpg;
- return &tpg->se_tpg;
-}
-
-static void usbg_drop_tpg(struct se_portal_group *se_tpg)
-{
- struct usbg_tpg *tpg = container_of(se_tpg,
- struct usbg_tpg, se_tpg);
-
- core_tpg_deregister(se_tpg);
- destroy_workqueue(tpg->workqueue);
- kfree(tpg);
- the_only_tpg_I_currently_have = NULL;
-}
-
-static struct se_wwn *usbg_make_tport(
- struct target_fabric_configfs *tf,
- struct config_group *group,
- const char *name)
-{
- struct usbg_tport *tport;
- const char *wnn_name;
- u64 wwpn = 0;
-
- wnn_name = usbg_check_wwn(name);
- if (!wnn_name)
- return ERR_PTR(-EINVAL);
-
- tport = kzalloc(sizeof(struct usbg_tport), GFP_KERNEL);
- if (!(tport)) {
- printk(KERN_ERR "Unable to allocate struct usbg_tport");
- return ERR_PTR(-ENOMEM);
- }
- tport->tport_wwpn = wwpn;
- snprintf(tport->tport_name, sizeof(tport->tport_name), wnn_name);
- return &tport->tport_wwn;
-}
-
-static void usbg_drop_tport(struct se_wwn *wwn)
-{
- struct usbg_tport *tport = container_of(wwn,
- struct usbg_tport, tport_wwn);
- kfree(tport);
-}
-
-/*
- * If somebody feels like dropping the version property, go ahead.
- */
-static ssize_t usbg_wwn_show_attr_version(
- struct target_fabric_configfs *tf,
- char *page)
-{
- return sprintf(page, "usb-gadget fabric module\n");
-}
-TF_WWN_ATTR_RO(usbg, version);
-
-static struct configfs_attribute *usbg_wwn_attrs[] = {
- &usbg_wwn_version.attr,
- NULL,
-};
-
-static ssize_t tcm_usbg_tpg_show_enable(
- struct se_portal_group *se_tpg,
- char *page)
-{
- struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
-
- return snprintf(page, PAGE_SIZE, "%u\n", tpg->gadget_connect);
-}
-
-static int usbg_attach(struct usbg_tpg *);
-static void usbg_detach(struct usbg_tpg *);
-
-static ssize_t tcm_usbg_tpg_store_enable(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
-{
- struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
- unsigned long op;
- ssize_t ret;
-
- ret = kstrtoul(page, 0, &op);
- if (ret < 0)
- return -EINVAL;
- if (op > 1)
- return -EINVAL;
-
- if (op && tpg->gadget_connect)
- goto out;
- if (!op && !tpg->gadget_connect)
- goto out;
-
- if (op) {
- ret = usbg_attach(tpg);
- if (ret)
- goto out;
- } else {
- usbg_detach(tpg);
- }
- tpg->gadget_connect = op;
-out:
- return count;
-}
-TF_TPG_BASE_ATTR(tcm_usbg, enable, S_IRUGO | S_IWUSR);
-
-static ssize_t tcm_usbg_tpg_show_nexus(
- struct se_portal_group *se_tpg,
- char *page)
-{
- struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
- struct tcm_usbg_nexus *tv_nexus;
- ssize_t ret;
-
- mutex_lock(&tpg->tpg_mutex);
- tv_nexus = tpg->tpg_nexus;
- if (!tv_nexus) {
- ret = -ENODEV;
- goto out;
- }
- ret = snprintf(page, PAGE_SIZE, "%s\n",
- tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
-out:
- mutex_unlock(&tpg->tpg_mutex);
- return ret;
-}
-
-static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
-{
- struct se_portal_group *se_tpg;
- struct tcm_usbg_nexus *tv_nexus;
- int ret;
-
- mutex_lock(&tpg->tpg_mutex);
- if (tpg->tpg_nexus) {
- ret = -EEXIST;
- pr_debug("tpg->tpg_nexus already exists\n");
- goto err_unlock;
- }
- se_tpg = &tpg->se_tpg;
-
- ret = -ENOMEM;
- tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
- if (!tv_nexus) {
- pr_err("Unable to allocate struct tcm_vhost_nexus\n");
- goto err_unlock;
- }
- tv_nexus->tvn_se_sess = transport_init_session();
- if (IS_ERR(tv_nexus->tvn_se_sess))
- goto err_free;
-
- /*
- * Since we are running in 'demo mode' this call with generate a
- * struct se_node_acl for the tcm_vhost struct se_portal_group with
- * the SCSI Initiator port name of the passed configfs group 'name'.
- */
- tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
- se_tpg, name);
- if (!tv_nexus->tvn_se_sess->se_node_acl) {
- pr_debug("core_tpg_check_initiator_node_acl() failed"
- " for %s\n", name);
- goto err_session;
- }
- /*
- * Now register the TCM vHost virtual I_T Nexus as active with the
- * call to __transport_register_session()
- */
- __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
- tv_nexus->tvn_se_sess, tv_nexus);
- tpg->tpg_nexus = tv_nexus;
- mutex_unlock(&tpg->tpg_mutex);
- return 0;
-
-err_session:
- transport_free_session(tv_nexus->tvn_se_sess);
-err_free:
- kfree(tv_nexus);
-err_unlock:
- mutex_unlock(&tpg->tpg_mutex);
- return ret;
-}
-
-static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
-{
- struct se_session *se_sess;
- struct tcm_usbg_nexus *tv_nexus;
- int ret = -ENODEV;
-
- mutex_lock(&tpg->tpg_mutex);
- tv_nexus = tpg->tpg_nexus;
- if (!tv_nexus)
- goto out;
-
- se_sess = tv_nexus->tvn_se_sess;
- if (!se_sess)
- goto out;
-
- if (atomic_read(&tpg->tpg_port_count)) {
- ret = -EPERM;
- pr_err("Unable to remove Host I_T Nexus with"
- " active TPG port count: %d\n",
- atomic_read(&tpg->tpg_port_count));
- goto out;
- }
-
- pr_debug("Removing I_T Nexus to Initiator Port: %s\n",
- tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
- /*
- * Release the SCSI I_T Nexus to the emulated vHost Target Port
- */
- transport_deregister_session(tv_nexus->tvn_se_sess);
- tpg->tpg_nexus = NULL;
-
- kfree(tv_nexus);
-out:
- mutex_unlock(&tpg->tpg_mutex);
- return 0;
-}
-
-static ssize_t tcm_usbg_tpg_store_nexus(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
-{
- struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
- unsigned char i_port[USBG_NAMELEN], *ptr;
- int ret;
-
- if (!strncmp(page, "NULL", 4)) {
- ret = tcm_usbg_drop_nexus(tpg);
- return (!ret) ? count : ret;
- }
- if (strlen(page) > USBG_NAMELEN) {
- pr_err("Emulated NAA Sas Address: %s, exceeds"
- " max: %d\n", page, USBG_NAMELEN);
- return -EINVAL;
- }
- snprintf(i_port, USBG_NAMELEN, "%s", page);
-
- ptr = strstr(i_port, "naa.");
- if (!ptr) {
- pr_err("Missing 'naa.' prefix\n");
- return -EINVAL;
- }
-
- if (i_port[strlen(i_port) - 1] == '\n')
- i_port[strlen(i_port) - 1] = '\0';
-
- ret = tcm_usbg_make_nexus(tpg, &i_port[4]);
- if (ret < 0)
- return ret;
- return count;
-}
-TF_TPG_BASE_ATTR(tcm_usbg, nexus, S_IRUGO | S_IWUSR);
-
-static struct configfs_attribute *usbg_base_attrs[] = {
- &tcm_usbg_tpg_enable.attr,
- &tcm_usbg_tpg_nexus.attr,
- NULL,
-};
-
-static int usbg_port_link(struct se_portal_group *se_tpg, struct se_lun *lun)
-{
- struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
-
- atomic_inc(&tpg->tpg_port_count);
- smp_mb__after_atomic_inc();
- return 0;
-}
-
-static void usbg_port_unlink(struct se_portal_group *se_tpg,
- struct se_lun *se_lun)
-{
- struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
-
- atomic_dec(&tpg->tpg_port_count);
- smp_mb__after_atomic_dec();
-}
-
-static int usbg_check_stop_free(struct se_cmd *se_cmd)
-{
- struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
- se_cmd);
-
- kref_put(&cmd->ref, usbg_cmd_release);
- return 1;
-}
-
-static struct target_core_fabric_ops usbg_ops = {
- .get_fabric_name = usbg_get_fabric_name,
- .get_fabric_proto_ident = usbg_get_fabric_proto_ident,
- .tpg_get_wwn = usbg_get_fabric_wwn,
- .tpg_get_tag = usbg_get_tag,
- .tpg_get_default_depth = usbg_get_default_depth,
- .tpg_get_pr_transport_id = usbg_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = usbg_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = usbg_parse_pr_out_transport_id,
- .tpg_check_demo_mode = usbg_check_true,
- .tpg_check_demo_mode_cache = usbg_check_false,
- .tpg_check_demo_mode_write_protect = usbg_check_false,
- .tpg_check_prod_mode_write_protect = usbg_check_false,
- .tpg_alloc_fabric_acl = usbg_alloc_fabric_acl,
- .tpg_release_fabric_acl = usbg_release_fabric_acl,
- .tpg_get_inst_index = usbg_tpg_get_inst_index,
- .new_cmd_map = usbg_new_cmd,
- .release_cmd = usbg_release_cmd,
- .shutdown_session = usbg_shutdown_session,
- .close_session = usbg_close_session,
- .sess_get_index = usbg_sess_get_index,
- .sess_get_initiator_sid = NULL,
- .write_pending = usbg_send_write_request,
- .write_pending_status = usbg_write_pending_status,
- .set_default_node_attributes = usbg_set_default_node_attrs,
- .get_task_tag = usbg_get_task_tag,
- .get_cmd_state = usbg_get_cmd_state,
- .queue_data_in = usbg_send_read_response,
- .queue_status = usbg_send_status_response,
- .queue_tm_rsp = usbg_queue_tm_rsp,
- .get_fabric_sense_len = usbg_get_fabric_sense_len,
- .set_fabric_sense_len = usbg_set_fabric_sense_len,
- .check_stop_free = usbg_check_stop_free,
-
- .fabric_make_wwn = usbg_make_tport,
- .fabric_drop_wwn = usbg_drop_tport,
- .fabric_make_tpg = usbg_make_tpg,
- .fabric_drop_tpg = usbg_drop_tpg,
- .fabric_post_link = usbg_port_link,
- .fabric_pre_unlink = usbg_port_unlink,
- .fabric_make_np = NULL,
- .fabric_drop_np = NULL,
- .fabric_make_nodeacl = usbg_make_nodeacl,
- .fabric_drop_nodeacl = usbg_drop_nodeacl,
-};
-
-static int usbg_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
-
- fabric = target_fabric_configfs_init(THIS_MODULE, "usb_gadget");
- if (IS_ERR(fabric)) {
- printk(KERN_ERR "target_fabric_configfs_init() failed\n");
- return PTR_ERR(fabric);
- }
-
- fabric->tf_ops = usbg_ops;
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- printk(KERN_ERR "target_fabric_configfs_register() failed"
- " for usb-gadget\n");
- return ret;
- }
- usbg_fabric_configfs = fabric;
- return 0;
-};
-
-static void usbg_deregister_configfs(void)
-{
- if (!(usbg_fabric_configfs))
- return;
-
- target_fabric_configfs_deregister(usbg_fabric_configfs);
- usbg_fabric_configfs = NULL;
-};
-
-/* Start gadget.c code */
-
-static struct usb_interface_descriptor bot_intf_desc = {
- .bLength = sizeof(bot_intf_desc),
- .bDescriptorType = USB_DT_INTERFACE,
- .bAlternateSetting = 0,
- .bNumEndpoints = 2,
- .bAlternateSetting = USB_G_ALT_INT_BBB,
- .bInterfaceClass = USB_CLASS_MASS_STORAGE,
- .bInterfaceSubClass = USB_SC_SCSI,
- .bInterfaceProtocol = USB_PR_BULK,
- .iInterface = USB_G_STR_INT_UAS,
-};
-
-static struct usb_interface_descriptor uasp_intf_desc = {
- .bLength = sizeof(uasp_intf_desc),
- .bDescriptorType = USB_DT_INTERFACE,
- .bNumEndpoints = 4,
- .bAlternateSetting = USB_G_ALT_INT_UAS,
- .bInterfaceClass = USB_CLASS_MASS_STORAGE,
- .bInterfaceSubClass = USB_SC_SCSI,
- .bInterfaceProtocol = USB_PR_UAS,
- .iInterface = USB_G_STR_INT_BBB,
-};
-
-static struct usb_endpoint_descriptor uasp_bi_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = cpu_to_le16(512),
-};
-
-static struct usb_endpoint_descriptor uasp_fs_bi_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
-};
-
-static struct usb_pipe_usage_descriptor uasp_bi_pipe_desc = {
- .bLength = sizeof(uasp_bi_pipe_desc),
- .bDescriptorType = USB_DT_PIPE_USAGE,
- .bPipeID = DATA_IN_PIPE_ID,
-};
-
-static struct usb_endpoint_descriptor uasp_ss_bi_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = cpu_to_le16(1024),
-};
-
-static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
- .bLength = sizeof(uasp_bi_ep_comp_desc),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- .bMaxBurst = 0,
- .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
- .wBytesPerInterval = 0,
-};
-
-static struct usb_ss_ep_comp_descriptor bot_bi_ep_comp_desc = {
- .bLength = sizeof(bot_bi_ep_comp_desc),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- .bMaxBurst = 0,
-};
-
-static struct usb_endpoint_descriptor uasp_bo_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = USB_DIR_OUT,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = cpu_to_le16(512),
-};
-
-static struct usb_endpoint_descriptor uasp_fs_bo_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = USB_DIR_OUT,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
-};
-
-static struct usb_pipe_usage_descriptor uasp_bo_pipe_desc = {
- .bLength = sizeof(uasp_bo_pipe_desc),
- .bDescriptorType = USB_DT_PIPE_USAGE,
- .bPipeID = DATA_OUT_PIPE_ID,
-};
-
-static struct usb_endpoint_descriptor uasp_ss_bo_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = USB_DIR_OUT,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = cpu_to_le16(0x400),
-};
-
-static struct usb_ss_ep_comp_descriptor uasp_bo_ep_comp_desc = {
- .bLength = sizeof(uasp_bo_ep_comp_desc),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
-};
-
-static struct usb_ss_ep_comp_descriptor bot_bo_ep_comp_desc = {
- .bLength = sizeof(bot_bo_ep_comp_desc),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
-};
-
-static struct usb_endpoint_descriptor uasp_status_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = cpu_to_le16(512),
-};
-
-static struct usb_endpoint_descriptor uasp_fs_status_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
-};
-
-static struct usb_pipe_usage_descriptor uasp_status_pipe_desc = {
- .bLength = sizeof(uasp_status_pipe_desc),
- .bDescriptorType = USB_DT_PIPE_USAGE,
- .bPipeID = STATUS_PIPE_ID,
-};
-
-static struct usb_endpoint_descriptor uasp_ss_status_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = cpu_to_le16(1024),
-};
-
-static struct usb_ss_ep_comp_descriptor uasp_status_in_ep_comp_desc = {
- .bLength = sizeof(uasp_status_in_ep_comp_desc),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
-};
-
-static struct usb_endpoint_descriptor uasp_cmd_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = USB_DIR_OUT,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = cpu_to_le16(512),
-};
-
-static struct usb_endpoint_descriptor uasp_fs_cmd_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = USB_DIR_OUT,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
-};
-
-static struct usb_pipe_usage_descriptor uasp_cmd_pipe_desc = {
- .bLength = sizeof(uasp_cmd_pipe_desc),
- .bDescriptorType = USB_DT_PIPE_USAGE,
- .bPipeID = CMD_PIPE_ID,
-};
-
-static struct usb_endpoint_descriptor uasp_ss_cmd_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = USB_DIR_OUT,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = cpu_to_le16(1024),
-};
-
-static struct usb_ss_ep_comp_descriptor uasp_cmd_comp_desc = {
- .bLength = sizeof(uasp_cmd_comp_desc),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
-};
-
-static struct usb_descriptor_header *uasp_fs_function_desc[] = {
- (struct usb_descriptor_header *) &bot_intf_desc,
- (struct usb_descriptor_header *) &uasp_fs_bi_desc,
- (struct usb_descriptor_header *) &uasp_fs_bo_desc,
-
- (struct usb_descriptor_header *) &uasp_intf_desc,
- (struct usb_descriptor_header *) &uasp_fs_bi_desc,
- (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
- (struct usb_descriptor_header *) &uasp_fs_bo_desc,
- (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
- (struct usb_descriptor_header *) &uasp_fs_status_desc,
- (struct usb_descriptor_header *) &uasp_status_pipe_desc,
- (struct usb_descriptor_header *) &uasp_fs_cmd_desc,
- (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
-};
-
-static struct usb_descriptor_header *uasp_hs_function_desc[] = {
- (struct usb_descriptor_header *) &bot_intf_desc,
- (struct usb_descriptor_header *) &uasp_bi_desc,
- (struct usb_descriptor_header *) &uasp_bo_desc,
-
- (struct usb_descriptor_header *) &uasp_intf_desc,
- (struct usb_descriptor_header *) &uasp_bi_desc,
- (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
- (struct usb_descriptor_header *) &uasp_bo_desc,
- (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
- (struct usb_descriptor_header *) &uasp_status_desc,
- (struct usb_descriptor_header *) &uasp_status_pipe_desc,
- (struct usb_descriptor_header *) &uasp_cmd_desc,
- (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
- NULL,
-};
-
-static struct usb_descriptor_header *uasp_ss_function_desc[] = {
- (struct usb_descriptor_header *) &bot_intf_desc,
- (struct usb_descriptor_header *) &uasp_ss_bi_desc,
- (struct usb_descriptor_header *) &bot_bi_ep_comp_desc,
- (struct usb_descriptor_header *) &uasp_ss_bo_desc,
- (struct usb_descriptor_header *) &bot_bo_ep_comp_desc,
-
- (struct usb_descriptor_header *) &uasp_intf_desc,
- (struct usb_descriptor_header *) &uasp_ss_bi_desc,
- (struct usb_descriptor_header *) &uasp_bi_ep_comp_desc,
- (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
- (struct usb_descriptor_header *) &uasp_ss_bo_desc,
- (struct usb_descriptor_header *) &uasp_bo_ep_comp_desc,
- (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
- (struct usb_descriptor_header *) &uasp_ss_status_desc,
- (struct usb_descriptor_header *) &uasp_status_in_ep_comp_desc,
- (struct usb_descriptor_header *) &uasp_status_pipe_desc,
- (struct usb_descriptor_header *) &uasp_ss_cmd_desc,
- (struct usb_descriptor_header *) &uasp_cmd_comp_desc,
- (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
- NULL,
-};
+#include "f_tcm.c"
#define UAS_VENDOR_ID 0x0525 /* NetChip */
#define UAS_PRODUCT_ID 0xa4a5 /* Linux-USB File-backed Storage Gadget */
+#define USB_G_STR_MANUFACTOR 1
+#define USB_G_STR_PRODUCT 2
+#define USB_G_STR_SERIAL 3
+#define USB_G_STR_CONFIG 4
+
static struct usb_device_descriptor usbg_device_desc = {
.bLength = sizeof(usbg_device_desc),
.bDescriptorType = USB_DT_DEVICE,
@@ -2227,8 +47,6 @@
{ USB_G_STR_PRODUCT, "Target Product"},
{ USB_G_STR_SERIAL, "000000000001"},
{ USB_G_STR_CONFIG, "default config"},
- { USB_G_STR_INT_UAS, "USB Attached SCSI"},
- { USB_G_STR_INT_BBB, "Bulk Only Transport"},
{ },
};
@@ -2242,11 +60,6 @@
NULL,
};
-static int guas_unbind(struct usb_composite_dev *cdev)
-{
- return 0;
-}
-
static struct usb_configuration usbg_config_driver = {
.label = "Linux Target",
.bConfigurationValue = 1,
@@ -2254,183 +67,9 @@
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
};
-static void give_back_ep(struct usb_ep **pep)
-{
- struct usb_ep *ep = *pep;
- if (!ep)
- return;
- ep->driver_data = NULL;
-}
-
-static int usbg_bind(struct usb_configuration *c, struct usb_function *f)
-{
- struct f_uas *fu = to_f_uas(f);
- struct usb_gadget *gadget = c->cdev->gadget;
- struct usb_ep *ep;
- int iface;
-
- iface = usb_interface_id(c, f);
- if (iface < 0)
- return iface;
-
- bot_intf_desc.bInterfaceNumber = iface;
- uasp_intf_desc.bInterfaceNumber = iface;
- fu->iface = iface;
- ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bi_desc,
- &uasp_bi_ep_comp_desc);
- if (!ep)
- goto ep_fail;
-
- ep->driver_data = fu;
- fu->ep_in = ep;
-
- ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bo_desc,
- &uasp_bo_ep_comp_desc);
- if (!ep)
- goto ep_fail;
- ep->driver_data = fu;
- fu->ep_out = ep;
-
- ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_status_desc,
- &uasp_status_in_ep_comp_desc);
- if (!ep)
- goto ep_fail;
- ep->driver_data = fu;
- fu->ep_status = ep;
-
- ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_cmd_desc,
- &uasp_cmd_comp_desc);
- if (!ep)
- goto ep_fail;
- ep->driver_data = fu;
- fu->ep_cmd = ep;
-
- /* Assume endpoint addresses are the same for both speeds */
- uasp_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
- uasp_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
- uasp_status_desc.bEndpointAddress =
- uasp_ss_status_desc.bEndpointAddress;
- uasp_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
-
- uasp_fs_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
- uasp_fs_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
- uasp_fs_status_desc.bEndpointAddress =
- uasp_ss_status_desc.bEndpointAddress;
- uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
-
- return 0;
-ep_fail:
- pr_err("Can't claim all required eps\n");
-
- give_back_ep(&fu->ep_in);
- give_back_ep(&fu->ep_out);
- give_back_ep(&fu->ep_status);
- give_back_ep(&fu->ep_cmd);
- return -ENOTSUPP;
-}
-
-static void usbg_unbind(struct usb_configuration *c, struct usb_function *f)
-{
- struct f_uas *fu = to_f_uas(f);
-
- kfree(fu);
-}
-
-struct guas_setup_wq {
- struct work_struct work;
- struct f_uas *fu;
- unsigned int alt;
-};
-
-static void usbg_delayed_set_alt(struct work_struct *wq)
-{
- struct guas_setup_wq *work = container_of(wq, struct guas_setup_wq,
- work);
- struct f_uas *fu = work->fu;
- int alt = work->alt;
-
- kfree(work);
-
- if (fu->flags & USBG_IS_BOT)
- bot_cleanup_old_alt(fu);
- if (fu->flags & USBG_IS_UAS)
- uasp_cleanup_old_alt(fu);
-
- if (alt == USB_G_ALT_INT_BBB)
- bot_set_alt(fu);
- else if (alt == USB_G_ALT_INT_UAS)
- uasp_set_alt(fu);
- usb_composite_setup_continue(fu->function.config->cdev);
-}
-
-static int usbg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
-{
- struct f_uas *fu = to_f_uas(f);
-
- if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
- struct guas_setup_wq *work;
-
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
- return -ENOMEM;
- INIT_WORK(&work->work, usbg_delayed_set_alt);
- work->fu = fu;
- work->alt = alt;
- schedule_work(&work->work);
- return USB_GADGET_DELAYED_STATUS;
- }
- return -EOPNOTSUPP;
-}
-
-static void usbg_disable(struct usb_function *f)
-{
- struct f_uas *fu = to_f_uas(f);
-
- if (fu->flags & USBG_IS_UAS)
- uasp_cleanup_old_alt(fu);
- else if (fu->flags & USBG_IS_BOT)
- bot_cleanup_old_alt(fu);
- fu->flags = 0;
-}
-
-static int usbg_setup(struct usb_function *f,
- const struct usb_ctrlrequest *ctrl)
-{
- struct f_uas *fu = to_f_uas(f);
-
- if (!(fu->flags & USBG_IS_BOT))
- return -EOPNOTSUPP;
-
- return usbg_bot_setup(f, ctrl);
-}
-
static int usbg_cfg_bind(struct usb_configuration *c)
{
- struct f_uas *fu;
- int ret;
-
- fu = kzalloc(sizeof(*fu), GFP_KERNEL);
- if (!fu)
- return -ENOMEM;
- fu->function.name = "Target Function";
- fu->function.descriptors = uasp_fs_function_desc;
- fu->function.hs_descriptors = uasp_hs_function_desc;
- fu->function.ss_descriptors = uasp_ss_function_desc;
- fu->function.bind = usbg_bind;
- fu->function.unbind = usbg_unbind;
- fu->function.set_alt = usbg_set_alt;
- fu->function.setup = usbg_setup;
- fu->function.disable = usbg_disable;
- fu->tpg = the_only_tpg_I_currently_have;
-
- ret = usb_add_function(c, &fu->function);
- if (ret)
- goto err;
-
- return 0;
-err:
- kfree(fu);
- return ret;
+ return tcm_bind_config(c);
}
static int usb_target_bind(struct usb_composite_dev *cdev)
@@ -2439,6 +78,11 @@
ret = usb_add_config(cdev, &usbg_config_driver,
usbg_cfg_bind);
+ return ret;
+}
+
+static int guas_unbind(struct usb_composite_dev *cdev)
+{
return 0;
}
@@ -2450,28 +94,30 @@
.unbind = guas_unbind,
};
-static int usbg_attach(struct usbg_tpg *tpg)
+static int usbg_attach_cb(bool connect)
{
- return usb_composite_probe(&usbg_driver, usb_target_bind);
-}
+ int ret = 0;
-static void usbg_detach(struct usbg_tpg *tpg)
-{
- usb_composite_unregister(&usbg_driver);
+ if (connect)
+ ret = usb_composite_probe(&usbg_driver, usb_target_bind);
+ else
+ usb_composite_unregister(&usbg_driver);
+
+ return ret;
}
static int __init usb_target_gadget_init(void)
{
int ret;
- ret = usbg_register_configfs();
+ ret = f_tcm_init(&usbg_attach_cb);
return ret;
}
module_init(usb_target_gadget_init);
static void __exit usb_target_gadget_exit(void)
{
- usbg_deregister_configfs();
+ f_tcm_exit();
}
module_exit(usb_target_gadget_exit);
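
With the fabric and configfs code moved into f_tcm.c, the gadget file above shrinks to a thin shim; a condensed sketch of its post-patch shape, assembled from the hunks above (descriptor and string-table setup omitted, so this is illustrative rather than the exact file contents):

/* Sketch of tcm_usb_gadget.c after the refactor: all fabric/configfs
 * logic lives in f_tcm.c; the gadget only binds the config and reacts
 * to f_tcm's connect/disconnect notification. */
static int usbg_cfg_bind(struct usb_configuration *c)
{
	return tcm_bind_config(c);	/* provided by f_tcm.c */
}

static int usbg_attach_cb(bool connect)
{
	/* configfs 'enable' toggled: bring the composite gadget up or down */
	if (connect)
		return usb_composite_probe(&usbg_driver, usb_target_bind);
	usb_composite_unregister(&usbg_driver);
	return 0;
}

static int __init usb_target_gadget_init(void)
{
	return f_tcm_init(&usbg_attach_cb);
}

static void __exit usb_target_gadget_exit(void)
{
	f_tcm_exit();
}
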
diff --git a/drivers/usb/host/ehci-msm2.c b/drivers/usb/host/ehci-msm2.c
index 4657283..8a87a6a 100644
--- a/drivers/usb/host/ehci-msm2.c
+++ b/drivers/usb/host/ehci-msm2.c
@@ -660,6 +660,7 @@
skip_phy_resume:
+ usb_hcd_resume_root_hub(hcd);
atomic_set(&mhcd->in_lpm, 0);
if (mhcd->async_int) {
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 8467dc0..d895f27 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -12,6 +12,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb/otg.h>
@@ -173,6 +174,11 @@
usb_put_transceiver(phy);
goto put_usb3_hcd;
}
+ } else {
+ pm_runtime_no_callbacks(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get(&pdev->dev);
}
return 0;
@@ -211,6 +217,9 @@
if (phy && phy->otg) {
otg_set_host(phy->otg, NULL);
usb_put_transceiver(phy);
+ } else {
+ pm_runtime_put(&dev->dev);
+ pm_runtime_disable(&dev->dev);
}
return 0;
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index c1e1e13..b6fc43f 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -40,6 +40,7 @@
#include <linux/mfd/pm8xxx/pm8921-charger.h>
#include <linux/mfd/pm8xxx/misc.h>
#include <linux/power_supply.h>
+#include <linux/mhl_8334.h>
#include <mach/clk.h>
#include <mach/msm_xo.h>
@@ -69,6 +70,7 @@
static struct msm_otg *the_msm_otg;
static bool debug_aca_enabled;
static bool debug_bus_voting_enabled;
+static bool mhl_det_in_progress;
static struct regulator *hsusb_3p3;
static struct regulator *hsusb_1p8;
@@ -738,7 +740,8 @@
return 0;
disable_irq(motg->irq);
- host_bus_suspend = phy->otg->host && !test_bit(ID, &motg->inputs);
+ host_bus_suspend = !test_bit(MHL, &motg->inputs) && phy->otg->host &&
+ !test_bit(ID, &motg->inputs);
device_bus_suspend = phy->otg->gadget && test_bit(ID, &motg->inputs) &&
test_bit(A_BUS_SUSPEND, &motg->inputs) &&
motg->caps & ALLOW_LPM_ON_DEV_SUSPEND;
@@ -1402,6 +1405,102 @@
return 0;
}
+static int msm_otg_mhl_register_callback(struct msm_otg *motg,
+ void (*callback)(int on))
+{
+ struct usb_phy *phy = &motg->phy;
+ int ret;
+
+ if (motg->pdata->otg_control != OTG_PMIC_CONTROL ||
+ !motg->pdata->pmic_id_irq) {
+ dev_dbg(phy->dev, "MHL can not be supported without PMIC Id\n");
+ return -ENODEV;
+ }
+
+ if (!motg->pdata->mhl_dev_name) {
+ dev_dbg(phy->dev, "MHL device name does not exist.\n");
+ return -ENODEV;
+ }
+
+ if (callback)
+ ret = mhl_register_callback(motg->pdata->mhl_dev_name,
+ callback);
+ else
+ ret = mhl_unregister_callback(motg->pdata->mhl_dev_name);
+
+ if (ret)
+ dev_dbg(phy->dev, "mhl_register_callback(%s) return error=%d\n",
+ motg->pdata->mhl_dev_name, ret);
+ else
+ motg->mhl_enabled = true;
+
+ return ret;
+}
+
+static void msm_otg_mhl_notify_online(int on)
+{
+ struct msm_otg *motg = the_msm_otg;
+ struct usb_phy *phy = &motg->phy;
+ bool queue = false;
+
+ dev_dbg(phy->dev, "notify MHL %s%s\n", on ? "" : "dis", "connected");
+
+ if (on) {
+ set_bit(MHL, &motg->inputs);
+ } else {
+ clear_bit(MHL, &motg->inputs);
+ queue = true;
+ }
+
+ if (queue && phy->state != OTG_STATE_UNDEFINED)
+ schedule_work(&motg->sm_work);
+}
+
+static bool msm_otg_is_mhl(struct msm_otg *motg)
+{
+ struct usb_phy *phy = &motg->phy;
+ int is_mhl, ret;
+
+ ret = mhl_device_discovery(motg->pdata->mhl_dev_name, &is_mhl);
+ if (ret || is_mhl != MHL_DISCOVERY_RESULT_MHL) {
+ /*
+ * The MHL driver calls our callback to report an MHL
+ * connection as soon as RID_GND is detected, but a later
+ * stage of discovery may determine that MHL is not actually
+ * connected and return false. Hence clear the MHL input here.
+ */
+ clear_bit(MHL, &motg->inputs);
+ dev_dbg(phy->dev, "MHL device not found\n");
+ return false;
+ }
+
+ set_bit(MHL, &motg->inputs);
+ dev_dbg(phy->dev, "MHL device found\n");
+ return true;
+}
+
+static bool msm_chg_mhl_detect(struct msm_otg *motg)
+{
+ bool ret, id;
+ unsigned long flags;
+
+ if (!motg->mhl_enabled)
+ return false;
+
+ local_irq_save(flags);
+ id = irq_read_line(motg->pdata->pmic_id_irq);
+ local_irq_restore(flags);
+
+ if (id)
+ return false;
+
+ mhl_det_in_progress = true;
+ ret = msm_otg_is_mhl(motg);
+ mhl_det_in_progress = false;
+
+ return ret;
+}
+
static bool msm_chg_aca_detect(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
@@ -1844,6 +1943,12 @@
unsigned long delay;
dev_dbg(phy->dev, "chg detection work\n");
+
+ if (test_bit(MHL, &motg->inputs)) {
+ dev_dbg(phy->dev, "detected MHL, escape chg detection work\n");
+ return;
+ }
+
switch (motg->chg_state) {
case USB_CHG_STATE_UNDEFINED:
msm_chg_block_on(motg);
@@ -1855,6 +1960,13 @@
delay = MSM_CHG_DCD_POLL_TIME;
break;
case USB_CHG_STATE_WAIT_FOR_DCD:
+ if (msm_chg_mhl_detect(motg)) {
+ msm_chg_block_off(motg);
+ motg->chg_state = USB_CHG_STATE_DETECTED;
+ motg->chg_type = USB_INVALID_CHARGER;
+ queue_work(system_nrt_wq, &motg->sm_work);
+ return;
+ }
is_aca = msm_chg_aca_detect(motg);
if (is_aca) {
/*
@@ -2048,9 +2160,17 @@
}
/* FALL THROUGH */
case OTG_STATE_B_IDLE:
- if ((!test_bit(ID, &motg->inputs) ||
+ if (test_bit(MHL, &motg->inputs)) {
+ /* allow LPM */
+ pm_runtime_put_noidle(otg->phy->dev);
+ pm_runtime_suspend(otg->phy->dev);
+ } else if ((!test_bit(ID, &motg->inputs) ||
test_bit(ID_A, &motg->inputs)) && otg->host) {
pr_debug("!id || id_A\n");
+ if (msm_chg_mhl_detect(motg)) {
+ work = 1;
+ break;
+ }
clear_bit(B_BUS_REQ, &motg->inputs);
set_bit(A_BUS_REQ, &motg->inputs);
otg->phy->state = OTG_STATE_A_IDLE;
@@ -2760,6 +2880,12 @@
return;
}
+ if (test_bit(MHL, &motg->inputs) ||
+ mhl_det_in_progress) {
+ pr_debug("PMIC: BSV interrupt ignored in MHL\n");
+ return;
+ }
+
if (atomic_read(&motg->pm_suspended))
motg->sm_work_pending = true;
else
@@ -2802,6 +2928,12 @@
{
struct msm_otg *motg = data;
+ if (test_bit(MHL, &motg->inputs) ||
+ mhl_det_in_progress) {
+ pr_debug("PMIC: Id interrupt ignored in MHL\n");
+ return IRQ_HANDLED;
+ }
+
if (!aca_id_turned_on)
/*schedule delayed work for 5msec for ID line state to settle*/
queue_delayed_work(system_nrt_wq, &motg->pmic_id_status_work,
@@ -3416,6 +3548,9 @@
/* Ensure that above STOREs are completed before enabling interrupts */
mb();
+ ret = msm_otg_mhl_register_callback(motg, msm_otg_mhl_notify_online);
+ if (ret)
+ dev_dbg(&pdev->dev, "MHL can not be supported\n");
wake_lock_init(&motg->wlock, WAKE_LOCK_SUSPEND, "msm_otg");
msm_otg_init_timer(motg);
INIT_WORK(&motg->sm_work, msm_otg_sm_work);
@@ -3563,6 +3698,7 @@
msm_otg_setup_devices(pdev, motg->pdata->mode, false);
if (motg->pdata->otg_control == OTG_PMIC_CONTROL)
pm8921_charger_unregister_vbus_sn(0);
+ msm_otg_mhl_register_callback(motg, NULL);
msm_otg_debugfs_cleanup();
cancel_delayed_work_sync(&motg->chg_work);
cancel_delayed_work_sync(&motg->pmic_id_status_work);
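
The msm_otg changes above lean on the MHL hooks exported by mhl_8334.c later in this series; a minimal sketch of how a client is expected to drive them, using the signatures added in this patch (the consumer function and its device-name handling are hypothetical):

#include <linux/kernel.h>
#include <linux/mhl_8334.h>

/* Invoked by the MHL driver on cable state changes (1 = MHL online). */
static void example_mhl_online(int online)
{
	pr_debug("MHL %s\n", online ? "connected" : "disconnected");
}

/* Hypothetical consumer: register for notifications, then ask the MHL
 * transmitter to classify whatever is on the connector. */
static int example_probe_mhl(const char *mhl_dev_name)
{
	int result, ret;

	ret = mhl_register_callback(mhl_dev_name, example_mhl_online);
	if (ret)
		return ret;	/* MHL not available on this board */

	ret = mhl_device_discovery(mhl_dev_name, &result);
	if (!ret && result == MHL_DISCOVERY_RESULT_MHL)
		pr_debug("MHL sink detected\n");
	else
		pr_debug("treating the port as plain USB\n");

	/* On teardown: mhl_unregister_callback(mhl_dev_name); */
	return ret;
}
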
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index bf30c0b..68500a3 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -302,7 +302,7 @@
tty = tty_port_tty_get(&port->port);
if (!tty)
- continue;
+ break;
list_del_init(&urb->urb_list);
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
index 0655daf..f919ad0 100644
--- a/drivers/video/msm/hdmi_msm.c
+++ b/drivers/video/msm/hdmi_msm.c
@@ -840,6 +840,9 @@
DEV_INFO("HDMI HPD: sense CONNECTED: send ONLINE\n");
kobject_uevent(external_common_state->uevent_kobj,
KOBJ_ONLINE);
+ switch_set_state(&external_common_state->sdev, 1);
+ DEV_INFO("Hdmi state switch to %d: %s\n",
+ external_common_state->sdev.state, __func__);
#ifndef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
/* Send Audio for HDMI Compliance Cases*/
envp[0] = "HDCP_STATE=PASS";
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 3c869cc..3204552 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -52,7 +52,7 @@
int mdp_iommu_split_domain;
static struct platform_device *mdp_init_pdev;
-static struct regulator *footswitch, *hdmi_pll_fs;
+static struct regulator *footswitch;
static unsigned int mdp_footswitch_on;
struct completion mdp_ppp_comp;
@@ -2132,27 +2132,10 @@
}
disable_irq(mdp_irq);
- hdmi_pll_fs = regulator_get(&pdev->dev, "hdmi_pll_fs");
- if (IS_ERR(hdmi_pll_fs)) {
- hdmi_pll_fs = NULL;
- } else {
- if (mdp_rev != MDP_REV_44) {
- ret = regulator_set_voltage(hdmi_pll_fs, 1800000,
- 1800000);
- if (ret) {
- pr_err("set_voltage failed for hdmi_pll_fs, ret=%d\n",
- ret);
- }
- }
- }
-
footswitch = regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(footswitch)) {
+ if (IS_ERR(footswitch))
footswitch = NULL;
- } else {
- if (hdmi_pll_fs)
- regulator_enable(hdmi_pll_fs);
-
+ else {
regulator_enable(footswitch);
mdp_footswitch_on = 1;
@@ -2161,8 +2144,6 @@
msleep(20);
regulator_enable(footswitch);
}
- if (hdmi_pll_fs)
- regulator_disable(hdmi_pll_fs);
}
mdp_clk = clk_get(&pdev->dev, "core_clk");
@@ -2507,6 +2488,9 @@
mfd->cursor_update = mdp_hw_cursor_update;
mfd->dma_fnc = mdp4_dtv_overlay;
mfd->dma = &dma_e_data;
+ mfd->do_histogram = mdp_do_histogram;
+ mfd->start_histogram = mdp_histogram_start;
+ mfd->stop_histogram = mdp_histogram_stop;
mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
break;
#endif
@@ -2655,9 +2639,6 @@
return;
}
- if (hdmi_pll_fs)
- regulator_enable(hdmi_pll_fs);
-
if (on && !mdp_footswitch_on) {
pr_debug("Enable MDP FS\n");
regulator_enable(footswitch);
@@ -2668,9 +2649,6 @@
mdp_footswitch_on = 0;
}
- if (hdmi_pll_fs)
- regulator_disable(hdmi_pll_fs);
-
mutex_unlock(&mdp_suspend_mutex);
}
diff --git a/drivers/video/msm/mhl/mhl_8334.c b/drivers/video/msm/mhl/mhl_8334.c
index d6e3f6f..646dd29 100644
--- a/drivers/video/msm/mhl/mhl_8334.c
+++ b/drivers/video/msm/mhl/mhl_8334.c
@@ -30,11 +30,11 @@
#include <linux/regulator/consumer.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/mhl_8334.h>
#include "msm_fb.h"
#include "external_common.h"
#include "hdmi_msm.h"
-#include "mhl_8334.h"
#include "mhl_i2c_utils.h"
@@ -53,6 +53,8 @@
static void release_usb_switch_open(void);
static void switch_mode(enum mhl_st_type to_mode);
static irqreturn_t mhl_tx_isr(int irq, void *dev_id);
+void (*notify_usb_online)(int online);
+static void mhl_drive_hpd(uint8_t to_state);
static struct i2c_driver mhl_sii_i2c_driver = {
.driver = {
@@ -227,6 +229,58 @@
return true;
}
+
+/* USB_HANDSHAKING FUNCTIONS */
+
+int mhl_device_discovery(const char *name, int *result)
+{
+ int timeout;
+ mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x27);
+ msleep(50);
+ if (mhl_msm_state->cur_state == POWER_STATE_D3) {
+ /* give MHL driver chance to handle RGND interrupt */
+ INIT_COMPLETION(mhl_msm_state->rgnd_done);
+ timeout = wait_for_completion_interruptible_timeout
+ (&mhl_msm_state->rgnd_done, HZ/2);
+ if (!timeout) {
+ /* most likely nothing plugged in, a USB host is */
+ /* connected, or we are already in USB mode */
+ pr_debug("Timed out, returning from discovery mode\n");
+ *result = MHL_DISCOVERY_RESULT_USB;
+ return 0;
+ }
+ *result = mhl_msm_state->mhl_mode ?
+ MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB;
+ } else
+ /* not in D3. already in MHL mode */
+ *result = MHL_DISCOVERY_RESULT_MHL;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhl_device_discovery);
+
+int mhl_register_callback(const char *name, void (*callback)(int online))
+{
+ pr_debug("%s\n", __func__);
+ if (!callback)
+ return -EINVAL;
+ if (!notify_usb_online)
+ notify_usb_online = callback;
+ return 0;
+}
+EXPORT_SYMBOL(mhl_register_callback);
+
+int mhl_unregister_callback(const char *name)
+{
+ pr_debug("%s\n", __func__);
+ if (notify_usb_online)
+ notify_usb_online = NULL;
+ return 0;
+}
+EXPORT_SYMBOL(mhl_unregister_callback);
+
+
static void cbus_reset(void)
{
uint8_t i;
@@ -240,7 +294,7 @@
/*
* REG_INTR1 and REG_INTR4
*/
- mhl_i2c_reg_write(TX_PAGE_L0, 0x0075, BIT6 | BIT5);
+ mhl_i2c_reg_write(TX_PAGE_L0, 0x0075, BIT6);
mhl_i2c_reg_write(TX_PAGE_3, 0x0022,
BIT0 | BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
/* REG5 */
@@ -340,7 +394,7 @@
/*
* Configure the initial reg settings
*/
-static void mhl_init_reg_settings(void)
+static void mhl_init_reg_settings(bool mhl_disc_en)
{
/*
@@ -419,15 +473,19 @@
/* Pull-up resistance off for IDLE state */
mhl_i2c_reg_write(TX_PAGE_3, 0x0013, 0x8C);
/* Enable CBUS Discovery */
- mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x27);
+ if (mhl_disc_en)
+ /* Enable MHL Discovery */
+ mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x27);
+ else
+ /* Disable MHL Discovery */
+ mhl_i2c_reg_write(TX_PAGE_3, 0x0010, 0x26);
mhl_i2c_reg_write(TX_PAGE_3, 0x0016, 0x20);
/* MHL CBUS Discovery - immediate comm. */
mhl_i2c_reg_write(TX_PAGE_3, 0x0012, 0x86);
/* Do not force HPD to 0 during wake-up from D3 */
- if (mhl_msm_state->cur_state != POWER_STATE_D3) {
- mhl_i2c_reg_modify(TX_PAGE_3, 0x0020,
- BIT5 | BIT4, BIT4);
- }
+ if (mhl_msm_state->cur_state != POWER_STATE_D0_MHL)
+ mhl_drive_hpd(HPD_DOWN);
+
/* Enable Auto Soft RESET */
mhl_i2c_reg_write(TX_PAGE_3, 0x0000, 0x084);
/* HDMI Transcode mode enable */
@@ -452,7 +510,10 @@
/* MHL spec requires a 100 ms wait here. */
msleep(100);
- mhl_init_reg_settings();
+ /*
+ * Need to disable MHL discovery
+ */
+ mhl_init_reg_settings(true);
/*
* Power down the chip to the
@@ -563,6 +624,7 @@
/* MHL SII 8334 chip specific init */
mhl_chip_init();
+ init_completion(&mhl_msm_state->rgnd_done);
return 0;
init_exit:
@@ -583,10 +645,9 @@
case POWER_STATE_D0_NO_MHL:
break;
case POWER_STATE_D0_MHL:
- mhl_init_reg_settings();
-
+ mhl_init_reg_settings(true);
/* REG_DISC_CTRL1 */
- mhl_i2c_reg_modify(TX_PAGE_3, 0x0010, BIT1, 0);
+ mhl_i2c_reg_modify(TX_PAGE_3, 0x0010, BIT1 | BIT0, BIT0);
/*
* TPI_DEVICE_POWER_STATE_CTRL_REG
@@ -597,16 +658,15 @@
case POWER_STATE_D3:
if (mhl_msm_state->cur_state != POWER_STATE_D3) {
/* Force HPD to 0 when not in MHL mode. */
- mhl_i2c_reg_modify(TX_PAGE_3, 0x0020,
- BIT5 | BIT4, BIT4);
-
+ mhl_drive_hpd(HPD_DOWN);
/*
* Change TMDS termination to high impedance
* on disconnection.
*/
mhl_i2c_reg_write(TX_PAGE_3, 0x0030, 0xD0);
- mhl_i2c_reg_modify(TX_PAGE_L1, 0x003D,
- BIT1 | BIT0, BIT0);
+ msleep(50);
+ mhl_i2c_reg_modify(TX_PAGE_3, 0x0010,
+ BIT1 | BIT0, BIT1);
spin_lock_irqsave(&mhl_state_lock, flags);
mhl_msm_state->cur_state = POWER_STATE_D3;
spin_unlock_irqrestore(&mhl_state_lock, flags);
@@ -619,6 +679,11 @@
static void mhl_drive_hpd(uint8_t to_state)
{
+ if (mhl_msm_state->cur_state != POWER_STATE_D0_MHL) {
+ pr_err("MHL: invalid state to ctrl HPD\n");
+ return;
+ }
+
pr_debug("%s: To state=[0x%x]\n", __func__, to_state);
if (to_state == HPD_UP) {
/*
@@ -644,6 +709,7 @@
* Disable TMDS Output on REG_TMDS_CCTRL
* Enable/Disable TMDS output (MHL TMDS output only)
*/
+ mhl_i2c_reg_modify(TX_PAGE_3, 0x20, BIT4 | BIT5, BIT4);
mhl_i2c_reg_modify(TX_PAGE_L0, 0x0080, BIT4, 0x00);
}
return;
@@ -682,20 +748,11 @@
static void mhl_msm_disconnection(void)
{
- uint8_t reg;
- /* Clear interrupts - REG INTR4 */
- reg = mhl_i2c_reg_read(TX_PAGE_3, 0x0021);
- mhl_i2c_reg_write(TX_PAGE_3, 0x0021, reg);
/*
* MHL TX CTL1
* Disabling Tx termination
*/
mhl_i2c_reg_write(TX_PAGE_3, 0x30, 0xD0);
- /*
- * MSC REQUESTOR ABORT REASON
- * Clear CBUS_HPD status
- */
- mhl_i2c_reg_modify(TX_PAGE_CBUS, 0x000D, BIT6, 0x00);
/* Change HPD line to drive it low */
mhl_drive_hpd(HPD_DOWN);
/* switch power state to D3 */
@@ -704,11 +761,11 @@
}
/*
- * If hardware detected a change in impedence and raised an INTR
- * We check the range of this impedence to infer if the connected
+ * If hardware detected a change in impedance and raised an INTR
+ * We check the range of this impedance to infer if the connected
* device is MHL or USB and take appropriate actions.
*/
-static void mhl_msm_read_rgnd_int(void)
+static int mhl_msm_read_rgnd_int(void)
{
uint8_t rgnd_imp;
@@ -720,20 +777,27 @@
* 10 - 1 kOHM ***(MHL)**** It's range 800 - 1200 OHM from MHL spec
* 11 - short (USB)
*/
- rgnd_imp = mhl_i2c_reg_read(TX_PAGE_3, 0x001C);
+ rgnd_imp = (mhl_i2c_reg_read(TX_PAGE_3, 0x001C) & (BIT1 | BIT0));
pr_debug("Imp Range read = %02X\n", (int)rgnd_imp);
-
if (0x02 == rgnd_imp) {
pr_debug("MHL: MHL DEVICE!!!\n");
+ mhl_i2c_reg_modify(TX_PAGE_3, 0x0018, BIT0, BIT0);
/*
* Handling the MHL event in driver
*/
- mhl_i2c_reg_modify(TX_PAGE_3, 0x0018, BIT0, BIT0);
+ mhl_msm_state->mhl_mode = TRUE;
+ if (notify_usb_online)
+ notify_usb_online(1);
} else {
pr_debug("MHL: NON-MHL DEVICE!!!\n");
+ mhl_msm_state->mhl_mode = FALSE;
mhl_i2c_reg_modify(TX_PAGE_3, 0x0018, BIT3, BIT3);
+ switch_mode(POWER_STATE_D3);
}
+ complete(&mhl_msm_state->rgnd_done);
+ return mhl_msm_state->mhl_mode ?
+ MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB;
}
static void force_usb_switch_open(void)
@@ -756,7 +820,7 @@
static void int_4_isr(void)
{
- uint8_t status;
+ uint8_t status, reg;
/* INTR_STATUS4 */
status = mhl_i2c_reg_read(TX_PAGE_3, 0x0021);
@@ -767,7 +831,7 @@
* do nothing.
*/
if ((0x00 == status) && (mhl_msm_state->cur_state == POWER_STATE_D3)) {
- mhl_chip_init();
+ pr_debug("MHL: spurious interrupt\n");
return;
}
if (0xFF != status) {
@@ -816,8 +880,13 @@
if (status & BIT5) {
mhl_connect_api(false);
+ /* Clear interrupts - REG INTR4 */
+ reg = mhl_i2c_reg_read(TX_PAGE_3, 0x0021);
+ mhl_i2c_reg_write(TX_PAGE_3, 0x0021, reg);
mhl_msm_disconnection();
- pr_debug("MHL Disconn Drv: INT4 Status = %02X\n",
+ if (notify_usb_online)
+ notify_usb_online(0);
+ pr_debug("MHL Disconnect Drv: INT4 Status = %02X\n",
(int)status);
}
@@ -971,6 +1040,122 @@
return;
}
+static void clear_all_intrs(void)
+{
+ uint8_t regval = 0x00;
+ /*
+ * intr status debug
+ */
+ pr_debug("********* EXITING ISR MASK CHECK ?? *************\n");
+ pr_debug("Drv: INT1 MASK = %02X\n",
+ (int) mhl_i2c_reg_read(TX_PAGE_L0, 0x0071));
+ pr_debug("Drv: INT3 MASK = %02X\n",
+ (int) mhl_i2c_reg_read(TX_PAGE_L0, 0x0077));
+ pr_debug("Drv: INT4 MASK = %02X\n",
+ (int) mhl_i2c_reg_read(TX_PAGE_3, 0x0021));
+ pr_debug("Drv: INT5 MASK = %02X\n",
+ (int) mhl_i2c_reg_read(TX_PAGE_3, 0x0023));
+ pr_debug("Drv: CBUS1 MASK = %02X\n",
+ (int) mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0009));
+ pr_debug("Drv: CBUS2 MASK = %02X\n",
+ (int) mhl_i2c_reg_read(TX_PAGE_CBUS, 0x001F));
+ pr_debug("********* END OF ISR MASK CHECK *************\n");
+
+ pr_debug("********* EXITING IN ISR ?? *************\n");
+ regval = mhl_i2c_reg_read(TX_PAGE_L0, 0x0071);
+ pr_debug("Drv: INT1 Status = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_L0, 0x0071, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_L0, 0x0072);
+ pr_debug("Drv: INT2 Status = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_L0, 0x0072, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_L0, 0x0073);
+ pr_debug("Drv: INT3 Status = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_L0, 0x0073, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_3, 0x0021);
+ pr_debug("Drv: INT4 Status = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_3, 0x0021, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_3, 0x0023);
+ pr_debug("Drv: INT5 Status = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_3, 0x0023, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0008);
+ pr_debug("Drv: cbusInt Status = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0008, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x001E);
+ pr_debug("Drv: CBUS INTR_2: %d\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x001E, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A0);
+ pr_debug("Drv: A0 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A0, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A1);
+ pr_debug("Drv: A1 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A1, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A2);
+ pr_debug("Drv: A2 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A2, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00A3);
+ pr_debug("Drv: A3 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00A3, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B0);
+ pr_debug("Drv: B0 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B0, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B1);
+ pr_debug("Drv: B1 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B1, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B2);
+ pr_debug("Drv: B2 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B2, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00B3);
+ pr_debug("Drv: B3 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00B3, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E0);
+ pr_debug("Drv: E0 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E0, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E1);
+ pr_debug("Drv: E1 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E1, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E2);
+ pr_debug("Drv: E2 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E2, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00E3);
+ pr_debug("Drv: E3 STATUS Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00E3, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F0);
+ pr_debug("Drv: F0 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F0, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F1);
+ pr_debug("Drv: F1 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F1, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F2);
+ pr_debug("Drv: F2 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F2, regval);
+
+ regval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x00F3);
+ pr_debug("Drv: F3 INT Set = %02X\n", (int)regval);
+ mhl_i2c_reg_write(TX_PAGE_CBUS, 0x00F3, regval);
+ pr_debug("********* END OF EXITING IN ISR *************\n");
+}
+
static irqreturn_t mhl_tx_isr(int irq, void *dev_id)
{
/*
@@ -997,6 +1182,7 @@
mhl_cbus_isr();
int_1_isr();
}
+ clear_all_intrs();
return IRQ_HANDLED;
}
diff --git a/drivers/video/msm/mhl/mhl_i2c_utils.c b/drivers/video/msm/mhl/mhl_i2c_utils.c
index aab6e02..ee069bb 100644
--- a/drivers/video/msm/mhl/mhl_i2c_utils.c
+++ b/drivers/video/msm/mhl/mhl_i2c_utils.c
@@ -11,9 +11,9 @@
*
*/
#include <linux/i2c.h>
+#include <linux/mhl_8334.h>
#include "mhl_i2c_utils.h"
-#include "mhl_8334.h"
uint8_t slave_addrs[MAX_PAGES] = {
DEV_PAGE_TPI_0 ,
diff --git a/drivers/video/msm/mhl/mhl_i2c_utils.h b/drivers/video/msm/mhl/mhl_i2c_utils.h
index 76498d4..5a2d199 100644
--- a/drivers/video/msm/mhl/mhl_i2c_utils.h
+++ b/drivers/video/msm/mhl/mhl_i2c_utils.h
@@ -16,8 +16,7 @@
#include <linux/i2c.h>
#include <linux/types.h>
-
-#include "mhl_defs.h"
+#include <linux/mhl_defs.h>
/*
* I2C command to the adapter to append
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 18ee3e6..5f58af7 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -107,6 +107,9 @@
static int msm_fb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg);
static int msm_fb_mmap(struct fb_info *info, struct vm_area_struct * vma);
+static int mdp_bl_scale_config(struct msm_fb_data_type *mfd,
+ struct mdp_bl_scale_data *data);
+static void msm_fb_scale_bl(__u32 *bl_lvl);
#ifdef MSM_FB_ENABLE_DBGFS
@@ -116,6 +119,7 @@
int msm_fb_debugfs_file_index;
struct dentry *msm_fb_debugfs_root;
struct dentry *msm_fb_debugfs_file[MSM_FB_MAX_DBGFS];
+static int bl_scale, bl_min_lvl;
DEFINE_MUTEX(msm_fb_notify_update_sem);
void msmfb_no_update_notify_timer_cb(unsigned long data)
@@ -372,6 +376,8 @@
mfd->panel_info.frame_count = 0;
mfd->bl_level = 0;
+ bl_scale = 1024;
+ bl_min_lvl = 255;
#ifdef CONFIG_FB_MSM_OVERLAY
mfd->overlay_play_enable = 1;
#endif
@@ -746,11 +752,41 @@
static int unset_bl_level, bl_updated;
static int bl_level_old;
+static int mdp_bl_scale_config(struct msm_fb_data_type *mfd,
+ struct mdp_bl_scale_data *data)
+{
+ int ret = 0;
+ int curr_bl = mfd->bl_level;
+ bl_scale = data->scale;
+ bl_min_lvl = data->min_lvl;
+ pr_debug("%s: update scale = %d, min_lvl = %d\n", __func__, bl_scale,
+ bl_min_lvl);
+
+ /* update current backlight to use new scaling */
+ msm_fb_set_backlight(mfd, curr_bl);
+
+ return ret;
+}
+
+static void msm_fb_scale_bl(__u32 *bl_lvl)
+{
+ __u32 temp = *bl_lvl;
+ if (temp >= bl_min_lvl) {
+ /* bl_scale is the numerator of the scaling fraction (x/1024) */
+ temp = ((*bl_lvl) * bl_scale) / 1024;
+
+ /* if less than the minimum level, use the min level */
+ if (temp < bl_min_lvl)
+ temp = bl_min_lvl;
+ }
+
+ (*bl_lvl) = temp;
+}
void msm_fb_set_backlight(struct msm_fb_data_type *mfd, __u32 bkl_lvl)
{
struct msm_fb_panel_data *pdata;
-
+ __u32 temp = bkl_lvl;
if (!mfd->panel_power_on || !bl_updated) {
unset_bl_level = bkl_lvl;
return;
@@ -758,17 +794,19 @@
unset_bl_level = 0;
}
+ msm_fb_scale_bl(&temp);
pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
if ((pdata) && (pdata->set_backlight)) {
down(&mfd->sem);
- if (bl_level_old == bkl_lvl) {
+ if (bl_level_old == temp) {
up(&mfd->sem);
return;
}
- mfd->bl_level = bkl_lvl;
+ mfd->bl_level = temp;
pdata->set_backlight(mfd);
- bl_level_old = mfd->bl_level;
+ mfd->bl_level = bkl_lvl;
+ bl_level_old = temp;
up(&mfd->sem);
}
}
@@ -3133,7 +3171,8 @@
return 0;
}
-static int msmfb_handle_pp_ioctl(struct msmfb_mdp_pp *pp_ptr)
+static int msmfb_handle_pp_ioctl(struct msm_fb_data_type *mfd,
+ struct msmfb_mdp_pp *pp_ptr)
{
int ret = -1;
@@ -3178,6 +3217,11 @@
&pp_ptr->data.qseed_cfg_data);
break;
#endif
+ case mdp_bl_scale_cfg:
+ ret = mdp_bl_scale_config(mfd, (struct mdp_bl_scale_data *)
+ &pp_ptr->data.bl_scale_data);
+ break;
+
default:
pr_warn("Unsupported request to MDP_PP IOCTL.\n");
ret = -EINVAL;
@@ -3496,7 +3540,7 @@
if (ret)
return ret;
- ret = msmfb_handle_pp_ioctl(&mdp_pp);
+ ret = msmfb_handle_pp_ioctl(mfd, &mdp_pp);
break;
default:
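
For reference, msm_fb_scale_bl() above treats bl_scale as the numerator of an x/1024 fraction and clamps the result to bl_min_lvl, while requests already below bl_min_lvl pass through unscaled. A small standalone sketch of that arithmetic, with purely illustrative numbers:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the scaling done by msm_fb_scale_bl() */
static uint32_t scale_bl(uint32_t lvl, uint32_t scale, uint32_t min_lvl)
{
	uint32_t temp = lvl;

	if (temp >= min_lvl) {
		/* scale is the numerator of the x/1024 fraction */
		temp = (lvl * scale) / 1024;
		if (temp < min_lvl)
			temp = min_lvl;	/* never drop below the floor */
	}
	return temp;
}

int main(void)
{
	printf("%u\n", scale_bl(200, 512, 30));	/* 200 * 512/1024 = 100 */
	printf("%u\n", scale_bl(20, 512, 30));	/* below min_lvl: stays 20 */
	return 0;
}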
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
index 6571245..22eaf4f 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
@@ -161,11 +161,6 @@
encoder = &ddl->codec_data.encoder;
vidc_1080p_get_encoder_sequence_header_size(
&encoder->seq_header_length);
- if ((encoder->codec.codec == VCD_CODEC_H264) &&
- (encoder->profile.profile == VCD_PROFILE_H264_BASELINE))
- if ((encoder->seq_header.align_virtual_addr) &&
- (encoder->seq_header_length > 6))
- encoder->seq_header.align_virtual_addr[6] = 0xC0;
ddl_context->ddl_callback(VCD_EVT_RESP_START, VCD_S_SUCCESS,
NULL, 0, (u32 *) ddl, ddl->client_data);
ddl_release_command_channel(ddl_context,
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
index a6001eb..d1f6e07 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
@@ -380,7 +380,7 @@
encode_profile = VIDC_1080P_PROFILE_MPEG4_ADV_SIMPLE;
break;
case VCD_PROFILE_H264_BASELINE:
- encode_profile = VIDC_1080P_PROFILE_H264_BASELINE;
+ encode_profile = VIDC_1080P_PROFILE_H264_CONSTRAINED_BASELINE;
break;
case VCD_PROFILE_H264_MAIN:
encode_profile = VIDC_1080P_PROFILE_H264_MAIN;
diff --git a/drivers/video/msm/vidc/1080p/ddl/vidc.h b/drivers/video/msm/vidc/1080p/ddl/vidc.h
index 7460ef3..7b8dc6f 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vidc.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vidc.h
@@ -311,6 +311,7 @@
#define VIDC_1080P_PROFILE_H264_MAIN 0x00000000
#define VIDC_1080P_PROFILE_H264_HIGH 0x00000001
#define VIDC_1080P_PROFILE_H264_BASELINE 0x00000002
+#define VIDC_1080P_PROFILE_H264_CONSTRAINED_BASELINE 0x00000003
enum vidc_1080p_decode{
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 28c0917..b2b79b1 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -259,6 +259,7 @@
header-y += msdos_fs.h
header-y += msg.h
header-y += msm_adc.h
+header-y += epm_adc.h
header-y += mtio.h
header-y += n_r3964.h
header-y += nbd.h
diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h
index e0058d3..d19dfa5 100644
--- a/include/linux/dvb/dmx.h
+++ b/include/linux/dvb/dmx.h
@@ -36,6 +36,9 @@
#define DMX_FILTER_SIZE 16
+/* Minimum recording chunk size for which an event is generated */
+#define DMX_REC_BUFF_CHUNK_MIN_SIZE (100*188)
+
typedef enum
{
DMX_OUT_DECODER, /* Streaming directly to decoder. */
@@ -135,7 +138,6 @@
enum dmx_indexing_video_profile profile;
};
-
struct dmx_pes_filter_params
{
__u16 pid;
@@ -144,6 +146,18 @@
dmx_pes_type_t pes_type;
__u32 flags;
+ /*
+ * The following configures when the event
+ * DMX_EVENT_NEW_REC_CHUNK will be triggered.
+ * When new recorded data is received with a size
+ * equal to or larger than this value, a new event
+ * will be triggered. This is relevant when the
+ * output is DMX_OUT_TS_TAP or DMX_OUT_TSDEMUX_TAP;
+ * the size must be at least DMX_REC_BUFF_CHUNK_MIN_SIZE
+ * and smaller than the buffer size.
+ */
+ __u32 rec_chunk_size;
+
struct dmx_indexing_video_params video_params;
};
@@ -170,6 +184,129 @@
int error;
};
+/* Events associated with each demux filter */
+enum dmx_event {
+ /* New PES packet is ready to be consumed */
+ DMX_EVENT_NEW_PES,
+
+ /* New section is ready to be consumed */
+ DMX_EVENT_NEW_SECTION,
+
+ /* New recording chunk is ready to be consumed */
+ DMX_EVENT_NEW_REC_CHUNK,
+
+ /* New PCR value is ready */
+ DMX_EVENT_NEW_PCR,
+
+ /* Overflow */
+ DMX_EVENT_BUFFER_OVERFLOW,
+
+ /* Section was dropped due to CRC error */
+ DMX_EVENT_SECTION_CRC_ERROR,
+
+ /* End-of-stream, no more data from this filter */
+ DMX_EVENT_EOS
+};
+
+/* Flags passed in filter events */
+
+/* Continuity counter error was detected */
+#define DMX_FILTER_CC_ERROR 0x01
+
+/* Discontinuity indicator was set */
+#define DMX_FILTER_DISCONTINUITY_INDEICATOR 0x02
+
+/* PES length in the PES header is not correct */
+#define DMX_FILTER_PES_LENGTH_ERROR 0x04
+
+
+/* PES info associated with DMX_EVENT_NEW_PES event */
+struct dmx_pes_event_info {
+ /* Offset at which PES information starts */
+ __u32 base_offset;
+
+ /*
+ * Start offset at which PES data
+ * from the stream starts.
+ * Equal to base_offset if PES data
+ * starts from the beginning.
+ */
+ __u32 start_offset;
+
+ /* Total length holding the PES information */
+ __u32 total_length;
+
+ /* Actual length holding the PES data */
+ __u32 actual_length;
+
+ /* Local receiver timestamp in 27MHz */
+ __u64 stc;
+
+ /* Flags passed in filter events */
+ __u32 flags;
+};
+
+/* Section info associated with DMX_EVENT_NEW_SECTION event */
+struct dmx_section_event_info {
+ /* Offset at which section information starts */
+ __u32 base_offset;
+
+ /*
+ * Start offset at which section data
+ * from the stream starts.
+ * Equal to base_offset if section data
+ * starts from the beginning.
+ */
+ __u32 start_offset;
+
+ /* Total length holding the section information */
+ __u32 total_length;
+
+ /* Actual length holding the section data */
+ __u32 actual_length;
+
+ /* Flags passed in filter events */
+ __u32 flags;
+};
+
+/* Recording info associated with DMX_EVENT_NEW_REC_CHUNK event */
+struct dmx_rec_chunk_event_info {
+ /* Offset at which recording chunk starts */
+ __u32 offset;
+
+ /* Size of recording chunk in bytes */
+ __u32 size;
+};
+
+/* PCR info associated with DMX_EVENT_NEW_PCR event */
+struct dmx_pcr_event_info {
+ /* Local timestamp in 27MHz
+ * when PCR packet was received
+ */
+ __u64 stc;
+
+ /* PCR value in 27MHz */
+ __u64 pcr;
+
+ /* Flags passed in filter events */
+ __u32 flags;
+};
+
+/*
+ * Filter event returned through DMX_GET_EVENT.
+ * A poll() with POLLPRI blocks until events are available.
+ */
+struct dmx_filter_event {
+ enum dmx_event type;
+
+ union {
+ struct dmx_pes_event_info pes;
+ struct dmx_section_event_info section;
+ struct dmx_rec_chunk_event_info recording_chunk;
+ struct dmx_pcr_event_info pcr;
+ } params;
+};
+
typedef struct dmx_caps {
__u32 caps;
@@ -292,5 +429,6 @@
#define DMX_RELEASE_DATA _IO('o', 57)
#define DMX_FEED_DATA _IO('o', 58)
#define DMX_SET_PLAYBACK_MODE _IOW('o', 59, enum dmx_playback_mode_t)
+#define DMX_GET_EVENT _IOR('o', 60, struct dmx_filter_event)
#endif /*_DVBDMX_H_*/
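
A userspace consumer of the new event interface typically sets rec_chunk_size on the PES filter, waits with poll(POLLPRI), and drains events with DMX_GET_EVENT. A minimal sketch, assuming a demux node such as /dev/dvb/adapter0/demux0 and the standard DMX_SET_PES_FILTER/DMX_START ioctls; the PID and device path are illustrative:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

int main(void)
{
	struct dmx_pes_filter_params pes;
	struct dmx_filter_event event;
	struct pollfd pfd;
	int fd = open("/dev/dvb/adapter0/demux0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&pes, 0, sizeof(pes));
	pes.pid = 0x100;			/* example PID */
	pes.input = DMX_IN_FRONTEND;
	pes.output = DMX_OUT_TS_TAP;
	pes.pes_type = DMX_PES_OTHER;
	/* >= DMX_REC_BUFF_CHUNK_MIN_SIZE and < the buffer size */
	pes.rec_chunk_size = DMX_REC_BUFF_CHUNK_MIN_SIZE;
	ioctl(fd, DMX_SET_PES_FILTER, &pes);
	ioctl(fd, DMX_START);

	pfd.fd = fd;
	pfd.events = POLLPRI;
	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI)) {
		if (ioctl(fd, DMX_GET_EVENT, &event) < 0)
			break;
		if (event.type == DMX_EVENT_NEW_REC_CHUNK)
			printf("chunk at %u, %u bytes\n",
			       event.params.recording_chunk.offset,
			       event.params.recording_chunk.size);
	}
	return 0;
}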
diff --git a/include/linux/epm_adc.h b/include/linux/epm_adc.h
index 1af97fe..9cf2acf 100644
--- a/include/linux/epm_adc.h
+++ b/include/linux/epm_adc.h
@@ -1,16 +1,3 @@
-/*
- * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
#ifndef __EPM_ADC_H
#define __EPM_ADC_H
@@ -26,6 +13,7 @@
int32_t physical;
};
+#ifdef __KERNEL__
struct epm_chan_properties {
uint32_t resistorValue;
uint32_t gain;
@@ -41,6 +29,7 @@
uint32_t bus_id;
uint32_t gpio_expander_base_addr;
};
+#endif
#define EPM_ADC_IOCTL_CODE 0x91
diff --git a/include/linux/leds-msm-tricolor.h b/include/linux/leds-msm-tricolor.h
new file mode 100644
index 0000000..314645e
--- /dev/null
+++ b/include/linux/leds-msm-tricolor.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LEDS_MSM_TRICOLOR__
+#define __LEDS_MSM_TRICOLOR__
+enum tri_color_led_color {
+ LED_COLOR_RED,
+ LED_COLOR_GREEN,
+ LED_COLOR_BLUE,
+ LED_COLOR_MAX
+};
+#endif
diff --git a/include/linux/mfd/wcd9xxx/core.h b/include/linux/mfd/wcd9xxx/core.h
index 2657ec3..17be2cb 100644
--- a/include/linux/mfd/wcd9xxx/core.h
+++ b/include/linux/mfd/wcd9xxx/core.h
@@ -155,6 +155,11 @@
int num_rx_port;
int num_tx_port;
+
+ u8 idbyte_0;
+ u8 idbyte_1;
+ u8 idbyte_2;
+ u8 idbyte_3;
};
int wcd9xxx_reg_read(struct wcd9xxx *wcd9xxx, unsigned short reg);
diff --git a/drivers/video/msm/mhl/mhl_8334.h b/include/linux/mhl_8334.h
similarity index 66%
rename from drivers/video/msm/mhl/mhl_8334.h
rename to include/linux/mhl_8334.h
index eba544a..1b19103 100644
--- a/drivers/video/msm/mhl/mhl_8334.h
+++ b/include/linux/mhl_8334.h
@@ -17,9 +17,8 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <mach/board.h>
-
-#include "mhl_devcap.h"
-#include "mhl_defs.h"
+#include <linux/mhl_devcap.h>
+#include <linux/mhl_defs.h>
#define MHL_DEVICE_NAME "sii8334"
#define MHL_DRIVER_NAME "sii8334"
@@ -27,12 +26,48 @@
#define HPD_UP 1
#define HPD_DOWN 0
+enum discovery_result_enum {
+ MHL_DISCOVERY_RESULT_USB = 0,
+ MHL_DISCOVERY_RESULT_MHL,
+};
+
+/* USB driver interface */
+
+#ifdef CONFIG_FB_MSM_HDMI_MHL_8334
+/* Discover whether the attached device is USB or MHL (reported via *result) */
+extern int mhl_device_discovery(const char *name, int *result);
+
+/* Register/unregister the MHL cable plug callback */
+extern int mhl_register_callback
+ (const char *name, void (*callback)(int online));
+extern int mhl_unregister_callback(const char *name);
+#else
+static inline int mhl_device_discovery(const char *name, int *result)
+{
+ return -ENODEV;
+}
+
+static inline int
+ mhl_register_callback(const char *name, void (*callback)(int online))
+{
+ return -ENODEV;
+}
+
+static inline int mhl_unregister_callback(const char *name)
+{
+ return -ENODEV;
+}
+#endif
+
struct mhl_msm_state_t {
struct i2c_client *i2c_client;
struct i2c_driver *i2c_driver;
uint8_t cur_state;
uint8_t chip_rev_id;
struct msm_mhl_platform_data *mhl_data;
+ /* Device discovery state */
+ int mhl_mode;
+ struct completion rgnd_done;
};
enum {
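
The discovery interface above lets a client such as the OTG driver ask whether the attached peripheral is a USB or an MHL device and be told about cable plug events; when CONFIG_FB_MSM_HDMI_MHL_8334 is not set, the inline stubs return -ENODEV. A hedged usage sketch from a hypothetical client (the callback body and the checks are illustrative, not the actual msm_otg changes):

#include <linux/kernel.h>
#include <linux/mhl_8334.h>

static void example_mhl_online_cb(int online)
{
	/* Illustrative only: react to an MHL cable plug/unplug event */
	pr_info("MHL cable %s\n", online ? "connected" : "disconnected");
}

static int example_check_peripheral(const char *mhl_dev_name)
{
	int result = MHL_DISCOVERY_RESULT_USB;
	int rc;

	rc = mhl_register_callback(mhl_dev_name, example_mhl_online_cb);
	if (rc)
		return rc;	/* -ENODEV when MHL support is not built in */

	rc = mhl_device_discovery(mhl_dev_name, &result);
	if (!rc && result == MHL_DISCOVERY_RESULT_MHL)
		pr_info("%s: MHL device detected\n", __func__);

	return rc;
}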
diff --git a/drivers/video/msm/mhl/mhl_defs.h b/include/linux/mhl_defs.h
similarity index 100%
rename from drivers/video/msm/mhl/mhl_defs.h
rename to include/linux/mhl_defs.h
diff --git a/drivers/video/msm/mhl/mhl_devcap.h b/include/linux/mhl_devcap.h
similarity index 100%
rename from drivers/video/msm/mhl/mhl_devcap.h
rename to include/linux/mhl_devcap.h
diff --git a/include/linux/msm_charm.h b/include/linux/msm_charm.h
index c31e493..44d2553 100644
--- a/include/linux/msm_charm.h
+++ b/include/linux/msm_charm.h
@@ -11,10 +11,15 @@
#define RAM_DUMP_DONE _IOW(CHARM_CODE, 6, int)
#define WAIT_FOR_RESTART _IOR(CHARM_CODE, 7, int)
#define GET_DLOAD_STATUS _IOR(CHARM_CODE, 8, int)
+#define IMAGE_UPGRADE _IOW(CHARM_CODE, 9, int)
enum charm_boot_type {
CHARM_NORMAL_BOOT = 0,
CHARM_RAM_DUMPS,
};
+enum image_upgrade_type {
+ APQ_CONTROLLED_UPGRADE = 0,
+ MDM_CONTROLLED_UPGRADE,
+};
#endif
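
IMAGE_UPGRADE passes an int selecting who controls the modem image upgrade, per enum image_upgrade_type. A minimal userspace sketch, assuming the header is exported and that the driver exposes a character device; the /dev/mdm path is a placeholder, not taken from this patch:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msm_charm.h>

int request_apq_controlled_upgrade(void)
{
	int mode = APQ_CONTROLLED_UPGRADE;
	int fd = open("/dev/mdm", O_RDWR);	/* placeholder node */
	int rc;

	if (fd < 0)
		return -1;
	rc = ioctl(fd, IMAGE_UPGRADE, &mode);
	close(fd);
	return rc;
}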
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index 19728fe..d8edbc8 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -434,7 +434,6 @@
uint32_t *data;
};
-
struct mdp_lut_cfg_data {
uint32_t lut_type;
union {
@@ -452,12 +451,17 @@
uint32_t *data;
};
+struct mdp_bl_scale_data {
+ uint32_t min_lvl;
+ uint32_t scale;
+};
enum {
mdp_op_pcc_cfg,
mdp_op_csc_cfg,
mdp_op_lut_cfg,
mdp_op_qseed_cfg,
+ mdp_bl_scale_cfg,
mdp_op_max,
};
@@ -468,6 +472,7 @@
struct mdp_csc_cfg_data csc_cfg_data;
struct mdp_lut_cfg_data lut_cfg_data;
struct mdp_qseed_cfg_data qseed_cfg_data;
+ struct mdp_bl_scale_data bl_scale_data;
} data;
};
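
Userspace selects the new backlight-scaling op through the MDP post-processing ioctl by setting op to mdp_bl_scale_cfg and filling bl_scale_data. A sketch under the assumption that the MSMFB_MDP_PP ioctl and the struct msmfb_mdp_pp wrapper (an op field plus the union shown above as data) are available, and that the framebuffer node is at its usual path:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msm_mdp.h>

/* Scale the backlight to scale/1024, never dropping below min_lvl */
int set_bl_scale(uint32_t scale, uint32_t min_lvl)
{
	struct msmfb_mdp_pp pp;
	int fd = open("/dev/graphics/fb0", O_RDWR);	/* assumed node */
	int rc;

	if (fd < 0)
		return -1;

	memset(&pp, 0, sizeof(pp));
	pp.op = mdp_bl_scale_cfg;
	pp.data.bl_scale_data.scale = scale;	/* e.g. 512 == 50% */
	pp.data.bl_scale_data.min_lvl = min_lvl;

	rc = ioctl(fd, MSMFB_MDP_PP, &pp);
	close(fd);
	return rc;
}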
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index ed136ad..e3d59cd 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -119,6 +119,11 @@
extern void early_init_devtree(void *);
#else /* CONFIG_OF_FLATTREE */
static inline void unflatten_device_tree(void) {}
+static inline void *of_get_flat_dt_prop(unsigned long node, const char *name,
+ unsigned long *size) { return NULL; }
+
+static inline int of_flat_dt_is_compatible(unsigned long node,
+ const char *name) { return 0; }
#endif /* CONFIG_OF_FLATTREE */
#endif /* __ASSEMBLY__ */
diff --git a/include/linux/qseecom.h b/include/linux/qseecom.h
index 0fcf96f..b0f089b 100644
--- a/include/linux/qseecom.h
+++ b/include/linux/qseecom.h
@@ -110,9 +110,11 @@
/*
* struct qseecom_qseos_app_load_query - verify if app is loaded in qsee
* @app_name[MAX_APP_NAME_SIZE]- name of the app.
+ * @app_id - app id.
*/
struct qseecom_qseos_app_load_query {
char app_name[MAX_APP_NAME_SIZE]; /* in */
+ int app_id; /* out */
};
#define QSEECOM_IOC_MAGIC 0x97
diff --git a/include/linux/test-iosched.h b/include/linux/test-iosched.h
index 8054409..1e428c5 100644
--- a/include/linux/test-iosched.h
+++ b/include/linux/test-iosched.h
@@ -65,6 +65,7 @@
REQ_UNIQUE_NONE,
REQ_UNIQUE_DISCARD,
REQ_UNIQUE_FLUSH,
+ REQ_UNIQUE_SANITIZE,
};
/**
diff --git a/include/linux/tick.h b/include/linux/tick.h
index ab8be90..494a314 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -44,7 +44,6 @@
* @idle_exittime: Time when the idle state was left
* @idle_sleeptime: Sum of the time slept in idle with sched tick stopped
* @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding
- * @sleep_length: Duration of the current idle sleep
* @do_timer_lst: CPU was the last one doing do_timer before going idle
*/
struct tick_sched {
@@ -63,7 +62,6 @@
ktime_t idle_exittime;
ktime_t idle_sleeptime;
ktime_t iowait_sleeptime;
- ktime_t sleep_length;
unsigned long last_jiffies;
unsigned long next_jiffies;
ktime_t idle_expires;
diff --git a/include/linux/usb/android.h b/include/linux/usb/android.h
index bf65ebb..7c2b33b 100644
--- a/include/linux/usb/android.h
+++ b/include/linux/usb/android.h
@@ -21,6 +21,7 @@
int (*update_pid_and_serial_num)(uint32_t, const char *);
u32 swfi_latency;
u8 usb_core_id;
+ bool cdrom;
};
#endif /* __LINUX_USB_ANDROID_H */
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index c0a23a3..3b1d06d 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -199,6 +199,7 @@
* @core_clk_always_on_workaround: Don't disable core_clk when
* USB enters LPM.
* @bus_scale_table: parameters for bus bandwidth requirements
+ * @mhl_dev_name: MHL device name used to register with MHL driver.
*/
struct msm_otg_platform_data {
int *phy_init_seq;
@@ -216,6 +217,7 @@
bool enable_lpm_on_dev_suspend;
bool core_clk_always_on_workaround;
struct msm_bus_scale_pdata *bus_scale_table;
+ const char *mhl_dev_name;
};
/* Timeout (in msec) values (min - max) associated with OTG timers */
@@ -287,6 +289,7 @@
* @id_timer: The timer used for polling ID line to detect ACA states.
* @xo_handle: TCXO buffer handle
* @bus_perf_client: Bus performance client handle to request BUS bandwidth
+ * @mhl_enabled: MHL driver registration successful and MHL enabled.
*/
struct msm_otg {
struct usb_phy phy;
@@ -314,6 +317,7 @@
#define A_BUS_SUSPEND 14
#define A_CONN 15
#define B_BUS_REQ 16
+#define MHL 17
unsigned long inputs;
struct work_struct sm_work;
bool sm_work_pending;
@@ -333,6 +337,7 @@
unsigned long caps;
struct msm_xo_voter *xo_handle;
uint32_t bus_perf_client;
+ bool mhl_enabled;
/*
* Allowing PHY power collpase turns off the HSUSB 3.3v and 1.8v
* analog regulators while going to low power mode.
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 42f7349..3e2f39b 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -1751,6 +1751,31 @@
#define V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF (V4L2_CID_MPEG_MSM_VIDC_BASE+18)
#define V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS (V4L2_CID_MPEG_MSM_VIDC_BASE+19)
+#define V4L2_CID_MPEG_VIDC_VIDEO_H263_PROFILE (V4L2_CID_MPEG_MSM_VIDC_BASE+20)
+enum v4l2_mpeg_vidc_video_h263_profile {
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BASELINE = 0,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_H320CODING = 1,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_BACKWARDCOMPATIBLE = 2,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV2 = 3,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_ISWV3 = 4,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHCOMPRESSION = 5,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERNET = 6,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_INTERLACE = 7,
+ V4L2_MPEG_VIDC_VIDEO_H263_PROFILE_HIGHLATENCY = 8,
+};
+
+#define V4L2_CID_MPEG_VIDC_VIDEO_H263_LEVEL (V4L2_CID_MPEG_MSM_VIDC_BASE+21)
+enum v4l2_mpeg_vidc_video_h263_level {
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_1_0 = 0,
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_2_0 = 1,
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_3_0 = 2,
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_0 = 3,
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_4_5 = 4,
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_5_0 = 5,
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_6_0 = 6,
+ V4L2_MPEG_VIDC_VIDEO_H263_LEVEL_7_0 = 7,
+};
+
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1)
@@ -2047,6 +2072,7 @@
#define V4L2_DEC_CMD_STOP (1)
#define V4L2_DEC_CMD_PAUSE (2)
#define V4L2_DEC_CMD_RESUME (3)
+#define V4L2_DEC_QCOM_CMD_FLUSH (4)
/* Flags for V4L2_DEC_CMD_START */
#define V4L2_DEC_CMD_START_MUTE_AUDIO (1 << 0)
@@ -2058,6 +2084,10 @@
#define V4L2_DEC_CMD_STOP_TO_BLACK (1 << 0)
#define V4L2_DEC_CMD_STOP_IMMEDIATELY (1 << 1)
+/* Flags for V4L2_DEC_QCOM_CMD_FLUSH */
+#define V4L2_DEC_QCOM_CMD_FLUSH_OUTPUT (1 << 0)
+#define V4L2_DEC_QCOM_CMD_FLUSH_CAPTURE (1 << 1)
+
/* Play format requirements (returned by the driver): */
/* The decoder has no special format requirements */
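
The Qualcomm-specific flush is issued through the standard VIDIOC_DECODER_CMD ioctl, with the new flags selecting which queues to flush. A minimal sketch; the decoder node path is a placeholder:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Flush both the OUTPUT (bitstream) and CAPTURE (decoded) queues */
int flush_decoder(void)
{
	struct v4l2_decoder_cmd cmd;
	int fd = open("/dev/video32", O_RDWR);	/* placeholder node */
	int rc;

	if (fd < 0)
		return -1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = V4L2_DEC_QCOM_CMD_FLUSH;
	cmd.flags = V4L2_DEC_QCOM_CMD_FLUSH_OUTPUT |
		    V4L2_DEC_QCOM_CMD_FLUSH_CAPTURE;

	rc = ioctl(fd, VIDIOC_DECODER_CMD, &cmd);
	close(fd);
	return rc;
}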
diff --git a/include/media/vcap_v4l2.h b/include/media/vcap_v4l2.h
index 045c107..f7d1e6b 100644
--- a/include/media/vcap_v4l2.h
+++ b/include/media/vcap_v4l2.h
@@ -126,7 +126,6 @@
struct vp_work_t {
struct work_struct work;
struct vcap_client_data *cd;
- uint32_t irq;
};
struct vcap_dev {
@@ -221,9 +220,6 @@
extern struct vcap_hacked_vals hacked_buf[];
#endif
-int free_ion_handle(struct vcap_dev *dev, struct vb2_queue *q,
- struct v4l2_buffer *b);
-
-int get_phys_addr(struct vcap_dev *dev, struct vb2_queue *q,
- struct v4l2_buffer *b);
+int vcvp_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
+int vcvp_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b);
#endif
diff --git a/include/sound/apr_audio.h b/include/sound/apr_audio.h
index c770f13..979db58 100644
--- a/include/sound/apr_audio.h
+++ b/include/sound/apr_audio.h
@@ -402,11 +402,15 @@
#define AFE_MODULE_ID_PORT_INFO 0x00010200
/* Module ID for the loopback-related parameters. */
#define AFE_MODULE_LOOPBACK 0x00010205
-struct afe_param_payload {
+struct afe_param_payload_base {
u32 module_id;
u32 param_id;
u16 param_size;
u16 reserved;
+} __packed;
+
+struct afe_param_payload {
+ struct afe_param_payload_base base;
union {
struct afe_param_sidetone_gain sidetone_gain;
struct afe_param_sampling_rate sampling_rate;
@@ -1348,6 +1352,14 @@
u32 uid;
} __attribute__((packed));
+#define ASM_DATA_CMD_READ_COMPRESSED 0x00010DBC
+struct asm_stream_cmd_read_compressed {
+ struct apr_hdr hdr;
+ u32 buf_add;
+ u32 buf_size;
+ u32 uid;
+} __packed;
+
#define ASM_DATA_CMD_MEDIA_FORMAT_UPDATE 0x00010BDC
#define ASM_DATA_EVENT_ENC_SR_CM_NOTIFY 0x00010BDE
struct asm_stream_media_format_update{
@@ -1411,6 +1423,19 @@
u32 id;
} __attribute__((packed));
+#define ASM_DATA_EVENT_READ_COMPRESSED_DONE 0x00010DBD
+struct asm_data_event_read_compressed_done {
+ u32 status;
+ u32 buffer_add;
+ u32 enc_frame_size;
+ u32 offset;
+ u32 msw_ts;
+ u32 lsw_ts;
+ u32 flags;
+ u32 num_frames;
+ u32 id;
+} __packed;
+
#define ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY 0x00010C65
struct asm_data_event_sr_cm_change_notify {
u32 sample_rate;
diff --git a/include/sound/compress_params.h b/include/sound/compress_params.h
index 75558bf..31d684b 100644
--- a/include/sound/compress_params.h
+++ b/include/sound/compress_params.h
@@ -56,6 +56,15 @@
#define MAX_NUM_CODEC_DESCRIPTORS 32
#define MAX_NUM_BITRATES 32
+/* compressed TX */
+#define MAX_NUM_FRAMES_PER_BUFFER 1
+#define COMPRESSED_META_DATA_MODE 0x10
+#define META_DATA_LEN_BYTES 36
+#define Q6_AC3_DECODER 0x00010BF6
+#define Q6_EAC3_DECODER 0x00010C3C
+#define Q6_DTS 0x00010D88
+#define Q6_DTS_LBR 0x00010DBB
+
/* Codecs are listed linearly to allow for extensibility */
#define SND_AUDIOCODEC_PCM ((__u32) 0x00000001)
#define SND_AUDIOCODEC_MP3 ((__u32) 0x00000002)
diff --git a/include/sound/q6asm.h b/include/sound/q6asm.h
index d38dbd5..ea77974 100644
--- a/include/sound/q6asm.h
+++ b/include/sound/q6asm.h
@@ -151,6 +151,7 @@
atomic_t cmd_state;
atomic_t time_flag;
+ atomic_t nowait_cmd_cnt;
wait_queue_head_t cmd_wait;
wait_queue_head_t time_wait;
@@ -182,7 +183,8 @@
int q6asm_open_read(struct audio_client *ac, uint32_t format);
-int q6asm_open_read_compressed(struct audio_client *ac, uint32_t format);
+int q6asm_open_read_compressed(struct audio_client *ac,
+ uint32_t frames_per_buffer, uint32_t meta_data_mode);
int q6asm_open_write(struct audio_client *ac, uint32_t format);
@@ -203,6 +205,9 @@
int q6asm_async_read(struct audio_client *ac,
struct audio_aio_read_param *param);
+int q6asm_async_read_compressed(struct audio_client *ac,
+ struct audio_aio_read_param *param);
+
int q6asm_read(struct audio_client *ac);
int q6asm_read_nolock(struct audio_client *ac);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 08f52bb..c6cd85b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -447,7 +447,6 @@
out:
ts->next_jiffies = next_jiffies;
ts->last_jiffies = last_jiffies;
- ts->sleep_length = ktime_sub(dev->next_event, now);
}
/**
@@ -516,8 +515,8 @@
ktime_t tick_nohz_get_sleep_length(void)
{
struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
-
- return ts->sleep_length;
+ struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+ return ktime_sub(dev->next_event, ts->idle_entrytime);
}
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index 74ae595..3164a9b 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -3747,20 +3747,6 @@
return 0;
}
-static void tabla_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct wcd9xxx *tabla_core = dev_get_drvdata(dai->codec->dev->parent);
- pr_debug("%s(): substream = %s stream = %d\n" , __func__,
- substream->name, substream->stream);
- if ((tabla_core != NULL) &&
- (tabla_core->dev != NULL) &&
- (tabla_core->dev->parent != NULL)) {
- pm_runtime_mark_last_busy(tabla_core->dev->parent);
- pm_runtime_put(tabla_core->dev->parent);
- }
-}
-
int tabla_mclk_enable(struct snd_soc_codec *codec, int mclk_enable, bool dapm)
{
struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
@@ -4310,7 +4296,6 @@
static struct snd_soc_dai_ops tabla_dai_ops = {
.startup = tabla_startup,
- .shutdown = tabla_shutdown,
.hw_params = tabla_hw_params,
.set_sysclk = tabla_set_dai_sysclk,
.set_fmt = tabla_set_dai_fmt,
@@ -4481,9 +4466,17 @@
u32 ret = 0;
codec->control_data = dev_get_drvdata(codec->dev->parent);
tabla = codec->control_data;
+
/* Execute the callback only if interface type is slimbus */
- if (tabla_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+ if (tabla_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+ if (event == SND_SOC_DAPM_POST_PMD && (tabla != NULL) &&
+ (tabla->dev != NULL) &&
+ (tabla->dev->parent != NULL)) {
+ pm_runtime_mark_last_busy(tabla->dev->parent);
+ pm_runtime_put(tabla->dev->parent);
+ }
return 0;
+ }
pr_debug("%s: %s %d\n", __func__, w->name, event);
@@ -4534,6 +4527,12 @@
ret = tabla_codec_enable_chmask(tabla_p,
SND_SOC_DAPM_POST_PMD,
j);
+ if ((tabla != NULL) &&
+ (tabla->dev != NULL) &&
+ (tabla->dev->parent != NULL)) {
+ pm_runtime_mark_last_busy(tabla->dev->parent);
+ pm_runtime_put(tabla->dev->parent);
+ }
}
}
return ret;
@@ -4553,8 +4552,15 @@
tabla = codec->control_data;
/* Execute the callback only if interface type is slimbus */
- if (tabla_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS)
+ if (tabla_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
+ if (event == SND_SOC_DAPM_POST_PMD && (tabla != NULL) &&
+ (tabla->dev != NULL) &&
+ (tabla->dev->parent != NULL)) {
+ pm_runtime_mark_last_busy(tabla->dev->parent);
+ pm_runtime_put(tabla->dev->parent);
+ }
return 0;
+ }
pr_debug("%s(): %s %d\n", __func__, w->name, event);
@@ -4604,6 +4610,12 @@
ret = tabla_codec_enable_chmask(tabla_p,
SND_SOC_DAPM_POST_PMD,
j);
+ if ((tabla != NULL) &&
+ (tabla->dev != NULL) &&
+ (tabla->dev->parent != NULL)) {
+ pm_runtime_mark_last_busy(tabla->dev->parent);
+ pm_runtime_put(tabla->dev->parent);
+ }
}
}
return ret;
diff --git a/sound/soc/msm/apq8064.c b/sound/soc/msm/apq8064.c
index a4bc2ef..d8a4624 100644
--- a/sound/soc/msm/apq8064.c
+++ b/sound/soc/msm/apq8064.c
@@ -98,8 +98,6 @@
static struct clk *codec_clk;
static int clk_users;
-static int msm_headset_gpios_configured;
-
static struct snd_soc_jack hs_jack;
static struct snd_soc_jack button_jack;
@@ -1971,57 +1969,6 @@
static struct platform_device *msm_snd_device;
-static int msm_configure_headset_mic_gpios(void)
-{
- int ret;
- struct pm_gpio param = {
- .direction = PM_GPIO_DIR_OUT,
- .output_buffer = PM_GPIO_OUT_BUF_CMOS,
- .output_value = 1,
- .pull = PM_GPIO_PULL_NO,
- .vin_sel = PM_GPIO_VIN_S4,
- .out_strength = PM_GPIO_STRENGTH_MED,
- .function = PM_GPIO_FUNC_NORMAL,
- };
-
- ret = gpio_request(PM8921_GPIO_PM_TO_SYS(23), "AV_SWITCH");
- if (ret) {
- pr_err("%s: Failed to request gpio %d\n", __func__,
- PM8921_GPIO_PM_TO_SYS(23));
- return ret;
- }
-
- ret = pm8xxx_gpio_config(PM8921_GPIO_PM_TO_SYS(23), ¶m);
- if (ret)
- pr_err("%s: Failed to configure gpio %d\n", __func__,
- PM8921_GPIO_PM_TO_SYS(23));
- else
- gpio_direction_output(PM8921_GPIO_PM_TO_SYS(23), 0);
-
- ret = gpio_request(PM8921_GPIO_PM_TO_SYS(35), "US_EURO_SWITCH");
- if (ret) {
- pr_err("%s: Failed to request gpio %d\n", __func__,
- PM8921_GPIO_PM_TO_SYS(35));
- gpio_free(PM8921_GPIO_PM_TO_SYS(23));
- return ret;
- }
- ret = pm8xxx_gpio_config(PM8921_GPIO_PM_TO_SYS(35), ¶m);
- if (ret)
- pr_err("%s: Failed to configure gpio %d\n", __func__,
- PM8921_GPIO_PM_TO_SYS(35));
- else
- gpio_direction_output(PM8921_GPIO_PM_TO_SYS(35), 0);
-
- return 0;
-}
-static void msm_free_headset_mic_gpios(void)
-{
- if (msm_headset_gpios_configured) {
- gpio_free(PM8921_GPIO_PM_TO_SYS(23));
- gpio_free(PM8921_GPIO_PM_TO_SYS(35));
- }
-}
-
static int __init msm_audio_init(void)
{
int ret;
@@ -2052,12 +1999,6 @@
return ret;
}
- if (msm_configure_headset_mic_gpios()) {
- pr_err("%s Fail to configure headset mic gpios\n", __func__);
- msm_headset_gpios_configured = 0;
- } else
- msm_headset_gpios_configured = 1;
-
mutex_init(&cdc_mclk_mutex);
return ret;
@@ -2070,7 +2011,6 @@
pr_err("%s: Not the right machine type\n", __func__);
return ;
}
- msm_free_headset_mic_gpios();
platform_device_unregister(msm_snd_device);
if (mbhc_cfg.gpio)
gpio_free(mbhc_cfg.gpio);
diff --git a/sound/soc/msm/msm-compr-q6.c b/sound/soc/msm/msm-compr-q6.c
index c894921..35cbb5b 100644
--- a/sound/soc/msm/msm-compr-q6.c
+++ b/sound/soc/msm/msm-compr-q6.c
@@ -38,8 +38,9 @@
/* Allocate the worst case frame size for compressed audio */
#define COMPRE_CAPTURE_HEADER_SIZE (sizeof(struct snd_compr_audio_info))
#define COMPRE_CAPTURE_MAX_FRAME_SIZE (6144)
-#define COMPRE_CAPTURE_PERIOD_SIZE (COMPRE_CAPTURE_MAX_FRAME_SIZE + \
- COMPRE_CAPTURE_HEADER_SIZE)
+#define COMPRE_CAPTURE_PERIOD_SIZE ((COMPRE_CAPTURE_MAX_FRAME_SIZE + \
+ COMPRE_CAPTURE_HEADER_SIZE) * \
+ MAX_NUM_FRAMES_PER_BUFFER)
struct snd_msm {
struct msm_audio *prtd;
@@ -207,6 +208,31 @@
q6asm_async_read(prtd->audio_client, &read_param);
break;
}
+ case ASM_DATA_EVENT_READ_COMPRESSED_DONE: {
+ pr_debug("ASM_DATA_EVENT_READ_COMPRESSED_DONE\n");
+ pr_debug("buf = %p, data = 0x%X, *data = %p,\n"
+ "prtd->pcm_irq_pos = %d\n",
+ prtd->audio_client->port[OUT].buf,
+ *(uint32_t *)prtd->audio_client->port[OUT].buf->data,
+ prtd->audio_client->port[OUT].buf->data,
+ prtd->pcm_irq_pos);
+
+ if (!atomic_read(&prtd->start))
+ break;
+ buf = prtd->audio_client->port[OUT].buf;
+ pr_debug("pcm_irq_pos=%d, buf[0].phys = 0x%X\n",
+ prtd->pcm_irq_pos, (uint32_t)buf[0].phys);
+ read_param.len = prtd->pcm_count;
+ read_param.paddr = (unsigned long)(buf[0].phys) +
+ prtd->pcm_irq_pos;
+ prtd->pcm_irq_pos += prtd->pcm_count;
+
+ if (atomic_read(&prtd->start))
+ snd_pcm_period_elapsed(substream);
+
+ q6asm_async_read_compressed(prtd->audio_client, &read_param);
+ break;
+ }
case APR_BASIC_RSP_RESULT: {
switch (payload[0]) {
case ASM_SESSION_CMD_RUN: {
@@ -392,7 +418,7 @@
if (prtd->enabled)
return ret;
- read_param.len = prtd->pcm_count - COMPRE_CAPTURE_HEADER_SIZE;
+ read_param.len = prtd->pcm_count;
pr_debug("%s: Samp_rate = %d, Channel = %d, pcm_size = %d,\n"
"pcm_count = %d, periods = %d\n",
__func__, prtd->samp_rate, prtd->channel_mode,
@@ -400,9 +426,8 @@
for (i = 0; i < runtime->periods; i++) {
read_param.uid = i;
- read_param.paddr = ((unsigned long)(buf[i].phys) +
- COMPRE_CAPTURE_HEADER_SIZE);
- q6asm_async_read(prtd->audio_client, &read_param);
+ read_param.paddr = (unsigned long)(buf[i].phys);
+ q6asm_async_read_compressed(prtd->audio_client, &read_param);
}
prtd->periods = runtime->periods;
@@ -749,7 +774,8 @@
}
} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
ret = q6asm_open_read_compressed(prtd->audio_client,
- compr->codec);
+ MAX_NUM_FRAMES_PER_BUFFER,
+ COMPRESSED_META_DATA_MODE);
if (ret < 0) {
pr_err("%s: compressed Session out open failed\n",
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index 7e8e282..c0c679d 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -65,6 +65,10 @@
static const DECLARE_TLV_DB_LINEAR(multimedia2_rx_vol_gain, 0,
INT_RX_VOL_MAX_STEPS);
+static int msm_route_multimedia5_vol_control;
+static const DECLARE_TLV_DB_LINEAR(multimedia5_rx_vol_gain, 0,
+ INT_RX_VOL_MAX_STEPS);
+
static int msm_route_compressed_vol_control;
static const DECLARE_TLV_DB_LINEAR(compressed_rx_vol_gain, 0,
INT_RX_VOL_MAX_STEPS);
@@ -798,6 +802,25 @@
return 0;
}
+static int msm_routing_get_multimedia5_vol_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+
+ ucontrol->value.integer.value[0] = msm_route_multimedia5_vol_control;
+ return 0;
+}
+
+static int msm_routing_set_multimedia5_vol_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+
+ if (!multi_ch_pcm_set_volume(ucontrol->value.integer.value[0]))
+ msm_route_multimedia5_vol_control =
+ ucontrol->value.integer.value[0];
+
+ return 0;
+}
+
static int msm_routing_get_compressed_vol_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1737,6 +1760,12 @@
msm_routing_set_multimedia2_vol_mixer, multimedia2_rx_vol_gain),
};
+static const struct snd_kcontrol_new multimedia5_vol_mixer_controls[] = {
+ SOC_SINGLE_EXT_TLV("HIFI3 RX Volume", SND_SOC_NOPM, 0,
+ INT_RX_VOL_GAIN, 0, msm_routing_get_multimedia5_vol_mixer,
+ msm_routing_set_multimedia5_vol_mixer, multimedia5_rx_vol_gain),
+};
+
static const struct snd_kcontrol_new compressed_vol_mixer_controls[] = {
SOC_SINGLE_EXT_TLV("COMPRESSED RX Volume", SND_SOC_NOPM, 0,
INT_RX_VOL_GAIN, 0, msm_routing_get_compressed_vol_mixer,
@@ -2645,6 +2674,10 @@
ARRAY_SIZE(multimedia2_vol_mixer_controls));
snd_soc_add_platform_controls(platform,
+ multimedia5_vol_mixer_controls,
+ ARRAY_SIZE(multimedia5_vol_mixer_controls));
+
+ snd_soc_add_platform_controls(platform,
compressed_vol_mixer_controls,
ARRAY_SIZE(compressed_vol_mixer_controls));
diff --git a/sound/soc/msm/qdsp6/q6afe.c b/sound/soc/msm/qdsp6/q6afe.c
index 2f6772d..4c0ac9e 100644
--- a/sound/soc/msm/qdsp6/q6afe.c
+++ b/sound/soc/msm/qdsp6/q6afe.c
@@ -799,13 +799,14 @@
lp_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM;
lp_cfg.port_id = src_port;
- lp_cfg.payload_size = sizeof(struct afe_param_payload);
+ lp_cfg.payload_size = sizeof(struct afe_param_payload_base) +
+ sizeof(struct afe_param_loopback_cfg);
lp_cfg.payload_address = 0;
- lp_cfg.payload.module_id = AFE_MODULE_LOOPBACK;
- lp_cfg.payload.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
- lp_cfg.payload.param_size = sizeof(struct afe_param_loopback_cfg);
- lp_cfg.payload.reserved = 0;
+ lp_cfg.payload.base.module_id = AFE_MODULE_LOOPBACK;
+ lp_cfg.payload.base.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
+ lp_cfg.payload.base.param_size = sizeof(struct afe_param_loopback_cfg);
+ lp_cfg.payload.base.reserved = 0;
lp_cfg.payload.param.loopback_cfg.loopback_cfg_minor_version =
AFE_API_VERSION_LOOPBACK_CONFIG;
@@ -879,13 +880,15 @@
set_param.hdr.opcode = AFE_PORT_CMD_SET_PARAM;
set_param.port_id = port_id;
- set_param.payload_size = sizeof(struct afe_param_payload);
+ set_param.payload_size = sizeof(struct afe_param_payload_base) +
+ sizeof(struct afe_param_loopback_gain);
set_param.payload_address = 0;
- set_param.payload.module_id = AFE_MODULE_ID_PORT_INFO;
- set_param.payload.param_id = AFE_PARAM_ID_LOOPBACK_GAIN;
- set_param.payload.param_size = sizeof(struct afe_param_loopback_gain);
- set_param.payload.reserved = 0;
+ set_param.payload.base.module_id = AFE_MODULE_ID_PORT_INFO;
+ set_param.payload.base.param_id = AFE_PARAM_ID_LOOPBACK_GAIN;
+ set_param.payload.base.param_size =
+ sizeof(struct afe_param_loopback_gain);
+ set_param.payload.base.reserved = 0;
set_param.payload.param.loopback_gain.gain = volume;
set_param.payload.param.loopback_gain.reserved = 0;
diff --git a/sound/soc/msm/qdsp6/q6asm.c b/sound/soc/msm/qdsp6/q6asm.c
index 59c390e..06be186 100644
--- a/sound/soc/msm/qdsp6/q6asm.c
+++ b/sound/soc/msm/qdsp6/q6asm.c
@@ -805,6 +805,7 @@
uint32_t token;
unsigned long dsp_flags;
uint32_t *payload;
+ uint32_t wakeup_flag = 1;
if ((ac == NULL) || (data == NULL)) {
@@ -816,7 +817,13 @@
ac->session);
return -EINVAL;
}
-
+ if (atomic_read(&ac->nowait_cmd_cnt) > 0) {
+ pr_debug("%s: nowait_cmd_cnt %d\n",
+ __func__,
+ atomic_read(&ac->nowait_cmd_cnt));
+ atomic_dec(&ac->nowait_cmd_cnt);
+ wakeup_flag = 0;
+ }
payload = data->payload;
if (data->opcode == RESET_EVENTS) {
@@ -862,7 +869,7 @@
case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
case ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED:
case ASM_STREAM_CMD_OPEN_READ_COMPRESSED:
- if (atomic_read(&ac->cmd_state)) {
+ if (atomic_read(&ac->cmd_state) && wakeup_flag) {
atomic_set(&ac->cmd_state, 0);
if (payload[1] == ADSP_EUNSUPPORTED)
atomic_set(&ac->cmd_response, 1);
@@ -1269,7 +1276,8 @@
return -EINVAL;
}
-int q6asm_open_read_compressed(struct audio_client *ac, uint32_t format)
+int q6asm_open_read_compressed(struct audio_client *ac,
+ uint32_t frames_per_buffer, uint32_t meta_data_mode)
{
int rc = 0x00;
struct asm_stream_cmd_open_read_compressed open;
@@ -1285,8 +1293,8 @@
q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_COMPRESSED;
/* hardcoded as following*/
- open.frame_per_buf = 1;
- open.uMode = 0;
+ open.frame_per_buf = frames_per_buffer;
+ open.uMode = meta_data_mode;
rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
if (rc < 0) {
@@ -1618,12 +1626,12 @@
run.flags = flags;
run.msw_ts = msw_ts;
run.lsw_ts = lsw_ts;
-
rc = apr_send_pkt(ac->apr, (uint32_t *) &run);
if (rc < 0) {
pr_err("%s:Commmand run failed[%d]", __func__, rc);
return -EINVAL;
}
+ atomic_inc(&ac->nowait_cmd_cnt);
return 0;
}
@@ -3259,6 +3267,40 @@
return -EINVAL;
}
+int q6asm_async_read_compressed(struct audio_client *ac,
+ struct audio_aio_read_param *param)
+{
+ int rc = 0;
+ struct asm_stream_cmd_read read;
+
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE);
+
+ /* Pass physical address as token for AIO scheme */
+ read.hdr.token = param->paddr;
+ read.hdr.opcode = ASM_DATA_CMD_READ_COMPRESSED;
+ read.buf_add = param->paddr;
+ read.buf_size = param->len;
+ read.uid = param->uid;
+
+ pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session,
+ read.buf_add, read.buf_size);
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
+ if (rc < 0) {
+ pr_debug("[%s] read op[0x%x]rc[%d]\n", __func__,
+ read.hdr.opcode, rc);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
uint32_t lsw_ts, uint32_t flags)
{
@@ -3541,11 +3583,13 @@
pr_debug("%s:session[%d]opcode[0x%x] ", __func__,
ac->session,
hdr.opcode);
+
rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
if (rc < 0) {
pr_err("%s:Commmand 0x%x failed\n", __func__, hdr.opcode);
goto fail_cmd;
}
+ atomic_inc(&ac->nowait_cmd_cnt);
return 0;
fail_cmd:
return -EINVAL;