Merge "EHCI: HSIC: Halt the controller while resetting the port"
diff --git a/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt b/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
index c71b190..24dbb4b 100644
--- a/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
+++ b/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
@@ -1,14 +1,27 @@
Qualcomm Interprocessor Communication Spinlock
+--Dedicated Hardware Implementation--
Required properties:
-- compatible : should be "qcom,ipc-spinlock"
+- compatible : should be "qcom,ipc-spinlock-sfpb"
- reg : the location and size of the spinlock hardware
- qcom,num-locks : the number of locks supported
Example:
qcom,ipc-spinlock@fd484000 {
- compatible = "qcom,ipc-spinlock";
+ compatible = "qcom,ipc-spinlock-sfpb";
reg = <0xfd484000 0x1000>;
qcom,num-locks = <32>;
};
+
+--LDREX Implementation--
+Required properties:
+- compatible : should be "qcom,ipc-spinlock-ldrex"
+- reg : the location and size of the shared lock memory
+
+Example:
+
+ qcom,ipc-spinlock@fa00000 {
+ compatible = "qcom,ipc-spinlock-ldrex";
+ reg = <0xfa00000 0x200000>;
+ };
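
With the binding split into two compatibles, a driver can probe for whichever implementation the target device tree enables. A minimal C sketch of that selection follows, mirroring the AUTO_MODE fallback in arch/arm/mach-msm/remote_spinlock.c later in this merge; the helper name and return convention are illustrative only, and the status-property check the real driver performs is omitted here.

#include <linux/of.h>
#include <linux/errno.h>

/*
 * Minimal sketch: pick the spinlock implementation from whichever
 * compatible is present in the device tree. The helper name
 * select_spinlock_impl() is hypothetical.
 */
static int select_spinlock_impl(void)
{
        struct device_node *node;

        node = of_find_compatible_node(NULL, NULL, "qcom,ipc-spinlock-sfpb");
        if (node) {
                of_node_put(node);
                return 0;       /* use the dedicated SFPB hardware mutexes */
        }

        node = of_find_compatible_node(NULL, NULL, "qcom,ipc-spinlock-ldrex");
        if (node) {
                of_node_put(node);
                return 1;       /* use LDREX over shared memory */
        }

        return -ENODEV;
}
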
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index cd14056..f97e063 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -31,6 +31,14 @@
request by different video encoder usecases.
- qcom,dec-ddr-ab-ib : list of bus vectors(ab, ib pair) for ddr bandwidth
request by different video decoder usecases.
+- qcom,iommu-groups : list of IOMMU groups to be used. Groups are defined as
+ phandles in <target>-iommu-domains.dtsi (e.g msm8974-v1-iommu-domains.dtsi)
+- qcom,iommu-group-buffer-types : bitmap of buffer types that can be mapped into
+ the corresponding IOMMU group. Buffer types are defined within the vidc driver
+ by "enum hal_buffer" in msm_smem.h
+- qcom,buffer-type-tz-usage-table : a key-value pair, mapping a buffer type
+ (enum hal_buffer) to its corresponding TZ usage. The TZ usages are defined
+ as "enum cp_mem_usage" in include/linux/msm_ion.h
Example:
@@ -59,4 +67,8 @@
<60000 664950>;
qcom,dec-ddr-ab-ib = <0 0>,
<110000 909000>;
+ qcom,iommu-groups = <&venus_domain_ns &venus_domain_cp>;
+ qcom,iommu-group-buffer-types = <0xfff 0x1ff>;
+ qcom,buffer-type-tz-usage-table = <0x1 0x1>,
+ <0x1fe 0x2>;
};
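
The qcom,buffer-type-tz-usage-table property is a flat list of (buffer-type bitmap, TZ usage) u32 pairs. A hedged sketch of parsing such a pair table with the generic OF helpers is shown below; the struct and function names are hypothetical and not taken from the vidc driver.

#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/*
 * Hypothetical sketch: read qcom,buffer-type-tz-usage-table as
 * (buffer-type bitmap, TZ usage) pairs. Error handling and the
 * struct name are illustrative only.
 */
struct tz_usage_entry {
        u32 buffer_types;       /* bitmap of enum hal_buffer values */
        u32 tz_usage;           /* enum cp_mem_usage value */
};

static int read_tz_usage_table(struct device_node *np,
                               struct tz_usage_entry *table, int max_entries)
{
        u32 vals[2 * 8];        /* sketch assumes at most 8 pairs */
        int len = 0, i, rc;

        if (!of_get_property(np, "qcom,buffer-type-tz-usage-table", &len))
                return -ENODEV;
        len /= sizeof(u32);
        if ((len & 1) || len / 2 > max_entries || len / 2 > 8)
                return -EINVAL;

        rc = of_property_read_u32_array(np, "qcom,buffer-type-tz-usage-table",
                                        vals, len);
        if (rc)
                return rc;

        for (i = 0; i < len / 2; i++) {
                table[i].buffer_types = vals[2 * i];
                table[i].tz_usage = vals[2 * i + 1];
        }
        return len / 2;
}
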
diff --git a/Documentation/devicetree/bindings/memory.txt b/Documentation/devicetree/bindings/memory.txt
index 74e0476..e98ee05 100644
--- a/Documentation/devicetree/bindings/memory.txt
+++ b/Documentation/devicetree/bindings/memory.txt
@@ -36,6 +36,7 @@
reg = <(baseaddr) (size)>;
(linux,contiguous-region);
(linux,default-contiguous-region);
+ label = (unique_name);
};
name: a name given to the defined region.
@@ -47,7 +48,11 @@
linux,default-contiguous-region: property indicating that the region
is the default region for all contiguous memory
allocations, Linux specific (optional)
-
+label: an internal name used for automatically associating the
+ cma region with a given device. The label is optional;
+ if the label is not given the client is responsible for
+ calling the appropriate functions to associate the region
+ with a device.
* Device nodes
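
The label string lets platform code find a contiguous region by name and associate it with the right device. The sketch below is purely illustrative of that lookup using generic OF calls; the helper is hypothetical and is not how the MSM platform code performs the association.

#include <linux/of.h>
#include <linux/string.h>

/*
 * Illustrative only: find a child of /memory whose "label" property
 * matches the name a driver expects.
 */
static struct device_node *find_region_by_label(const char *wanted)
{
        struct device_node *mem, *child;
        const char *label;

        mem = of_find_node_by_path("/memory");
        if (!mem)
                return NULL;

        for_each_child_of_node(mem, child) {
                if (!of_property_read_string(child, "label", &label) &&
                    !strcmp(label, wanted)) {
                        of_node_put(mem);
                        return child;   /* caller must of_node_put() it */
                }
        }
        of_node_put(mem);
        return NULL;
}
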
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 9d0b0a5..2cdc7ff 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -52,6 +52,13 @@
- qcom,hsusb-otg-clk-always-on-workaround: If present then USB core clocks
remain active upon receiving bus suspend and USB cable is connected.
Used for allowing USB to respond to remote wakeup.
+- <supply-name>-supply: phandle to the regulator device tree node.
+ Required "supply-name" examples are "HSUSB_VDDCX" (when voting for VDDCX) or
+ "hsusb_vdd_dig" (when voting for VDDCX Corner voltage),
+ "HSUSB_1p8-supply" and "HSUSB_3p3-supply".
+- qcom,vdd-voltage-level: This property must be a list of three integer
+ values (no, min, max) where each value represents either a voltage
+ in microvolts or a value corresponding to voltage corner.
Example HSUSB OTG controller device node :
usb@f9690000 {
@@ -72,6 +79,10 @@
qcom,hsusb-otg-pmic-id-irq = <47>
qcom,hsusb-otg-lpm-on-dev-suspend;
qcom,hsusb-otg-clk-always-on-workaround;
+ hsusb_vdd_dig-supply = <&pm8226_s1_corner>;
+ HSUSB_1p8-supply = <&pm8226_l10>;
+ HSUSB_3p3-supply = <&pm8226_l20>;
+ qcom,vdd-voltage-level = <1 5 7>;
qcom,msm_bus,name = "usb2";
qcom,msm_bus,num_cases = <2>;
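
qcom,vdd-voltage-level carries a (no, min, max) triple that pairs with the hsusb_vdd_dig/HSUSB_VDDCX supply. A rough sketch of how a consumer might read the triple and vote the min/max pair is shown below; the function name and error handling are assumptions, not the actual msm_otg code.

#include <linux/of.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/*
 * Sketch under assumptions: read qcom,vdd-voltage-level (no, min, max)
 * and vote the min/max pair on the hsusb_vdd_dig supply.
 */
static int hsusb_vote_vddcx(struct device *dev)
{
        struct regulator *vdd;
        u32 level[3];
        int rc;

        rc = of_property_read_u32_array(dev->of_node,
                                        "qcom,vdd-voltage-level", level, 3);
        if (rc)
                return rc;

        vdd = devm_regulator_get(dev, "hsusb_vdd_dig");
        if (IS_ERR(vdd))
                return PTR_ERR(vdd);

        /* level[1]/level[2] are corner values when voting on a *_corner supply */
        return regulator_set_voltage(vdd, level[1], level[2]);
}
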
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index ed18cae..3a9b770 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -63,27 +63,6 @@
8 - SIGSEGV faults
16 - SIGBUS faults
-config DEBUG_RODATA
- bool "Write protect kernel text section"
- default n
- depends on DEBUG_KERNEL && MMU
- ---help---
- Mark the kernel text section as write-protected in the pagetables,
- in order to catch accidental (and incorrect) writes to such const
- data. This will cause the size of the kernel, plus up to 4MB, to
- be mapped as pages instead of sections, which will increase TLB
- pressure.
- If in doubt, say "N".
-
-config DEBUG_RODATA_TEST
- bool "Testcase for the DEBUG_RODATA feature"
- depends on DEBUG_RODATA
- default n
- ---help---
- This option enables a testcase for the DEBUG_RODATA
- feature.
- If in doubt, say "N"
-
# These options are only for real kernel hackers who want to get their hands dirty.
config DEBUG_LL
bool "Kernel low-level debugging functions (read help!)"
diff --git a/arch/arm/boot/dts/msm-pm8226.dtsi b/arch/arm/boot/dts/msm-pm8226.dtsi
index de23f4c..6a07bad 100644
--- a/arch/arm/boot/dts/msm-pm8226.dtsi
+++ b/arch/arm/boot/dts/msm-pm8226.dtsi
@@ -22,6 +22,33 @@
#address-cells = <1>;
#size-cells = <1>;
+ qcom,power-on@800 {
+ compatible = "qcom,qpnp-power-on";
+ reg = <0x800 0x100>;
+ interrupts = <0x0 0x8 0x0>,
+ <0x0 0x8 0x1>,
+ <0x0 0x8 0x4>;
+ interrupt-names = "kpdpwr", "resin", "resin-bark";
+ qcom,pon-dbc-delay = <15625>;
+ qcom,system-reset;
+
+ qcom,pon_1 {
+ qcom,pon-type = <0>;
+ qcom,pull-up = <1>;
+ linux,code = <116>;
+ };
+
+ qcom,pon_2 {
+ qcom,pon-type = <1>;
+ qcom,support-reset = <1>;
+ qcom,pull-up = <1>;
+ qcom,s1-timer = <0>;
+ qcom,s2-timer = <2000>;
+ qcom,s2-type = <1>;
+ linux,code = <114>;
+ };
+ };
+
pm8226_gpios: gpios {
spmi-dev-container;
compatible = "qcom,qpnp-pin";
diff --git a/arch/arm/boot/dts/msm8226-cdp.dts b/arch/arm/boot/dts/msm8226-cdp.dts
index 1c431e8..f01491f 100644
--- a/arch/arm/boot/dts/msm8226-cdp.dts
+++ b/arch/arm/boot/dts/msm8226-cdp.dts
@@ -22,3 +22,66 @@
status = "ok";
};
};
+
+&sdcc1 {
+ vdd-supply = <&pm8226_l17>;
+ qcom,vdd-always-on;
+ qcom,vdd-lpm-sup;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <800 500000>;
+
+ vdd-io-supply = <&pm8226_l6>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <250 154000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,sup-voltages = <2950 2950>;
+
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ qcom,nonremovable;
+
+ status = "ok";
+};
+
+&sdcc2 {
+ vdd-supply = <&pm8226_l18>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <9000 800000>;
+
+ vdd-io-supply = <&pm8226_l21>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <6 22000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,sup-voltages = <2950 2950>;
+
+ qcom,xpc;
+ qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+ qcom,current-limit = <600>;
+
+ #address-cells = <0>;
+ interrupt-parent = <&sdcc2>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 220 0
+ 2 &msmgpio 38 0x3>;
+ interrupt-names = "core_irq", "bam_irq", "status_irq";
+ cd-gpios = <&msmgpio 38 0x1>;
+
+ status = "ok";
+};
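
The sdcc2 card-detect line is described both as an interrupt-map entry and as cd-gpios = <&msmgpio 38 0x1>. Below is a minimal sketch of picking up that GPIO with the generic of_gpio helpers; the function name, label string and simplified request flow are assumptions for illustration.

#include <linux/of_gpio.h>
#include <linux/gpio.h>

/*
 * Illustrative sketch: fetch the card-detect GPIO described by
 * cd-gpios = <&msmgpio 38 0x1> and request it as an input. Flag
 * handling and debounce are omitted for brevity.
 */
static int sdcc_get_cd_gpio(struct device_node *np)
{
        enum of_gpio_flags flags;
        int gpio, rc;

        gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
        if (!gpio_is_valid(gpio))
                return gpio;

        rc = gpio_request_one(gpio, GPIOF_IN, "sdcc-cd");
        if (rc)
                return rc;

        /* flags carries OF_GPIO_ACTIVE_LOW when the 0x1 specifier flag is set */
        return gpio;
}
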
diff --git a/arch/arm/boot/dts/msm8226-mtp.dts b/arch/arm/boot/dts/msm8226-mtp.dts
index ef0fdc0..0242540 100644
--- a/arch/arm/boot/dts/msm8226-mtp.dts
+++ b/arch/arm/boot/dts/msm8226-mtp.dts
@@ -22,3 +22,66 @@
status = "ok";
};
};
+
+&sdcc1 {
+ vdd-supply = <&pm8226_l17>;
+ qcom,vdd-always-on;
+ qcom,vdd-lpm-sup;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <800 500000>;
+
+ vdd-io-supply = <&pm8226_l6>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <250 154000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,sup-voltages = <2950 2950>;
+
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ qcom,nonremovable;
+
+ status = "ok";
+};
+
+&sdcc2 {
+ vdd-supply = <&pm8226_l18>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <9000 800000>;
+
+ vdd-io-supply = <&pm8226_l21>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <6 22000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,sup-voltages = <2950 2950>;
+
+ qcom,xpc;
+ qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+ qcom,current-limit = <600>;
+
+ #address-cells = <0>;
+ interrupt-parent = <&sdcc2>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 220 0
+ 2 &msmgpio 38 0x3>;
+ interrupt-names = "core_irq", "bam_irq", "status_irq";
+ cd-gpios = <&msmgpio 38 0x1>;
+
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/msm8226-qrd.dts b/arch/arm/boot/dts/msm8226-qrd.dts
index 7909435..65d4b33 100644
--- a/arch/arm/boot/dts/msm8226-qrd.dts
+++ b/arch/arm/boot/dts/msm8226-qrd.dts
@@ -22,3 +22,66 @@
status = "ok";
};
};
+
+&sdcc1 {
+ vdd-supply = <&pm8226_l17>;
+ qcom,vdd-always-on;
+ qcom,vdd-lpm-sup;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <800 500000>;
+
+ vdd-io-supply = <&pm8226_l6>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <250 154000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,sup-voltages = <2950 2950>;
+
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ qcom,nonremovable;
+
+ status = "ok";
+};
+
+&sdcc2 {
+ vdd-supply = <&pm8226_l18>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <9000 800000>;
+
+ vdd-io-supply = <&pm8226_l21>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <6 22000>;
+
+ qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+ qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+ qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ qcom,sup-voltages = <2950 2950>;
+
+ qcom,xpc;
+ qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+ qcom,current-limit = <600>;
+
+ #address-cells = <0>;
+ interrupt-parent = <&sdcc2>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 220 0
+ 2 &msmgpio 38 0x3>;
+ interrupt-names = "core_irq", "bam_irq", "status_irq";
+ cd-gpios = <&msmgpio 38 0x1>;
+
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 3533d19..741ffbd 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -85,14 +85,23 @@
reg = <0xf9a55000 0x400>;
interrupts = <0 134 0>, <0 140 0>;
interrupt-names = "core_irq", "async_irq";
- HSUSB_VDDCX-supply = <&pm8226_s1>;
+ hsusb_vdd_dig-supply = <&pm8226_s1_corner>;
HSUSB_1p8-supply = <&pm8226_l10>;
HSUSB_3p3-supply = <&pm8226_l20>;
+ qcom,vdd-voltage-level = <1 5 7>;
qcom,hsusb-otg-phy-type = <2>;
qcom,hsusb-otg-mode = <1>;
qcom,hsusb-otg-otg-control = <1>;
qcom,hsusb-otg-disable-reset;
+
+ qcom,msm-bus,name = "usb2";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <87 512 0 0>,
+ <87 512 60000 960000>;
};
android_usb@fe8050c8 {
@@ -541,8 +550,8 @@
<0xfc4b8000 0x1000>;
reg-names = "tsens_physical", "tsens_eeprom_physical";
interrupts = <0 184 0>;
- qcom,sensors = <6>;
- qcom,slope = <3200 3200 3200 3200 3200 3200>;
+ qcom,sensors = <4>;
+ qcom,slope = <2901 2846 3038 2955>;
qcom,calib-mode = "fuse_map2";
};
@@ -578,6 +587,11 @@
qcom,bam-producer-pipe-index = <13>;
};
+ qcom,bam_dmux@fc834000 {
+ compatible = "qcom,bam_dmux";
+ reg = <0xfc834000 0x7000>;
+ interrupts = <0 29 1>;
+ };
};
&gdsc_venus {
diff --git a/arch/arm/boot/dts/msm8974-ion.dtsi b/arch/arm/boot/dts/msm8974-ion.dtsi
index f55cff2..dfa22c1 100644
--- a/arch/arm/boot/dts/msm8974-ion.dtsi
+++ b/arch/arm/boot/dts/msm8974-ion.dtsi
@@ -24,8 +24,7 @@
compatible = "qcom,msm-ion-reserve";
reg = <8>;
qcom,heap-align = <0x1000>;
- qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
- qcom,memory-reservation-size = <0x7800000>;
+ linux,contiguous-region = <&secure_mem>;
};
qcom,ion-heap@25 { /* IOMMU HEAP */
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index 68fed68..6e2719b 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -380,7 +380,7 @@
};
&usb3 {
- qcom,charging-disabled;
+ qcom,otg-capability;
};
&pm8941_mvs1 {
@@ -708,3 +708,33 @@
};
};
};
+
+&pm8941_chg {
+ status = "ok";
+
+ qcom,chg-charging-disabled;
+
+ qcom,chg-chgr@1000 {
+ status = "ok";
+ };
+
+ qcom,chg-buck@1100 {
+ status = "ok";
+ };
+
+ qcom,chg-usb-chgpth@1300 {
+ status = "ok";
+ };
+
+ qcom,chg-dc-chgpth@1400 {
+ status = "ok";
+ };
+
+ qcom,chg-boost@1500 {
+ status = "ok";
+ };
+
+ qcom,chg-misc@1600 {
+ status = "ok";
+ };
+};
diff --git a/arch/arm/boot/dts/msm8974-v1.dtsi b/arch/arm/boot/dts/msm8974-v1.dtsi
index aed4daf..64014b3 100644
--- a/arch/arm/boot/dts/msm8974-v1.dtsi
+++ b/arch/arm/boot/dts/msm8974-v1.dtsi
@@ -110,4 +110,16 @@
<1010000 1818000>,
<1616000 2908800>,
<2020000 6400000>;
+ qcom,iommu-groups = <&venus_domain_ns &venus_domain_cp>;
+ qcom,iommu-group-buffer-types = <0xfff 0x1ff>;
+ qcom,buffer-type-tz-usage-table = <0x1 0x1>,
+ <0x1fe 0x2>;
+};
+
+&sfpb_spinlock {
+ status = "disable";
+};
+
+&ldrex_spinlock {
+ status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8974-v2.dtsi b/arch/arm/boot/dts/msm8974-v2.dtsi
index 7e6c0bf..3dda20f 100644
--- a/arch/arm/boot/dts/msm8974-v2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.dtsi
@@ -110,6 +110,12 @@
<1620000 970000>,
<2024000 1212000>,
<2132000 1279000>;
+ qcom,iommu-groups = <&venus_domain_ns &venus_domain_sec_bitstream
+ &venus_domain_sec_pixel &venus_domain_sec_non_pixel>;
+ qcom,iommu-group-buffer-types = <0xfff 0x91 0x42 0x120>;
+ qcom,buffer-type-tz-usage-table = <0x91 0x1>,
+ <0x42 0x2>,
+ <0x120 0x3>;
};
&krait_pdn {
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index b0b7677..7c6a9d1 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -31,6 +31,15 @@
spi7 = &spi_7;
};
+ memory {
+
+ secure_mem: region@0 {
+ linux,contiguous-region;
+ reg = <0 0x7800000>;
+ label = "secure_mem";
+ };
+ };
+
intc: interrupt-controller@F9000000 {
compatible = "qcom,msm-qgic2";
interrupt-controller;
@@ -1234,6 +1243,18 @@
compatible = "qcom,ssm";
qcom,channel-name = "SSM_RTR";
};
+
+ sfpb_spinlock: qcom,ipc-spinlock@fd484000 {
+ compatible = "qcom,ipc-spinlock-sfpb";
+ reg = <0xfd484000 0x1000>;
+ qcom,num-locks = <32>;
+ };
+
+ ldrex_spinlock: qcom,ipc-spinlock@fa00000 {
+ compatible = "qcom,ipc-spinlock-ldrex";
+ reg = <0xfa00000 0x200000>;
+ status = "disable";
+ };
};
&gdsc_venus {
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index f22fc28..8517605 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -515,6 +515,7 @@
qcom,msm-pcm {
compatible = "qcom,msm-pcm-dsp";
+ qcom,msm-pcm-dsp-id = <0>;
};
qcom,msm-pcm-routing {
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index 2e4f84d..df4ae19 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -45,6 +45,7 @@
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_PKG4=y
CONFIG_MSM_IPC_LOGGING=y
+CONFIG_MSM_BAM_DMUX=y
CONFIG_MSM_SMP2P=y
CONFIG_MSM_SMP2P_TEST=y
CONFIG_MSM_IPC_ROUTER=y
@@ -63,6 +64,7 @@
CONFIG_MSM_WATCHDOG_V2=y
CONFIG_MSM_DLOAD_MODE=y
CONFIG_MSM_ADSP_LOADER=m
+CONFIG_MSM_OCMEM_POWER_DISABLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
@@ -108,6 +110,10 @@
CONFIG_BRIDGE_NF_EBTABLES=y
CONFIG_BRIDGE_EBT_BROUTE=y
CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_CLS_FW=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_MD=y
@@ -115,6 +121,8 @@
CONFIG_DM_CRYPT=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
+# CONFIG_MSM_RMNET is not set
+CONFIG_MSM_RMNET_BAM=y
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
@@ -142,7 +150,7 @@
CONFIG_I2C_QUP=y
CONFIG_WCD9306_CODEC=y
CONFIG_GPIO_QPNP_PIN=y
-# CONFIG_HWMON is not set
+CONFIG_HWMON=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_SENSORS_QPNP_ADC_CURRENT=y
CONFIG_REGULATOR=y
@@ -213,6 +221,7 @@
CONFIG_CRYPTO_TWOFISH=y
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=y
+CONFIG_QPNP_POWER_ON=y
CONFIG_LIBCRC32C=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 5c03630..d36d5a2 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -234,6 +234,7 @@
CONFIG_GENLOCK_MISCDEVICE=y
CONFIG_SYNC=y
CONFIG_SW_SYNC=y
+CONFIG_CMA=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_HAPTIC_ISA1200=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index faa0471..df0b5f0 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -238,6 +238,7 @@
CONFIG_GENLOCK_MISCDEVICE=y
CONFIG_SYNC=y
CONFIG_SW_SYNC=y
+CONFIG_CMA=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_TSPP=m
diff --git a/arch/arm/configs/msm9625-perf_defconfig b/arch/arm/configs/msm9625-perf_defconfig
index 2070f46..1fe528a 100644
--- a/arch/arm/configs/msm9625-perf_defconfig
+++ b/arch/arm/configs/msm9625-perf_defconfig
@@ -172,7 +172,8 @@
CONFIG_KS8851=y
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_MSM_RMNET is not set
-CONFIG_MSM_RMNET_BAM=y
+# CONFIG_MSM_RMNET_BAM is not set
+CONFIG_MSM_RMNET_WWAN=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index 9a1f872..aa18209 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -172,7 +172,8 @@
CONFIG_KS8851=y
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_MSM_RMNET is not set
-CONFIG_MSM_RMNET_BAM=y
+# CONFIG_MSM_RMNET_BAM is not set
+CONFIG_MSM_RMNET_WWAN=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d021905..584fe0b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -16,7 +16,6 @@
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>
-#include <asm/rodata.h>
#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
diff --git a/arch/arm/include/asm/rodata.h b/arch/arm/include/asm/rodata.h
deleted file mode 100644
index 8c8add8..0000000
--- a/arch/arm/include/asm/rodata.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * arch/arm/include/asm/rodata.h
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * Author: Colin Cross <ccross@android.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _ASMARM_RODATA_H
-#define _ASMARM_RODATA_H
-
-#ifndef __ASSEMBLY__
-
-#ifdef CONFIG_DEBUG_RODATA
-
-int set_memory_rw(unsigned long virt, int numpages);
-int set_memory_ro(unsigned long virt, int numpages);
-
-void mark_rodata_ro(void);
-void set_kernel_text_rw(void);
-void set_kernel_text_ro(void);
-#else
-static inline void set_kernel_text_rw(void) { }
-static inline void set_kernel_text_ro(void) { }
-#endif
-
-#endif
-
-#endif
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index bf17145..df0bf0c 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -13,7 +13,6 @@
*/
#include <linux/ftrace.h>
-#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
@@ -64,20 +63,6 @@
}
#endif
-int ftrace_arch_code_modify_prepare(void)
-{
- set_kernel_text_rw();
- set_all_modules_text_rw();
- return 0;
-}
-
-int ftrace_arch_code_modify_post_process(void)
-{
- set_all_modules_text_ro();
- set_kernel_text_ro();
- return 0;
-}
-
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
return arm_gen_branch_link(pc, addr);
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 42ed059..b10212e 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -412,6 +412,7 @@
select CPU_FREQ_GOV_ONDEMAND
select MSM_PIL
select MSM_RUN_QUEUE_STATS
+ select ARM_HAS_SG_CHAIN
config ARCH_MSM8226
bool "MSM8226"
@@ -438,6 +439,7 @@
select MEMORY_HOLE_CARVEOUT
select DONT_MAP_HOLE_AFTER_MEMBANK0
select MSM_BUS_SCALING
+ select ARM_HAS_SG_CHAIN
endmenu
choice
@@ -1037,8 +1039,8 @@
default "0x80200000" if ARCH_MSM8930
default "0x00000000" if ARCH_MSM8974
default "0x00000000" if ARCH_MPQ8092
- default "0x00100000" if ARCH_MSM8226
- default "0x00100000" if ARCH_MSM8610
+ default "0x00000000" if ARCH_MSM8226
+ default "0x00000000" if ARCH_MSM8610
default "0x10000000" if ARCH_FSM9XXX
default "0x00200000" if ARCH_MSM9625
default "0x00200000" if !MSM_STACKED_MEMORY
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index f683b33..02d0b46 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -72,7 +72,7 @@
dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v2-1-cdp.dtb
# MSM8226
- zreladdr-$(CONFIG_ARCH_MSM8226) := 0x00108000
+ zreladdr-$(CONFIG_ARCH_MSM8226) := 0x00008000
dtb-$(CONFIG_ARCH_MSM8226) += msm8226-sim.dtb
dtb-$(CONFIG_ARCH_MSM8226) += msm8226-cdp.dtb
dtb-$(CONFIG_ARCH_MSM8226) += msm8226-mtp.dtb
@@ -87,6 +87,6 @@
zreladdr-$(CONFIG_ARCH_MPQ8092) := 0x00008000
# MSM8610
- zreladdr-$(CONFIG_ARCH_MSM8610) := 0x00108000
+ zreladdr-$(CONFIG_ARCH_MSM8610) := 0x00008000
dtb-$(CONFIG_ARCH_MSM8610) += msm8610-rumi.dtb
dtb-$(CONFIG_ARCH_MSM8610) += msm8610-sim.dtb
diff --git a/arch/arm/mach-msm/acpuclock-8226.c b/arch/arm/mach-msm/acpuclock-8226.c
index 7dc3a0e..8ba1b39 100644
--- a/arch/arm/mach-msm/acpuclock-8226.c
+++ b/arch/arm/mach-msm/acpuclock-8226.c
@@ -53,13 +53,13 @@
* 3) Depending on Frodo version, may need minimum of LVL_NOM
*/
static struct clkctl_acpu_speed acpu_freq_tbl[] = {
- { 0, 19200, CXO, 0, 0, LVL_LOW, 950000, 0 },
- { 1, 300000, PLL0, 4, 2, LVL_LOW, 950000, 4 },
- { 1, 384000, ACPUPLL, 5, 0, LVL_LOW, 950000, 4 },
- { 1, 600000, PLL0, 4, 0, LVL_NOM, 950000, 6 },
- { 1, 787200, ACPUPLL, 5, 0, LVL_NOM, 1050000, 6 },
- { 1, 998400, ACPUPLL, 5, 0, LVL_HIGH, 1050000, 7 },
- { 1, 1190400, ACPUPLL, 5, 0, LVL_HIGH, 1050000, 7 },
+ { 0, 19200, CXO, 0, 0, 1150000, 1150000, 0 },
+ { 1, 300000, PLL0, 4, 2, 1150000, 1150000, 4 },
+ { 1, 384000, ACPUPLL, 5, 0, 1150000, 1150000, 4 },
+ { 1, 600000, PLL0, 4, 0, 1150000, 1150000, 6 },
+ { 1, 787200, ACPUPLL, 5, 0, 1150000, 1150000, 6 },
+ { 0, 998400, ACPUPLL, 5, 0, 1150000, 1150000, 7 },
+ { 0, 1190400, ACPUPLL, 5, 0, 1150000, 1150000, 7 },
{ 0 }
};
@@ -68,7 +68,7 @@
.current_speed = &(struct clkctl_acpu_speed){ 0 },
.bus_scale = &bus_client_pdata,
/* FIXME regulator doesn't support corners yet */
- .vdd_max_cpu = 1050000,
+ .vdd_max_cpu = 1150000,
.vdd_max_mem = 1150000,
.src_clocks = {
[PLL0].name = "gpll0",
diff --git a/arch/arm/mach-msm/acpuclock-cortex.c b/arch/arm/mach-msm/acpuclock-cortex.c
index 4ac1408..febf95a 100644
--- a/arch/arm/mach-msm/acpuclock-cortex.c
+++ b/arch/arm/mach-msm/acpuclock-cortex.c
@@ -329,8 +329,8 @@
max_cpu_khz = acpuclk_init_data->freq_tbl[i].khz;
/* Initialize regulators */
- rc = increase_vdd(acpuclk_init_data->freq_tbl[i].vdd_cpu,
- acpuclk_init_data->freq_tbl[i].vdd_mem);
+ rc = increase_vdd(acpuclk_init_data->vdd_max_cpu,
+ acpuclk_init_data->vdd_max_mem);
if (rc)
goto err_vdd;
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index 7c2c463..833b213 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -302,81 +302,56 @@
#define bam_ch_is_in_reset(x) \
(bam_ch[(x)].status & BAM_CH_IN_RESET)
-#define LOG_MESSAGE_MAX_SIZE 80
struct kfifo bam_dmux_state_log;
-static uint32_t bam_dmux_state_logging_disabled;
static int bam_dmux_uplink_vote;
static int bam_dmux_power_state;
-static void bam_dmux_log(const char *fmt, ...)
- __printf(1, 2);
-
-
-#define DMUX_LOG_KERR(fmt...) \
-do { \
- bam_dmux_log(fmt); \
- pr_err(fmt); \
-} while (0)
-
static void *bam_ipc_log_txt;
#define BAM_IPC_LOG_PAGES 5
/**
* Log a state change along with a small message.
- *
* Complete size of message is limited to @todo.
+ * Logging is done using the IPC Logging infrastructure.
+ *
+ * States
+ * D: 1 = Power collapse disabled
+ * R: 1 = in global reset
+ * P: 1 = BAM is powered up
+ * A: 1 = BAM initialized and ready for data
+ * V: 1 = Uplink vote for power
+ * U: 1 = Uplink active
+ * W: 1 = Uplink Wait-for-ack
+ * A: 1 = Uplink ACK received
+ * #: >=1 On-demand uplink vote
+ * D: 1 = Disconnect ACK active
*/
-static void bam_dmux_log(const char *fmt, ...)
-{
- char buff[LOG_MESSAGE_MAX_SIZE];
- va_list arg_list;
- unsigned long long t_now;
- unsigned long nanosec_rem;
- int len = 0;
- if (bam_dmux_state_logging_disabled)
- return;
+#define BAM_DMUX_LOG(fmt, args...) \
+do { \
+ if (bam_ipc_log_txt) { \
+ ipc_log_string(bam_ipc_log_txt, \
+ "<DMUX> %c%c%c%c %c%c%c%c%d%c " fmt, \
+ a2_pc_disabled ? 'D' : 'd', \
+ in_global_reset ? 'R' : 'r', \
+ bam_dmux_power_state ? 'P' : 'p', \
+ bam_connection_is_active ? 'A' : 'a', \
+ bam_dmux_uplink_vote ? 'V' : 'v', \
+ bam_is_connected ? 'U' : 'u', \
+ wait_for_ack ? 'W' : 'w', \
+ ul_wakeup_ack_completion.done ? 'A' : 'a', \
+ atomic_read(&ul_ondemand_vote), \
+ disconnect_ack ? 'D' : 'd', \
+ args); \
+ } \
+} while (0)
- t_now = sched_clock();
- nanosec_rem = do_div(t_now, 1000000000U);
-
- /*
- * States
- * D: 1 = Power collapse disabled
- * R: 1 = in global reset
- * P: 1 = BAM is powered up
- * A: 1 = BAM initialized and ready for data
- *
- * V: 1 = Uplink vote for power
- * U: 1 = Uplink active
- * W: 1 = Uplink Wait-for-ack
- * A: 1 = Uplink ACK received
- * #: >=1 On-demand uplink vote
- * D: 1 = Disconnect ACK active
- */
- len += scnprintf(buff, sizeof(buff),
- "<DMUX> %u.%09lu %c%c%c%c %c%c%c%c%d%c ",
- (unsigned)t_now, nanosec_rem,
- a2_pc_disabled ? 'D' : 'd',
- in_global_reset ? 'R' : 'r',
- bam_dmux_power_state ? 'P' : 'p',
- bam_connection_is_active ? 'A' : 'a',
- bam_dmux_uplink_vote ? 'V' : 'v',
- bam_is_connected ? 'U' : 'u',
- wait_for_ack ? 'W' : 'w',
- ul_wakeup_ack_completion.done ? 'A' : 'a',
- atomic_read(&ul_ondemand_vote),
- disconnect_ack ? 'D' : 'd'
- );
-
- va_start(arg_list, fmt);
- len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
- va_end(arg_list);
- memset(buff + len, 0x0, sizeof(buff) - len);
- if (bam_ipc_log_txt)
- ipc_log_string(bam_ipc_log_txt, buff);
-}
+#define DMUX_LOG_KERR(fmt, args...) \
+do { \
+ BAM_DMUX_LOG(fmt, args); \
+ pr_err(fmt, args); \
+} while (0)
static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
{
@@ -396,12 +371,12 @@
spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
list_for_each_entry(info, &bam_tx_pool, list_node) {
if (!reported) {
- bam_dmux_log("%s: tx pool not empty\n", func);
+ BAM_DMUX_LOG("%s: tx pool not empty\n", func);
if (!in_global_reset)
pr_err("%s: tx pool not empty\n", func);
reported = 1;
}
- bam_dmux_log("%s: node=%p ts=%u.%09lu\n", __func__,
+ BAM_DMUX_LOG("%s: node=%p ts=%u.%09lu\n", __func__,
&info->list_node, info->ts_sec, info->ts_nsec);
if (!in_global_reset)
pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
@@ -529,7 +504,7 @@
mutex_lock(&bam_pdev_mutexlock);
if (in_global_reset) {
- bam_dmux_log("%s: open cid %d aborted due to ssr\n",
+ BAM_DMUX_LOG("%s: open cid %d aborted due to ssr\n",
__func__, rx_hdr->ch_id);
mutex_unlock(&bam_pdev_mutexlock);
queue_rx();
@@ -593,18 +568,18 @@
bam_mux_process_data(rx_skb);
break;
case BAM_MUX_HDR_CMD_OPEN:
- bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
+ BAM_DMUX_LOG("%s: opening cid %d PC enabled\n", __func__,
rx_hdr->ch_id);
handle_bam_mux_cmd_open(rx_hdr);
if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
- bam_dmux_log("%s: deactivating disconnect ack\n",
+ BAM_DMUX_LOG("%s: deactivating disconnect ack\n",
__func__);
disconnect_ack = 0;
}
dev_kfree_skb_any(rx_skb);
break;
case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
- bam_dmux_log("%s: opening cid %d PC disabled\n", __func__,
+ BAM_DMUX_LOG("%s: opening cid %d PC disabled\n", __func__,
rx_hdr->ch_id);
if (!a2_pc_disabled) {
@@ -617,11 +592,11 @@
break;
case BAM_MUX_HDR_CMD_CLOSE:
/* probably should drop pending write */
- bam_dmux_log("%s: closing cid %d\n", __func__,
+ BAM_DMUX_LOG("%s: closing cid %d\n", __func__,
rx_hdr->ch_id);
mutex_lock(&bam_pdev_mutexlock);
if (in_global_reset) {
- bam_dmux_log("%s: close cid %d aborted due to ssr\n",
+ BAM_DMUX_LOG("%s: close cid %d aborted due to ssr\n",
__func__, rx_hdr->ch_id);
mutex_unlock(&bam_pdev_mutexlock);
break;
@@ -1412,7 +1387,7 @@
for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
if (bam_ch_is_open(i)) {
bam_ch[i].notify(bam_ch[i].priv, event, data);
- bam_dmux_log("%s: cid=%d, event=%d, data=%lu\n",
+ BAM_DMUX_LOG("%s: cid=%d, event=%d, data=%lu\n",
__func__, i, event, data);
}
}
@@ -1455,11 +1430,11 @@
static void power_vote(int vote)
{
- bam_dmux_log("%s: curr=%d, vote=%d\n", __func__,
+ BAM_DMUX_LOG("%s: curr=%d, vote=%d\n", __func__,
bam_dmux_uplink_vote, vote);
if (bam_dmux_uplink_vote == vote)
- bam_dmux_log("%s: warning - duplicate power vote\n", __func__);
+ BAM_DMUX_LOG("%s: warning - duplicate power vote\n", __func__);
bam_dmux_uplink_vote = vote;
if (vote)
@@ -1473,7 +1448,7 @@
*/
static inline void ul_powerdown(void)
{
- bam_dmux_log("%s: powerdown\n", __func__);
+ BAM_DMUX_LOG("%s: powerdown\n", __func__);
verify_tx_queue_is_empty(__func__);
if (a2_pc_disabled) {
@@ -1585,7 +1560,7 @@
}
if (ul_packet_written || atomic_read(&ul_ondemand_vote)) {
- bam_dmux_log("%s: pkt written %d\n",
+ BAM_DMUX_LOG("%s: pkt written %d\n",
__func__, ul_packet_written);
ul_packet_written = 0;
schedule_delayed_work(&ul_timeout_work,
@@ -1614,7 +1589,7 @@
mutex_lock(&wakeup_lock);
if (bam_is_connected) { /* bam got connected before lock grabbed */
- bam_dmux_log("%s Already awake\n", __func__);
+ BAM_DMUX_LOG("%s Already awake\n", __func__);
mutex_unlock(&wakeup_lock);
return;
}
@@ -1677,35 +1652,35 @@
* instead of waiting
*/
if (wait_for_ack) {
- bam_dmux_log("%s waiting for previous ack\n", __func__);
+ BAM_DMUX_LOG("%s waiting for previous ack\n", __func__);
ret = wait_for_completion_timeout(
&ul_wakeup_ack_completion, HZ);
wait_for_ack = 0;
if (unlikely(ret == 0) && ssrestart_check()) {
mutex_unlock(&wakeup_lock);
- bam_dmux_log("%s timeout previous ack\n", __func__);
+ BAM_DMUX_LOG("%s timeout previous ack\n", __func__);
return;
}
}
INIT_COMPLETION(ul_wakeup_ack_completion);
power_vote(1);
- bam_dmux_log("%s waiting for wakeup ack\n", __func__);
+ BAM_DMUX_LOG("%s waiting for wakeup ack\n", __func__);
ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
if (unlikely(ret == 0) && ssrestart_check()) {
mutex_unlock(&wakeup_lock);
- bam_dmux_log("%s timeout wakeup ack\n", __func__);
+ BAM_DMUX_LOG("%s timeout wakeup ack\n", __func__);
return;
}
- bam_dmux_log("%s waiting completion\n", __func__);
+ BAM_DMUX_LOG("%s waiting completion\n", __func__);
ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
if (unlikely(ret == 0) && ssrestart_check()) {
mutex_unlock(&wakeup_lock);
- bam_dmux_log("%s timeout power on\n", __func__);
+ BAM_DMUX_LOG("%s timeout power on\n", __func__);
return;
}
bam_is_connected = 1;
- bam_dmux_log("%s complete\n", __func__);
+ BAM_DMUX_LOG("%s complete\n", __func__);
schedule_delayed_work(&ul_timeout_work,
msecs_to_jiffies(UL_TIMEOUT_DELAY));
mutex_unlock(&wakeup_lock);
@@ -1771,7 +1746,7 @@
/* handle disconnect during active UL */
write_lock_irqsave(&ul_wakeup_lock, flags);
if (bam_is_connected) {
- bam_dmux_log("%s: UL active - forcing powerdown\n", __func__);
+ BAM_DMUX_LOG("%s: UL active - forcing powerdown\n", __func__);
ul_powerdown();
}
write_unlock_irqrestore(&ul_wakeup_lock, flags);
@@ -1817,10 +1792,10 @@
{
int rc;
- bam_dmux_log("%s\n", __func__);
+ BAM_DMUX_LOG("%s\n", __func__);
mutex_lock(&dfab_status_lock);
if (dfab_is_on) {
- bam_dmux_log("%s: dfab is already on\n", __func__);
+ BAM_DMUX_LOG("%s: dfab is already on\n", __func__);
mutex_unlock(&dfab_status_lock);
return;
}
@@ -1842,7 +1817,7 @@
static void unvote_dfab(void)
{
- bam_dmux_log("%s\n", __func__);
+ BAM_DMUX_LOG("%s\n", __func__);
mutex_lock(&dfab_status_lock);
if (!dfab_is_on) {
DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
@@ -1864,7 +1839,7 @@
unsigned long flags;
spin_lock_irqsave(&wakelock_reference_lock, flags);
- bam_dmux_log("%s: ref count = %d\n", __func__,
+ BAM_DMUX_LOG("%s: ref count = %d\n", __func__,
wakelock_reference_count);
if (wakelock_reference_count == 0)
wake_lock(&bam_wakelock);
@@ -1883,7 +1858,7 @@
spin_unlock_irqrestore(&wakelock_reference_lock, flags);
return;
}
- bam_dmux_log("%s: ref count = %d\n", __func__,
+ BAM_DMUX_LOG("%s: ref count = %d\n", __func__,
wakelock_reference_count);
--wakelock_reference_count;
if (wakelock_reference_count == 0)
@@ -1917,7 +1892,7 @@
if (code == SUBSYS_BEFORE_SHUTDOWN) {
in_global_reset = 1;
in_ssr = 1;
- bam_dmux_log("%s: begin\n", __func__);
+ BAM_DMUX_LOG("%s: begin\n", __func__);
flush_workqueue(bam_mux_rx_workqueue);
}
if (code != SUBSYS_AFTER_SHUTDOWN)
@@ -1978,7 +1953,7 @@
}
spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
- bam_dmux_log("%s: complete\n", __func__);
+ BAM_DMUX_LOG("%s: complete\n", __func__);
return NOTIFY_DONE;
}
@@ -2225,7 +2200,7 @@
{
static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
- bam_dmux_log("%s: apps ack %d->%d\n", __func__,
+ BAM_DMUX_LOG("%s: apps ack %d->%d\n", __func__,
clear_bit & 0x1, ~clear_bit & 0x1);
smsm_change_state(SMSM_APPS_STATE,
clear_bit & SMSM_A2_POWER_CONTROL_ACK,
@@ -2241,10 +2216,10 @@
mutex_lock(&smsm_cb_lock);
bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
DBG_INC_A2_POWER_CONTROL_IN_CNT();
- bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
+ BAM_DMUX_LOG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
new_state);
if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
- bam_dmux_log("%s: already processed this state\n", __func__);
+ BAM_DMUX_LOG("%s: already processed this state\n", __func__);
mutex_unlock(&smsm_cb_lock);
return;
}
@@ -2252,23 +2227,23 @@
last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
- bam_dmux_log("%s: reconnect\n", __func__);
+ BAM_DMUX_LOG("%s: reconnect\n", __func__);
grab_wakelock();
reconnect_to_bam();
} else if (bam_mux_initialized &&
!(new_state & SMSM_A2_POWER_CONTROL)) {
- bam_dmux_log("%s: disconnect\n", __func__);
+ BAM_DMUX_LOG("%s: disconnect\n", __func__);
disconnect_to_bam();
release_wakelock();
} else if (new_state & SMSM_A2_POWER_CONTROL) {
- bam_dmux_log("%s: init\n", __func__);
+ BAM_DMUX_LOG("%s: init\n", __func__);
grab_wakelock();
if (cpu_is_msm9615())
msm9615_bam_init();
else
bam_init();
} else {
- bam_dmux_log("%s: bad state change\n", __func__);
+ BAM_DMUX_LOG("%s: bad state change\n", __func__);
pr_err("%s: unsupported state change\n", __func__);
}
mutex_unlock(&smsm_cb_lock);
@@ -2279,7 +2254,7 @@
uint32_t new_state)
{
DBG_INC_ACK_IN_CNT();
- bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
+ BAM_DMUX_LOG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
new_state);
complete_all(&ul_wakeup_ack_completion);
}
@@ -2322,12 +2297,12 @@
xo_clk = clk_get(&pdev->dev, "xo");
if (IS_ERR(xo_clk)) {
- bam_dmux_log("%s: did not get xo clock\n", __func__);
+ BAM_DMUX_LOG("%s: did not get xo clock\n", __func__);
xo_clk = NULL;
}
dfab_clk = clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(dfab_clk)) {
- bam_dmux_log("%s: did not get dfab clock\n", __func__);
+ BAM_DMUX_LOG("%s: did not get dfab clock\n", __func__);
dfab_clk = NULL;
} else {
rc = clk_set_rate(dfab_clk, 64000000);
@@ -2434,7 +2409,6 @@
bam_ipc_log_txt = ipc_log_context_create(BAM_IPC_LOG_PAGES, "bam_dmux");
if (!bam_ipc_log_txt) {
pr_err("%s : unable to create IPC Logging Context", __func__);
- bam_dmux_state_logging_disabled = 1;
}
rx_timer_interval = DEFAULT_POLLING_MIN_SLEEP;
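
The bam_dmux change drops the intermediate formatting buffer and logs directly through an IPC Logging context created at probe time. A minimal sketch of that pattern, creating the context once and then calling ipc_log_string() into it, is shown below with illustrative names and page count.

#include <linux/kernel.h>
#include <mach/msm_ipc_logging.h>

/*
 * Minimal sketch of the pattern bam_dmux now uses: one IPC Logging
 * context created at probe time, then printf-style strings logged
 * into it. The context pointer and page count are illustrative.
 */
static void *example_ipc_log;
#define EXAMPLE_IPC_LOG_PAGES 2

static void example_ipc_log_init(void)
{
        example_ipc_log = ipc_log_context_create(EXAMPLE_IPC_LOG_PAGES,
                                                 "example");
        if (!example_ipc_log)
                pr_err("%s: unable to create IPC Logging context\n", __func__);
}

static void example_log_state(int powered, int vote)
{
        if (example_ipc_log)
                ipc_log_string(example_ipc_log, "<EX> %c%c state change\n",
                               powered ? 'P' : 'p', vote ? 'V' : 'v');
}
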
diff --git a/arch/arm/mach-msm/board-8064-gpiomux.c b/arch/arm/mach-msm/board-8064-gpiomux.c
index 0dee8f5..0f88287 100644
--- a/arch/arm/mach-msm/board-8064-gpiomux.c
+++ b/arch/arm/mach-msm/board-8064-gpiomux.c
@@ -1729,13 +1729,6 @@
},
};
-static struct gpiomux_setting fsm8064_ep_sync_drsync_cfg = {
- .func = GPIOMUX_FUNC_GPIO,
- .drv = GPIOMUX_DRV_2MA,
- .pull = GPIOMUX_PULL_UP,
- .dir = GPIOMUX_OUT_HIGH,
-};
-
static struct gpiomux_setting fsm8064_ep_sync_input_cfg = {
.func = GPIOMUX_FUNC_GPIO,
.drv = GPIOMUX_DRV_4MA,
@@ -1746,7 +1739,7 @@
{
.gpio = 6, /* GPSPPSIN_DRSYNC */
.settings = {
- [GPIOMUX_SUSPENDED] = &fsm8064_ep_sync_drsync_cfg,
+ [GPIOMUX_SUSPENDED] = &fsm8064_ep_sync_input_cfg,
},
},
{
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index a1ff607..a1ed251 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -137,7 +137,7 @@
PM8921_GPIO_OUTPUT_VIN(14, 1, PM_GPIO_VIN_VPH),
/* PPS_SRC_SEL_N, chooses between WGR7640 PPS source (high) or
* CW GPS module PPS source (low) */
- PM8921_GPIO_OUTPUT_VIN(19, 1, PM_GPIO_VIN_VPH), /* PPS_SRC_SEL_N */
+ PM8921_GPIO_OUTPUT_VIN(19, 0, PM_GPIO_VIN_VPH), /* PPS_SRC_SEL_N */
PM8921_GPIO_OUTPUT_VIN(13, 1, PM_GPIO_VIN_VPH), /* PCIE_CLK_PWR_EN */
PM8921_GPIO_OUTPUT_VIN(37, 1, PM_GPIO_VIN_VPH), /* PCIE_RST_N */
@@ -557,4 +557,7 @@
if (!machine_is_apq8064_mtp() && !machine_is_apq8064_liquid())
apq8064_pm8921_chg_pdata.battery_less_hardware = 1;
+
+ if (machine_is_mpq8064_hrd())
+ apq8064_pm8921_chg_pdata.disable_chg_rmvl_wrkarnd = 1;
}
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 9ed71da..f3d648e 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -4005,6 +4005,7 @@
.init_early = apq8064_allocate_memory_regions,
.init_very_early = apq8064_early_reserve,
.restart = msm_restart,
+ .smp = &msm8960_smp_ops,
MACHINE_END
MACHINE_START(APQ8064_MTP, "QCT APQ8064 MTP")
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index 2b331d0..79ab428 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -51,6 +51,7 @@
#include "platsmp.h"
#include "spm.h"
#include "lpm_resources.h"
+#include "modem_notifier.h"
static struct memtype_reserve msm8226_reserve_table[] __initdata = {
[MEMTYPE_SMI] = {
@@ -102,6 +103,8 @@
*/
void __init msm8226_add_drivers(void)
{
+ msm_init_modem_notifier_list();
+ msm_smd_init();
msm_rpm_driver_init();
msm_lpmrs_module_init();
msm_spm_device_init();
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 819ccc5..3df0b38 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -3037,7 +3037,10 @@
/* Fixup data that needs to change based on GPU ID */
if (cpu_is_msm8960ab()) {
- kgsl_3d0_pdata->chipid = ADRENO_CHIPID(3, 2, 1, 0);
+ if (SOCINFO_VERSION_MINOR(soc_platform_version) == 0)
+ kgsl_3d0_pdata->chipid = ADRENO_CHIPID(3, 2, 1, 0);
+ else
+ kgsl_3d0_pdata->chipid = ADRENO_CHIPID(3, 2, 1, 1);
/* 8960PRO nominal clock rate is 320Mhz */
kgsl_3d0_pdata->pwrlevel[1].gpu_freq = 320000000;
#ifdef CONFIG_MSM_BUS_SCALING
diff --git a/arch/arm/mach-msm/board-qrd7627a.c b/arch/arm/mach-msm/board-qrd7627a.c
index 7038ab9..9c9ccaa 100644
--- a/arch/arm/mach-msm/board-qrd7627a.c
+++ b/arch/arm/mach-msm/board-qrd7627a.c
@@ -93,9 +93,9 @@
"qup_scl" },
{ GPIO_CFG(61, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
"qup_sda" },
- { GPIO_CFG(131, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
+ { GPIO_CFG(131, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
"qup_scl" },
- { GPIO_CFG(132, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
+ { GPIO_CFG(132, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
"qup_sda" },
};
@@ -104,9 +104,9 @@
"qup_scl" },
{ GPIO_CFG(61, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
"qup_sda" },
- { GPIO_CFG(131, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
+ { GPIO_CFG(131, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
"qup_scl" },
- { GPIO_CFG(132, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
+ { GPIO_CFG(132, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
"qup_sda" },
};
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index a963c19..9198976 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -1334,17 +1334,6 @@
},
};
-static struct branch_clk gcc_mmss_noc_cfg_ahb_clk = {
- .cbcr_reg = MMSS_NOC_CFG_AHB_CBCR,
- .has_sibling = 1,
- .base = &virt_bases[GCC_BASE],
- .c = {
- .dbg_name = "gcc_mmss_noc_cfg_ahb_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(gcc_mmss_noc_cfg_ahb_clk.c),
- },
-};
-
static struct branch_clk gcc_mss_cfg_ahb_clk = {
.cbcr_reg = MSS_CFG_AHB_CBCR,
.has_sibling = 1,
@@ -1592,7 +1581,6 @@
static struct measure_mux_entry measure_mux_GCC[] = {
{ &gcc_periph_noc_ahb_clk.c, GCC_BASE, 0x0010 },
{ &gcc_noc_conf_xpu_ahb_clk.c, GCC_BASE, 0x0018 },
- { &gcc_mmss_noc_cfg_ahb_clk.c, GCC_BASE, 0x002a },
{ &gcc_mss_cfg_ahb_clk.c, GCC_BASE, 0x0030 },
{ &gcc_mss_q6_bimc_axi_clk.c, GCC_BASE, 0x0031 },
{ &gcc_usb_hsic_ahb_clk.c, GCC_BASE, 0x0058 },
@@ -1697,19 +1685,6 @@
},
};
-static struct pll_clk mmpll2_pll = {
- .mode_reg = (void __iomem *)MMPLL2_PLL_MODE,
- .status_reg = (void __iomem *)MMPLL2_PLL_STATUS,
- .base = &virt_bases[MMSS_BASE],
- .c = {
- .dbg_name = "mmpll2_pll",
- .parent = &xo.c,
- .rate = 900000000,
- .ops = &clk_ops_local_pll,
- CLK_INIT(mmpll2_pll.c),
- },
-};
-
static struct clk_freq_tbl ftbl_camss_csi0_1_clk[] = {
F_MMSS( 100000000, gpll0, 6, 0, 0),
F_MMSS( 200000000, mmpll0_pll, 4, 0, 0),
@@ -2530,17 +2505,6 @@
},
};
-static struct branch_clk mmss_mmssnoc_ahb_clk = {
- .cbcr_reg = MMSS_MMSSNOC_AHB_CBCR,
- .has_sibling = 1,
- .base = &virt_bases[MMSS_BASE],
- .c = {
- .dbg_name = "mmss_mmssnoc_ahb_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(mmss_mmssnoc_ahb_clk.c),
- },
-};
-
static struct branch_clk mmss_mmssnoc_bto_ahb_clk = {
.cbcr_reg = MMSS_MMSSNOC_BTO_AHB_CBCR,
.has_sibling = 1,
@@ -2650,7 +2614,6 @@
};
static struct measure_mux_entry measure_mux_MMSS[] = {
- { &mmss_mmssnoc_ahb_clk.c, MMSS_BASE, 0x0001 },
{ &mmss_mmssnoc_bto_ahb_clk.c, MMSS_BASE, 0x0002 },
{ &mmss_misc_ahb_clk.c, MMSS_BASE, 0x0003 },
{ &mmss_mmssnoc_axi_clk.c, MMSS_BASE, 0x0004 },
@@ -3100,7 +3063,6 @@
CLK_LOOKUP("ocmem_a_clk", ocmemgx_msmbus_a_clk.c, "msm_bus"),
CLK_LOOKUP("bus_clk", mmss_s0_axi_clk.c, "msm_mmss_noc"),
CLK_LOOKUP("bus_a_clk", mmss_s0_axi_clk.c, "msm_mmss_noc"),
- CLK_LOOKUP("iface_clk", gcc_mmss_noc_cfg_ahb_clk.c, ""),
/* CoreSight clocks */
CLK_LOOKUP("core_clk", qdss_clk.c, "fc322000.tmc"),
@@ -3197,7 +3159,6 @@
CLK_LOOKUP("gpll1", gpll1.c, ""),
CLK_LOOKUP("mmpll0", mmpll0_pll.c, ""),
CLK_LOOKUP("mmpll1", mmpll1_pll.c, ""),
- CLK_LOOKUP("mmpll2", mmpll2_pll.c, ""),
CLK_LOOKUP("core_clk", gcc_blsp1_qup1_i2c_apps_clk.c, ""),
CLK_LOOKUP("core_clk", gcc_blsp1_qup2_i2c_apps_clk.c, ""),
@@ -3368,7 +3329,6 @@
CLK_LOOKUP("cam_gp1_clk", camss_gp1_clk.c, ""),
CLK_LOOKUP("iface_clk", camss_micro_ahb_clk.c, ""),
- CLK_LOOKUP("", mmss_mmssnoc_ahb_clk.c, ""),
CLK_LOOKUP("", mmss_mmssnoc_bto_ahb_clk.c, ""),
CLK_LOOKUP("", mmss_mmssnoc_axi_clk.c, ""),
CLK_LOOKUP("", mmss_s0_axi_clk.c, ""),
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index bfa9ec0..4cef377 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -809,6 +809,7 @@
static DEFINE_CLK_BRANCH_VOTER(cxo_wlan_clk, &cxo_clk_src.c);
static DEFINE_CLK_BRANCH_VOTER(cxo_pil_pronto_clk, &cxo_clk_src.c);
static DEFINE_CLK_BRANCH_VOTER(cxo_dwc3_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_ehci_host_clk, &cxo_clk_src.c);
static struct clk_freq_tbl ftbl_gcc_usb30_master_clk[] = {
F(125000000, gpll0, 1, 5, 24),
@@ -4893,6 +4894,7 @@
CLK_LOOKUP("xo", cxo_wlan_clk.c, "fb000000.qcom,wcnss-wlan"),
CLK_LOOKUP("xo", cxo_pil_pronto_clk.c, "fb21b000.qcom,pronto"),
CLK_LOOKUP("xo", cxo_dwc3_clk.c, "msm_dwc3"),
+ CLK_LOOKUP("xo", cxo_ehci_host_clk.c, "msm_ehci_host"),
CLK_LOOKUP("measure", measure_clk.c, "debug"),
@@ -5033,9 +5035,11 @@
/* MM sensor clocks */
CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "6e.qcom,camera"),
+ CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "20.qcom,camera"),
CLK_LOOKUP("cam_src_clk", mclk2_clk_src.c, "6c.qcom,camera"),
CLK_LOOKUP("cam_src_clk", mclk1_clk_src.c, "90.qcom,camera"),
CLK_LOOKUP("cam_clk", camss_mclk0_clk.c, "6e.qcom,camera"),
+ CLK_LOOKUP("cam_clk", camss_mclk0_clk.c, "20.qcom,camera"),
CLK_LOOKUP("cam_clk", camss_mclk2_clk.c, "6c.qcom,camera"),
CLK_LOOKUP("cam_clk", camss_mclk1_clk.c, "90.qcom,camera"),
CLK_LOOKUP("cam_clk", camss_mclk1_clk.c, ""),
diff --git a/arch/arm/mach-msm/include/mach/iommu_perfmon.h b/arch/arm/mach-msm/include/mach/iommu_perfmon.h
index 5a01bee..c03c752 100644
--- a/arch/arm/mach-msm/include/mach/iommu_perfmon.h
+++ b/arch/arm/mach-msm/include/mach/iommu_perfmon.h
@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/list.h>
+#include <linux/irqreturn.h>
#ifndef MSM_IOMMU_PERFMON_H
#define MSM_IOMMU_PERFMON_H
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
index 5ccdf82..f2a4427 100644
--- a/arch/arm/mach-msm/include/mach/ipa.h
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -438,6 +438,9 @@
int (*release_resource)(void);
};
+#define A2_MUX_HDR_NAME_V4_PREF "dmux_hdr_v4_"
+#define A2_MUX_HDR_NAME_V6_PREF "dmux_hdr_v6_"
+
enum a2_mux_event_type {
A2_MUX_RECEIVE,
A2_MUX_WRITE_DONE
@@ -460,26 +463,28 @@
enum a2_mux_event_type event,
unsigned long data);
-#ifdef CONFIG_IPA
-
-/*
- * a2 service
+/**
+ * enum teth_tethering_mode - Tethering mode (Rmnet / MBIM)
*/
-int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
- void *user_data,
- a2_mux_notify_cb notify_cb);
+enum teth_tethering_mode {
+ TETH_TETHERING_MODE_RMNET,
+ TETH_TETHERING_MODE_MBIM,
+ TETH_TETHERING_MODE_MAX,
+};
-int a2_mux_close_channel(enum a2_mux_logical_channel_id lcid);
+/**
+ * struct teth_bridge_connect_params - Parameters used in teth_bridge_connect()
+ * @ipa_usb_pipe_hdl: IPA to USB pipe handle, returned from ipa_connect()
+ * @usb_ipa_pipe_hdl: USB to IPA pipe handle, returned from ipa_connect()
+ * @tethering_mode: Rmnet or MBIM
+ */
+struct teth_bridge_connect_params {
+ u32 ipa_usb_pipe_hdl;
+ u32 usb_ipa_pipe_hdl;
+ enum teth_tethering_mode tethering_mode;
+};
-int a2_mux_write(enum a2_mux_logical_channel_id lcid, struct sk_buff *skb);
-
-int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid);
-
-int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid);
-
-int a2_mux_get_tethered_client_handles(enum a2_mux_logical_channel_id lcid,
- unsigned int *clnt_cons_handle,
- unsigned int *clnt_prod_handle);
+#ifdef CONFIG_IPA
/*
* Connect / Disconnect
@@ -649,6 +654,34 @@
int ipa_rm_inactivity_timer_release_resource(
enum ipa_rm_resource_name resource_name);
+/*
+ * a2 service
+ */
+int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
+ void *user_data,
+ a2_mux_notify_cb notify_cb);
+
+int a2_mux_close_channel(enum a2_mux_logical_channel_id lcid);
+
+int a2_mux_write(enum a2_mux_logical_channel_id lcid, struct sk_buff *skb);
+
+int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid);
+
+int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid);
+
+int a2_mux_get_tethered_client_handles(enum a2_mux_logical_channel_id lcid,
+ unsigned int *clnt_cons_handle,
+ unsigned int *clnt_prod_handle);
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+int teth_bridge_init(ipa_notify_cb *usb_notify_cb_ptr, void **private_data_ptr);
+
+int teth_bridge_disconnect(void);
+
+int teth_bridge_connect(struct teth_bridge_connect_params *connect_params);
+
#else /* CONFIG_IPA */
static inline int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
@@ -685,7 +718,6 @@
return -EPERM;
}
-
/*
* Connect / Disconnect
*/
@@ -1051,6 +1083,26 @@
return -EPERM;
}
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+static inline int teth_bridge_init(ipa_notify_cb *usb_notify_cb_ptr,
+ void **private_data_ptr)
+{
+ return -EPERM;
+}
+
+static inline int teth_bridge_disconnect(void)
+{
+ return -EPERM;
+}
+
+static inline int teth_bridge_connect(struct teth_bridge_connect_params
+ *connect_params)
+{
+ return -EPERM;
+}
+
#endif /* CONFIG_IPA*/
#endif /* _IPA_H_ */
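
The header now exposes the tethering bridge API alongside the a2 service prototypes. A short illustrative call into teth_bridge_connect() using the new parameter struct is shown below; the pipe-handle arguments are assumed to come from earlier ipa_connect() calls, as the kerneldoc above describes.

#include <mach/ipa.h>

/*
 * Illustrative call sequence for the new tethering bridge API.
 * The pipe handles are whatever ipa_connect() returned for the
 * USB producer/consumer pipes; values here are placeholders.
 */
static int example_teth_connect(u32 ipa_usb_hdl, u32 usb_ipa_hdl)
{
        struct teth_bridge_connect_params params = {
                .ipa_usb_pipe_hdl = ipa_usb_hdl,
                .usb_ipa_pipe_hdl = usb_ipa_hdl,
                .tethering_mode = TETH_TETHERING_MODE_RMNET,
        };

        return teth_bridge_connect(&params);
}
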
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index 02272bc..eb44c40 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -19,15 +19,15 @@
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <asm/sizes.h>
#include <asm/page.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/socinfo.h>
#include <mach/msm_subsystem_map.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
struct msm_iova_data {
struct rb_node node;
diff --git a/arch/arm/mach-msm/ipc_socket.c b/arch/arm/mach-msm/ipc_socket.c
index 2cec5c5..a08e7de 100644
--- a/arch/arm/mach-msm/ipc_socket.c
+++ b/arch/arm/mach-msm/ipc_socket.c
@@ -20,6 +20,9 @@
#include <linux/fcntl.h>
#include <linux/gfp.h>
#include <linux/msm_ipc.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/qmi_encdec.h>
#include <asm/string.h>
#include <asm/atomic.h>
@@ -27,16 +30,93 @@
#include <net/sock.h>
#include <mach/msm_ipc_router.h>
+#include <mach/msm_ipc_logging.h>
#include "ipc_router.h"
#include "msm_ipc_router_security.h"
#define msm_ipc_sk(sk) ((struct msm_ipc_sock *)(sk))
#define msm_ipc_sk_port(sk) ((struct msm_ipc_port *)(msm_ipc_sk(sk)->port))
+#define REQ_RESP_IPC_LOG_PAGES 5
+#define IND_IPC_LOG_PAGES 5
+#define IPC_SEND 1
+#define IPC_RECV 2
+#define IPC_REQ_RESP_LOG(level, buf...) \
+do { \
+ if (ipc_req_resp_log_txt) { \
+ ipc_log_string(ipc_req_resp_log_txt, buf); \
+ } \
+} while (0) \
+
+#define IPC_IND_LOG(level, buf...) \
+do { \
+ if (ipc_ind_log_txt) { \
+ ipc_log_string(ipc_ind_log_txt, buf); \
+ } \
+} while (0) \
static int sockets_enabled;
static struct proto msm_ipc_proto;
static const struct proto_ops msm_ipc_proto_ops;
+static void *ipc_req_resp_log_txt;
+static void *ipc_ind_log_txt;
+
+/**
+ * msm_ipc_router_ipc_log() - Pass log data to IPC logging framework
+ * @tran: Identifies the data to be a receive or send.
+ * @ipc_buf: Buffer to extract the log data.
+ * @port_ptr: IPC Router port corresponding to the current log data.
+ *
+ * This function builds the data that would be passed on to the IPC logging
+ * framework. The data that would be passed corresponds to the information
+ * that is exchanged between the IPC Router and user space modules during
+ * request/response/indication transactions.
+ */
+
+static void msm_ipc_router_ipc_log(uint8_t tran,
+ struct sk_buff *ipc_buf, struct msm_ipc_port *port_ptr)
+{
+ struct qmi_header *hdr = (struct qmi_header *)ipc_buf->data;
+
+ /*
+ * IPC Logging format is as below:-
+ * <Name>(Name of the User Space Process):
+ * <PID> (PID of the user space process) :
+ * <TID> (TID of the user space thread) :
+ * <User Space Module>(CLNT or SERV) :
+ * <Operation Type> (Transmit) :
+ * <Control Flag> (Req/Resp/Ind) :
+ * <Transaction ID> :
+ * <Message ID> :
+ * <Message Length> :
+ */
+ if (ipc_req_resp_log_txt &&
+ (((uint8_t) hdr->cntl_flag == QMI_REQUEST_CONTROL_FLAG) ||
+ ((uint8_t) hdr->cntl_flag == QMI_RESPONSE_CONTROL_FLAG)) &&
+ (port_ptr->type == CLIENT_PORT ||
+ port_ptr->type == SERVER_PORT)) {
+ IPC_REQ_RESP_LOG(KERN_DEBUG,
+ "%s %d %d %s %s CF:%x TI:%x MI:%x ML:%x",
+ current->comm, current->tgid, current->pid,
+ (port_ptr->type == CLIENT_PORT ? "QCCI" : "QCSI"),
+ (tran == IPC_RECV ? "RX" :
+ (tran == IPC_SEND ? "TX" : "ERR")),
+ (uint8_t)hdr->cntl_flag, hdr->txn_id, hdr->msg_id,
+ hdr->msg_len);
+ } else if (ipc_ind_log_txt &&
+ ((uint8_t)hdr->cntl_flag == QMI_INDICATION_CONTROL_FLAG) &&
+ (port_ptr->type == CLIENT_PORT ||
+ port_ptr->type == SERVER_PORT)) {
+ IPC_IND_LOG(KERN_DEBUG,
+ "%s %d %d %s %s CF:%x TI:%x MI:%x ML:%x",
+ current->comm, current->tgid, current->pid,
+ (port_ptr->type == CLIENT_PORT ? "QCCI" : "QCSI"),
+ (tran == IPC_RECV ? "RX" :
+ (tran == IPC_SEND ? "TX" : "ERR")),
+ (uint8_t)hdr->cntl_flag, hdr->txn_id, hdr->msg_id,
+ hdr->msg_len);
+ }
+}
static struct sk_buff_head *msm_ipc_router_build_msg(unsigned int num_sect,
struct iovec const *msg_sect,
@@ -263,6 +343,7 @@
struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
struct sockaddr_msm_ipc *dest = (struct sockaddr_msm_ipc *)m->msg_name;
struct sk_buff_head *msg;
+ struct sk_buff *ipc_buf;
int ret;
if (!dest)
@@ -284,7 +365,8 @@
if (port_ptr->type == CLIENT_PORT)
wait_for_irsc_completion();
-
+ ipc_buf = skb_peek(msg);
+ msm_ipc_router_ipc_log(IPC_SEND, ipc_buf, port_ptr);
ret = msm_ipc_router_send_to(port_ptr, msg, &dest->address);
if (ret == (IPC_ROUTER_HDR_SIZE + total_len))
ret = total_len;
@@ -300,6 +382,7 @@
struct sock *sk = sock->sk;
struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
struct sk_buff_head *msg;
+ struct sk_buff *ipc_buf;
long timeout;
int ret;
@@ -344,6 +427,8 @@
}
ret = msm_ipc_router_extract_msg(m, msg);
+ ipc_buf = skb_peek(msg);
+ msm_ipc_router_ipc_log(IPC_RECV, ipc_buf, port_ptr);
msm_ipc_router_release_msg(msg);
msg = NULL;
release_sock(sk);
@@ -518,6 +603,29 @@
.obj_size = sizeof(struct msm_ipc_sock),
};
+/**
+ * msm_ipc_router_ipc_log_init() - Init function for IPC Logging
+ *
+ * Initialize the buffers to be used to provide the log information
+ * pertaining to the request, response and indication data flow that
+ * happens between user and kernel spaces.
+ */
+void msm_ipc_router_ipc_log_init(void)
+{
+ ipc_req_resp_log_txt =
+ ipc_log_context_create(REQ_RESP_IPC_LOG_PAGES, "req_resp");
+ if (!ipc_req_resp_log_txt) {
+ pr_err("%s: Unable to create IPC logging for Req/Resp",
+ __func__);
+ }
+ ipc_ind_log_txt =
+ ipc_log_context_create(IND_IPC_LOG_PAGES, "indication");
+ if (!ipc_ind_log_txt) {
+ pr_err("%s: Unable to create IPC logging for Indications",
+ __func__);
+ }
+}
+
int msm_ipc_router_init_sockets(void)
{
int ret;
@@ -536,6 +644,7 @@
}
sockets_enabled = 1;
+ msm_ipc_router_ipc_log_init();
out_init_sockets:
return ret;
}
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index cd6aaf4..aa42f5b 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -57,7 +57,7 @@
#define RMB_PMI_CODE_LENGTH 0x18
#define VDD_MSS_UV 1050000
-#define MAX_VDD_MX_UV 1050000
+#define MAX_VDD_MX_UV 1150000
#define PROXY_TIMEOUT_MS 10000
#define POLL_INTERVAL_US 50
diff --git a/arch/arm/mach-msm/remote_spinlock.c b/arch/arm/mach-msm/remote_spinlock.c
index 4e09a9e..94923a0 100644
--- a/arch/arm/mach-msm/remote_spinlock.c
+++ b/arch/arm/mach-msm/remote_spinlock.c
@@ -196,6 +196,8 @@
/* end swp implementation --------------------------------------------------- */
/* ldrex implementation ----------------------------------------------------- */
+static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";
+
static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
{
unsigned long tmp;
@@ -267,7 +269,7 @@
static void *hw_mutex_reg_base;
static DEFINE_MUTEX(hw_map_init_lock);
-static char *compatible_string = "qcom,ipc-spinlock";
+static char *sfpb_compatible_string = "qcom,ipc-spinlock-sfpb";
static int init_hw_mutex(struct device_node *node)
{
@@ -294,7 +296,7 @@
{
struct device_node *node;
- node = of_find_compatible_node(NULL, NULL, compatible_string);
+ node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
if (node) {
init_hw_mutex(node);
} else {
@@ -341,7 +343,9 @@
static int __raw_remote_sfpb_spin_trylock(raw_remote_spinlock_t *lock)
{
- return 1;
+ writel_relaxed(SPINLOCK_PID_APPS, lock);
+ smp_mb();
+ return readl_relaxed(lock) == SPINLOCK_PID_APPS;
}
static void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
@@ -397,6 +401,23 @@
}
+static int dt_node_is_valid(const struct device_node *node)
+{
+ const char *status;
+ int statlen;
+
+ status = of_get_property(node, "status", &statlen);
+ if (status == NULL)
+ return 1;
+
+ if (statlen > 0) {
+ if (!strcmp(status, "okay") || !strcmp(status, "ok"))
+ return 1;
+ }
+
+ return 0;
+}
+
static void initialize_ops(void)
{
struct device_node *node;
@@ -435,23 +456,42 @@
is_hw_lock_type = 1;
break;
case AUTO_MODE:
- node = of_find_compatible_node(NULL, NULL, compatible_string);
- if (node) {
+ /*
+ * of_find_compatible_node() returns a valid pointer even if
+ * the status property is "disabled", so the validity needs
+ * to be checked
+ */
+ node = of_find_compatible_node(NULL, NULL,
+ sfpb_compatible_string);
+ if (node && dt_node_is_valid(node)) {
current_ops.lock = __raw_remote_sfpb_spin_lock;
current_ops.unlock = __raw_remote_sfpb_spin_unlock;
current_ops.trylock = __raw_remote_sfpb_spin_trylock;
current_ops.release = __raw_remote_gen_spin_release;
current_ops.owner = __raw_remote_gen_spin_owner;
is_hw_lock_type = 1;
- } else {
+ break;
+ }
+
+ node = of_find_compatible_node(NULL, NULL,
+ ldrex_compatible_string);
+ if (node && dt_node_is_valid(node)) {
current_ops.lock = __raw_remote_ex_spin_lock;
current_ops.unlock = __raw_remote_ex_spin_unlock;
current_ops.trylock = __raw_remote_ex_spin_trylock;
current_ops.release = __raw_remote_gen_spin_release;
current_ops.owner = __raw_remote_gen_spin_owner;
is_hw_lock_type = 0;
- pr_warn("Falling back to LDREX remote spinlock implementation");
+ break;
}
+
+ current_ops.lock = __raw_remote_ex_spin_lock;
+ current_ops.unlock = __raw_remote_ex_spin_unlock;
+ current_ops.trylock = __raw_remote_ex_spin_trylock;
+ current_ops.release = __raw_remote_gen_spin_release;
+ current_ops.owner = __raw_remote_gen_spin_owner;
+ is_hw_lock_type = 0;
+ pr_warn("Falling back to LDREX remote spinlock implementation");
break;
default:
BUG();
diff --git a/arch/arm/mach-msm/smd_tty.c b/arch/arm/mach-msm/smd_tty.c
index 1820b23..5969a3c 100644
--- a/arch/arm/mach-msm/smd_tty.c
+++ b/arch/arm/mach-msm/smd_tty.c
@@ -47,7 +47,7 @@
struct smd_tty_info {
smd_channel_t *ch;
- struct tty_struct *tty;
+ struct tty_port port;
struct wake_lock wake_lock;
int open_count;
struct tasklet_struct tty_tsklt;
@@ -125,7 +125,7 @@
unsigned char *ptr;
int avail;
struct smd_tty_info *info = (struct smd_tty_info *)param;
- struct tty_struct *tty = info->tty;
+ struct tty_struct *tty = tty_port_tty_get(&info->port);
unsigned long flags;
if (!tty)
@@ -156,6 +156,7 @@
if (avail <= 0) {
mod_timer(&info->buf_req_timer,
jiffies + msecs_to_jiffies(30));
+ tty_kref_put(tty);
return;
}
@@ -173,11 +174,13 @@
/* XXX only when writable and necessary */
tty_wakeup(tty);
+ tty_kref_put(tty);
}
static void smd_tty_notify(void *priv, unsigned event)
{
struct smd_tty_info *info = priv;
+ struct tty_struct *tty;
unsigned long flags;
switch (event) {
@@ -195,8 +198,10 @@
*/
if (smd_write_avail(info->ch)) {
smd_disable_read_intr(info->ch);
- if (info->tty)
- wake_up_interruptible(&info->tty->write_wait);
+ tty = tty_port_tty_get(&info->port);
+ if (tty)
+ wake_up_interruptible(&tty->write_wait);
+ tty_kref_put(tty);
}
spin_lock_irqsave(&info->ra_lock, flags);
if (smd_read_avail(info->ch)) {
@@ -225,9 +230,11 @@
/* schedule task to send TTY_BREAK */
tasklet_hi_schedule(&info->tty_tsklt);
- if (info->tty->index == LOOPBACK_IDX)
+ tty = tty_port_tty_get(&info->port);
+ if (tty->index == LOOPBACK_IDX)
schedule_delayed_work(&loopback_work,
msecs_to_jiffies(1000));
+ tty_kref_put(tty);
break;
}
}
@@ -241,7 +248,8 @@
return (modem_state & ready_state) == ready_state;
}
-static int smd_tty_open(struct tty_struct *tty, struct file *f)
+static int smd_tty_port_activate(struct tty_port *tport,
+ struct tty_struct *tty)
{
int res = 0;
unsigned int n = tty->index;
@@ -306,8 +314,6 @@
}
}
-
- info->tty = tty;
tasklet_init(&info->tty_tsklt, smd_tty_read,
(unsigned long)info);
wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND,
@@ -354,24 +360,27 @@
return res;
}
-static void smd_tty_close(struct tty_struct *tty, struct file *f)
+static void smd_tty_port_shutdown(struct tty_port *tport)
{
- struct smd_tty_info *info = tty->driver_data;
+ struct smd_tty_info *info;
+ struct tty_struct *tty = tty_port_tty_get(tport);
unsigned long flags;
- if (info == 0)
+ info = tty->driver_data;
+ if (info == 0) {
+ tty_kref_put(tty);
return;
+ }
mutex_lock(&smd_tty_lock);
if (--info->open_count == 0) {
spin_lock_irqsave(&info->reset_lock, flags);
info->is_open = 0;
spin_unlock_irqrestore(&info->reset_lock, flags);
- if (info->tty) {
+ if (tty) {
tasklet_kill(&info->tty_tsklt);
wake_lock_destroy(&info->wake_lock);
wake_lock_destroy(&info->ra_wake_lock);
- info->tty = 0;
}
tty->driver_data = 0;
del_timer(&info->buf_req_timer);
@@ -382,6 +391,21 @@
}
}
mutex_unlock(&smd_tty_lock);
+ tty_kref_put(tty);
+}
+
+static int smd_tty_open(struct tty_struct *tty, struct file *f)
+{
+ struct smd_tty_info *info = smd_tty + tty->index;
+
+ return tty_port_open(&info->port, tty, f);
+}
+
+static void smd_tty_close(struct tty_struct *tty, struct file *f)
+{
+ struct smd_tty_info *info = tty->driver_data;
+
+ tty_port_close(&info->port, tty, f);
}
static int smd_tty_write(struct tty_struct *tty, const unsigned char *buf, int len)
@@ -482,6 +506,11 @@
0, SMSM_SMD_LOOPBACK);
}
+static const struct tty_port_operations smd_tty_port_ops = {
+ .shutdown = smd_tty_port_shutdown,
+ .activate = smd_tty_port_activate,
+};
+
static struct tty_operations smd_tty_ops = {
.open = smd_tty_open,
.close = smd_tty_close,
@@ -523,6 +552,7 @@
int ret;
int n;
int idx;
+ struct tty_port *port;
smd_tty_driver = alloc_tty_driver(MAX_SMD_TTYS);
if (smd_tty_driver == 0)
@@ -578,6 +608,10 @@
continue;
}
+ port = &smd_tty[idx].port;
+ tty_port_init(port);
+ port->ops = &smd_tty_port_ops;
+ /* TODO: For kernel >= 3.7 use tty_port_register_device */
tty_register_device(smd_tty_driver, idx, 0);
init_completion(&smd_tty[idx].ch_allocated);
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 6314e94..d177b05 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -7,7 +7,6 @@
obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
mmap.o pgd.o mmu.o vmregion.o
-obj-$(CONFIG_DEBUG_RODATA) += rodata.o
ifneq ($(CONFIG_MMU),y)
obj-y += nommu.o
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 0ebc2b9..bf59a9d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -224,7 +224,7 @@
* allocations. This must be the smallest DMA mask in the system,
* so a successful GFP_DMA allocation will always satisfy this.
*/
-u32 arm_dma_limit;
+phys_addr_t arm_dma_limit;
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
unsigned long dma_size)
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 8877ddd..21653f2 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -65,9 +65,9 @@
#endif
#ifdef CONFIG_ZONE_DMA
-extern u32 arm_dma_limit;
+extern phys_addr_t arm_dma_limit;
#else
-#define arm_dma_limit ((u32)~0)
+#define arm_dma_limit ((phys_addr_t)~0)
#endif
struct map_desc;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 8575f78..25cb67c 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -604,53 +604,30 @@
return early_alloc_aligned(sz, sz);
}
-static pte_t * __init early_pte_alloc(pmd_t *pmd)
-{
- if (pmd_none(*pmd) || pmd_bad(*pmd))
- return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
- return pmd_page_vaddr(*pmd);
-}
-
-static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
-{
- __pmd_populate(pmd, __pa(pte), prot);
- BUG_ON(pmd_bad(*pmd));
-}
-
-#ifdef CONFIG_HIGHMEM
-static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
- unsigned long addr, unsigned long prot)
+static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
if (pmd_none(*pmd)) {
- pte_t *pte = early_pte_alloc(pmd);
- early_pte_install(pmd, pte, prot);
+ pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+ __pmd_populate(pmd, __pa(pte), prot);
}
BUG_ON(pmd_bad(*pmd));
return pte_offset_kernel(pmd, addr);
}
-#endif
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
const struct mem_type *type)
{
- pte_t *start_pte = early_pte_alloc(pmd);
- pte_t *pte = start_pte + pte_index(addr);
-
- /* If replacing a section mapping, the whole section must be replaced */
- BUG_ON(pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));
-
+ pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
do {
set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
- early_pte_install(pmd, start_pte, type->prot_l1);
}
static void __init alloc_init_section(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys,
- const struct mem_type *type,
- bool force_pages)
+ const struct mem_type *type)
{
pmd_t *pmd = pmd_offset(pud, addr);
@@ -660,7 +637,7 @@
* L1 entries, whereas PGDs refer to a group of L1 entries making
* up one logical pointer to an L2 table.
*/
- if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0 && !force_pages) {
+ if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
pmd_t *p = pmd;
#ifndef CONFIG_ARM_LPAE
@@ -684,15 +661,14 @@
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
- unsigned long end, unsigned long phys, const struct mem_type *type,
- bool force_pages)
+ unsigned long end, unsigned long phys, const struct mem_type *type)
{
pud_t *pud = pud_offset(pgd, addr);
unsigned long next;
do {
next = pud_addr_end(addr, end);
- alloc_init_section(pud, addr, next, phys, type, force_pages);
+ alloc_init_section(pud, addr, next, phys, type);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
@@ -766,7 +742,7 @@
* offsets, and we take full advantage of sections and
* supersections.
*/
-static void __init create_mapping(struct map_desc *md, bool force_pages)
+static void __init create_mapping(struct map_desc *md)
{
unsigned long addr, length, end;
phys_addr_t phys;
@@ -818,7 +794,7 @@
do {
unsigned long next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys, type, force_pages);
+ alloc_init_pud(pgd, addr, next, phys, type);
phys += next - addr;
addr = next;
@@ -839,7 +815,7 @@
vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
for (md = io_desc; nr; md++, nr--) {
- create_mapping(md, false);
+ create_mapping(md);
vm->addr = (void *)(md->virtual & PAGE_MASK);
vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
vm->phys_addr = __pfn_to_phys(md->pfn);
@@ -1199,12 +1175,12 @@
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
map.type = MT_HIGH_VECTORS;
- create_mapping(&map, false);
+ create_mapping(&map);
if (!vectors_high()) {
map.virtual = 0;
map.type = MT_LOW_VECTORS;
- create_mapping(&map, false);
+ create_mapping(&map);
}
/*
@@ -1224,7 +1200,7 @@
map.virtual = CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE;
map.length = PAGE_SIZE;
map.type = MT_DEVICE_USER_ACCESSIBLE;
- create_mapping(&map, false);
+ create_mapping(&map);
}
}
@@ -1241,7 +1217,7 @@
static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
- pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
+ pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}
@@ -1349,14 +1325,12 @@
static void __init map_lowmem(void)
{
struct memblock_region *reg;
- phys_addr_t start;
- phys_addr_t end;
- struct map_desc map;
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
- start = reg->base;
- end = start + reg->size;
+ phys_addr_t start = reg->base;
+ phys_addr_t end = start + reg->size;
+ struct map_desc map;
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
@@ -1370,28 +1344,28 @@
map.length = SECTION_SIZE;
map.type = MT_MEMORY;
- create_mapping(&map, false);
+ create_mapping(&map);
map.pfn = __phys_to_pfn(start + SECTION_SIZE);
map.virtual = __phys_to_virt(start + SECTION_SIZE);
map.length = (unsigned long)RX_AREA_END - map.virtual;
map.type = MT_MEMORY_RX;
- create_mapping(&map, false);
+ create_mapping(&map);
map.pfn = __phys_to_pfn(__pa(__start_rodata));
map.virtual = (unsigned long)__start_rodata;
map.length = __init_begin - __start_rodata;
map.type = MT_MEMORY_R;
- create_mapping(&map, false);
+ create_mapping(&map);
map.pfn = __phys_to_pfn(__pa(__init_begin));
map.virtual = (unsigned long)__init_begin;
map.length = __init_data - __init_begin;
map.type = MT_MEMORY;
- create_mapping(&map, false);
+ create_mapping(&map);
map.pfn = __phys_to_pfn(__pa(__init_data));
map.virtual = (unsigned long)__init_data;
@@ -1406,20 +1380,8 @@
map.type = MT_MEMORY;
#endif
- create_mapping(&map, false);
+ create_mapping(&map);
}
-
-#ifdef CONFIG_DEBUG_RODATA
- start = __pa(_stext) & PMD_MASK;
- end = ALIGN(__pa(__end_rodata), PMD_SIZE);
-
- map.pfn = __phys_to_pfn(start);
- map.virtual = __phys_to_virt(start);
- map.length = end - start;
- map.type = MT_MEMORY;
-
- create_mapping(&map, true);
-#endif
}
/*
diff --git a/arch/arm/mm/rodata.c b/arch/arm/mm/rodata.c
deleted file mode 100644
index 9a8eb84..0000000
--- a/arch/arm/mm/rodata.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * linux/arch/arm/mm/rodata.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * Author: Colin Cross <ccross@android.com>
- *
- * Based on x86 implementation in arch/x86/mm/init_32.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-
-#include <asm/cache.h>
-#include <asm/pgtable.h>
-#include <asm/rodata.h>
-#include <asm/sections.h>
-#include <asm/tlbflush.h>
-
-#include "mm.h"
-
-static int kernel_set_to_readonly __read_mostly;
-
-#ifdef CONFIG_DEBUG_RODATA_TEST
-static const int rodata_test_data = 0xC3;
-
-static noinline void rodata_test(void)
-{
- int result;
-
- pr_info("%s: attempting to write to read-only section:\n", __func__);
-
- if (*(volatile int *)&rodata_test_data != 0xC3) {
- pr_err("read only data changed before test\n");
- return;
- }
-
- /*
- * Attempt to to write to rodata_test_data, trapping the expected
- * data abort. If the trap executed, result will be 1. If it didn't,
- * result will be 0xFF.
- */
- asm volatile(
- "0: str %[zero], [%[rodata_test_data]]\n"
- " mov %[result], #0xFF\n"
- " b 2f\n"
- "1: mov %[result], #1\n"
- "2:\n"
-
- /* Exception fixup - if store at label 0 faults, jumps to 1 */
- ".pushsection __ex_table, \"a\"\n"
- " .long 0b, 1b\n"
- ".popsection\n"
-
- : [result] "=r" (result)
- : [rodata_test_data] "r" (&rodata_test_data), [zero] "r" (0)
- : "memory"
- );
-
- if (result == 1)
- pr_info("write to read-only section trapped, success\n");
- else
- pr_err("write to read-only section NOT trapped, test failed\n");
-
- if (*(volatile int *)&rodata_test_data != 0xC3)
- pr_err("read only data changed during write\n");
-}
-#else
-static inline void rodata_test(void) { }
-#endif
-
-static int set_page_attributes(unsigned long virt, int numpages,
- pte_t (*f)(pte_t))
-{
- pmd_t *pmd;
- pte_t *pte;
- unsigned long start = virt;
- unsigned long end = virt + (numpages << PAGE_SHIFT);
- unsigned long pmd_end;
-
- while (virt < end) {
- pmd = pmd_off_k(virt);
- pmd_end = min(ALIGN(virt + 1, PMD_SIZE), end);
-
- if ((pmd_val(*pmd) & PMD_TYPE_MASK) != PMD_TYPE_TABLE) {
- pr_err("%s: pmd %p=%08lx for %08lx not page table\n",
- __func__, pmd, pmd_val(*pmd), virt);
- virt = pmd_end;
- continue;
- }
-
- while (virt < pmd_end) {
- pte = pte_offset_kernel(pmd, virt);
- set_pte_ext(pte, f(*pte), 0);
- virt += PAGE_SIZE;
- }
- }
-
- flush_tlb_kernel_range(start, end);
-
- return 0;
-}
-
-int set_memory_ro(unsigned long virt, int numpages)
-{
- return set_page_attributes(virt, numpages, pte_wrprotect);
-}
-EXPORT_SYMBOL(set_memory_ro);
-
-int set_memory_rw(unsigned long virt, int numpages)
-{
- return set_page_attributes(virt, numpages, pte_mkwrite);
-}
-EXPORT_SYMBOL(set_memory_rw);
-
-void set_kernel_text_rw(void)
-{
- unsigned long start = PAGE_ALIGN((unsigned long)_text);
- unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
-
- if (!kernel_set_to_readonly)
- return;
-
- pr_debug("Set kernel text: %lx - %lx to read-write\n",
- start, start + size);
-
- set_memory_rw(start, size >> PAGE_SHIFT);
-}
-
-void set_kernel_text_ro(void)
-{
- unsigned long start = PAGE_ALIGN((unsigned long)_text);
- unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
-
- if (!kernel_set_to_readonly)
- return;
-
- pr_info_once("Write protecting the kernel text section %lx - %lx\n",
- start, start + size);
-
- pr_debug("Set kernel text: %lx - %lx to read only\n",
- start, start + size);
-
- set_memory_ro(start, size >> PAGE_SHIFT);
-}
-
-void mark_rodata_ro(void)
-{
- kernel_set_to_readonly = 1;
-
- set_kernel_text_ro();
-
- rodata_test();
-}
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index cd341e8..3a8bbc5 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -9,6 +9,9 @@
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License or (at your optional) any later version of the license.
+ *
+ * The Linux Foundation chooses to take subject only to the GPLv2 license
+ * terms, and distributes only under these terms.
*/
#define pr_fmt(fmt) "cma: " fmt
@@ -55,8 +58,9 @@
phys_addr_t base;
unsigned long size;
struct cma *cma;
-} cma_areas[MAX_CMA_AREAS] __initdata;
-static unsigned cma_area_count __initdata;
+ const char *name;
+} cma_areas[MAX_CMA_AREAS];
+static unsigned cma_area_count;
static struct cma_map {
@@ -74,6 +78,20 @@
return NULL;
}
+static struct cma *cma_get_area_by_name(const char *name)
+{
+ int i;
+ if (!name)
+ return NULL;
+
+ for (i = 0; i < cma_area_count; i++)
+ if (cma_areas[i].name && strcmp(cma_areas[i].name, name) == 0)
+ return cma_areas[i].cma;
+ return NULL;
+}
+
+
+
#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
@@ -193,9 +211,10 @@
phys_addr_t base, size;
unsigned long len;
__be32 *prop;
+ char *name;
if (strncmp(uname, "region@", 7) != 0 || depth != 2 ||
- !of_get_flat_dt_prop(node, "contiguous-region", NULL))
+ !of_get_flat_dt_prop(node, "linux,contiguous-region", NULL))
return 0;
prop = of_get_flat_dt_prop(node, "reg", &len);
@@ -205,9 +224,11 @@
base = be32_to_cpu(prop[0]);
size = be32_to_cpu(prop[1]);
+ name = of_get_flat_dt_prop(node, "label", NULL);
+
pr_info("Found %s, memory base %lx, size %ld MiB\n", uname,
(unsigned long)base, (unsigned long)size / SZ_1M);
- dma_contiguous_reserve_area(size, &base, 0);
+ dma_contiguous_reserve_area(size, &base, 0, name);
return 0;
}
@@ -248,7 +269,8 @@
pr_debug("%s: reserving %ld MiB for global area\n", __func__,
(unsigned long)sel_size / SZ_1M);
- if (dma_contiguous_reserve_area(sel_size, &base, limit) == 0)
+ if (dma_contiguous_reserve_area(sel_size, &base, limit, NULL)
+ == 0)
dma_contiguous_def_base = base;
}
#ifdef CONFIG_OF
@@ -271,7 +293,7 @@
* devices.
*/
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
- phys_addr_t limit)
+ phys_addr_t limit, const char *name)
{
phys_addr_t base = *res_base;
phys_addr_t alignment;
@@ -323,6 +345,7 @@
*/
cma_areas[cma_area_count].base = base;
cma_areas[cma_area_count].size = size;
+ cma_areas[cma_area_count].name = name;
cma_area_count++;
*res_base = base;
@@ -363,6 +386,7 @@
{
struct device_node *node;
struct cma *cma;
+ const char *name;
u32 value;
node = of_parse_phandle(dev->of_node, "linux,contiguous-region", 0);
@@ -370,7 +394,11 @@
return;
if (of_property_read_u32(node, "reg", &value) && !value)
return;
- cma = cma_get_area(value);
+
+ if (of_property_read_string(node, "label", &name))
+ return;
+
+ cma = cma_get_area_by_name(name);
if (!cma)
return;
diff --git a/drivers/char/diag/diagchar_hdlc.c b/drivers/char/diag/diagchar_hdlc.c
index b94ea2f..2369c4d 100644
--- a/drivers/char/diag/diagchar_hdlc.c
+++ b/drivers/char/diag/diagchar_hdlc.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2008-2009, 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2009, 2012-2013, The Linux Foundation.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -172,11 +173,14 @@
uint8_t src_byte;
int pkt_bnd = 0;
+ int msg_start;
if (hdlc && hdlc->src_ptr && hdlc->dest_ptr &&
(hdlc->src_size - hdlc->src_idx > 0) &&
(hdlc->dest_size - hdlc->dest_idx > 0)) {
+ msg_start = (hdlc->src_idx == 0) ? 1 : 0;
+
src_ptr = hdlc->src_ptr;
src_ptr = &src_ptr[hdlc->src_idx];
src_length = hdlc->src_size - hdlc->src_idx;
@@ -203,8 +207,16 @@
}
} else if (src_byte == CONTROL_CHAR) {
dest_ptr[len++] = src_byte;
- pkt_bnd = 1;
+ /*
+ * If this is the first byte in the message,
+ * then it is part of the command. Otherwise,
+ * consider it as the last byte of the
+ * message.
+ */
+ if (msg_start && i == 0 && src_length > 1)
+ continue;
i++;
+ pkt_bnd = 1;
break;
} else {
dest_ptr[len++] = src_byte;
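A rough illustration of the case the new msg_start check handles (byte values are invented): if a source buffer begins with the 0x7e control character,

    unsigned char src[] = { 0x7e, 0x4b, 0x12, 0x34, 0x7e };

the leading 0x7e is now copied through as part of the command rather than being treated as an end-of-packet marker, and only the trailing 0x7e terminates the packet.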
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 7f4edd1..2aca8cf 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -993,10 +993,18 @@
ret = diag_hdlc_decode(&hdlc);
- if (hdlc.dest_idx < 3) {
- pr_err("diag: Integer underflow in hdlc processing\n");
+ /*
+ * A message of 3 bytes or less is too short: a valid message needs at
+ * least 4 bytes, since the HDLC encoding adds 2 bytes of CRC and the
+ * terminating 0x7e byte on top of at least 1 byte of payload
+ */
+ if (hdlc.dest_idx < 4) {
+ pr_err_ratelimited("diag: In %s, message is too short, len: %d, dest len: %d\n",
+ __func__, len, hdlc.dest_idx);
return;
}
+
if (ret) {
type = diag_process_apps_pkt(driver->hdlc_buf,
hdlc.dest_idx - 3);
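To make the length check above concrete (payload and CRC values are invented), the shortest decode that passes is one payload byte plus the HDLC trailer, i.e. hdlc.dest_idx == 4:

    /* 1 payload byte + 2 CRC bytes + terminating 0x7e */
    unsigned char decoded_min[] = { 0x4b, 0x12, 0x34, 0x7e };

The subsequent call then strips the 3 trailer bytes (hdlc.dest_idx - 3), leaving the 1-byte packet for diag_process_apps_pkt().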
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index 1ca457f..832a9a1 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -55,7 +55,7 @@
},
{
.id = ION_CP_MM_HEAP_ID,
- .type = ION_HEAP_TYPE_CP,
+ .type = ION_HEAP_TYPE_SECURE_DMA,
.name = ION_MM_HEAP_NAME,
.permission_type = IPT_TYPE_MM_CARVEOUT,
},
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index b7d813c..b1a45bf 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2000,8 +2000,17 @@
/* Is the ring buffer is empty? */
GSL_RB_GET_READPTR(rb, &rb->rptr);
if (!device->active_cnt && (rb->rptr == rb->wptr)) {
- /* Is the core idle? */
- status = is_adreno_rbbm_status_idle(device);
+ /*
+ * Are there interrupts pending? If so, pretend we
+ * are not idle - this avoids the possibility that we go
+ * to a lower power state without handling interrupts
+ * first.
+ */
+
+ if (!adreno_dev->gpudev->irq_pending(adreno_dev)) {
+ /* Is the core idle? */
+ status = is_adreno_rbbm_status_idle(device);
+ }
}
} else {
status = true;
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index cc6eb16..b1cab9b 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -125,6 +125,7 @@
struct adreno_context *);
irqreturn_t (*irq_handler)(struct adreno_device *);
void (*irq_control)(struct adreno_device *, int);
+ unsigned int (*irq_pending)(struct adreno_device *);
void * (*snapshot)(struct adreno_device *, void *, int *, int);
void (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
void (*start)(struct adreno_device *);
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 952d1f8..6db6e7b 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1706,34 +1706,6 @@
return;
}
- if (status & CP_INT_CNTL__RB_INT_MASK) {
- /* signal intr completion event */
- unsigned int context_id, timestamp;
- kgsl_sharedmem_readl(&device->memstore, &context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
-
- kgsl_sharedmem_readl(&device->memstore, &timestamp,
- KGSL_MEMSTORE_OFFSET(context_id,
- eoptimestamp));
-
- if (context_id < KGSL_MEMSTORE_MAX) {
- /* reset per context ts_cmp_enable */
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id,
- ts_cmp_enable), 0);
- /* Always reset global timestamp ts_cmp_enable */
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(
- KGSL_MEMSTORE_GLOBAL,
- ts_cmp_enable), 0);
- wmb();
- }
-
- KGSL_CMD_WARN(device, "<%d:0x%x> ringbuffer interrupt\n",
- context_id, timestamp);
- }
-
for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) {
if (status & kgsl_cp_error_irqs[i].mask) {
KGSL_CMD_CRIT(rb->device, "%s\n",
@@ -1840,6 +1812,19 @@
wmb();
}
+static unsigned int a2xx_irq_pending(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int rbbm, cp, mh;
+
+ adreno_regread(device, REG_RBBM_INT_CNTL, &rbbm);
+ adreno_regread(device, REG_CP_INT_CNTL, &cp);
+ adreno_regread(device, MH_INTERRUPT_MASK, &mh);
+
+ return ((rbbm & RBBM_INT_MASK) || (cp & CP_INT_MASK) ||
+ (mh & kgsl_mmu_get_int_mask())) ? 1 : 0;
+}
+
static void a2xx_rb_init(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb)
{
@@ -2035,6 +2020,7 @@
.ctxt_draw_workaround = a2xx_drawctxt_draw_workaround,
.irq_handler = a2xx_irq_handler,
.irq_control = a2xx_irq_control,
+ .irq_pending = a2xx_irq_pending,
.snapshot = a2xx_snapshot,
.rb_init = a2xx_rb_init,
.busy_cycles = a2xx_busy_cycles,
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index a3739a2..73a7f52 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2591,33 +2591,7 @@
{
struct kgsl_device *device = &adreno_dev->dev;
- if (irq == A3XX_INT_CP_RB_INT) {
- unsigned int context_id, timestamp;
- kgsl_sharedmem_readl(&device->memstore, &context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
-
- kgsl_sharedmem_readl(&device->memstore, &timestamp,
- KGSL_MEMSTORE_OFFSET(context_id,
- eoptimestamp));
-
- if (context_id < KGSL_MEMSTORE_MAX) {
- /* reset per context ts_cmp_enable */
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id,
- ts_cmp_enable), 0);
- /* Always reset global timestamp ts_cmp_enable */
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(
- KGSL_MEMSTORE_GLOBAL,
- ts_cmp_enable), 0);
- wmb();
- }
-
- KGSL_CMD_WARN(device, "<%d:0x%x> ringbuffer interrupt\n",
- context_id, timestamp);
- }
-
+ /* Wake up everybody waiting for the interrupt */
wake_up_interruptible_all(&device->wait_queue);
/* Schedule work to free mem and issue ibs */
@@ -2713,6 +2687,15 @@
adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, 0);
}
+static unsigned int a3xx_irq_pending(struct adreno_device *adreno_dev)
+{
+ unsigned int status;
+
+ adreno_regread(&adreno_dev->dev, A3XX_RBBM_INT_0_STATUS, &status);
+
+ return (status & A3XX_INT_MASK) ? 1 : 0;
+}
+
static unsigned int a3xx_busy_cycles(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = &adreno_dev->dev;
@@ -2958,6 +2941,7 @@
.rb_init = a3xx_rb_init,
.irq_control = a3xx_irq_control,
.irq_handler = a3xx_irq_handler,
+ .irq_pending = a3xx_irq_pending,
.busy_cycles = a3xx_busy_cycles,
.start = a3xx_start,
.snapshot = a3xx_snapshot,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index c43ac51..1d25646 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -570,7 +570,7 @@
total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
/* Add CP_COND_EXEC commands to generate CP_INTERRUPT */
- total_sizedwords += context ? 7 : 0;
+ total_sizedwords += context ? 13 : 0;
if (adreno_is_a3xx(adreno_dev))
total_sizedwords += 7;
@@ -720,7 +720,25 @@
context_id, ref_wait_ts)) >> 2);
GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
/* # of conditional command DWORDs */
- GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 8);
+
+ /* Clear the ts_cmp_enable for the context */
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_MEM_WRITE, 2));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, gpuaddr +
+ KGSL_MEMSTORE_OFFSET(
+ context_id, ts_cmp_enable));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x0);
+
+ /* Clear the ts_cmp_enable for the global timestamp */
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_MEM_WRITE, 2));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, gpuaddr +
+ KGSL_MEMSTORE_OFFSET(
+ KGSL_MEMSTORE_GLOBAL, ts_cmp_enable));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x0);
+
+ /* Trigger the interrupt */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_INTERRUPT, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 115fcb7..7ed0b10 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1088,10 +1088,7 @@
result);
/* Fire off any pending suspend operations that are in flight */
-
- INIT_COMPLETION(dev_priv->device->suspend_gate);
- dev_priv->device->active_cnt--;
- complete(&dev_priv->device->suspend_gate);
+ kgsl_active_count_put(dev_priv->device);
return result;
}
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 62316f3..66390fc 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -133,6 +133,7 @@
void *priv;
struct list_head list;
void *owner;
+ unsigned int created;
};
@@ -449,4 +450,23 @@
kref_put(&context->refcount, kgsl_context_destroy);
}
+/**
+ * kgsl_active_count_put - Decrease the device active count
+ * @device: Pointer to a KGSL device
+ *
+ * Decrease the active count for the KGSL device and trigger the suspend_gate
+ * completion if it hits zero
+ */
+static inline void
+kgsl_active_count_put(struct kgsl_device *device)
+{
+ if (device->active_cnt == 1)
+ INIT_COMPLETION(device->suspend_gate);
+
+ device->active_cnt--;
+
+ if (device->active_cnt == 0)
+ complete(&device->suspend_gate);
+}
+
#endif /* __KGSL_DEVICE_H */
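To make the intended pairing explicit, a minimal sketch of how this helper is used (mirroring the event code later in this patch; locking and error handling omitted):

    /* take a reference before deferring work that must complete
     * before the device may drop into a low power state ... */
    device->active_cnt++;
    queue_work(device->work_queue, &device->ts_expired_ws);

    /* ... and release it once the deferred event has fired or been
     * cancelled, completing suspend_gate when the count hits zero */
    kgsl_active_count_put(device);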
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index be9b5eb..6798eed 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -16,6 +16,8 @@
#include <linux/module.h>
#include <kgsl_device.h>
+#include "kgsl_trace.h"
+
static void _add_event_to_list(struct list_head *head, struct kgsl_event *event)
{
struct list_head *n;
@@ -71,6 +73,7 @@
*/
if (timestamp_cmp(cur_ts, ts) >= 0) {
+ trace_kgsl_fire_event(id, ts, 0);
cb(device, priv, id, ts);
return 0;
}
@@ -84,6 +87,9 @@
event->priv = priv;
event->func = cb;
event->owner = owner;
+ event->created = jiffies;
+
+ trace_kgsl_register_event(id, ts);
/* inc refcount to avoid race conditions in cleanup */
if (context)
@@ -106,6 +112,13 @@
} else
_add_event_to_list(&device->events, event);
+ /*
+ * Increase the active count on the device to avoid going into power
+ * saving modes while events are pending
+ */
+
+ device->active_cnt++;
+
queue_work(device->work_queue, &device->ts_expired_ws);
return 0;
}
@@ -137,12 +150,16 @@
* system got before the event was canceled
*/
+ trace_kgsl_fire_event(id, cur, jiffies - event->created);
+
if (event->func)
event->func(device, event->priv, id, cur);
kgsl_context_put(context);
list_del(&event->list);
kfree(event);
+
+ kgsl_active_count_put(device);
}
/* Remove ourselves from the master pending list */
@@ -175,6 +192,10 @@
* the callback knows how far the GPU made it before things went
* explosion
*/
+
+ trace_kgsl_fire_event(KGSL_MEMSTORE_GLOBAL, cur,
+ jiffies - event->created);
+
if (event->func)
event->func(device, event->priv, KGSL_MEMSTORE_GLOBAL,
cur);
@@ -184,6 +205,8 @@
list_del(&event->list);
kfree(event);
+
+ kgsl_active_count_put(device);
}
}
EXPORT_SYMBOL(kgsl_cancel_events);
@@ -207,6 +230,9 @@
* to the timestamp they wanted
*/
+ trace_kgsl_fire_event(id, event->timestamp,
+ jiffies - event->created);
+
if (event->func)
event->func(device, event->priv, id, event->timestamp);
@@ -215,6 +241,8 @@
list_del(&event->list);
kfree(event);
+
+ kgsl_active_count_put(device);
}
}
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index f7818bb..8c4811e 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -693,6 +693,41 @@
)
);
+TRACE_EVENT(kgsl_register_event,
+ TP_PROTO(unsigned int id, unsigned int timestamp),
+ TP_ARGS(id, timestamp),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->timestamp = timestamp;
+ ),
+ TP_printk(
+ "ctx=%d ts=%d",
+ __entry->id, __entry->timestamp)
+);
+
+TRACE_EVENT(kgsl_fire_event,
+ TP_PROTO(unsigned int id, unsigned int ts,
+ unsigned int age),
+ TP_ARGS(id, ts, age),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, ts)
+ __field(unsigned int, age)
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->ts = ts;
+ __entry->age = age;
+ ),
+ TP_printk(
+ "ctx=%d ts=%d age=%u",
+ __entry->id, __entry->ts, __entry->age)
+);
+
#endif /* _KGSL_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index ffddcba..1283fa3 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -55,6 +55,27 @@
If unsure, say Y.
+config UHID
+ tristate "User-space I/O driver support for HID subsystem"
+ depends on HID
+ default n
+ ---help---
+ Say Y here if you want to provide HID I/O Drivers from user-space.
+ This allows you to write I/O drivers in user-space and feed the data from
+ the device into the kernel. The kernel parses the HID reports, loads the
+ corresponding HID Device Driver or provides input devices on top of your
+ user-space device.
+
+ This driver cannot be used to parse HID-reports in user-space and write
+ special HID-drivers. You should use hidraw for that.
+ Instead, this driver allows you to write the transport-layer driver in
+ user-space like USB-HID and Bluetooth-HID do in kernel-space.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called uhid.
+
source "drivers/hid/usbhid/Kconfig"
menu "Special HID drivers"
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 22f1d16..9dca845 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -8,6 +8,7 @@
endif
obj-$(CONFIG_HID) += hid.o
+obj-$(CONFIG_UHID) += uhid.o
hid-$(CONFIG_HIDRAW) += hidraw.o
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
new file mode 100644
index 0000000..05ef4b0
--- /dev/null
+++ b/drivers/hid/uhid.c
@@ -0,0 +1,153 @@
+/*
+ * User-space I/O driver support for HID subsystem
+ * Copyright (c) 2012 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/uhid.h>
+#include <linux/wait.h>
+
+#define UHID_NAME "uhid"
+#define UHID_BUFSIZE 32
+
+struct uhid_device {
+ struct hid_device *hid;
+
+ wait_queue_head_t waitq;
+ spinlock_t qlock;
+ __u8 head;
+ __u8 tail;
+ struct uhid_event *outq[UHID_BUFSIZE];
+};
+
+static struct miscdevice uhid_misc;
+
+static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
+{
+ __u8 newhead;
+
+ newhead = (uhid->head + 1) % UHID_BUFSIZE;
+
+ if (newhead != uhid->tail) {
+ uhid->outq[uhid->head] = ev;
+ uhid->head = newhead;
+ wake_up_interruptible(&uhid->waitq);
+ } else {
+ hid_warn(uhid->hid, "Output queue is full\n");
+ kfree(ev);
+ }
+}
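A standalone sketch of the ring arithmetic used above (illustrative only, not part of the driver): with UHID_BUFSIZE of 32, at most 31 events can sit in the queue, because one slot is always left empty so that a full ring can be told apart from an empty one.

    /* the same head/tail ring logic, reduced to its essentials */
    #define BUFSIZE 32
    static unsigned char head, tail;

    static int ring_full(void)  { return ((head + 1) % BUFSIZE) == tail; }
    static int ring_empty(void) { return head == tail; }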
+
+static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
+{
+ unsigned long flags;
+ struct uhid_event *ev;
+
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->type = event;
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ uhid_queue(uhid, ev);
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+
+ return 0;
+}
+
+static int uhid_char_open(struct inode *inode, struct file *file)
+{
+ struct uhid_device *uhid;
+
+ uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
+ if (!uhid)
+ return -ENOMEM;
+
+ spin_lock_init(&uhid->qlock);
+ init_waitqueue_head(&uhid->waitq);
+
+ file->private_data = uhid;
+ nonseekable_open(inode, file);
+
+ return 0;
+}
+
+static int uhid_char_release(struct inode *inode, struct file *file)
+{
+ struct uhid_device *uhid = file->private_data;
+ unsigned int i;
+
+ for (i = 0; i < UHID_BUFSIZE; ++i)
+ kfree(uhid->outq[i]);
+
+ kfree(uhid);
+
+ return 0;
+}
+
+static ssize_t uhid_char_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
+{
+ return 0;
+}
+
+static const struct file_operations uhid_fops = {
+ .owner = THIS_MODULE,
+ .open = uhid_char_open,
+ .release = uhid_char_release,
+ .read = uhid_char_read,
+ .write = uhid_char_write,
+ .poll = uhid_char_poll,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice uhid_misc = {
+ .fops = &uhid_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = UHID_NAME,
+};
+
+static int __init uhid_init(void)
+{
+ return misc_register(&uhid_misc);
+}
+
+static void __exit uhid_exit(void)
+{
+ misc_deregister(&uhid_misc);
+}
+
+module_init(uhid_init);
+module_exit(uhid_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
+MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi4.c b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
index 899c83b..2c79276 100644
--- a/drivers/input/touchscreen/synaptics_i2c_rmi4.c
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
@@ -82,6 +82,8 @@
#define RMI4_I2C_LOAD_UA 10000
#define RMI4_I2C_LPM_LOAD_UA 10
+#define RMI4_GPIO_SLEEP_LOW_US 10000
+#define RMI4_GPIO_WAIT_HIGH_MS 25
static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
unsigned short addr, unsigned char *data,
@@ -1871,15 +1873,60 @@
retval = synaptics_rmi4_regulator_configure(rmi4_data, true);
if (retval < 0) {
dev_err(&client->dev, "Failed to configure regulators\n");
- goto err_input_device;
+ goto err_reg_configure;
}
retval = synaptics_rmi4_power_on(rmi4_data, true);
if (retval < 0) {
dev_err(&client->dev, "Failed to power on\n");
- goto err_input_device;
+ goto err_power_device;
}
+ if (gpio_is_valid(platform_data->irq_gpio)) {
+ /* configure touchscreen irq gpio */
+ retval = gpio_request(platform_data->irq_gpio, "rmi4_irq_gpio");
+ if (retval) {
+ dev_err(&client->dev, "unable to request gpio [%d]\n",
+ platform_data->irq_gpio);
+ goto err_query_device;
+ }
+ retval = gpio_direction_input(platform_data->irq_gpio);
+ if (retval) {
+ dev_err(&client->dev,
+ "unable to set direction for gpio [%d]\n",
+ platform_data->irq_gpio);
+ goto err_irq_gpio_req;
+ }
+ } else {
+ dev_err(&client->dev, "irq gpio not provided\n");
+ goto err_query_device;
+ }
+
+ if (gpio_is_valid(platform_data->reset_gpio)) {
+ /* configure touchscreen reset out gpio */
+ retval = gpio_request(platform_data->reset_gpio,
+ "rmi4_reset_gpio");
+ if (retval) {
+ dev_err(&client->dev, "unable to request gpio [%d]\n",
+ platform_data->reset_gpio);
+ goto err_irq_gpio_req;
+ }
+
+ retval = gpio_direction_output(platform_data->reset_gpio, 1);
+ if (retval) {
+ dev_err(&client->dev,
+ "unable to set direction for gpio [%d]\n",
+ platform_data->reset_gpio);
+ goto err_reset_gpio_req;
+ }
+
+ gpio_set_value(platform_data->reset_gpio, 0);
+ usleep(RMI4_GPIO_SLEEP_LOW_US);
+ gpio_set_value(platform_data->reset_gpio, 1);
+ msleep(RMI4_GPIO_WAIT_HIGH_MS);
+ }
+
+
init_waitqueue_head(&rmi4_data->wait);
mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex));
@@ -1888,7 +1935,7 @@
dev_err(&client->dev,
"%s: Failed to query device\n",
__func__);
- goto err_query_device;
+ goto err_reset_gpio_req;
}
i2c_set_clientdata(client, rmi4_data);
@@ -1972,9 +2019,6 @@
input_unregister_device(rmi4_data->input_dev);
err_register_input:
-err_query_device:
- synaptics_rmi4_power_on(rmi4_data, false);
- synaptics_rmi4_regulator_configure(rmi4_data, false);
if (!list_empty(&rmi->support_fn_list)) {
list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
@@ -1984,6 +2028,17 @@
kfree(fhandler);
}
}
+err_reset_gpio_req:
+ if (gpio_is_valid(platform_data->reset_gpio))
+ gpio_free(platform_data->reset_gpio);
+err_irq_gpio_req:
+ if (gpio_is_valid(platform_data->irq_gpio))
+ gpio_free(platform_data->irq_gpio);
+err_query_device:
+ synaptics_rmi4_power_on(rmi4_data, false);
+err_power_device:
+ synaptics_rmi4_regulator_configure(rmi4_data, false);
+err_reg_configure:
input_free_device(rmi4_data->input_dev);
rmi4_data->input_dev = NULL;
err_input_device:
@@ -2036,7 +2091,11 @@
kfree(fhandler);
}
}
- input_free_device(rmi4_data->input_dev);
+
+ if (gpio_is_valid(rmi4_data->board->reset_gpio))
+ gpio_free(rmi4_data->board->reset_gpio);
+ if (gpio_is_valid(rmi4_data->board->irq_gpio))
+ gpio_free(rmi4_data->board->irq_gpio);
synaptics_rmi4_power_on(rmi4_data, false);
synaptics_rmi4_regulator_configure(rmi4_data, false);
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index dfb8d58..2a0cde9 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -514,6 +514,10 @@
((feed->pid != pid) && (feed->pid != 0x2000)))
continue;
+ if (feed->secure_mode.is_secured &&
+ !dvb_dmx_is_rec_feed(feed))
+ return 0;
+
if (feed->type == DMX_TYPE_TS) {
desired_space = 192; /* upper bound */
ts = &feed->feed.ts;
@@ -593,19 +597,23 @@
if (!feed->feed.ts.is_filtering)
break;
if (feed->ts_type & TS_PACKET) {
- if (feed->ts_type & TS_PAYLOAD_ONLY)
- dvb_dmx_swfilter_payload(feed, buf);
- else
+ if (feed->ts_type & TS_PAYLOAD_ONLY) {
+ if (!feed->secure_mode.is_secured)
+ dvb_dmx_swfilter_payload(feed, buf);
+ } else {
dvb_dmx_swfilter_output_packet(feed,
buf, timestamp);
+ }
}
- if (feed->ts_type & TS_DECODER)
+ if ((feed->ts_type & TS_DECODER) &&
+ !feed->secure_mode.is_secured)
if (feed->demux->write_to_decoder)
feed->demux->write_to_decoder(feed, buf, 188);
break;
case DMX_TYPE_SEC:
- if (!feed->feed.sec.is_filtering)
+ if (!feed->feed.sec.is_filtering ||
+ feed->secure_mode.is_secured)
break;
if (dvb_dmx_swfilter_section_packet(feed, buf) < 0)
feed->feed.sec.seclen = feed->feed.sec.secbufp = 0;
@@ -1212,17 +1220,22 @@
{
struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ int ret = 0;
mutex_lock(&dvbdmx->mutex);
- dvbdmxfeed->secure_mode = *secure_mode;
-
if ((dvbdmxfeed->state == DMX_STATE_GO) &&
- dvbdmxfeed->demux->set_secure_mode)
- dvbdmxfeed->demux->set_secure_mode(dvbdmxfeed, secure_mode);
+ dvbdmxfeed->demux->set_secure_mode) {
+ ret = dvbdmxfeed->demux->set_secure_mode(dvbdmxfeed,
+ secure_mode);
+ if (!ret)
+ dvbdmxfeed->secure_mode = *secure_mode;
+ } else {
+ dvbdmxfeed->secure_mode = *secure_mode;
+ }
mutex_unlock(&dvbdmx->mutex);
- return 0;
+ return ret;
}
static int dmx_ts_set_indexing_params(
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index f5f6039..f3dc4b8 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -203,5 +203,85 @@
void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf,
const u8 timestamp[TIMESTAMP_LEN]);
+/**
+ * dvb_dmx_is_video_feed - Returns whether the PES feed
+ * is a video feed.
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is video feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_video_feed(struct dvb_demux_feed *feed)
+{
+ if (feed->type != DMX_TYPE_TS)
+ return 0;
+
+ if (feed->ts_type & (~TS_DECODER))
+ return 0;
+
+ if ((feed->pes_type == DMX_TS_PES_VIDEO0) ||
+ (feed->pes_type == DMX_TS_PES_VIDEO1) ||
+ (feed->pes_type == DMX_TS_PES_VIDEO2) ||
+ (feed->pes_type == DMX_TS_PES_VIDEO3))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * dvb_dmx_is_pcr_feed - Returns whether the PES feed
+ * is a PCR feed.
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is PCR feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_pcr_feed(struct dvb_demux_feed *feed)
+{
+ if (feed->type != DMX_TYPE_TS)
+ return 0;
+
+ if (feed->ts_type & (~TS_DECODER))
+ return 0;
+
+ if ((feed->pes_type == DMX_TS_PES_PCR0) ||
+ (feed->pes_type == DMX_TS_PES_PCR1) ||
+ (feed->pes_type == DMX_TS_PES_PCR2) ||
+ (feed->pes_type == DMX_TS_PES_PCR3))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * dvb_dmx_is_sec_feed - Returns whether this is a section feed
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is a section feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_sec_feed(struct dvb_demux_feed *feed)
+{
+ return (feed->type == DMX_TYPE_SEC);
+}
+
+/**
+ * dvb_dmx_is_rec_feed - Returns whether this is a recording feed
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is recording feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_rec_feed(struct dvb_demux_feed *feed)
+{
+ if (feed->type != DMX_TYPE_TS)
+ return 0;
+
+ if (feed->ts_type & (TS_DECODER | TS_PAYLOAD_ONLY))
+ return 0;
+
+ return 1;
+}
+
#endif /* _DVB_DEMUX_H_ */
diff --git a/drivers/media/platform/msm/camera_v2/Kconfig b/drivers/media/platform/msm/camera_v2/Kconfig
index 2bbdc22..e4777e6 100644
--- a/drivers/media/platform/msm/camera_v2/Kconfig
+++ b/drivers/media/platform/msm/camera_v2/Kconfig
@@ -82,6 +82,15 @@
snapshot config = 4000 * 3000 at 20 fps,
hfr video at 60, 90 and 120 fps.
+config IMX135
+ bool "Sensor IMX135 (BAYER 12M)"
+ depends on MSMB_CAMERA
+ ---help---
+ Sony 12 MP Bayer Sensor with auto focus, uses
+ 4 mipi lanes, preview config = 2104 x 1560 at 49 fps,
+ snapshot config = 4208 x 3120 at 24 fps,
+ Video HDR support.
+
config OV2720
bool "Sensor OV2720 (BAYER 2M)"
depends on MSMB_CAMERA
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index 08a4566..6974cb4 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -461,9 +461,6 @@
static inline int __msm_sd_close_session_streams(struct v4l2_subdev *sd,
struct msm_sd_close_ioctl *sd_close)
{
- v4l2_subdev_call(sd, core, ioctl,
- MSM_SD_CLOSE_SESSION_AND_STREAM, &sd_close);
-
return 0;
}
diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
index 6ea86ae..22131f8 100644
--- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
+++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
@@ -105,7 +105,7 @@
unsigned long flags;
stream = msm_get_stream(session_id, stream_id);
- if (!stream)
+ if (IS_ERR_OR_NULL(stream))
return NULL;
spin_lock_irqsave(&stream->stream_lock, flags);
@@ -163,7 +163,7 @@
int rc = 0;
stream = msm_get_stream(session_id, stream_id);
- if (!stream)
+ if (IS_ERR_OR_NULL(stream))
return 0;
spin_lock_irqsave(&stream->stream_lock, flags);
if (vb) {
@@ -172,6 +172,7 @@
/* put buf before buf done */
if (msm_vb2->in_freeq) {
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ msm_vb2->in_freeq = 0;
rc = 0;
} else
rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/Makefile b/drivers/media/platform/msm/camera_v2/sensor/Makefile
index f6011ba..6f941f7 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/Makefile
+++ b/drivers/media/platform/msm/camera_v2/sensor/Makefile
@@ -6,5 +6,6 @@
obj-$(CONFIG_MSMB_CAMERA) += cci/ io/ csiphy/ csid/ actuator/ flash/
obj-$(CONFIG_MSM_CAMERA_SENSOR) += msm_sensor.o
obj-$(CONFIG_S5K3L1YX) += s5k3l1yx.o
+obj-$(CONFIG_IMX135) += imx135.o
obj-$(CONFIG_OV2720) += ov2720.o
obj-$(CONFIG_MT9M114) += mt9m114.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/imx135.c b/drivers/media/platform/msm/camera_v2/sensor/imx135.c
new file mode 100644
index 0000000..c9476ee
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/imx135.c
@@ -0,0 +1,149 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include "msm_sensor.h"
+#define IMX135_SENSOR_NAME "imx135"
+DEFINE_MSM_MUTEX(imx135_mut);
+
+static struct msm_sensor_ctrl_t imx135_s_ctrl;
+
+static struct msm_sensor_power_setting imx135_power_setting[] = {
+ {
+ .seq_type = SENSOR_VREG,
+ .seq_val = CAM_VDIG,
+ .config_val = 0,
+ .delay = 0,
+ },
+ {
+ .seq_type = SENSOR_VREG,
+ .seq_val = CAM_VANA,
+ .config_val = 0,
+ .delay = 0,
+ },
+ {
+ .seq_type = SENSOR_VREG,
+ .seq_val = CAM_VIO,
+ .config_val = 0,
+ .delay = 0,
+ },
+ {
+ .seq_type = SENSOR_GPIO,
+ .seq_val = SENSOR_GPIO_RESET,
+ .config_val = GPIO_OUT_LOW,
+ .delay = 1,
+ },
+ {
+ .seq_type = SENSOR_GPIO,
+ .seq_val = SENSOR_GPIO_RESET,
+ .config_val = GPIO_OUT_HIGH,
+ .delay = 30,
+ },
+ {
+ .seq_type = SENSOR_CLK,
+ .seq_val = SENSOR_CAM_MCLK,
+ .config_val = 0,
+ .delay = 1,
+ },
+ {
+ .seq_type = SENSOR_I2C_MUX,
+ .seq_val = 0,
+ .config_val = 0,
+ .delay = 0,
+ },
+};
+
+static struct v4l2_subdev_info imx135_subdev_info[] = {
+ {
+ .code = V4L2_MBUS_FMT_SBGGR10_1X10,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .fmt = 1,
+ .order = 0,
+ },
+};
+
+static const struct i2c_device_id imx135_i2c_id[] = {
+ {IMX135_SENSOR_NAME, (kernel_ulong_t)&imx135_s_ctrl},
+ { }
+};
+
+static struct i2c_driver imx135_i2c_driver = {
+ .id_table = imx135_i2c_id,
+ .probe = msm_sensor_i2c_probe,
+ .driver = {
+ .name = IMX135_SENSOR_NAME,
+ },
+};
+
+static struct msm_camera_i2c_client imx135_sensor_i2c_client = {
+ .addr_type = MSM_CAMERA_I2C_WORD_ADDR,
+};
+
+static const struct of_device_id imx135_dt_match[] = {
+ {.compatible = "qcom,imx135", .data = &imx135_s_ctrl},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, imx135_dt_match);
+
+static struct platform_driver imx135_platform_driver = {
+ .driver = {
+ .name = "qcom,imx135",
+ .owner = THIS_MODULE,
+ .of_match_table = imx135_dt_match,
+ },
+};
+
+static int32_t imx135_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ const struct of_device_id *match;
+ match = of_match_device(imx135_dt_match, &pdev->dev);
+ rc = msm_sensor_platform_probe(pdev, match->data);
+ return rc;
+}
+
+static int __init imx135_init_module(void)
+{
+ int32_t rc = 0;
+ pr_info("%s:%d\n", __func__, __LINE__);
+ rc = platform_driver_probe(&imx135_platform_driver,
+ imx135_platform_probe);
+ if (!rc)
+ return rc;
+ pr_err("%s:%d rc %d\n", __func__, __LINE__, rc);
+ return i2c_add_driver(&imx135_i2c_driver);
+}
+
+static void __exit imx135_exit_module(void)
+{
+ pr_info("%s:%d\n", __func__, __LINE__);
+ if (imx135_s_ctrl.pdev) {
+ msm_sensor_free_sensor_data(&imx135_s_ctrl);
+ platform_driver_unregister(&imx135_platform_driver);
+ } else
+ i2c_del_driver(&imx135_i2c_driver);
+ return;
+}
+
+static struct msm_sensor_ctrl_t imx135_s_ctrl = {
+ .sensor_i2c_client = &imx135_sensor_i2c_client,
+ .power_setting_array.power_setting = imx135_power_setting,
+ .power_setting_array.size = ARRAY_SIZE(imx135_power_setting),
+ .msm_sensor_mutex = &imx135_mut,
+ .sensor_v4l2_subdev_info = imx135_subdev_info,
+ .sensor_v4l2_subdev_info_size = ARRAY_SIZE(imx135_subdev_info),
+};
+
+module_init(imx135_init_module);
+module_exit(imx135_exit_module);
+MODULE_DESCRIPTION("imx135");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index 9d89a7e..0641162 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -903,6 +903,8 @@
mutex_init(&mpq_demux->mutex);
+ mpq_demux->num_secure_feeds = 0;
+ mpq_demux->num_active_feeds = 0;
mpq_demux->sdmx_filter_count = 0;
mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
@@ -1230,7 +1232,7 @@
return -EINVAL;
}
- if (mpq_dmx_is_video_feed(feed)) {
+ if (dvb_dmx_is_video_feed(feed)) {
struct mpq_video_feed_info *feed_data;
struct mpq_feed *mpq_feed;
struct mpq_streambuffer *stream_buffer;
@@ -1957,9 +1959,10 @@
}
mpq_sdmx_close_session(mpq_demux);
+ mpq_demux->num_secure_feeds--;
}
- if (mpq_dmx_is_video_feed(feed)) {
+ if (dvb_dmx_is_video_feed(feed)) {
ret = mpq_dmx_terminate_video_feed(mpq_feed);
if (ret)
MPQ_DVB_ERR_PRINT(
@@ -1973,6 +1976,7 @@
}
mpq_sdmx_terminate_metadata_buffer(mpq_feed);
+ mpq_demux->num_active_feeds--;
mutex_unlock(&mpq_demux->mutex);
@@ -1982,7 +1986,7 @@
int mpq_dmx_decoder_fullness_init(struct dvb_demux_feed *feed)
{
- if (mpq_dmx_is_video_feed(feed)) {
+ if (dvb_dmx_is_video_feed(feed)) {
struct mpq_feed *mpq_feed;
struct mpq_video_feed_info *feed_data;
@@ -2056,7 +2060,7 @@
struct mpq_feed *mpq_feed;
int ret = 0;
- if (!mpq_dmx_is_video_feed(feed)) {
+ if (!dvb_dmx_is_video_feed(feed)) {
MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n",
__func__,
feed->pes_type);
@@ -2139,7 +2143,7 @@
int mpq_dmx_decoder_fullness_abort(struct dvb_demux_feed *feed)
{
- if (mpq_dmx_is_video_feed(feed)) {
+ if (dvb_dmx_is_video_feed(feed)) {
struct mpq_feed *mpq_feed;
struct mpq_video_feed_info *feed_data;
struct dvb_ringbuffer *video_buff;
@@ -3087,7 +3091,7 @@
struct mpq_streambuffer *video_buff;
struct mpq_feed *mpq_feed;
- if (!mpq_dmx_is_video_feed(feed)) {
+ if (!dvb_dmx_is_video_feed(feed)) {
MPQ_DVB_ERR_PRINT(
"%s: Invalid feed type %d\n",
__func__,
@@ -3230,67 +3234,6 @@
}
EXPORT_SYMBOL(mpq_dmx_process_pcr_packet);
-int mpq_dmx_set_secure_mode(struct dvb_demux_feed *feed,
- struct dmx_secure_mode *sec_mode)
-{
- struct mpq_feed *mpq_feed;
- struct mpq_demux *mpq_demux;
- int ret;
-
- if (!feed || !feed->priv || !sec_mode) {
- MPQ_DVB_ERR_PRINT(
- "%s: invalid parameters\n",
- __func__);
- return -EINVAL;
- }
-
- MPQ_DVB_DBG_PRINT("%s(%d, %d, %d)\n",
- __func__, sec_mode->pid,
- sec_mode->is_secured,
- sec_mode->key_ladder_id);
-
- mpq_feed = feed->priv;
- mpq_demux = mpq_feed->mpq_demux;
-
- mutex_lock(&mpq_demux->mutex);
-
- /*
- * If secure demux is active, set the KL now,
- * otherwise it will be set when secure-demux is started
- * (when filtering starts).
- */
- if (mpq_demux->sdmx_session_handle !=
- SDMX_INVALID_SESSION_HANDLE) {
- if (sec_mode->is_secured) {
- MPQ_DVB_DBG_PRINT(
- "%s: set key-ladder %d to PID %d\n",
- __func__,
- sec_mode->key_ladder_id,
- sec_mode->pid);
- ret = sdmx_set_kl_ind(mpq_demux->sdmx_session_handle,
- sec_mode->pid, sec_mode->key_ladder_id);
- if (ret) {
- MPQ_DVB_ERR_PRINT(
- "%s: FAILED to set keyladder, ret=%d\n",
- __func__, ret);
- ret = -EINVAL;
- }
- } else {
- MPQ_DVB_DBG_PRINT("%s: setting non-secure mode\n",
- __func__);
- ret = 0;
- }
- } else {
- MPQ_DVB_DBG_PRINT("%s: SDMX not started yet\n", __func__);
- ret = 0;
- }
-
- mutex_unlock(&mpq_demux->mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(mpq_dmx_set_secure_mode);
-
int mpq_sdmx_open_session(struct mpq_demux *mpq_demux)
{
enum sdmx_status ret = SDMX_SUCCESS;
@@ -3409,7 +3352,7 @@
*buf_mode = SDMX_RING_BUF;
- if (mpq_dmx_is_video_feed(feed->dvb_demux_feed)) {
+ if (dvb_dmx_is_video_feed(feed->dvb_demux_feed)) {
if (feed_data->buffer_desc.decoder_buffers_num > 1)
*buf_mode = SDMX_LINEAR_GROUP_BUF;
*num_buffers = feed_data->buffer_desc.decoder_buffers_num;
@@ -3429,8 +3372,8 @@
}
} else {
*num_buffers = 1;
- if (mpq_dmx_is_sec_feed(dvbdmx_feed) ||
- mpq_dmx_is_pcr_feed(dvbdmx_feed)) {
+ if (dvb_dmx_is_sec_feed(dvbdmx_feed) ||
+ dvb_dmx_is_pcr_feed(dvbdmx_feed)) {
buffer = &feed->sdmx_buf;
sdmx_buff = feed->sdmx_buf_handle;
} else {
@@ -3481,18 +3424,18 @@
feed = dvbdmx_feed->priv;
- if (mpq_dmx_is_sec_feed(dvbdmx_feed)) {
+ if (dvb_dmx_is_sec_feed(dvbdmx_feed)) {
feed->filter_type = SDMX_SECTION_FILTER;
if (dvbdmx_feed->feed.sec.check_crc)
filter_flags |= SDMX_FILTER_FLAG_VERIFY_SECTION_CRC;
MPQ_DVB_DBG_PRINT("%s: SDMX_SECTION_FILTER\n", __func__);
- } else if (mpq_dmx_is_pcr_feed(dvbdmx_feed)) {
+ } else if (dvb_dmx_is_pcr_feed(dvbdmx_feed)) {
feed->filter_type = SDMX_PCR_FILTER;
MPQ_DVB_DBG_PRINT("%s: SDMX_PCR_FILTER\n", __func__);
- } else if (mpq_dmx_is_video_feed(dvbdmx_feed)) {
+ } else if (dvb_dmx_is_video_feed(dvbdmx_feed)) {
feed->filter_type = SDMX_SEPARATED_PES_FILTER;
MPQ_DVB_DBG_PRINT("%s: SDMX_SEPARATED_PES_FILTER\n", __func__);
- } else if (mpq_dmx_is_rec_feed(dvbdmx_feed)) {
+ } else if (dvb_dmx_is_rec_feed(dvbdmx_feed)) {
feed->filter_type = SDMX_RAW_FILTER;
switch (dvbdmx_feed->tsp_out_format) {
case (DMX_TSP_FORMAT_188):
@@ -3546,7 +3489,7 @@
/* Meta-data initialization,
* Recording filters do no need meta-data buffers.
*/
- if (mpq_dmx_is_rec_feed(dvbdmx_feed)) {
+ if (dvb_dmx_is_rec_feed(dvbdmx_feed)) {
metadata_buff_desc.base_addr = 0;
metadata_buff_desc.size = 0;
} else {
@@ -3640,6 +3583,63 @@
return ret;
}
+/**
+ * mpq_sdmx_init_feed - initialize secure demux related elements of mpq feed
+ *
+ * @mpq_demux: mpq_demux object
+ * @mpq_feed: mpq_feed object
+ *
+ * Note: the function assumes mpq_demux->mutex locking is done by caller.
+ */
+static int mpq_sdmx_init_feed(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed)
+{
+ int ret;
+
+ ret = mpq_sdmx_open_session(mpq_demux);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_open_session failed, ret=%d\n",
+ __func__, ret);
+
+ ret = -ENODEV;
+ goto init_sdmx_feed_failed;
+ }
+
+ /* PCR and sections have internal buffer for SDMX */
+ if (dvb_dmx_is_pcr_feed(mpq_feed->dvb_demux_feed))
+ ret = mpq_sdmx_alloc_data_buf(mpq_feed, SDMX_PCR_BUFFER_SIZE);
+ else if (dvb_dmx_is_sec_feed(mpq_feed->dvb_demux_feed))
+ ret = mpq_sdmx_alloc_data_buf(mpq_feed,
+ SDMX_SECTION_BUFFER_SIZE);
+ else
+ ret = 0;
+
+ if (ret) {
+ MPQ_DVB_ERR_PRINT("%s: init buffer failed, ret=%d\n",
+ __func__, ret);
+ goto init_sdmx_feed_failed_free_sdmx;
+ }
+
+ ret = mpq_sdmx_filter_setup(mpq_demux, mpq_feed->dvb_demux_feed);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_filter_setup failed, ret=%d\n",
+ __func__, ret);
+ goto init_sdmx_feed_failed_free_data_buff;
+ }
+
+ mpq_demux->num_secure_feeds++;
+ return 0;
+
+init_sdmx_feed_failed_free_data_buff:
+ mpq_sdmx_free_data_buf(mpq_feed);
+init_sdmx_feed_failed_free_sdmx:
+ mpq_sdmx_close_session(mpq_demux);
+init_sdmx_feed_failed:
+ return ret;
+}
+
int mpq_dmx_init_mpq_feed(struct dvb_demux_feed *feed)
{
int ret = 0;
@@ -3648,80 +3648,113 @@
mutex_lock(&mpq_demux->mutex);
- if (mpq_dmx_is_video_feed(feed)) {
- ret = mpq_dmx_init_video_feed(mpq_feed);
-
- if (ret) {
- MPQ_DVB_ERR_PRINT(
- "%s: mpq_dmx_init_video_feed failed, ret=%d\n",
- __func__, ret);
- goto init_mpq_feed_failed;
- }
- }
-
mpq_feed->sdmx_buf_handle = NULL;
mpq_feed->metadata_buf_handle = NULL;
mpq_feed->sdmx_filter_handle = SDMX_INVALID_FILTER_HANDLE;
- if (!mpq_sdmx_is_loaded()) {
- /* nothing more to do */
- mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
- mutex_unlock(&mpq_demux->mutex);
- return ret;
+ if (dvb_dmx_is_video_feed(feed)) {
+ ret = mpq_dmx_init_video_feed(mpq_feed);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_init_video_feed failed, ret=%d\n",
+ __func__, ret);
+ goto init_mpq_feed_end;
+ }
}
- /* Further initializations for secure demux */
- ret = mpq_sdmx_open_session(mpq_demux);
+ /*
+ * sdmx is not relevant for recording filters, which always use
+ * regular filters (non-sdmx)
+ */
+ if (!mpq_sdmx_is_loaded() || !feed->secure_mode.is_secured ||
+ dvb_dmx_is_rec_feed(feed)) {
+ if (!mpq_sdmx_is_loaded())
+ mpq_demux->sdmx_session_handle =
+ SDMX_INVALID_SESSION_HANDLE;
+ goto init_mpq_feed_end;
+ }
+
+ /* Initialization of secure demux filters (PES/PCR/Video/Section) */
+ ret = mpq_sdmx_init_feed(mpq_demux, mpq_feed);
if (ret) {
MPQ_DVB_ERR_PRINT(
- "%s: mpq_sdmx_open_session failed, ret=%d\n",
+ "%s: mpq_sdmx_init_feed failed, ret=%d\n",
__func__, ret);
-
- ret = -ENODEV;
- goto init_mpq_feed_failed_free_video;
+ if (dvb_dmx_is_video_feed(feed))
+ mpq_dmx_terminate_video_feed(mpq_feed);
}
- /* PCR and sections have internal buffer for SDMX */
- if (mpq_dmx_is_pcr_feed(feed))
- ret = mpq_sdmx_alloc_data_buf(mpq_feed,
- SDMX_PCR_BUFFER_SIZE);
- else if (mpq_dmx_is_sec_feed(feed))
- ret = mpq_sdmx_alloc_data_buf(mpq_feed,
- SDMX_SECTION_BUFFER_SIZE);
- else
- ret = 0;
-
- if (ret) {
- MPQ_DVB_ERR_PRINT(
- "%s: init buffer failed, ret=%d\n",
- __func__, ret);
- goto init_mpq_feed_failed_free_sdmx;
- }
-
- ret = mpq_sdmx_filter_setup(mpq_demux, feed);
- if (ret) {
- MPQ_DVB_ERR_PRINT(
- "%s: mpq_sdmx_filter_setup failed, ret=%d\n",
- __func__, ret);
- goto init_mpq_feed_failed_free_data_buff;
- }
-
- mutex_unlock(&mpq_demux->mutex);
- return 0;
-
-init_mpq_feed_failed_free_data_buff:
- mpq_sdmx_free_data_buf(mpq_feed);
-init_mpq_feed_failed_free_sdmx:
- mpq_sdmx_close_session(mpq_demux);
-init_mpq_feed_failed_free_video:
- if (mpq_dmx_is_video_feed(feed))
- mpq_dmx_terminate_video_feed(mpq_feed);
-init_mpq_feed_failed:
+init_mpq_feed_end:
+ if (!ret)
+ mpq_demux->num_active_feeds++;
mutex_unlock(&mpq_demux->mutex);
return ret;
}
EXPORT_SYMBOL(mpq_dmx_init_mpq_feed);
+/**
+ * Note: Called only when filter is in "GO" state - after feed has been started.
+ */
+int mpq_dmx_set_secure_mode(struct dvb_demux_feed *feed,
+ struct dmx_secure_mode *sec_mode)
+{
+ struct mpq_feed *mpq_feed;
+ struct mpq_demux *mpq_demux;
+ int ret = 0;
+
+ if (!feed || !feed->priv || !sec_mode) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid parameters\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ MPQ_DVB_DBG_PRINT("%s(%d, %d, %d)\n",
+ __func__, sec_mode->pid,
+ sec_mode->is_secured,
+ sec_mode->key_ladder_id);
+
+ mpq_feed = feed->priv;
+ mpq_demux = mpq_feed->mpq_demux;
+
+ mutex_lock(&mpq_demux->mutex);
+
+ if (feed->secure_mode.is_secured != sec_mode->is_secured) {
+ /*
+ * Switching between secure & non-secure mode is not allowed
+ * while filter is running
+ */
+ MPQ_DVB_ERR_PRINT(
+ "%s: Cannot switch between secure and non-secure mode while filter is running\n",
+ __func__);
+ mutex_unlock(&mpq_demux->mutex);
+ return -EPERM;
+ }
+
+ /*
+ * Feed is running in secure mode, this secure mode request is to
+ * update the key ladder id
+ */
+ if (feed->secure_mode.pid == sec_mode->pid && sec_mode->is_secured &&
+ feed->secure_mode.key_ladder_id != sec_mode->key_ladder_id &&
+ mpq_demux->sdmx_session_handle != SDMX_INVALID_SESSION_HANDLE) {
+ ret = sdmx_set_kl_ind(mpq_demux->sdmx_session_handle,
+ sec_mode->pid,
+ sec_mode->key_ladder_id);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to set key ladder, ret=%d\n",
+ __func__, ret);
+ ret = -ENODEV;
+ }
+ }
+
+ mutex_unlock(&mpq_demux->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(mpq_dmx_set_secure_mode);
+
static void mpq_sdmx_prepare_filter_status(struct mpq_demux *mpq_demux,
struct sdmx_filter_status *filter_sts,
struct mpq_feed *mpq_feed)
@@ -3742,11 +3775,11 @@
__func__, filter_sts->metadata_fill_count,
filter_sts->metadata_write_offset);
- if (!mpq_dmx_is_video_feed(feed)) {
+ if (!dvb_dmx_is_video_feed(feed)) {
struct dvb_ringbuffer *buffer;
- if (mpq_dmx_is_sec_feed(feed) ||
- mpq_dmx_is_pcr_feed(feed)) {
+ if (dvb_dmx_is_sec_feed(feed) ||
+ dvb_dmx_is_pcr_feed(feed)) {
buffer = (struct dvb_ringbuffer *)
&mpq_feed->sdmx_buf;
} else {
@@ -4521,7 +4554,7 @@
int total_bytes_read = 0;
int limit = mpq_sdmx_proc_limit * mpq_demux->demux.ts_packet_size;
- do {
+ while (fill_count >= mpq_demux->demux.ts_packet_size) {
todo = fill_count > limit ? limit : fill_count;
ret = mpq_sdmx_process_buffer(mpq_demux, input, todo,
read_offset);
@@ -4541,7 +4574,7 @@
__func__, ret);
break;
}
- } while (fill_count > 0);
+ }
return total_bytes_read;
}
@@ -4584,6 +4617,7 @@
{
struct dvb_demux *dvb_demux;
struct mpq_demux *mpq_demux;
+ int ret = count;
if (demux == NULL)
return -EINVAL;
@@ -4591,20 +4625,35 @@
dvb_demux = demux->priv;
mpq_demux = dvb_demux->priv;
- if (mpq_sdmx_is_loaded()) {
- /* route through secure demux */
- return mpq_sdmx_write(mpq_demux,
+ /* Route through secure demux - process secure feeds if any exist */
+ if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) {
+ ret = mpq_sdmx_write(mpq_demux,
demux->dvr_input.priv_handle,
buf,
count);
- } else {
- /* route through sw filter */
- dvb_dmx_swfilter_format(dvb_demux, buf, count,
- dvb_demux->tsp_format);
- if (signal_pending(current))
- return -EINTR;
- return count;
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_write failed. ret = %d\n",
+ __func__, ret);
+ ret = count;
+ }
}
+
+ /*
+ * Route through sw filter - process non-secure feeds if any exist.
+ * For sw filter, should process the same amount of bytes the sdmx
+ * The sw filter should process the same number of bytes that the
+ * sdmx process managed to consume, unless an sdmx error occurred,
+ * in which case the whole buffer should be processed
+ if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds) {
+ dvb_dmx_swfilter_format(dvb_demux, buf, ret,
+ dvb_demux->tsp_format);
+ }
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ return ret;
}
EXPORT_SYMBOL(mpq_dmx_write);
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
index 2c2420b..7affcc6 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
@@ -362,6 +362,9 @@
* @ion_client: ION demux client used to allocate memory from ION.
* @mutex: Lock used to protect against private feed data
* @feeds: mpq common feed object pool
+ * @num_active_feeds: Number of active mpq feeds
+ * @num_secure_feeds: Number of secure feeds (feeds that have an
+ * associated sdmx filter) currently allocated.
* @filters_status: Array holding buffers status for each secure demux filter.
* Used before each call to sdmx_process() to build up to date state.
* @sdmx_session_handle: Secure demux open session handle
@@ -406,6 +409,8 @@
struct ion_client *ion_client;
struct mutex mutex;
struct mpq_feed feeds[MPQ_MAX_DMX_FILES];
+ u32 num_active_feeds;
+ u32 num_secure_feeds;
struct sdmx_filter_status filters_status[MPQ_MAX_DMX_FILES];
int sdmx_session_handle;
int sdmx_session_ref_count;
@@ -615,86 +620,6 @@
int mpq_dmx_process_pcr_packet(struct dvb_demux_feed *feed, const u8 *buf);
/**
- * mpq_dmx_is_video_feed - Returns whether the PES feed
- * is video one.
- *
- * @feed: The feed to be checked.
- *
- * Return 1 if feed is video feed, 0 otherwise.
- */
-static inline int mpq_dmx_is_video_feed(struct dvb_demux_feed *feed)
-{
- if (feed->type != DMX_TYPE_TS)
- return 0;
-
- if (feed->ts_type & (~TS_DECODER))
- return 0;
-
- if ((feed->pes_type == DMX_TS_PES_VIDEO0) ||
- (feed->pes_type == DMX_TS_PES_VIDEO1) ||
- (feed->pes_type == DMX_TS_PES_VIDEO2) ||
- (feed->pes_type == DMX_TS_PES_VIDEO3))
- return 1;
-
- return 0;
-}
-
-/**
- * mpq_dmx_is_pcr_feed - Returns whether the PES feed
- * is PCR one.
- *
- * @feed: The feed to be checked.
- *
- * Return 1 if feed is PCR feed, 0 otherwise.
- */
-static inline int mpq_dmx_is_pcr_feed(struct dvb_demux_feed *feed)
-{
- if (feed->type != DMX_TYPE_TS)
- return 0;
-
- if (feed->ts_type & (~TS_DECODER))
- return 0;
-
- if ((feed->pes_type == DMX_TS_PES_PCR0) ||
- (feed->pes_type == DMX_TS_PES_PCR1) ||
- (feed->pes_type == DMX_TS_PES_PCR2) ||
- (feed->pes_type == DMX_TS_PES_PCR3))
- return 1;
-
- return 0;
-}
-
-/**
- * mpq_dmx_is_sec_feed - Returns whether this is a section feed
- *
- * @feed: The feed to be checked.
- *
- * Return 1 if feed is a section feed, 0 otherwise.
- */
-static inline int mpq_dmx_is_sec_feed(struct dvb_demux_feed *feed)
-{
- return (feed->type == DMX_TYPE_SEC);
-}
-
-/**
- * mpq_dmx_is_rec_feed - Returns whether this is a recording feed
- *
- * @feed: The feed to be checked.
- *
- * Return 1 if feed is recording feed, 0 otherwise.
- */
-static inline int mpq_dmx_is_rec_feed(struct dvb_demux_feed *feed)
-{
- if (feed->type != DMX_TYPE_TS)
- return 0;
-
- if (feed->ts_type & (TS_DECODER | TS_PAYLOAD_ONLY))
- return 0;
-
- return 1;
-}
-
-/**
* mpq_dmx_init_hw_statistics -
* Extend dvb-demux debugfs with HW statistics.
*
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tsif.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tsif.c
index b29759c..3d48441 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tsif.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tsif.c
@@ -518,10 +518,10 @@
"%s: warnning - len larger than one packet\n",
__func__);
- if (mpq_dmx_is_video_feed(feed))
+ if (dvb_dmx_is_video_feed(feed))
return mpq_dmx_process_video_packet(feed, buf);
- if (mpq_dmx_is_pcr_feed(feed))
+ if (dvb_dmx_is_pcr_feed(feed))
return mpq_dmx_process_pcr_packet(feed, buf);
return 0;
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
index 632e864..beb4cce 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
@@ -306,6 +306,30 @@
}
/**
+ * mpq_dmx_tspp_swfilter_desc - helper function
+ *
+ * Takes a tspp buffer descriptor and sends it to the SW filter for demuxing,
+ * one TS packet at a time.
+ *
+ * @mpq_demux - mpq demux object
+ * @tspp_data_desc - tspp buffer descriptor
+ */
+static inline void mpq_dmx_tspp_swfilter_desc(struct mpq_demux *mpq_demux,
+ const struct tspp_data_descriptor *tspp_data_desc)
+{
+ u32 notif_size;
+ int i;
+
+ notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
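+ /*
+ * Each TSPP_RAW_TTS_SIZE slot in the descriptor holds one TS packet
+ * followed by its timestamp (at offset TSPP_RAW_SIZE within the slot);
+ * both pointers are handed to the SW filter below.
+ */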
+ for (i = 0; i < notif_size; i++)
+ dvb_dmx_swfilter_packet(&mpq_demux->demux,
+ ((u8 *)tspp_data_desc->virt_base) +
+ i * TSPP_RAW_TTS_SIZE,
+ ((u8 *)tspp_data_desc->virt_base) +
+ i * TSPP_RAW_TTS_SIZE + TSPP_RAW_SIZE);
+}
+
+/**
* Demux TS packets from TSPP by secure-demux.
 * The function assumes the buffer is physically contiguous
* and that TSPP descriptors are continuous in memory.
@@ -320,37 +344,46 @@
struct sdmx_buff_descr input;
size_t aggregate_len = 0;
size_t aggregate_count = 0;
- phys_addr_t buff_start_addr;
- phys_addr_t buff_current_addr = 0;
+ phys_addr_t buff_start_addr_phys;
+ phys_addr_t buff_current_addr_phys = 0;
+ u32 notif_size;
int i;
while ((tspp_data_desc = tspp_get_buffer(0, channel_id)) != NULL) {
if (0 == aggregate_count)
- buff_current_addr = tspp_data_desc->phys_base;
+ buff_current_addr_phys = tspp_data_desc->phys_base;
+ notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
mpq_dmx_tspp_info.tsif[tsif].aggregate_ids[aggregate_count] =
tspp_data_desc->id;
aggregate_len += tspp_data_desc->size;
aggregate_count++;
- mpq_demux->hw_notification_size +=
- tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
+ mpq_demux->hw_notification_size += notif_size;
+
+ /* Let SW filter process only if it might be relevant */
+ if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds)
+ mpq_dmx_tspp_swfilter_desc(mpq_demux, tspp_data_desc);
+
}
if (!aggregate_count)
return;
- buff_start_addr = mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base;
- input.base_addr = (void *)buff_start_addr;
+ buff_start_addr_phys =
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base;
+ input.base_addr = (void *)buff_start_addr_phys;
input.size = mpq_dmx_tspp_info.tsif[tsif].buffer_count *
TSPP_DESCRIPTOR_SIZE;
- MPQ_DVB_DBG_PRINT(
- "%s: Processing %d descriptors: %d bytes at start address 0x%x, read offset %d\n",
- __func__, aggregate_count, aggregate_len,
- (unsigned int)input.base_addr,
- buff_current_addr - buff_start_addr);
+ if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: SDMX Processing %d descriptors: %d bytes at start address 0x%x, read offset %d\n",
+ __func__, aggregate_count, aggregate_len,
+ (unsigned int)input.base_addr,
+ buff_current_addr_phys - buff_start_addr_phys);
- mpq_sdmx_process(mpq_demux, &input, aggregate_len,
- buff_current_addr - buff_start_addr);
+ mpq_sdmx_process(mpq_demux, &input, aggregate_len,
+ buff_current_addr_phys - buff_start_addr_phys);
+ }
for (i = 0; i < aggregate_count; i++)
tspp_release_buffer(0, channel_id,
@@ -373,7 +406,6 @@
int channel_id;
int ref_count;
int ret;
- int j;
do {
ret = wait_event_interruptible(
@@ -427,13 +459,8 @@
TSPP_RAW_TTS_SIZE;
mpq_demux->hw_notification_size += notif_size;
- for (j = 0; j < notif_size; j++)
- dvb_dmx_swfilter_packet(
- &mpq_demux->demux,
- ((u8 *)tspp_data_desc->virt_base) +
- j * TSPP_RAW_TTS_SIZE,
- ((u8 *)tspp_data_desc->virt_base) +
- j * TSPP_RAW_TTS_SIZE + TSPP_RAW_SIZE);
+ mpq_dmx_tspp_swfilter_desc(mpq_demux,
+ tspp_data_desc);
/*
* Notify TSPP that the buffer
* is no longer needed
@@ -1554,10 +1581,10 @@
"%s: warnning - len larger than one packet\n",
__func__);
- if (mpq_dmx_is_video_feed(feed))
+ if (dvb_dmx_is_video_feed(feed))
return mpq_dmx_process_video_packet(feed, buf);
- if (mpq_dmx_is_pcr_feed(feed))
+ if (dvb_dmx_is_pcr_feed(feed))
return mpq_dmx_process_pcr_packet(feed, buf);
return 0;
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
index d292992..5b91436 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
+++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
@@ -177,9 +177,7 @@
/* Payload length */
u32 payload_length;
- /* Total metadata length (including this header, plus optional
- * additional metadata.
- */
+ /* Number of meta data bytes immediately following this header */
u32 metadata_length;
};
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 8593760..f23c0aa 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -199,9 +199,18 @@
case HAL_BUFFER_INTERNAL_SCRATCH:
buffer = HFI_BUFFER_INTERNAL_SCRATCH;
break;
+ case HAL_BUFFER_INTERNAL_SCRATCH_1:
+ buffer = HFI_BUFFER_INTERNAL_SCRATCH_1;
+ break;
+ case HAL_BUFFER_INTERNAL_SCRATCH_2:
+ buffer = HFI_BUFFER_INTERNAL_SCRATCH_2;
+ break;
case HAL_BUFFER_INTERNAL_PERSIST:
buffer = HFI_BUFFER_INTERNAL_PERSIST;
break;
+ case HAL_BUFFER_INTERNAL_PERSIST_1:
+ buffer = HFI_BUFFER_INTERNAL_PERSIST_1;
+ break;
default:
dprintk(VIDC_ERR, "Invalid buffer :0x%x\n",
hal_buffer);
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 709eafc..be9458d 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -391,12 +391,30 @@
buffreq->buffer[6].buffer_type =
HAL_BUFFER_INTERNAL_SCRATCH;
break;
- case HFI_BUFFER_INTERNAL_PERSIST:
+ case HFI_BUFFER_INTERNAL_SCRATCH_1:
memcpy(&buffreq->buffer[7], hfi_buf_req,
- sizeof(struct hfi_buffer_requirements));
+ sizeof(struct hfi_buffer_requirements));
buffreq->buffer[7].buffer_type =
+ HAL_BUFFER_INTERNAL_SCRATCH_1;
+ break;
+ case HFI_BUFFER_INTERNAL_SCRATCH_2:
+ memcpy(&buffreq->buffer[8], hfi_buf_req,
+ sizeof(struct hfi_buffer_requirements));
+ buffreq->buffer[8].buffer_type =
+ HAL_BUFFER_INTERNAL_SCRATCH_2;
+ break;
+ case HFI_BUFFER_INTERNAL_PERSIST:
+ memcpy(&buffreq->buffer[9], hfi_buf_req,
+ sizeof(struct hfi_buffer_requirements));
+ buffreq->buffer[9].buffer_type =
HAL_BUFFER_INTERNAL_PERSIST;
break;
+ case HFI_BUFFER_INTERNAL_PERSIST_1:
+ memcpy(&buffreq->buffer[10], hfi_buf_req,
+ sizeof(struct hfi_buffer_requirements));
+ buffreq->buffer[10].buffer_type =
+ HAL_BUFFER_INTERNAL_PERSIST_1;
+ break;
default:
dprintk(VIDC_ERR,
"hal_process_sess_get_prop_buf_req: bad_buffer_type: %d",
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index d43e5ba..8cce310 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1443,6 +1443,198 @@
return flipped_state;
}
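+
+/*
+ * get_buff_req_buffer() - look up the buffer requirement entry reported by
+ * the firmware for the given buffer type. Returns NULL when no such entry
+ * exists, in which case the buffer is simply not required for this session.
+ */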
+struct hal_buffer_requirements *get_buff_req_buffer(
+ struct msm_vidc_inst *inst, enum hal_buffer buffer_type)
+{
+ int i;
+ for (i = 0; i < HAL_BUFFER_MAX; i++) {
+ if (inst->buff_req.buffer[i].buffer_type == buffer_type)
+ return &inst->buff_req.buffer[i];
+ }
+ return NULL;
+}
+
+static int set_scratch_buffers(struct msm_vidc_inst *inst,
+ enum hal_buffer buffer_type)
+{
+ int rc = 0;
+ struct msm_smem *handle;
+ struct internal_buf *binfo;
+ struct vidc_buffer_addr_info buffer_info;
+ u32 smem_flags = 0;
+ int domain;
+ struct hal_buffer_requirements *scratch_buf;
+ int i;
+ struct hfi_device *hdev;
+
+ hdev = inst->core->device;
+
+ scratch_buf = get_buff_req_buffer(inst, buffer_type);
+ if (!scratch_buf) {
+ dprintk(VIDC_DBG,
+ "This scratch buffer is not required, buffer_type: %x\n",
+ buffer_type);
+ return 0;
+ }
+ dprintk(VIDC_DBG,
+ "scratch: num = %d, size = %d\n",
+ scratch_buf->buffer_count_actual,
+ scratch_buf->buffer_size);
+
+ if (inst->mode == VIDC_SECURE) {
+ domain = call_hfi_op(hdev, get_domain,
+ hdev->hfi_device_data, CP_MAP);
+ smem_flags |= SMEM_SECURE;
+ } else
+ domain = call_hfi_op(hdev, get_domain,
+ hdev->hfi_device_data, NS_MAP);
+
+ if (scratch_buf->buffer_size) {
+ for (i = 0; i < scratch_buf->buffer_count_actual;
+ i++) {
+ handle = msm_smem_alloc(inst->mem_client,
+ scratch_buf->buffer_size, 1, smem_flags,
+ domain, 0, 0);
+ if (!handle) {
+ dprintk(VIDC_ERR,
+ "Failed to allocate scratch memory\n");
+ rc = -ENOMEM;
+ goto err_no_mem;
+ }
+ rc = msm_smem_cache_operations(inst->mem_client,
+ handle, SMEM_CACHE_CLEAN);
+ if (rc) {
+ dprintk(VIDC_WARN,
+ "Failed to clean cache may cause undefined behavior\n");
+ }
+ binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
+ if (!binfo) {
+ dprintk(VIDC_ERR, "Out of memory\n");
+ rc = -ENOMEM;
+ goto fail_kzalloc;
+ }
+ binfo->handle = handle;
+ buffer_info.buffer_size = scratch_buf->buffer_size;
+ buffer_info.buffer_type = buffer_type;
+ binfo->buffer_type = buffer_type;
+ buffer_info.num_buffers = 1;
+ buffer_info.align_device_addr = handle->device_addr;
+ dprintk(VIDC_DBG, "Scratch buffer address: %x",
+ buffer_info.align_device_addr);
+ rc = call_hfi_op(hdev, session_set_buffers,
+ (void *) inst->session, &buffer_info);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "vidc_hal_session_set_buffers failed");
+ goto fail_set_buffers;
+ }
+ mutex_lock(&inst->lock);
+ list_add_tail(&binfo->list, &inst->internalbufs);
+ mutex_unlock(&inst->lock);
+ }
+ }
+ return rc;
+fail_set_buffers:
+ kfree(binfo);
+fail_kzalloc:
+ msm_smem_free(inst->mem_client, handle);
+err_no_mem:
+ return rc;
+}
+
+static int set_persist_buffers(struct msm_vidc_inst *inst,
+ enum hal_buffer buffer_type)
+{
+ int rc = 0;
+ struct msm_smem *handle;
+ struct internal_buf *binfo;
+ struct vidc_buffer_addr_info buffer_info;
+ u32 smem_flags = 0;
+ int domain;
+ struct hal_buffer_requirements *persist_buf;
+ int i;
+ struct hfi_device *hdev;
+
+ hdev = inst->core->device;
+
+ persist_buf = get_buff_req_buffer(inst, buffer_type);
+ if (!persist_buf) {
+ dprintk(VIDC_DBG,
+ "This persist buffer is not required, buffer_type: %x\n",
+ buffer_type);
+ return 0;
+ }
+
+ dprintk(VIDC_DBG,
+ "persist: num = %d, size = %d\n",
+ persist_buf->buffer_count_actual,
+ persist_buf->buffer_size);
+ if (!list_empty(&inst->persistbufs)) {
+ dprintk(VIDC_ERR,
+ "Persist buffers already allocated\n");
+ return rc;
+ }
+
+ if (inst->mode == VIDC_SECURE) {
+ domain = call_hfi_op(hdev, get_domain,
+ hdev->hfi_device_data, CP_MAP);
+ smem_flags |= SMEM_SECURE;
+ } else
+ domain = call_hfi_op(hdev, get_domain,
+ hdev->hfi_device_data, NS_MAP);
+
+ if (persist_buf->buffer_size) {
+ for (i = 0; i < persist_buf->buffer_count_actual; i++) {
+ handle = msm_smem_alloc(inst->mem_client,
+ persist_buf->buffer_size, 1, smem_flags,
+ domain, 0, 0);
+ if (!handle) {
+ dprintk(VIDC_ERR,
+ "Failed to allocate persist memory\n");
+ rc = -ENOMEM;
+ goto err_no_mem;
+ }
+ rc = msm_smem_cache_operations(inst->mem_client,
+ handle, SMEM_CACHE_CLEAN);
+ if (rc) {
+ dprintk(VIDC_WARN,
+ "Failed to clean cache may cause undefined behavior\n");
+ }
+ binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
+ if (!binfo) {
+ dprintk(VIDC_ERR, "Out of memory\n");
+ rc = -ENOMEM;
+ goto fail_kzalloc;
+ }
+ binfo->handle = handle;
+ buffer_info.buffer_size = persist_buf->buffer_size;
+ buffer_info.buffer_type = buffer_type;
+ binfo->buffer_type = buffer_type;
+ buffer_info.num_buffers = 1;
+ buffer_info.align_device_addr = handle->device_addr;
+ dprintk(VIDC_DBG, "Persist buffer address: %x",
+ buffer_info.align_device_addr);
+ rc = call_hfi_op(hdev, session_set_buffers,
+ (void *) inst->session, &buffer_info);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "vidc_hal_session_set_buffers failed");
+ goto fail_set_buffers;
+ }
+ mutex_lock(&inst->lock);
+ list_add_tail(&binfo->list, &inst->persistbufs);
+ mutex_unlock(&inst->lock);
+ }
+ }
+ return rc;
+fail_set_buffers:
+ kfree(binfo);
+fail_kzalloc:
+ msm_smem_free(inst->mem_client, handle);
+err_no_mem:
+ return rc;
+}
+
int msm_comm_try_state(struct msm_vidc_inst *inst, int state)
{
int rc = 0;
@@ -1723,6 +1915,7 @@
mutex_unlock(&inst->sync_lock);
return rc;
}
+
int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst)
{
struct msm_smem *handle;
@@ -1755,7 +1948,7 @@
list);
handle = buf->handle;
buffer_info.buffer_size = handle->size;
- buffer_info.buffer_type = HAL_BUFFER_INTERNAL_SCRATCH;
+ buffer_info.buffer_type = buf->buffer_type;
buffer_info.num_buffers = 1;
buffer_info.align_device_addr = handle->device_addr;
if (inst->state != MSM_VIDC_CORE_INVALID &&
@@ -1819,7 +2012,7 @@
list);
handle = buf->handle;
buffer_info.buffer_size = handle->size;
- buffer_info.buffer_type = HAL_BUFFER_INTERNAL_PERSIST;
+ buffer_info.buffer_type = buf->buffer_type;
buffer_info.num_buffers = 1;
buffer_info.align_device_addr = handle->device_addr;
if (inst->state != MSM_VIDC_CORE_INVALID &&
@@ -1885,178 +2078,50 @@
int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst)
{
int rc = 0;
- struct msm_smem *handle;
- struct internal_buf *binfo;
- struct vidc_buffer_addr_info buffer_info;
- int domain;
- unsigned long smem_flags = 0;
- struct hal_buffer_requirements *scratch_buf;
- int i;
- struct hfi_device *hdev;
-
if (!inst || !inst->core || !inst->core->device) {
dprintk(VIDC_ERR, "%s invalid parameters", __func__);
return -EINVAL;
}
- hdev = inst->core->device;
-
- scratch_buf =
- &inst->buff_req.buffer[HAL_BUFFER_INTERNAL_SCRATCH];
- dprintk(VIDC_DBG,
- "scratch: num = %d, size = %d\n",
- scratch_buf->buffer_count_actual,
- scratch_buf->buffer_size);
if (msm_comm_release_scratch_buffers(inst))
dprintk(VIDC_WARN, "Failed to release scratch buffers\n");
- if (inst->mode == VIDC_SECURE) {
- domain = call_hfi_op(hdev, get_domain,
- hdev->hfi_device_data, CP_MAP);
- smem_flags |= SMEM_SECURE;
- } else
- domain = call_hfi_op(hdev, get_domain,
- hdev->hfi_device_data, NS_MAP);
- if (scratch_buf->buffer_size) {
- for (i = 0; i < scratch_buf->buffer_count_actual;
- i++) {
- handle = msm_smem_alloc(inst->mem_client,
- scratch_buf->buffer_size, 1, smem_flags,
- domain, 0, 0);
- if (!handle) {
- dprintk(VIDC_ERR,
- "Failed to allocate scratch memory\n");
- rc = -ENOMEM;
- goto err_no_mem;
- }
- rc = msm_smem_cache_operations(inst->mem_client,
- handle, SMEM_CACHE_CLEAN);
- if (rc) {
- dprintk(VIDC_WARN,
- "Failed to clean cache may cause undefined behavior\n");
- }
- binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
- if (!binfo) {
- dprintk(VIDC_ERR, "Out of memory\n");
- rc = -ENOMEM;
- goto fail_kzalloc;
- }
- binfo->handle = handle;
- buffer_info.buffer_size = scratch_buf->buffer_size;
- buffer_info.buffer_type = HAL_BUFFER_INTERNAL_SCRATCH;
- buffer_info.num_buffers = 1;
- buffer_info.align_device_addr = handle->device_addr;
- dprintk(VIDC_DBG, "Scratch buffer address: %x",
- buffer_info.align_device_addr);
- rc = call_hfi_op(hdev, session_set_buffers,
- (void *) inst->session, &buffer_info);
- if (rc) {
- dprintk(VIDC_ERR,
- "vidc_hal_session_set_buffers failed");
- goto fail_set_buffers;
- }
- mutex_lock(&inst->lock);
- list_add_tail(&binfo->list, &inst->internalbufs);
- mutex_unlock(&inst->lock);
- }
- }
+ rc = set_scratch_buffers(inst, HAL_BUFFER_INTERNAL_SCRATCH);
+ if (rc)
+ goto error;
+
+ rc = set_scratch_buffers(inst, HAL_BUFFER_INTERNAL_SCRATCH_1);
+ if (rc)
+ goto error;
+
+ rc = set_scratch_buffers(inst, HAL_BUFFER_INTERNAL_SCRATCH_2);
+ if (rc)
+ goto error;
+
return rc;
-fail_set_buffers:
- kfree(binfo);
-fail_kzalloc:
- msm_smem_free(inst->mem_client, handle);
-err_no_mem:
+error:
+ msm_comm_release_scratch_buffers(inst);
return rc;
}
int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst)
{
int rc = 0;
- struct msm_smem *handle;
- struct internal_buf *binfo;
- struct vidc_buffer_addr_info buffer_info;
- unsigned long flags;
- unsigned long smem_flags = 0;
- int domain;
- struct hal_buffer_requirements *persist_buf;
- int i;
- struct hfi_device *hdev;
-
if (!inst || !inst->core || !inst->core->device) {
dprintk(VIDC_ERR, "%s invalid parameters", __func__);
return -EINVAL;
}
- hdev = inst->core->device;
+ rc = set_persist_buffers(inst, HAL_BUFFER_INTERNAL_PERSIST);
+ if (rc)
+ goto error;
- persist_buf =
- &inst->buff_req.buffer[HAL_BUFFER_INTERNAL_PERSIST];
- dprintk(VIDC_DBG,
- "persist: num = %d, size = %d\n",
- persist_buf->buffer_count_actual,
- persist_buf->buffer_size);
- if (!list_empty(&inst->persistbufs)) {
- dprintk(VIDC_ERR,
- "Persist buffers already allocated\n");
- return rc;
- }
-
- if (inst->mode == VIDC_SECURE) {
- domain = call_hfi_op(hdev, get_domain,
- hdev->hfi_device_data, CP_MAP);
- flags |= SMEM_SECURE;
- } else
- domain = call_hfi_op(hdev, get_domain,
- hdev->hfi_device_data, NS_MAP);
-
- if (persist_buf->buffer_size) {
- for (i = 0; i < persist_buf->buffer_count_actual; i++) {
- handle = msm_smem_alloc(inst->mem_client,
- persist_buf->buffer_size, 1, smem_flags,
- domain, 0, 0);
- if (!handle) {
- dprintk(VIDC_ERR,
- "Failed to allocate persist memory\n");
- rc = -ENOMEM;
- goto err_no_mem;
- }
- rc = msm_smem_cache_operations(inst->mem_client,
- handle, SMEM_CACHE_CLEAN);
- if (rc) {
- dprintk(VIDC_WARN,
- "Failed to clean cache may cause undefined behavior\n");
- }
- binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
- if (!binfo) {
- dprintk(VIDC_ERR, "Out of memory\n");
- rc = -ENOMEM;
- goto fail_kzalloc;
- }
- binfo->handle = handle;
- buffer_info.buffer_size = persist_buf->buffer_size;
- buffer_info.buffer_type = HAL_BUFFER_INTERNAL_PERSIST;
- buffer_info.num_buffers = 1;
- buffer_info.align_device_addr = handle->device_addr;
- dprintk(VIDC_DBG, "Persist buffer address: %x",
- buffer_info.align_device_addr);
- rc = call_hfi_op(hdev, session_set_buffers,
- (void *) inst->session, &buffer_info);
- if (rc) {
- dprintk(VIDC_ERR,
- "vidc_hal_session_set_buffers failed");
- goto fail_set_buffers;
- }
- mutex_lock(&inst->lock);
- list_add_tail(&binfo->list, &inst->persistbufs);
- mutex_unlock(&inst->lock);
- }
- }
+ rc = set_persist_buffers(inst, HAL_BUFFER_INTERNAL_PERSIST_1);
+ if (rc)
+ goto error;
return rc;
-fail_set_buffers:
- kfree(binfo);
-fail_kzalloc:
- msm_smem_free(inst->mem_client, handle);
-err_no_mem:
+error:
+ msm_comm_release_persist_buffers(inst);
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index c03a4c4..8238d42 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -92,6 +92,7 @@
struct internal_buf {
struct list_head list;
+ enum hal_buffer buffer_type;
struct msm_smem *handle;
};
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 75594b3..8b3e7cb 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -53,6 +53,8 @@
#define HFI_BUFFER_EXTRADATA_INPUT (HFI_OX_BASE + 0x2)
#define HFI_BUFFER_EXTRADATA_OUTPUT (HFI_OX_BASE + 0x3)
#define HFI_BUFFER_EXTRADATA_OUTPUT2 (HFI_OX_BASE + 0x4)
+#define HFI_BUFFER_INTERNAL_SCRATCH_1 (HFI_OX_BASE + 0x5)
+#define HFI_BUFFER_INTERNAL_SCRATCH_2 (HFI_OX_BASE + 0x6)
#define HFI_BUFFER_MODE_STATIC (HFI_OX_BASE + 0x1)
#define HFI_BUFFER_MODE_RING (HFI_OX_BASE + 0x2)
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index d06ea51..a057303 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -383,7 +383,10 @@
HAL_BUFFER_EXTRADATA_OUTPUT,
HAL_BUFFER_EXTRADATA_OUTPUT2,
HAL_BUFFER_INTERNAL_SCRATCH,
+ HAL_BUFFER_INTERNAL_SCRATCH_1,
+ HAL_BUFFER_INTERNAL_SCRATCH_2,
HAL_BUFFER_INTERNAL_PERSIST,
+ HAL_BUFFER_INTERNAL_PERSIST_1,
HAL_BUFFER_MAX
};
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 37c051e..01c5e0b 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -179,6 +179,7 @@
#define HFI_BUFFER_OUTPUT (HFI_COMMON_BASE + 0x2)
#define HFI_BUFFER_OUTPUT2 (HFI_COMMON_BASE + 0x3)
#define HFI_BUFFER_INTERNAL_PERSIST (HFI_COMMON_BASE + 0x4)
+#define HFI_BUFFER_INTERNAL_PERSIST_1 (HFI_COMMON_BASE + 0x5)
struct hfi_buffer_info {
u32 buffer_addr;
diff --git a/drivers/misc/qseecom_kernel.h b/drivers/misc/qseecom_kernel.h
index 0c93ef2..c6c8fc9 100644
--- a/drivers/misc/qseecom_kernel.h
+++ b/drivers/misc/qseecom_kernel.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,12 @@
#define __QSEECOM_KERNEL_H_
#include <linux/types.h>
+
+#define QSEECOM_ALIGN_SIZE 0x40
+#define QSEECOM_ALIGN_MASK (QSEECOM_ALIGN_SIZE - 1)
+#define QSEECOM_ALIGN(x) \
+ ((x + QSEECOM_ALIGN_SIZE) & (~QSEECOM_ALIGN_MASK))
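+/*
+ * QSEECOM_ALIGN() rounds a size up to the next QSEECOM_ALIGN_SIZE (0x40)
+ * boundary and always adds at least one byte of padding, e.g.
+ * QSEECOM_ALIGN(0x10) == 0x40 and QSEECOM_ALIGN(0x40) == 0x80.
+ */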
+
/*
* struct qseecom_handle -
* Handle to the qseecom device for kernel clients
diff --git a/drivers/net/ethernet/msm/Kconfig b/drivers/net/ethernet/msm/Kconfig
index e15f4a9..4e95614 100644
--- a/drivers/net/ethernet/msm/Kconfig
+++ b/drivers/net/ethernet/msm/Kconfig
@@ -42,6 +42,16 @@
help
Debug stats on wakeup counts.
+config MSM_RMNET_WWAN
+ tristate "MSM RMNET WWAN Network Device"
+ depends on IPA
+ default n
+ help
+ WWAN Network Driver
+ Provides an API for embedded
+ applications to send and receive
+ data to/from A2
+
config QFEC
tristate "QFEC ethernet driver"
select MII
diff --git a/drivers/net/ethernet/msm/Makefile b/drivers/net/ethernet/msm/Makefile
index e152ec7..0afa00f 100644
--- a/drivers/net/ethernet/msm/Makefile
+++ b/drivers/net/ethernet/msm/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_MSM_RMNET) += msm_rmnet.o
+obj-$(CONFIG_MSM_RMNET_WWAN) += msm_rmnet_wwan.o
obj-$(CONFIG_MSM_RMNET_SDIO) += msm_rmnet_sdio.o
obj-$(CONFIG_MSM_RMNET_BAM) += msm_rmnet_bam.o
obj-$(CONFIG_MSM_RMNET_SMUX) += msm_rmnet_smux.o
diff --git a/drivers/net/ethernet/msm/msm_rmnet_wwan.c b/drivers/net/ethernet/msm/msm_rmnet_wwan.c
new file mode 100644
index 0000000..fe1ac46
--- /dev/null
+++ b/drivers/net/ethernet/msm/msm_rmnet_wwan.c
@@ -0,0 +1,736 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * WWAN Network Interface.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/msm_rmnet.h>
+#include <linux/if_arp.h>
+#include <linux/platform_device.h>
+#include <net/pkt_sched.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <mach/ipa.h>
+
+#define WWAN_DEV_NAME "rmnet%d"
+#define WWAN_METADATA_MASK 0x00FF0000
+#define IPA_RM_INACTIVITY_TIMER 1000
+#define WWAN_DEVICE_COUNT (8)
+#define WWAN_DATA_LEN 2000
+#define HEADROOM_FOR_A2_MUX 8 /* for mux header */
+#define TAILROOM 8 /* for padding by mux layer */
+
+enum wwan_device_status {
+ WWAN_DEVICE_INACTIVE = 0,
+ WWAN_DEVICE_ACTIVE = 1
+};
+static enum ipa_rm_resource_name
+ ipa_rm_resource_by_ch_id[WWAN_DEVICE_COUNT] = {
+ IPA_RM_RESOURCE_WWAN_0_PROD,
+ IPA_RM_RESOURCE_WWAN_1_PROD,
+ IPA_RM_RESOURCE_WWAN_2_PROD,
+ IPA_RM_RESOURCE_WWAN_3_PROD,
+ IPA_RM_RESOURCE_WWAN_4_PROD,
+ IPA_RM_RESOURCE_WWAN_5_PROD,
+ IPA_RM_RESOURCE_WWAN_6_PROD,
+ IPA_RM_RESOURCE_WWAN_7_PROD
+};
+static enum a2_mux_logical_channel_id
+ a2_mux_lcid_by_ch_id[WWAN_DEVICE_COUNT] = {
+ A2_MUX_WWAN_0,
+ A2_MUX_WWAN_1,
+ A2_MUX_WWAN_2,
+ A2_MUX_WWAN_3,
+ A2_MUX_WWAN_4,
+ A2_MUX_WWAN_5,
+ A2_MUX_WWAN_6,
+ A2_MUX_WWAN_7
+};
+
+/**
+ * struct wwan_private - WWAN private data
+ * @stats: iface statistics
+ * @ch_id: channel id
+ * @lock: spinlock for mutual exclusion
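+ * @resource_granted_completion: completion used to wait for an IPA RM
+ * resource grant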
+ * @device_status: holds device status
+ *
+ * WWAN private - holds all relevant info about WWAN driver
+ */
+struct wwan_private {
+ struct net_device_stats stats;
+ uint32_t ch_id;
+ spinlock_t lock;
+ struct completion resource_granted_completion;
+ enum wwan_device_status device_status;
+};
+
+static struct net_device *netdevs[WWAN_DEVICE_COUNT];
+
+static __be16 wwan_ip_type_trans(struct sk_buff *skb)
+{
+ __be16 protocol = 0;
+ /* Determine L3 protocol */
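+ /* e.g. a 0x4X version nibble maps to ETH_P_IP, 0x6X to ETH_P_IPV6 */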
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ pr_err("[%s] %s() L3 protocol decode error: 0x%02x",
+ skb->dev->name, __func__, skb->data[0] & 0xf0);
+ /* skb will be dropped in upper layer for unknown protocol */
+ break;
+ }
+ return protocol;
+}
+
+/**
+ * a2_mux_recv_notify() - Deliver an RX packet to network stack
+ *
+ * @skb: skb to be delivered
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void a2_mux_recv_notify(void *dev, struct sk_buff *skb)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+ skb->dev = dev;
+ skb->protocol = wwan_ip_type_trans(skb);
+ wwan_ptr->stats.rx_packets++;
+ wwan_ptr->stats.rx_bytes += skb->len;
+ pr_debug("[%s] Rx packet #%lu len=%d\n",
+ skb->dev->name,
+ wwan_ptr->stats.rx_packets, skb->len);
+ netif_rx(skb);
+}
+
+/**
+ * wwan_send_packet() - Deliver a TX packet to A2 MUX driver.
+ *
+ * @skb: skb to be delivered
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -EAGAIN: A2 MUX is not ready to send the skb. Try later
+ * -EFAULT: A2 MUX rejected the skb
+ * -EPERM: Unknown error
+ */
+static int wwan_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+ int ret;
+
+ dev->trans_start = jiffies;
+ ret = a2_mux_write(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id], skb);
+ if (ret != 0 && ret != -EAGAIN && ret != -EFAULT) {
+ pr_err("[%s] %s: write returned error %d",
+ dev->name, __func__, ret);
+ return -EPERM;
+ }
+ return ret;
+}
+
+/**
+ * a2_mux_write_done() - Update device statistics and wake the
+ * network stack queue if it was stopped and the A2 MUX queue is
+ * below the low watermark.
+ *
+ * @dev: network device
+ * @skb: skb to be delivered
+ *
+ * Return codes:
+ * None
+ */
+static void a2_mux_write_done(void *dev, struct sk_buff *skb)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+ unsigned long flags;
+
+ pr_debug("%s: write complete\n", __func__);
+ wwan_ptr->stats.tx_packets++;
+ wwan_ptr->stats.tx_bytes += skb->len;
+ pr_debug("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+ ((struct net_device *)(dev))->name, wwan_ptr->stats.tx_packets,
+ skb->len, skb->mark);
+ dev_kfree_skb_any(skb);
+ spin_lock_irqsave(&wwan_ptr->lock, flags);
+ if (netif_queue_stopped(dev) &&
+ a2_mux_is_ch_low(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id])) {
+ pr_debug("%s: Low WM hit, waking queue=%p\n",
+ __func__, skb);
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&wwan_ptr->lock, flags);
+}
+
+/**
+ * a2_mux_notify() - Callback function for A2 MUX events. Handles
+ * A2_MUX_RECEIVE and A2_MUX_WRITE_DONE events.
+ *
+ * @dev: network device
+ * @event: A2 MUX event
+ * @data: Additional data provided by A2 MUX
+ *
+ * Return codes:
+ * None
+ */
+static void a2_mux_notify(void *dev, enum a2_mux_event_type event,
+ unsigned long data)
+{
+ struct sk_buff *skb = (struct sk_buff *)data;
+
+ switch (event) {
+ case A2_MUX_RECEIVE:
+ if (!skb) {
+ pr_err("[%s] %s: No skb received",
+ ((struct net_device *)dev)->name, __func__);
+ return;
+ }
+ a2_mux_recv_notify(dev, skb);
+ break;
+ case A2_MUX_WRITE_DONE:
+ a2_mux_write_done(dev, skb);
+ break;
+ default:
+ pr_err("%s: unknown event %d\n", __func__, event);
+ break;
+ }
+}
+
+/**
+ * ipa_rm_resource_granted() - Called upon
+ * IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_resource_granted(void *dev)
+{
+ netif_wake_queue(dev);
+}
+/**
+ * ipa_rm_notify() - Callback function for RM events. Handles
+ * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
+ * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
+ * workqueue.
+ *
+ * @dev: network device
+ * @event: IPA RM event
+ * @data: Additional data provided by IPA RM
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_notify(void *dev, enum ipa_rm_event event,
+ unsigned long data)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+ pr_debug("%s: event %d\n", __func__, event);
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
+ complete_all(&wwan_ptr->resource_granted_completion);
+ break;
+ }
+ ipa_rm_resource_granted(dev);
+ break;
+ case IPA_RM_RESOURCE_RELEASED:
+ break;
+ default:
+ pr_err("%s: unknown event %d\n", __func__, event);
+ break;
+ }
+}
+
+static int wwan_register_to_ipa(struct net_device *dev)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+ struct ipa_tx_intf tx_properties = {0};
+ struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
+ struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
+ struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
+ struct ipa_rx_intf rx_properties = {0};
+ struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+ struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+ struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+ int ret = 0;
+
+ pr_debug("[%s] %s:\n", dev->name, __func__);
+ tx_properties.prop = tx_ioc_properties;
+ tx_ipv4_property = &tx_properties.prop[0];
+ tx_ipv4_property->ip = IPA_IP_v4;
+ tx_ipv4_property->dst_pipe = IPA_CLIENT_A2_EMBEDDED_CONS;
+ snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+ A2_MUX_HDR_NAME_V4_PREF,
+ a2_mux_lcid_by_ch_id[wwan_ptr->ch_id]);
+ tx_ipv6_property = &tx_properties.prop[1];
+ tx_ipv6_property->ip = IPA_IP_v6;
+ tx_ipv6_property->dst_pipe = IPA_CLIENT_A2_EMBEDDED_CONS;
+ snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+ A2_MUX_HDR_NAME_V6_PREF,
+ a2_mux_lcid_by_ch_id[wwan_ptr->ch_id]);
+ tx_properties.num_props = 2;
+ rx_properties.prop = rx_ioc_properties;
+ rx_ipv4_property = &rx_properties.prop[0];
+ rx_ipv4_property->ip = IPA_IP_v4;
+ rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_ipv4_property->attrib.meta_data = wwan_ptr->ch_id;
+ rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+ rx_ipv4_property->src_pipe = IPA_CLIENT_A2_EMBEDDED_PROD;
+ rx_ipv6_property = &rx_properties.prop[1];
+ rx_ipv6_property->ip = IPA_IP_v6;
+ rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_ipv6_property->attrib.meta_data = wwan_ptr->ch_id;
+ rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+ rx_ipv6_property->src_pipe = IPA_CLIENT_A2_EMBEDDED_PROD;
+ rx_properties.num_props = 2;
+ ret = ipa_register_intf(dev->name, &tx_properties, &rx_properties);
+ if (ret) {
+ pr_err("[%s] %s: ipa_register_intf failed %d\n", dev->name,
+ __func__, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int __wwan_open(struct net_device *dev)
+{
+ int r;
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+ pr_debug("[%s] __wwan_open()\n", dev->name);
+ if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE) {
+ INIT_COMPLETION(wwan_ptr->resource_granted_completion);
+ r = ipa_rm_inactivity_timer_request_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ if (r < 0 && r != -EINPROGRESS) {
+ pr_err("%s: ipa rm timer request resource failed %d\n",
+ __func__, r);
+ return -ENODEV;
+ }
+ if (r == -EINPROGRESS) {
+ wait_for_completion(
+ &wwan_ptr->resource_granted_completion);
+ }
+ r = a2_mux_open_channel(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id],
+ dev, a2_mux_notify);
+ if (r < 0) {
+ pr_err("%s: ch=%d failed with rc %d\n",
+ __func__, wwan_ptr->ch_id, r);
+ ipa_rm_inactivity_timer_release_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ return -ENODEV;
+ }
+ ipa_rm_inactivity_timer_release_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ r = wwan_register_to_ipa(dev);
+ if (r < 0) {
+ pr_err("%s: ch=%d failed to register to IPA rc %d\n",
+ __func__, wwan_ptr->ch_id, r);
+ return -ENODEV;
+ }
+ }
+ wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
+ return 0;
+}
+
+/**
+ * wwan_open() - Opens the wwan network interface. Opens logical
+ * channel on A2 MUX driver and starts the network stack queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while opening logical channel on A2 MUX driver
+ */
+static int wwan_open(struct net_device *dev)
+{
+ int rc = 0;
+
+ pr_debug("[%s] wwan_open()\n", dev->name);
+ rc = __wwan_open(dev);
+ if (rc == 0)
+ netif_start_queue(dev);
+ return rc;
+}
+
+
+static int __wwan_close(struct net_device *dev)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+ int rc = 0;
+
+ if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
+ wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
+ /* do not close wwan port once up, this causes
+ /* do not close the wwan port once it is up; doing so causes
+ the remote side to hang if the port is opened again */
+ rc = ipa_rm_inactivity_timer_request_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ if (rc < 0 && rc != -EINPROGRESS) {
+ pr_err("%s: ipa rm timer request resource failed %d\n",
+ __func__, rc);
+ return -ENODEV;
+ }
+ if (rc == -EINPROGRESS) {
+ wait_for_completion(
+ &wwan_ptr->resource_granted_completion);
+ }
+ rc = a2_mux_close_channel(
+ a2_mux_lcid_by_ch_id[wwan_ptr->ch_id]);
+ if (rc) {
+ pr_err("[%s] %s: a2_mux_close_channel failed %d\n",
+ dev->name, __func__, rc);
+ ipa_rm_inactivity_timer_release_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ return rc;
+ }
+ ipa_rm_inactivity_timer_release_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ rc = ipa_deregister_intf(dev->name);
+ if (rc) {
+ pr_err("[%s] %s: ipa_deregister_intf failed %d\n",
+ dev->name, __func__, rc);
+ return rc;
+ }
+ return rc;
+ } else
+ return -EBADF;
+}
+
+/**
+ * wwan_stop() - Stops the wwan network interface. Closes
+ * logical channel on A2 MUX driver and stops the network stack
+ * queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while closing logical channel on A2 MUX driver
+ */
+static int wwan_stop(struct net_device *dev)
+{
+ pr_debug("[%s] wwan_stop()\n", dev->name);
+ __wwan_close(dev);
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int wwan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (0 > new_mtu || WWAN_DATA_LEN < new_mtu)
+ return -EINVAL;
+ pr_debug("[%s] MTU change: old=%d new=%d\n",
+ dev->name, dev->mtu, new_mtu);
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/**
+ * wwan_xmit() - Transmits an skb. Requests the needed IPA RM
+ * resources; if IPA RM is not yet ready, the queue is stopped and
+ * the skb is retried once the IPA RM resources are granted.
+ *
+ * @skb: skb to be transmitted
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
+ * later
+ * -EFAULT: Error while transmitting the skb
+ */
+static int wwan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+ unsigned long flags;
+ int ret = 0;
+
+ if (netif_queue_stopped(dev)) {
+ pr_err("[%s]fatal: wwan_xmit called when netif_queue stopped\n",
+ dev->name);
+ return 0;
+ }
+ ret = ipa_rm_inactivity_timer_request_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ if (ret == -EINPROGRESS) {
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+ if (ret) {
+ pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
+ dev->name, ret);
+ return -EFAULT;
+ }
+ ret = wwan_send_packet(skb, dev);
+ if (ret == -EPERM) {
+ ret = NETDEV_TX_BUSY;
+ goto exit;
+ }
+ /*
+ * detected SSR a bit early. shut some things down now, and leave
+ * the rest to the main ssr handling code when that happens later
+ */
+ if (ret == -EFAULT) {
+ netif_carrier_off(dev);
+ dev_kfree_skb_any(skb);
+ ret = 0;
+ goto exit;
+ }
+ if (ret == -EAGAIN) {
+ /*
+ * This should not happen
+ * EAGAIN means we attempted to overflow the high watermark
+ * Clearly the queue is not stopped like it should be, so
+ * stop it and return BUSY to the TCP/IP framework. It will
+ * retry this packet when the queue is restarted, which happens
+ * in the write_done callback when the low watermark is hit.
+ */
+ netif_stop_queue(dev);
+ ret = NETDEV_TX_BUSY;
+ goto exit;
+ }
+ spin_lock_irqsave(&wwan_ptr->lock, flags);
+ if (a2_mux_is_ch_full(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id])) {
+ netif_stop_queue(dev);
+ pr_debug("%s: High WM hit, stopping queue=%p\n",
+ __func__, skb);
+ }
+ spin_unlock_irqrestore(&wwan_ptr->lock, flags);
+exit:
+ ipa_rm_inactivity_timer_release_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ return ret;
+}
+
+static struct net_device_stats *wwan_get_stats(struct net_device *dev)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+ return &wwan_ptr->stats;
+}
+
+static void wwan_tx_timeout(struct net_device *dev)
+{
+ pr_warning("[%s] wwan_tx_timeout()\n", dev->name);
+}
+
+/**
+ * wwan_ioctl() - I/O control for wwan network driver.
+ *
+ * @dev: network device
+ * @ifr: ignored
+ * @cmd: cmd to be executed. Can be one of the following:
+ * RMNET_IOCTL_OPEN - Open the network interface
+ * RMNET_IOCTL_CLOSE - Close the network interface
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: unsupported cmd
+ * Otherwise: return code of the requested open/close operation
+ */
+static int wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ int rc = 0;
+
+ switch (cmd) {
+ case RMNET_IOCTL_SET_LLP_IP: /* Set RAWIP protocol */
+ break;
+ case RMNET_IOCTL_GET_LLP: /* Get link protocol state */
+ ifr->ifr_ifru.ifru_data = (void *) RMNET_MODE_LLP_IP;
+ break;
+ case RMNET_IOCTL_SET_QOS_DISABLE: /* Set QoS header disabled */
+ break;
+ case RMNET_IOCTL_FLOW_ENABLE:
+ tc_qdisc_flow_control(dev, (u32)ifr->ifr_data, 1);
+ pr_debug("[%s] %s: enabled flow", dev->name, __func__);
+ break;
+ case RMNET_IOCTL_FLOW_DISABLE:
+ tc_qdisc_flow_control(dev, (u32)ifr->ifr_data, 0);
+ pr_debug("[%s] %s: disabled flow", dev->name, __func__);
+ break;
+ case RMNET_IOCTL_GET_QOS: /* Get QoS header state */
+ /* QoS disabled */
+ ifr->ifr_ifru.ifru_data = (void *) 0;
+ break;
+ case RMNET_IOCTL_GET_OPMODE: /* Get operation mode */
+ ifr->ifr_ifru.ifru_data = (void *) RMNET_MODE_LLP_IP;
+ break;
+ case RMNET_IOCTL_OPEN: /* Open transport port */
+ rc = __wwan_open(dev);
+ pr_debug("[%s] wwan_ioctl(): open transport port\n",
+ dev->name);
+ break;
+ case RMNET_IOCTL_CLOSE: /* Close transport port */
+ rc = __wwan_close(dev);
+ pr_debug("[%s] wwan_ioctl(): close transport port\n",
+ dev->name);
+ break;
+ default:
+ pr_err("[%s] error: wwan_ioctl called for unsupported cmd[%d]",
+ dev->name, cmd);
+ return -EINVAL;
+ }
+ return rc;
+}
+
+static const struct net_device_ops wwan_ops_ip = {
+ .ndo_open = wwan_open,
+ .ndo_stop = wwan_stop,
+ .ndo_start_xmit = wwan_xmit,
+ .ndo_get_stats = wwan_get_stats,
+ .ndo_tx_timeout = wwan_tx_timeout,
+ .ndo_do_ioctl = wwan_ioctl,
+ .ndo_change_mtu = wwan_change_mtu,
+ .ndo_set_mac_address = 0,
+ .ndo_validate_addr = 0,
+};
+
+/**
+ * wwan_setup() - Setups the wwan network driver.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void wwan_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &wwan_ops_ip;
+ ether_setup(dev);
+ /* set this after calling ether_setup */
+ dev->header_ops = 0; /* No header */
+ dev->type = ARPHRD_RAWIP;
+ dev->hard_header_len = 0;
+ dev->mtu = WWAN_DATA_LEN;
+ dev->addr_len = 0;
+ dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ dev->needed_headroom = HEADROOM_FOR_A2_MUX;
+ dev->needed_tailroom = TAILROOM;
+ dev->watchdog_timeo = 1000;
+}
+
+/**
+ * wwan_init() - Initializes the module and registers as a
+ * network interface with the network stack
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: No memory available
+ * -EFAULT: Internal error
+ */
+static int __init wwan_init(void)
+{
+ int ret;
+ struct net_device *dev;
+ struct wwan_private *wwan_ptr;
+ unsigned n;
+ struct ipa_rm_create_params ipa_rm_params;
+
+ pr_info("%s: WWAN devices[%d]\n", __func__, WWAN_DEVICE_COUNT);
+ for (n = 0; n < WWAN_DEVICE_COUNT; n++) {
+ dev = alloc_netdev(sizeof(struct wwan_private),
+ WWAN_DEV_NAME, wwan_setup);
+ if (!dev) {
+ pr_err("%s: no memory for netdev %d\n", __func__, n);
+ ret = -ENOMEM;
+ goto fail;
+ }
+ netdevs[n] = dev;
+ wwan_ptr = netdev_priv(dev);
+ wwan_ptr->ch_id = n;
+ spin_lock_init(&wwan_ptr->lock);
+ init_completion(&wwan_ptr->resource_granted_completion);
+ memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
+ ipa_rm_params.name = ipa_rm_resource_by_ch_id[n];
+ ipa_rm_params.reg_params.user_data = dev;
+ ipa_rm_params.reg_params.notify_cb = ipa_rm_notify;
+ ret = ipa_rm_create_resource(&ipa_rm_params);
+ if (ret) {
+ pr_err("%s: unable to create resource %d in IPA RM\n",
+ __func__, ipa_rm_resource_by_ch_id[n]);
+ goto fail;
+ }
+ ret = ipa_rm_inactivity_timer_init(ipa_rm_resource_by_ch_id[n],
+ IPA_RM_INACTIVITY_TIMER);
+ if (ret) {
+ pr_err("%s: ipa rm timer init failed %d on ins %d\n",
+ __func__, ret, n);
+ goto fail;
+ }
+ ret = ipa_rm_add_dependency(ipa_rm_resource_by_ch_id[n],
+ IPA_RM_RESOURCE_A2_CONS);
+ if (ret) {
+ pr_err("%s: unable to add dependency %d rc=%d\n",
+ __func__, n, ret);
+ goto fail;
+ }
+ ret = register_netdev(dev);
+ if (ret) {
+ pr_err("%s: unable to register netdev %d rc=%d\n",
+ __func__, n, ret);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ for (n = 0; n < WWAN_DEVICE_COUNT; n++) {
+ if (!netdevs[n])
+ break;
+ unregister_netdev(netdevs[n]);
+ ipa_rm_inactivity_timer_destroy(ipa_rm_resource_by_ch_id[n]);
+ free_netdev(netdevs[n]);
+ netdevs[n] = NULL;
+ }
+ return ret;
+}
+late_initcall(wwan_init);
+
+void wwan_cleanup(void)
+{
+ unsigned n;
+
+ pr_info("%s: WWAN devices[%d]\n", __func__, WWAN_DEVICE_COUNT);
+ for (n = 0; n < WWAN_DEVICE_COUNT; n++) {
+ unregister_netdev(netdevs[n]);
+ ipa_rm_inactivity_timer_destroy(ipa_rm_resource_by_ch_id[n]);
+ free_netdev(netdevs[n]);
+ netdevs[n] = NULL;
+ }
+}
+
+MODULE_DESCRIPTION("WWAN Network Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
index a25c799..b7eca61 100644
--- a/drivers/platform/msm/ipa/Makefile
+++ b/drivers/platform/msm/ipa/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_IPA) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
- ipa_utils.o ipa_nat.o rmnet_bridge.o a2_service.o ipa_bridge.o ipa_intf.o \
+ ipa_utils.o ipa_nat.o rmnet_bridge.o a2_service.o ipa_bridge.o ipa_intf.o teth_bridge.o \
ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o
diff --git a/drivers/platform/msm/ipa/a2_service.c b/drivers/platform/msm/ipa/a2_service.c
index 2c5245c..4b5f0a2 100644
--- a/drivers/platform/msm/ipa/a2_service.c
+++ b/drivers/platform/msm/ipa/a2_service.c
@@ -53,6 +53,8 @@
spinlock_t lock;
int num_tx_pkts;
int use_wm;
+ u32 v4_hdr_hdl;
+ u32 v6_hdr_hdl;
};
struct tx_pkt_info {
struct sk_buff *skb;
@@ -70,6 +72,7 @@
u8 ch_id;
u16 pkt_len;
};
+
struct a2_mux_context_type {
u32 tethered_prod;
u32 tethered_cons;
@@ -515,6 +518,9 @@
goto bridge_tethered_dl_failed;
}
memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+ connect_params.ipa_ep_cfg.hdr.hdr_len = sizeof(struct bam_mux_hdr);
+ connect_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
+ connect_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 6;
connect_params.client = IPA_CLIENT_A2_EMBEDDED_CONS;
connect_params.notify = ipa_embedded_notify;
connect_params.desc_fifo_sz = 0x800;
@@ -527,6 +533,9 @@
goto bridge_embedded_ul_failed;
}
memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+ connect_params.ipa_ep_cfg.hdr.hdr_len = sizeof(struct bam_mux_hdr);
+ connect_params.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
+ connect_params.ipa_ep_cfg.hdr.hdr_ofst_metadata = 4;
connect_params.client = IPA_CLIENT_A2_EMBEDDED_PROD;
connect_params.notify = ipa_embedded_notify;
connect_params.desc_fifo_sz = 0x800;
@@ -1006,6 +1015,176 @@
}
/**
+ * a2_mux_add_hdr() - called when a MUX header should
+ * be added
+ * @lcid: logical channel ID
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int a2_mux_add_hdr(enum a2_mux_logical_channel_id lcid)
+{
+ struct ipa_ioc_add_hdr *hdrs;
+ struct ipa_hdr_add *ipv4_hdr;
+ struct ipa_hdr_add *ipv6_hdr;
+ struct bam_mux_hdr *dmux_hdr;
+ int rc;
+
+ IPADBG("%s: ch %d\n", __func__, lcid);
+
+ if (lcid < A2_MUX_WWAN_0 || lcid > A2_MUX_WWAN_7) {
+ IPAERR("%s: non valid lcid passed: %d\n", __func__, lcid);
+ return -EINVAL;
+ }
+
+ hdrs = kzalloc(sizeof(struct ipa_ioc_add_hdr) +
+ 2 * sizeof(struct ipa_hdr_add), GFP_KERNEL);
+ if (!hdrs) {
+ IPAERR("%s: hdr allocation fail for ch %d\n", __func__, lcid);
+ return -ENOMEM;
+ }
+
+ ipv4_hdr = &hdrs->hdr[0];
+ ipv6_hdr = &hdrs->hdr[1];
+
+ dmux_hdr = (struct bam_mux_hdr *)ipv4_hdr->hdr;
+ snprintf(ipv4_hdr->name, IPA_RESOURCE_NAME_MAX, "%s%d",
+ A2_MUX_HDR_NAME_V4_PREF, lcid);
+ dmux_hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+ dmux_hdr->cmd = BAM_MUX_HDR_CMD_DATA;
+ dmux_hdr->reserved = 0;
+ dmux_hdr->ch_id = lcid;
+
+ /* Packet length is added by IPA */
+ dmux_hdr->pkt_len = 0;
+ dmux_hdr->pad_len = 0;
+
+ dmux_hdr->magic_num = htons(dmux_hdr->magic_num);
+ IPADBG("converted to network order magic_num=%d\n",
+ dmux_hdr->magic_num);
+
+ ipv4_hdr->hdr_len = sizeof(struct bam_mux_hdr);
+ ipv4_hdr->is_partial = 0;
+
+ dmux_hdr = (struct bam_mux_hdr *)ipv6_hdr->hdr;
+ snprintf(ipv6_hdr->name, IPA_RESOURCE_NAME_MAX, "%s%d",
+ A2_MUX_HDR_NAME_V6_PREF, lcid);
+ dmux_hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+ dmux_hdr->cmd = BAM_MUX_HDR_CMD_DATA;
+ dmux_hdr->reserved = 0;
+ dmux_hdr->ch_id = lcid;
+
+ /* Packet length is added by IPA */
+ dmux_hdr->pkt_len = 0;
+ dmux_hdr->pad_len = 0;
+
+ dmux_hdr->magic_num = htons(dmux_hdr->magic_num);
+ IPADBG("converted to network order magic_num=%d\n",
+ dmux_hdr->magic_num);
+
+ ipv6_hdr->hdr_len = sizeof(struct bam_mux_hdr);
+ ipv6_hdr->is_partial = 0;
+
+ hdrs->commit = 1;
+ hdrs->num_hdrs = 2;
+
+ rc = ipa_add_hdr(hdrs);
+ if (rc) {
+ IPAERR("Fail on Header-Insertion(%d)\n", rc);
+ goto bail;
+ }
+
+ if (ipv4_hdr->status) {
+ IPAERR("Fail on Header-Insertion ipv4(%d)\n",
+ ipv4_hdr->status);
+ rc = ipv4_hdr->status;
+ goto bail;
+ }
+
+ if (ipv6_hdr->status) {
+ IPAERR("%s: Fail on Header-Insertion ipv4(%d)\n", __func__,
+ ipv6_hdr->status);
+ rc = ipv6_hdr->status;
+ goto bail;
+ }
+
+ a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl = ipv4_hdr->hdr_hdl;
+ a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl = ipv6_hdr->hdr_hdl;
+
+ rc = 0;
+bail:
+ kfree(hdrs);
+ return rc;
+}
+
+/**
+ * a2_mux_del_hdr() - called when a MUX header should
+ * be removed
+ * @lcid: logical channel ID
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int a2_mux_del_hdr(enum a2_mux_logical_channel_id lcid)
+{
+ struct ipa_ioc_del_hdr *hdrs;
+ struct ipa_hdr_del *ipv4_hdl;
+ struct ipa_hdr_del *ipv6_hdl;
+ int rc;
+
+ IPADBG("%s: ch %d\n", __func__, lcid);
+
+ if (lcid < A2_MUX_WWAN_0 || lcid > A2_MUX_WWAN_7) {
+ IPAERR("invalid lcid passed: %d\n", lcid);
+ return -EINVAL;
+ }
+
+ hdrs = kzalloc(sizeof(struct ipa_ioc_del_hdr) +
+ 2 * sizeof(struct ipa_hdr_del), GFP_KERNEL);
+ if (!hdrs) {
+ IPAERR("hdr alloc fail for ch %d\n", lcid);
+ return -ENOMEM;
+ }
+
+ ipv4_hdl = &hdrs->hdl[0];
+ ipv6_hdl = &hdrs->hdl[1];
+
+ ipv4_hdl->hdl = a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl;
+ ipv6_hdl->hdl = a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl;
+
+ hdrs->commit = 1;
+ hdrs->num_hdls = 2;
+
+ rc = ipa_del_hdr(hdrs);
+ if (rc) {
+ IPAERR("Fail on Del Header-Insertion(%d)\n", rc);
+ goto bail;
+ }
+
+ if (ipv4_hdl->status) {
+ IPAERR("Fail on Del Header-Insertion ipv4(%d)\n",
+ ipv4_hdl->status);
+ rc = ipv4_hdl->status;
+ goto bail;
+ }
+ a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl = 0;
+
+ if (ipv6_hdl->status) {
+ IPAERR("Fail on Del Header-Insertion ipv4(%d)\n",
+ ipv6_hdl->status);
+ rc = ipv6_hdl->status;
+ goto bail;
+ }
+ a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl = 0;
+
+ rc = 0;
+bail:
+ kfree(hdrs);
+ return rc;
+}
+
+/**
* a2_mux_open_channel() - opens logical channel
* to A2
* @lcid: logical channel ID
@@ -1090,6 +1269,12 @@
kfree(hdr);
return rc;
}
+ rc = a2_mux_add_hdr(lcid);
+ if (rc) {
+ IPAERR("a2_mux_add_hdr failed %d; ch: %d\n",
+ rc, lcid);
+ return rc;
+ }
}
open_done:
@@ -1154,6 +1339,13 @@
kfree(hdr);
return rc;
}
+
+ rc = a2_mux_del_hdr(lcid);
+ if (rc) {
+ IPAERR("a2_mux_del_hdr failed %d; ch: %d\n",
+ rc, lcid);
+ return rc;
+ }
}
IPADBG("%s: closed ch %d\n", __func__, lcid);
return 0;
@@ -1342,6 +1534,13 @@
}
if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));
+
+ /*
+ * Set remote channel open for tethered channel since there is
+ * no actual remote tethered channel
+ */
+ a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].status |= BAM_CH_REMOTE_OPEN;
+
rc = 0;
goto bail;
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
index b07c653..edf3a60 100644
--- a/drivers/platform/msm/ipa/ipa.c
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -806,7 +806,7 @@
/* check all the system pipes for tx comp and rx avail */
if (ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep->valid)
- cnt |= ipa_handle_rx_core(false);
+ cnt |= ipa_handle_rx_core(false, true);
for (i = 0; i < num_tx_pipes; i++)
if (ipa_ctx->sys[tx_pipes[i]].ep->valid)
@@ -1578,6 +1578,10 @@
IPADBG("polling_mode=%u delay_ms=%u\n", polling_mode, polling_delay_ms);
ipa_ctx->polling_mode = polling_mode;
+ if (ipa_ctx->polling_mode)
+ atomic_set(&ipa_ctx->curr_polling_state, 1);
+ else
+ atomic_set(&ipa_ctx->curr_polling_state, 0);
IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n",
hdr_tbl_lcl, ip4_rt_tbl_lcl, ip6_rt_tbl_lcl, ip4_flt_tbl_lcl,
ip6_flt_tbl_lcl);
@@ -1896,10 +1900,6 @@
ipa_ctx->aggregation_byte_limit = 1;
ipa_ctx->aggregation_time_limit = 0;
- /* gate IPA clocks */
- if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
- ipa_disable_clks();
-
/* Initialize IPA RM (resource manager) */
result = ipa_rm_initialize();
if (result) {
@@ -1910,6 +1910,18 @@
a2_mux_init();
+ /* Initialize the tethering bridge driver */
+ result = teth_bridge_driver_init();
+ if (result) {
+ IPAERR(":teth_bridge_driver_init() failed\n");
+ result = -ENODEV;
+ goto fail_cdev_add;
+ }
+
+ /* gate IPA clocks */
+ if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
+ ipa_disable_clks();
+
IPADBG(":IPA driver init OK.\n");
return 0;
diff --git a/drivers/platform/msm/ipa/ipa_bridge.c b/drivers/platform/msm/ipa/ipa_bridge.c
index 56e9b0d..0227ee4 100644
--- a/drivers/platform/msm/ipa/ipa_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_bridge.c
@@ -42,9 +42,6 @@
struct sps_connect connection;
struct sps_mem_buffer desc_mem_buf;
struct sps_register_event register_event;
- spinlock_t spinlock;
- u32 len;
- u32 free_len;
struct list_head free_desc_list;
};
@@ -162,12 +159,10 @@
goto fail_dma;
}
- info->len = ~0;
-
list_add_tail(&info->link, &sys_rx->head_desc_list);
ret = sps_transfer_one(sys_rx->pipe, info->dma_address,
IPA_RX_SKB_SIZE, info,
- SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
+ SPS_IOVEC_FLAG_INT);
if (ret) {
list_del(&info->link);
dma_unmap_single(NULL, info->dma_address, IPA_RX_SKB_SIZE,
@@ -176,7 +171,6 @@
type, dir);
goto fail_dma;
}
- sys_rx->len++;
return 0;
fail_dma:
@@ -206,9 +200,6 @@
link);
list_move_tail(&tx_pkt->link,
&sys_tx->free_desc_list);
- sys_tx->len--;
- sys_tx->free_len++;
- tx_pkt->len = ~0;
cnt++;
}
} while (all);
@@ -245,7 +236,6 @@
struct ipa_pkt_info,
link);
list_del(&rx_pkt->link);
- sys_rx->len--;
rx_pkt->len = iov.size;
retry_alloc_tx:
@@ -285,15 +275,12 @@
list_add_tail(&tmp_pkt->link,
&sys_tx->free_desc_list);
- sys_tx->free_len++;
- tmp_pkt->len = ~0;
}
tx_pkt = list_first_entry(&sys_tx->free_desc_list,
struct ipa_pkt_info,
link);
list_del(&tx_pkt->link);
- sys_tx->free_len--;
retry_add_rx:
list_add_tail(&tx_pkt->link,
@@ -302,8 +289,7 @@
tx_pkt->dma_address,
IPA_RX_SKB_SIZE,
tx_pkt,
- SPS_IOVEC_FLAG_INT |
- SPS_IOVEC_FLAG_EOT);
+ SPS_IOVEC_FLAG_INT);
if (ret) {
list_del(&tx_pkt->link);
pr_debug_ratelimited("%s: sps_transfer_one failed %d type=%d dir=%d\n",
@@ -312,7 +298,6 @@
polling_max_sleep[dir]);
goto retry_add_rx;
}
- sys_rx->len++;
retry_add_tx:
list_add_tail(&rx_pkt->link,
@@ -332,7 +317,6 @@
polling_max_sleep[dir]);
goto retry_add_tx;
}
- sys_tx->len++;
IPA_STATS_INC_BRIDGE_CNT(ctx->type, dir,
ipa_ctx->stats.bridged_pkts);
}
@@ -444,7 +428,6 @@
INIT_LIST_HEAD(&sys->head_desc_list);
INIT_LIST_HEAD(&sys->free_desc_list);
- spin_lock_init(&sys->spinlock);
memset(&ipa_ctx->ep[ipa_ep_idx], 0,
sizeof(struct ipa_ep_context));
@@ -614,7 +597,6 @@
INIT_LIST_HEAD(&sys->head_desc_list);
INIT_LIST_HEAD(&sys->free_desc_list);
- spin_lock_init(&sys->spinlock);
if (dir == IPA_BRIDGE_DIR_DL) {
sys->register_event.options = SPS_O_EOT;
@@ -663,32 +645,32 @@
int ret;
int i;
- bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq = alloc_workqueue("ipa_ul_teth",
- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq =
+ create_singlethread_workqueue("ipa_ul_teth");
if (!bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq) {
IPAERR("ipa ul teth wq alloc failed\n");
ret = -ENOMEM;
goto fail_ul_teth;
}
- bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq = alloc_workqueue("ipa_dl_teth",
- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq =
+ create_singlethread_workqueue("ipa_dl_teth");
if (!bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq) {
IPAERR("ipa dl teth wq alloc failed\n");
ret = -ENOMEM;
goto fail_dl_teth;
}
- bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq = alloc_workqueue("ipa_ul_emb",
- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq =
+ create_singlethread_workqueue("ipa_ul_emb");
if (!bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq) {
IPAERR("ipa ul emb wq alloc failed\n");
ret = -ENOMEM;
goto fail_ul_emb;
}
- bridge[IPA_BRIDGE_TYPE_EMBEDDED].dl_wq = alloc_workqueue("ipa_dl_emb",
- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ bridge[IPA_BRIDGE_TYPE_EMBEDDED].dl_wq =
+ create_singlethread_workqueue("ipa_dl_emb");
if (!bridge[IPA_BRIDGE_TYPE_EMBEDDED].dl_wq) {
IPAERR("ipa dl emb wq alloc failed\n");
ret = -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_debugfs.c
index ec83653..1605ed2 100644
--- a/drivers/platform/msm/ipa/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_debugfs.c
@@ -45,6 +45,37 @@
__stringify(IPA_CLIENT_MAX),
};
+const char *ipa_ic_name[] = {
+ __stringify_1(IPA_IP_CMD_INVALID),
+ __stringify_1(IPA_DECIPH_INIT),
+ __stringify_1(IPA_PPP_FRM_INIT),
+ __stringify_1(IPA_IP_V4_FILTER_INIT),
+ __stringify_1(IPA_IP_V6_FILTER_INIT),
+ __stringify_1(IPA_IP_V4_NAT_INIT),
+ __stringify_1(IPA_IP_V6_NAT_INIT),
+ __stringify_1(IPA_IP_V4_ROUTING_INIT),
+ __stringify_1(IPA_IP_V6_ROUTING_INIT),
+ __stringify_1(IPA_HDR_INIT_LOCAL),
+ __stringify_1(IPA_HDR_INIT_SYSTEM),
+ __stringify_1(IPA_DECIPH_SETUP),
+ __stringify_1(IPA_INSERT_NAT_RULE),
+ __stringify_1(IPA_DELETE_NAT_RULE),
+ __stringify_1(IPA_NAT_DMA),
+ __stringify_1(IPA_IP_PACKET_TAG),
+ __stringify_1(IPA_IP_PACKET_INIT),
+};
+
+const char *ipa_excp_name[] = {
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD0),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD1),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IHL),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_TAG),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_NAT),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP),
+};
+
static struct dentry *dent;
static struct dentry *dfile_gen_reg;
static struct dentry *dfile_ep_reg;
@@ -489,33 +520,39 @@
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"sw_tx=%u\n"
"hw_tx=%u\n"
- "rx=%u\n",
+ "rx=%u\n"
+ "rx_repl_repost=%u\n"
+ "x_intr_repost=%u\n"
+ "rx_q_len=%u\n",
ipa_ctx->stats.tx_sw_pkts,
ipa_ctx->stats.tx_hw_pkts,
- ipa_ctx->stats.rx_pkts);
+ ipa_ctx->stats.rx_pkts,
+ ipa_ctx->stats.rx_repl_repost,
+ ipa_ctx->stats.x_intr_repost,
+ ipa_ctx->stats.rx_q_len);
cnt += nbytes;
for (i = 0; i < MAX_NUM_EXCP; i++) {
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
- "rx_excp[%u]=%u\n", i,
+ "rx_excp[%u:%35s]=%u\n", i, ipa_excp_name[i],
ipa_ctx->stats.rx_excp_pkts[i]);
cnt += nbytes;
}
for (i = 0; i < IPA_BRIDGE_TYPE_MAX; i++) {
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
- "bridged_pkt[%u][dl]=%u\n"
- "bridged_pkt[%u][ul]=%u\n",
- i,
+ "brg_pkt[%u:%s][dl]=%u\n"
+ "brg_pkt[%u:%s][ul]=%u\n",
+ i, (i == 0) ? "teth" : "embd",
ipa_ctx->stats.bridged_pkts[i][0],
- i,
+ i, (i == 0) ? "teth" : "embd",
ipa_ctx->stats.bridged_pkts[i][1]);
cnt += nbytes;
}
for (i = 0; i < MAX_NUM_IMM_CMD; i++) {
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
- "IC[%u]=%u\n", i,
+ "IC[%2u:%22s]=%u\n", i, ipa_ic_name[i],
ipa_ctx->stats.imm_cmds[i]);
cnt += nbytes;
}
diff --git a/drivers/platform/msm/ipa/ipa_dp.c b/drivers/platform/msm/ipa/ipa_dp.c
index 52ed428..38690e9 100644
--- a/drivers/platform/msm/ipa/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_dp.c
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/list.h>
@@ -19,6 +20,17 @@
#define list_next_entry(pos, member) \
list_entry(pos->member.next, typeof(*pos), member)
#define IPA_LAST_DESC_CNT 0xFFFF
+#define POLLING_INACTIVITY 40
+#define POLLING_MIN_SLEEP 950
+#define POLLING_MAX_SLEEP 1050
+
+static void replenish_rx_work_func(struct work_struct *work);
+static struct delayed_work replenish_rx_work;
+static void switch_to_intr_work_func(struct work_struct *work);
+static struct delayed_work switch_to_intr_work;
+static void ipa_wq_handle_rx(struct work_struct *work);
+static DECLARE_WORK(rx_work, ipa_wq_handle_rx);
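+
+/*
+ * Deferred work used by the reworked Rx path: rx_work runs the polling loop
+ * from the Rx workqueue, replenish_rx_work retries pool replenishment when an
+ * allocation fails with an empty ring, and switch_to_intr_work retries the
+ * switch back to interrupt mode if reconfiguring the pipe failed.
+ */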
+
/**
* ipa_write_done() - this function will be (eventually) called when a Tx
* operation is complete
@@ -40,7 +52,7 @@
unsigned long irq_flags;
struct ipa_mem_buffer mult = { 0 };
int i;
- u16 cnt;
+ u32 cnt;
tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
cnt = tx_pkt->cnt;
@@ -66,9 +78,8 @@
}
next_pkt = list_next_entry(tx_pkt, link);
list_del(&tx_pkt->link);
- tx_pkt->sys->len--;
spin_unlock_irqrestore(&tx_pkt->sys->spinlock, irq_flags);
- if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
+ if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
dma_pool_free(ipa_ctx->one_kb_no_straddle_pool,
tx_pkt->bounce,
tx_pkt->mem.phys_base);
@@ -114,10 +125,10 @@
u16 sps_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
dma_addr_t dma_address;
u16 len;
- u32 mem_flag = GFP_KERNEL;
+ u32 mem_flag = GFP_ATOMIC;
- if (in_atomic)
- mem_flag = GFP_ATOMIC;
+ if (unlikely(!in_atomic))
+ mem_flag = GFP_KERNEL;
tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, mem_flag);
if (!tx_pkt) {
@@ -125,7 +136,7 @@
goto fail_mem_alloc;
}
- if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
+ if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
WARN_ON(desc->len > 512);
/*
@@ -173,19 +184,15 @@
if (desc->type == IPA_IMM_CMD_DESC) {
sps_flags |= SPS_IOVEC_FLAG_IMME;
len = desc->opcode;
+ IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
+ desc->opcode, desc->len, sps_flags);
+ IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
} else {
len = desc->len;
}
- if (desc->type == IPA_IMM_CMD_DESC) {
- IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
- desc->opcode, desc->len, sps_flags);
- IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
- }
-
spin_lock_irqsave(&sys->spinlock, irq_flags);
list_add_tail(&tx_pkt->link, &sys->head_desc_list);
- sys->len++;
result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
sps_flags);
if (result) {
@@ -200,7 +207,7 @@
fail_sps_send:
list_del(&tx_pkt->link);
spin_unlock_irqrestore(&sys->spinlock, irq_flags);
- if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
+ if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
dma_address);
else
@@ -233,7 +240,7 @@
*
* Return codes: 0: success, -EFAULT: failure
*/
-int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc,
+int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
bool in_atomic)
{
struct ipa_tx_pkt_wrapper *tx_pkt;
@@ -247,17 +254,18 @@
int result;
int fail_dma_wrap = 0;
uint size = num_desc * sizeof(struct sps_iovec);
- u32 mem_flag = GFP_KERNEL;
+ u32 mem_flag = GFP_ATOMIC;
- if (likely(in_atomic))
- mem_flag = GFP_ATOMIC;
+ if (unlikely(!in_atomic))
+ mem_flag = GFP_KERNEL;
transfer.iovec = dma_alloc_coherent(NULL, size, &dma_addr, 0);
transfer.iovec_phys = dma_addr;
transfer.iovec_count = num_desc;
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
if (!transfer.iovec) {
IPAERR("fail to alloc DMA mem for sps xfr buff\n");
- goto failure;
+ goto failure_coherent;
}
for (i = 0; i < num_desc; i++) {
@@ -274,24 +282,23 @@
*/
if (i == 0) {
transfer.user = tx_pkt;
-
tx_pkt->mult.phys_base = dma_addr;
tx_pkt->mult.base = transfer.iovec;
tx_pkt->mult.size = size;
tx_pkt->cnt = num_desc;
+ INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
}
iovec = &transfer.iovec[i];
iovec->flags = 0;
INIT_LIST_HEAD(&tx_pkt->link);
- INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
tx_pkt->type = desc[i].type;
tx_pkt->mem.base = desc[i].pyld;
tx_pkt->mem.size = desc[i].len;
- if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
+ if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
WARN_ON(tx_pkt->mem.size > 512);
/*
@@ -334,10 +341,7 @@
* add this packet to system pipe context.
*/
iovec->addr = tx_pkt->mem.phys_base;
- spin_lock_irqsave(&sys->spinlock, irq_flags);
list_add_tail(&tx_pkt->link, &sys->head_desc_list);
- sys->len++;
- spin_unlock_irqrestore(&sys->spinlock, irq_flags);
/*
* Special treatment for immediate commands, where the structure
@@ -364,16 +368,15 @@
goto failure;
}
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
return 0;
failure:
tx_pkt = transfer.user;
for (j = 0; j < i; j++) {
- spin_lock_irqsave(&sys->spinlock, irq_flags);
next_pkt = list_next_entry(tx_pkt, link);
list_del(&tx_pkt->link);
- spin_unlock_irqrestore(&sys->spinlock, irq_flags);
- if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
+ if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
dma_pool_free(ipa_ctx->one_kb_no_straddle_pool,
tx_pkt->bounce,
tx_pkt->mem.phys_base);
@@ -391,7 +394,8 @@
if (transfer.iovec_phys)
dma_free_coherent(NULL, size, transfer.iovec,
transfer.iovec_phys);
-
+failure_coherent:
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
return -EFAULT;
}
@@ -512,15 +516,14 @@
* - Call the endpoints notify function, passing the skb in the parameters
* - Replenish the rx cache
*/
-int ipa_handle_rx_core(bool process_all)
+int ipa_handle_rx_core(bool process_all, bool in_poll_state)
{
struct ipa_a5_mux_hdr *mux_hdr;
struct ipa_rx_pkt_wrapper *rx_pkt;
struct sk_buff *rx_skb;
struct sps_iovec iov;
- unsigned long irq_flags;
- u16 pull_len;
- u16 padding;
+ unsigned int pull_len;
+ unsigned int padding;
int ret;
struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
struct ipa_ep_context *ep;
@@ -528,35 +531,35 @@
struct completion *compl;
struct ipa_tree_node *node;
- do {
+ while ((in_poll_state ? atomic_read(&ipa_ctx->curr_polling_state) :
+ !atomic_read(&ipa_ctx->curr_polling_state))) {
+ if (cnt && !process_all)
+ break;
+
ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
if (ret) {
IPAERR("sps_get_iovec failed %d\n", ret);
break;
}
- /* Break the loop when there are no more packets to receive */
if (iov.addr == 0)
break;
- spin_lock_irqsave(&sys->spinlock, irq_flags);
- if (list_empty(&sys->head_desc_list))
- WARN_ON(1);
+ if (unlikely(list_empty(&sys->head_desc_list)))
+ continue;
+
rx_pkt = list_first_entry(&sys->head_desc_list,
struct ipa_rx_pkt_wrapper, link);
- if (!rx_pkt)
- WARN_ON(1);
+
rx_pkt->len = iov.size;
sys->len--;
list_del(&rx_pkt->link);
- spin_unlock_irqrestore(&sys->spinlock, irq_flags);
IPADBG("--curr_cnt=%d\n", sys->len);
rx_skb = rx_pkt->skb;
dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
DMA_FROM_DEVICE);
- kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
/*
* make it look like a real skb, "data" was already set at
@@ -565,6 +568,7 @@
rx_skb->tail = rx_skb->data + rx_pkt->len;
rx_skb->len = rx_pkt->len;
rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
@@ -601,20 +605,20 @@
}
mutex_unlock(&ipa_ctx->lock);
}
- dev_kfree_skb_any(rx_skb);
+ dev_kfree_skb(rx_skb);
ipa_replenish_rx_cache();
++cnt;
continue;
}
- if (mux_hdr->src_pipe_index >= IPA_NUM_PIPES ||
+ if (unlikely(mux_hdr->src_pipe_index >= IPA_NUM_PIPES ||
!ipa_ctx->ep[mux_hdr->src_pipe_index].valid ||
- !ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify) {
+ !ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify)) {
IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
mux_hdr->src_pipe_index,
ipa_ctx->ep[mux_hdr->src_pipe_index].valid,
ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify);
- dev_kfree_skb_any(rx_skb);
+ dev_kfree_skb(rx_skb);
ipa_replenish_rx_cache();
++cnt;
continue;
@@ -634,11 +638,11 @@
IPADBG("pulling %d bytes from skb\n", pull_len);
skb_pull(rx_skb, pull_len);
+ ipa_replenish_rx_cache();
ep->client_notify(ep->priv, IPA_RECEIVE,
(unsigned long)(rx_skb));
- ipa_replenish_rx_cache();
cnt++;
- } while (process_all);
+ }
return cnt;
}
@@ -652,9 +656,9 @@
struct ipa_sys_context *sys;
IPADBG("Enter");
- if (!ipa_ctx->curr_polling_state) {
+ if (!atomic_read(&ipa_ctx->curr_polling_state)) {
IPAERR("already in intr mode\n");
- return;
+ goto fail;
}
sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
@@ -662,49 +666,28 @@
ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
if (ret) {
IPAERR("sps_get_config() failed %d\n", ret);
- return;
+ goto fail;
}
sys->event.options = SPS_O_EOT;
ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
if (ret) {
IPAERR("sps_register_event() failed %d\n", ret);
- return;
+ goto fail;
}
sys->ep->connect.options =
SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
if (ret) {
IPAERR("sps_set_config() failed %d\n", ret);
- return;
+ goto fail;
}
- ipa_handle_rx_core(true);
- ipa_ctx->curr_polling_state = 0;
-}
+ atomic_set(&ipa_ctx->curr_polling_state, 0);
+ ipa_handle_rx_core(true, false);
+ return;
-/**
- * ipa_rx_switch_to_poll_mode() - Operate the Rx data path in polling mode
- */
-static void ipa_rx_switch_to_poll_mode(void)
-{
- int ret;
- struct ipa_ep_context *ep;
-
- IPADBG("Enter");
- ep = ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep;
-
- ret = sps_get_config(ep->ep_hdl, &ep->connect);
- if (ret) {
- IPAERR("sps_get_config() failed %d\n", ret);
- return;
- }
- ep->connect.options =
- SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
- ret = sps_set_config(ep->ep_hdl, &ep->connect);
- if (ret) {
- IPAERR("sps_set_config() failed %d\n", ret);
- return;
- }
- ipa_ctx->curr_polling_state = 1;
+fail:
+ IPA_STATS_INC_CNT(ipa_ctx->stats.x_intr_repost);
+ schedule_delayed_work(&switch_to_intr_work, msecs_to_jiffies(1));
}
/**
@@ -722,16 +705,30 @@
*/
static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
{
- struct ipa_rx_pkt_wrapper *rx_pkt;
+ struct ipa_ep_context *ep;
+ int ret;
IPADBG("event %d notified\n", notify->event_id);
switch (notify->event_id) {
case SPS_EVENT_EOT:
- if (!ipa_ctx->curr_polling_state) {
- ipa_rx_switch_to_poll_mode();
- rx_pkt = notify->data.transfer.user;
- queue_work(ipa_ctx->rx_wq, &rx_pkt->work);
+ if (!atomic_read(&ipa_ctx->curr_polling_state)) {
+ ep = ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep;
+
+ ret = sps_get_config(ep->ep_hdl, &ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ break;
+ }
+ ep->connect.options = SPS_O_AUTO_ENABLE |
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(ep->ep_hdl, &ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ break;
+ }
+ atomic_set(&ipa_ctx->curr_polling_state, 1);
+ queue_work(ipa_ctx->rx_wq, &rx_work);
}
break;
default:
@@ -861,6 +858,9 @@
/* fall through */
case 3:
sys_idx = ipa_ep_idx;
+ INIT_DELAYED_WORK(&replenish_rx_work, replenish_rx_work_func);
+ INIT_DELAYED_WORK(&switch_to_intr_work,
+ switch_to_intr_work_func);
break;
case WLAN_AMPDU_TX_EP:
sys_idx = IPA_A5_WLAN_AMPDU_OUT;
@@ -954,7 +954,7 @@
ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
IPA_WRITE_DONE, (unsigned long)skb);
else
- dev_kfree_skb_any(skb);
+ dev_kfree_skb(skb);
}
static void ipa_tx_cmd_comp(void *user1, void *user2)
@@ -1066,6 +1066,24 @@
}
EXPORT_SYMBOL(ipa_tx_dp);
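+/*
+ * Drain the Rx pipe in polling mode: each empty poll sleeps for roughly 1 ms
+ * (POLLING_MIN_SLEEP..POLLING_MAX_SLEEP us), and after POLLING_INACTIVITY
+ * consecutive empty polls the pipe is switched back to interrupt mode.
+ */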
+static void ipa_handle_rx(void)
+{
+ int inactive_cycles = 0;
+ int cnt;
+
+ do {
+ cnt = ipa_handle_rx_core(true, true);
+ if (cnt == 0) {
+ inactive_cycles++;
+ usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
+ } else {
+ inactive_cycles = 0;
+ }
+ } while (inactive_cycles <= POLLING_INACTIVITY);
+
+ ipa_rx_switch_to_intr_mode();
+}
+
/**
* ipa_handle_rx() - handle packet reception. This function is executed in the
* context of a work queue.
@@ -1074,10 +1092,9 @@
* ipa_handle_rx_core() is run in polling mode. After all packets has been
* received, the driver switches back to interrupt mode.
*/
-void ipa_wq_handle_rx(struct work_struct *work)
+static void ipa_wq_handle_rx(struct work_struct *work)
{
- ipa_handle_rx_core(true);
- ipa_rx_switch_to_intr_mode();
+ ipa_handle_rx();
}
/**
@@ -1099,26 +1116,23 @@
void *ptr;
struct ipa_rx_pkt_wrapper *rx_pkt;
int ret;
- int rx_len_cached;
- unsigned long irq_flags;
+ int rx_len_cached = 0;
struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+ gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
- spin_lock_irqsave(&sys->spinlock, irq_flags);
rx_len_cached = sys->len;
- spin_unlock_irqrestore(&sys->spinlock, irq_flags);
while (rx_len_cached < IPA_RX_POOL_CEIL) {
rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
- GFP_KERNEL);
+ flag);
if (!rx_pkt) {
IPAERR("failed to alloc rx wrapper\n");
- return;
+ goto fail_kmem_cache_alloc;
}
INIT_LIST_HEAD(&rx_pkt->link);
- INIT_WORK(&rx_pkt->work, ipa_wq_handle_rx);
- rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, GFP_KERNEL);
+ rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, flag);
if (rx_pkt->skb == NULL) {
IPAERR("failed to alloc skb\n");
goto fail_skb_alloc;
@@ -1133,10 +1147,8 @@
goto fail_dma_mapping;
}
- spin_lock_irqsave(&sys->spinlock, irq_flags);
list_add_tail(&rx_pkt->link, &sys->head_desc_list);
rx_len_cached = ++sys->len;
- spin_unlock_irqrestore(&sys->spinlock, irq_flags);
ret = sps_transfer_one(sys->ep->ep_hdl, rx_pkt->dma_address,
IPA_RX_SKB_SIZE, rx_pkt,
@@ -1146,27 +1158,41 @@
IPAERR("sps_transfer_one failed %d\n", ret);
goto fail_sps_transfer;
}
-
- IPADBG("++curr_cnt=%d\n", sys->len);
}
+ ipa_ctx->stats.rx_q_len = sys->len;
+
return;
fail_sps_transfer:
- spin_lock_irqsave(&sys->spinlock, irq_flags);
list_del(&rx_pkt->link);
- --sys->len;
- spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+ rx_len_cached = --sys->len;
dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
DMA_FROM_DEVICE);
fail_dma_mapping:
- dev_kfree_skb_any(rx_pkt->skb);
+ dev_kfree_skb(rx_pkt->skb);
fail_skb_alloc:
kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
-
+fail_kmem_cache_alloc:
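+ /*
+ * If the ring drained completely and replenishing failed, retry from a
+ * delayed work so the Rx pipe does not stall indefinitely.
+ */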
+ if (rx_len_cached == 0) {
+ IPA_STATS_INC_CNT(ipa_ctx->stats.rx_repl_repost);
+ schedule_delayed_work(&replenish_rx_work,
+ msecs_to_jiffies(100));
+ }
+ ipa_ctx->stats.rx_q_len = sys->len;
return;
}
+static void replenish_rx_work_func(struct work_struct *work)
+{
+ ipa_replenish_rx_cache();
+}
+
+static void switch_to_intr_work_func(struct work_struct *work)
+{
+ ipa_handle_rx();
+}
+
/**
* ipa_cleanup_rx() - release RX queue resources
*
@@ -1175,18 +1201,15 @@
{
struct ipa_rx_pkt_wrapper *rx_pkt;
struct ipa_rx_pkt_wrapper *r;
- unsigned long irq_flags;
struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
- spin_lock_irqsave(&sys->spinlock, irq_flags);
list_for_each_entry_safe(rx_pkt, r,
&sys->head_desc_list, link) {
list_del(&rx_pkt->link);
dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
DMA_FROM_DEVICE);
- dev_kfree_skb_any(rx_pkt->skb);
+ dev_kfree_skb(rx_pkt->skb);
kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
}
- spin_unlock_irqrestore(&sys->spinlock, irq_flags);
}
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
index 14195d7..cb8c0f5 100644
--- a/drivers/platform/msm/ipa/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -110,7 +110,7 @@
#define IPA_EVENT_THRESHOLD 0x10
-#define IPA_RX_POOL_CEIL 24
+#define IPA_RX_POOL_CEIL 32
#define IPA_RX_SKB_SIZE 2048
#define IPA_DFLT_HDR_NAME "ipa_excp_hdr"
@@ -422,7 +422,7 @@
void *user2;
struct ipa_sys_context *sys;
struct ipa_mem_buffer mult;
- u16 cnt;
+ u32 cnt;
void *bounce;
};
@@ -453,16 +453,14 @@
* struct ipa_rx_pkt_wrapper - IPA Rx packet wrapper
* @skb: skb
* @dma_address: DMA address of this Rx packet
- * @work: work struct for current Rx packet
* @link: linked to the Rx packets on that pipe
* @len: how many bytes are copied into skb's flat buffer
*/
struct ipa_rx_pkt_wrapper {
struct sk_buff *skb;
dma_addr_t dma_address;
- struct work_struct work;
struct list_head link;
- u16 len;
+ u32 len;
};
/**
@@ -527,6 +525,9 @@
u32 rx_pkts;
u32 rx_excp_pkts[MAX_NUM_EXCP];
u32 bridged_pkts[IPA_BRIDGE_TYPE_MAX][IPA_BRIDGE_DIR_MAX];
+ u32 rx_repl_repost;
+ u32 x_intr_repost;
+ u32 rx_q_len;
};
/**
@@ -629,7 +630,7 @@
uint aggregation_type;
uint aggregation_byte_limit;
uint aggregation_time_limit;
- uint curr_polling_state;
+ atomic_t curr_polling_state;
struct delayed_work poll_work;
bool hdr_tbl_lcl;
struct ipa_mem_buffer hdr_mem;
@@ -742,7 +743,7 @@
u32 *consumer_handle);
int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
bool in_atomic);
-int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc,
+int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
bool in_atomic);
int ipa_get_ep_mapping(enum ipa_operating_mode mode,
enum ipa_client_type client);
@@ -783,8 +784,7 @@
void ipa_cleanup_rx(void);
int ipa_cfg_filter(u32 disable);
void ipa_wq_write_done(struct work_struct *work);
-void ipa_wq_handle_rx(struct work_struct *work);
-int ipa_handle_rx_core(bool process_all);
+int ipa_handle_rx_core(bool process_all, bool in_poll_state);
int ipa_pipe_mem_init(u32 start_ofst, u32 size);
int ipa_pipe_mem_alloc(u32 *ofst, u32 size);
int ipa_pipe_mem_free(u32 ofst, u32 size);
@@ -823,4 +823,8 @@
int a2_mux_init(void);
int a2_mux_exit(void);
+void wwan_cleanup(void);
+
+int teth_bridge_driver_init(void);
+
#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/teth_bridge.c b/drivers/platform/msm/ipa/teth_bridge.c
new file mode 100644
index 0000000..76e2eee
--- /dev/null
+++ b/drivers/platform/msm/ipa/teth_bridge.c
@@ -0,0 +1,1483 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <mach/bam_dmux.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "ipa_i.h"
+
+#define TETH_BRIDGE_DRV_NAME "ipa_tethering_bridge"
+
+#ifdef TETH_DEBUG
+#define TETH_DBG(fmt, args...) \
+ pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args)
+#define TETH_DBG_FUNC_ENTRY() \
+ pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d ENTRY\n", __func__, __LINE__)
+#define TETH_DBG_FUNC_EXIT() \
+ pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d EXIT\n", __func__, __LINE__)
+#else
+#define TETH_DBG(fmt, args...)
+#define TETH_DBG_FUNC_ENTRY()
+#define TETH_DBG_FUNC_EXIT()
+#endif
+
+#define TETH_ERR(fmt, args...) \
+ pr_err(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+#define USB_ETH_HDR_NAME_IPV4 "usb_bridge_ipv4"
+#define USB_ETH_HDR_NAME_IPV6 "usb_bridge_ipv6"
+#define A2_ETH_HDR_NAME_IPV4 "a2_bridge_ipv4"
+#define A2_ETH_HDR_NAME_IPV6 "a2_bridge_ipv6"
+
+#define USB_TO_A2_RT_TBL_NAME_IPV4 "usb_a2_rt_tbl_ipv4"
+#define A2_TO_USB_RT_TBL_NAME_IPV4 "a2_usb_rt_tbl_ipv4"
+#define USB_TO_A2_RT_TBL_NAME_IPV6 "usb_a2_rt_tbl_ipv6"
+#define A2_TO_USB_RT_TBL_NAME_IPV6 "a2_usb_rt_tbl_ipv6"
+
+#define MBIM_HEADER_NAME "mbim_header"
+#define TETH_DEFAULT_AGGR_TIME_LIMIT 1
+
+#define ETHERTYPE_IPV4 0x0800
+#define ETHERTYPE_IPV6 0x86DD
+
+struct mac_addresses_type {
+ u8 host_pc_mac_addr[ETH_ALEN];
+ bool host_pc_mac_addr_known;
+ u8 device_mac_addr[ETH_ALEN];
+ bool device_mac_addr_known;
+};
+
+struct teth_bridge_ctx {
+ struct class *class;
+ dev_t dev_num;
+ struct device *dev;
+ struct cdev cdev;
+ u32 usb_ipa_pipe_hdl;
+ u32 ipa_usb_pipe_hdl;
+ u32 a2_ipa_pipe_hdl;
+ u32 ipa_a2_pipe_hdl;
+ bool is_connected;
+ enum teth_link_protocol_type link_protocol;
+ struct mac_addresses_type mac_addresses;
+ bool is_hw_bridge_complete;
+ struct teth_aggr_params aggr_params;
+ bool aggr_params_known;
+ enum teth_tethering_mode tethering_mode;
+ struct completion is_bridge_prod_up;
+ struct completion is_bridge_prod_down;
+ struct work_struct comp_hw_bridge_work;
+ bool comp_hw_bridge_in_progress;
+ struct teth_aggr_capabilities *aggr_caps;
+};
+
+static struct teth_bridge_ctx *teth_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+#define TETH_MAX_MSG_LEN 512
+static char dbg_buff[TETH_MAX_MSG_LEN];
+#endif
+
+static int add_eth_hdrs(char *hdr_name_ipv4, char *hdr_name_ipv6,
+ u8 *src_mac_addr, u8 *dst_mac_addr)
+{
+ int res;
+ struct ipa_ioc_add_hdr *hdrs;
+ struct ethhdr hdr_ipv4;
+ struct ethhdr hdr_ipv6;
+
+ TETH_DBG_FUNC_ENTRY();
+ memcpy(hdr_ipv4.h_source, src_mac_addr, ETH_ALEN);
+ memcpy(hdr_ipv4.h_dest, dst_mac_addr, ETH_ALEN);
+ hdr_ipv4.h_proto = htons(ETHERTYPE_IPV4);
+
+ memcpy(hdr_ipv6.h_source, src_mac_addr, ETH_ALEN);
+ memcpy(hdr_ipv6.h_dest, dst_mac_addr, ETH_ALEN);
+ hdr_ipv6.h_proto = htons(ETHERTYPE_IPV6);
+
+ /* Add headers to the header insertion tables */
+ hdrs = kzalloc(sizeof(struct ipa_ioc_add_hdr) +
+ 2 * sizeof(struct ipa_hdr_add), GFP_KERNEL);
+ if (hdrs == NULL) {
+ TETH_ERR("Failed allocating memory for headers !\n");
+ return -ENOMEM;
+ }
+
+ hdrs->commit = 0;
+ hdrs->num_hdrs = 2;
+
+ /* Ethernet IPv4 header */
+ strlcpy(hdrs->hdr[0].name, hdr_name_ipv4, IPA_RESOURCE_NAME_MAX);
+ hdrs->hdr[0].hdr_len = ETH_HLEN;
+ memcpy(hdrs->hdr[0].hdr, &hdr_ipv4, ETH_HLEN);
+
+ /* Ethernet IPv6 header */
+ strlcpy(hdrs->hdr[1].name, hdr_name_ipv6, IPA_RESOURCE_NAME_MAX);
+ hdrs->hdr[1].hdr_len = ETH_HLEN;
+ memcpy(hdrs->hdr[1].hdr, &hdr_ipv6, ETH_HLEN);
+
+ res = ipa_add_hdr(hdrs);
+ if (res || hdrs->hdr[0].status || hdrs->hdr[1].status)
+ TETH_ERR("Header insertion failed\n");
+ kfree(hdrs);
+ TETH_DBG_FUNC_EXIT();
+
+ return res;
+}
+
+static int configure_ipa_header_block_internal(u32 usb_ipa_hdr_len,
+ u32 a2_ipa_hdr_len,
+ u32 ipa_usb_hdr_len,
+ u32 ipa_a2_hdr_len)
+{
+ struct ipa_ep_cfg_hdr hdr_cfg;
+ int res;
+
+ TETH_DBG_FUNC_ENTRY();
+ /* Configure header removal for the USB->IPA pipe and A2->IPA pipe */
+ memset(&hdr_cfg, 0, sizeof(hdr_cfg));
+ hdr_cfg.hdr_len = usb_ipa_hdr_len;
+ res = ipa_cfg_ep_hdr(teth_ctx->usb_ipa_pipe_hdl, &hdr_cfg);
+ if (res) {
+ TETH_ERR("Header removal config for USB->IPA pipe failed\n");
+ goto bail;
+ }
+
+ hdr_cfg.hdr_len = a2_ipa_hdr_len;
+ res = ipa_cfg_ep_hdr(teth_ctx->a2_ipa_pipe_hdl, &hdr_cfg);
+ if (res) {
+ TETH_ERR("Header removal config for A2->IPA pipe failed\n");
+ goto bail;
+ }
+
+ /* Configure header insertion for the IPA->USB pipe and IPA->A2 pipe */
+ hdr_cfg.hdr_len = ipa_usb_hdr_len;
+ res = ipa_cfg_ep_hdr(teth_ctx->ipa_usb_pipe_hdl, &hdr_cfg);
+ if (res) {
+ TETH_ERR("Header insertion config for IPA->USB pipe failed\n");
+ goto bail;
+ }
+
+ hdr_cfg.hdr_len = ipa_a2_hdr_len;
+ res = ipa_cfg_ep_hdr(teth_ctx->ipa_a2_pipe_hdl, &hdr_cfg);
+ if (res) {
+ TETH_ERR("Header insertion config for IPA->A2 pipe failed\n");
+ goto bail;
+ }
+ TETH_DBG_FUNC_EXIT();
+
+bail:
+ return res;
+}
+
+static int add_mbim_hdr(void)
+{
+ int res;
+ struct ipa_ioc_add_hdr *mbim_hdr;
+ u8 mbim_stream_id = 0;
+
+ TETH_DBG_FUNC_ENTRY();
+ mbim_hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) +
+ sizeof(struct ipa_hdr_add),
+ GFP_KERNEL);
+ if (!mbim_hdr) {
+ TETH_ERR("Failed allocating memory for MBIM header\n");
+ return -ENOMEM;
+ }
+
+ mbim_hdr->commit = 0;
+ mbim_hdr->num_hdrs = 1;
+ strlcpy(mbim_hdr->hdr[0].name, MBIM_HEADER_NAME, IPA_RESOURCE_NAME_MAX);
+ memcpy(mbim_hdr->hdr[0].hdr, &mbim_stream_id, sizeof(u8));
+ mbim_hdr->hdr[0].hdr_len = sizeof(u8);
+ mbim_hdr->hdr[0].is_partial = false;
+ res = ipa_add_hdr(mbim_hdr);
+ if (res || mbim_hdr->hdr[0].status) {
+ TETH_ERR("Failed adding MBIM header\n");
+ res = -EFAULT;
+ } else {
+ TETH_DBG("Added MBIM stream ID header\n");
+ }
+ kfree(mbim_hdr);
+ TETH_DBG_FUNC_EXIT();
+
+ return res;
+}
+
+static int configure_ipa_header_block(void)
+{
+ int res;
+ u32 hdr_len = 0;
+ u32 ipa_usb_hdr_len = 0;
+
+ TETH_DBG_FUNC_ENTRY();
+ if (teth_ctx->link_protocol == TETH_LINK_PROTOCOL_IP) {
+ /*
+ * Create a new header for MBIM stream ID and associate it with
+ * the IPA->USB routing table
+ */
+ if (teth_ctx->aggr_params.dl.aggr_prot ==
+ TETH_AGGR_PROTOCOL_MBIM) {
+ ipa_usb_hdr_len = 1;
+ res = add_mbim_hdr();
+ if (res) {
+ TETH_ERR("Failed adding MBIM header\n");
+ goto bail;
+ }
+ }
+ } else if (teth_ctx->link_protocol == TETH_LINK_PROTOCOL_ETHERNET) {
+ /* Add a header entry for USB */
+ res = add_eth_hdrs(USB_ETH_HDR_NAME_IPV4,
+ USB_ETH_HDR_NAME_IPV6,
+ teth_ctx->mac_addresses.host_pc_mac_addr,
+ teth_ctx->mac_addresses.device_mac_addr);
+ if (res) {
+ TETH_ERR("Failed adding USB Ethernet header\n");
+ goto bail;
+ }
+ TETH_DBG("Added USB Ethernet headers (IPv4 / IPv6)\n");
+
+ /* Add a header entry for A2 */
+ res = add_eth_hdrs(A2_ETH_HDR_NAME_IPV4,
+ A2_ETH_HDR_NAME_IPV6,
+ teth_ctx->mac_addresses.device_mac_addr,
+ teth_ctx->mac_addresses.host_pc_mac_addr);
+ if (res) {
+ TETH_ERR("Failed adding A2 Ethernet header\n");
+ goto bail;
+ }
+ TETH_DBG("Added A2 Ethernet headers (IPv4 / IPv6\n");
+
+ hdr_len = ETH_HLEN;
+ ipa_usb_hdr_len = ETH_HLEN;
+ }
+
+ res = configure_ipa_header_block_internal(hdr_len,
+ hdr_len,
+ ipa_usb_hdr_len,
+ hdr_len);
+ if (res) {
+ TETH_ERR("Configuration of header removal/insertion failed\n");
+ goto bail;
+ }
+
+ res = ipa_commit_hdr();
+ if (res) {
+ TETH_ERR("Failed committing headers\n");
+ goto bail;
+ }
+ TETH_DBG_FUNC_EXIT();
+
+bail:
+ return res;
+}
+
+static int configure_routing_by_ip(char *hdr_name,
+ char *rt_tbl_name,
+ enum ipa_client_type dst,
+ enum ipa_ip_type ip_address_family)
+{
+
+ struct ipa_ioc_add_rt_rule *rt_rule;
+ struct ipa_ioc_get_hdr hdr_info;
+ int res;
+
+ TETH_DBG_FUNC_ENTRY();
+ /* Get the header handle */
+ memset(&hdr_info, 0, sizeof(hdr_info));
+ strlcpy(hdr_info.name, hdr_name, IPA_RESOURCE_NAME_MAX);
+ ipa_get_hdr(&hdr_info);
+
+ rt_rule = kzalloc(sizeof(struct ipa_ioc_add_rt_rule) +
+ 1 * sizeof(struct ipa_rt_rule_add),
+ GFP_KERNEL);
+ if (!rt_rule) {
+ TETH_ERR("Memory allocation failure");
+ return -ENOMEM;
+ }
+
+ /* Match all, do not commit to HW*/
+ rt_rule->commit = 0;
+ rt_rule->num_rules = 1;
+ rt_rule->ip = ip_address_family;
+ strlcpy(rt_rule->rt_tbl_name, rt_tbl_name, IPA_RESOURCE_NAME_MAX);
+ rt_rule->rules[0].rule.dst = dst;
+ rt_rule->rules[0].rule.hdr_hdl = hdr_info.hdl;
+ rt_rule->rules[0].rule.attrib.attrib_mask = 0; /* Match all */
+ res = ipa_add_rt_rule(rt_rule);
+ if (res || rt_rule->rules[0].status)
+ TETH_ERR("Failed adding routing rule\n");
+ kfree(rt_rule);
+ TETH_DBG_FUNC_EXIT();
+
+ return res;
+}
+
+static int configure_routing(char *hdr_name_ipv4,
+ char *rt_tbl_name_ipv4,
+ char *hdr_name_ipv6,
+ char *rt_tbl_name_ipv6,
+ enum ipa_client_type dst)
+{
+ int res;
+
+ TETH_DBG_FUNC_ENTRY();
+ /* Configure IPv4 routing table */
+ res = configure_routing_by_ip(hdr_name_ipv4,
+ rt_tbl_name_ipv4,
+ dst,
+ IPA_IP_v4);
+ if (res) {
+ TETH_ERR("Failed adding IPv4 routing table\n");
+ goto bail;
+ }
+
+ /* Configure IPv6 routing table */
+ res = configure_routing_by_ip(hdr_name_ipv6,
+ rt_tbl_name_ipv6,
+ dst,
+ IPA_IP_v6);
+ if (res) {
+ TETH_ERR("Failed adding IPv6 routing table\n");
+ goto bail;
+ }
+ TETH_DBG_FUNC_EXIT();
+
+bail:
+ return res;
+}
+
+static int configure_ipa_routing_block(void)
+{
+ int res;
+ char hdr_name_ipv4[IPA_RESOURCE_NAME_MAX];
+ char hdr_name_ipv6[IPA_RESOURCE_NAME_MAX];
+
+ TETH_DBG_FUNC_ENTRY();
+ hdr_name_ipv4[0] = '\0';
+ hdr_name_ipv6[0] = '\0';
+
+ /* Configure USB -> A2 routing table */
+ if (teth_ctx->link_protocol == TETH_LINK_PROTOCOL_ETHERNET) {
+ strlcpy(hdr_name_ipv4,
+ A2_ETH_HDR_NAME_IPV4,
+ IPA_RESOURCE_NAME_MAX);
+ strlcpy(hdr_name_ipv6,
+ A2_ETH_HDR_NAME_IPV6,
+ IPA_RESOURCE_NAME_MAX);
+ }
+ res = configure_routing(hdr_name_ipv4,
+ USB_TO_A2_RT_TBL_NAME_IPV4,
+ hdr_name_ipv6,
+ USB_TO_A2_RT_TBL_NAME_IPV6,
+ IPA_CLIENT_A2_TETHERED_CONS);
+ if (res) {
+ TETH_ERR("USB to A2 routing block configuration failed\n");
+ goto bail;
+ }
+
+ /* Configure A2 -> USB routing table */
+ if (teth_ctx->link_protocol == TETH_LINK_PROTOCOL_ETHERNET) {
+ strlcpy(hdr_name_ipv4,
+ USB_ETH_HDR_NAME_IPV4,
+ IPA_RESOURCE_NAME_MAX);
+ strlcpy(hdr_name_ipv6,
+ USB_ETH_HDR_NAME_IPV6,
+ IPA_RESOURCE_NAME_MAX);
+ } else if (teth_ctx->aggr_params.dl.aggr_prot ==
+ TETH_AGGR_PROTOCOL_MBIM) {
+ strlcpy(hdr_name_ipv4,
+ MBIM_HEADER_NAME,
+ IPA_RESOURCE_NAME_MAX);
+ strlcpy(hdr_name_ipv6,
+ MBIM_HEADER_NAME,
+ IPA_RESOURCE_NAME_MAX);
+ }
+ res = configure_routing(hdr_name_ipv4,
+ A2_TO_USB_RT_TBL_NAME_IPV4,
+ hdr_name_ipv6,
+ A2_TO_USB_RT_TBL_NAME_IPV6,
+ IPA_CLIENT_USB_CONS);
+ if (res) {
+ TETH_ERR("A2 to USB routing block configuration failed\n");
+ goto bail;
+ }
+
+ /* Commit all the changes to HW in one shot */
+ res = ipa_commit_rt(IPA_IP_v4);
+ if (res) {
+ TETH_ERR("Failed commiting IPv4 routing tables\n");
+ goto bail;
+ }
+ res = ipa_commit_rt(IPA_IP_v6);
+ if (res) {
+ TETH_ERR("Failed commiting IPv6 routing tables\n");
+ goto bail;
+ }
+ TETH_DBG_FUNC_EXIT();
+
+bail:
+ return res;
+}
+
+static int configure_filtering_by_ip(char *rt_tbl_name,
+ enum ipa_client_type src,
+ enum ipa_ip_type ip_address_family)
+{
+ struct ipa_ioc_add_flt_rule *flt_tbl;
+ struct ipa_ioc_get_rt_tbl rt_tbl_info;
+ int res;
+
+ TETH_DBG_FUNC_ENTRY();
+ /* Get the needed routing table handle */
+ rt_tbl_info.ip = ip_address_family;
+ strlcpy(rt_tbl_info.name, rt_tbl_name, IPA_RESOURCE_NAME_MAX);
+ res = ipa_get_rt_tbl(&rt_tbl_info);
+ if (res) {
+ TETH_ERR("Failed getting routing table handle\n");
+ goto bail;
+ }
+
+ flt_tbl = kzalloc(sizeof(struct ipa_ioc_add_flt_rule) +
+ 1 * sizeof(struct ipa_flt_rule_add), GFP_KERNEL);
+ if (!flt_tbl) {
+ TETH_ERR("Filtering table memory allocation failure\n");
+ return -ENOMEM;
+ }
+
+ flt_tbl->commit = 0;
+ flt_tbl->ep = src;
+ flt_tbl->global = 0;
+ flt_tbl->ip = ip_address_family;
+ flt_tbl->num_rules = 1;
+ flt_tbl->rules[0].rule.action = IPA_PASS_TO_ROUTING;
+ flt_tbl->rules[0].rule.rt_tbl_hdl = rt_tbl_info.hdl;
+ flt_tbl->rules[0].rule.attrib.attrib_mask = 0; /* Match all */
+
+ res = ipa_add_flt_rule(flt_tbl);
+ if (res || flt_tbl->rules[0].status)
+ TETH_ERR("Failed adding filtering table\n");
+ kfree(flt_tbl);
+ TETH_DBG_FUNC_EXIT();
+
+bail:
+ return res;
+}
+
+static int configure_filtering(char *rt_tbl_name_ipv4,
+ char *rt_tbl_name_ipv6,
+ enum ipa_client_type src)
+{
+ int res;
+
+ TETH_DBG_FUNC_ENTRY();
+ res = configure_filtering_by_ip(rt_tbl_name_ipv4, src, IPA_IP_v4);
+ if (res) {
+ TETH_ERR("Failed adding IPv4 filtering table\n");
+ goto bail;
+ }
+
+ res = configure_filtering_by_ip(rt_tbl_name_ipv6, src, IPA_IP_v6);
+ if (res) {
+ TETH_ERR("Failed adding IPv4 filtering table\n");
+ goto bail;
+ }
+ TETH_DBG_FUNC_EXIT();
+
+bail:
+ return res;
+}
+
+static int configure_ipa_filtering_block(void)
+{
+ int res;
+
+ TETH_DBG_FUNC_ENTRY();
+ /* Filter all traffic coming from USB to A2 */
+ res = configure_filtering(USB_TO_A2_RT_TBL_NAME_IPV4,
+ USB_TO_A2_RT_TBL_NAME_IPV6,
+ IPA_CLIENT_USB_PROD);
+ if (res) {
+ TETH_ERR("USB_PROD ep filtering configuration failed\n");
+ goto bail;
+ }
+
+ /* Filter all traffic coming from A2 to USB */
+ res = configure_filtering(A2_TO_USB_RT_TBL_NAME_IPV4,
+ A2_TO_USB_RT_TBL_NAME_IPV6,
+ IPA_CLIENT_A2_TETHERED_PROD);
+ if (res) {
+ TETH_ERR("A2_PROD filtering configuration failed\n");
+ goto bail;
+ }
+
+ /* Commit all the changes to HW in one shot */
+ res = ipa_commit_flt(IPA_IP_v4);
+ if (res) {
+ TETH_ERR("Failed commiting IPv4 filtering tables\n");
+ goto bail;
+ }
+ res = ipa_commit_flt(IPA_IP_v6);
+ if (res) {
+ TETH_ERR("Failed commiting IPv6 filtering tables\n");
+ goto bail;
+ }
+ TETH_DBG_FUNC_EXIT();
+
+bail:
+ return res;
+}
+
+static int prepare_ipa_aggr_struct(
+ const struct teth_aggr_params_link *teth_aggr_params,
+ struct ipa_ep_cfg_aggr *ipa_aggr_params,
+ bool client_is_prod)
+{
+ TETH_DBG_FUNC_ENTRY();
+ memset(ipa_aggr_params, 0, sizeof(*ipa_aggr_params));
+
+ switch (teth_aggr_params->aggr_prot) {
+ case TETH_AGGR_PROTOCOL_NONE:
+ ipa_aggr_params->aggr_en = IPA_BYPASS_AGGR;
+ break;
+ case TETH_AGGR_PROTOCOL_MBIM:
+ ipa_aggr_params->aggr = IPA_MBIM_16;
+ ipa_aggr_params->aggr_en = (client_is_prod) ?
+ IPA_ENABLE_DEAGGR : IPA_ENABLE_AGGR;
+ break;
+ case TETH_AGGR_PROTOCOL_TLP:
+ ipa_aggr_params->aggr = IPA_TLP;
+ ipa_aggr_params->aggr_en = (client_is_prod) ?
+ IPA_ENABLE_DEAGGR : IPA_ENABLE_AGGR;
+ break;
+ default:
+ TETH_ERR("Unsupported aggregation protocol\n");
+ return -EFAULT;
+ }
+
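+ /*
+ * The IPA aggregation byte limit is assumed to be programmed in KB,
+ * hence the division by 1024.
+ */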
+ ipa_aggr_params->aggr_byte_limit =
+ teth_aggr_params->max_transfer_size_byte / 1024;
+ ipa_aggr_params->aggr_time_limit = TETH_DEFAULT_AGGR_TIME_LIMIT;
+ TETH_DBG_FUNC_EXIT();
+
+ return 0;
+}
+
+static int teth_set_aggr_per_ep(
+ const struct teth_aggr_params_link *teth_aggr_params,
+ bool client_is_prod,
+ u32 pipe_hdl)
+{
+ struct ipa_ep_cfg_aggr agg_params;
+ struct ipa_ep_cfg_hdr hdr_params;
+ int res;
+
+ TETH_DBG_FUNC_ENTRY();
+ res = prepare_ipa_aggr_struct(teth_aggr_params,
+ &agg_params,
+ client_is_prod);
+ if (res) {
+ TETH_ERR("prepare_ipa_aggregation_struct() failed\n");
+ goto bail;
+ }
+
+ res = ipa_cfg_ep_aggr(pipe_hdl, &agg_params);
+ if (res) {
+ TETH_ERR("ipa_cfg_ep_aggr() failed\n");
+ goto bail;
+ }
+
+ if (!client_is_prod) {
+ memset(&hdr_params, 0, sizeof(hdr_params));
+ hdr_params.hdr_len = 1;
+ res = ipa_cfg_ep_hdr(pipe_hdl, &hdr_params);
+ if (res) {
+ TETH_ERR("ipa_cfg_ep_hdr() failed\n");
+ goto bail;
+ }
+ }
+ TETH_DBG_FUNC_EXIT();
+
+bail:
+ return res;
+}
+
+static void aggr_prot_to_str(enum teth_aggr_protocol_type aggr_prot,
+ char *buff,
+ uint buff_size)
+{
+ switch (aggr_prot) {
+ case TETH_AGGR_PROTOCOL_NONE:
+ strlcpy(buff, "NONE", buff_size);
+ break;
+ case TETH_AGGR_PROTOCOL_MBIM:
+ strlcpy(buff, "MBIM", buff_size);
+ break;
+ case TETH_AGGR_PROTOCOL_TLP:
+ strlcpy(buff, "TLP", buff_size);
+ break;
+ default:
+ strlcpy(buff, "ERROR", buff_size);
+ break;
+ }
+}
+
+static int teth_set_aggregation(void)
+{
+ int res;
+ char aggr_prot_str[20];
+
+ TETH_DBG_FUNC_ENTRY();
+ if (teth_ctx->aggr_params.ul.aggr_prot == TETH_AGGR_PROTOCOL_MBIM ||
+ teth_ctx->aggr_params.dl.aggr_prot == TETH_AGGR_PROTOCOL_MBIM) {
+ res = ipa_set_aggr_mode(IPA_MBIM);
+ if (res) {
+ TETH_ERR("ipa_set_aggr_mode() failed\n");
+ goto bail;
+ }
+ res = ipa_set_single_ndp_per_mbim(false);
+ if (res) {
+ TETH_ERR("ipa_set_single_ndp_per_mbim() failed\n");
+ goto bail;
+ }
+ }
+
+ aggr_prot_to_str(teth_ctx->aggr_params.ul.aggr_prot,
+ aggr_prot_str,
+ sizeof(aggr_prot_str)-1);
+ TETH_DBG("Setting %s aggregation on UL\n", aggr_prot_str);
+ aggr_prot_to_str(teth_ctx->aggr_params.dl.aggr_prot,
+ aggr_prot_str,
+ sizeof(aggr_prot_str)-1);
+ TETH_DBG("Setting %s aggregation on DL\n", aggr_prot_str);
+
+ /* Configure aggregation on UL producer (USB->IPA) */
+ res = teth_set_aggr_per_ep(&teth_ctx->aggr_params.ul,
+ true,
+ teth_ctx->usb_ipa_pipe_hdl);
+ if (res) {
+ TETH_ERR("teth_set_aggregation_per_ep() failed\n");
+ goto bail;
+ }
+
+ /* Configure aggregation on DL consumer (IPA->USB) */
+ res = teth_set_aggr_per_ep(&teth_ctx->aggr_params.dl,
+ false,
+ teth_ctx->ipa_usb_pipe_hdl);
+ if (res) {
+ TETH_ERR("teth_set_aggregation_per_ep() failed\n");
+ goto bail;
+ }
+ TETH_DBG_FUNC_EXIT();
+bail:
+ return res;
+}
+
+static void complete_hw_bridge(struct work_struct *work)
+{
+ int res;
+ static DEFINE_MUTEX(f_lock);
+
+ mutex_lock(&f_lock);
+
+ TETH_DBG_FUNC_ENTRY();
+ TETH_DBG("Completing HW bridge in %s mode\n",
+ (teth_ctx->link_protocol == TETH_LINK_PROTOCOL_ETHERNET) ?
+ "ETHERNET" :
+ "IP");
+
+ res = teth_set_aggregation();
+ if (res) {
+ TETH_ERR("Failed setting aggregation params\n");
+ goto bail;
+ }
+
+ /*
+ * Reset the Header, Routing and Filtering blocks.
+ * Resetting the Header block will also reset the other blocks.
+ * This reset is not committed to HW.
+ */
+ res = ipa_reset_hdr();
+ if (res) {
+ TETH_ERR("Failed resetting IPA\n");
+ goto bail;
+ }
+
+ res = configure_ipa_header_block();
+ if (res) {
+ TETH_ERR("Configuration of IPA header block Failed\n");
+ goto bail;
+ }
+
+ res = configure_ipa_routing_block();
+ if (res) {
+ TETH_ERR("Configuration of IPA routing block Failed\n");
+ goto bail;
+ }
+
+ res = configure_ipa_filtering_block();
+ if (res) {
+ TETH_ERR("Configuration of IPA filtering block Failed\n");
+ goto bail;
+ }
+
+ teth_ctx->is_hw_bridge_complete = true;
+ teth_ctx->comp_hw_bridge_in_progress = false;
+bail:
+ mutex_unlock(&f_lock);
+ TETH_DBG_FUNC_EXIT();
+
+ return;
+}
+
+static void mac_addr_to_str(u8 mac_addr[ETH_ALEN],
+ char *buff,
+ uint buff_size)
+{
+ scnprintf(buff, buff_size, "%02x-%02x-%02x-%02x-%02x-%02x",
+ mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5]);
+}
+
+static void check_to_complete_hw_bridge(struct sk_buff *skb,
+ u8 *my_mac_addr,
+ bool *my_mac_addr_known,
+ bool *peer_mac_addr_known)
+{
+ bool both_mac_addresses_known;
+ char mac_addr_str[20];
+
+ if ((teth_ctx->link_protocol == TETH_LINK_PROTOCOL_ETHERNET) &&
+ (!(*my_mac_addr_known))) {
+ memcpy(my_mac_addr, &skb->data[ETH_ALEN], ETH_ALEN);
+ mac_addr_to_str(my_mac_addr,
+ mac_addr_str,
+ sizeof(mac_addr_str)-1);
+ TETH_DBG("Extracted MAC addr: %s\n", mac_addr_str);
+ *my_mac_addr_known = true;
+ }
+
+ both_mac_addresses_known = *my_mac_addr_known && *peer_mac_addr_known;
+ if ((both_mac_addresses_known ||
+ (teth_ctx->link_protocol == TETH_LINK_PROTOCOL_IP)) &&
+ (!teth_ctx->comp_hw_bridge_in_progress) &&
+ (teth_ctx->aggr_params_known)) {
+ INIT_WORK(&teth_ctx->comp_hw_bridge_work, complete_hw_bridge);
+ teth_ctx->comp_hw_bridge_in_progress = true;
+ schedule_work(&teth_ctx->comp_hw_bridge_work);
+ }
+}
+
+static void usb_notify_cb(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ struct sk_buff *skb = (struct sk_buff *)data;
+ int res;
+
+ switch (evt) {
+ case IPA_RECEIVE:
+ if (!teth_ctx->is_hw_bridge_complete)
+ check_to_complete_hw_bridge(
+ skb,
+ teth_ctx->mac_addresses.host_pc_mac_addr,
+ &teth_ctx->mac_addresses.host_pc_mac_addr_known,
+ &teth_ctx->mac_addresses.device_mac_addr_known);
+
+ /* Send the packet to A2, using a2_service driver API */
+ res = a2_mux_write(A2_MUX_TETHERED_0, skb);
+ if (res) {
+ TETH_ERR("Packet send failure, dropping packet !\n");
+ dev_kfree_skb(skb);
+ }
+ break;
+
+ case IPA_WRITE_DONE:
+ dev_kfree_skb(skb);
+ break;
+
+ default:
+ TETH_ERR("Unsupported IPA event !\n");
+ WARN_ON(1);
+ }
+
+ return;
+}
+
+static void a2_notify_cb(void *user_data,
+ enum a2_mux_event_type event,
+ unsigned long data)
+{
+ struct sk_buff *skb = (struct sk_buff *)data;
+ int res;
+
+ switch (event) {
+ case A2_MUX_RECEIVE:
+ if (!teth_ctx->is_hw_bridge_complete)
+ check_to_complete_hw_bridge(
+ skb,
+ teth_ctx->mac_addresses.device_mac_addr,
+ &teth_ctx->mac_addresses.device_mac_addr_known,
+ &teth_ctx->
+ mac_addresses.host_pc_mac_addr_known);
+
+ /* Send the packet to USB */
+ res = ipa_tx_dp(IPA_CLIENT_USB_CONS, skb, NULL);
+ if (res) {
+ TETH_ERR("Packet send failure, dropping packet !\n");
+ dev_kfree_skb(skb);
+ }
+ break;
+
+ case A2_MUX_WRITE_DONE:
+ dev_kfree_skb(skb);
+ break;
+
+ default:
+ TETH_ERR("Unsupported IPA event !\n");
+ WARN_ON(1);
+ }
+
+ return;
+}
+
+static void bridge_prod_notify_cb(void *notify_cb_data,
+ enum ipa_rm_event event,
+ unsigned long data)
+{
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ complete(&teth_ctx->is_bridge_prod_up);
+ break;
+
+ case IPA_RM_RESOURCE_RELEASED:
+ complete(&teth_ctx->is_bridge_prod_down);
+ break;
+
+ default:
+ TETH_ERR("Unsupported notification!\n");
+ WARN_ON(1);
+ break;
+ }
+
+ return;
+}
+
+/**
+* teth_bridge_init() - Initialize the Tethering bridge driver
+* @usb_notify_cb_ptr: Callback function which should be used
+* by the caller. Output parameter.
+* @private_data_ptr: Data for the callback function. Should
+* be used by the caller. Output parameter.
+* Return codes: 0: success,
+* -EINVAL - Bad parameter
+* Other negative value - Failure
+*/
+int teth_bridge_init(ipa_notify_cb *usb_notify_cb_ptr, void **private_data_ptr)
+{
+ int res = 0;
+ struct ipa_rm_create_params bridge_prod_params;
+
+ TETH_DBG_FUNC_ENTRY();
+ if (usb_notify_cb_ptr == NULL) {
+ TETH_ERR("Bad parameter\n");
+ res = -EINVAL;
+ goto bail;
+ }
+
+ *usb_notify_cb_ptr = usb_notify_cb;
+ *private_data_ptr = NULL;
+
+ /* Build IPA Resource manager dependency graph */
+ bridge_prod_params.name = IPA_RM_RESOURCE_BRIDGE_PROD;
+ bridge_prod_params.reg_params.user_data = NULL;
+ bridge_prod_params.reg_params.notify_cb = bridge_prod_notify_cb;
+ res = ipa_rm_create_resource(&bridge_prod_params);
+ if (res) {
+ TETH_ERR("ipa_rm_create_resource() failed\n");
+ goto bail;
+ }
+
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
+ IPA_RM_RESOURCE_USB_CONS);
+ if (res) {
+ TETH_ERR("ipa_rm_add_dependency() failed\n");
+ goto bail;
+ }
+
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
+ IPA_RM_RESOURCE_A2_CONS);
+ if (res) {
+ TETH_ERR("ipa_rm_add_dependency() failed\n");
+ goto fail_add_dependency_1;
+ }
+
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_A2_CONS);
+ if (res) {
+ TETH_ERR("ipa_rm_add_dependency() failed\n");
+ goto fail_add_dependency_2;
+ }
+
+ res = ipa_rm_add_dependency(IPA_RM_RESOURCE_A2_PROD,
+ IPA_RM_RESOURCE_USB_CONS);
+ if (res) {
+ TETH_ERR("ipa_rm_add_dependency() failed\n");
+ goto fail_add_dependency_3;
+ }
+
+ init_completion(&teth_ctx->is_bridge_prod_up);
+ init_completion(&teth_ctx->is_bridge_prod_down);
+
+ /* The default link protocol is Ethernet */
+ teth_ctx->link_protocol = TETH_LINK_PROTOCOL_ETHERNET;
+ goto bail;
+
+fail_add_dependency_3:
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_A2_CONS);
+fail_add_dependency_2:
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
+ IPA_RM_RESOURCE_A2_CONS);
+fail_add_dependency_1:
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
+ IPA_RM_RESOURCE_USB_CONS);
+bail:
+ TETH_DBG_FUNC_EXIT();
+ return res;
+}
+EXPORT_SYMBOL(teth_bridge_init);
+
+/**
+* teth_bridge_disconnect() - Disconnect tethering bridge module
+*
+* Return codes: 0: success
+* -EPERM: Operation not permitted as the bridge is already
+* disconnected
+*/
+int teth_bridge_disconnect(void)
+{
+ int res = -EPERM;
+
+ TETH_DBG_FUNC_ENTRY();
+ if (!teth_ctx->is_connected) {
+ TETH_ERR(
+ "Trying to disconnect an already disconnected bridge\n");
+ goto bail;
+ }
+
+ teth_ctx->is_connected = false;
+
+ res = ipa_rm_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
+ if (res == -EINPROGRESS)
+ wait_for_completion(&teth_ctx->is_bridge_prod_down);
+
+bail:
+ TETH_DBG_FUNC_EXIT();
+ return res;
+}
+EXPORT_SYMBOL(teth_bridge_disconnect);
+
+/**
+* teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call
+* @connect_params: Connection info
+*
+* Return codes: 0: success
+* -EINVAL: invalid parameters
+* -EPERM: Operation not permitted as the bridge is already
+* connected
+*/
+int teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
+{
+ int res;
+ struct ipa_ep_cfg ipa_ep_cfg;
+
+ TETH_DBG_FUNC_ENTRY();
+ if (teth_ctx->is_connected) {
+ TETH_ERR("Trying to connect an already connected bridge !\n");
+ return -EPERM;
+ }
+ if (connect_params == NULL ||
+ connect_params->ipa_usb_pipe_hdl <= 0 ||
+ connect_params->usb_ipa_pipe_hdl <= 0 ||
+ connect_params->tethering_mode >= TETH_TETHERING_MODE_MAX ||
+ connect_params->tethering_mode < 0)
+ return -EINVAL;
+
+ teth_ctx->ipa_usb_pipe_hdl = connect_params->ipa_usb_pipe_hdl;
+ teth_ctx->usb_ipa_pipe_hdl = connect_params->usb_ipa_pipe_hdl;
+ teth_ctx->tethering_mode = connect_params->tethering_mode;
+
+ res = ipa_rm_request_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
+ if (res < 0) {
+ if (res == -EINPROGRESS)
+ wait_for_completion(&teth_ctx->is_bridge_prod_up);
+ else
+ goto bail;
+ }
+
+ res = a2_mux_open_channel(A2_MUX_TETHERED_0,
+ NULL,
+ a2_notify_cb);
+ if (res) {
+ TETH_ERR("a2_mux_open_channel() failed\n");
+ goto bail;
+ }
+
+ res = a2_mux_get_tethered_client_handles(A2_MUX_TETHERED_0,
+ &teth_ctx->ipa_a2_pipe_hdl,
+ &teth_ctx->a2_ipa_pipe_hdl);
+ if (res) {
+ TETH_ERR(
+ "a2_mux_get_tethered_client_handles() failed, res = %d\n", res);
+ goto bail;
+ }
+
+ /* Reset the various endpoints configuration */
+ memset(&ipa_ep_cfg, 0, sizeof(ipa_ep_cfg));
+ ipa_cfg_ep(teth_ctx->ipa_usb_pipe_hdl, &ipa_ep_cfg);
+ ipa_cfg_ep(teth_ctx->usb_ipa_pipe_hdl, &ipa_ep_cfg);
+ ipa_cfg_ep(teth_ctx->ipa_a2_pipe_hdl, &ipa_ep_cfg);
+ ipa_cfg_ep(teth_ctx->a2_ipa_pipe_hdl, &ipa_ep_cfg);
+
+ teth_ctx->is_connected = true;
+
+ if (teth_ctx->tethering_mode == TETH_TETHERING_MODE_MBIM)
+ teth_ctx->link_protocol = TETH_LINK_PROTOCOL_IP;
+ TETH_DBG_FUNC_EXIT();
+bail:
+ if (res)
+ ipa_rm_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
+ return res;
+}
+EXPORT_SYMBOL(teth_bridge_connect);
+
+static void set_aggr_default_params(struct teth_aggr_params_link *params)
+{
+ if (params->max_datagrams == 0)
+ params->max_datagrams = 16;
+ if (params->max_transfer_size_byte == 0)
+ params->max_transfer_size_byte = 16*1024;
+}
+
+static void teth_set_bridge_mode(enum teth_link_protocol_type link_protocol)
+{
+ teth_ctx->link_protocol = link_protocol;
+ teth_ctx->is_hw_bridge_complete = false;
+ memset(&teth_ctx->mac_addresses, 0, sizeof(teth_ctx->mac_addresses));
+}
+
+static long teth_bridge_ioctl(struct file *filp,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int res = 0;
+
+ TETH_DBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+ if ((_IOC_TYPE(cmd) != TETH_BRIDGE_IOC_MAGIC) ||
+ (_IOC_NR(cmd) >= TETH_BRIDGE_IOCTL_MAX)) {
+ TETH_ERR("Invalid ioctl\n");
+ return -ENOIOCTLCMD;
+ }
+
+ switch (cmd) {
+ case TETH_BRIDGE_IOC_SET_BRIDGE_MODE:
+ TETH_DBG("TETH_BRIDGE_IOC_SET_BRIDGE_MODE ioctl called\n");
+ if (teth_ctx->link_protocol != arg)
+ teth_set_bridge_mode(arg);
+ break;
+
+ case TETH_BRIDGE_IOC_SET_AGGR_PARAMS:
+ TETH_DBG("TETH_BRIDGE_IOC_SET_AGGR_PARAMS ioctl called\n");
+ res = copy_from_user(&teth_ctx->aggr_params,
+ (struct teth_aggr_params *)arg,
+ sizeof(struct teth_aggr_params));
+ if (res) {
+ TETH_ERR("Error, res = %d\n", res);
+ res = -EFAULT;
+ break;
+ }
+ set_aggr_default_params(&teth_ctx->aggr_params.dl);
+ set_aggr_default_params(&teth_ctx->aggr_params.ul);
+ teth_ctx->aggr_params_known = true;
+ break;
+
+ case TETH_BRIDGE_IOC_GET_AGGR_PARAMS:
+ TETH_DBG("TETH_BRIDGE_IOC_GET_AGGR_PARAMS ioctl called\n");
+ if (copy_to_user((u8 *)arg, (u8 *)&teth_ctx->aggr_params,
+ sizeof(struct teth_aggr_params))) {
+ res = -EFAULT;
+ break;
+ }
+ break;
+
+ case TETH_BRIDGE_IOC_GET_AGGR_CAPABILITIES:
+ {
+ u16 sz;
+ u16 pyld_sz;
+ struct teth_aggr_capabilities caps;
+
+ TETH_DBG("GET_AGGR_CAPABILITIES ioctl called\n");
+ sz = sizeof(struct teth_aggr_capabilities);
+ if (copy_from_user(&caps,
+ (struct teth_aggr_capabilities *)arg,
+ sz)) {
+ res = -EFAULT;
+ break;
+ }
+
+ if (caps.num_protocols < teth_ctx->aggr_caps->num_protocols) {
+ caps.num_protocols = teth_ctx->aggr_caps->num_protocols;
+ if (copy_to_user((struct teth_aggr_capabilities *)arg,
+ &caps,
+ sz)) {
+ res = -EFAULT;
+ break;
+ }
+ TETH_DBG("Not enough space allocated.\n");
+ res = -EAGAIN;
+ break;
+ }
+
+ pyld_sz = sz + caps.num_protocols *
+ sizeof(struct teth_aggr_params_link);
+
+ if (copy_to_user((u8 *)arg,
+ (u8 *)(teth_ctx->aggr_caps),
+ pyld_sz)) {
+ res = -EFAULT;
+ break;
+ }
+ }
+ break;
+ }
+
+ return res;
+}
+
+static void set_aggr_capabilities(void)
+{
+ u16 NUM_PROTOCOLS = 2;
+
+ teth_ctx->aggr_caps = kzalloc(sizeof(struct teth_aggr_capabilities) +
+ NUM_PROTOCOLS *
+ sizeof(struct teth_aggr_params_link),
+ GFP_KERNEL);
+ if (teth_ctx->aggr_caps == NULL) {
+ TETH_ERR("Memory alloc failed for aggregation capabilities.\n");
+ return;
+ }
+
+ teth_ctx->aggr_caps->num_protocols = NUM_PROTOCOLS;
+
+ teth_ctx->aggr_caps->prot_caps[0].aggr_prot = TETH_AGGR_PROTOCOL_MBIM;
+ teth_ctx->aggr_caps->prot_caps[0].max_datagrams = 16;
+ teth_ctx->aggr_caps->prot_caps[0].max_transfer_size_byte = 16*1024;
+
+ teth_ctx->aggr_caps->prot_caps[1].aggr_prot = TETH_AGGR_PROTOCOL_TLP;
+ teth_ctx->aggr_caps->prot_caps[1].max_datagrams = 16;
+ teth_ctx->aggr_caps->prot_caps[1].max_transfer_size_byte = 16*1024;
+}
+
+void teth_bridge_get_client_handles(u32 *producer_handle,
+ u32 *consumer_handle)
+{
+ if (producer_handle == NULL || consumer_handle == NULL)
+ return;
+
+ *producer_handle = teth_ctx->usb_ipa_pipe_hdl;
+ *consumer_handle = teth_ctx->ipa_usb_pipe_hdl;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent;
+static struct dentry *dfile_link_protocol;
+static struct dentry *dfile_get_aggr_params;
+static struct dentry *dfile_set_aggr_protocol;
+
+static ssize_t teth_debugfs_read_link_protocol(struct file *file,
+ char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ int nbytes;
+
+ nbytes = scnprintf(dbg_buff, TETH_MAX_MSG_LEN, "Link protocol = %s\n",
+ (teth_ctx->link_protocol ==
+ TETH_LINK_PROTOCOL_ETHERNET) ?
+ "ETHERNET" :
+ "IP");
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t teth_debugfs_write_link_protocol(struct file *file,
+ const char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ unsigned long missing;
+ enum teth_link_protocol_type link_protocol;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, ubuf, count);
+ if (missing)
+ return -EFAULT;
+
+ if (count > 0)
+ dbg_buff[count-1] = '\0';
+
+ if (strcmp(dbg_buff, "ETHERNET") == 0) {
+ link_protocol = TETH_LINK_PROTOCOL_ETHERNET;
+ } else if (strcmp(dbg_buff, "IP") == 0) {
+ link_protocol = TETH_LINK_PROTOCOL_IP;
+ } else {
+ TETH_ERR("Bad link protocol, got %s,\n"
+ "Use <ETHERNET> or <IP>.\n", dbg_buff);
+ return count;
+ }
+
+ teth_set_bridge_mode(link_protocol);
+
+ return count;
+}
+
+static ssize_t teth_debugfs_read_aggr_params(struct file *file,
+ char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ int nbytes = 0;
+ char aggr_str[20];
+
+ aggr_prot_to_str(teth_ctx->aggr_params.ul.aggr_prot,
+ aggr_str,
+ sizeof(aggr_str)-1);
+ nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN,
+ "Aggregation parameters for uplink:\n");
+ nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
+ " Aggregation protocol: %s\n",
+ aggr_str);
+ nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
+ " Max transfer size [byte]: %d\n",
+ teth_ctx->aggr_params.ul.max_transfer_size_byte);
+ nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
+ " Max datagrams: %d\n",
+ teth_ctx->aggr_params.ul.max_datagrams);
+
+ aggr_prot_to_str(teth_ctx->aggr_params.dl.aggr_prot,
+ aggr_str,
+ sizeof(aggr_str)-1);
+ nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN,
+ "Aggregation parameters for downlink:\n");
+ nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
+ " Aggregation protocol: %s\n",
+ aggr_str);
+ nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
+ " Max transfer size [byte]: %d\n",
+ teth_ctx->aggr_params.dl.max_transfer_size_byte);
+ nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
+ " Max datagrams: %d\n",
+ teth_ctx->aggr_params.dl.max_datagrams);
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t teth_debugfs_set_aggr_protocol(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ enum teth_aggr_protocol_type aggr_prot;
+ int res;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, ubuf, count);
+ if (missing)
+ return -EFAULT;
+
+ if (count > 0)
+ dbg_buff[count-1] = '\0';
+
+ set_aggr_default_params(&teth_ctx->aggr_params.dl);
+ set_aggr_default_params(&teth_ctx->aggr_params.ul);
+
+ if (strcmp(dbg_buff, "NONE") == 0) {
+ aggr_prot = TETH_AGGR_PROTOCOL_NONE;
+ } else if (strcmp(dbg_buff, "MBIM") == 0) {
+ aggr_prot = TETH_AGGR_PROTOCOL_MBIM;
+ } else if (strcmp(dbg_buff, "TLP") == 0) {
+ aggr_prot = TETH_AGGR_PROTOCOL_TLP;
+ } else {
+ TETH_ERR("Bad aggregation protocol, got %s,\n"
+ "Use <NONE>, <MBIM> or <TLP>.\n", dbg_buff);
+ return count;
+ }
+
+ teth_ctx->aggr_params.dl.aggr_prot = aggr_prot;
+ teth_ctx->aggr_params.ul.aggr_prot = aggr_prot;
+ teth_ctx->aggr_params_known = true;
+
+ res = teth_set_aggregation();
+ if (res)
+ TETH_ERR("Failed setting aggregation params\n");
+
+ return count;
+}
+
+const struct file_operations teth_link_protocol_ops = {
+ .read = teth_debugfs_read_link_protocol,
+ .write = teth_debugfs_write_link_protocol,
+};
+
+const struct file_operations teth_get_aggr_params_ops = {
+ .read = teth_debugfs_read_aggr_params,
+};
+
+const struct file_operations teth_set_aggr_protocol_ops = {
+ .write = teth_debugfs_set_aggr_protocol,
+};
+
+void teth_debugfs_init(void)
+{
+ const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+ const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP | S_IWOTH;
+
+ dent = debugfs_create_dir("ipa_teth", 0);
+ if (IS_ERR(dent)) {
+ IPAERR("fail to create folder ipa_teth debug_fs.\n");
+ return;
+ }
+
+ dfile_link_protocol =
+ debugfs_create_file("link_protocol", read_write_mode, dent, 0,
+ &teth_link_protocol_ops);
+ if (!dfile_link_protocol || IS_ERR(dfile_link_protocol)) {
+ IPAERR("fail to create file link_protocol\n");
+ goto fail;
+ }
+
+ dfile_get_aggr_params =
+ debugfs_create_file("get_aggr_params", read_only_mode, dent, 0,
+ &teth_get_aggr_params_ops);
+ if (!dfile_get_aggr_params || IS_ERR(dfile_get_aggr_params)) {
+ IPAERR("fail to create file get_aggr_params\n");
+ goto fail;
+ }
+
+ dfile_set_aggr_protocol =
+ debugfs_create_file("set_aggr_protocol", read_only_mode, dent,
+ 0, &teth_set_aggr_protocol_ops);
+ if (!dfile_set_aggr_protocol || IS_ERR(dfile_set_aggr_protocol)) {
+ IPAERR("fail to create file set_aggr_protocol\n");
+ goto fail;
+ }
+
+ return;
+fail:
+ debugfs_remove_recursive(dent);
+}
+#else
+void teth_debugfs_init(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+
+static const struct file_operations teth_bridge_drv_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = teth_bridge_ioctl,
+};
+
+/**
+* teth_bridge_driver_init() - Initialize tethering bridge driver
+*
+*/
+int teth_bridge_driver_init(void)
+{
+ int res;
+
+ TETH_DBG("Tethering bridge driver init\n");
+ teth_ctx = kzalloc(sizeof(*teth_ctx), GFP_KERNEL);
+ if (!teth_ctx) {
+ TETH_ERR("kzalloc err.\n");
+ return -ENOMEM;
+ }
+
+ set_aggr_capabilities();
+
+ teth_ctx->class = class_create(THIS_MODULE, TETH_BRIDGE_DRV_NAME);
+
+ res = alloc_chrdev_region(&teth_ctx->dev_num, 0, 1,
+ TETH_BRIDGE_DRV_NAME);
+ if (res) {
+ TETH_ERR("alloc_chrdev_region err.\n");
+ res = -ENODEV;
+ goto fail_alloc_chrdev_region;
+ }
+
+ teth_ctx->dev = device_create(teth_ctx->class, NULL, teth_ctx->dev_num,
+ teth_ctx, TETH_BRIDGE_DRV_NAME);
+ if (IS_ERR(teth_ctx->dev)) {
+ TETH_ERR(":device_create err.\n");
+ res = -ENODEV;
+ goto fail_device_create;
+ }
+
+ cdev_init(&teth_ctx->cdev, &teth_bridge_drv_fops);
+ teth_ctx->cdev.owner = THIS_MODULE;
+ teth_ctx->cdev.ops = &teth_bridge_drv_fops;
+
+ res = cdev_add(&teth_ctx->cdev, teth_ctx->dev_num, 1);
+ if (res) {
+ TETH_ERR(":cdev_add err=%d\n", -res);
+ res = -ENODEV;
+ goto fail_cdev_add;
+ }
+
+ teth_ctx->comp_hw_bridge_in_progress = false;
+
+ teth_debugfs_init();
+ TETH_DBG("Tethering bridge driver init OK\n");
+
+ return 0;
+fail_cdev_add:
+ device_destroy(teth_ctx->class, teth_ctx->dev_num);
+fail_device_create:
+ unregister_chrdev_region(teth_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+ kfree(teth_ctx->aggr_caps);
+ kfree(teth_ctx);
+ teth_ctx = NULL;
+
+ return res;
+}
+EXPORT_SYMBOL(teth_bridge_driver_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Tethering bridge driver");
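A minimal usage sketch of the API added above, for orientation only: a hypothetical USB-side client obtains the IPA notify callback from teth_bridge_init() and connects the bridge once both USB pipe handles are known. The teth_bridge_*() calls and the teth_bridge_connect_params fields are taken from this patch; the header path, the surrounding function names and the choice of MBIM mode are assumptions.

#include <linux/errno.h>
#include <linux/types.h>
#include <mach/teth_bridge.h>	/* assumed header exporting the API above */

static ipa_notify_cb teth_usb_notify_cb;
static void *teth_usb_notify_priv;

/* Hypothetical caller: bring the bridge up after the USB<->IPA pipes exist */
static int example_start_tethering(u32 ipa_usb_pipe_hdl, u32 usb_ipa_pipe_hdl)
{
	struct teth_bridge_connect_params connect_params;
	int ret;

	ret = teth_bridge_init(&teth_usb_notify_cb, &teth_usb_notify_priv);
	if (ret)
		return ret;

	connect_params.ipa_usb_pipe_hdl = ipa_usb_pipe_hdl;
	connect_params.usb_ipa_pipe_hdl = usb_ipa_pipe_hdl;
	connect_params.tethering_mode = TETH_TETHERING_MODE_MBIM;

	/* Requests BRIDGE_PROD, opens A2_MUX_TETHERED_0 and resets the
	 * endpoint configuration (see teth_bridge_connect() above). */
	return teth_bridge_connect(&connect_params);
}

static void example_stop_tethering(void)
{
	/* Releases BRIDGE_PROD; returns -EPERM if already disconnected */
	teth_bridge_disconnect();
}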
diff --git a/drivers/platform/msm/ssbi.c b/drivers/platform/msm/ssbi.c
index a08eb48..e0bbdd1 100644
--- a/drivers/platform/msm/ssbi.c
+++ b/drivers/platform/msm/ssbi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
* Copyright (c) 2010, Google Inc.
*
* Original authors: Code Aurora Forum
@@ -362,7 +362,7 @@
ssbi->base = ioremap(mem_res->start, resource_size(mem_res));
if (!ssbi->base) {
- pr_err("ioremap of 0x%p failed\n", (void *)mem_res->start);
+ pr_err("ioremap failed: %pr\n", mem_res);
ret = -EINVAL;
goto err_ioremap;
}
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index f87a443..03b3e0d 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -255,12 +255,9 @@
struct dentry *dent;
struct bms_notify bms_notify;
int *usb_trim_table;
- struct regulator *vreg_xoadc;
bool ext_charging;
bool ext_charge_done;
bool iusb_fine_res;
- bool final_kickstart;
- bool lockup_lpm_wrkarnd;
DECLARE_BITMAP(enabled_irqs, PM_CHG_MAX_INTS);
struct work_struct battery_id_valid_work;
int64_t batt_id_min;
@@ -296,6 +293,7 @@
int stop_chg_upon_expiry;
bool disable_aicl;
int usb_type;
+ bool disable_chg_rmvl_wrkarnd;
};
/* user space parameter to limit usb current */
@@ -311,7 +309,6 @@
static struct pm8921_chg_chip *the_chip;
-static DEFINE_SPINLOCK(lpm_lock);
#define LPM_ENABLE_BIT BIT(2)
static int pm8921_chg_set_lpm(struct pm8921_chg_chip *chip, int enable)
{
@@ -340,66 +337,11 @@
static int pm_chg_write(struct pm8921_chg_chip *chip, u16 addr, u8 reg)
{
int rc;
- unsigned long flags = 0;
- u8 temp;
- /* Disable LPM */
- if (chip->lockup_lpm_wrkarnd) {
- spin_lock_irqsave(&lpm_lock, flags);
+ rc = pm8xxx_writeb(chip->dev->parent, addr, reg);
+ if (rc)
+ pr_err("failed: addr=%03X, rc=%d\n", addr, rc);
- /*
- * This delay is to prevent exit out of 32khz mode within
- * 200uS. It could be that chg was removed just few uS before
- * this gets called.
- */
- udelay(200);
- /* no clks */
- temp = 0xD1;
- rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (rc) {
- pr_err("Error %d writing %d to CHG_TEST\n", rc, temp);
- goto release_lpm_lock;
- }
-
- /* force 19.2Mhz before reading */
- temp = 0xD3;
- rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (rc) {
- pr_err("Error %d writing %d to CHG_TEST\n", rc, temp);
- goto release_lpm_lock;
- }
-
- rc = pm8xxx_writeb(chip->dev->parent, addr, reg);
- if (rc) {
- pr_err("failed: addr=%03X, rc=%d\n", addr, rc);
- goto release_lpm_lock;
- }
-
- /* no clks */
- temp = 0xD1;
- rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (rc) {
- pr_err("Error %d writing %d to CHG_TEST\n", rc, temp);
- goto release_lpm_lock;
- }
-
- /* switch to hw clk selection */
- temp = 0xD0;
- rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (rc) {
- pr_err("Error %d writing %d to CHG_TEST\n", rc, temp);
- goto release_lpm_lock;
- }
-
- udelay(200);
-
-release_lpm_lock:
- spin_unlock_irqrestore(&lpm_lock, flags);
- } else {
- rc = pm8xxx_writeb(chip->dev->parent, addr, reg);
- if (rc)
- pr_err("failed: addr=%03X, rc=%d\n", addr, rc);
- }
return rc;
}
@@ -430,23 +372,6 @@
chip->pmic_chg_irq[irq_id]);
}
-static int is_chg_on_bat(struct pm8921_chg_chip *chip)
-{
- return !(pm_chg_get_rt_status(chip, DCIN_VALID_IRQ)
- || pm_chg_get_rt_status(chip, USBIN_VALID_IRQ));
-}
-
-static void pm8921_chg_bypass_bat_gone_debounce(struct pm8921_chg_chip *chip,
- int bypass)
-{
- int rc;
-
- rc = pm_chg_write(chip, COMPARATOR_OVERRIDE, bypass ? 0x89 : 0x88);
- if (rc) {
- pr_err("Failed to set bypass bit to %d rc=%d\n", bypass, rc);
- }
-}
-
/* Treat OverVoltage/UnderVoltage as source missing */
static int is_usb_chg_plugged_in(struct pm8921_chg_chip *chip)
{
@@ -469,35 +394,8 @@
static int pm_chg_get_fsm_state(struct pm8921_chg_chip *chip)
{
u8 temp;
- unsigned long flags = 0;
int err = 0, ret = 0;
- if (chip->lockup_lpm_wrkarnd) {
- spin_lock_irqsave(&lpm_lock, flags);
-
- /*
- * This delay is to prevent exit out of 32khz mode within
- * 200uS. It could be that chg was removed just few uS before
- * this gets called.
- */
- udelay(200);
- /* no clks */
- temp = 0xD1;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
-
- /* force 19.2Mhz before reading */
- temp = 0xD3;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
- }
-
temp = CAPTURE_FSM_STATE_CMD;
err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
if (err) {
@@ -535,29 +433,7 @@
/* get the upper 1 bit */
ret |= (temp & 0x1) << 4;
- if (chip->lockup_lpm_wrkarnd) {
- /* no clks */
- temp = 0xD1;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
-
- /* switch to hw clk selection */
- temp = 0xD0;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
-
- udelay(200);
- }
-
err_out:
- if (chip->lockup_lpm_wrkarnd)
- spin_unlock_irqrestore(&lpm_lock, flags);
if (err)
return err;
@@ -568,35 +444,8 @@
static int pm_chg_get_regulation_loop(struct pm8921_chg_chip *chip)
{
u8 temp, data;
- unsigned long flags = 0;
int err = 0;
- if (chip->lockup_lpm_wrkarnd) {
- spin_lock_irqsave(&lpm_lock, flags);
-
- /*
- * This delay is to prevent exit out of 32khz mode within
- * 200uS. It could be that chg was removed just few uS before
- * this gets called.
- */
- udelay(200);
- /* no clks */
- temp = 0xD1;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
-
- /* force 19.2Mhz before reading */
- temp = 0xD3;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
- }
-
temp = READ_BANK_6;
err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
if (err) {
@@ -610,29 +459,7 @@
goto err_out;
}
- if (chip->lockup_lpm_wrkarnd) {
- /* no clks */
- temp = 0xD1;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
-
- /* switch to hw clk selection */
- temp = 0xD0;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
-
- udelay(200);
- }
-
err_out:
- if (chip->lockup_lpm_wrkarnd)
- spin_unlock_irqrestore(&lpm_lock, flags);
if (err)
return err;
@@ -2099,10 +1926,10 @@
* This would also apply when the battery has been
* removed from the running system.
*/
- if (the_chip && !get_prop_batt_present(the_chip)
+ if (mA == 0 && the_chip && !get_prop_batt_present(the_chip)
&& !is_dc_chg_plugged_in(the_chip)) {
if (!the_chip->has_dc_supply) {
- pr_err("rejected: no other power source connected\n");
+ pr_err("rejected: no other power source mA = %d\n", mA);
return;
}
}
@@ -2377,96 +2204,9 @@
return get_prop_batt_temp(the_chip);
}
-static int __pm8921_apply_19p2mhz_kickstart(struct pm8921_chg_chip *chip)
-{
- int err;
- u8 temp;
-
-
- temp = 0xD1;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
- return err;
- }
-
- temp = 0xD3;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
- return err;
- }
-
- temp = 0xD1;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
- return err;
- }
-
- temp = 0xD5;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
- return err;
- }
-
- /* Wait a few clock cycles before re-enabling hw clock switching */
- udelay(183);
-
- temp = 0xD1;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
- return err;
- }
-
- temp = 0xD0;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
- return err;
- }
-
- /* Wait for few clock cycles before re-enabling LPM */
- udelay(32);
-
- return 0;
-}
-
-static int pm8921_apply_19p2mhz_kickstart(struct pm8921_chg_chip *chip)
-{
- int err;
- unsigned long flags = 0;
-
- spin_lock_irqsave(&lpm_lock, flags);
- err = pm8921_chg_set_lpm(chip, 0);
- if (err) {
- pr_err("Error settig LPM rc=%d\n", err);
- goto kick_err;
- }
-
- __pm8921_apply_19p2mhz_kickstart(chip);
-
-kick_err:
- err = pm8921_chg_set_lpm(chip, 1);
- if (err)
- pr_err("Error settig LPM rc=%d\n", err);
-
- spin_unlock_irqrestore(&lpm_lock, flags);
-
- return err;
-}
-
static void handle_usb_insertion_removal(struct pm8921_chg_chip *chip)
{
- int usb_present, rc = 0;
-
- if (chip->lockup_lpm_wrkarnd) {
- rc = pm8921_apply_19p2mhz_kickstart(chip);
- if (rc)
- pr_err("Failed to apply kickstart rc=%d\n", rc);
- }
+ int usb_present;
pm_chg_failed_clear(chip, 1);
usb_present = is_usb_chg_plugged_in(chip);
@@ -2476,11 +2216,6 @@
power_supply_changed(&chip->usb_psy);
power_supply_changed(&chip->batt_psy);
pm8921_bms_calibrate_hkadc();
-
- /* Enable/disable bypass if charger is on battery */
- if (chip->lockup_lpm_wrkarnd)
- pm8921_chg_bypass_bat_gone_debounce(chip,
- is_chg_on_bat(chip));
}
if (usb_present) {
schedule_delayed_work(&chip->unplug_check_work,
@@ -2496,10 +2231,6 @@
static void handle_stop_ext_chg(struct pm8921_chg_chip *chip)
{
- if (chip->lockup_lpm_wrkarnd)
- /* Enable bypass if charger is on battery */
- pm8921_chg_bypass_bat_gone_debounce(chip, is_chg_on_bat(chip));
-
if (!chip->ext_psy) {
pr_debug("external charger not registered.\n");
return;
@@ -2529,10 +2260,6 @@
unsigned long delay =
round_jiffies_relative(msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
- /* Disable bypass if charger connected and not running on bat */
- if (chip->lockup_lpm_wrkarnd)
- pm8921_chg_bypass_bat_gone_debounce(chip, is_chg_on_bat(chip));
-
if (!chip->ext_psy) {
pr_debug("external charger not registered.\n");
return;
@@ -3014,28 +2741,12 @@
pm_chg_get_fsm_state(chip),
get_prop_batt_current(chip)
);
- if (chip->lockup_lpm_wrkarnd) {
- rc = pm8921_apply_19p2mhz_kickstart(chip);
- if (rc)
- pr_err("Failed kickstart rc=%d\n", rc);
-
- /*
- * Make sure kickstart happens at least 200 ms
- * after charger has been removed.
- */
- if (chip->final_kickstart) {
- chip->final_kickstart = false;
- goto check_again_later;
- }
- }
return;
} else {
goto check_again_later;
}
}
- chip->final_kickstart = true;
-
/* AICL only for usb wall charger */
if ((active_path & USB_ACTIVE_BIT) && usb_target_ma > 0 &&
!chip->disable_aicl) {
@@ -3057,7 +2768,7 @@
pr_debug("reg_loop=0x%x usb_ma = %d\n", reg_loop, usb_ma);
ibat = get_prop_batt_current(chip);
- if (reg_loop & VIN_ACTIVE_BIT) {
+ if ((reg_loop & VIN_ACTIVE_BIT) && !chip->disable_chg_rmvl_wrkarnd) {
if (ibat > 0) {
pr_debug("revboost ibat = %d fsm = %d loop = 0x%x\n",
ibat, pm_chg_get_fsm_state(chip), reg_loop);
@@ -3077,7 +2788,8 @@
active_path, active_chg_plugged_in);
chg_gone = pm_chg_get_rt_status(chip, CHG_GONE_IRQ);
- if (chg_gone == 1 && active_chg_plugged_in == 1) {
+ if (chg_gone == 1 && active_chg_plugged_in == 1 &&
+ !chip->disable_chg_rmvl_wrkarnd) {
pr_debug("chg_gone=%d, active_chg_plugged_in = %d\n",
chg_gone, active_chg_plugged_in);
unplug_ovp_fet_open(chip);
@@ -3328,11 +3040,6 @@
else
handle_stop_ext_chg(chip);
} else {
- if (chip->lockup_lpm_wrkarnd)
- /* if no external supply call bypass debounce here */
- pm8921_chg_bypass_bat_gone_debounce(chip,
- is_chg_on_bat(chip));
-
if (dc_present)
schedule_delayed_work(&chip->unplug_check_work,
msecs_to_jiffies(UNPLUG_CHECK_WAIT_PERIOD_MS));
@@ -4164,6 +3871,91 @@
return -EINVAL;
}
+static void pm8921_chg_force_19p2mhz_clk(struct pm8921_chg_chip *chip)
+{
+ int err;
+ u8 temp;
+
+ temp = 0xD1;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+
+ temp = 0xD3;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+
+ temp = 0xD1;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+
+ temp = 0xD5;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+
+ udelay(183);
+
+ temp = 0xD1;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+
+ temp = 0xD0;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+ udelay(32);
+
+ temp = 0xD1;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+
+ temp = 0xD3;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+}
+
+static void pm8921_chg_set_hw_clk_switching(struct pm8921_chg_chip *chip)
+{
+ int err;
+ u8 temp;
+
+ temp = 0xD1;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+
+ temp = 0xD0;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+}
+
#define VREF_BATT_THERM_FORCE_ON BIT(7)
static void detect_battery_removal(struct pm8921_chg_chip *chip)
{
@@ -4195,15 +3987,8 @@
u8 subrev;
int rc, vdd_safe, fcc_uah, safety_time = DEFAULT_SAFETY_MINUTES;
- spin_lock_init(&lpm_lock);
-
- if (pm8xxx_get_version(chip->dev->parent) == PM8XXX_VERSION_8921) {
- rc = __pm8921_apply_19p2mhz_kickstart(chip);
- if (rc) {
- pr_err("Failed to apply kickstart rc=%d\n", rc);
- return rc;
- }
- }
+ /* forcing 19p2mhz before accessing any charger registers */
+ pm8921_chg_force_19p2mhz_clk(chip);
detect_battery_removal(chip);
@@ -4451,45 +4236,6 @@
return rc;
}
- if (pm8xxx_get_version(chip->dev->parent) == PM8XXX_VERSION_8921) {
- /* Clear kickstart */
- rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, 0xD0);
- if (rc) {
- pr_err("Failed to clear kickstart rc=%d\n", rc);
- return rc;
- }
-
- /* From here the lpm_workaround will be active */
- chip->lockup_lpm_wrkarnd = true;
-
- /* Enable LPM */
- pm8921_chg_set_lpm(chip, 1);
- }
-
- if (chip->lockup_lpm_wrkarnd) {
- chip->vreg_xoadc = regulator_get(chip->dev, "vreg_xoadc");
- if (IS_ERR(chip->vreg_xoadc))
- return -ENODEV;
-
- rc = regulator_set_optimum_mode(chip->vreg_xoadc, 10000);
- if (rc < 0) {
- pr_err("Failed to set configure HPM rc=%d\n", rc);
- return rc;
- }
-
- rc = regulator_set_voltage(chip->vreg_xoadc, 1800000, 1800000);
- if (rc) {
- pr_err("Failed to set L14 voltage rc=%d\n", rc);
- return rc;
- }
-
- rc = regulator_enable(chip->vreg_xoadc);
- if (rc) {
- pr_err("Failed to enable L14 rc=%d\n", rc);
- return rc;
- }
- }
-
return 0;
}
@@ -4740,19 +4486,16 @@
int rc;
struct pm8921_chg_chip *chip = dev_get_drvdata(dev);
- if (chip->lockup_lpm_wrkarnd) {
- rc = regulator_disable(chip->vreg_xoadc);
- if (rc)
- pr_err("Failed to disable L14 rc=%d\n", rc);
-
- rc = pm8921_apply_19p2mhz_kickstart(chip);
- if (rc)
- pr_err("Failed to apply kickstart rc=%d\n", rc);
- }
-
rc = pm_chg_masked_write(chip, CHG_CNTRL, VREF_BATT_THERM_FORCE_ON, 0);
if (rc)
pr_err("Failed to Force Vref therm off rc=%d\n", rc);
+
+ rc = pm8921_chg_set_lpm(chip, 1);
+ if (rc)
+ pr_err("Failed to set lpm rc=%d\n", rc);
+
+ pm8921_chg_set_hw_clk_switching(chip);
+
return 0;
}
@@ -4761,15 +4504,11 @@
int rc;
struct pm8921_chg_chip *chip = dev_get_drvdata(dev);
- if (chip->lockup_lpm_wrkarnd) {
- rc = regulator_enable(chip->vreg_xoadc);
- if (rc)
- pr_err("Failed to enable L14 rc=%d\n", rc);
+ pm8921_chg_force_19p2mhz_clk(chip);
- rc = pm8921_apply_19p2mhz_kickstart(chip);
- if (rc)
- pr_err("Failed to apply kickstart rc=%d\n", rc);
- }
+ rc = pm8921_chg_set_lpm(chip, 0);
+ if (rc)
+ pr_err("Failed to set lpm rc=%d\n", rc);
rc = pm_chg_masked_write(chip, CHG_CNTRL, VREF_BATT_THERM_FORCE_ON,
VREF_BATT_THERM_FORCE_ON);
@@ -4869,6 +4608,7 @@
chip->vin_min = pdata->vin_min;
chip->thermal_mitigation = pdata->thermal_mitigation;
chip->thermal_levels = pdata->thermal_levels;
+ chip->disable_chg_rmvl_wrkarnd = pdata->disable_chg_rmvl_wrkarnd;
chip->cold_thr = pdata->cold_thr;
chip->hot_thr = pdata->hot_thr;
@@ -5000,7 +4740,6 @@
{
struct pm8921_chg_chip *chip = platform_get_drvdata(pdev);
- regulator_put(chip->vreg_xoadc);
free_irqs(chip);
platform_set_drvdata(pdev, NULL);
the_chip = NULL;
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index eb75475..ec0b0e7 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -1095,60 +1095,6 @@
return 1;
}
-#define BMS_OVERRIDE_MODE_EN_BIT BIT(7)
-#define EN_VBAT_BIT BIT(0)
-#define OVERRIDE_MODE_DELAY_MS 20
-static int override_mode_batt_v_and_i(
- struct qpnp_bms_chip *chip, int *ibat_ua, int *vbat_uv)
-{
- int16_t vsense_raw, vbat_raw;
- int vsense_uv, rc;
- u8 delay;
-
- mutex_lock(&chip->bms_output_lock);
-
- delay = 0x00;
- rc = qpnp_write_wrapper(chip, &delay,
- chip->base + BMS1_S1_DELAY_CTL, 1);
- if (rc)
- pr_err("unable to write into BMS1_S1_DELAY, rc: %d\n", rc);
-
- rc = qpnp_masked_write(chip, BMS1_MODE_CTL,
- BMS_OVERRIDE_MODE_EN_BIT | EN_VBAT_BIT,
- BMS_OVERRIDE_MODE_EN_BIT | EN_VBAT_BIT);
- if (rc)
- pr_err("unable to write into BMS1_MODE_CTL, rc: %d\n", rc);
-
- msleep(OVERRIDE_MODE_DELAY_MS);
-
- lock_output_data(chip);
- qpnp_read_wrapper(chip, (u8 *)&vsense_raw,
- chip->base + BMS1_VSENSE_AVG_DATA0, 2);
- qpnp_read_wrapper(chip, (u8 *)&vbat_raw,
- chip->base + BMS1_VBAT_AVG_DATA0, 2);
- unlock_output_data(chip);
-
- rc = qpnp_masked_write(chip, BMS1_MODE_CTL,
- BMS_OVERRIDE_MODE_EN_BIT | EN_VBAT_BIT, 0);
-
- delay = 0x0B;
- rc = qpnp_write_wrapper(chip, &delay,
- chip->base + BMS1_S1_DELAY_CTL, 1);
- if (rc)
- pr_err("unable to write into BMS1_S1_DELAY, rc: %d\n", rc);
-
- mutex_unlock(&chip->bms_output_lock);
-
- *vbat_uv = convert_vbatt_raw_to_uv(chip, vbat_raw);
- vsense_uv = convert_vsense_to_uv(chip, vsense_raw);
- *ibat_ua = div_s64(vsense_uv * 1000000LL, (int)chip->r_sense_uohm);
-
- pr_debug("vsense_raw = 0x%x vbat_raw = 0x%x ibat_ua = %d vbat_uv = %d\n",
- (uint16_t)vsense_raw, (uint16_t)vbat_raw,
- *ibat_ua, *vbat_uv);
- return 0;
-}
-
static bool is_battery_charging(struct qpnp_bms_chip *chip)
{
union power_supply_propval ret = {0,};
@@ -1188,23 +1134,21 @@
static int get_simultaneous_batt_v_and_i(struct qpnp_bms_chip *chip,
int *ibat_ua, int *vbat_uv)
{
+ struct qpnp_iadc_result i_result;
+ struct qpnp_vadc_result v_result;
+ enum qpnp_iadc_channels iadc_channel;
int rc;
- if (is_batfet_open(chip)) {
- pr_debug("batfet is open using separate vbat and ibat meas\n");
- rc = get_battery_voltage(vbat_uv);
- if (rc < 0) {
- pr_err("adc vbat failed err = %d\n", rc);
- return rc;
- }
- rc = get_battery_current(chip, ibat_ua);
- if (rc < 0) {
- pr_err("bms ibat failed err = %d\n", rc);
- return rc;
- }
- } else {
- return override_mode_batt_v_and_i(chip, ibat_ua, vbat_uv);
+ iadc_channel = chip->use_external_rsense ?
+ EXTERNAL_RSENSE : INTERNAL_RSENSE;
+ rc = qpnp_iadc_vadc_sync_read(iadc_channel, &i_result,
+ VBAT_SNS, &v_result);
+ if (rc) {
+ pr_err("vadc read failed with rc: %d\n", rc);
+ return rc;
}
+ *ibat_ua = (int)i_result.result_ua;
+ *vbat_uv = (int)v_result.physical;
return 0;
}
@@ -1231,7 +1175,7 @@
static int reset_bms_for_test(struct qpnp_bms_chip *chip)
{
- int ibat_ua, vbat_uv, rc;
+ int ibat_ua = 0, vbat_uv = 0, rc;
int ocv_est_uv;
if (!chip) {
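For context on the qpnp_iadc_vadc_sync_read() change above: sampling vbat and ibat at the same instant is what makes a resistance-compensated OCV estimate possible without the removed override-mode register sequence. The helper below is purely illustrative and not part of the patch; rbatt_mohm would come from the BMS battery data.

#include <linux/math64.h>

/* Illustration only: with vbat/ibat sampled together, OCV can be
 * estimated as vbat + ibat * rbatt (ibat positive while discharging). */
static int example_estimate_ocv_uv(int vbat_uv, int ibat_ua, int rbatt_mohm)
{
	/* uA * mohm = nV; divide by 1000 to get uV */
	return vbat_uv + (int)div_s64((s64)ibat_ua * rbatt_mohm, 1000);
}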
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index e2ba042..7833afa 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -86,8 +86,8 @@
#define USB_OVP_CTL 0x42
#define SEC_ACCESS 0xD0
-/* SMBB peripheral subtype values */
#define REG_OFFSET_PERP_SUBTYPE 0x05
+/* SMBB peripheral subtype values */
#define SMBB_CHGR_SUBTYPE 0x01
#define SMBB_BUCK_SUBTYPE 0x02
#define SMBB_BAT_IF_SUBTYPE 0x03
@@ -96,6 +96,14 @@
#define SMBB_BOOST_SUBTYPE 0x06
#define SMBB_MISC_SUBTYPE 0x07
+/* SMBBP peripheral subtype values */
+#define SMBBP_CHGR_SUBTYPE 0x31
+#define SMBBP_BUCK_SUBTYPE 0x32
+#define SMBBP_BAT_IF_SUBTYPE 0x33
+#define SMBBP_USB_CHGPTH_SUBTYPE 0x34
+#define SMBBP_BOOST_SUBTYPE 0x36
+#define SMBBP_MISC_SUBTYPE 0x37
+
#define QPNP_CHARGER_DEV_NAME "qcom,qpnp-charger"
/* Status bits and masks */
@@ -341,6 +349,9 @@
u8 dcin_valid_rt_sts;
int rc;
+ if (!chip->dc_chgpth_base)
+ return 0;
+
rc = qpnp_chg_read(chip, &dcin_valid_rt_sts,
INT_RT_STS(chip->dc_chgpth_base), 1);
if (rc) {
@@ -1212,6 +1223,7 @@
switch (subtype) {
case SMBB_CHGR_SUBTYPE:
+ case SMBBP_CHGR_SUBTYPE:
chip->chg_done_irq = spmi_get_irq_byname(chip->spmi,
spmi_resource, "chg-done");
if (chip->chg_done_irq < 0) {
@@ -1289,6 +1301,7 @@
enable_irq_wake(chip->chg_done_irq);
break;
case SMBB_BUCK_SUBTYPE:
+ case SMBBP_BUCK_SUBTYPE:
rc = qpnp_chg_masked_write(chip,
chip->chgr_base + CHGR_BUCK_BCK_VBAT_REG_MODE,
BUCK_VBAT_REG_NODE_SEL_BIT,
@@ -1299,8 +1312,10 @@
}
break;
case SMBB_BAT_IF_SUBTYPE:
+ case SMBBP_BAT_IF_SUBTYPE:
break;
case SMBB_USB_CHGPTH_SUBTYPE:
+ case SMBBP_USB_CHGPTH_SUBTYPE:
chip->usbin_valid_irq = spmi_get_irq_byname(chip->spmi,
spmi_resource, "usbin-valid");
if (chip->usbin_valid_irq < 0) {
@@ -1361,8 +1376,10 @@
enable_irq_wake(chip->dcin_valid_irq);
break;
case SMBB_BOOST_SUBTYPE:
+ case SMBBP_BOOST_SUBTYPE:
break;
case SMBB_MISC_SUBTYPE:
+ case SMBBP_MISC_SUBTYPE:
pr_debug("Setting BOOT_DONE\n");
rc = qpnp_chg_masked_write(chip,
chip->misc_base + CHGR_MISC_BOOT_DONE,
@@ -1397,10 +1414,6 @@
return -ENOMEM;
}
- rc = qpnp_vadc_is_ready();
- if (rc)
- goto fail_chg_enable;
-
chip->dev = &(spmi->dev);
chip->spmi = spmi;
@@ -1557,6 +1570,7 @@
switch (subtype) {
case SMBB_CHGR_SUBTYPE:
+ case SMBBP_CHGR_SUBTYPE:
chip->chgr_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1566,6 +1580,7 @@
}
break;
case SMBB_BUCK_SUBTYPE:
+ case SMBBP_BUCK_SUBTYPE:
chip->buck_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1575,6 +1590,7 @@
}
break;
case SMBB_BAT_IF_SUBTYPE:
+ case SMBBP_BAT_IF_SUBTYPE:
chip->bat_if_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1584,6 +1600,7 @@
}
break;
case SMBB_USB_CHGPTH_SUBTYPE:
+ case SMBBP_USB_CHGPTH_SUBTYPE:
chip->usb_chgpth_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1602,6 +1619,7 @@
}
break;
case SMBB_BOOST_SUBTYPE:
+ case SMBBP_BOOST_SUBTYPE:
chip->boost_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1611,6 +1629,7 @@
}
break;
case SMBB_MISC_SUBTYPE:
+ case SMBBP_MISC_SUBTYPE:
chip->misc_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1628,34 +1647,44 @@
dev_set_drvdata(&spmi->dev, chip);
device_init_wakeup(&spmi->dev, 1);
- chip->dc_psy.name = "qpnp-dc";
- chip->dc_psy.type = POWER_SUPPLY_TYPE_MAINS;
- chip->dc_psy.supplied_to = pm_power_supplied_to;
- chip->dc_psy.num_supplicants = ARRAY_SIZE(pm_power_supplied_to);
- chip->dc_psy.properties = pm_power_props_mains;
- chip->dc_psy.num_properties = ARRAY_SIZE(pm_power_props_mains);
- chip->dc_psy.get_property = qpnp_power_get_property_mains;
+ if (chip->bat_if_base) {
+ rc = qpnp_vadc_is_ready();
+ if (rc)
+ goto fail_chg_enable;
- chip->batt_psy.name = "battery";
- chip->batt_psy.type = POWER_SUPPLY_TYPE_BATTERY;
- chip->batt_psy.properties = msm_batt_power_props;
- chip->batt_psy.num_properties = ARRAY_SIZE(msm_batt_power_props);
- chip->batt_psy.get_property = qpnp_batt_power_get_property;
- chip->batt_psy.set_property = qpnp_batt_power_set_property;
- chip->batt_psy.property_is_writeable = qpnp_batt_property_is_writeable;
- chip->batt_psy.external_power_changed =
+ chip->batt_psy.name = "battery";
+ chip->batt_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->batt_psy.properties = msm_batt_power_props;
+ chip->batt_psy.num_properties =
+ ARRAY_SIZE(msm_batt_power_props);
+ chip->batt_psy.get_property = qpnp_batt_power_get_property;
+ chip->batt_psy.set_property = qpnp_batt_power_set_property;
+ chip->batt_psy.property_is_writeable =
+ qpnp_batt_property_is_writeable;
+ chip->batt_psy.external_power_changed =
qpnp_batt_external_power_changed;
- rc = power_supply_register(chip->dev, &chip->batt_psy);
- if (rc < 0) {
- pr_err("power_supply_register batt failed rc = %d\n", rc);
- goto fail_chg_enable;
+ rc = power_supply_register(chip->dev, &chip->batt_psy);
+ if (rc < 0) {
+ pr_err("batt failed to register rc = %d\n", rc);
+ goto fail_chg_enable;
+ }
}
- rc = power_supply_register(chip->dev, &chip->dc_psy);
- if (rc < 0) {
- pr_err("power_supply_register usb failed rc = %d\n", rc);
- goto unregister_batt;
+ if (chip->dc_chgpth_base) {
+ chip->dc_psy.name = "qpnp-dc";
+ chip->dc_psy.type = POWER_SUPPLY_TYPE_MAINS;
+ chip->dc_psy.supplied_to = pm_power_supplied_to;
+ chip->dc_psy.num_supplicants = ARRAY_SIZE(pm_power_supplied_to);
+ chip->dc_psy.properties = pm_power_props_mains;
+ chip->dc_psy.num_properties = ARRAY_SIZE(pm_power_props_mains);
+ chip->dc_psy.get_property = qpnp_power_get_property_mains;
+
+ rc = power_supply_register(chip->dev, &chip->dc_psy);
+ if (rc < 0) {
+ pr_err("power_supply_register dc failed rc=%d\n", rc);
+ goto unregister_batt;
+ }
}
/* Turn on appropriate workaround flags */
@@ -1664,11 +1693,11 @@
power_supply_set_present(chip->usb_psy,
qpnp_chg_is_usb_chg_plugged_in(chip));
- if (chip->maxinput_dc_ma) {
+ if (chip->maxinput_dc_ma && chip->dc_chgpth_base) {
rc = qpnp_chg_idcmax_set(chip, chip->maxinput_dc_ma);
if (rc) {
pr_err("Error setting idcmax property %d\n", rc);
- goto fail_chg_enable;
+ goto unregister_batt;
}
}
@@ -1684,7 +1713,8 @@
return 0;
unregister_batt:
- power_supply_unregister(&chip->batt_psy);
+ if (chip->bat_if_base)
+ power_supply_unregister(&chip->batt_psy);
fail_chg_enable:
kfree(chip->thermal_mitigation);
kfree(chip);
diff --git a/drivers/slimbus/slim-msm-ctrl.c b/drivers/slimbus/slim-msm-ctrl.c
index 9b0b8b4..9a864aa 100644
--- a/drivers/slimbus/slim-msm-ctrl.c
+++ b/drivers/slimbus/slim-msm-ctrl.c
@@ -263,6 +263,17 @@
*/
mb();
complete(&dev->rx_msgq_notify);
+ } else if (mt == SLIM_MSG_MT_CORE &&
+ mc == SLIM_MSG_MC_REPORT_ABSENT) {
+ writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+ MGR_INT_CLR);
+ /*
+ * Guarantee that CLR bit write goes through
+ * before signalling completion
+ */
+ mb();
+ complete(&dev->rx_msgq_notify);
+
} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
mc == SLIM_MSG_MC_REPLY_VALUE) {
msm_slim_rx_enqueue(dev, rx_buf, len);
@@ -975,6 +986,10 @@
txn.wbuf = wbuf;
gen_ack = true;
ret = msm_xfer_msg(&dev->ctrl, &txn);
+ break;
+ case SLIM_MSG_MC_REPORT_ABSENT:
+ dev_info(dev->dev, "Received Report Absent Message\n");
+ break;
default:
break;
}
@@ -1087,7 +1102,8 @@
laddr = (u8)((buffer[0] >> 16) & 0xff);
sat = addr_to_sat(dev, laddr);
}
- } else if ((index * 4) >= msg_len) {
+ }
+ if ((index * 4) >= msg_len) {
index = 0;
if (sat) {
msm_sat_enqueue(sat, buffer, msg_len);
diff --git a/drivers/usb/host/ehci-msm2.c b/drivers/usb/host/ehci-msm2.c
index 40e1eea..faa5625 100644
--- a/drivers/usb/host/ehci-msm2.c
+++ b/drivers/usb/host/ehci-msm2.c
@@ -45,6 +45,7 @@
struct ehci_hcd ehci;
spinlock_t wakeup_lock;
struct device *dev;
+ struct clk *xo_clk;
struct clk *iface_clk;
struct clk *core_clk;
struct clk *alt_core_clk;
@@ -659,10 +660,14 @@
clk_disable_unprepare(mhcd->core_clk);
/* usb phy does not require TCXO clock, hence vote for TCXO disable */
- ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
- if (ret)
- dev_err(mhcd->dev, "%s failed to devote for "
- "TCXO D0 buffer%d\n", __func__, ret);
+ if (!IS_ERR(mhcd->xo_clk)) {
+ clk_disable_unprepare(mhcd->xo_clk);
+ } else {
+ ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
+ if (ret)
+ dev_err(mhcd->dev, "%s failed to devote for TCXO %d\n",
+ __func__, ret);
+ }
msm_ehci_config_vddcx(mhcd, 0);
@@ -714,10 +719,14 @@
wake_lock(&mhcd->wlock);
/* Vote for TCXO when waking up the phy */
- ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
- if (ret)
- dev_err(mhcd->dev, "%s failed to vote for "
- "TCXO D0 buffer%d\n", __func__, ret);
+ if (!IS_ERR(mhcd->xo_clk)) {
+ clk_prepare_enable(mhcd->xo_clk);
+ } else {
+ ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
+ if (ret)
+ dev_err(mhcd->dev, "%s failed to vote for TCXO D0 %d\n",
+ __func__, ret);
+ }
clk_prepare_enable(mhcd->core_clk);
clk_prepare_enable(mhcd->iface_clk);
@@ -1091,18 +1100,23 @@
}
snprintf(pdev_name, PDEV_NAME_LEN, "%s.%d", pdev->name, pdev->id);
- mhcd->xo_handle = msm_xo_get(MSM_XO_TCXO_D0, pdev_name);
- if (IS_ERR(mhcd->xo_handle)) {
- dev_err(&pdev->dev, "%s not able to get the handle "
- "to vote for TCXO D0 buffer\n", __func__);
- ret = PTR_ERR(mhcd->xo_handle);
- goto free_async_irq;
+ mhcd->xo_clk = clk_get(&pdev->dev, "xo");
+ if (!IS_ERR(mhcd->xo_clk)) {
+ ret = clk_prepare_enable(mhcd->xo_clk);
+ } else {
+ mhcd->xo_handle = msm_xo_get(MSM_XO_TCXO_D0, pdev_name);
+ if (IS_ERR(mhcd->xo_handle)) {
+ dev_err(&pdev->dev, "%s fail to get handle for X0 D0\n",
+ __func__);
+ ret = PTR_ERR(mhcd->xo_handle);
+ goto free_async_irq;
+ } else {
+ ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
+ }
}
-
- ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
if (ret) {
- dev_err(&pdev->dev, "%s failed to vote for TCXO "
- "D0 buffer%d\n", __func__, ret);
+ dev_err(&pdev->dev, "%s failed to vote for TCXO %d\n",
+ __func__, ret);
goto free_xo_handle;
}
@@ -1202,9 +1216,15 @@
deinit_clocks:
msm_ehci_init_clocks(mhcd, 0);
devote_xo_handle:
- msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
+ if (!IS_ERR(mhcd->xo_clk))
+ clk_disable_unprepare(mhcd->xo_clk);
+ else
+ msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
free_xo_handle:
- msm_xo_put(mhcd->xo_handle);
+ if (!IS_ERR(mhcd->xo_clk))
+ clk_put(mhcd->xo_clk);
+ else
+ msm_xo_put(mhcd->xo_handle);
free_async_irq:
if (mhcd->async_irq)
free_irq(mhcd->async_irq, mhcd);
@@ -1236,7 +1256,12 @@
usb_remove_hcd(hcd);
- msm_xo_put(mhcd->xo_handle);
+ if (!IS_ERR(mhcd->xo_clk)) {
+ clk_disable_unprepare(mhcd->xo_clk);
+ clk_put(mhcd->xo_clk);
+ } else {
+ msm_xo_put(mhcd->xo_handle);
+ }
msm_ehci_vbus_power(mhcd, 0);
msm_ehci_init_vbus(mhcd, 0);
msm_ehci_ldo_enable(mhcd, 0);
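The ehci-msm2 hunks above all repeat the same idea: prefer an "xo" clock from the clock framework when the platform provides one, and fall back to the legacy msm_xo voting API otherwise. A condensed sketch of that pattern, assuming the driver's private structure is named struct msm_hcd (the helper itself is hypothetical; the calls and field names are the ones used in the patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <mach/msm_xo.h>

/* Hypothetical helper condensing the pattern used in probe/suspend/resume */
static int example_usb_xo_vote(struct msm_hcd *mhcd, bool on)
{
	if (!IS_ERR(mhcd->xo_clk)) {
		if (on)
			return clk_prepare_enable(mhcd->xo_clk);
		clk_disable_unprepare(mhcd->xo_clk);
		return 0;
	}

	/* Legacy targets: vote for the TCXO D0 buffer instead */
	return msm_xo_mode_vote(mhcd->xo_handle,
				on ? MSM_XO_MODE_ON : MSM_XO_MODE_OFF);
}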
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index bd1423d..c03ca69 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -95,7 +95,7 @@
static struct regulator *hsusb_3p3;
static struct regulator *hsusb_1p8;
-static struct regulator *hsusb_vddcx;
+static struct regulator *hsusb_vdd;
static struct regulator *vbus_otg;
static struct regulator *mhl_usb_hs_switch;
static struct power_supply *psy;
@@ -111,7 +111,7 @@
#endif
}
-static const int vdd_val[VDD_TYPE_MAX][VDD_VAL_MAX] = {
+static int vdd_val[VDD_TYPE_MAX][VDD_VAL_MAX] = {
{ /* VDD_CX CORNER Voting */
[VDD_NONE] = RPM_VREG_CORNER_NONE,
[VDD_MIN] = RPM_VREG_CORNER_NOMINAL,
@@ -175,7 +175,7 @@
int ret;
min_vol = vdd_val[vdd_type][!!high];
- ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol);
+ ret = regulator_set_voltage(hsusb_vdd, min_vol, max_vol);
if (ret) {
pr_err("%s: unable to set the voltage for regulator "
"HSUSB_VDDCX\n", __func__);
@@ -3802,6 +3802,8 @@
static int __init msm_otg_probe(struct platform_device *pdev)
{
int ret = 0;
+ int len = 0;
+ u32 tmp[3];
struct resource *res;
struct msm_otg *motg;
struct usb_phy *phy;
@@ -3958,24 +3960,40 @@
clk_prepare_enable(motg->pclk);
motg->vdd_type = VDDCX_CORNER;
- hsusb_vddcx = devm_regulator_get(motg->phy.dev, "hsusb_vdd_dig");
- if (IS_ERR(hsusb_vddcx)) {
- hsusb_vddcx = devm_regulator_get(motg->phy.dev, "HSUSB_VDDCX");
- if (IS_ERR(hsusb_vddcx)) {
+ hsusb_vdd = devm_regulator_get(motg->phy.dev, "hsusb_vdd_dig");
+ if (IS_ERR(hsusb_vdd)) {
+ hsusb_vdd = devm_regulator_get(motg->phy.dev, "HSUSB_VDDCX");
+ if (IS_ERR(hsusb_vdd)) {
dev_err(motg->phy.dev, "unable to get hsusb vddcx\n");
- ret = PTR_ERR(hsusb_vddcx);
+ ret = PTR_ERR(hsusb_vdd);
goto devote_xo_handle;
}
motg->vdd_type = VDDCX;
}
+ if (pdev->dev.of_node) {
+ of_get_property(pdev->dev.of_node,
+ "qcom,vdd-voltage-level",
+ &len);
+ if (len == sizeof(tmp)) {
+ of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,vdd-voltage-level",
+ tmp, len/sizeof(*tmp));
+ vdd_val[motg->vdd_type][0] = tmp[0];
+ vdd_val[motg->vdd_type][1] = tmp[1];
+ vdd_val[motg->vdd_type][2] = tmp[2];
+ } else {
+ dev_dbg(&pdev->dev, "Using default hsusb vdd config.\n");
+ }
+ }
+
ret = msm_hsusb_config_vddcx(1);
if (ret) {
dev_err(&pdev->dev, "hsusb vddcx configuration failed\n");
goto devote_xo_handle;
}
- ret = regulator_enable(hsusb_vddcx);
+ ret = regulator_enable(hsusb_vdd);
if (ret) {
dev_err(&pdev->dev, "unable to enable the hsusb vddcx\n");
goto free_config_vddcx;
@@ -3984,7 +4002,7 @@
ret = msm_hsusb_ldo_init(motg, 1);
if (ret) {
dev_err(&pdev->dev, "hsusb vreg configuration failed\n");
- goto free_hsusb_vddcx;
+ goto free_hsusb_vdd;
}
if (pdata->mhl_enable) {
@@ -4167,10 +4185,10 @@
msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
free_ldo_init:
msm_hsusb_ldo_init(motg, 0);
-free_hsusb_vddcx:
- regulator_disable(hsusb_vddcx);
+free_hsusb_vdd:
+ regulator_disable(hsusb_vdd);
free_config_vddcx:
- regulator_set_voltage(hsusb_vddcx,
+ regulator_set_voltage(hsusb_vdd,
vdd_val[motg->vdd_type][VDD_NONE],
vdd_val[motg->vdd_type][VDD_MAX]);
devote_xo_handle:
@@ -4264,8 +4282,8 @@
}
msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
msm_hsusb_ldo_init(motg, 0);
- regulator_disable(hsusb_vddcx);
- regulator_set_voltage(hsusb_vddcx,
+ regulator_disable(hsusb_vdd);
+ regulator_set_voltage(hsusb_vdd,
vdd_val[motg->vdd_type][VDD_NONE],
vdd_val[motg->vdd_type][VDD_MAX]);
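The new qcom,vdd-voltage-level handling above expects exactly three u32 cells (the VDD_NONE/VDD_MIN/VDD_MAX votes) and silently keeps the built-in table otherwise. A sketch of one way the same property could be read, with the expected device-tree shape in the comment; the property name and the vdd_val[] indices come from the patch, the helper and the example values are illustrative.

#include <linux/kernel.h>
#include <linux/of.h>

/*
 * Expected board dts shape (values illustrative, microvolts for VDDCX):
 *	qcom,vdd-voltage-level = <0 1045000 1320000>;
 */
static void example_read_vdd_levels(struct device_node *np, int vdd_type)
{
	u32 tmp[3];

	if (!of_property_read_u32_array(np, "qcom,vdd-voltage-level",
					tmp, ARRAY_SIZE(tmp))) {
		vdd_val[vdd_type][VDD_NONE] = tmp[0];
		vdd_val[vdd_type][VDD_MIN]  = tmp[1];
		vdd_val[vdd_type][VDD_MAX]  = tmp[2];
	}
}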
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index ed0a385..a3d8d7e 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -276,6 +276,7 @@
struct mdp4_overlay_pipe *solidfill_pipe;
};
+
struct mdp4_overlay_pipe {
uint32 pipe_used;
uint32 pipe_type; /* rgb, video/graphic */
@@ -983,6 +984,8 @@
void mdp4_overlay_mdp_perf_upd(struct msm_fb_data_type *mfd, int flag);
int mdp4_update_base_blend(struct msm_fb_data_type *mfd,
struct mdp_blend_cfg *mdp_blend_cfg);
+int mdp4_update_writeback_format(struct msm_fb_data_type *mfd,
+ struct mdp_mixer_cfg *mdp_mixer_cfg);
u32 mdp4_get_mixer_num(u32 panel_type);
#ifndef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index fbae011..bfd8238 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -49,6 +49,7 @@
struct mdp4_overlay_pipe *baselayer[MDP4_MIXER_MAX];
struct blend_cfg blend[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
struct mdp4_overlay_pipe sf_plist[MDP4_MIXER_MAX][OVERLAY_PIPE_MAX];
+ struct mdp_mixer_cfg mdp_mixer_cfg[MDP4_MIXER_MAX];
uint32 mixer_cfg[MDP4_MIXER_MAX];
uint32 flush[MDP4_MIXER_MAX];
struct iommu_free_list iommu_free[MDP4_MIXER_MAX];
@@ -1456,6 +1457,87 @@
(pipe->element1 << 8) | pipe->element0;
}
+static uint32 mdp4_overlayproc_cfg_wb_panel(struct mdp4_overlay_pipe *pipe,
+ char *overlay_base, uint32 curr)
+{
+ int off, bpp;
+ uint32 flag;
+ bool is_rgb = false;
+ struct mdp_mixer_cfg *mixer_cfg;
+
+ off = 0;
+ mixer_cfg = &ctrl->mdp_mixer_cfg[MDP4_MIXER2];
+
+ switch (mixer_cfg->writeback_format) {
+ case WB_FORMAT_RGB_888:
+ bpp = 3; /* RGB888 */
+ flag = 0x0;
+ is_rgb = true;
+ break;
+ case WB_FORMAT_RGB_565:
+ bpp = 2; /* RGB565 */
+ flag = 0x1;
+ is_rgb = true;
+ break;
+ case WB_FORMAT_xRGB_8888:
+ bpp = 4; /* xRGB8888 */
+ flag = 0x3;
+ is_rgb = true;
+ break;
+ case WB_FORMAT_ARGB_8888:
+ bpp = 4; /* ARGB8888 */
+ flag = 0x80000003;
+ is_rgb = true;
+ break;
+ case WB_FORMAT_ARGB_8888_INPUT_ALPHA:
+ pr_warn("currently not supported ARGB_8888_INPUT_ALPHA\n");
+ default:
+ bpp = 1; /* NV12 */
+ is_rgb = false;
+ break;
+ }
+
+ if (is_rgb) {
+ if (pipe->ov_cnt & 0x01)
+ off = pipe->src_height * pipe->src_width * bpp;
+
+ outpdw(overlay_base + 0x000c, pipe->ov_blt_addr + off);
+ /* overlay output is RGB */
+ outpdw(overlay_base + 0x0010, pipe->src_width * bpp);
+ outpdw(overlay_base + 0x001c, pipe->ov_blt_addr + off);
+ /* MDDI - BLT + on demand */
+ outpdw(overlay_base + 0x0004, 0x08);
+
+ curr = inpdw(overlay_base + 0x0014);
+ curr &= 0x4;
+
+ outpdw(overlay_base + 0x0014, curr | flag);
+ } else {
+ if (pipe->ov_cnt & 0x01)
+ off = pipe->src_height * pipe->src_width * bpp;
+
+ outpdw(overlay_base + 0x000c, pipe->ov_blt_addr + off);
+ /* overlay output is pseudo planar (NV12) */
+ outpdw(overlay_base + 0x0010, ((pipe->src_width << 16) |
+ pipe->src_width));
+ outpdw(overlay_base + 0x001c, pipe->ov_blt_addr + off);
+ off = pipe->src_height * pipe->src_width;
+ /* align chroma to 2k address */
+ off = (off + 2047) & ~2047;
+ /* UV plane address */
+ outpdw(overlay_base + 0x0020, pipe->ov_blt_addr + off);
+ /* MDDI - BLT + on demand */
+ outpdw(overlay_base + 0x0004, 0x08);
+ /* pseudo planar + writeback */
+ curr = inpdw(overlay_base + 0x0014);
+ curr &= 0x4;
+ outpdw(overlay_base + 0x0014, curr | 0x012);
+ /* rgb->yuv */
+ outpdw(overlay_base + 0x0200, 0x05);
+ }
+ return curr;
+}
+
/*
* mdp4_overlayproc_cfg: only be called from base layer
*/
@@ -1515,34 +1597,8 @@
#endif
} else if (pipe->mixer_num == MDP4_MIXER2) {
if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK) {
- off = 0;
- bpp = 1;
- if (pipe->ov_cnt & 0x01)
- off = pipe->src_height *
- pipe->src_width * bpp;
-
- outpdw(overlay_base + 0x000c,
- pipe->ov_blt_addr + off);
- /* overlay ouput is RGB888 */
- outpdw(overlay_base + 0x0010,
- ((pipe->src_width << 16) |
- pipe->src_width));
- outpdw(overlay_base + 0x001c,
- pipe->ov_blt_addr + off);
- off = pipe->src_height * pipe->src_width;
- /* align chroma to 2k address */
- off = (off + 2047) & ~2047;
- /* UV plane adress */
- outpdw(overlay_base + 0x0020,
- pipe->ov_blt_addr + off);
- /* MDDI - BLT + on demand */
- outpdw(overlay_base + 0x0004, 0x08);
- /* pseudo planar + writeback */
- curr = inpdw(overlay_base + 0x0014);
- curr &= 0x4;
- outpdw(overlay_base + 0x0014, curr | 0x012);
- /* rgb->yuv */
- outpdw(overlay_base + 0x0200, 0x05);
+ curr = mdp4_overlayproc_cfg_wb_panel(pipe,
+ overlay_base, curr);
}
}
} else {
@@ -3865,6 +3921,42 @@
mutex_unlock(&mfd->dma->ov_mutex);
return err;
}
+
+int mdp4_update_writeback_format(struct msm_fb_data_type *mfd,
+ struct mdp_mixer_cfg *mdp_mixer_cfg)
+{
+ int ret = 0;
+ u32 mixer_num;
+ struct mdp_mixer_cfg *mixer;
+
+ mixer_num = mdp4_get_mixer_num(mfd->panel_info.type);
+ if (!ctrl) {
+ pr_warn("mdp4_overlay_ctrl is NULL\n");
+ return -EPERM;
+ }
+ mixer = &ctrl->mdp_mixer_cfg[mixer_num];
+
+ switch (mdp_mixer_cfg->writeback_format) {
+ case WB_FORMAT_RGB_888:
+ case WB_FORMAT_RGB_565:
+ case WB_FORMAT_NV12:
+ case WB_FORMAT_xRGB_8888:
+ case WB_FORMAT_ARGB_8888:
+ mixer->writeback_format = mdp_mixer_cfg->writeback_format;
+ break;
+ case WB_FORMAT_ARGB_8888_INPUT_ALPHA:
+ mixer->writeback_format = mdp_mixer_cfg->writeback_format;
+ mixer->alpha = mdp_mixer_cfg->alpha;
+ break;
+ default:
+ mixer->writeback_format = WB_FORMAT_NV12;
+ pr_warn("Unsupported format request, setting to NV12\n");
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
int mdp4_update_base_blend(struct msm_fb_data_type *mfd,
struct mdp_blend_cfg *mdp_blend_cfg)
{
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index ea0eb7b..1e0de89 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -1069,7 +1069,15 @@
int i, ret = 0;
/* buf sync */
for (i = 0; i < mfd->acq_fen_cnt; i++) {
- ret = sync_fence_wait(mfd->acq_fen[i], WAIT_FENCE_TIMEOUT);
+ ret = sync_fence_wait(mfd->acq_fen[i],
+ WAIT_FENCE_FIRST_TIMEOUT);
+ if (ret == -ETIME) {
+ pr_warn("sync_fence_wait timed out! ");
+ pr_cont("Waiting %ld more seconds\n",
+ WAIT_FENCE_FINAL_TIMEOUT/MSEC_PER_SEC);
+ ret = sync_fence_wait(mfd->acq_fen[i],
+ WAIT_FENCE_FINAL_TIMEOUT);
+ }
if (ret < 0) {
pr_err("%s: sync_fence_wait failed! ret = %x\n",
__func__, ret);
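The change above replaces the single 900 ms fence wait with a two-stage wait: a short first timeout so a stuck acquire fence is logged quickly, then a much longer final wait before the commit is failed. The same pattern in isolation (helper name hypothetical, sync header location assumed; the WAIT_FENCE_* values are the ones added to mdss_fb.h below):

#include <linux/errno.h>
#include <linux/sync.h>	/* assumed location of struct sync_fence */

static int example_wait_acq_fence(struct sync_fence *fence)
{
	int ret;

	ret = sync_fence_wait(fence, WAIT_FENCE_FIRST_TIMEOUT);
	if (ret == -ETIME)
		ret = sync_fence_wait(fence, WAIT_FENCE_FINAL_TIMEOUT);

	return ret;
}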
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index c4e837e..db2e305 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -27,10 +27,11 @@
#define MSM_FB_MAX_DEV_LIST 32
#define MSM_FB_ENABLE_DBGFS
-/* 900 ms for fence time out */
-#define WAIT_FENCE_TIMEOUT 900
-/* 950 ms for display operation time out */
-#define WAIT_DISP_OP_TIMEOUT 950
+#define WAIT_FENCE_FIRST_TIMEOUT MSEC_PER_SEC
+#define WAIT_FENCE_FINAL_TIMEOUT (10 * MSEC_PER_SEC)
+/* Display op timeout should be greater than total timeout */
+#define WAIT_DISP_OP_TIMEOUT ((WAIT_FENCE_FIRST_TIMEOUT + \
+ WAIT_FENCE_FINAL_TIMEOUT) * MDP_MAX_FENCE_FD)
#ifndef MAX
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 4c70770..e4f78ad 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -323,6 +323,7 @@
struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
struct msm_fb_data_type *mfd);
+int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl);
int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl,
struct mdss_panel_data *pdata);
int mdss_mdp_ctl_destroy(struct mdss_mdp_ctl *ctl);
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index c640c73..cabb183 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -443,7 +443,7 @@
return NULL;
}
-static int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
+int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_ctl *split_ctl;
u32 width, height;
@@ -566,12 +566,6 @@
ctl->opmode |= (ctl->intf_num << 4);
- ret = mdss_mdp_ctl_setup(ctl);
- if (ret) {
- pr_err("unable to setup control path %d\n", ctl->num);
- goto ctl_init_fail;
- }
-
if (ctl->intf_num == MDSS_MDP_NO_INTF) {
ctl->dst_format = pdata->panel_info.out_format;
} else {
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index c1dcc18..daa2499 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -1017,6 +1017,12 @@
return;
}
+ ret = mdss_mdp_overlay_start(mfd);
+ if (ret) {
+ pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+ return;
+ }
+
if (is_mdss_iommu_attached())
data.p[0].addr = mfd->iova;
else
@@ -1454,6 +1460,10 @@
rc = mdss_mdp_overlay_start(mfd);
if (!IS_ERR_VALUE(rc))
rc = mdss_mdp_overlay_kickoff(mfd->ctl);
+ } else {
+ rc = mdss_mdp_ctl_setup(mfd->ctl);
+ if (rc)
+ return rc;
}
if (!IS_ERR_VALUE(rc) && mfd->vsync_pending) {
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 5f994a0..b96e093 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -3301,6 +3301,10 @@
ret = mdp4_update_base_blend(mfd,
&metadata_ptr->data.blend_cfg);
break;
+ case metadata_op_wb_format:
+ ret = mdp4_update_writeback_format(mfd,
+ &metadata_ptr->data.mixer_cfg);
+ break;
#endif
default:
pr_warn("Unsupported request to MDP META IOCTL.\n");
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 06772d9..f6ca334 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -385,6 +385,7 @@
header-y += types.h
header-y += udf_fs_i.h
header-y += udp.h
+header-y += uhid.h
header-y += uinput.h
header-y += uio.h
header-y += ultrasound.h
@@ -450,3 +451,4 @@
header-y += ci-bridge-spi.h
header-y += msm_audio_amrwbplus.h
header-y += avtimer.h
+header-y += msm_ipa.h
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index d3ee879..7a5ab0d 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -112,10 +112,10 @@
/* This needs to be modified manually now, when we add
a new RANGE of SSIDs to the msg_mask_tbl */
#define MSG_MASK_TBL_CNT 24
-#define EVENT_LAST_ID 0x099F
+#define EVENT_LAST_ID 0x09AB
#define MSG_SSID_0 0
-#define MSG_SSID_0_LAST 93
+#define MSG_SSID_0_LAST 94
#define MSG_SSID_1 500
#define MSG_SSID_1_LAST 506
#define MSG_SSID_2 1000
@@ -278,6 +278,9 @@
MSG_LVL_LOW,
MSG_LVL_MED,
MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_HIGH,
MSG_LVL_LOW
};
@@ -713,7 +716,7 @@
/* LOG CODES */
#define LOG_0 0x0
-#define LOG_1 0x1750
+#define LOG_1 0x1755
#define LOG_2 0x0
#define LOG_3 0x0
#define LOG_4 0x4910
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 285b593..8a1b3a1 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -70,7 +70,7 @@
void dma_contiguous_reserve(phys_addr_t addr_limit);
int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
- phys_addr_t limit);
+ phys_addr_t limit, const char *name);
int dma_contiguous_add_device(struct device *dev, phys_addr_t base);
@@ -91,7 +91,7 @@
phys_addr_t base, phys_addr_t limit)
{
int ret;
- ret = dma_contiguous_reserve_area(size, &base, limit);
+ ret = dma_contiguous_reserve_area(size, &base, limit, NULL);
if (ret == 0)
ret = dma_contiguous_add_device(dev, base);
return ret;
diff --git a/include/linux/mfd/pm8xxx/pm8921-charger.h b/include/linux/mfd/pm8xxx/pm8921-charger.h
index 785a33a..1c67b1e 100644
--- a/include/linux/mfd/pm8xxx/pm8921-charger.h
+++ b/include/linux/mfd/pm8xxx/pm8921-charger.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -188,6 +188,7 @@
int btc_delay_ms;
int btc_panic_if_cant_stop_chg;
int stop_chg_upon_expiry;
+ bool disable_chg_rmvl_wrkarnd;
};
enum pm8921_charger_source {
diff --git a/include/linux/msm_ipa.h b/include/linux/msm_ipa.h
index 1b869b1..30bf4f2 100644
--- a/include/linux/msm_ipa.h
+++ b/include/linux/msm_ipa.h
@@ -155,6 +155,10 @@
* wlan client normal: wlan client moved out of power save
* sw routing enable: ipa routing is disabled
* sw routing disable: ipa routing is enabled
+ * wlan ap connect: wlan AP (access point) is up
+ * wlan ap disconnect: wlan AP (access point) is down
+ * wlan sta connect: wlan STA (station) is up
+ * wlan sta disconnect: wlan STA (station) is down
*/
enum ipa_wlan_event {
WLAN_CLIENT_CONNECT,
@@ -163,6 +167,10 @@
WLAN_CLIENT_NORMAL_MODE,
SW_ROUTING_ENABLE,
SW_ROUTING_DISABLE,
+ WLAN_AP_CONNECT,
+ WLAN_AP_DISCONNECT,
+ WLAN_STA_CONNECT,
+ WLAN_STA_DISCONNECT,
};
@@ -761,4 +769,87 @@
IPA_IOCTL_PULL_MSG, \
struct ipa_msg_meta *)
+/*
+ * unique magic number of the Tethering bridge ioctls
+ */
+#define TETH_BRIDGE_IOC_MAGIC 0xCE
+
+/*
+ * Ioctls supported by Tethering bridge driver
+ */
+#define TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE 0
+#define TETH_BRIDGE_IOCTL_SET_AGGR_PARAMS 1
+#define TETH_BRIDGE_IOCTL_GET_AGGR_PARAMS 2
+#define TETH_BRIDGE_IOCTL_GET_AGGR_CAPABILITIES 3
+#define TETH_BRIDGE_IOCTL_MAX 4
+
+
+/**
+ * enum teth_link_protocol_type - link protocol (IP / Ethernet)
+ */
+enum teth_link_protocol_type {
+ TETH_LINK_PROTOCOL_IP,
+ TETH_LINK_PROTOCOL_ETHERNET,
+ TETH_LINK_PROTOCOL_MAX,
+};
+
+/**
+ * enum teth_aggr_protocol_type - Aggregation protocol (MBIM / TLP)
+ */
+enum teth_aggr_protocol_type {
+ TETH_AGGR_PROTOCOL_NONE,
+ TETH_AGGR_PROTOCOL_MBIM,
+ TETH_AGGR_PROTOCOL_TLP,
+ TETH_AGGR_PROTOCOL_MAX,
+};
+
+/**
+ * struct teth_aggr_params_link - Aggregation parameters for uplink/downlink
+ * @aggr_prot: Aggregation protocol (MBIM / TLP)
+ * @max_transfer_size_byte: Maximal size of aggregated packet in bytes.
+ * Default value is 16*1024.
+ * @max_datagrams: Maximal number of IP packets in an aggregated
+ * packet. Default value is 16
+ */
+struct teth_aggr_params_link {
+ enum teth_aggr_protocol_type aggr_prot;
+ uint32_t max_transfer_size_byte;
+ uint32_t max_datagrams;
+};
+
+
+/**
+ * struct teth_aggr_params - Aggregation parameters
+ * @ul: Uplink parameters
+ * @dl: Downlink parameters
+ */
+struct teth_aggr_params {
+ struct teth_aggr_params_link ul;
+ struct teth_aggr_params_link dl;
+};
+
+/**
+ * struct teth_aggr_capabilities - Aggregation capabilities
+ * @num_protocols: Number of protocols described in the array
+ * @prot_caps[]: Array of aggregation capabilities per protocol
+ */
+struct teth_aggr_capabilities {
+ uint16_t num_protocols;
+ struct teth_aggr_params_link prot_caps[0];
+};
+
+
+#define TETH_BRIDGE_IOC_SET_BRIDGE_MODE _IOW(TETH_BRIDGE_IOC_MAGIC, \
+ TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE, \
+ enum teth_link_protocol_type)
+#define TETH_BRIDGE_IOC_SET_AGGR_PARAMS _IOW(TETH_BRIDGE_IOC_MAGIC, \
+ TETH_BRIDGE_IOCTL_SET_AGGR_PARAMS, \
+ struct teth_aggr_params *)
+#define TETH_BRIDGE_IOC_GET_AGGR_PARAMS _IOR(TETH_BRIDGE_IOC_MAGIC, \
+ TETH_BRIDGE_IOCTL_GET_AGGR_PARAMS, \
+ struct teth_aggr_params *)
+#define TETH_BRIDGE_IOC_GET_AGGR_CAPABILITIES _IOWR(TETH_BRIDGE_IOC_MAGIC, \
+ TETH_BRIDGE_IOCTL_GET_AGGR_CAPABILITIES, \
+ struct teth_aggr_capabilities *)
+
#endif /* _MSM_IPA_H_ */
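Reviewer note: as a quick illustration of the new tethering-bridge interface, a minimal user-space caller might program the aggregation parameters roughly as sketched below. This is a sketch only; the device node name is an assumption for illustration and is not defined by this header, and error handling is condensed. Only the structures, enums, and ioctls added above come from this patch.

    /* Sketch: program MBIM aggregation on both links (node name assumed). */
    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/msm_ipa.h>

    int teth_set_mbim_aggr(void)
    {
            struct teth_aggr_params params;
            int ret;
            int fd = open("/dev/teth_bridge", O_RDWR);  /* assumed node name */

            if (fd < 0)
                    return -1;

            memset(&params, 0, sizeof(params));
            params.ul.aggr_prot = TETH_AGGR_PROTOCOL_MBIM;
            params.ul.max_transfer_size_byte = 16 * 1024;  /* header default */
            params.ul.max_datagrams = 16;                  /* header default */
            params.dl = params.ul;

            ret = ioctl(fd, TETH_BRIDGE_IOC_SET_AGGR_PARAMS, &params);
            close(fd);
            return ret;
    }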
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index 404ea52..45bc0ea 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -564,6 +564,15 @@
mdp_op_max,
};
+enum {
+ WB_FORMAT_NV12,
+ WB_FORMAT_RGB_565,
+ WB_FORMAT_RGB_888,
+ WB_FORMAT_xRGB_8888,
+ WB_FORMAT_ARGB_8888,
+ WB_FORMAT_ARGB_8888_INPUT_ALPHA /* Need to support */
+};
+
struct msmfb_mdp_pp {
uint32_t op;
union {
@@ -585,6 +594,7 @@
metadata_op_base_blend,
metadata_op_frame_rate,
metadata_op_vic,
+ metadata_op_wb_format,
metadata_op_max
};
@@ -592,11 +602,17 @@
uint32_t is_premultiplied;
};
+struct mdp_mixer_cfg {
+ uint32_t writeback_format;
+ uint32_t alpha;
+};
+
struct msmfb_metadata {
uint32_t op;
uint32_t flags;
union {
struct mdp_blend_cfg blend_cfg;
+ struct mdp_mixer_cfg mixer_cfg;
uint32_t panel_frame_rate;
uint32_t video_info_code;
} data;
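Reviewer note: the new metadata_op_wb_format operation is driven through the existing framebuffer metadata ioctl path (see the msm_fb.c hunk below). A minimal sketch of a caller follows; the MSMFB_METADATA_SET ioctl name is assumed here rather than shown in this patch, and the caller is expected to pass an already-open writeback-capable framebuffer fd.

    /* Sketch: request an ARGB_8888 writeback format via the metadata ioctl. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/msm_mdp.h>

    static int set_wb_format(int fb_fd)
    {
            struct msmfb_metadata metadata;

            memset(&metadata, 0, sizeof(metadata));
            metadata.op = metadata_op_wb_format;
            metadata.data.mixer_cfg.writeback_format = WB_FORMAT_ARGB_8888;
            metadata.data.mixer_cfg.alpha = 0;  /* only used for INPUT_ALPHA */

            return ioctl(fb_fd, MSMFB_METADATA_SET, &metadata);
    }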
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index c6ee4f0..0683296 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -365,8 +365,8 @@
* requests to connect to a specified network but without separating
* auth and assoc steps. For this, you need to specify the SSID in a
* %NL80211_ATTR_SSID attribute, and can optionally specify the association
- * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC,
- * %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
+ * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_USE_MFP,
+ * %NL80211_ATTR_MAC, %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
* %NL80211_ATTR_CONTROL_PORT_ETHERTYPE and
* %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT.
* Background scan period can optionally be
@@ -906,7 +906,7 @@
* @NL80211_ATTR_USE_MFP: Whether management frame protection (IEEE 802.11w) is
* used for the association (&enum nl80211_mfp, represented as a u32);
* this attribute can be used
- * with %NL80211_CMD_ASSOCIATE request
+ * with %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests
*
* @NL80211_ATTR_STA_FLAGS2: Attribute containing a
* &struct nl80211_sta_flag_update.
diff --git a/include/linux/uhid.h b/include/linux/uhid.h
new file mode 100644
index 0000000..16b786a
--- /dev/null
+++ b/include/linux/uhid.h
@@ -0,0 +1,33 @@
+#ifndef __UHID_H_
+#define __UHID_H_
+
+/*
+ * User-space I/O driver support for HID subsystem
+ * Copyright (c) 2012 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Public header for user-space communication. We try to keep every structure
+ * aligned but to be safe we also use __attribute__((__packed__)). Therefore,
+ * the communication should be ABI compatible even between architectures.
+ */
+
+#include <linux/input.h>
+#include <linux/types.h>
+
+enum uhid_event_type {
+ UHID_DUMMY,
+};
+
+struct uhid_event {
+ __u32 type;
+} __attribute__((__packed__));
+
+#endif /* __UHID_H_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 5e32ff7..6666c69 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1199,6 +1199,7 @@
* @ie: IEs for association request
* @ie_len: Length of assoc_ie in octets
* @privacy: indicates whether privacy-enabled APs should be used
+ * @mfp: indicate whether management frame protection is used
* @crypto: crypto settings
* @key_len: length of WEP key for shared key authentication
* @key_idx: index of WEP key for shared key authentication
@@ -1219,6 +1220,7 @@
u8 *ie;
size_t ie_len;
bool privacy;
+ enum nl80211_mfp mfp;
struct cfg80211_crypto_settings crypto;
const u8 *key;
u8 key_len, key_idx;
diff --git a/kernel/sys.c b/kernel/sys.c
index e7006eb..39791be 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1179,15 +1179,16 @@
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
{
int ret = 0;
- char buf[65];
if (current->personality & UNAME26) {
- char *rest = UTS_RELEASE;
+ const char *rest = UTS_RELEASE;
+ char buf[65] = { 0 };
int ndots = 0;
unsigned v;
+ size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
@@ -1197,8 +1198,9 @@
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
- snprintf(buf, len, "2.6.%u%s", v, rest);
- ret = copy_to_user(release, buf, len);
+ copy = min(sizeof(buf), max_t(size_t, 1, len));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
}
return ret;
}
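Reviewer note: a worked example of the new bounds, assuming for illustration a 3.4 kernel (UTS_RELEASE "3.4.0") and a caller length of 64; the values are illustrative only.

    /*
     * UTS_RELEASE = "3.4.0", len = 64:
     *   v    = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40 = 4 + 40 = 44
     *   buf  = "2.6.44.0"
     *   copy = min(sizeof(buf), max(1, 64)) = 64, then
     *   copy = scnprintf(buf, 64, "2.6.%u%s", v, rest) = 8
     *
     * copy_to_user() then copies copy + 1 = 9 bytes (string plus NUL), which
     * is always bounded by the 65-byte stack buffer regardless of len.
     */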
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5c2e805..1ccc69e 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5173,6 +5173,15 @@
connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
}
+ if (info->attrs[NL80211_ATTR_USE_MFP]) {
+ connect.mfp = nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]);
+ if (connect.mfp != NL80211_MFP_REQUIRED &&
+ connect.mfp != NL80211_MFP_NO)
+ return -EINVAL;
+ } else {
+ connect.mfp = NL80211_MFP_NO;
+ }
+
if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
connect.channel =
ieee80211_get_channel(wiphy,
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index bbbed73..ab91446 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -190,7 +190,8 @@
prev_bssid,
params->ssid, params->ssid_len,
params->ie, params->ie_len,
- false, &params->crypto,
+ params->mfp != NL80211_MFP_NO,
+ &params->crypto,
params->flags, &params->ht_capa,
&params->ht_capa_mask);
if (err)
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index 0b26a56..25d3f56 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -487,16 +487,16 @@
SOC_ENUM_SINGLE(TAPAN_A_CDC_TX4_MUX_CTL, 4, 3, cf_text);
static const struct soc_enum cf_rxmix1_enum =
- SOC_ENUM_SINGLE(TAPAN_A_CDC_RX1_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAPAN_A_CDC_RX1_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix2_enum =
- SOC_ENUM_SINGLE(TAPAN_A_CDC_RX2_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAPAN_A_CDC_RX2_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix3_enum =
- SOC_ENUM_SINGLE(TAPAN_A_CDC_RX3_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAPAN_A_CDC_RX3_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix4_enum =
- SOC_ENUM_SINGLE(TAPAN_A_CDC_RX4_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAPAN_A_CDC_RX4_B4_CTL, 0, 3, cf_text);
static const struct snd_kcontrol_new tapan_snd_controls[] = {
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index f48dbf1..b3d4901 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -829,25 +829,25 @@
SOC_ENUM_SINGLE(TAIKO_A_CDC_TX10_MUX_CTL, 4, 3, cf_text);
static const struct soc_enum cf_rxmix1_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_RX1_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_RX1_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix2_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_RX2_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_RX2_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix3_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_RX3_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_RX3_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix4_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_RX4_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_RX4_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix5_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_RX5_B4_CTL, 1, 3, cf_text)
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_RX5_B4_CTL, 0, 3, cf_text)
;
static const struct soc_enum cf_rxmix6_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_RX6_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_RX6_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix7_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_RX7_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_RX7_B4_CTL, 0, 3, cf_text);
static const char * const class_h_dsm_text[] = {
"ZERO", "DSM_HPHL_RX1", "DSM_SPKR_RX7"
@@ -932,12 +932,6 @@
SOC_SINGLE_TLV("ADC5 Volume", TAIKO_A_TX_5_6_EN, 5, 3, 0, analog_gain),
SOC_SINGLE_TLV("ADC6 Volume", TAIKO_A_TX_5_6_EN, 1, 3, 0, analog_gain),
-
- SOC_SINGLE("MICBIAS1 CAPLESS Switch", TAIKO_A_MICB_1_CTL, 4, 1, 1),
- SOC_SINGLE("MICBIAS2 CAPLESS Switch", TAIKO_A_MICB_2_CTL, 4, 1, 1),
- SOC_SINGLE("MICBIAS3 CAPLESS Switch", TAIKO_A_MICB_3_CTL, 4, 1, 1),
- SOC_SINGLE("MICBIAS4 CAPLESS Switch", TAIKO_A_MICB_4_CTL, 4, 1, 1),
-
SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 0, 100, taiko_get_anc_slot,
taiko_put_anc_slot),
SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
diff --git a/sound/soc/msm/mdm9625.c b/sound/soc/msm/mdm9625.c
index eb7366c..2bef1b7 100644
--- a/sound/soc/msm/mdm9625.c
+++ b/sound/soc/msm/mdm9625.c
@@ -749,7 +749,7 @@
.name = "MDM9625 Media1",
.stream_name = "MultiMedia1",
.cpu_dai_name = "MultiMedia1",
- .platform_name = "msm-pcm-dsp",
+ .platform_name = "msm-pcm-dsp.0",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
index 1969fe8..c14cb74 100644
--- a/sound/soc/msm/qdsp6v2/audio_ocmem.c
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -503,6 +503,7 @@
rc = -EINVAL;
}
+ kfree(voice_ocm_work);
return;
}
/**
@@ -614,6 +615,7 @@
rc = -EINVAL;
}
+ kfree(audio_ocm_work);
return;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
index 2d2fe31..d0b5500 100644
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
@@ -148,7 +148,6 @@
*/
snd_pcm_stream_lock_irq(substream);
if (snd_pcm_playback_empty(substream)) {
- atomic_set(&prtd->pending_buffer, 1);
runtime->render_flag |= SNDRV_RENDER_STOPPED;
stop_playback = 1;
}
@@ -1038,7 +1037,6 @@
(prtd->out_head + 1) & (runtime->periods - 1);
runtime->render_flag &= ~SNDRV_RENDER_STOPPED;
- atomic_set(&prtd->pending_buffer, 0);
return 0;
}
return 0;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
index ae7e76c..3a4a674 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
@@ -536,8 +536,8 @@
memset(&tstamp, 0x0, sizeof(struct snd_compr_tstamp));
rc = q6asm_get_session_time(prtd->audio_client, &tstamp);
if (rc < 0) {
- pr_err("%s: Get Session Time return value =%lld\n",
- __func__, timestamp);
+ pr_err("%s: Fail to get session time stamp, rc:%d\n",
+ __func__, rc);
return -EAGAIN;
}
temp = (timestamp * 2 * runtime->channels);
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 913dded..1f2f307 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -223,6 +223,12 @@
adm_params->hdr.dest_svc = APR_SVC_ADM;
adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
index = afe_get_port_index(port_id);
+ if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: invalid port idx %d portid %#x\n",
+ __func__, index, port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
adm_params->hdr.token = port_id;
adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;