Merge "msm: vidc: Support multiple scratch/persist buffers"
diff --git a/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt b/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
index c71b190..24dbb4b 100644
--- a/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
+++ b/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
@@ -1,14 +1,27 @@
Qualcomm Interprocessor Communication Spinlock
+--Dedicated Hardware Implementation--
Required properties:
-- compatible : should be "qcom,ipc-spinlock"
+- compatible : should be "qcom,ipc-spinlock-sfpb"
- reg : the location and size of the spinlock hardware
- qcom,num-locks : the number of locks supported
Example:
qcom,ipc-spinlock@fd484000 {
- compatible = "qcom,ipc-spinlock";
+ compatible = "qcom,ipc-spinlock-sfpb";
reg = <0xfd484000 0x1000>;
qcom,num-locks = <32>;
};
+
+--LDREX Implementation--
+Required properties:
+- compatible : should be "qcom,ipc-spinlock-ldrex"
+- reg : the location and size of the shared lock memory
+
+Example:
+
+ qcom,ipc-spinlock@fa00000 {
+ compatible = "qcom,ipc-spinlock-ldrex";
+ reg = <0xfa00000 0x200000>;
+ };
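
Note for consumers of this binding: because the single "qcom,ipc-spinlock"
compatible is now split into two implementation-specific strings, code that
locates the lock region has to probe for whichever node is actually enabled.
A minimal sketch of that selection, assuming the generic
of_find_compatible_node()/of_device_is_available() helpers rather than the
driver's own status check (pick_ipc_spinlock_compat is a hypothetical name):

    #include <linux/of.h>

    /* Prefer the dedicated SFPB mutex block; fall back to the LDREX
     * shared-memory implementation when it is absent or disabled.
     */
    static const char *pick_ipc_spinlock_compat(void)
    {
        struct device_node *node;
        const char *compat = NULL;

        node = of_find_compatible_node(NULL, NULL, "qcom,ipc-spinlock-sfpb");
        if (node && of_device_is_available(node))
            compat = "qcom,ipc-spinlock-sfpb";
        of_node_put(node);
        if (compat)
            return compat;

        node = of_find_compatible_node(NULL, NULL, "qcom,ipc-spinlock-ldrex");
        if (node && of_device_is_available(node))
            compat = "qcom,ipc-spinlock-ldrex";
        of_node_put(node);

        return compat;
    }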
diff --git a/Documentation/devicetree/bindings/bif/bif.txt b/Documentation/devicetree/bindings/bif/bif.txt
new file mode 100644
index 0000000..c4ff08b
--- /dev/null
+++ b/Documentation/devicetree/bindings/bif/bif.txt
@@ -0,0 +1,22 @@
+BIF (Battery Interface) Controllers
+
+Optional properties:
+- qcom,known-device-addresses: Specifies a list of integers which correspond to
+ the 8-bit BIF bus device addresses of BIF slaves
+ found on the target.
+
+BIF Consumers
+
+Optional properties:
+- qcom,bif-ctrl: phandle of parent BIF controller device node
+
+Example:
+ foo_ctrl: foo-controller {
+ ...
+ qcom,known-device-addresses = <0x80 0x81>;
+ };
+
+ bar-consumer {
+ ...
+ qcom,bif-ctrl = <&foo_ctrl>;
+ };
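
For illustration, a consumer driver would typically resolve the optional
qcom,bif-ctrl phandle with of_parse_phandle(); the sketch below uses a
hypothetical bar_get_ctrl_node() helper (the BIF core added later in this
series performs an equivalent lookup internally):

    #include <linux/device.h>
    #include <linux/of.h>

    /* Resolve the parent BIF controller node named by qcom,bif-ctrl. */
    static struct device_node *bar_get_ctrl_node(struct device *dev)
    {
        struct device_node *ctrl_node;

        ctrl_node = of_parse_phandle(dev->of_node, "qcom,bif-ctrl", 0);
        if (!ctrl_node)
            dev_warn(dev, "qcom,bif-ctrl phandle not specified\n");

        return ctrl_node;    /* caller drops it with of_node_put() */
    }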
diff --git a/Documentation/devicetree/bindings/memory.txt b/Documentation/devicetree/bindings/memory.txt
new file mode 100644
index 0000000..e98ee05
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory.txt
@@ -0,0 +1,106 @@
+* Memory binding
+
+The /memory node provides basic information about the address and size
+of the physical memory. This node is usually filled or updated by the
+bootloader, depending on the actual memory configuration of the given
+hardware.
+
+The memory layout is described by the following node:
+
+memory {
+ reg = <(baseaddr1) (size1)
+ (baseaddr2) (size2)
+ ...
+ (baseaddrN) (sizeN)>;
+};
+
+baseaddrX: the base address of the defined memory bank
+sizeX: the size of the defined memory bank
+
+More than one memory bank can be defined.
+
+
+* Memory regions
+
+In the /memory node one can create additional nodes describing particular
+memory regions, usually for special usage by various device drivers.
+Good examples are contiguous memory allocations or memory sharing with
+another operating system on the same hardware board. Those special memory
+regions might depend on the board configuration and the devices used on the
+target system.
+
+Parameters for each memory region can be encoded into the device tree
+with the following convention:
+
+(name): region@(base-address) {
+ reg = <(baseaddr) (size)>;
+ (linux,contiguous-region);
+ (linux,default-contiguous-region);
+ label = (unique_name);
+};
+
+name: a name given to the defined region.
+base-address: the base address of the defined region.
+size: the size of the memory region.
+linux,contiguous-region: property indicating that the defined memory
+ region is used for contiguous memory allocations,
+ Linux specific (optional)
+linux,default-contiguous-region: property indicating that the region
+ is the default region for all contiguous memory
+ allocations, Linux specific (optional)
+label: an internal name used for automatically associating the
+ cma region with a given device. The label is optional;
+ if the label is not given the client is responsible for
+ calling the appropriate functions to associate the region
+ with a device.
+
+* Device nodes
+
+Once the regions in the /memory node are defined, they can be assigned
+to particular device nodes for their special use. The following
+properties are defined:
+
+linux,contiguous-region = <&phandle>;
+ This property indicates that the device driver should use the
+ memory region pointed by the given phandle.
+
+
+* Example:
+
+This example defines a memory layout consisting of 4 memory banks. Two
+contiguous regions are defined for the Linux kernel: one default region for
+all device drivers (named contig_mem, placed at 0x72000000, 64 MiB) and one
+dedicated to the framebuffer device (named display_mem, placed at 0x78000000,
+16 MiB). The display_mem region is then assigned to the fb@12300000 device
+for contiguous memory allocation with Linux kernel drivers.
+
+The reason for creating a separate region for the framebuffer device is to
+match the framebuffer address configured by the bootloader, so that once
+the Linux kernel drivers start, no glitches appear on the displayed
+boot logo.
+
+/ {
+ /* ... */
+ memory {
+ reg = <0x40000000 0x10000000
+ 0x50000000 0x10000000
+ 0x60000000 0x10000000
+ 0x70000000 0x10000000>;
+
+ contig_mem: region@72000000 {
+ linux,contiguous-region;
+ linux,default-contiguous-region;
+ reg = <0x72000000 0x4000000>;
+ };
+
+ display_mem: region@78000000 {
+ linux,contiguous-region;
+ reg = <0x78000000 0x1000000>;
+ };
+ };
+
+ fb@12300000 {
+ linux,contiguous-region = <&display_mem>;
+ status = "okay";
+ };
+};
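
Once a region has been attached to a device through linux,contiguous-region,
the driver needs no special API: ordinary DMA allocations are satisfied from
the per-device CMA area. A minimal sketch, assuming a hypothetical driver
bound to the fb@12300000 node above:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/platform_device.h>

    /* Allocate the framebuffer out of the display_mem region that the
     * device tree assigned to this device.
     */
    static int fb_probe_alloc(struct platform_device *pdev)
    {
        dma_addr_t phys;
        void *virt;

        virt = dma_alloc_coherent(&pdev->dev, 16 << 20, &phys, GFP_KERNEL);
        if (!virt)
            return -ENOMEM;

        dev_info(&pdev->dev, "framebuffer at 0x%lx\n", (unsigned long)phys);
        return 0;
    }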
diff --git a/Documentation/devicetree/bindings/regulator/krait-regulator.txt b/Documentation/devicetree/bindings/regulator/krait-regulator.txt
index f057834..c783ac8 100644
--- a/Documentation/devicetree/bindings/regulator/krait-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/krait-regulator.txt
@@ -1,5 +1,20 @@
Krait Voltage regulators
+The CPUs are powered by a single supply provided by PMIC ganged regulators operating in
+different phases. Each individual Krait can further draw power from the single supply via
+an LDO or a head switch (BHS). The first-level node represents the PMIC ganged regulator
+and its properties, and encompasses second-level nodes that represent the individual
+Krait LDO/BHS control regulators.
+
+[First Level Nodes]
+Required properties:
+- compatible: Must be "qcom,krait-pdn"
+
+Optional properties:
+- qcom,use-phase-switching: indicates whether the driver should add/shed phases on the PMIC
+ ganged regulator as CPUs are hotplugged.
+
+[Second Level Nodes]
Required properties:
- compatible: Must be "qcom,krait-regulator"
- reg: Specifies the address and size for this regulator device,
@@ -27,19 +42,26 @@
binding, defined in regulator.txt, can also be used.
Example:
- krait0_vreg: regulator@f9088000 {
- compatible = "qcom,krait-regulator";
- regulator-name = "krait0";
- reg = <0xf9088000 0x1000>, /* APCS_ALIAS0_KPSS_ACS */
- <0xf908a800 0x1000>; /* APCS_ALIAS0_KPSS_MDD */
- reg-names = "acs", "mdd";
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1100000>;
- qcom,headroom-voltage = <150000>;
- qcom,retention-voltage = <745000>;
- qcom,ldo-default-voltage = <745000>;
- qcom,ldo-threshold-voltage = <750000>;
- qcom,ldo-delta-voltage = <50000>;
- qcom,cpu-num = 0;
- };
+ krait_pdn: krait-pdn {
+ compatible = "qcom,krait-pdn";
+ qcom,use-phase-switching;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ krait0_vreg: regulator@f9088000 {
+ compatible = "qcom,krait-regulator";
+ regulator-name = "krait0";
+ reg = <0xf9088000 0x1000>, /* APCS_ALIAS0_KPSS_ACS */
+ <0xf908a800 0x1000>; /* APCS_ALIAS0_KPSS_MDD */
+ reg-names = "acs", "mdd";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,headroom-voltage = <150000>;
+ qcom,retention-voltage = <675000>;
+ qcom,ldo-default-voltage = <750000>;
+ qcom,ldo-threshold-voltage = <850000>;
+ qcom,ldo-delta-voltage = <50000>;
+ qcom,cpu-num = <0>;
+ };
+ };
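
The two-level layout works because the first-level driver reads the
gang-level properties itself and then registers each second-level
qcom,krait-regulator child as a platform device. The real implementation is
in the krait-regulator.c changes further down; condensed to its core (sketch
only, krait_pdn_probe_sketch is not the actual function):

    #include <linux/of.h>
    #include <linux/of_platform.h>
    #include <linux/platform_device.h>

    static int krait_pdn_probe_sketch(struct platform_device *pdev)
    {
        struct device_node *node = pdev->dev.of_node;
        bool use_phase_switching;
        int rc;

        if (!node)
            return -ENODEV;

        /* gang-level option consumed by the parent node only */
        use_phase_switching = of_property_read_bool(node,
                        "qcom,use-phase-switching");
        dev_dbg(&pdev->dev, "phase switching %s\n",
            use_phase_switching ? "enabled" : "disabled");

        /* create platform devices for the child regulator nodes */
        rc = of_platform_populate(node, NULL, NULL, &pdev->dev);
        if (rc)
            dev_err(&pdev->dev, "failed to add child nodes, rc=%d\n", rc);

        return rc;
    }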
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index ed18cae..3a9b770 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -63,27 +63,6 @@
8 - SIGSEGV faults
16 - SIGBUS faults
-config DEBUG_RODATA
- bool "Write protect kernel text section"
- default n
- depends on DEBUG_KERNEL && MMU
- ---help---
- Mark the kernel text section as write-protected in the pagetables,
- in order to catch accidental (and incorrect) writes to such const
- data. This will cause the size of the kernel, plus up to 4MB, to
- be mapped as pages instead of sections, which will increase TLB
- pressure.
- If in doubt, say "N".
-
-config DEBUG_RODATA_TEST
- bool "Testcase for the DEBUG_RODATA feature"
- depends on DEBUG_RODATA
- default n
- ---help---
- This option enables a testcase for the DEBUG_RODATA
- feature.
- If in doubt, say "N"
-
# These options are only for real kernel hackers who want to get their hands dirty.
config DEBUG_LL
bool "Kernel low-level debugging functions (read help!)"
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 3533d19..bcf2cc9 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -578,6 +578,11 @@
qcom,bam-producer-pipe-index = <13>;
};
+ qcom,bam_dmux@fc834000 {
+ compatible = "qcom,bam_dmux";
+ reg = <0xfc834000 0x7000>;
+ interrupts = <0 29 1>;
+ };
};
&gdsc_venus {
diff --git a/arch/arm/boot/dts/msm8974-ion.dtsi b/arch/arm/boot/dts/msm8974-ion.dtsi
index f55cff2..dfa22c1 100644
--- a/arch/arm/boot/dts/msm8974-ion.dtsi
+++ b/arch/arm/boot/dts/msm8974-ion.dtsi
@@ -24,8 +24,7 @@
compatible = "qcom,msm-ion-reserve";
reg = <8>;
qcom,heap-align = <0x1000>;
- qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
- qcom,memory-reservation-size = <0x7800000>;
+ linux,contiguous-region = <&secure_mem>;
};
qcom,ion-heap@25 { /* IOMMU HEAP */
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index 68fed68..6e2719b 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -380,7 +380,7 @@
};
&usb3 {
- qcom,charging-disabled;
+ qcom,otg-capability;
};
&pm8941_mvs1 {
@@ -708,3 +708,33 @@
};
};
};
+
+&pm8941_chg {
+ status = "ok";
+
+ qcom,chg-charging-disabled;
+
+ qcom,chg-chgr@1000 {
+ status = "ok";
+ };
+
+ qcom,chg-buck@1100 {
+ status = "ok";
+ };
+
+ qcom,chg-usb-chgpth@1300 {
+ status = "ok";
+ };
+
+ qcom,chg-dc-chgpth@1400 {
+ status = "ok";
+ };
+
+ qcom,chg-boost@1500 {
+ status = "ok";
+ };
+
+ qcom,chg-misc@1600 {
+ status = "ok";
+ };
+};
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index 1a6d9ba..2dad8e7 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -423,68 +423,75 @@
};
/ {
- krait0_vreg: regulator@f9088000 {
- compatible = "qcom,krait-regulator";
- regulator-name = "krait0";
- reg = <0xf9088000 0x1000>, /* APCS_ALIAS0_KPSS_ACS */
- <0xf908a800 0x1000>; /* APCS_ALIAS0_KPSS_MDD */
- reg-names = "acs", "mdd";
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1100000>;
- qcom,headroom-voltage = <150000>;
- qcom,retention-voltage = <675000>;
- qcom,ldo-default-voltage = <750000>;
- qcom,ldo-threshold-voltage = <850000>;
- qcom,ldo-delta-voltage = <50000>;
- qcom,cpu-num = <0>;
- };
+ krait_pdn: krait-pdn {
+ compatible = "qcom,krait-pdn";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
- krait1_vreg: regulator@f9098000 {
- compatible = "qcom,krait-regulator";
- regulator-name = "krait1";
- reg = <0xf9098000 0x1000>, /* APCS_ALIAS1_KPSS_ACS */
- <0xf909a800 0x1000>; /* APCS_ALIAS1_KPSS_MDD */
- reg-names = "acs", "mdd";
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1100000>;
- qcom,headroom-voltage = <150000>;
- qcom,retention-voltage = <675000>;
- qcom,ldo-default-voltage = <750000>;
- qcom,ldo-threshold-voltage = <850000>;
- qcom,ldo-delta-voltage = <50000>;
- qcom,cpu-num = <1>;
- };
+ krait0_vreg: regulator@f9088000 {
+ compatible = "qcom,krait-regulator";
+ regulator-name = "krait0";
+ reg = <0xf9088000 0x1000>, /* APCS_ALIAS0_KPSS_ACS */
+ <0xf908a800 0x1000>; /* APCS_ALIAS0_KPSS_MDD */
+ reg-names = "acs", "mdd";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,headroom-voltage = <150000>;
+ qcom,retention-voltage = <675000>;
+ qcom,ldo-default-voltage = <750000>;
+ qcom,ldo-threshold-voltage = <850000>;
+ qcom,ldo-delta-voltage = <50000>;
+ qcom,cpu-num = <0>;
+ };
- krait2_vreg: regulator@f90a8000 {
- compatible = "qcom,krait-regulator";
- regulator-name = "krait2";
- reg = <0xf90a8000 0x1000>, /* APCS_ALIAS2_KPSS_ACS */
- <0xf90aa800 0x1000>; /* APCS_ALIAS2_KPSS_MDD */
- reg-names = "acs", "mdd";
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1100000>;
- qcom,headroom-voltage = <150000>;
- qcom,retention-voltage = <675000>;
- qcom,ldo-default-voltage = <750000>;
- qcom,ldo-threshold-voltage = <850000>;
- qcom,ldo-delta-voltage = <50000>;
- qcom,cpu-num = <2>;
- };
+ krait1_vreg: regulator@f9098000 {
+ compatible = "qcom,krait-regulator";
+ regulator-name = "krait1";
+ reg = <0xf9098000 0x1000>, /* APCS_ALIAS1_KPSS_ACS */
+ <0xf909a800 0x1000>; /* APCS_ALIAS1_KPSS_MDD */
+ reg-names = "acs", "mdd";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,headroom-voltage = <150000>;
+ qcom,retention-voltage = <675000>;
+ qcom,ldo-default-voltage = <750000>;
+ qcom,ldo-threshold-voltage = <850000>;
+ qcom,ldo-delta-voltage = <50000>;
+ qcom,cpu-num = <1>;
+ };
- krait3_vreg: regulator@f90b8000 {
- compatible = "qcom,krait-regulator";
- regulator-name = "krait3";
- reg = <0xf90b8000 0x1000>, /* APCS_ALIAS3_KPSS_ACS */
- <0xf90ba800 0x1000>; /* APCS_ALIAS3_KPSS_MDD */
- reg-names = "acs", "mdd";
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1100000>;
- qcom,headroom-voltage = <150000>;
- qcom,retention-voltage = <675000>;
- qcom,ldo-default-voltage = <750000>;
- qcom,ldo-threshold-voltage = <850000>;
- qcom,ldo-delta-voltage = <50000>;
- qcom,cpu-num = <3>;
+ krait2_vreg: regulator@f90a8000 {
+ compatible = "qcom,krait-regulator";
+ regulator-name = "krait2";
+ reg = <0xf90a8000 0x1000>, /* APCS_ALIAS2_KPSS_ACS */
+ <0xf90aa800 0x1000>; /* APCS_ALIAS2_KPSS_MDD */
+ reg-names = "acs", "mdd";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,headroom-voltage = <150000>;
+ qcom,retention-voltage = <675000>;
+ qcom,ldo-default-voltage = <750000>;
+ qcom,ldo-threshold-voltage = <850000>;
+ qcom,ldo-delta-voltage = <50000>;
+ qcom,cpu-num = <2>;
+ };
+
+ krait3_vreg: regulator@f90b8000 {
+ compatible = "qcom,krait-regulator";
+ regulator-name = "krait3";
+ reg = <0xf90b8000 0x1000>, /* APCS_ALIAS3_KPSS_ACS */
+ <0xf90ba800 0x1000>; /* APCS_ALIAS3_KPSS_MDD */
+ reg-names = "acs", "mdd";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,headroom-voltage = <150000>;
+ qcom,retention-voltage = <675000>;
+ qcom,ldo-default-voltage = <750000>;
+ qcom,ldo-threshold-voltage = <850000>;
+ qcom,ldo-delta-voltage = <50000>;
+ qcom,cpu-num = <3>;
+ };
};
spi_eth_vreg: spi_eth_phy_vreg {
diff --git a/arch/arm/boot/dts/msm8974-v1.dtsi b/arch/arm/boot/dts/msm8974-v1.dtsi
index aed4daf..fc3a1d3 100644
--- a/arch/arm/boot/dts/msm8974-v1.dtsi
+++ b/arch/arm/boot/dts/msm8974-v1.dtsi
@@ -111,3 +111,11 @@
<1616000 2908800>,
<2020000 6400000>;
};
+
+&sfpb_spinlock {
+ status = "disable";
+};
+
+&ldrex_spinlock {
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/msm8974-v2.dtsi b/arch/arm/boot/dts/msm8974-v2.dtsi
index 1a7c628..7e6c0bf 100644
--- a/arch/arm/boot/dts/msm8974-v2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.dtsi
@@ -111,3 +111,7 @@
<2024000 1212000>,
<2132000 1279000>;
};
+
+&krait_pdn {
+ qcom,use-phase-switching;
+};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index b0b7677..7c6a9d1 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -31,6 +31,15 @@
spi7 = &spi_7;
};
+ memory {
+
+ secure_mem: region@0 {
+ linux,contiguous-region;
+ reg = <0 0x7800000>;
+ label = "secure_mem";
+ };
+ };
+
intc: interrupt-controller@F9000000 {
compatible = "qcom,msm-qgic2";
interrupt-controller;
@@ -1234,6 +1243,18 @@
compatible = "qcom,ssm";
qcom,channel-name = "SSM_RTR";
};
+
+ sfpb_spinlock: qcom,ipc-spinlock@fd484000 {
+ compatible = "qcom,ipc-spinlock-sfpb";
+ reg = <0xfd484000 0x1000>;
+ qcom,num-locks = <32>;
+ };
+
+ ldrex_spinlock: qcom,ipc-spinlock@fa00000 {
+ compatible = "qcom,ipc-spinlock-ldrex";
+ reg = <0xfa00000 0x200000>;
+ status = "disable";
+ };
};
&gdsc_venus {
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index f22fc28..8517605 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -515,6 +515,7 @@
qcom,msm-pcm {
compatible = "qcom,msm-pcm-dsp";
+ qcom,msm-pcm-dsp-id = <0>;
};
qcom,msm-pcm-routing {
diff --git a/arch/arm/boot/dts/skeleton.dtsi b/arch/arm/boot/dts/skeleton.dtsi
index b41d241..f9988cd 100644
--- a/arch/arm/boot/dts/skeleton.dtsi
+++ b/arch/arm/boot/dts/skeleton.dtsi
@@ -9,5 +9,10 @@
#size-cells = <1>;
chosen { };
aliases { };
- memory { device_type = "memory"; reg = <0 0>; };
+ memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "memory";
+ reg = <0 0>;
+ };
};
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index 2e4f84d..3d710cc 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -45,6 +45,7 @@
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_PKG4=y
CONFIG_MSM_IPC_LOGGING=y
+CONFIG_MSM_BAM_DMUX=y
CONFIG_MSM_SMP2P=y
CONFIG_MSM_SMP2P_TEST=y
CONFIG_MSM_IPC_ROUTER=y
@@ -108,6 +109,10 @@
CONFIG_BRIDGE_NF_EBTABLES=y
CONFIG_BRIDGE_EBT_BROUTE=y
CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_CLS_FW=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_MD=y
@@ -115,6 +120,8 @@
CONFIG_DM_CRYPT=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
+# CONFIG_MSM_RMNET is not set
+CONFIG_MSM_RMNET_BAM=y
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 952171c..d36d5a2 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -234,6 +234,7 @@
CONFIG_GENLOCK_MISCDEVICE=y
CONFIG_SYNC=y
CONFIG_SW_SYNC=y
+CONFIG_CMA=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_HAPTIC_ISA1200=y
@@ -356,10 +357,6 @@
CONFIG_SND_USB_AUDIO=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSM8974=y
-CONFIG_UHID=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
CONFIG_USB_SUSPEND=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index a3a4487..df0b5f0 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -238,6 +238,7 @@
CONFIG_GENLOCK_MISCDEVICE=y
CONFIG_SYNC=y
CONFIG_SW_SYNC=y
+CONFIG_CMA=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_TSPP=m
@@ -364,10 +365,6 @@
CONFIG_SND_USB_AUDIO=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSM8974=y
-CONFIG_UHID=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
CONFIG_USB_SUSPEND=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
diff --git a/arch/arm/configs/msm9625-perf_defconfig b/arch/arm/configs/msm9625-perf_defconfig
index 2070f46..1fe528a 100644
--- a/arch/arm/configs/msm9625-perf_defconfig
+++ b/arch/arm/configs/msm9625-perf_defconfig
@@ -172,7 +172,8 @@
CONFIG_KS8851=y
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_MSM_RMNET is not set
-CONFIG_MSM_RMNET_BAM=y
+# CONFIG_MSM_RMNET_BAM is not set
+CONFIG_MSM_RMNET_WWAN=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index 9a1f872..aa18209 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -172,7 +172,8 @@
CONFIG_KS8851=y
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_MSM_RMNET is not set
-CONFIG_MSM_RMNET_BAM=y
+# CONFIG_MSM_RMNET_BAM is not set
+CONFIG_MSM_RMNET_WWAN=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d021905..584fe0b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -16,7 +16,6 @@
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>
-#include <asm/rodata.h>
#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
diff --git a/arch/arm/include/asm/rodata.h b/arch/arm/include/asm/rodata.h
deleted file mode 100644
index 8c8add8..0000000
--- a/arch/arm/include/asm/rodata.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * arch/arm/include/asm/rodata.h
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * Author: Colin Cross <ccross@android.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _ASMARM_RODATA_H
-#define _ASMARM_RODATA_H
-
-#ifndef __ASSEMBLY__
-
-#ifdef CONFIG_DEBUG_RODATA
-
-int set_memory_rw(unsigned long virt, int numpages);
-int set_memory_ro(unsigned long virt, int numpages);
-
-void mark_rodata_ro(void);
-void set_kernel_text_rw(void);
-void set_kernel_text_ro(void);
-#else
-static inline void set_kernel_text_rw(void) { }
-static inline void set_kernel_text_ro(void) { }
-#endif
-
-#endif
-
-#endif
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index bf17145..df0bf0c 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -13,7 +13,6 @@
*/
#include <linux/ftrace.h>
-#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
@@ -64,20 +63,6 @@
}
#endif
-int ftrace_arch_code_modify_prepare(void)
-{
- set_kernel_text_rw();
- set_all_modules_text_rw();
- return 0;
-}
-
-int ftrace_arch_code_modify_post_process(void)
-{
- set_all_modules_text_ro();
- set_kernel_text_ro();
- return 0;
-}
-
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
return arm_gen_branch_link(pc, addr);
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 42ed059..f0b706a 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -412,6 +412,7 @@
select CPU_FREQ_GOV_ONDEMAND
select MSM_PIL
select MSM_RUN_QUEUE_STATS
+ select ARM_HAS_SG_CHAIN
config ARCH_MSM8226
bool "MSM8226"
@@ -438,6 +439,7 @@
select MEMORY_HOLE_CARVEOUT
select DONT_MAP_HOLE_AFTER_MEMBANK0
select MSM_BUS_SCALING
+ select ARM_HAS_SG_CHAIN
endmenu
choice
diff --git a/arch/arm/mach-msm/acpuclock-8226.c b/arch/arm/mach-msm/acpuclock-8226.c
index 7dc3a0e..8ba1b39 100644
--- a/arch/arm/mach-msm/acpuclock-8226.c
+++ b/arch/arm/mach-msm/acpuclock-8226.c
@@ -53,13 +53,13 @@
* 3) Depending on Frodo version, may need minimum of LVL_NOM
*/
static struct clkctl_acpu_speed acpu_freq_tbl[] = {
- { 0, 19200, CXO, 0, 0, LVL_LOW, 950000, 0 },
- { 1, 300000, PLL0, 4, 2, LVL_LOW, 950000, 4 },
- { 1, 384000, ACPUPLL, 5, 0, LVL_LOW, 950000, 4 },
- { 1, 600000, PLL0, 4, 0, LVL_NOM, 950000, 6 },
- { 1, 787200, ACPUPLL, 5, 0, LVL_NOM, 1050000, 6 },
- { 1, 998400, ACPUPLL, 5, 0, LVL_HIGH, 1050000, 7 },
- { 1, 1190400, ACPUPLL, 5, 0, LVL_HIGH, 1050000, 7 },
+ { 0, 19200, CXO, 0, 0, 1150000, 1150000, 0 },
+ { 1, 300000, PLL0, 4, 2, 1150000, 1150000, 4 },
+ { 1, 384000, ACPUPLL, 5, 0, 1150000, 1150000, 4 },
+ { 1, 600000, PLL0, 4, 0, 1150000, 1150000, 6 },
+ { 1, 787200, ACPUPLL, 5, 0, 1150000, 1150000, 6 },
+ { 0, 998400, ACPUPLL, 5, 0, 1150000, 1150000, 7 },
+ { 0, 1190400, ACPUPLL, 5, 0, 1150000, 1150000, 7 },
{ 0 }
};
@@ -68,7 +68,7 @@
.current_speed = &(struct clkctl_acpu_speed){ 0 },
.bus_scale = &bus_client_pdata,
/* FIXME regulator doesn't support corners yet */
- .vdd_max_cpu = 1050000,
+ .vdd_max_cpu = 1150000,
.vdd_max_mem = 1150000,
.src_clocks = {
[PLL0].name = "gpll0",
diff --git a/arch/arm/mach-msm/acpuclock-cortex.c b/arch/arm/mach-msm/acpuclock-cortex.c
index 4ac1408..febf95a 100644
--- a/arch/arm/mach-msm/acpuclock-cortex.c
+++ b/arch/arm/mach-msm/acpuclock-cortex.c
@@ -329,8 +329,8 @@
max_cpu_khz = acpuclk_init_data->freq_tbl[i].khz;
/* Initialize regulators */
- rc = increase_vdd(acpuclk_init_data->freq_tbl[i].vdd_cpu,
- acpuclk_init_data->freq_tbl[i].vdd_mem);
+ rc = increase_vdd(acpuclk_init_data->vdd_max_cpu,
+ acpuclk_init_data->vdd_max_mem);
if (rc)
goto err_vdd;
diff --git a/arch/arm/mach-msm/board-8064-gpiomux.c b/arch/arm/mach-msm/board-8064-gpiomux.c
index 0dee8f5..0f88287 100644
--- a/arch/arm/mach-msm/board-8064-gpiomux.c
+++ b/arch/arm/mach-msm/board-8064-gpiomux.c
@@ -1729,13 +1729,6 @@
},
};
-static struct gpiomux_setting fsm8064_ep_sync_drsync_cfg = {
- .func = GPIOMUX_FUNC_GPIO,
- .drv = GPIOMUX_DRV_2MA,
- .pull = GPIOMUX_PULL_UP,
- .dir = GPIOMUX_OUT_HIGH,
-};
-
static struct gpiomux_setting fsm8064_ep_sync_input_cfg = {
.func = GPIOMUX_FUNC_GPIO,
.drv = GPIOMUX_DRV_4MA,
@@ -1746,7 +1739,7 @@
{
.gpio = 6, /* GPSPPSIN_DRSYNC */
.settings = {
- [GPIOMUX_SUSPENDED] = &fsm8064_ep_sync_drsync_cfg,
+ [GPIOMUX_SUSPENDED] = &fsm8064_ep_sync_input_cfg,
},
},
{
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index a1ff607..a1ed251 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -137,7 +137,7 @@
PM8921_GPIO_OUTPUT_VIN(14, 1, PM_GPIO_VIN_VPH),
/* PPS_SRC_SEL_N, chooses between WGR7640 PPS source (high) or
* CW GPS module PPS source (low) */
- PM8921_GPIO_OUTPUT_VIN(19, 1, PM_GPIO_VIN_VPH), /* PPS_SRC_SEL_N */
+ PM8921_GPIO_OUTPUT_VIN(19, 0, PM_GPIO_VIN_VPH), /* PPS_SRC_SEL_N */
PM8921_GPIO_OUTPUT_VIN(13, 1, PM_GPIO_VIN_VPH), /* PCIE_CLK_PWR_EN */
PM8921_GPIO_OUTPUT_VIN(37, 1, PM_GPIO_VIN_VPH), /* PCIE_RST_N */
@@ -557,4 +557,7 @@
if (!machine_is_apq8064_mtp() && !machine_is_apq8064_liquid())
apq8064_pm8921_chg_pdata.battery_less_hardware = 1;
+
+ if (machine_is_mpq8064_hrd())
+ apq8064_pm8921_chg_pdata.disable_chg_rmvl_wrkarnd = 1;
}
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 9ed71da..f3d648e 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -4005,6 +4005,7 @@
.init_early = apq8064_allocate_memory_regions,
.init_very_early = apq8064_early_reserve,
.restart = msm_restart,
+ .smp = &msm8960_smp_ops,
MACHINE_END
MACHINE_START(APQ8064_MTP, "QCT APQ8064 MTP")
diff --git a/arch/arm/mach-msm/include/mach/iommu_perfmon.h b/arch/arm/mach-msm/include/mach/iommu_perfmon.h
index 5a01bee..c03c752 100644
--- a/arch/arm/mach-msm/include/mach/iommu_perfmon.h
+++ b/arch/arm/mach-msm/include/mach/iommu_perfmon.h
@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/list.h>
+#include <linux/irqreturn.h>
#ifndef MSM_IOMMU_PERFMON_H
#define MSM_IOMMU_PERFMON_H
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
index 5ccdf82..1073266 100644
--- a/arch/arm/mach-msm/include/mach/ipa.h
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -438,6 +438,9 @@
int (*release_resource)(void);
};
+#define A2_MUX_HDR_NAME_V4_PREF "dmux_hdr_v4_"
+#define A2_MUX_HDR_NAME_V6_PREF "dmux_hdr_v6_"
+
enum a2_mux_event_type {
A2_MUX_RECEIVE,
A2_MUX_WRITE_DONE
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index 02272bc..eb44c40 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -19,15 +19,15 @@
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <asm/sizes.h>
#include <asm/page.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/socinfo.h>
#include <mach/msm_subsystem_map.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
struct msm_iova_data {
struct rb_node node;
diff --git a/arch/arm/mach-msm/krait-regulator.c b/arch/arm/mach-msm/krait-regulator.c
index 0c1e279..dc0b755 100644
--- a/arch/arm/mach-msm/krait-regulator.c
+++ b/arch/arm/mach-msm/krait-regulator.c
@@ -155,6 +155,7 @@
bool pfm_mode;
int pmic_min_uV_for_retention;
bool retention_enabled;
+ bool use_phase_switching;
};
static struct pmic_gang_vreg *the_gang;
@@ -390,13 +391,17 @@
return 0;
}
-static int set_pmic_gang_phases(int phase_count)
+static int set_pmic_gang_phases(struct pmic_gang_vreg *pvreg, int phase_count)
{
- /*
- * TODO : spm writes for phase control,
- * pmic phase control is not working yet
- */
- return 0;
+ pr_debug("programming phase_count = %d\n", phase_count);
+ if (pvreg->use_phase_switching)
+ /*
+ * note the PMIC sets the phase count to one more than
+ * the value in the register - hence subtract 1 from it
+ */
+ return msm_spm_apcs_set_phase(phase_count - 1);
+ else
+ return 0;
}
static int set_pmic_gang_voltage(struct pmic_gang_vreg *pvreg, int uV)
@@ -547,14 +552,19 @@
int load_uA)
{
struct pmic_gang_vreg *pvreg = from->pvreg;
- int phase_count = DIV_ROUND_UP(load_uA, LOAD_PER_PHASE) - 1;
+ int phase_count = DIV_ROUND_UP(load_uA, LOAD_PER_PHASE);
int rc = 0;
- if (phase_count < 0)
- phase_count = 0;
+ if (phase_count <= 0)
+ phase_count = 1;
+
+ /* Increase phases if it is less than the number of cpus online */
+ if (phase_count < num_online_cpus()) {
+ phase_count = num_online_cpus();
+ }
if (phase_count != pvreg->pmic_phase_count) {
- rc = set_pmic_gang_phases(phase_count);
+ rc = set_pmic_gang_phases(pvreg, phase_count);
if (rc < 0) {
dev_err(&from->rdev->dev,
"%s failed set phase %d rc = %d\n",
@@ -577,32 +587,6 @@
return rc;
}
-static int __devinit pvreg_init(struct platform_device *pdev)
-{
- struct pmic_gang_vreg *pvreg;
-
- pvreg = devm_kzalloc(&pdev->dev,
- sizeof(struct pmic_gang_vreg), GFP_KERNEL);
- if (!pvreg) {
- pr_err("kzalloc failed.\n");
- return -ENOMEM;
- }
-
- pvreg->name = "pmic_gang";
- pvreg->pmic_vmax_uV = PMIC_VOLTAGE_MIN;
- pvreg->pmic_phase_count = 1;
- pvreg->retention_enabled = true;
- pvreg->pmic_min_uV_for_retention = INT_MAX;
-
- mutex_init(&pvreg->krait_power_vregs_lock);
- INIT_LIST_HEAD(&pvreg->krait_power_vregs);
- the_gang = pvreg;
-
- pr_debug("name=%s inited\n", pvreg->name);
-
- return 0;
-}
-
static int krait_power_get_voltage(struct regulator_dev *rdev)
{
struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
@@ -925,24 +909,6 @@
int ldo_delta_uV;
int cpu_num;
- /* Initialize the pmic gang if it hasn't been initialized already */
- if (the_gang == NULL) {
- rc = pvreg_init(pdev);
- if (rc < 0) {
- dev_err(&pdev->dev,
- "failed to init pmic gang rc = %d\n", rc);
- return rc;
- }
- /* global initializtion */
- glb_init(pdev);
- }
-
- if (dent == NULL) {
- dent = debugfs_create_dir(KRAIT_REGULATOR_DRIVER_NAME, NULL);
- debugfs_create_file("retention_uV",
- 0644, dent, the_gang, &retention_fops);
- }
-
if (pdev->dev.of_node) {
/* Get init_data from device tree. */
init_data = of_get_regulator_init_data(&pdev->dev,
@@ -1139,14 +1105,93 @@
},
};
+static struct of_device_id krait_pdn_match_table[] = {
+ { .compatible = "qcom,krait-pdn", },
+ {}
+};
+
+static int __devinit krait_pdn_probe(struct platform_device *pdev)
+{
+ int rc;
+ bool use_phase_switching = false;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct pmic_gang_vreg *pvreg;
+
+ if (!dev->of_node) {
+ dev_err(dev, "device tree information missing\n");
+ return -ENODEV;
+ }
+
+ use_phase_switching = of_property_read_bool(node,
+ "qcom,use-phase-switching");
+ pvreg = devm_kzalloc(&pdev->dev,
+ sizeof(struct pmic_gang_vreg), GFP_KERNEL);
+ if (!pvreg) {
+ pr_err("kzalloc failed.\n");
+ return -ENOMEM;
+ }
+
+ pvreg->name = "pmic_gang";
+ pvreg->pmic_vmax_uV = PMIC_VOLTAGE_MIN;
+ pvreg->pmic_phase_count = -EINVAL;
+ pvreg->retention_enabled = true;
+ pvreg->pmic_min_uV_for_retention = INT_MAX;
+ pvreg->use_phase_switching = use_phase_switching;
+
+ mutex_init(&pvreg->krait_power_vregs_lock);
+ INIT_LIST_HEAD(&pvreg->krait_power_vregs);
+ the_gang = pvreg;
+
+ pr_debug("name=%s inited\n", pvreg->name);
+
+ /* global initialization */
+ glb_init(pdev);
+
+ rc = of_platform_populate(node, NULL, NULL, dev);
+ if (rc) {
+ dev_err(dev, "failed to add child nodes, rc=%d\n", rc);
+ return rc;
+ }
+
+ dent = debugfs_create_dir(KRAIT_REGULATOR_DRIVER_NAME, NULL);
+ debugfs_create_file("retention_uV",
+ 0644, dent, the_gang, &retention_fops);
+ return 0;
+}
+
+static int __devexit krait_pdn_remove(struct platform_device *pdev)
+{
+ the_gang = NULL;
+ debugfs_remove_recursive(dent);
+ return 0;
+}
+
+static struct platform_driver krait_pdn_driver = {
+ .probe = krait_pdn_probe,
+ .remove = __devexit_p(krait_pdn_remove),
+ .driver = {
+ .name = KRAIT_PDN_DRIVER_NAME,
+ .of_match_table = krait_pdn_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
int __init krait_power_init(void)
{
- return platform_driver_register(&krait_power_driver);
+ int rc = platform_driver_register(&krait_power_driver);
+ if (rc) {
+ pr_err("failed to add %s driver rc = %d\n",
+ KRAIT_REGULATOR_DRIVER_NAME, rc);
+ return rc;
+ }
+ return platform_driver_register(&krait_pdn_driver);
}
static void __exit krait_power_exit(void)
{
platform_driver_unregister(&krait_power_driver);
+ platform_driver_unregister(&krait_pdn_driver);
}
module_exit(krait_power_exit);
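
Taken together, the phase-count changes above reduce to a small rule: round
the load up to whole phases, never drop below one phase or below one phase
per online CPU, and program the PMIC with the count minus one. A condensed
restatement (compute_phase_register is a hypothetical helper; LOAD_PER_PHASE
is the driver's existing per-phase load constant):

    #include <linux/cpumask.h>
    #include <linux/kernel.h>

    static int compute_phase_register(int load_uA, int load_per_phase)
    {
        int phase_count = DIV_ROUND_UP(load_uA, load_per_phase);

        if (phase_count <= 0)
            phase_count = 1;

        /* at least one phase per online CPU */
        if (phase_count < num_online_cpus())
            phase_count = num_online_cpus();

        /* the PMIC register holds one less than the phase count */
        return phase_count - 1;
    }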
diff --git a/arch/arm/mach-msm/remote_spinlock.c b/arch/arm/mach-msm/remote_spinlock.c
index 4e09a9e..94923a0 100644
--- a/arch/arm/mach-msm/remote_spinlock.c
+++ b/arch/arm/mach-msm/remote_spinlock.c
@@ -196,6 +196,8 @@
/* end swp implementation --------------------------------------------------- */
/* ldrex implementation ----------------------------------------------------- */
+static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";
+
static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
{
unsigned long tmp;
@@ -267,7 +269,7 @@
static void *hw_mutex_reg_base;
static DEFINE_MUTEX(hw_map_init_lock);
-static char *compatible_string = "qcom,ipc-spinlock";
+static char *sfpb_compatible_string = "qcom,ipc-spinlock-sfpb";
static int init_hw_mutex(struct device_node *node)
{
@@ -294,7 +296,7 @@
{
struct device_node *node;
- node = of_find_compatible_node(NULL, NULL, compatible_string);
+ node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
if (node) {
init_hw_mutex(node);
} else {
@@ -341,7 +343,9 @@
static int __raw_remote_sfpb_spin_trylock(raw_remote_spinlock_t *lock)
{
- return 1;
+ writel_relaxed(SPINLOCK_PID_APPS, lock);
+ smp_mb();
+ return readl_relaxed(lock) == SPINLOCK_PID_APPS;
}
static void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
@@ -397,6 +401,23 @@
}
+static int dt_node_is_valid(const struct device_node *node)
+{
+ const char *status;
+ int statlen;
+
+ status = of_get_property(node, "status", &statlen);
+ if (status == NULL)
+ return 1;
+
+ if (statlen > 0) {
+ if (!strcmp(status, "okay") || !strcmp(status, "ok"))
+ return 1;
+ }
+
+ return 0;
+}
+
static void initialize_ops(void)
{
struct device_node *node;
@@ -435,23 +456,42 @@
is_hw_lock_type = 1;
break;
case AUTO_MODE:
- node = of_find_compatible_node(NULL, NULL, compatible_string);
- if (node) {
+ /*
+ * of_find_compatible_node() returns a valid pointer even if
+ * the status property is "disabled", so the validity needs
+ * to be checked
+ */
+ node = of_find_compatible_node(NULL, NULL,
+ sfpb_compatible_string);
+ if (node && dt_node_is_valid(node)) {
current_ops.lock = __raw_remote_sfpb_spin_lock;
current_ops.unlock = __raw_remote_sfpb_spin_unlock;
current_ops.trylock = __raw_remote_sfpb_spin_trylock;
current_ops.release = __raw_remote_gen_spin_release;
current_ops.owner = __raw_remote_gen_spin_owner;
is_hw_lock_type = 1;
- } else {
+ break;
+ }
+
+ node = of_find_compatible_node(NULL, NULL,
+ ldrex_compatible_string);
+ if (node && dt_node_is_valid(node)) {
current_ops.lock = __raw_remote_ex_spin_lock;
current_ops.unlock = __raw_remote_ex_spin_unlock;
current_ops.trylock = __raw_remote_ex_spin_trylock;
current_ops.release = __raw_remote_gen_spin_release;
current_ops.owner = __raw_remote_gen_spin_owner;
is_hw_lock_type = 0;
- pr_warn("Falling back to LDREX remote spinlock implementation");
+ break;
}
+
+ current_ops.lock = __raw_remote_ex_spin_lock;
+ current_ops.unlock = __raw_remote_ex_spin_unlock;
+ current_ops.trylock = __raw_remote_ex_spin_trylock;
+ current_ops.release = __raw_remote_gen_spin_release;
+ current_ops.owner = __raw_remote_gen_spin_owner;
+ is_hw_lock_type = 0;
+ pr_warn("Falling back to LDREX remote spinlock implementation");
break;
default:
BUG();
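
The trylock fix above works because the SFPB mutex hardware only latches a
write of the requester's ID while the lock word is free; reading the register
back therefore reveals the current owner. Stripped of the driver context
(sfpb_try_acquire is a hypothetical standalone helper):

    #include <linux/io.h>
    #include <linux/types.h>
    #include <asm/barrier.h>

    static int sfpb_try_acquire(void __iomem *lock, u32 my_pid)
    {
        writel_relaxed(my_pid, lock);
        smp_mb();    /* commit the write before checking ownership */
        return readl_relaxed(lock) == my_pid;
    }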
diff --git a/arch/arm/mach-msm/smd_tty.c b/arch/arm/mach-msm/smd_tty.c
index 1820b23..5969a3c 100644
--- a/arch/arm/mach-msm/smd_tty.c
+++ b/arch/arm/mach-msm/smd_tty.c
@@ -47,7 +47,7 @@
struct smd_tty_info {
smd_channel_t *ch;
- struct tty_struct *tty;
+ struct tty_port port;
struct wake_lock wake_lock;
int open_count;
struct tasklet_struct tty_tsklt;
@@ -125,7 +125,7 @@
unsigned char *ptr;
int avail;
struct smd_tty_info *info = (struct smd_tty_info *)param;
- struct tty_struct *tty = info->tty;
+ struct tty_struct *tty = tty_port_tty_get(&info->port);
unsigned long flags;
if (!tty)
@@ -156,6 +156,7 @@
if (avail <= 0) {
mod_timer(&info->buf_req_timer,
jiffies + msecs_to_jiffies(30));
+ tty_kref_put(tty);
return;
}
@@ -173,11 +174,13 @@
/* XXX only when writable and necessary */
tty_wakeup(tty);
+ tty_kref_put(tty);
}
static void smd_tty_notify(void *priv, unsigned event)
{
struct smd_tty_info *info = priv;
+ struct tty_struct *tty;
unsigned long flags;
switch (event) {
@@ -195,8 +198,10 @@
*/
if (smd_write_avail(info->ch)) {
smd_disable_read_intr(info->ch);
- if (info->tty)
- wake_up_interruptible(&info->tty->write_wait);
+ tty = tty_port_tty_get(&info->port);
+ if (tty)
+ wake_up_interruptible(&tty->write_wait);
+ tty_kref_put(tty);
}
spin_lock_irqsave(&info->ra_lock, flags);
if (smd_read_avail(info->ch)) {
@@ -225,9 +230,11 @@
/* schedule task to send TTY_BREAK */
tasklet_hi_schedule(&info->tty_tsklt);
- if (info->tty->index == LOOPBACK_IDX)
+ tty = tty_port_tty_get(&info->port);
+ if (tty->index == LOOPBACK_IDX)
schedule_delayed_work(&loopback_work,
msecs_to_jiffies(1000));
+ tty_kref_put(tty);
break;
}
}
@@ -241,7 +248,8 @@
return (modem_state & ready_state) == ready_state;
}
-static int smd_tty_open(struct tty_struct *tty, struct file *f)
+static int smd_tty_port_activate(struct tty_port *tport,
+ struct tty_struct *tty)
{
int res = 0;
unsigned int n = tty->index;
@@ -306,8 +314,6 @@
}
}
-
- info->tty = tty;
tasklet_init(&info->tty_tsklt, smd_tty_read,
(unsigned long)info);
wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND,
@@ -354,24 +360,27 @@
return res;
}
-static void smd_tty_close(struct tty_struct *tty, struct file *f)
+static void smd_tty_port_shutdown(struct tty_port *tport)
{
- struct smd_tty_info *info = tty->driver_data;
+ struct smd_tty_info *info;
+ struct tty_struct *tty = tty_port_tty_get(tport);
unsigned long flags;
- if (info == 0)
+ info = tty->driver_data;
+ if (info == 0) {
+ tty_kref_put(tty);
return;
+ }
mutex_lock(&smd_tty_lock);
if (--info->open_count == 0) {
spin_lock_irqsave(&info->reset_lock, flags);
info->is_open = 0;
spin_unlock_irqrestore(&info->reset_lock, flags);
- if (info->tty) {
+ if (tty) {
tasklet_kill(&info->tty_tsklt);
wake_lock_destroy(&info->wake_lock);
wake_lock_destroy(&info->ra_wake_lock);
- info->tty = 0;
}
tty->driver_data = 0;
del_timer(&info->buf_req_timer);
@@ -382,6 +391,21 @@
}
}
mutex_unlock(&smd_tty_lock);
+ tty_kref_put(tty);
+}
+
+static int smd_tty_open(struct tty_struct *tty, struct file *f)
+{
+ struct smd_tty_info *info = smd_tty + tty->index;
+
+ return tty_port_open(&info->port, tty, f);
+}
+
+static void smd_tty_close(struct tty_struct *tty, struct file *f)
+{
+ struct smd_tty_info *info = tty->driver_data;
+
+ tty_port_close(&info->port, tty, f);
}
static int smd_tty_write(struct tty_struct *tty, const unsigned char *buf, int len)
@@ -482,6 +506,11 @@
0, SMSM_SMD_LOOPBACK);
}
+static const struct tty_port_operations smd_tty_port_ops = {
+ .shutdown = smd_tty_port_shutdown,
+ .activate = smd_tty_port_activate,
+};
+
static struct tty_operations smd_tty_ops = {
.open = smd_tty_open,
.close = smd_tty_close,
@@ -523,6 +552,7 @@
int ret;
int n;
int idx;
+ struct tty_port *port;
smd_tty_driver = alloc_tty_driver(MAX_SMD_TTYS);
if (smd_tty_driver == 0)
@@ -578,6 +608,10 @@
continue;
}
+ port = &smd_tty[idx].port;
+ tty_port_init(port);
+ port->ops = &smd_tty_port_ops;
+ /* TODO: For kernel >= 3.7 use tty_port_register_device */
tty_register_device(smd_tty_driver, idx, 0);
init_completion(&smd_tty[idx].ch_allocated);
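
The smd_tty changes above follow the standard tty_port conversion pattern:
open/close delegate to the port helpers, the activate/shutdown callbacks
carry what used to be the open/close bodies, and any path that previously
dereferenced a cached tty pointer now takes a counted reference via
tty_port_tty_get()/tty_kref_put(). A generic sketch, with example_* names
that are illustrative rather than the driver's real symbols (tty_port_init()
and the ops assignment happen at driver-init time, as in the diff):

    #include <linux/tty.h>

    struct example_info {
        struct tty_port port;
    };

    static struct example_info example_table[4];

    static int example_port_activate(struct tty_port *port,
                     struct tty_struct *tty)
    {
        return 0;    /* channel setup (was the open body) */
    }

    static void example_port_shutdown(struct tty_port *port)
    {
        /* channel teardown (was the close body) */
    }

    static const struct tty_port_operations example_port_ops = {
        .activate = example_port_activate,
        .shutdown = example_port_shutdown,
    };

    static int example_open(struct tty_struct *tty, struct file *f)
    {
        struct example_info *info = &example_table[tty->index];

        tty->driver_data = info;
        return tty_port_open(&info->port, tty, f);
    }

    static void example_close(struct tty_struct *tty, struct file *f)
    {
        struct example_info *info = tty->driver_data;

        tty_port_close(&info->port, tty, f);
    }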
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 6314e94..d177b05 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -7,7 +7,6 @@
obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
mmap.o pgd.o mmu.o vmregion.o
-obj-$(CONFIG_DEBUG_RODATA) += rodata.o
ifneq ($(CONFIG_MMU),y)
obj-y += nommu.o
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 0ebc2b9..bf59a9d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -224,7 +224,7 @@
* allocations. This must be the smallest DMA mask in the system,
* so a successful GFP_DMA allocation will always satisfy this.
*/
-u32 arm_dma_limit;
+phys_addr_t arm_dma_limit;
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
unsigned long dma_size)
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 8877ddd..21653f2 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -65,9 +65,9 @@
#endif
#ifdef CONFIG_ZONE_DMA
-extern u32 arm_dma_limit;
+extern phys_addr_t arm_dma_limit;
#else
-#define arm_dma_limit ((u32)~0)
+#define arm_dma_limit ((phys_addr_t)~0)
#endif
struct map_desc;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 8575f78..25cb67c 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -604,53 +604,30 @@
return early_alloc_aligned(sz, sz);
}
-static pte_t * __init early_pte_alloc(pmd_t *pmd)
-{
- if (pmd_none(*pmd) || pmd_bad(*pmd))
- return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
- return pmd_page_vaddr(*pmd);
-}
-
-static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
-{
- __pmd_populate(pmd, __pa(pte), prot);
- BUG_ON(pmd_bad(*pmd));
-}
-
-#ifdef CONFIG_HIGHMEM
-static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
- unsigned long addr, unsigned long prot)
+static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
if (pmd_none(*pmd)) {
- pte_t *pte = early_pte_alloc(pmd);
- early_pte_install(pmd, pte, prot);
+ pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+ __pmd_populate(pmd, __pa(pte), prot);
}
BUG_ON(pmd_bad(*pmd));
return pte_offset_kernel(pmd, addr);
}
-#endif
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
const struct mem_type *type)
{
- pte_t *start_pte = early_pte_alloc(pmd);
- pte_t *pte = start_pte + pte_index(addr);
-
- /* If replacing a section mapping, the whole section must be replaced */
- BUG_ON(pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));
-
+ pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
do {
set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
- early_pte_install(pmd, start_pte, type->prot_l1);
}
static void __init alloc_init_section(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys,
- const struct mem_type *type,
- bool force_pages)
+ const struct mem_type *type)
{
pmd_t *pmd = pmd_offset(pud, addr);
@@ -660,7 +637,7 @@
* L1 entries, whereas PGDs refer to a group of L1 entries making
* up one logical pointer to an L2 table.
*/
- if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0 && !force_pages) {
+ if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
pmd_t *p = pmd;
#ifndef CONFIG_ARM_LPAE
@@ -684,15 +661,14 @@
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
- unsigned long end, unsigned long phys, const struct mem_type *type,
- bool force_pages)
+ unsigned long end, unsigned long phys, const struct mem_type *type)
{
pud_t *pud = pud_offset(pgd, addr);
unsigned long next;
do {
next = pud_addr_end(addr, end);
- alloc_init_section(pud, addr, next, phys, type, force_pages);
+ alloc_init_section(pud, addr, next, phys, type);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
@@ -766,7 +742,7 @@
* offsets, and we take full advantage of sections and
* supersections.
*/
-static void __init create_mapping(struct map_desc *md, bool force_pages)
+static void __init create_mapping(struct map_desc *md)
{
unsigned long addr, length, end;
phys_addr_t phys;
@@ -818,7 +794,7 @@
do {
unsigned long next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys, type, force_pages);
+ alloc_init_pud(pgd, addr, next, phys, type);
phys += next - addr;
addr = next;
@@ -839,7 +815,7 @@
vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
for (md = io_desc; nr; md++, nr--) {
- create_mapping(md, false);
+ create_mapping(md);
vm->addr = (void *)(md->virtual & PAGE_MASK);
vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
vm->phys_addr = __pfn_to_phys(md->pfn);
@@ -1199,12 +1175,12 @@
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
map.type = MT_HIGH_VECTORS;
- create_mapping(&map, false);
+ create_mapping(&map);
if (!vectors_high()) {
map.virtual = 0;
map.type = MT_LOW_VECTORS;
- create_mapping(&map, false);
+ create_mapping(&map);
}
/*
@@ -1224,7 +1200,7 @@
map.virtual = CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE;
map.length = PAGE_SIZE;
map.type = MT_DEVICE_USER_ACCESSIBLE;
- create_mapping(&map, false);
+ create_mapping(&map);
}
}
@@ -1241,7 +1217,7 @@
static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
- pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
+ pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}
@@ -1349,14 +1325,12 @@
static void __init map_lowmem(void)
{
struct memblock_region *reg;
- phys_addr_t start;
- phys_addr_t end;
- struct map_desc map;
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
- start = reg->base;
- end = start + reg->size;
+ phys_addr_t start = reg->base;
+ phys_addr_t end = start + reg->size;
+ struct map_desc map;
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
@@ -1370,28 +1344,28 @@
map.length = SECTION_SIZE;
map.type = MT_MEMORY;
- create_mapping(&map, false);
+ create_mapping(&map);
map.pfn = __phys_to_pfn(start + SECTION_SIZE);
map.virtual = __phys_to_virt(start + SECTION_SIZE);
map.length = (unsigned long)RX_AREA_END - map.virtual;
map.type = MT_MEMORY_RX;
- create_mapping(&map, false);
+ create_mapping(&map);
map.pfn = __phys_to_pfn(__pa(__start_rodata));
map.virtual = (unsigned long)__start_rodata;
map.length = __init_begin - __start_rodata;
map.type = MT_MEMORY_R;
- create_mapping(&map, false);
+ create_mapping(&map);
map.pfn = __phys_to_pfn(__pa(__init_begin));
map.virtual = (unsigned long)__init_begin;
map.length = __init_data - __init_begin;
map.type = MT_MEMORY;
- create_mapping(&map, false);
+ create_mapping(&map);
map.pfn = __phys_to_pfn(__pa(__init_data));
map.virtual = (unsigned long)__init_data;
@@ -1406,20 +1380,8 @@
map.type = MT_MEMORY;
#endif
- create_mapping(&map, false);
+ create_mapping(&map);
}
-
-#ifdef CONFIG_DEBUG_RODATA
- start = __pa(_stext) & PMD_MASK;
- end = ALIGN(__pa(__end_rodata), PMD_SIZE);
-
- map.pfn = __phys_to_pfn(start);
- map.virtual = __phys_to_virt(start);
- map.length = end - start;
- map.type = MT_MEMORY;
-
- create_mapping(&map, true);
-#endif
}
/*
diff --git a/arch/arm/mm/rodata.c b/arch/arm/mm/rodata.c
deleted file mode 100644
index 9a8eb84..0000000
--- a/arch/arm/mm/rodata.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * linux/arch/arm/mm/rodata.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * Author: Colin Cross <ccross@android.com>
- *
- * Based on x86 implementation in arch/x86/mm/init_32.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-
-#include <asm/cache.h>
-#include <asm/pgtable.h>
-#include <asm/rodata.h>
-#include <asm/sections.h>
-#include <asm/tlbflush.h>
-
-#include "mm.h"
-
-static int kernel_set_to_readonly __read_mostly;
-
-#ifdef CONFIG_DEBUG_RODATA_TEST
-static const int rodata_test_data = 0xC3;
-
-static noinline void rodata_test(void)
-{
- int result;
-
- pr_info("%s: attempting to write to read-only section:\n", __func__);
-
- if (*(volatile int *)&rodata_test_data != 0xC3) {
- pr_err("read only data changed before test\n");
- return;
- }
-
- /*
- * Attempt to to write to rodata_test_data, trapping the expected
- * data abort. If the trap executed, result will be 1. If it didn't,
- * result will be 0xFF.
- */
- asm volatile(
- "0: str %[zero], [%[rodata_test_data]]\n"
- " mov %[result], #0xFF\n"
- " b 2f\n"
- "1: mov %[result], #1\n"
- "2:\n"
-
- /* Exception fixup - if store at label 0 faults, jumps to 1 */
- ".pushsection __ex_table, \"a\"\n"
- " .long 0b, 1b\n"
- ".popsection\n"
-
- : [result] "=r" (result)
- : [rodata_test_data] "r" (&rodata_test_data), [zero] "r" (0)
- : "memory"
- );
-
- if (result == 1)
- pr_info("write to read-only section trapped, success\n");
- else
- pr_err("write to read-only section NOT trapped, test failed\n");
-
- if (*(volatile int *)&rodata_test_data != 0xC3)
- pr_err("read only data changed during write\n");
-}
-#else
-static inline void rodata_test(void) { }
-#endif
-
-static int set_page_attributes(unsigned long virt, int numpages,
- pte_t (*f)(pte_t))
-{
- pmd_t *pmd;
- pte_t *pte;
- unsigned long start = virt;
- unsigned long end = virt + (numpages << PAGE_SHIFT);
- unsigned long pmd_end;
-
- while (virt < end) {
- pmd = pmd_off_k(virt);
- pmd_end = min(ALIGN(virt + 1, PMD_SIZE), end);
-
- if ((pmd_val(*pmd) & PMD_TYPE_MASK) != PMD_TYPE_TABLE) {
- pr_err("%s: pmd %p=%08lx for %08lx not page table\n",
- __func__, pmd, pmd_val(*pmd), virt);
- virt = pmd_end;
- continue;
- }
-
- while (virt < pmd_end) {
- pte = pte_offset_kernel(pmd, virt);
- set_pte_ext(pte, f(*pte), 0);
- virt += PAGE_SIZE;
- }
- }
-
- flush_tlb_kernel_range(start, end);
-
- return 0;
-}
-
-int set_memory_ro(unsigned long virt, int numpages)
-{
- return set_page_attributes(virt, numpages, pte_wrprotect);
-}
-EXPORT_SYMBOL(set_memory_ro);
-
-int set_memory_rw(unsigned long virt, int numpages)
-{
- return set_page_attributes(virt, numpages, pte_mkwrite);
-}
-EXPORT_SYMBOL(set_memory_rw);
-
-void set_kernel_text_rw(void)
-{
- unsigned long start = PAGE_ALIGN((unsigned long)_text);
- unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
-
- if (!kernel_set_to_readonly)
- return;
-
- pr_debug("Set kernel text: %lx - %lx to read-write\n",
- start, start + size);
-
- set_memory_rw(start, size >> PAGE_SHIFT);
-}
-
-void set_kernel_text_ro(void)
-{
- unsigned long start = PAGE_ALIGN((unsigned long)_text);
- unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
-
- if (!kernel_set_to_readonly)
- return;
-
- pr_info_once("Write protecting the kernel text section %lx - %lx\n",
- start, start + size);
-
- pr_debug("Set kernel text: %lx - %lx to read only\n",
- start, start + size);
-
- set_memory_ro(start, size >> PAGE_SHIFT);
-}
-
-void mark_rodata_ro(void)
-{
- kernel_set_to_readonly = 1;
-
- set_kernel_text_ro();
-
- rodata_test();
-}
diff --git a/drivers/Kconfig b/drivers/Kconfig
index a73d713..adead10 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -150,4 +150,6 @@
source "drivers/coresight/Kconfig"
+source "drivers/bif/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index f461e83..d55b035 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -142,3 +142,5 @@
obj-$(CONFIG_MOBICORE_SUPPORT) += gud/
obj-$(CONFIG_CORESIGHT) += coresight/
+
+obj-$(CONFIG_BIF) += bif/
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 1937db8..3a8bbc5 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -9,6 +9,9 @@
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License or (at your optional) any later version of the license.
+ *
+ * The Linux Foundation chooses to take subject only to the GPLv2 license
+ * terms, and distributes only under these terms.
*/
#define pr_fmt(fmt) "cma: " fmt
@@ -24,6 +27,9 @@
#include <linux/memblock.h>
#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
@@ -52,8 +58,9 @@
phys_addr_t base;
unsigned long size;
struct cma *cma;
-} cma_areas[MAX_CMA_AREAS] __initdata;
-static unsigned cma_area_count __initdata;
+ const char *name;
+} cma_areas[MAX_CMA_AREAS];
+static unsigned cma_area_count;
static struct cma_map {
@@ -71,6 +78,20 @@
return NULL;
}
+static struct cma *cma_get_area_by_name(const char *name)
+{
+ int i;
+ if (!name)
+ return NULL;
+
+ for (i = 0; i < cma_area_count; i++)
+ if (cma_areas[i].name && strcmp(cma_areas[i].name, name) == 0)
+ return cma_areas[i].cma;
+ return NULL;
+}
+
+
+
#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
@@ -181,6 +202,38 @@
return ERR_PTR(ret);
}
+/*****************************************************************************/
+
+#ifdef CONFIG_OF
+int __init cma_fdt_scan(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ phys_addr_t base, size;
+ unsigned long len;
+ __be32 *prop;
+ char *name;
+
+ if (strncmp(uname, "region@", 7) != 0 || depth != 2 ||
+ !of_get_flat_dt_prop(node, "linux,contiguous-region", NULL))
+ return 0;
+
+ prop = of_get_flat_dt_prop(node, "reg", &len);
+ if (!prop || (len != 2 * sizeof(unsigned long)))
+ return 0;
+
+ base = be32_to_cpu(prop[0]);
+ size = be32_to_cpu(prop[1]);
+
+ name = of_get_flat_dt_prop(node, "label", NULL);
+
+ pr_info("Found %s, memory base %lx, size %ld MiB\n", uname,
+ (unsigned long)base, (unsigned long)size / SZ_1M);
+ dma_contiguous_reserve_area(size, &base, 0, name);
+
+ return 0;
+}
+#endif
+
/**
* dma_contiguous_reserve() - reserve area for contiguous memory handling
* @limit: End address of the reserved memory (optional, 0 for any).
@@ -216,9 +269,13 @@
pr_debug("%s: reserving %ld MiB for global area\n", __func__,
(unsigned long)sel_size / SZ_1M);
- if (dma_contiguous_reserve_area(sel_size, &base, limit) == 0)
+ if (dma_contiguous_reserve_area(sel_size, &base, limit, NULL)
+ == 0)
dma_contiguous_def_base = base;
}
+#ifdef CONFIG_OF
+ of_scan_flat_dt(cma_fdt_scan, NULL);
+#endif
};
/**
@@ -236,7 +293,7 @@
* devices.
*/
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
- phys_addr_t limit)
+ phys_addr_t limit, const char *name)
{
phys_addr_t base = *res_base;
phys_addr_t alignment;
@@ -288,6 +345,7 @@
*/
cma_areas[cma_area_count].base = base;
cma_areas[cma_area_count].size = size;
+ cma_areas[cma_area_count].name = name;
cma_area_count++;
*res_base = base;
@@ -323,6 +381,45 @@
return 0;
}
+#ifdef CONFIG_OF
+static void cma_assign_device_from_dt(struct device *dev)
+{
+ struct device_node *node;
+ struct cma *cma;
+ const char *name;
+ u32 value;
+
+ node = of_parse_phandle(dev->of_node, "linux,contiguous-region", 0);
+ if (!node)
+ return;
+ if (of_property_read_u32(node, "reg", &value) && !value)
+ return;
+
+ if (of_property_read_string(node, "label", &name))
+ return;
+
+ cma = cma_get_area_by_name(name);
+ if (!cma)
+ return;
+
+ dev_set_cma_area(dev, cma);
+ pr_info("Assigned CMA region at %lx to %s device\n", (unsigned long)value, dev_name(dev));
+}
+
+static int cma_device_init_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct device *dev = data;
+ if (event == BUS_NOTIFY_ADD_DEVICE && dev->of_node)
+ cma_assign_device_from_dt(dev);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cma_dev_init_nb = {
+ .notifier_call = cma_device_init_notifier_call,
+};
+#endif
+
static int __init cma_init_reserved_areas(void)
{
struct cma *cma;
@@ -344,6 +441,9 @@
dev_set_cma_area(cma_maps[i].dev, cma);
}
+#ifdef CONFIG_OF
+ bus_register_notifier(&platform_bus_type, &cma_dev_init_nb);
+#endif
return 0;
}
core_initcall(cma_init_reserved_areas);
diff --git a/drivers/bif/Kconfig b/drivers/bif/Kconfig
new file mode 100644
index 0000000..502b92b
--- /dev/null
+++ b/drivers/bif/Kconfig
@@ -0,0 +1,12 @@
+#
+# BIF framework and drivers
+#
+menuconfig BIF
+ bool "MIPI-BIF support"
+ select CRC_CCITT
+ select BITREVERSE
+ help
+ MIPI-BIF (battery interface) is a one-wire serial interface between a
+ host master device and one or more slave devices which are located in
+ a battery pack or also on the host. Enabling this option allows for
+ BIF consumer drivers to issue transactions via BIF controller drivers.
diff --git a/drivers/bif/Makefile b/drivers/bif/Makefile
new file mode 100644
index 0000000..02528c1
--- /dev/null
+++ b/drivers/bif/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for kernel BIF framework.
+#
+obj-$(CONFIG_BIF) += bif-core.o
diff --git a/drivers/bif/bif-core.c b/drivers/bif/bif-core.c
new file mode 100644
index 0000000..e11e6ba4
--- /dev/null
+++ b/drivers/bif/bif-core.c
@@ -0,0 +1,2934 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitrev.h>
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/bif/consumer.h>
+#include <linux/bif/driver.h>
+
+/**
+ * struct bif_ctrl_dev - holds controller device specific information
+ * @list: Doubly-linked list parameter linking to other
+ * BIF controllers registered in the system
+ * @desc: Description structure for this BIF controller
+ * @mutex: Mutex lock that is used to ensure mutual
+ * exclusion between transactions performed on the
+ * BIF bus for this controller
+ * @ctrl_dev: Device pointer to the BIF controller device
+ * @driver_data: Private data used by the BIF controller
+ * @selected_sdev: Slave device that is currently selected on
+ * the BIF bus of this controller
+ * @bus_change_notifier: Head of a notifier list containing notifier
+ * blocks that are notified when the battery
+ * presence changes
+ * @enter_irq_mode_work: Work task that is scheduled after a transaction
+ * completes when there are consumers that are
+ * actively monitoring BIF slave interrupts
+ * @irq_count: Number of BIF slave interrupts that are currently
+ * being monitored across the BIF slaves connected
+ * to this BIF controller
+ * @irq_mode_delay_jiffies: Number of jiffies to wait before scheduling the
+ * enter IRQ mode task. Using a larger value
+ * helps to improve the performance of BIF
+ * consumers that perform many BIF transactions.
+ * Using a smaller value reduces the latency of
+ * BIF slave interrupts.
+ * @battery_present: Cached value of the battery presence. This is
+ * used to filter out spurious presence update
+ * calls when the battery presence state has not
+ * changed.
+ */
+struct bif_ctrl_dev {
+ struct list_head list;
+ struct bif_ctrl_desc *desc;
+ struct mutex mutex;
+ struct device *ctrl_dev;
+ void *driver_data;
+ struct bif_slave_dev *selected_sdev;
+ struct blocking_notifier_head bus_change_notifier;
+ struct delayed_work enter_irq_mode_work;
+ int irq_count;
+ int irq_mode_delay_jiffies;
+ bool battery_present;
+};
+
+/**
+ * struct bif_ctrl - handle used by BIF consumers for bus oriented BIF
+ * operations
+ * @bdev: Pointer to BIF controller device
+ * @exclusive_lock: Flag which indicates that the BIF consumer responsible
+ * for this handle has locked the BIF bus of this
+ * controller. BIF transactions from other consumers are
+ * blocked until the bus is unlocked.
+ */
+struct bif_ctrl {
+ struct bif_ctrl_dev *bdev;
+ bool exclusive_lock;
+};
+
+/**
+ * struct bif_slave_dev - holds BIF slave device information
+ * @list: Doubly-linked list parameter linking to other
+ * BIF slaves that have been enumerated
+ * @bdev: Pointer to the BIF controller device that this
+ * slave is physically connected to
+ * @slave_addr: 8-bit BIF DEV_ADR assigned to this slave
+ * @unique_id: 80-bit BIF unique ID of the slave
+ * @unique_id_bits_known: Number of bits of the UID that are currently
+ * known. This number starts at 0, is incremented
+ * during a UID search, and must reach 80 if the
+ * slave responds to the search properly.
+ * @present: Boolean value showing if this slave is
+ * physically present in the system at a given
+ * point in time. The value is set to false if the
+ * battery pack containing the slave is
+ * disconnected.
+ * @l1_data: BIF DDB L1 data of the slave as read from the
+ * slave's memory
+ * @function_directory: Pointer to the BIF DDB L2 function directory
+ * list as read from the slave's memory
+ * @protocol_function: Pointer to constant protocol function data as
+ * well as software state information if the slave
+ * has a protocol function
+ * @slave_ctrl_function: Pointer to constant slave control function data
+ * as well as software state information if the
+ * slave has a slave control function
+ * @nvm_function: Pointer to constant non-volatile memory function
+ * data as well as software state information if
+ * the slave has a non-volatile memory function
+ *
+ * bif_slave_dev objects are stored indefinitely after enumeration in order to
+ * speed up battery reinsertion. Only a UID check is needed after inserting a
+ * battery assuming it has been enumerated before.
+ *
+ * unique_id bytes are stored such that unique_id[0] = MSB and
+ * unique_id[BIF_UNIQUE_ID_BYTE_LENGTH - 1] = LSB
+ */
+struct bif_slave_dev {
+ struct list_head list;
+ struct bif_ctrl_dev *bdev;
+ u8 slave_addr;
+ u8 unique_id[BIF_UNIQUE_ID_BYTE_LENGTH];
+ int unique_id_bits_known;
+ bool present;
+ struct bif_ddb_l1_data l1_data;
+ struct bif_ddb_l2_data *function_directory;
+ struct bif_protocol_function *protocol_function;
+ struct bif_slave_control_function *slave_ctrl_function;
+ struct bif_nvm_function *nvm_function;
+};
+
+/**
+ * struct bif_slave - handle used by BIF consumers for slave oriented BIF
+ * operations
+ * @ctrl: Consumer BIF controller handle data
+ * @sdev: Pointer to BIF slave device
+ */
+struct bif_slave {
+ struct bif_ctrl ctrl;
+ struct bif_slave_dev *sdev;
+};
+
+/* Number of times to retry a full BIF transaction before returning an error. */
+#define BIF_TRANSACTION_RETRY_COUNT 5
+
+static DEFINE_MUTEX(bif_ctrl_list_mutex);
+static LIST_HEAD(bif_ctrl_list);
+static DEFINE_MUTEX(bif_sdev_list_mutex);
+static LIST_HEAD(bif_sdev_list);
+
+static u8 next_dev_addr = 0x02;
+
+#define DEBUG_PRINT_BUFFER_SIZE 256
+static void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
+{
+ int pos = 0;
+ int i;
+
+ for (i = 0; i < buf_len; i++) {
+ pos += scnprintf(str + pos, str_len - pos, "0x%02X", buf[i]);
+ if (i < buf_len - 1)
+ pos += scnprintf(str + pos, str_len - pos, ", ");
+ }
+}
+
+static void bif_print_slave_data(struct bif_slave_dev *sdev)
+{
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+ u8 *uid;
+ int i, j;
+ struct bif_object *object;
+
+ if (sdev->unique_id_bits_known != BIF_UNIQUE_ID_BIT_LENGTH)
+ return;
+
+ uid = sdev->unique_id;
+ pr_debug("BIF slave: 0x%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ uid[0], uid[1], uid[2], uid[3], uid[4], uid[5], uid[6],
+ uid[7], uid[8], uid[9]);
+ pr_debug(" present=%d, dev_adr=0x%02X\n", sdev->present,
+ sdev->slave_addr);
+ pr_debug(" revision=0x%02X, level=0x%02X, device class=0x%04X\n",
+ sdev->l1_data.revision, sdev->l1_data.level,
+ sdev->l1_data.device_class);
+ pr_debug(" manufacturer ID=0x%04X, product ID=0x%04X\n",
+ sdev->l1_data.manufacturer_id, sdev->l1_data.product_id);
+ pr_debug(" function directory length=%d\n", sdev->l1_data.length);
+
+ for (i = 0; i < sdev->l1_data.length / 4; i++) {
+ pr_debug(" Function %d: type=0x%02X, version=0x%02X, pointer=0x%04X\n",
+ i, sdev->function_directory[i].function_type,
+ sdev->function_directory[i].function_version,
+ sdev->function_directory[i].function_pointer);
+ }
+
+ if (sdev->nvm_function) {
+ pr_debug(" NVM function: pointer=0x%04X, task=%d, wr_buf_size=%d, nvm_base=0x%04X, nvm_size=%d\n",
+ sdev->nvm_function->nvm_pointer,
+ sdev->nvm_function->slave_control_channel,
+ (sdev->nvm_function->write_buffer_size
+ ? sdev->nvm_function->write_buffer_size : 0),
+ sdev->nvm_function->nvm_base_address,
+ sdev->nvm_function->nvm_size);
+ if (sdev->nvm_function->object_count)
+ pr_debug(" NVM objects:\n");
+ i = 0;
+ list_for_each_entry(object, &sdev->nvm_function->object_list,
+ list) {
+ pr_debug(" Object %d - addr=0x%04X, data len=%d, type=0x%02X, version=0x%02X, manufacturer ID=0x%04X, crc=0x%04X\n",
+ i, object->addr, object->length - 8,
+ object->type, object->version,
+ object->manufacturer_id, object->crc);
+ for (j = 0; j < DIV_ROUND_UP(object->length - 8, 16);
+ j++) {
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE,
+ object->data + j * 16,
+ min(16, object->length - 8 - (j * 16)));
+ pr_debug(" data(0x%04X): %s\n", j * 16,
+ str);
+ }
+ i++;
+ }
+ }
+}
+
+static void bif_print_slaves(void)
+{
+ struct bif_slave_dev *sdev;
+
+ mutex_lock(&bif_sdev_list_mutex);
+
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ /* Skip slaves without fully known UIDs. */
+ if (sdev->unique_id_bits_known != BIF_UNIQUE_ID_BIT_LENGTH)
+ continue;
+ bif_print_slave_data(sdev);
+ }
+
+ mutex_unlock(&bif_sdev_list_mutex);
+}
+
+static struct bif_slave_dev *bif_add_slave(struct bif_ctrl_dev *bdev)
+{
+ struct bif_slave_dev *sdev;
+
+ sdev = kzalloc(sizeof(struct bif_slave_dev), GFP_KERNEL);
+ if (sdev == NULL) {
+ pr_err("Memory allocation failed for bif_slave_dev\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sdev->bdev = bdev;
+ INIT_LIST_HEAD(&sdev->list);
+ list_add_tail(&sdev->list, &bif_sdev_list);
+
+ return sdev;
+}
+
+static void bif_remove_slave(struct bif_slave_dev *sdev)
+{
+ list_del(&sdev->list);
+ if (sdev->bdev->selected_sdev == sdev)
+ sdev->bdev->selected_sdev = NULL;
+
+ if (sdev->slave_ctrl_function)
+ kfree(sdev->slave_ctrl_function->irq_notifier_list);
+ kfree(sdev->slave_ctrl_function);
+ kfree(sdev->protocol_function);
+ kfree(sdev->function_directory);
+
+ kfree(sdev);
+}
+
+/* Set or clear a single UID bit; bit 0 corresponds to the MSB of uid[0]. */
+static void set_uid_bit(u8 uid[BIF_UNIQUE_ID_BYTE_LENGTH], unsigned int bit,
+ unsigned int value)
+{
+ u8 mask;
+
+ if (bit >= BIF_UNIQUE_ID_BIT_LENGTH)
+ return;
+
+ mask = 1 << (7 - (bit % 8));
+
+ uid[bit / 8] &= ~mask;
+ uid[bit / 8] |= value << (7 - (bit % 8));
+}
+
+static unsigned int get_uid_bit(u8 uid[BIF_UNIQUE_ID_BYTE_LENGTH],
+ unsigned int bit)
+{
+ if (bit >= BIF_UNIQUE_ID_BIT_LENGTH)
+ return 0;
+
+ return (uid[bit / 8] & (1 << (7 - (bit % 8)))) ? 1 : 0;
+}
+
+static void bif_enter_irq_mode_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct bif_ctrl_dev *bdev
+ = container_of(dwork, struct bif_ctrl_dev, enter_irq_mode_work);
+ int rc, i;
+
+ mutex_lock(&bdev->mutex);
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ rc = bdev->desc->ops->set_bus_state(bdev,
+ BIF_BUS_STATE_INTERRUPT);
+ if (rc == 0)
+ break;
+ }
+ mutex_unlock(&bdev->mutex);
+
+ /* Reschedule the task if the transaction failed. */
+ if (rc) {
+ pr_err("Could not set BIF bus to interrupt mode, rc=%d\n", rc);
+ schedule_delayed_work(&bdev->enter_irq_mode_work,
+ bdev->irq_mode_delay_jiffies);
+ }
+}
+
+static void bif_cancel_irq_mode_work(struct bif_ctrl_dev *bdev)
+{
+ cancel_delayed_work(&bdev->enter_irq_mode_work);
+}
+
+static void bif_schedule_irq_mode_work(struct bif_ctrl_dev *bdev)
+{
+ if (bdev->irq_count > 0 &&
+ bdev->desc->ops->get_bus_state(bdev) != BIF_BUS_STATE_INTERRUPT)
+ schedule_delayed_work(&bdev->enter_irq_mode_work,
+ bdev->irq_mode_delay_jiffies);
+}
+
+static int _bif_select_slave_no_retry(struct bif_slave_dev *sdev)
+{
+ struct bif_ctrl_dev *bdev = sdev->bdev;
+ int rc = 0;
+ int i;
+
+ /* Check if the slave is already selected. */
+ if (sdev->bdev->selected_sdev == sdev)
+ return 0;
+
+ if (sdev->slave_addr) {
+ /* Select using DEV_ADR. */
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_SDA,
+ sdev->slave_addr);
+ if (!rc)
+ sdev->bdev->selected_sdev = sdev;
+ } else if (sdev->unique_id_bits_known == BIF_UNIQUE_ID_BIT_LENGTH) {
+ /* Select using full UID. */
+ for (i = 0; i < BIF_UNIQUE_ID_BYTE_LENGTH - 1; i++) {
+ rc = bdev->desc->ops->bus_transaction(bdev,
+ BIF_TRANS_EDA, sdev->unique_id[i]);
+ if (rc)
+ goto out;
+ }
+
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_SDA,
+ sdev->unique_id[BIF_UNIQUE_ID_BYTE_LENGTH - 1]);
+ if (rc)
+ goto out;
+ } else {
+ pr_err("Cannot select slave because it has neither UID nor DEV_ADR.\n");
+ return -EINVAL;
+ }
+
+ sdev->bdev->selected_sdev = sdev;
+
+ return 0;
+out:
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+}
+
+static int bif_select_slave(struct bif_slave_dev *sdev)
+{
+ int rc = -EPERM;
+ int i;
+
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ rc = _bif_select_slave_no_retry(sdev);
+ if (rc == 0)
+ break;
+ /* Force slave reselection. */
+ sdev->bdev->selected_sdev = NULL;
+ }
+
+ return rc;
+}
+
+/*
+ * Returns 1 if slave is selected, 0 if slave is not selected, or errno if
+ * error.
+ */
+static int bif_is_slave_selected(struct bif_ctrl_dev *bdev)
+{
+ int rc = -EPERM;
+ int tack, i;
+
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ /* Attempt a transaction query. */
+ rc = bdev->desc->ops->bus_transaction_read(bdev, BIF_TRANS_BC,
+ BIF_CMD_TQ, &tack);
+ if (rc == 0 || rc == -ETIMEDOUT)
+ break;
+ }
+
+ if (rc == 0)
+ rc = 1;
+ else if (rc == -ETIMEDOUT)
+ rc = 0;
+ else
+ pr_err("BIF bus_transaction_read failed, rc=%d\n", rc);
+
+ return rc;
+}
+
+/* Read from a specified number of consecutive registers. */
+static int _bif_slave_read_no_retry(struct bif_slave_dev *sdev, u16 addr,
+ u8 *buf, int len)
+{
+ struct bif_ctrl_dev *bdev = sdev->bdev;
+ int rc = 0;
+ int i, response;
+
+ rc = bif_select_slave(sdev);
+ if (rc)
+ return rc;
+
+ if (bdev->desc->ops->read_slave_registers) {
+ /*
+ * Use low level slave register read implementation in order to
+ * receive the benefits of BIF burst reads.
+ */
+ rc = bdev->desc->ops->read_slave_registers(bdev, addr, buf,
+ len);
+ if (rc)
+ pr_err("read_slave_registers failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < len; i++) {
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_ERA,
+ addr >> 8);
+ if (rc) {
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = bdev->desc->ops->bus_transaction_read(bdev, BIF_TRANS_RRA,
+ addr & 0xFF, &response);
+ if (rc) {
+ pr_err("bus_transaction_read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (!(response & BIF_SLAVE_RD_ACK)) {
+ pr_err("BIF register read error=0x%02X\n",
+ response & BIF_SLAVE_RD_ERR);
+ return -EIO;
+ }
+
+ buf[i] = response & BIF_SLAVE_RD_DATA;
+ addr++;
+ }
+
+ return rc;
+}
+
+/*
+ * Read from a specified number of consecutive registers. Retry the transaction
+ * several times in case of communication failures.
+ */
+static int _bif_slave_read(struct bif_slave_dev *sdev, u16 addr, u8 *buf,
+ int len)
+{
+ int rc = -EPERM;
+ int i;
+
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ rc = _bif_slave_read_no_retry(sdev, addr, buf, len);
+ if (rc == 0)
+ break;
+ /* Force slave reselection. */
+ sdev->bdev->selected_sdev = NULL;
+ }
+
+ return rc;
+}
+
+/* Write to a specified number of consecutive registers. */
+static int _bif_slave_write_no_retry(struct bif_slave_dev *sdev, u16 addr,
+ u8 *buf, int len)
+{
+ struct bif_ctrl_dev *bdev = sdev->bdev;
+ int rc = 0;
+ int i;
+
+ rc = bif_select_slave(sdev);
+ if (rc)
+ return rc;
+
+ if (bdev->desc->ops->write_slave_registers) {
+ /*
+ * Use low level slave register write implementation in order to
+ * receive the benefits of BIF burst writes.
+ */
+ rc = bdev->desc->ops->write_slave_registers(bdev, addr, buf,
+ len);
+ if (rc)
+ pr_err("write_slave_registers failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_ERA, addr >> 8);
+ if (rc)
+ goto out;
+
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_WRA, addr & 0xFF);
+ if (rc)
+ goto out;
+
+ for (i = 0; i < len; i++) {
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_WD,
+ buf[i]);
+ if (rc)
+ goto out;
+ }
+
+ return 0;
+out:
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Write to a specified number of consecutive registers. Retry the transaction
+ * several times in case of communication failures.
+ */
+static int _bif_slave_write(struct bif_slave_dev *sdev, u16 addr, u8 *buf,
+ int len)
+{
+ int rc = -EPERM;
+ int i;
+
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ rc = _bif_slave_write_no_retry(sdev, addr, buf, len);
+ if (rc == 0)
+ break;
+ /* Force slave reselection. */
+ sdev->bdev->selected_sdev = NULL;
+ }
+
+ return rc;
+}
+
+/* Takes a mutex if this consumer is not an exclusive bus user. */
+static void bif_ctrl_lock(struct bif_ctrl *ctrl)
+{
+ if (!ctrl->exclusive_lock) {
+ mutex_lock(&ctrl->bdev->mutex);
+ bif_cancel_irq_mode_work(ctrl->bdev);
+ }
+}
+
+/* Releases a mutex if this consumer is not an exclusive bus user. */
+static void bif_ctrl_unlock(struct bif_ctrl *ctrl)
+{
+ if (!ctrl->exclusive_lock) {
+ bif_schedule_irq_mode_work(ctrl->bdev);
+ mutex_unlock(&ctrl->bdev->mutex);
+ }
+}
+
+static void bif_slave_ctrl_lock(struct bif_slave *slave)
+{
+ bif_ctrl_lock(&slave->ctrl);
+}
+
+static void bif_slave_ctrl_unlock(struct bif_slave *slave)
+{
+ bif_ctrl_unlock(&slave->ctrl);
+}
+
+static int bif_check_task(struct bif_slave *slave, unsigned int task)
+{
+ if (IS_ERR_OR_NULL(slave)) {
+ pr_err("Invalid slave handle.\n");
+ return -EINVAL;
+ } else if (!slave->sdev->bdev) {
+ pr_err("BIF controller has been removed.\n");
+ return -ENXIO;
+ } else if (!slave->sdev->slave_ctrl_function
+ || slave->sdev->slave_ctrl_function->task_count == 0) {
+ pr_err("BIF slave does not support slave control.\n");
+ return -ENODEV;
+ } else if (task >= slave->sdev->slave_ctrl_function->task_count) {
+ pr_err("Requested task: %u greater than max: %u for this slave\n",
+ task, slave->sdev->slave_ctrl_function->task_count);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * bif_request_irq() - request a BIF slave IRQ by slave task number
+ * @slave: BIF slave handle
+ * @task: BIF task number of the IRQ inside of the slave. This
+ * corresponds to the slave control channel specified for a given
+ * BIF function inside of the slave.
+ * @nb: Notifier block to call when the IRQ fires
+ *
+ * This function registers a notifier block to call when the BIF slave interrupt
+ * is triggered and also enables the interrupt. The interrupt is enabled inside
+ * of the BIF slave's slave control function, and the BIF bus is put into
+ * interrupt mode.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_request_irq(struct bif_slave *slave, unsigned int task,
+ struct notifier_block *nb)
+{
+ int rc;
+ u16 addr;
+ u8 reg, mask;
+
+ rc = bif_check_task(slave, task);
+ if (rc) {
+ pr_err("Invalid slave or task, rc=%d\n", rc);
+ return rc;
+ }
+
+ bif_slave_ctrl_lock(slave);
+
+ rc = blocking_notifier_chain_register(
+ &slave->sdev->slave_ctrl_function->irq_notifier_list[task], nb);
+ if (rc) {
+ pr_err("Notifier registration failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ /* Enable the interrupt within the slave */
+ mask = BIT(task % SLAVE_CTRL_TASKS_PER_SET);
+ addr = SLAVE_CTRL_FUNC_IRQ_EN_ADDR(
+ slave->sdev->slave_ctrl_function->slave_ctrl_pointer, task);
+ if (task / SLAVE_CTRL_TASKS_PER_SET == 0) {
+ /* Set global interrupt enable. */
+ mask |= BIT(0);
+ }
+ rc = _bif_slave_read(slave->sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register read failed, rc=%d\n", rc);
+ goto notifier_unregister;
+ }
+ reg |= mask;
+ rc = _bif_slave_write(slave->sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register write failed, rc=%d\n", rc);
+ goto notifier_unregister;
+ }
+
+ /* Set global interrupt enable if task not in set 0. */
+ if (task / SLAVE_CTRL_TASKS_PER_SET != 0) {
+ mask = BIT(0);
+ addr = SLAVE_CTRL_FUNC_IRQ_EN_ADDR(
+ slave->sdev->slave_ctrl_function->slave_ctrl_pointer, 0);
+ rc = _bif_slave_read(slave->sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register read failed, rc=%d\n", rc);
+ goto notifier_unregister;
+ }
+ reg |= mask;
+ rc = _bif_slave_write(slave->sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register write failed, rc=%d\n", rc);
+ goto notifier_unregister;
+ }
+ }
+
+ rc = slave->sdev->bdev->desc->ops->set_bus_state(slave->sdev->bdev,
+ BIF_BUS_STATE_INTERRUPT);
+ if (rc) {
+ pr_err("Could not set BIF bus to interrupt mode, rc=%d\n", rc);
+ goto notifier_unregister;
+ }
+
+ slave->sdev->bdev->irq_count++;
+done:
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+
+notifier_unregister:
+ blocking_notifier_chain_unregister(
+ &slave->sdev->slave_ctrl_function->irq_notifier_list[task],
+ nb);
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+
+}
+EXPORT_SYMBOL(bif_request_irq);
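For reference, a consumer would typically pair bif_request_irq() with a standard notifier callback along the lines of this sketch (assuming <linux/bif/consumer.h>; the names are hypothetical, and the slave handle and task number are assumed to come from earlier enumeration). The chain passes the task number as the event value and the controller device pointer as the data argument.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/bif/consumer.h>

static int foo_bif_irq_notifier(struct notifier_block *nb,
				unsigned long event, void *data)
{
	/* 'event' carries the slave task number that fired. */
	pr_info("BIF slave task %lu interrupt received\n", event);
	return NOTIFY_OK;
}

static struct notifier_block foo_bif_irq_nb = {
	.notifier_call = foo_bif_irq_notifier,
};

static int foo_enable_bif_irq(struct bif_slave *slave, unsigned int task)
{
	int rc;

	rc = bif_request_irq(slave, task, &foo_bif_irq_nb);
	if (rc)
		pr_err("bif_request_irq failed, rc=%d\n", rc);
	return rc;
}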
+
+/**
+ * bif_free_irq() - free a BIF slave IRQ by slave task number
+ * @slave: BIF slave handle
+ * @task: BIF task number of the IRQ inside of the slave. This
+ * corresponds to the slave control channel specified for a given
+ * BIF function inside of the slave.
+ * @nb: Notifier block previously registered with this interrupt
+ *
+ * This function unregisters a notifier block that was previously registered
+ * with bif_request_irq().
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_free_irq(struct bif_slave *slave, unsigned int task,
+ struct notifier_block *nb)
+{
+ int rc;
+ u16 addr;
+ u8 reg;
+
+ rc = bif_check_task(slave, task);
+ if (rc) {
+ pr_err("Invalid slave or task, rc=%d\n", rc);
+ return rc;
+ }
+
+ bif_slave_ctrl_lock(slave);
+
+ /* Disable the interrupt within the slave */
+ reg = BIT(task % SLAVE_CTRL_TASKS_PER_SET);
+ addr = SLAVE_CTRL_FUNC_IRQ_CLEAR_ADDR(
+ slave->sdev->slave_ctrl_function->slave_ctrl_pointer, task);
+ rc = _bif_slave_write(slave->sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register write failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ rc = blocking_notifier_chain_unregister(
+ &slave->sdev->slave_ctrl_function->irq_notifier_list[task], nb);
+ if (rc) {
+ pr_err("Notifier unregistration failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ slave->sdev->bdev->irq_count--;
+
+ if (slave->sdev->bdev->irq_count == 0) {
+ bif_cancel_irq_mode_work(slave->sdev->bdev);
+ } else if (slave->sdev->bdev->irq_count < 0) {
+ pr_err("Unbalanced IRQ free.\n");
+ rc = -EINVAL;
+ slave->sdev->bdev->irq_count = 0;
+ }
+done:
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_free_irq);
+
+/**
+ * bif_trigger_task() - trigger a task within a BIF slave
+ * @slave: BIF slave handle
+ * @task: BIF task inside of the slave to trigger. This corresponds to
+ * the slave control channel specified for a given BIF function
+ * inside of the slave.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_trigger_task(struct bif_slave *slave, unsigned int task)
+{
+ int rc;
+ u16 addr;
+ u8 reg;
+
+ rc = bif_check_task(slave, task);
+ if (rc) {
+ pr_err("Invalid slave or task, rc=%d\n", rc);
+ return rc;
+ }
+
+ bif_slave_ctrl_lock(slave);
+
+ /* Trigger the task within the slave. */
+ reg = BIT(task % SLAVE_CTRL_TASKS_PER_SET);
+ addr = SLAVE_CTRL_FUNC_TASK_TRIGGER_ADDR(
+ slave->sdev->slave_ctrl_function->slave_ctrl_pointer, task);
+ rc = _bif_slave_write(slave->sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register write failed, rc=%d\n", rc);
+ goto done;
+ }
+
+done:
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_trigger_task);
+
+/**
+ * bif_task_is_busy() - checks the state of a BIF slave task
+ * @slave: BIF slave handle
+ * @task: BIF task inside of the slave to trigger. This corresponds to
+ * the slave control channel specified for a given BIF function
+ * inside of the slave.
+ *
+ * Returns 1 if the task is busy, 0 if it is not busy, and errno on error.
+ */
+int bif_task_is_busy(struct bif_slave *slave, unsigned int task)
+{
+ int rc;
+ u16 addr;
+ u8 reg;
+
+ rc = bif_check_task(slave, task);
+ if (rc) {
+ pr_err("Invalid slave or task, rc=%d\n", rc);
+ return rc;
+ }
+
+ bif_slave_ctrl_lock(slave);
+
+ /* Check the task busy state. */
+ addr = SLAVE_CTRL_FUNC_TASK_BUSY_ADDR(
+ slave->sdev->slave_ctrl_function->slave_ctrl_pointer, task);
+ rc = _bif_slave_read(slave->sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register read failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ rc = (reg & BIT(task % SLAVE_CTRL_TASKS_PER_SET)) ? 1 : 0;
+done:
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_task_is_busy);
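A hedged sketch of how a consumer might combine the two calls above: trigger a slave task, then poll its busy bit until completion. The poll interval, retry count, and function name are illustrative, and the slave handle and task number are assumed to be valid.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/bif/consumer.h>

static int foo_run_bif_task(struct bif_slave *slave, unsigned int task)
{
	int rc, tries = 100;

	rc = bif_trigger_task(slave, task);
	if (rc)
		return rc;

	/* bif_task_is_busy() returns 1 while busy, 0 when idle, errno on error. */
	while (tries--) {
		rc = bif_task_is_busy(slave, task);
		if (rc <= 0)
			return rc;
		msleep(10);
	}

	return -ETIMEDOUT;
}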
+
+static int bif_slave_notify_irqs(struct bif_slave_dev *sdev, int set, u8 val)
+{
+ int rc = 0;
+ int i, task;
+
+ for (i = 0; i < SLAVE_CTRL_TASKS_PER_SET; i++) {
+ if (val & (1 << i)) {
+ task = set * SLAVE_CTRL_TASKS_PER_SET + i;
+
+ rc = blocking_notifier_call_chain(
+ &sdev->slave_ctrl_function->irq_notifier_list[task],
+ task, sdev->bdev);
+ rc = notifier_to_errno(rc);
+ if (rc)
+ pr_err("Notification failed for task %d\n",
+ task);
+ }
+ }
+
+ return rc;
+}
+
+static int bif_slave_handle_irq(struct bif_slave_dev *sdev)
+{
+ struct bif_ctrl_dev *bdev = sdev->bdev;
+ bool resp = false;
+ int rc = 0;
+ int i;
+ u16 addr;
+ u8 reg;
+
+ mutex_lock(&sdev->bdev->mutex);
+ bif_cancel_irq_mode_work(sdev->bdev);
+
+ rc = bif_select_slave(sdev);
+ if (rc) {
+ pr_err("Could not select slave, rc=%d\n", rc);
+ goto done;
+ }
+
+ /* Check overall slave interrupt status. */
+ rc = bdev->desc->ops->bus_transaction_query(bdev, BIF_TRANS_BC,
+ BIF_CMD_ISTS, &resp);
+ if (rc) {
+ pr_err("Could not query slave interrupt status, rc=%d\n", rc);
+ goto done;
+ }
+
+ if (resp) {
+ for (i = 0; i < sdev->slave_ctrl_function->task_count
+ / SLAVE_CTRL_TASKS_PER_SET; i++) {
+ addr = sdev->slave_ctrl_function->slave_ctrl_pointer
+ + 4 * i + 1;
+ rc = _bif_slave_read(sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register read failed, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ /* Ensure that interrupts are pending in the set. */
+ if (reg != 0x00) {
+ /*
+ * Release mutex before notifying consumers so
+ * that they can use the bus.
+ */
+ mutex_unlock(&sdev->bdev->mutex);
+ rc = bif_slave_notify_irqs(sdev, i, reg);
+ if (rc) {
+ pr_err("BIF slave irq notification failed, rc=%d\n",
+ rc);
+ goto notification_failed;
+ }
+ mutex_lock(&sdev->bdev->mutex);
+
+ rc = bif_select_slave(sdev);
+ if (rc) {
+ pr_err("Could not select slave, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ /* Clear all interrupts in this set. */
+ rc = _bif_slave_write(sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register write failed, rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+ }
+ }
+
+done:
+ bif_schedule_irq_mode_work(sdev->bdev);
+ mutex_unlock(&sdev->bdev->mutex);
+notification_failed:
+ if (rc == 0)
+ rc = resp;
+ return rc;
+}
+
+/**
+ * bif_ctrl_notify_slave_irq() - notify the BIF framework that a slave interrupt
+ * was received by a BIF controller
+ * @bdev: BIF controller device pointer
+ *
+ * This function should only be called from a BIF controller driver.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_notify_slave_irq(struct bif_ctrl_dev *bdev)
+{
+ struct bif_slave_dev *sdev;
+ int rc = 0, handled = 0;
+
+ if (IS_ERR_OR_NULL(bdev))
+ return -EINVAL;
+
+ mutex_lock(&bif_sdev_list_mutex);
+
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ if (sdev->bdev == bdev && sdev->present) {
+ rc = bif_slave_handle_irq(sdev);
+ if (rc < 0) {
+ pr_err("Could not handle BIF slave irq, rc=%d\n",
+ rc);
+ break;
+ }
+ handled += rc;
+ }
+ }
+
+ mutex_unlock(&bif_sdev_list_mutex);
+
+ if (handled == 0)
+ pr_info("Spurious BIF slave interrupt detected.\n");
+
+ if (rc > 0)
+ rc = 0;
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_notify_slave_irq);
+
+/**
+ * bif_ctrl_notify_battery_changed() - notify the BIF framework that a battery
+ * pack has been inserted or removed
+ * @bdev: BIF controller device pointer
+ *
+ * This function should only be called from a BIF controller driver.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_notify_battery_changed(struct bif_ctrl_dev *bdev)
+{
+ int rc = 0;
+ int present;
+
+ if (IS_ERR_OR_NULL(bdev))
+ return -EINVAL;
+
+ if (bdev->desc->ops->get_battery_presence) {
+ present = bdev->desc->ops->get_battery_presence(bdev);
+ if (present < 0) {
+ pr_err("Could not determine battery presence, rc=%d\n",
+ present);
+ return present;
+ }
+
+ if (bdev->battery_present == !!present)
+ return 0;
+
+ bdev->battery_present = present;
+
+ rc = blocking_notifier_call_chain(&bdev->bus_change_notifier,
+ present ? BIF_BUS_EVENT_BATTERY_INSERTED
+ : BIF_BUS_EVENT_BATTERY_REMOVED, bdev);
+ if (rc)
+ pr_err("Call chain noification failed, rc=%d\n", rc);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_notify_battery_changed);
+
+/**
+ * bif_ctrl_signal_battery_changed() - notify the BIF framework that a battery
+ * pack has been inserted or removed
+ * @ctrl: BIF controller consumer handle
+ *
+ * This function should only be called by a BIF consumer driver on systems where
+ * the BIF controller driver is unable to determine when a battery is inserted
+ * or removed.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_signal_battery_changed(struct bif_ctrl *ctrl)
+{
+ if (IS_ERR_OR_NULL(ctrl))
+ return -EINVAL;
+
+ return bif_ctrl_notify_battery_changed(ctrl->bdev);
+}
+EXPORT_SYMBOL(bif_ctrl_signal_battery_changed);
+
+/**
+ * bif_ctrl_notifier_register() - register a notifier block to be called when
+ * a battery pack is inserted or removed
+ * @ctrl: BIF controller consumer handle
+ *
+ * The value passed into the notifier when it is called is one of
+ * enum bif_bus_event.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_notifier_register(struct bif_ctrl *ctrl, struct notifier_block *nb)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl))
+ return -EINVAL;
+
+ rc = blocking_notifier_chain_register(&ctrl->bdev->bus_change_notifier,
+ nb);
+ if (rc)
+ pr_err("Notifier registration failed, rc=%d\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_notifier_register);
+
+/**
+ * bif_ctrl_notifier_unregister() - unregister a battery status change notifier
+ * block that was previously registered
+ * @ctrl: BIF controller consumer handle
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_notifier_unregister(struct bif_ctrl *ctrl,
+ struct notifier_block *nb)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl))
+ return -EINVAL;
+
+ rc = blocking_notifier_chain_unregister(
+ &ctrl->bdev->bus_change_notifier, nb);
+ if (rc)
+ pr_err("Notifier unregistration failed, rc=%d\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_notifier_unregister);
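A consumer interested in battery pack insertion and removal would register against the bus change notifier roughly as follows (names are hypothetical; the event values are the ones passed by bif_ctrl_notify_battery_changed() above, and the controller handle is assumed to come from bif_ctrl_get() or bif_ctrl_get_by_id()).

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/bif/consumer.h>

static int foo_batt_notifier(struct notifier_block *nb, unsigned long event,
				void *data)
{
	if (event == BIF_BUS_EVENT_BATTERY_INSERTED)
		pr_info("battery pack inserted\n");
	else if (event == BIF_BUS_EVENT_BATTERY_REMOVED)
		pr_info("battery pack removed\n");

	return NOTIFY_OK;
}

static struct notifier_block foo_batt_nb = {
	.notifier_call = foo_batt_notifier,
};

static int foo_watch_battery(struct bif_ctrl *ctrl)
{
	return bif_ctrl_notifier_register(ctrl, &foo_batt_nb);
}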
+
+/**
+ * bif_get_bus_handle() - returns the BIF controller consumer handle associated
+ * with a BIF slave handle
+ * @slave: BIF slave handle
+ *
+ * Note, bif_ctrl_put() should never be called for the pointer output by
+ * bif_get_bus_handle().
+ */
+struct bif_ctrl *bif_get_bus_handle(struct bif_slave *slave)
+{
+ if (IS_ERR_OR_NULL(slave))
+ return ERR_PTR(-EINVAL);
+
+ return &slave->ctrl;
+}
+EXPORT_SYMBOL(bif_get_bus_handle);
+
+/**
+ * bif_ctrl_count() - returns the number of registered BIF controllers
+ */
+int bif_ctrl_count(void)
+{
+ struct bif_ctrl_dev *bdev;
+ int count = 0;
+
+ mutex_lock(&bif_ctrl_list_mutex);
+
+ list_for_each_entry(bdev, &bif_ctrl_list, list) {
+ count++;
+ }
+ mutex_unlock(&bif_ctrl_list_mutex);
+
+ return count;
+}
+EXPORT_SYMBOL(bif_ctrl_count);
+
+/**
+ * bif_ctrl_get_by_id() - get a handle for the id'th BIF controller registered
+ * in the system
+ * @id: Arbitrary number associated with the BIF bus in the system
+ *
+ * id must be in the range [0, bif_ctrl_count() - 1]. This function should only
+ * need to be called by a BIF consumer that is unable to link to a given BIF
+ * controller via a device tree binding.
+ *
+ * Returns a BIF controller consumer handle if successful or an ERR_PTR if not.
+ */
+struct bif_ctrl *bif_ctrl_get_by_id(unsigned int id)
+{
+ struct bif_ctrl_dev *bdev;
+ struct bif_ctrl_dev *bdev_found = NULL;
+ struct bif_ctrl *ctrl = ERR_PTR(-ENODEV);
+
+ mutex_lock(&bif_ctrl_list_mutex);
+
+ list_for_each_entry(bdev, &bif_ctrl_list, list) {
+ if (id == 0) {
+ bdev_found = bdev;
+ break;
+ }
+ id--;
+ }
+ mutex_unlock(&bif_ctrl_list_mutex);
+
+ if (bdev_found) {
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl) {
+ pr_err("Bus handle allocation failed\n");
+ ctrl = ERR_PTR(-ENOMEM);
+ } else {
+ ctrl->bdev = bdev_found;
+ }
+ }
+
+ return ctrl;
+}
+EXPORT_SYMBOL(bif_ctrl_get_by_id);
+
+/**
+ * bif_ctrl_get() - get a handle for the BIF controller that is linked to the
+ * consumer device in the device tree
+ * @consumer_dev: Pointer to the consumer's device
+ *
+ * In order to use this function, the BIF consumer's device must specify the
+ * "qcom,bif-ctrl" property in its device tree node which points to a BIF
+ * controller device node.
+ *
+ * Returns a BIF controller consumer handle if successful or an ERR_PTR if not.
+ * If the BIF controller linked to the consumer device has not yet probed, then
+ * ERR_PTR(-EPROBE_DEFER) is returned.
+ */
+struct bif_ctrl *bif_ctrl_get(struct device *consumer_dev)
+{
+ struct device_node *ctrl_node = NULL;
+ struct bif_ctrl_dev *bdev_found = NULL;
+ struct bif_ctrl *ctrl = ERR_PTR(-EPROBE_DEFER);
+ struct bif_ctrl_dev *bdev = NULL;
+
+ if (!consumer_dev || !consumer_dev->of_node) {
+ pr_err("Invalid device node\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctrl_node = of_parse_phandle(consumer_dev->of_node, "qcom,bif-ctrl", 0);
+ if (!ctrl_node) {
+ pr_err("Could not find qcom,bif-ctrl property in %s\n",
+ consumer_dev->of_node->full_name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ mutex_lock(&bif_ctrl_list_mutex);
+ list_for_each_entry(bdev, &bif_ctrl_list, list) {
+ if (bdev->ctrl_dev && bdev->ctrl_dev->of_node == ctrl_node) {
+ bdev_found = bdev;
+ break;
+ }
+ }
+ mutex_unlock(&bif_ctrl_list_mutex);
+
+ if (bdev_found) {
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl) {
+ pr_err("Bus handle allocation failed\n");
+ ctrl = ERR_PTR(-ENOMEM);
+ } else {
+ ctrl->bdev = bdev_found;
+ }
+ }
+
+ return ctrl;
+}
+EXPORT_SYMBOL(bif_ctrl_get);
+
+/**
+ * bif_ctrl_put() - frees a BIF controller handle
+ * @ctrl: BIF controller consumer handle
+ */
+void bif_ctrl_put(struct bif_ctrl *ctrl)
+{
+ if (!IS_ERR_OR_NULL(ctrl) && ctrl->exclusive_lock)
+ mutex_unlock(&ctrl->bdev->mutex);
+ kfree(ctrl);
+}
+EXPORT_SYMBOL(bif_ctrl_put);
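A typical consumer probe path using the two calls above might look like this sketch, assuming the consumer's device tree node carries the qcom,bif-ctrl phandle described in the binding; -EPROBE_DEFER is simply propagated so the probe retries once the controller has registered. The driver name is hypothetical.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/bif/consumer.h>

static int foo_consumer_probe(struct platform_device *pdev)
{
	struct bif_ctrl *ctrl;

	ctrl = bif_ctrl_get(&pdev->dev);
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	/* ... locate slaves, issue transactions, register notifiers ... */

	bif_ctrl_put(ctrl);
	return 0;
}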
+
+/*
+ * Returns true if all parameters are matched, otherwise false.
+ * function_type and function_version match if there exists some function in
+ * the slave which has the specified type and version. ctrl == NULL is treated
+ * as a wildcard.
+ */
+static bool bif_slave_match(const struct bif_ctrl *ctrl,
+ struct bif_slave_dev *sdev, const struct bif_match_criteria *criteria)
+{
+ int i, type, version;
+
+ if (ctrl && (ctrl->bdev != sdev->bdev))
+ return false;
+
+ if (!sdev->present
+ && (!(criteria->match_mask & BIF_MATCH_IGNORE_PRESENCE)
+ || ((criteria->match_mask & BIF_MATCH_IGNORE_PRESENCE)
+ && !criteria->ignore_presence)))
+ return false;
+
+ if ((criteria->match_mask & BIF_MATCH_MANUFACTURER_ID)
+ && sdev->l1_data.manufacturer_id != criteria->manufacturer_id)
+ return false;
+
+ if ((criteria->match_mask & BIF_MATCH_PRODUCT_ID)
+ && sdev->l1_data.product_id != criteria->product_id)
+ return false;
+
+ if (criteria->match_mask & BIF_MATCH_FUNCTION_TYPE) {
+ if (!sdev->function_directory)
+ return false;
+ for (i = 0; i < sdev->l1_data.length / 4; i++) {
+ type = sdev->function_directory[i].function_type;
+ version = sdev->function_directory[i].function_version;
+ if (type == criteria->function_type &&
+ (version == criteria->function_version
+ || !(criteria->match_mask
+ & BIF_MATCH_FUNCTION_VERSION)))
+ return true;
+ }
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * bif_slave_match_count() - returns the number of slaves associated with the
+ * specified BIF controller which fit the matching
+ * criteria
+ * @ctrl: BIF controller consumer handle
+ * @match_criteria: Matching criteria used to filter slaves
+ */
+int bif_slave_match_count(const struct bif_ctrl *ctrl,
+ const struct bif_match_criteria *match_criteria)
+{
+ struct bif_slave_dev *sdev;
+ int count = 0;
+
+ mutex_lock(&bif_sdev_list_mutex);
+
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ if (bif_slave_match(ctrl, sdev, match_criteria))
+ count++;
+ }
+
+ mutex_unlock(&bif_sdev_list_mutex);
+
+ return count;
+}
+EXPORT_SYMBOL(bif_slave_match_count);
+
+/**
+ * bif_slave_match_get() - get a slave handle for the id'th slave associated
+ * with the specified BIF controller which fits the
+ * matching criteria
+ * @ctrl: BIF controller consumer handle
+ * @id: Index into the set of matching slaves
+ * @match_criteria: Matching criteria used to filter slaves
+ *
+ * id must be in the range [0, bif_slave_match_count(ctrl, match_criteria) - 1].
+ *
+ * Returns a BIF slave handle if successful or an ERR_PTR if not.
+ */
+struct bif_slave *bif_slave_match_get(const struct bif_ctrl *ctrl,
+ unsigned int id, const struct bif_match_criteria *match_criteria)
+{
+ struct bif_slave_dev *sdev;
+ struct bif_slave *slave = ERR_PTR(-ENODEV);
+ struct bif_slave_dev *sdev_found = NULL;
+ int count = 0;
+
+ mutex_lock(&bif_sdev_list_mutex);
+
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ if (bif_slave_match(ctrl, sdev, match_criteria))
+ count++;
+ if (count == id + 1) {
+ sdev_found = sdev;
+ break;
+ }
+ }
+
+ mutex_unlock(&bif_sdev_list_mutex);
+
+ if (sdev_found) {
+ slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ if (!slave) {
+ pr_err("Slave allocation failed\n");
+ slave = ERR_PTR(-ENOMEM);
+ } else {
+ slave->sdev = sdev_found;
+ slave->ctrl.bdev = sdev_found->bdev;
+ }
+ }
+
+ return slave;
+}
+EXPORT_SYMBOL(bif_slave_match_get);
+
+/**
+ * bif_slave_put() - frees a BIF slave handle
+ * @slave: BIF slave handle
+ */
+void bif_slave_put(struct bif_slave *slave)
+{
+ if (!IS_ERR_OR_NULL(slave) && slave->ctrl.exclusive_lock)
+ mutex_unlock(&slave->sdev->bdev->mutex);
+ kfree(slave);
+}
+EXPORT_SYMBOL(bif_slave_put);
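Putting the matching API together, a consumer could grab the first slave from a particular manufacturer roughly as follows (the manufacturer ID shown is a placeholder, and the field names are the ones tested by bif_slave_match() above).

#include <linux/err.h>
#include <linux/bif/consumer.h>

static struct bif_slave *foo_find_slave(struct bif_ctrl *ctrl)
{
	struct bif_match_criteria criteria = {
		.match_mask = BIF_MATCH_MANUFACTURER_ID,
		.manufacturer_id = 0x0123, /* placeholder value */
	};

	if (bif_slave_match_count(ctrl, &criteria) < 1)
		return ERR_PTR(-ENODEV);

	/* Take the first (index 0) matching slave. */
	return bif_slave_match_get(ctrl, 0, &criteria);
}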
+
+/**
+ * bif_slave_find_function() - get the function pointer and version of a
+ * BIF function if it is present on the specified slave
+ * @slave: BIF slave handle
+ * @function: BIF function to search for inside of the slave
+ * @version: If the function is found, then 'version' is set to the
+ * version value of the function
+ * @function_pointer: If the function is found, then 'function_pointer' is set
+ * to the BIF slave address of the function
+ *
+ * Returns 0 for success or errno if an error occurred. If the function is not
+ * found in the slave, then -ENODEV is returned.
+ */
+int bif_slave_find_function(struct bif_slave *slave, u8 function, u8 *version,
+ u16 *function_pointer)
+{
+ int rc = -ENODEV;
+ struct bif_ddb_l2_data *func;
+ int i;
+
+ if (IS_ERR_OR_NULL(slave) || IS_ERR_OR_NULL(version)
+ || IS_ERR_OR_NULL(function_pointer)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ func = slave->sdev->function_directory;
+
+ for (i = 0; i < slave->sdev->l1_data.length / 4; i++) {
+ if (function == func[i].function_type) {
+ *version = func[i].function_version;
+ *function_pointer = func[i].function_pointer;
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_slave_find_function);
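For example, a consumer that needs to talk to a slave's non-volatile memory could first confirm the function exists and fetch its pointer (BIF_FUNC_NVM is the same function type constant used later in this file; the function name is illustrative).

#include <linux/kernel.h>
#include <linux/bif/consumer.h>

static int foo_locate_nvm(struct bif_slave *slave)
{
	u8 version;
	u16 nvm_func_ptr;
	int rc;

	rc = bif_slave_find_function(slave, BIF_FUNC_NVM, &version,
					&nvm_func_ptr);
	if (rc == -ENODEV)
		pr_info("slave has no NVM function\n");
	else if (rc)
		pr_err("function lookup failed, rc=%d\n", rc);
	else
		pr_info("NVM function v%u at pointer 0x%04X\n", version,
			nvm_func_ptr);

	return rc;
}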
+
+/**
+ * bif_slave_read() - read contiguous memory values from a BIF slave
+ * @slave: BIF slave handle
+ * @addr: BIF slave address to begin reading at
+ * @buf: Buffer to fill with memory values
+ * @len: Number of byte to read
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_slave_read(struct bif_slave *slave, u16 addr, u8 *buf, int len)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(slave) || IS_ERR_OR_NULL(buf)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ bif_slave_ctrl_lock(slave);
+
+ rc = _bif_slave_read(slave->sdev, addr, buf, len);
+ if (rc)
+ pr_err("BIF slave read failed, rc=%d\n", rc);
+
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_slave_read);
+
+/**
+ * bif_slave_write() - write contiguous memory values to a BIF slave
+ * @slave: BIF slave handle
+ * @addr: BIF slave address to begin writing at
+ * @buf: Buffer containing values to write
+ * @len: Number of byte to write
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_slave_write(struct bif_slave *slave, u16 addr, u8 *buf, int len)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(slave) || IS_ERR_OR_NULL(buf)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ bif_slave_ctrl_lock(slave);
+
+ rc = _bif_slave_write(slave->sdev, addr, buf, len);
+ if (rc)
+ pr_err("BIF slave write failed, rc=%d\n", rc);
+
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_slave_write);
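Both calls take a 16-bit slave address and a byte buffer. A small sketch that writes one byte and reads it back (register address and names are illustrative; the slave handle is assumed to be valid):

#include <linux/errno.h>
#include <linux/bif/consumer.h>

static int foo_rw_check(struct bif_slave *slave, u16 addr)
{
	u8 wval = 0xA5, rval = 0;
	int rc;

	rc = bif_slave_write(slave, addr, &wval, 1);
	if (rc)
		return rc;

	rc = bif_slave_read(slave, addr, &rval, 1);
	if (rc)
		return rc;

	return (rval == wval) ? 0 : -EIO;
}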
+
+/**
+ * bif_slave_is_present() - check if a slave is currently physically present
+ * in the system
+ * @slave: BIF slave handle
+ *
+ * Returns 1 if the slave is present, 0 if the slave is not present, or errno
+ * if an error occurred.
+ *
+ * This function can be used by BIF consumer drivers to check if their slave
+ * handles are still meaningful after battery reinsertion.
+ */
+int bif_slave_is_present(struct bif_slave *slave)
+{
+ if (IS_ERR_OR_NULL(slave)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ return slave->sdev->present;
+}
+EXPORT_SYMBOL(bif_slave_is_present);
+
+/**
+ * bif_slave_is_selected() - check if a slave is currently selected on the BIF
+ * bus
+ * @slave: BIF slave handle
+ *
+ * Returns 1 if the slave is selected, 0 if the slave is not selected, or errno
+ * if an error occurred.
+ *
+ * This function should not be required under normal circumstances since the
+ * bif-core framework ensures that slaves are always selected when needed.
+ * It would be most useful when used as a helper in conjunction with
+ * bif_ctrl_bus_lock() and the raw transaction functions.
+ */
+int bif_slave_is_selected(struct bif_slave *slave)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(slave)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ if (slave->sdev->bdev->selected_sdev != slave->sdev)
+ return false;
+
+ bif_slave_ctrl_lock(slave);
+ rc = bif_is_slave_selected(slave->sdev->bdev);
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_slave_is_selected);
+
+/**
+ * bif_slave_select() - select a slave on the BIF bus
+ * @slave: BIF slave handle
+ *
+ * Returns 0 on success or errno if an error occurred.
+ *
+ * This function should not be required under normal circumstances since the
+ * bif-core framework ensures that slaves are always selected when needed.
+ * It would be most useful when used as a helper in conjunction with
+ * bif_ctrl_bus_lock() and the raw transaction functions.
+ */
+int bif_slave_select(struct bif_slave *slave)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(slave)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ bif_slave_ctrl_lock(slave);
+ slave->sdev->bdev->selected_sdev = NULL;
+ rc = bif_select_slave(slave->sdev);
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_slave_select);
+
+/**
+ * bif_ctrl_raw_transaction() - perform a raw BIF transaction on the bus which
+ * expects no slave response
+ * @ctrl: BIF controller consumer handle
+ * @transaction: BIF transaction to carry out. This should be one of the
+ * values in enum bif_transaction.
+ * @data: 8-bit data to use in the transaction. The meaning of
+ * this data depends upon the transaction that is to be
+ * performed.
+ *
+ * When performing a bus command (BC) transaction, values in enum
+ * bif_bus_command may be used for the data parameter. Additional manufacturer
+ * specific values may also be used in a BC transaction.
+ *
+ * Returns 0 on success or errno if an error occurred.
+ *
+ * This function should only need to be used when BIF transactions are required
+ * that are not handled by the bif-core directly.
+ */
+int bif_ctrl_raw_transaction(struct bif_ctrl *ctrl, int transaction, u8 data)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ bif_ctrl_lock(ctrl);
+
+ rc = ctrl->bdev->desc->ops->bus_transaction(ctrl->bdev, transaction,
+ data);
+ if (rc)
+ pr_err("BIF bus transaction failed, rc=%d\n", rc);
+
+ bif_ctrl_unlock(ctrl);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_raw_transaction);
+
+/**
+ * bif_ctrl_raw_transaction_read() - perform a raw BIF transaction on the bus
+ * which expects an RD or TACK slave response word
+ * @ctrl: BIF controller consumer handle
+ * @transaction: BIF transaction to carry out. This should be one of the
+ * values in enum bif_transaction.
+ * @data: 8-bit data to use in the transaction. The meaning of
+ * this data depends upon the transaction that is to be
+ * performed.
+ * @response: Pointer to an integer which is filled with the 11-bit
+ * slave response word upon success. The 11-bit format is
+ * (MSB to LSB) BCF, ACK, EOT, D7-D0.
+ *
+ * When performing a bus command (BC) transaction, values in enum
+ * bif_bus_command may be used for the data parameter. Additional manufacturer
+ * specific values may also be used in a BC transaction.
+ *
+ * Returns 0 on success or errno if an error occurred.
+ *
+ * This function should only need to be used when BIF transactions are required
+ * that are not handled by the bif-core directly.
+ */
+int bif_ctrl_raw_transaction_read(struct bif_ctrl *ctrl, int transaction,
+ u8 data, int *response)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl) || IS_ERR_OR_NULL(response)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ bif_ctrl_lock(ctrl);
+
+ rc = ctrl->bdev->desc->ops->bus_transaction_read(ctrl->bdev,
+ transaction, data, response);
+ if (rc)
+ pr_err("BIF bus transaction failed, rc=%d\n", rc);
+
+ bif_ctrl_unlock(ctrl);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_raw_transaction_read);
+
+/**
+ * bif_ctrl_raw_transaction_query() - perform a raw BIF transaction on the bus
+ * which expects a BQ slave response
+ * @ctrl: BIF controller consumer handle
+ * @transaction: BIF transaction to carry out. This should be one of the
+ * values in enum bif_transaction.
+ * @data: 8-bit data to use in the transaction. The meaning of
+ * this data depends upon the transaction that is to be
+ * performed.
+ * @query_response: Pointer to boolean which is set to true if a BQ pulse
+ * is received, or false if no BQ pulse is received before
+ * timing out.
+ *
+ * When performing a bus command (BC) transaction, values in enum
+ * bif_bus_command may be used for the data parameter. Additional manufacturer
+ * specific values may also be used in a BC transaction.
+ *
+ * Returns 0 on success or errno if an error occurred.
+ *
+ * This function should only need to be used when BIF transactions are required
+ * that are not handled by the bif-core directly.
+ */
+int bif_ctrl_raw_transaction_query(struct bif_ctrl *ctrl, int transaction,
+ u8 data, bool *query_response)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl) || IS_ERR_OR_NULL(query_response)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ bif_ctrl_lock(ctrl);
+
+ rc = ctrl->bdev->desc->ops->bus_transaction_query(ctrl->bdev,
+ transaction, data, query_response);
+ if (rc)
+ pr_err("BIF bus transaction failed, rc=%d\n", rc);
+
+ bif_ctrl_unlock(ctrl);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_raw_transaction_query);
+
+/**
+ * bif_ctrl_bus_lock() - lock the BIF bus of a controller for exclusive access
+ * @ctrl: BIF controller consumer handle
+ *
+ * This function should only need to be called in circumstances where a BIF
+ * consumer is issuing special BIF bus commands that have strict ordering
+ * requirements.
+ */
+void bif_ctrl_bus_lock(struct bif_ctrl *ctrl)
+{
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid controller handle.\n");
+ return;
+ }
+
+ if (ctrl->exclusive_lock) {
+ pr_err("BIF bus exclusive lock already held\n");
+ return;
+ }
+
+ mutex_lock(&ctrl->bdev->mutex);
+ ctrl->exclusive_lock = true;
+ bif_cancel_irq_mode_work(ctrl->bdev);
+}
+EXPORT_SYMBOL(bif_ctrl_bus_lock);
+
+/**
+ * bif_ctrl_bus_unlock() - unlock the BIF bus of a controller that was
+ * previously
+ * locked for exclusive access
+ * @ctrl: BIF controller consumer handle
+ *
+ * This function must only be called after first calling bif_ctrl_bus_lock().
+ */
+void bif_ctrl_bus_unlock(struct bif_ctrl *ctrl)
+{
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid controller handle.\n");
+ return;
+ }
+
+ if (!ctrl->exclusive_lock) {
+ pr_err("BIF bus exclusive lock not already held\n");
+ return;
+ }
+
+ ctrl->exclusive_lock = false;
+ bif_schedule_irq_mode_work(ctrl->bdev);
+ mutex_unlock(&ctrl->bdev->mutex);
+}
+EXPORT_SYMBOL(bif_ctrl_bus_unlock);
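The raw transaction helpers are meant to be used under bif_ctrl_bus_lock(). The sketch below reads one slave register by hand with an ERA/RRA pair, mirroring what _bif_slave_read_no_retry() does internally; the controller handle is taken from bif_get_bus_handle() so the exclusive lock covers the slave selection as well. Function name and register address are illustrative.

#include <linux/errno.h>
#include <linux/bif/consumer.h>

static int foo_raw_read_reg(struct bif_slave *slave, u16 addr, u8 *val)
{
	struct bif_ctrl *ctrl = bif_get_bus_handle(slave);
	int rc, response = 0;

	bif_ctrl_bus_lock(ctrl);

	rc = bif_slave_select(slave);
	if (rc)
		goto out;

	rc = bif_ctrl_raw_transaction(ctrl, BIF_TRANS_ERA, addr >> 8);
	if (rc)
		goto out;

	rc = bif_ctrl_raw_transaction_read(ctrl, BIF_TRANS_RRA, addr & 0xFF,
						&response);
	if (rc)
		goto out;

	if (!(response & BIF_SLAVE_RD_ACK)) {
		rc = -EIO;
		goto out;
	}

	*val = response & BIF_SLAVE_RD_DATA;
out:
	bif_ctrl_bus_unlock(ctrl);
	return rc;
}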
+
+/**
+ * bif_ctrl_measure_rid() - measure the battery pack Rid pull-down resistance
+ * in ohms
+ * @ctrl: BIF controller consumer handle
+ *
+ * Returns the resistance of the Rid resistor in ohms if successful or errno
+ * if an error occurred.
+ */
+int bif_ctrl_measure_rid(struct bif_ctrl *ctrl)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid controller handle.\n");
+ return -ENODEV;
+ }
+
+ if (!ctrl->bdev->desc->ops->get_battery_rid) {
+ pr_err("Cannot measure Rid.\n");
+ return -ENXIO;
+ }
+
+ bif_ctrl_lock(ctrl);
+
+ rc = ctrl->bdev->desc->ops->get_battery_rid(ctrl->bdev);
+ if (rc < 0)
+ pr_err("Error during Rid measurement, rc=%d\n", rc);
+
+ bif_ctrl_unlock(ctrl);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_measure_rid);
+
+/**
+ * bif_ctrl_get_bus_period() - get the BIF bus period (tau_bif) in nanoseconds
+ * @ctrl: BIF controller consumer handle
+ *
+ * Returns the currently configured bus period in nanoseconds if successful or
+ * errno if an error occurred.
+ */
+int bif_ctrl_get_bus_period(struct bif_ctrl *ctrl)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid controller handle.\n");
+ return -ENODEV;
+ }
+
+ if (!ctrl->bdev->desc->ops->get_bus_period) {
+ pr_err("Cannot get the BIF bus period.\n");
+ return -ENXIO;
+ }
+
+ rc = ctrl->bdev->desc->ops->get_bus_period(ctrl->bdev);
+ if (rc < 0)
+ pr_err("Error during bus period retrieval, rc=%d\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_get_bus_period);
+
+/**
+ * bif_ctrl_set_bus_period() - set the BIF bus period (tau_bif) in nanoseconds
+ * @ctrl: BIF controller consumer handle
+ * @period_ns: BIF bus period in nanoseconds to use
+ *
+ * If the exact period is not supported by the BIF controller hardware, then the
+ * next larger supported period will be used.
+ *
+ * Returns 0 on success or errno if an error occurred.
+ */
+int bif_ctrl_set_bus_period(struct bif_ctrl *ctrl, int period_ns)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid controller handle.\n");
+ return -ENODEV;
+ }
+
+ if (!ctrl->bdev->desc->ops->set_bus_period) {
+ pr_err("Cannot set the BIF bus period.\n");
+ return -ENXIO;
+ }
+
+ bif_ctrl_lock(ctrl);
+ rc = ctrl->bdev->desc->ops->set_bus_period(ctrl->bdev, period_ns);
+ if (rc)
+ pr_err("Error during bus period configuration, rc=%d\n", rc);
+ bif_ctrl_unlock(ctrl);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_set_bus_period);
+
+/**
+ * bif_ctrl_get_bus_state() - get the current state of the BIF bus
+ * @ctrl: BIF controller consumer handle
+ *
+ * Returns a bus state from enum bif_bus_state if successful or errno if an
+ * error occurred.
+ */
+int bif_ctrl_get_bus_state(struct bif_ctrl *ctrl)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid controller handle.\n");
+ return -ENODEV;
+ }
+
+ rc = ctrl->bdev->desc->ops->get_bus_state(ctrl->bdev);
+ if (rc < 0)
+ pr_err("Error during bus state retrieval, rc=%d\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_get_bus_state);
+
+/**
+ * bif_ctrl_set_bus_state() - set the state of the BIF bus
+ * @ctrl: BIF controller consumer handle
+ * @state: State for the BIF bus to enter
+ *
+ * Returns 0 on success or errno if an error occurred.
+ */
+int bif_ctrl_set_bus_state(struct bif_ctrl *ctrl, enum bif_bus_state state)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid controller handle.\n");
+ return -ENODEV;
+ }
+
+ bif_ctrl_lock(ctrl);
+
+ rc = ctrl->bdev->desc->ops->set_bus_state(ctrl->bdev, state);
+ if (rc < 0)
+ pr_err("Error during bus state configuration, rc=%d\n", rc);
+
+ /*
+ * Uncache the selected slave if the new bus state results in the slave
+ * becoming unselected.
+ */
+ if (state == BIF_BUS_STATE_MASTER_DISABLED
+ || state == BIF_BUS_STATE_POWER_DOWN
+ || state == BIF_BUS_STATE_STANDBY)
+ ctrl->bdev->selected_sdev = NULL;
+
+ bif_ctrl_unlock(ctrl);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_set_bus_state);
+
+/*
+ * Check if the specified function is a protocol function and if it is, then
+ * instantiate protocol function data for the slave.
+ */
+static int bif_initialize_protocol_function(struct bif_slave_dev *sdev,
+ struct bif_ddb_l2_data *func)
+{
+ int rc = 0;
+ u8 buf[4];
+
+ /* Ensure that this is a protocol function. */
+ if (func->function_type != BIF_FUNC_PROTOCOL)
+ return 0;
+
+ if (sdev->protocol_function) {
+ pr_err("Duplicate protocol function found for BIF slave; DEV_ADR=0x%02X\n",
+ sdev->slave_addr);
+ return -EPERM;
+ }
+
+ sdev->protocol_function = kzalloc(sizeof(struct bif_protocol_function),
+ GFP_KERNEL);
+ if (!sdev->protocol_function) {
+ pr_err("out of memory\n");
+ return -ENOMEM;
+ }
+
+ rc = _bif_slave_read(sdev, func->function_pointer, buf, 4);
+ if (rc) {
+ pr_err("Protocol function data read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ sdev->protocol_function->protocol_pointer = buf[0] << 8 | buf[1];
+ sdev->protocol_function->device_id_pointer = buf[2] << 8 | buf[3];
+ sdev->protocol_function->l2_entry = func;
+
+ rc = _bif_slave_read(sdev, sdev->protocol_function->device_id_pointer,
+ sdev->protocol_function->device_id, BIF_DEVICE_ID_BYTE_LENGTH);
+ if (rc) {
+ pr_err("Device ID read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Check if this slave does not have a UID value stored. */
+ if (sdev->unique_id_bits_known == 0) {
+ sdev->unique_id_bits_known = BIF_UNIQUE_ID_BIT_LENGTH;
+ /* Fill in UID using manufacturer ID and device ID. */
+ sdev->unique_id[0] = sdev->l1_data.manufacturer_id >> 8;
+ sdev->unique_id[1] = sdev->l1_data.manufacturer_id;
+ memcpy(&sdev->unique_id[2],
+ sdev->protocol_function->device_id,
+ BIF_DEVICE_ID_BYTE_LENGTH);
+ }
+
+ return rc;
+}
+
+/*
+ * Check if the specified function is a slave control function and if it is,
+ * then instantiate slave control function data for the slave.
+ */
+static int bif_initialize_slave_control_function(struct bif_slave_dev *sdev,
+ struct bif_ddb_l2_data *func)
+{
+ int rc = 0;
+ int i;
+ u8 buf[3];
+
+ /* Ensure that this is a slave control function. */
+ if (func->function_type != BIF_FUNC_SLAVE_CONTROL)
+ return 0;
+
+ if (sdev->slave_ctrl_function) {
+ pr_err("Duplicate slave control function found for BIF slave; DEV_ADR=0x%02X\n",
+ sdev->slave_addr);
+ return -EPERM;
+ }
+
+ sdev->slave_ctrl_function
+ = kzalloc(sizeof(*sdev->slave_ctrl_function), GFP_KERNEL);
+ if (!sdev->slave_ctrl_function) {
+ pr_err("out of memory\n");
+ return -ENOMEM;
+ }
+
+ rc = _bif_slave_read(sdev, func->function_pointer, buf, 3);
+ if (rc) {
+ pr_err("Slave control function data read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ sdev->slave_ctrl_function->slave_ctrl_pointer = buf[0] << 8 | buf[1];
+ sdev->slave_ctrl_function->task_count
+ = buf[2] * SLAVE_CTRL_TASKS_PER_SET;
+ sdev->slave_ctrl_function->l2_entry = func;
+
+ if (sdev->slave_ctrl_function->task_count > 0) {
+ sdev->slave_ctrl_function->irq_notifier_list =
+ kzalloc(sizeof(struct blocking_notifier_head)
+ * sdev->slave_ctrl_function->task_count,
+ GFP_KERNEL);
+ if (!sdev->slave_ctrl_function->irq_notifier_list) {
+ pr_err("out of memory\n");
+ kfree(sdev->slave_ctrl_function);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < sdev->slave_ctrl_function->task_count; i++) {
+ BLOCKING_INIT_NOTIFIER_HEAD(
+ &sdev->slave_ctrl_function->irq_notifier_list[i]);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * bif_crc_ccitt() - calculate the CRC-CCITT CRC value of the data specified
+ * @buffer: Data to calculate the CRC of
+ * @len: Length of the data buffer in bytes
+ *
+ * MIPI-BIF specifies the usage of CRC-CCITT for BIF data objects. This
+ * function performs the CRC calculation while taking into account the bit
+ * ordering used by BIF.
+ */
+u16 bif_crc_ccitt(const u8 *buffer, unsigned int len)
+{
+ u16 crc = 0xFFFF;
+
+ while (len--) {
+ crc = crc_ccitt_byte(crc, bitrev8(*buffer));
+ buffer++;
+ }
+ return bitrev16(crc);
+}
+EXPORT_SYMBOL(bif_crc_ccitt);
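+/*
+ * Example usage (illustrative sketch; "buf" and "len" are hypothetical names
+ * for a caller-owned buffer holding BIF object bytes and its length):
+ *
+ *	u16 crc = bif_crc_ccitt(buf, len);
+ *
+ * The result can then be compared against the CRC stored with the object.
+ */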
+
+static u16 bif_object_crc_ccitt(const struct bif_object *object)
+{
+ u16 crc = 0xFFFF;
+ int i;
+
+ crc = crc_ccitt_byte(crc, bitrev8(object->type));
+ crc = crc_ccitt_byte(crc, bitrev8(object->version));
+ crc = crc_ccitt_byte(crc, bitrev8(object->manufacturer_id >> 8));
+ crc = crc_ccitt_byte(crc, bitrev8(object->manufacturer_id));
+ crc = crc_ccitt_byte(crc, bitrev8(object->length >> 8));
+ crc = crc_ccitt_byte(crc, bitrev8(object->length));
+
+ for (i = 0; i < object->length - 8; i++)
+ crc = crc_ccitt_byte(crc, bitrev8(object->data[i]));
+
+ return bitrev16(crc);
+}
+
+/*
+ * Check if the specified function is an NVM function and if it is, then
+ * instantiate NVM function data for the slave and read all objects.
+ */
+static int bif_initialize_nvm_function(struct bif_slave_dev *sdev,
+ struct bif_ddb_l2_data *func)
+{
+ int rc = 0;
+ int data_len;
+ u8 buf[8], object_type;
+ struct bif_object *object;
+ struct bif_object *temp;
+ u16 addr;
+ u16 crc;
+
+ /* Ensure that this is an NVM function. */
+ if (func->function_type != BIF_FUNC_NVM)
+ return 0;
+
+ if (sdev->nvm_function) {
+ pr_err("Duplicate NVM function found for BIF slave; DEV_ADR=0x%02X\n",
+ sdev->slave_addr);
+ return -EPERM;
+ }
+
+ sdev->nvm_function
+ = kzalloc(sizeof(*sdev->nvm_function), GFP_KERNEL);
+ if (!sdev->nvm_function) {
+ pr_err("out of memory\n");
+ return -ENOMEM;
+ }
+
+ rc = _bif_slave_read(sdev, func->function_pointer, buf, 8);
+ if (rc) {
+ pr_err("NVM function data read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ sdev->nvm_function->nvm_pointer = buf[0] << 8 | buf[1];
+ sdev->nvm_function->slave_control_channel = buf[2];
+ sdev->nvm_function->write_buffer_size = buf[3];
+ sdev->nvm_function->nvm_base_address = buf[4] << 8 | buf[5];
+ sdev->nvm_function->nvm_size = buf[6] << 8 | buf[7];
+
+ INIT_LIST_HEAD(&sdev->nvm_function->object_list);
+
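+	/*
+	 * Each object in NVM is laid out as: type (1 byte), version (1 byte),
+	 * manufacturer ID (2 bytes), total length (2 bytes), data
+	 * (length - 8 bytes) and CRC (2 bytes), with multi-byte fields stored
+	 * MSB first. A type byte of 0x00 in place of the next object's type
+	 * terminates the list.
+	 */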
+ /* Read object list */
+ addr = sdev->nvm_function->nvm_base_address;
+ rc = _bif_slave_read(sdev, addr, &object_type, 1);
+ if (rc) {
+ pr_err("Slave memory read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Object type == 0x00 corresponds to the end of the object list. */
+ while (object_type != 0x00) {
+ object = kzalloc(sizeof(*object), GFP_KERNEL);
+ if (!object) {
+ pr_err("out of memory\n");
+ rc = -ENOMEM;
+ goto free_data;
+ }
+ list_add_tail(&object->list, &sdev->nvm_function->object_list);
+
+ rc = _bif_slave_read(sdev, addr + 1, buf + 1, 5);
+ if (rc) {
+ pr_err("Slave memory read of object header failed; addr=0x%04X, len=%d, rc=%d\n",
+ addr + 1, 5, rc);
+ goto free_data;
+ }
+
+ object->addr = addr;
+ object->type = object_type;
+ object->version = buf[1];
+ object->manufacturer_id = buf[2] << 8 | buf[3];
+ object->length = buf[4] << 8 | buf[5];
+
+ if ((object->addr + object->length)
+ > (sdev->nvm_function->nvm_base_address
+ + sdev->nvm_function->nvm_size)) {
+ pr_warn("warning: BIF slave object is not formatted correctly; NVM base=0x%04X, NVM len=%d, object addr=0x%04X, object len=%d\n",
+ sdev->nvm_function->nvm_base_address,
+ sdev->nvm_function->nvm_size,
+ object->addr,
+ object->length);
+ /* Limit object size to remaining NVM size. */
+ object->length = sdev->nvm_function->nvm_size
+ + sdev->nvm_function->nvm_base_address
+ - object->addr;
+ }
+
+ /* Object header + CRC takes up 8 bytes. */
+ data_len = object->length - 8;
+ object->data = kmalloc(data_len, GFP_KERNEL);
+ if (!object->data) {
+ pr_err("out of memory\n");
+ rc = -ENOMEM;
+ goto free_data;
+ }
+
+ rc = _bif_slave_read(sdev, addr + 6, object->data, data_len);
+ if (rc) {
+ pr_err("Slave memory read of object data failed; addr=0x%04X, len=%d, rc=%d\n",
+ addr + 6, data_len, rc);
+ goto free_data;
+ }
+
+ rc = _bif_slave_read(sdev, addr + 6 + data_len, buf, 3);
+ if (rc) {
+ pr_err("Slave memory read of object CRC failed; addr=0x%04X, len=%d, rc=%d\n",
+ addr + 6 + data_len, 3, rc);
+ goto free_data;
+ }
+
+ object->crc = buf[0] << 8 | buf[1];
+ object_type = buf[2];
+ sdev->nvm_function->object_count++;
+
+ crc = bif_object_crc_ccitt(object);
+ if (crc != object->crc)
+ pr_info("BIF object at addr=0x%04X has invalid CRC; crc calc=0x%04X, crc exp=0x%04X\n",
+ object->addr, crc, object->crc);
+
+ addr += object->length;
+ }
+
+ return rc;
+
+free_data:
+ list_for_each_entry_safe(object, temp,
+ &sdev->nvm_function->object_list, list) {
+ list_del(&object->list);
+ kfree(object->data);
+ kfree(object);
+ }
+ kfree(sdev->nvm_function);
+ sdev->nvm_function = NULL;
+ return rc;
+}
+
+static int bif_parse_slave_data(struct bif_slave_dev *sdev)
+{
+ int rc = 0;
+ u8 buf[10];
+ u8 *func_buf;
+ struct bif_ddb_l2_data *func;
+ int function_count, i;
+
+ rc = _bif_slave_read(sdev, BIF_DDB_L1_BASE_ADDR, buf, 10);
+ if (rc) {
+ pr_err("DDB L1 data read failed, rc=%d\n", rc);
+ return rc;
+ }
+
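+	/*
+	 * DDB L1 header layout: revision, level, device class, manufacturer
+	 * ID, product ID and function directory length, with the 2-byte
+	 * fields stored MSB first.
+	 */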
+ sdev->l1_data.revision = buf[0];
+ sdev->l1_data.level = buf[1];
+ sdev->l1_data.device_class = buf[2] << 8 | buf[3];
+ sdev->l1_data.manufacturer_id = buf[4] << 8 | buf[5];
+ sdev->l1_data.product_id = buf[6] << 8 | buf[7];
+ sdev->l1_data.length = buf[8] << 8 | buf[9];
+
+ function_count = sdev->l1_data.length / 4;
+ if (sdev->l1_data.length % 4) {
+ pr_err("Function directory length=%d is invalid\n",
+ sdev->l1_data.length);
+ return -EPROTO;
+ }
+
+ /* No DDB L2 function directory */
+ if (function_count == 0)
+ return 0;
+
+ func_buf = kmalloc(sdev->l1_data.length, GFP_KERNEL);
+ if (!func_buf) {
+ pr_err("out of memory\n");
+ return -ENOMEM;
+ }
+
+ sdev->function_directory = kzalloc(
+ function_count * sizeof(struct bif_ddb_l2_data), GFP_KERNEL);
+	if (!sdev->function_directory) {
+		pr_err("out of memory\n");
+		kfree(func_buf);
+		return -ENOMEM;
+	}
+
+ rc = _bif_slave_read(sdev, BIF_DDB_L2_BASE_ADDR, func_buf,
+ sdev->l1_data.length);
+	if (rc) {
+		pr_err("DDB L2 data read failed, rc=%d\n", rc);
+		goto done;
+	}
+
+ for (i = 0; i < function_count; i++) {
+ func = &sdev->function_directory[i];
+ func->function_type = func_buf[i * 4];
+ func->function_version = func_buf[i * 4 + 1];
+ func->function_pointer = func_buf[i * 4 + 2] << 8
+ | func_buf[i * 4 + 3];
+ rc = bif_initialize_protocol_function(sdev, func);
+ if (rc)
+ goto done;
+ rc = bif_initialize_slave_control_function(sdev, func);
+ if (rc)
+ goto done;
+ rc = bif_initialize_nvm_function(sdev, func);
+ if (rc)
+ goto done;
+ }
+done:
+ kfree(func_buf);
+ return rc;
+}
+
+static int bif_add_secondary_slaves(struct bif_slave_dev *primary_slave)
+{
+ int rc = 0;
+ int data_len, i;
+ u16 crc;
+ struct bif_slave_dev *sdev;
+ struct bif_object *object;
+
+ list_for_each_entry(object, &primary_slave->nvm_function->object_list,
+ list) {
+ if (object->type != BIF_OBJ_SEC_SLAVE)
+ continue;
+
+ data_len = object->length - 8;
+ if (data_len % BIF_UNIQUE_ID_BYTE_LENGTH) {
+ pr_info("Invalid secondary slave object found, addr=0x%04X, data len=%d\n",
+ object->addr, data_len);
+ continue;
+ }
+
+ crc = bif_object_crc_ccitt(object);
+ if (crc != object->crc) {
+ pr_info("BIF object at addr=0x%04X has invalid CRC; crc calc=0x%04X, crc exp=0x%04X\n",
+ object->addr, crc, object->crc);
+ continue;
+ }
+
+ for (i = 0; i < data_len / BIF_UNIQUE_ID_BYTE_LENGTH; i++) {
+ sdev = bif_add_slave(primary_slave->bdev);
+ if (IS_ERR(sdev)) {
+ rc = PTR_ERR(sdev);
+ pr_err("bif_add_slave failed, rc=%d\n", rc);
+ return rc;
+ }
+ memcpy(sdev->unique_id,
+ &object->data[i * BIF_UNIQUE_ID_BYTE_LENGTH],
+ BIF_UNIQUE_ID_BYTE_LENGTH);
+ sdev->unique_id_bits_known = BIF_UNIQUE_ID_BIT_LENGTH;
+
+ rc = bif_select_slave(sdev);
+ if (rc) {
+ pr_err("Could not select slave, rc=%d\n", rc);
+ goto free_slave;
+ }
+
+ rc = bif_is_slave_selected(sdev->bdev);
+ if (rc < 0) {
+ pr_err("Transaction failed, rc=%d\n", rc);
+ goto free_slave;
+ } else if (rc == 1) {
+ sdev->present = true;
+ sdev->bdev->selected_sdev = sdev;
+ } else {
+ sdev->present = false;
+ sdev->bdev->selected_sdev = NULL;
+ }
+ }
+ }
+
+ return rc;
+
+free_slave:
+ bif_remove_slave(sdev);
+ return rc;
+}
+
+/*
+ * Performs UID search to identify all slaves attached to the bus. Assumes that
+ * all necessary locks are held.
+ */
+static int bif_perform_uid_search(struct bif_ctrl_dev *bdev)
+{
+ struct bif_slave_dev *sdev;
+ struct bif_slave_dev *new_slave;
+ bool resp[2], resp_dilc;
+ int i;
+ int rc = 0;
+ u8 cmd_probe[2] = {BIF_CMD_DIP0, BIF_CMD_DIP1};
+ u8 cmd_enter[2] = {BIF_CMD_DIE0, BIF_CMD_DIE1};
+
+ /*
+ * Iterate over all partially known UIDs adding new ones as they are
+ * found.
+ */
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ /* Skip slaves with fully known UIDs. */
+ if (sdev->unique_id_bits_known == BIF_UNIQUE_ID_BIT_LENGTH
+ || sdev->bdev != bdev)
+ continue;
+
+ /* Begin a new UID search. */
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_BC,
+ BIF_CMD_DISS);
+ if (rc) {
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Step through all known UID bits (MSB to LSB). */
+ for (i = 0; i < sdev->unique_id_bits_known; i++) {
+ rc = bdev->desc->ops->bus_transaction(bdev,
+ BIF_TRANS_BC,
+ cmd_enter[get_uid_bit(sdev->unique_id, i)]);
+ if (rc) {
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Step through unknown UID bits. */
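+		/*
+		 * Each unknown bit is probed with DIP0 and then DIP1. A
+		 * response to exactly one probe fixes that bit of the UID. A
+		 * response to both probes means at least two slaves diverge
+		 * here, so this entry keeps the '0' branch and a new slave
+		 * entry is forked for the '1' branch. No response at all
+		 * aborts the search. A DIE command then selects the branch to
+		 * continue following.
+		 */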
+ for (i = sdev->unique_id_bits_known;
+ i < BIF_UNIQUE_ID_BIT_LENGTH; i++) {
+ rc = bdev->desc->ops->bus_transaction_query(bdev,
+ BIF_TRANS_BC, cmd_probe[0], &resp[0]);
+ if (rc) {
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = bdev->desc->ops->bus_transaction_query(bdev,
+ BIF_TRANS_BC, cmd_probe[1], &resp[1]);
+ if (rc) {
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (resp[0] && resp[1]) {
+ /* Create an entry for the new UID branch. */
+ new_slave = bif_add_slave(bdev);
+ if (IS_ERR(new_slave)) {
+					rc = PTR_ERR(new_slave);
+ pr_err("bif_add_slave failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ memcpy(new_slave->unique_id, sdev->unique_id,
+ BIF_UNIQUE_ID_BYTE_LENGTH);
+ new_slave->bdev = sdev->bdev;
+
+ set_uid_bit(sdev->unique_id, i, 0);
+ sdev->unique_id_bits_known = i + 1;
+
+ set_uid_bit(new_slave->unique_id, i, 1);
+ new_slave->unique_id_bits_known = i + 1;
+ } else if (resp[0]) {
+ set_uid_bit(sdev->unique_id, i, 0);
+ sdev->unique_id_bits_known = i + 1;
+ } else if (resp[1]) {
+ set_uid_bit(sdev->unique_id, i, 1);
+ sdev->unique_id_bits_known = i + 1;
+ } else {
+ pr_debug("no bus query response received\n");
+ rc = -ENXIO;
+ return rc;
+ }
+
+ rc = bdev->desc->ops->bus_transaction(bdev,
+ BIF_TRANS_BC, cmd_enter[resp[0] ? 0 : 1]);
+ if (rc) {
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ rc = bdev->desc->ops->bus_transaction_query(bdev,
+ BIF_TRANS_BC, BIF_CMD_DILC, &resp_dilc);
+ if (rc) {
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (resp_dilc) {
+ sdev->present = true;
+ sdev->bdev->selected_sdev = sdev;
+ rc = bif_parse_slave_data(sdev);
+ } else {
+ pr_err("Slave failed to respond to DILC bus command; its UID is thus unverified.\n");
+ sdev->unique_id_bits_known = 0;
+ rc = -ENXIO;
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Removes slaves from the bif_sdev_list which have the same UID as previous
+ * slaves in the list.
+ */
+static int bif_remove_duplicate_slaves(struct bif_ctrl_dev *bdev)
+{
+ struct bif_slave_dev *sdev;
+ struct bif_slave_dev *last_slave;
+ struct bif_slave_dev *temp;
+
+ list_for_each_entry_safe(last_slave, temp, &bif_sdev_list, list) {
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ if (last_slave == sdev) {
+ break;
+ } else if (memcmp(last_slave->unique_id,
+ sdev->unique_id,
+ BIF_UNIQUE_ID_BYTE_LENGTH) == 0) {
+ bif_remove_slave(last_slave);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int bif_add_all_slaves(struct bif_ctrl_dev *bdev)
+{
+ struct bif_slave_dev *sdev;
+ int rc = 0;
+ int i;
+ bool has_slave = false, is_primary_slave = false;
+
+ mutex_lock(&bif_sdev_list_mutex);
+ mutex_lock(&bdev->mutex);
+
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ if (sdev->bdev == bdev) {
+ has_slave = true;
+ break;
+ }
+ }
+
+ if (!has_slave) {
+ /* Create a single empty slave to start the search algorithm. */
+ sdev = bif_add_slave(bdev);
+ if (IS_ERR(sdev)) {
+ rc = PTR_ERR(sdev);
+ pr_err("bif_add_slave failed, rc=%d\n", rc);
+ goto out;
+ }
+
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ /* Attempt to select primary slave in battery pack. */
+ rc = bdev->desc->ops->bus_transaction(bdev,
+ BIF_TRANS_SDA, BIF_PRIMARY_SLAVE_DEV_ADR);
+ if (rc == 0)
+ break;
+ }
+ if (rc) {
+ pr_err("BIF bus_transaction failed, rc=%d\n", rc);
+ goto out;
+ }
+
+ /* Check if a slave is selected. */
+ rc = bif_is_slave_selected(bdev);
+ if (rc < 0) {
+ pr_err("BIF bus_transaction failed, rc=%d\n", rc);
+ goto out;
+ } else {
+ is_primary_slave = rc;
+ }
+ }
+
+ if (is_primary_slave) {
+ pr_debug("Using primary slave at DEV_ADR==0x%02X\n",
+ BIF_PRIMARY_SLAVE_DEV_ADR);
+ sdev->bdev->selected_sdev = sdev;
+ sdev->present = true;
+ sdev->slave_addr = BIF_PRIMARY_SLAVE_DEV_ADR;
+ rc = bif_parse_slave_data(sdev);
+ if (rc) {
+ pr_err("Failed to parse primary slave data, rc=%d\n",
+ rc);
+ goto out;
+ }
+ rc = bif_add_secondary_slaves(sdev);
+ if (rc) {
+ pr_err("Failed to add secondary slaves, rc=%d\n", rc);
+ goto out;
+ }
+ } else {
+ pr_debug("Falling back on full UID search.\n");
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ rc = bif_perform_uid_search(bdev);
+ if (rc == 0)
+ break;
+ }
+ if (rc) {
+ pr_debug("BIF UID search failed, rc=%d\n", rc);
+ goto out;
+ }
+ }
+
+ bif_remove_duplicate_slaves(bdev);
+
+ mutex_unlock(&bdev->mutex);
+ mutex_unlock(&bif_sdev_list_mutex);
+
+ return rc;
+
+out:
+ mutex_unlock(&bdev->mutex);
+ mutex_unlock(&bif_sdev_list_mutex);
+ pr_debug("BIF slave search failed, rc=%d\n", rc);
+ return rc;
+}
+
+static int bif_add_known_slave(struct bif_ctrl_dev *bdev, u8 slave_addr)
+{
+ struct bif_slave_dev *sdev;
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ /* Attempt to select the slave. */
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_SDA,
+ slave_addr);
+ if (rc == 0)
+ break;
+ }
+ if (rc) {
+ pr_err("BIF bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Check if a slave is selected. */
+ rc = bif_is_slave_selected(bdev);
+ if (rc < 0) {
+ pr_err("BIF bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ sdev = bif_add_slave(bdev);
+ if (IS_ERR(sdev)) {
+ rc = PTR_ERR(sdev);
+ pr_err("bif_add_slave failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ sdev->bdev->selected_sdev = sdev;
+ sdev->present = true;
+ sdev->slave_addr = slave_addr;
+ rc = bif_parse_slave_data(sdev);
+ if (rc) {
+ pr_err("Failed to parse slave data, addr=0x%02X, rc=%d\n",
+ slave_addr, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int bif_add_known_slaves_from_dt(struct bif_ctrl_dev *bdev,
+ struct device_node *of_node)
+{
+ int len = 0;
+	int rc = 0, i;
+ u32 addr;
+ const __be32 *val;
+
+ mutex_lock(&bif_sdev_list_mutex);
+ mutex_lock(&bdev->mutex);
+
+ val = of_get_property(of_node, "qcom,known-device-addresses", &len);
+ len /= sizeof(u32);
+ if (val && len == 0) {
+ pr_err("qcom,known-device-addresses property is invalid\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < len; i++) {
+ addr = be32_to_cpup(val++);
+ if (addr == 0x00 || addr > 0xFF) {
+ rc = -EINVAL;
+ pr_err("qcom,known-device-addresses property contains invalid address=0x%X\n",
+ addr);
+ goto out;
+ }
+ rc = bif_add_known_slave(bdev, addr);
+ if (rc) {
+ pr_err("bif_add_known_slave() failed, rc=%d\n", rc);
+ goto out;
+ }
+ }
+
+out:
+ if (len > 0)
+ bif_remove_duplicate_slaves(bdev);
+
+ mutex_unlock(&bdev->mutex);
+ mutex_unlock(&bif_sdev_list_mutex);
+
+ return rc;
+}
+
+/*
+ * Programs a device address for the specified slave in order to simplify
+ * slave selection in the future.
+ */
+static int bif_assign_slave_dev_addr(struct bif_slave_dev *sdev, u8 dev_addr)
+{
+ int rc;
+ u16 addr;
+
+ if (!sdev->protocol_function) {
+ pr_err("Protocol function not present; cannot set device address.\n");
+ return -ENODEV;
+ }
+
+ addr = PROTOCOL_FUNC_DEV_ADR_ADDR(
+ sdev->protocol_function->protocol_pointer);
+
+ rc = _bif_slave_write(sdev, addr, &dev_addr, 1);
+ if (rc)
+ pr_err("Failed to set slave device address.\n");
+ else
+ sdev->slave_addr = dev_addr;
+
+ return rc;
+}
+
+/* Assigns a unique device address to all slaves which do not have one. */
+static int bif_assign_all_slaves_dev_addr(struct bif_ctrl_dev *bdev)
+{
+ struct bif_slave_dev *sdev;
+ struct bif_slave_dev *sibling;
+ bool duplicate;
+ int rc = 0;
+ u8 dev_addr, first_dev_addr;
+
+ mutex_lock(&bif_sdev_list_mutex);
+ mutex_lock(&bdev->mutex);
+
+ first_dev_addr = next_dev_addr;
+ /*
+	 * Iterate over all present slaves with fully known UIDs and assign a
+	 * device address to each that does not already have one.
+ */
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ /*
+ * Skip slaves without known UIDs, which already have a device
+ * address or which aren't present.
+ */
+ if (sdev->unique_id_bits_known != BIF_UNIQUE_ID_BIT_LENGTH
+ || sdev->slave_addr != 0x00 || !sdev->present)
+ continue;
+
+ do {
+ dev_addr = next_dev_addr;
+ duplicate = false;
+ list_for_each_entry(sibling, &bif_sdev_list, list) {
+ if (sibling->slave_addr == dev_addr) {
+ duplicate = true;
+ break;
+ }
+ }
+
+ next_dev_addr = dev_addr + 1;
+ } while (duplicate && (next_dev_addr != first_dev_addr));
+
+ if (next_dev_addr == first_dev_addr) {
+ pr_err("No more BIF slave device addresses available.\n");
+ rc = -ENODEV;
+ goto out;
+ }
+
+ rc = bif_assign_slave_dev_addr(sdev, dev_addr);
+ if (rc) {
+ pr_err("Failed to set slave address.\n");
+ goto out;
+ }
+ }
+
+ mutex_unlock(&bdev->mutex);
+ mutex_unlock(&bif_sdev_list_mutex);
+
+ return rc;
+
+out:
+ mutex_unlock(&bdev->mutex);
+ mutex_unlock(&bif_sdev_list_mutex);
+ pr_err("BIF slave device address setting failed, rc=%d\n", rc);
+ return rc;
+}
+
+/**
+ * bdev_get_drvdata() - get the private BIF controller driver data
+ * @bdev: BIF controller device pointer
+ */
+void *bdev_get_drvdata(struct bif_ctrl_dev *bdev)
+{
+ return bdev->driver_data;
+}
+EXPORT_SYMBOL(bdev_get_drvdata);
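+/*
+ * Usage sketch (illustrative; "struct foo_ctrl" stands in for whatever
+ * private structure the controller driver passed as driver_data to
+ * bif_ctrl_register()):
+ *
+ *	struct foo_ctrl *ctrl = bdev_get_drvdata(bdev);
+ */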
+
+static const char * const battery_label[] = {
+ "unknown",
+ "none",
+ "special 1",
+ "special 2",
+ "special 3",
+ "low cost",
+ "smart",
+};
+
+static const char *bif_get_battery_pack_type(int rid_ohm)
+{
+ const char *label = battery_label[0];
+
+ if (rid_ohm > BIF_BATT_RID_SMART_MAX)
+ label = battery_label[1];
+ else if (rid_ohm >= BIF_BATT_RID_SMART_MIN)
+ label = battery_label[6];
+ else if (rid_ohm >= BIF_BATT_RID_LOW_COST_MIN
+ && rid_ohm <= BIF_BATT_RID_LOW_COST_MAX)
+ label = battery_label[5];
+ else if (rid_ohm >= BIF_BATT_RID_SPECIAL3_MIN
+ && rid_ohm <= BIF_BATT_RID_SPECIAL3_MAX)
+ label = battery_label[4];
+ else if (rid_ohm >= BIF_BATT_RID_SPECIAL2_MIN
+ && rid_ohm <= BIF_BATT_RID_SPECIAL2_MAX)
+ label = battery_label[3];
+ else if (rid_ohm >= BIF_BATT_RID_SPECIAL1_MIN
+ && rid_ohm <= BIF_BATT_RID_SPECIAL1_MAX)
+ label = battery_label[2];
+
+ return label;
+}
+
+/**
+ * bif_ctrl_register() - register a BIF controller with the BIF framework
+ * @bif_desc: Pointer to BIF controller descriptor
+ * @dev: Device pointer of the BIF controller
+ * @driver_data: Private driver data to associate with the BIF controller
+ * @of_node:	Pointer to the device tree node of the BIF controller
+ *
+ * Returns a BIF controller device pointer for the controller if registration
+ * is successful or an ERR_PTR if an error occurred.
+ */
+struct bif_ctrl_dev *bif_ctrl_register(struct bif_ctrl_desc *bif_desc,
+ struct device *dev, void *driver_data, struct device_node *of_node)
+{
+ struct bif_ctrl_dev *bdev = ERR_PTR(-EINVAL);
+ struct bif_slave_dev *sdev;
+ bool battery_present = false;
+ int rc, rid_ohm;
+
+ if (!bif_desc) {
+ pr_err("Invalid bif_desc specified\n");
+ return bdev;
+ } else if (!bif_desc->name) {
+ pr_err("BIF name missing\n");
+ return bdev;
+ } else if (!bif_desc->ops) {
+ pr_err("BIF operations missing\n");
+ return bdev;
+ } else if (!bif_desc->ops->bus_transaction
+ || !bif_desc->ops->bus_transaction_query
+ || !bif_desc->ops->bus_transaction_read
+ || !bif_desc->ops->get_bus_state
+ || !bif_desc->ops->set_bus_state) {
+ pr_err("BIF operation callback function(s) missing\n");
+ return bdev;
+ }
+
+ bdev = kzalloc(sizeof(struct bif_ctrl_dev), GFP_KERNEL);
+ if (bdev == NULL) {
+ pr_err("Memory allocation failed for bif_ctrl_dev\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mutex_init(&bdev->mutex);
+ INIT_LIST_HEAD(&bdev->list);
+ INIT_DELAYED_WORK(&bdev->enter_irq_mode_work, bif_enter_irq_mode_work);
+ bdev->desc = bif_desc;
+ bdev->ctrl_dev = dev;
+ bdev->driver_data = driver_data;
+ bdev->irq_mode_delay_jiffies = 2;
+
+ mutex_lock(&bif_ctrl_list_mutex);
+ list_add_tail(&bdev->list, &bif_ctrl_list);
+ mutex_unlock(&bif_ctrl_list_mutex);
+
+ rc = bif_add_all_slaves(bdev);
+ if (rc)
+ pr_debug("Search for all slaves failed, rc=%d\n", rc);
+ rc = bif_add_known_slaves_from_dt(bdev, of_node);
+ if (rc)
+		pr_err("Adding slaves based on device tree addresses failed, rc=%d.\n",
+ rc);
+ rc = bif_assign_all_slaves_dev_addr(bdev);
+ if (rc)
+ pr_err("Failed to set slave device address, rc=%d\n", rc);
+
+ bif_print_slaves();
+
+ if (bdev->desc->ops->get_battery_presence) {
+ rc = bdev->desc->ops->get_battery_presence(bdev);
+ if (rc < 0) {
+ pr_err("Could not determine battery presence, rc=%d\n",
+ rc);
+ } else {
+ battery_present = rc;
+ pr_info("Battery pack present = %c\n", rc ? 'Y' : 'N');
+ }
+ }
+
+ if (bdev->desc->ops->get_battery_rid) {
+ rid_ohm = bdev->desc->ops->get_battery_rid(bdev);
+ if (rid_ohm >= 0)
+ pr_info("Battery pack type = %s (Rid=%d ohm)\n",
+ bif_get_battery_pack_type(rid_ohm), rid_ohm);
+ else
+ pr_err("Could not read Rid, rc=%d\n", rid_ohm);
+ }
+
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ if (sdev->present) {
+ battery_present = true;
+ break;
+ }
+ }
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&bdev->bus_change_notifier);
+
+ if (battery_present) {
+ bdev->battery_present = true;
+ rc = blocking_notifier_call_chain(&bdev->bus_change_notifier,
+ BIF_BUS_EVENT_BATTERY_INSERTED, bdev);
+ if (rc)
+			pr_err("Call chain notification failed, rc=%d\n", rc);
+ }
+
+ return bdev;
+}
+EXPORT_SYMBOL(bif_ctrl_register);
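+/*
+ * Registration sketch for a controller driver (illustrative only; the
+ * "foo_*" names, the "chip" pointer and the platform-driver context are
+ * hypothetical, and the ops structure type name is assumed to match the one
+ * used by this framework's driver header):
+ *
+ *	static struct bif_ctrl_ops foo_ops = {
+ *		.bus_transaction	= foo_bus_transaction,
+ *		.bus_transaction_query	= foo_bus_transaction_query,
+ *		.bus_transaction_read	= foo_bus_transaction_read,
+ *		.get_bus_state		= foo_get_bus_state,
+ *		.set_bus_state		= foo_set_bus_state,
+ *	};
+ *
+ *	static struct bif_ctrl_desc foo_desc = {
+ *		.name	= "foo-bif",
+ *		.ops	= &foo_ops,
+ *	};
+ *
+ *	bdev = bif_ctrl_register(&foo_desc, &pdev->dev, chip,
+ *				 pdev->dev.of_node);
+ *	if (IS_ERR(bdev))
+ *		return PTR_ERR(bdev);
+ */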
+
+/**
+ * bif_ctrl_unregister() - unregisters a BIF controller
+ * @bdev: BIF controller device pointer
+ */
+void bif_ctrl_unregister(struct bif_ctrl_dev *bdev)
+{
+ if (bdev) {
+ mutex_lock(&bif_ctrl_list_mutex);
+ list_del(&bdev->list);
+ mutex_unlock(&bif_ctrl_list_mutex);
+ }
+}
+EXPORT_SYMBOL(bif_ctrl_unregister);
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index 1ca457f..832a9a1 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -55,7 +55,7 @@
},
{
.id = ION_CP_MM_HEAP_ID,
- .type = ION_HEAP_TYPE_CP,
+ .type = ION_HEAP_TYPE_SECURE_DMA,
.name = ION_MM_HEAP_NAME,
.permission_type = IPT_TYPE_MM_CARVEOUT,
},
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index b7d813c..b1a45bf 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2000,8 +2000,17 @@
/* Is the ring buffer is empty? */
GSL_RB_GET_READPTR(rb, &rb->rptr);
if (!device->active_cnt && (rb->rptr == rb->wptr)) {
- /* Is the core idle? */
- status = is_adreno_rbbm_status_idle(device);
+ /*
+ * Are there interrupts pending? If so then pretend we
+			 * are not idle - this avoids the possibility that we go
+ * to a lower power state without handling interrupts
+ * first.
+ */
+
+ if (!adreno_dev->gpudev->irq_pending(adreno_dev)) {
+ /* Is the core idle? */
+ status = is_adreno_rbbm_status_idle(device);
+ }
}
} else {
status = true;
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index cc6eb16..b1cab9b 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -125,6 +125,7 @@
struct adreno_context *);
irqreturn_t (*irq_handler)(struct adreno_device *);
void (*irq_control)(struct adreno_device *, int);
+ unsigned int (*irq_pending)(struct adreno_device *);
void * (*snapshot)(struct adreno_device *, void *, int *, int);
void (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
void (*start)(struct adreno_device *);
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 952d1f8..6db6e7b 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1706,34 +1706,6 @@
return;
}
- if (status & CP_INT_CNTL__RB_INT_MASK) {
- /* signal intr completion event */
- unsigned int context_id, timestamp;
- kgsl_sharedmem_readl(&device->memstore, &context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
-
- kgsl_sharedmem_readl(&device->memstore, ×tamp,
- KGSL_MEMSTORE_OFFSET(context_id,
- eoptimestamp));
-
- if (context_id < KGSL_MEMSTORE_MAX) {
- /* reset per context ts_cmp_enable */
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id,
- ts_cmp_enable), 0);
- /* Always reset global timestamp ts_cmp_enable */
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(
- KGSL_MEMSTORE_GLOBAL,
- ts_cmp_enable), 0);
- wmb();
- }
-
- KGSL_CMD_WARN(device, "<%d:0x%x> ringbuffer interrupt\n",
- context_id, timestamp);
- }
-
for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) {
if (status & kgsl_cp_error_irqs[i].mask) {
KGSL_CMD_CRIT(rb->device, "%s\n",
@@ -1840,6 +1812,19 @@
wmb();
}
+static unsigned int a2xx_irq_pending(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int rbbm, cp, mh;
+
+ adreno_regread(device, REG_RBBM_INT_CNTL, &rbbm);
+ adreno_regread(device, REG_CP_INT_CNTL, &cp);
+ adreno_regread(device, MH_INTERRUPT_MASK, &mh);
+
+ return ((rbbm & RBBM_INT_MASK) || (cp & CP_INT_MASK) ||
+ (mh & kgsl_mmu_get_int_mask())) ? 1 : 0;
+}
+
static void a2xx_rb_init(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb)
{
@@ -2035,6 +2020,7 @@
.ctxt_draw_workaround = a2xx_drawctxt_draw_workaround,
.irq_handler = a2xx_irq_handler,
.irq_control = a2xx_irq_control,
+ .irq_pending = a2xx_irq_pending,
.snapshot = a2xx_snapshot,
.rb_init = a2xx_rb_init,
.busy_cycles = a2xx_busy_cycles,
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index a3739a2..73a7f52 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2591,33 +2591,7 @@
{
struct kgsl_device *device = &adreno_dev->dev;
- if (irq == A3XX_INT_CP_RB_INT) {
- unsigned int context_id, timestamp;
- kgsl_sharedmem_readl(&device->memstore, &context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
-
- kgsl_sharedmem_readl(&device->memstore, ×tamp,
- KGSL_MEMSTORE_OFFSET(context_id,
- eoptimestamp));
-
- if (context_id < KGSL_MEMSTORE_MAX) {
- /* reset per context ts_cmp_enable */
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(context_id,
- ts_cmp_enable), 0);
- /* Always reset global timestamp ts_cmp_enable */
- kgsl_sharedmem_writel(&device->memstore,
- KGSL_MEMSTORE_OFFSET(
- KGSL_MEMSTORE_GLOBAL,
- ts_cmp_enable), 0);
- wmb();
- }
-
- KGSL_CMD_WARN(device, "<%d:0x%x> ringbuffer interrupt\n",
- context_id, timestamp);
- }
-
+ /* Wake up everybody waiting for the interrupt */
wake_up_interruptible_all(&device->wait_queue);
/* Schedule work to free mem and issue ibs */
@@ -2713,6 +2687,15 @@
adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, 0);
}
+static unsigned int a3xx_irq_pending(struct adreno_device *adreno_dev)
+{
+ unsigned int status;
+
+ adreno_regread(&adreno_dev->dev, A3XX_RBBM_INT_0_STATUS, &status);
+
+ return (status & A3XX_INT_MASK) ? 1 : 0;
+}
+
static unsigned int a3xx_busy_cycles(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = &adreno_dev->dev;
@@ -2958,6 +2941,7 @@
.rb_init = a3xx_rb_init,
.irq_control = a3xx_irq_control,
.irq_handler = a3xx_irq_handler,
+ .irq_pending = a3xx_irq_pending,
.busy_cycles = a3xx_busy_cycles,
.start = a3xx_start,
.snapshot = a3xx_snapshot,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index c43ac51..1d25646 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -570,7 +570,7 @@
total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
/* Add CP_COND_EXEC commands to generate CP_INTERRUPT */
- total_sizedwords += context ? 7 : 0;
+ total_sizedwords += context ? 13 : 0;
if (adreno_is_a3xx(adreno_dev))
total_sizedwords += 7;
@@ -720,7 +720,25 @@
context_id, ref_wait_ts)) >> 2);
GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
/* # of conditional command DWORDs */
- GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 8);
+
+ /* Clear the ts_cmp_enable for the context */
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_MEM_WRITE, 2));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, gpuaddr +
+ KGSL_MEMSTORE_OFFSET(
+ context_id, ts_cmp_enable));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x0);
+
+ /* Clear the ts_cmp_enable for the global timestamp */
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_MEM_WRITE, 2));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, gpuaddr +
+ KGSL_MEMSTORE_OFFSET(
+ KGSL_MEMSTORE_GLOBAL, ts_cmp_enable));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x0);
+
+ /* Trigger the interrupt */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_INTERRUPT, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 115fcb7..7ed0b10 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1088,10 +1088,7 @@
result);
/* Fire off any pending suspend operations that are in flight */
-
- INIT_COMPLETION(dev_priv->device->suspend_gate);
- dev_priv->device->active_cnt--;
- complete(&dev_priv->device->suspend_gate);
+ kgsl_active_count_put(dev_priv->device);
return result;
}
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 62316f3..66390fc 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -133,6 +133,7 @@
void *priv;
struct list_head list;
void *owner;
+ unsigned int created;
};
@@ -449,4 +450,23 @@
kref_put(&context->refcount, kgsl_context_destroy);
}
+/**
+ * kgsl_active_count_put - Decrease the device active count
+ * @device: Pointer to a KGSL device
+ *
+ * Decrease the active count for the KGSL device and trigger the suspend_gate
+ * completion if it hits zero
+ */
+static inline void
+kgsl_active_count_put(struct kgsl_device *device)
+{
+ if (device->active_cnt == 1)
+ INIT_COMPLETION(device->suspend_gate);
+
+ device->active_cnt--;
+
+ if (device->active_cnt == 0)
+ complete(&device->suspend_gate);
+}
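+
+/*
+ * Note: each call must balance a prior increment of device->active_cnt,
+ * such as the one taken in kgsl_events.c when a timestamp event is
+ * registered.
+ */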
+
#endif /* __KGSL_DEVICE_H */
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index be9b5eb..6798eed 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -16,6 +16,8 @@
#include <linux/module.h>
#include <kgsl_device.h>
+#include "kgsl_trace.h"
+
static void _add_event_to_list(struct list_head *head, struct kgsl_event *event)
{
struct list_head *n;
@@ -71,6 +73,7 @@
*/
if (timestamp_cmp(cur_ts, ts) >= 0) {
+ trace_kgsl_fire_event(id, ts, 0);
cb(device, priv, id, ts);
return 0;
}
@@ -84,6 +87,9 @@
event->priv = priv;
event->func = cb;
event->owner = owner;
+ event->created = jiffies;
+
+ trace_kgsl_register_event(id, ts);
/* inc refcount to avoid race conditions in cleanup */
if (context)
@@ -106,6 +112,13 @@
} else
_add_event_to_list(&device->events, event);
+ /*
+ * Increase the active count on the device to avoid going into power
+ * saving modes while events are pending
+ */
+
+ device->active_cnt++;
+
queue_work(device->work_queue, &device->ts_expired_ws);
return 0;
}
@@ -137,12 +150,16 @@
* system got before the event was canceled
*/
+ trace_kgsl_fire_event(id, cur, jiffies - event->created);
+
if (event->func)
event->func(device, event->priv, id, cur);
kgsl_context_put(context);
list_del(&event->list);
kfree(event);
+
+ kgsl_active_count_put(device);
}
/* Remove ourselves from the master pending list */
@@ -175,6 +192,10 @@
* the callback knows how far the GPU made it before things went
* explosion
*/
+
+ trace_kgsl_fire_event(KGSL_MEMSTORE_GLOBAL, cur,
+ jiffies - event->created);
+
if (event->func)
event->func(device, event->priv, KGSL_MEMSTORE_GLOBAL,
cur);
@@ -184,6 +205,8 @@
list_del(&event->list);
kfree(event);
+
+ kgsl_active_count_put(device);
}
}
EXPORT_SYMBOL(kgsl_cancel_events);
@@ -207,6 +230,9 @@
* to the timestamp they wanted
*/
+ trace_kgsl_fire_event(id, event->timestamp,
+ jiffies - event->created);
+
if (event->func)
event->func(device, event->priv, id, event->timestamp);
@@ -215,6 +241,8 @@
list_del(&event->list);
kfree(event);
+
+ kgsl_active_count_put(device);
}
}
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index f7818bb..8c4811e 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -693,6 +693,41 @@
)
);
+TRACE_EVENT(kgsl_register_event,
+ TP_PROTO(unsigned int id, unsigned int timestamp),
+ TP_ARGS(id, timestamp),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->timestamp = timestamp;
+ ),
+ TP_printk(
+ "ctx=%d ts=%d",
+ __entry->id, __entry->timestamp)
+);
+
+TRACE_EVENT(kgsl_fire_event,
+ TP_PROTO(unsigned int id, unsigned int ts,
+ unsigned int age),
+ TP_ARGS(id, ts, age),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, ts)
+ __field(unsigned int, age)
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->ts = ts;
+ __entry->age = age;
+ ),
+ TP_printk(
+ "ctx=%d ts=%d age=%u",
+ __entry->id, __entry->ts, __entry->age)
+);
+
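+/*
+ * Both events use the standard ftrace interface and can be enabled at
+ * runtime, for example (assuming debugfs is mounted at /sys/kernel/debug and
+ * the trace system for this header is "kgsl"):
+ *
+ *	echo 1 > /sys/kernel/debug/tracing/events/kgsl/kgsl_register_event/enable
+ *	echo 1 > /sys/kernel/debug/tracing/events/kgsl/kgsl_fire_event/enable
+ */
+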
#endif /* _KGSL_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index ffddcba..1283fa3 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -55,6 +55,27 @@
If unsure, say Y.
+config UHID
+ tristate "User-space I/O driver support for HID subsystem"
+ depends on HID
+ default n
+ ---help---
+ Say Y here if you want to provide HID I/O Drivers from user-space.
+	  This allows you to write I/O drivers in user-space and feed the data
+	  from the device into the kernel. The kernel parses the HID reports,
+	  loads the corresponding HID device driver or provides input devices
+	  on top of your user-space device.
+
+	  This driver cannot be used to parse HID reports in user-space and
+	  write special HID drivers. You should use hidraw for that.
+	  Instead, this driver allows you to write the transport-layer driver
+	  in user-space, as USB-HID and Bluetooth-HID do in kernel-space.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called uhid.
+
source "drivers/hid/usbhid/Kconfig"
menu "Special HID drivers"
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 22f1d16..9dca845 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -8,6 +8,7 @@
endif
obj-$(CONFIG_HID) += hid.o
+obj-$(CONFIG_UHID) += uhid.o
hid-$(CONFIG_HIDRAW) += hidraw.o
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
new file mode 100644
index 0000000..05ef4b0
--- /dev/null
+++ b/drivers/hid/uhid.c
@@ -0,0 +1,153 @@
+/*
+ * User-space I/O driver support for HID subsystem
+ * Copyright (c) 2012 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/uhid.h>
+#include <linux/wait.h>
+
+#define UHID_NAME "uhid"
+#define UHID_BUFSIZE 32
+
+struct uhid_device {
+ struct hid_device *hid;
+
+ wait_queue_head_t waitq;
+ spinlock_t qlock;
+ __u8 head;
+ __u8 tail;
+ struct uhid_event *outq[UHID_BUFSIZE];
+};
+
+static struct miscdevice uhid_misc;
+
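+/*
+ * outq is a simple ring buffer of pending events for user-space: head and
+ * tail wrap modulo UHID_BUFSIZE, so at most UHID_BUFSIZE - 1 events can be
+ * queued. When the queue is full the newest event is dropped and freed.
+ */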
+static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
+{
+ __u8 newhead;
+
+ newhead = (uhid->head + 1) % UHID_BUFSIZE;
+
+ if (newhead != uhid->tail) {
+ uhid->outq[uhid->head] = ev;
+ uhid->head = newhead;
+ wake_up_interruptible(&uhid->waitq);
+ } else {
+ hid_warn(uhid->hid, "Output queue is full\n");
+ kfree(ev);
+ }
+}
+
+static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
+{
+ unsigned long flags;
+ struct uhid_event *ev;
+
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->type = event;
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ uhid_queue(uhid, ev);
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+
+ return 0;
+}
+
+static int uhid_char_open(struct inode *inode, struct file *file)
+{
+ struct uhid_device *uhid;
+
+ uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
+ if (!uhid)
+ return -ENOMEM;
+
+ spin_lock_init(&uhid->qlock);
+ init_waitqueue_head(&uhid->waitq);
+
+ file->private_data = uhid;
+ nonseekable_open(inode, file);
+
+ return 0;
+}
+
+static int uhid_char_release(struct inode *inode, struct file *file)
+{
+ struct uhid_device *uhid = file->private_data;
+ unsigned int i;
+
+ for (i = 0; i < UHID_BUFSIZE; ++i)
+ kfree(uhid->outq[i]);
+
+ kfree(uhid);
+
+ return 0;
+}
+
+static ssize_t uhid_char_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
+{
+ return 0;
+}
+
+static const struct file_operations uhid_fops = {
+ .owner = THIS_MODULE,
+ .open = uhid_char_open,
+ .release = uhid_char_release,
+ .read = uhid_char_read,
+ .write = uhid_char_write,
+ .poll = uhid_char_poll,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice uhid_misc = {
+ .fops = &uhid_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = UHID_NAME,
+};
+
+static int __init uhid_init(void)
+{
+ return misc_register(&uhid_misc);
+}
+
+static void __exit uhid_exit(void)
+{
+ misc_deregister(&uhid_misc);
+}
+
+module_init(uhid_init);
+module_exit(uhid_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
+MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index 08a4566..6974cb4 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -461,9 +461,6 @@
static inline int __msm_sd_close_session_streams(struct v4l2_subdev *sd,
struct msm_sd_close_ioctl *sd_close)
{
- v4l2_subdev_call(sd, core, ioctl,
- MSM_SD_CLOSE_SESSION_AND_STREAM, &sd_close);
-
return 0;
}
diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
index 6ea86ae..e0c4083 100644
--- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
+++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
@@ -172,6 +172,7 @@
/* put buf before buf done */
if (msm_vb2->in_freeq) {
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ msm_vb2->in_freeq = 0;
rc = 0;
} else
rc = -EINVAL;
diff --git a/drivers/net/ethernet/msm/Kconfig b/drivers/net/ethernet/msm/Kconfig
index e15f4a9..4e95614 100644
--- a/drivers/net/ethernet/msm/Kconfig
+++ b/drivers/net/ethernet/msm/Kconfig
@@ -42,6 +42,16 @@
help
Debug stats on wakeup counts.
+config MSM_RMNET_WWAN
+ tristate "MSM RMNET WWAN Network Device"
+ depends on IPA
+ default n
+ help
+	  WWAN network driver.
+	  Provides an API for embedded
+	  applications to send and receive
+	  data to/from A2.
+
config QFEC
tristate "QFEC ethernet driver"
select MII
diff --git a/drivers/net/ethernet/msm/Makefile b/drivers/net/ethernet/msm/Makefile
index e152ec7..0afa00f 100644
--- a/drivers/net/ethernet/msm/Makefile
+++ b/drivers/net/ethernet/msm/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_MSM_RMNET) += msm_rmnet.o
+obj-$(CONFIG_MSM_RMNET_WWAN) += msm_rmnet_wwan.o
obj-$(CONFIG_MSM_RMNET_SDIO) += msm_rmnet_sdio.o
obj-$(CONFIG_MSM_RMNET_BAM) += msm_rmnet_bam.o
obj-$(CONFIG_MSM_RMNET_SMUX) += msm_rmnet_smux.o
diff --git a/drivers/net/ethernet/msm/msm_rmnet_wwan.c b/drivers/net/ethernet/msm/msm_rmnet_wwan.c
new file mode 100644
index 0000000..fe1ac46
--- /dev/null
+++ b/drivers/net/ethernet/msm/msm_rmnet_wwan.c
@@ -0,0 +1,736 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * WWAN Network Interface.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/msm_rmnet.h>
+#include <linux/if_arp.h>
+#include <linux/platform_device.h>
+#include <net/pkt_sched.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <mach/ipa.h>
+
+#define WWAN_DEV_NAME "rmnet%d"
+#define WWAN_METADATA_MASK 0x00FF0000
+#define IPA_RM_INACTIVITY_TIMER 1000
+#define WWAN_DEVICE_COUNT (8)
+#define WWAN_DATA_LEN 2000
+#define HEADROOM_FOR_A2_MUX 8 /* for mux header */
+#define TAILROOM 8 /* for padding by mux layer */
+
+enum wwan_device_status {
+ WWAN_DEVICE_INACTIVE = 0,
+ WWAN_DEVICE_ACTIVE = 1
+};
+static enum ipa_rm_resource_name
+ ipa_rm_resource_by_ch_id[WWAN_DEVICE_COUNT] = {
+ IPA_RM_RESOURCE_WWAN_0_PROD,
+ IPA_RM_RESOURCE_WWAN_1_PROD,
+ IPA_RM_RESOURCE_WWAN_2_PROD,
+ IPA_RM_RESOURCE_WWAN_3_PROD,
+ IPA_RM_RESOURCE_WWAN_4_PROD,
+ IPA_RM_RESOURCE_WWAN_5_PROD,
+ IPA_RM_RESOURCE_WWAN_6_PROD,
+ IPA_RM_RESOURCE_WWAN_7_PROD
+};
+static enum a2_mux_logical_channel_id
+ a2_mux_lcid_by_ch_id[WWAN_DEVICE_COUNT] = {
+ A2_MUX_WWAN_0,
+ A2_MUX_WWAN_1,
+ A2_MUX_WWAN_2,
+ A2_MUX_WWAN_3,
+ A2_MUX_WWAN_4,
+ A2_MUX_WWAN_5,
+ A2_MUX_WWAN_6,
+ A2_MUX_WWAN_7
+};
+
+/**
+ * struct wwan_private - WWAN private data
+ * @stats: iface statistics
+ * @ch_id: channel id
+ * @lock: spinlock for mutual exclusion
+ * @device_status: holds device status
+ *
+ * WWAN private - holds all relevant info about WWAN driver
+ */
+struct wwan_private {
+ struct net_device_stats stats;
+ uint32_t ch_id;
+ spinlock_t lock;
+ struct completion resource_granted_completion;
+ enum wwan_device_status device_status;
+};
+
+static struct net_device *netdevs[WWAN_DEVICE_COUNT];
+
+static __be16 wwan_ip_type_trans(struct sk_buff *skb)
+{
+ __be16 protocol = 0;
+ /* Determine L3 protocol */
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ pr_err("[%s] %s() L3 protocol decode error: 0x%02x",
+ skb->dev->name, __func__, skb->data[0] & 0xf0);
+ /* skb will be dropped in upper layer for unknown protocol */
+ break;
+ }
+ return protocol;
+}
+
+/**
+ * a2_mux_recv_notify() - Deliver an RX packet to network stack
+ *
+ * @skb: skb to be delivered
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void a2_mux_recv_notify(void *dev, struct sk_buff *skb)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+ skb->dev = dev;
+ skb->protocol = wwan_ip_type_trans(skb);
+ wwan_ptr->stats.rx_packets++;
+ wwan_ptr->stats.rx_bytes += skb->len;
+ pr_debug("[%s] Rx packet #%lu len=%d\n",
+ skb->dev->name,
+ wwan_ptr->stats.rx_packets, skb->len);
+ netif_rx(skb);
+}
+
+/**
+ * wwan_send_packet() - Deliver a TX packet to A2 MUX driver.
+ *
+ * @skb: skb to be delivered
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -EAGAIN: A2 MUX is not ready to send the skb. try later
+ * -EFAULT: A2 MUX rejected the skb
+ * -EPERM: Unknown error
+ */
+static int wwan_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+ int ret;
+
+ dev->trans_start = jiffies;
+ ret = a2_mux_write(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id], skb);
+ if (ret != 0 && ret != -EAGAIN && ret != -EFAULT) {
+ pr_err("[%s] %s: write returned error %d",
+ dev->name, __func__, ret);
+ return -EPERM;
+ }
+ return ret;
+}
+
+/**
+ * a2_mux_write_done() - Update device statistics and wake the
+ * network stack queue if it was stopped and the A2 MUX queue is
+ * below the low watermark.
+ *
+ * @dev: network device
+ * @skb: skb to be delivered
+ *
+ * Return codes:
+ * None
+ */
+static void a2_mux_write_done(void *dev, struct sk_buff *skb)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+ unsigned long flags;
+
+ pr_debug("%s: write complete\n", __func__);
+ wwan_ptr->stats.tx_packets++;
+ wwan_ptr->stats.tx_bytes += skb->len;
+ pr_debug("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+ ((struct net_device *)(dev))->name, wwan_ptr->stats.tx_packets,
+ skb->len, skb->mark);
+ dev_kfree_skb_any(skb);
+ spin_lock_irqsave(&wwan_ptr->lock, flags);
+ if (netif_queue_stopped(dev) &&
+ a2_mux_is_ch_low(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id])) {
+ pr_debug("%s: Low WM hit, waking queue=%p\n",
+ __func__, skb);
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&wwan_ptr->lock, flags);
+}
+
+/**
+ * a2_mux_notify() - Callback function for A2 MUX events Handles
+ * A2_MUX_RECEIVE and A2_MUX_WRITE_DONE events.
+ *
+ * @dev: network device
+ * @event: A2 MUX event
+ * @data: Additional data provided by A2 MUX
+ *
+ * Return codes:
+ * None
+ */
+static void a2_mux_notify(void *dev, enum a2_mux_event_type event,
+ unsigned long data)
+{
+ struct sk_buff *skb = (struct sk_buff *)data;
+
+ switch (event) {
+ case A2_MUX_RECEIVE:
+ if (!skb) {
+ pr_err("[%s] %s: No skb received",
+ ((struct net_device *)dev)->name, __func__);
+ return;
+ }
+ a2_mux_recv_notify(dev, skb);
+ break;
+ case A2_MUX_WRITE_DONE:
+ a2_mux_write_done(dev, skb);
+ break;
+ default:
+ pr_err("%s: unknown event %d\n", __func__, event);
+ break;
+ }
+}
+
+/**
+ * ipa_rm_resource_granted() - Called upon the
+ * IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_resource_granted(void *dev)
+{
+ netif_wake_queue(dev);
+}
+/**
+ * ipa_rm_notify() - Callback function for RM events. Handles
+ * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
+ * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
+ * workqueue.
+ *
+ * @dev: network device
+ * @event: IPA RM event
+ * @data: Additional data provided by IPA RM
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_notify(void *dev, enum ipa_rm_event event,
+ unsigned long data)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+ pr_debug("%s: event %d\n", __func__, event);
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
+ complete_all(&wwan_ptr->resource_granted_completion);
+ break;
+ }
+ ipa_rm_resource_granted(dev);
+ break;
+ case IPA_RM_RESOURCE_RELEASED:
+ break;
+ default:
+ pr_err("%s: unknown event %d\n", __func__, event);
+ break;
+ }
+}
+
+static int wwan_register_to_ipa(struct net_device *dev)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+ struct ipa_tx_intf tx_properties = {0};
+ struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
+ struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
+ struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
+ struct ipa_rx_intf rx_properties = {0};
+ struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+ struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+ struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+ int ret = 0;
+
+ pr_debug("[%s] %s:\n", dev->name, __func__);
+ tx_properties.prop = tx_ioc_properties;
+ tx_ipv4_property = &tx_properties.prop[0];
+ tx_ipv4_property->ip = IPA_IP_v4;
+ tx_ipv4_property->dst_pipe = IPA_CLIENT_A2_EMBEDDED_CONS;
+ snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+ A2_MUX_HDR_NAME_V4_PREF,
+ a2_mux_lcid_by_ch_id[wwan_ptr->ch_id]);
+ tx_ipv6_property = &tx_properties.prop[1];
+ tx_ipv6_property->ip = IPA_IP_v6;
+ tx_ipv6_property->dst_pipe = IPA_CLIENT_A2_EMBEDDED_CONS;
+ snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+ A2_MUX_HDR_NAME_V6_PREF,
+ a2_mux_lcid_by_ch_id[wwan_ptr->ch_id]);
+ tx_properties.num_props = 2;
+ rx_properties.prop = rx_ioc_properties;
+ rx_ipv4_property = &rx_properties.prop[0];
+ rx_ipv4_property->ip = IPA_IP_v4;
+ rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_ipv4_property->attrib.meta_data = wwan_ptr->ch_id;
+ rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+ rx_ipv4_property->src_pipe = IPA_CLIENT_A2_EMBEDDED_PROD;
+ rx_ipv6_property = &rx_properties.prop[1];
+ rx_ipv6_property->ip = IPA_IP_v6;
+ rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_ipv6_property->attrib.meta_data = wwan_ptr->ch_id;
+ rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+ rx_ipv6_property->src_pipe = IPA_CLIENT_A2_EMBEDDED_PROD;
+ rx_properties.num_props = 2;
+ ret = ipa_register_intf(dev->name, &tx_properties, &rx_properties);
+ if (ret) {
+ pr_err("[%s] %s: ipa_register_intf failed %d\n", dev->name,
+ __func__, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int __wwan_open(struct net_device *dev)
+{
+ int r;
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+ pr_debug("[%s] __wwan_open()\n", dev->name);
+ if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE) {
+ INIT_COMPLETION(wwan_ptr->resource_granted_completion);
+ r = ipa_rm_inactivity_timer_request_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ if (r < 0 && r != -EINPROGRESS) {
+ pr_err("%s: ipa rm timer request resource failed %d\n",
+ __func__, r);
+ return -ENODEV;
+ }
+ if (r == -EINPROGRESS) {
+ wait_for_completion(
+ &wwan_ptr->resource_granted_completion);
+ }
+ r = a2_mux_open_channel(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id],
+ dev, a2_mux_notify);
+ if (r < 0) {
+ pr_err("%s: ch=%d failed with rc %d\n",
+ __func__, wwan_ptr->ch_id, r);
+ ipa_rm_inactivity_timer_release_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ return -ENODEV;
+ }
+ ipa_rm_inactivity_timer_release_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ r = wwan_register_to_ipa(dev);
+ if (r < 0) {
+ pr_err("%s: ch=%d failed to register to IPA rc %d\n",
+ __func__, wwan_ptr->ch_id, r);
+ return -ENODEV;
+ }
+ }
+ wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
+ return 0;
+}
+
+/**
+ * wwan_open() - Opens the wwan network interface. Opens logical
+ * channel on A2 MUX driver and starts the network stack queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while opening logical channel on A2 MUX driver
+ */
+static int wwan_open(struct net_device *dev)
+{
+ int rc = 0;
+
+ pr_debug("[%s] wwan_open()\n", dev->name);
+ rc = __wwan_open(dev);
+ if (rc == 0)
+ netif_start_queue(dev);
+ return rc;
+}
+
+
+static int __wwan_close(struct net_device *dev)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+ int rc = 0;
+
+ if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
+ wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
+		/* Do not close the wwan port once it is up; this causes the
+		   remote side to hang if it is opened again. */
+ INIT_COMPLETION(wwan_ptr->resource_granted_completion);
+ rc = ipa_rm_inactivity_timer_request_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ if (rc < 0 && rc != -EINPROGRESS) {
+ pr_err("%s: ipa rm timer request resource failed %d\n",
+ __func__, rc);
+ return -ENODEV;
+ }
+ if (rc == -EINPROGRESS) {
+ wait_for_completion(
+ &wwan_ptr->resource_granted_completion);
+ }
+ rc = a2_mux_close_channel(
+ a2_mux_lcid_by_ch_id[wwan_ptr->ch_id]);
+ if (rc) {
+ pr_err("[%s] %s: a2_mux_close_channel failed %d\n",
+ dev->name, __func__, rc);
+ ipa_rm_inactivity_timer_release_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ return rc;
+ }
+ ipa_rm_inactivity_timer_release_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ rc = ipa_deregister_intf(dev->name);
+ if (rc) {
+ pr_err("[%s] %s: ipa_deregister_intf failed %d\n",
+ dev->name, __func__, rc);
+ return rc;
+ }
+ return rc;
+ } else
+ return -EBADF;
+}
+
+/**
+ * wwan_stop() - Stops the wwan network interface. Closes
+ * logical channel on A2 MUX driver and stops the network stack
+ * queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while closing logical channel on A2 MUX driver
+ */
+static int wwan_stop(struct net_device *dev)
+{
+ pr_debug("[%s] wwan_stop()\n", dev->name);
+ __wwan_close(dev);
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int wwan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (0 > new_mtu || WWAN_DATA_LEN < new_mtu)
+ return -EINVAL;
+ pr_debug("[%s] MTU change: old=%d new=%d\n",
+ dev->name, dev->mtu, new_mtu);
+ dev->mtu = new_mtu;
+ return 0;
+}
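+
+/*
+ * For example (illustrative; assumes the first WWAN interface was registered
+ * as "rmnet0" per WWAN_DEV_NAME), the MTU can be changed from user space
+ * with:
+ *
+ *	ip link set dev rmnet0 mtu 1500
+ *
+ * Requests above WWAN_DATA_LEN (2000) or below 0 are rejected with -EINVAL.
+ */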
+
+/**
+ * wwan_xmit() - Transmits an skb. Responsible for requesting the
+ * needed IPA RM resources. If IPA RM is not ready, the skb is
+ * saved and transmitted as soon as the IPA RM resources are
+ * granted.
+ *
+ * @skb: skb to be transmitted
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
+ * later
+ * -EFAULT: Error while transmitting the skb
+ */
+static int wwan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+ unsigned long flags;
+ int ret = 0;
+
+ if (netif_queue_stopped(dev)) {
+ pr_err("[%s]fatal: wwan_xmit called when netif_queue stopped\n",
+ dev->name);
+ return 0;
+ }
+ ret = ipa_rm_inactivity_timer_request_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ if (ret == -EINPROGRESS) {
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+ if (ret) {
+ pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
+ dev->name, ret);
+ return -EFAULT;
+ }
+ ret = wwan_send_packet(skb, dev);
+ if (ret == -EPERM) {
+ ret = NETDEV_TX_BUSY;
+ goto exit;
+ }
+ /*
+ * detected SSR a bit early. shut some things down now, and leave
+ * the rest to the main ssr handling code when that happens later
+ */
+ if (ret == -EFAULT) {
+ netif_carrier_off(dev);
+ dev_kfree_skb_any(skb);
+ ret = 0;
+ goto exit;
+ }
+ if (ret == -EAGAIN) {
+ /*
+ * This should not happen
+ * EAGAIN means we attempted to overflow the high watermark
+ * Clearly the queue is not stopped like it should be, so
+ * stop it and return BUSY to the TCP/IP framework. It will
+		 * retry this packet when the queue is restarted, which happens
+ * in the write_done callback when the low watermark is hit.
+ */
+ netif_stop_queue(dev);
+ ret = NETDEV_TX_BUSY;
+ goto exit;
+ }
+ spin_lock_irqsave(&wwan_ptr->lock, flags);
+ if (a2_mux_is_ch_full(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id])) {
+ netif_stop_queue(dev);
+ pr_debug("%s: High WM hit, stopping queue=%p\n",
+ __func__, skb);
+ }
+ spin_unlock_irqrestore(&wwan_ptr->lock, flags);
+exit:
+ ipa_rm_inactivity_timer_release_resource(
+ ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+ return ret;
+}
+
+static struct net_device_stats *wwan_get_stats(struct net_device *dev)
+{
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+ return &wwan_ptr->stats;
+}
+
+static void wwan_tx_timeout(struct net_device *dev)
+{
+ pr_warning("[%s] wwan_tx_timeout()\n", dev->name);
+}
+
+/**
+ * wwan_ioctl() - I/O control for wwan network driver.
+ *
+ * @dev: network device
+ * @ifr: interface request; carries data for some of the commands
+ * @cmd: command to be executed; one of the RMNET_IOCTL_* commands, e.g.:
+ * RMNET_IOCTL_OPEN - Open the transport port
+ * RMNET_IOCTL_CLOSE - Close the transport port
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: unsupported command
+ * Other negative values may be returned by the open/close handlers.
+ */
+static int wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ int rc = 0;
+
+ switch (cmd) {
+ case RMNET_IOCTL_SET_LLP_IP: /* Set RAWIP protocol */
+ break;
+ case RMNET_IOCTL_GET_LLP: /* Get link protocol state */
+ ifr->ifr_ifru.ifru_data = (void *) RMNET_MODE_LLP_IP;
+ break;
+ case RMNET_IOCTL_SET_QOS_DISABLE: /* Set QoS header disabled */
+ break;
+ case RMNET_IOCTL_FLOW_ENABLE:
+ tc_qdisc_flow_control(dev, (u32)ifr->ifr_data, 1);
+ pr_debug("[%s] %s: enabled flow", dev->name, __func__);
+ break;
+ case RMNET_IOCTL_FLOW_DISABLE:
+ tc_qdisc_flow_control(dev, (u32)ifr->ifr_data, 0);
+ pr_debug("[%s] %s: disabled flow", dev->name, __func__);
+ break;
+ case RMNET_IOCTL_GET_QOS: /* Get QoS header state */
+ /* QoS disabled */
+ ifr->ifr_ifru.ifru_data = (void *) 0;
+ break;
+ case RMNET_IOCTL_GET_OPMODE: /* Get operation mode */
+ ifr->ifr_ifru.ifru_data = (void *) RMNET_MODE_LLP_IP;
+ break;
+ case RMNET_IOCTL_OPEN: /* Open transport port */
+ rc = __wwan_open(dev);
+ pr_debug("[%s] wwan_ioctl(): open transport port\n",
+ dev->name);
+ break;
+ case RMNET_IOCTL_CLOSE: /* Close transport port */
+ rc = __wwan_close(dev);
+ pr_debug("[%s] wwan_ioctl(): close transport port\n",
+ dev->name);
+ break;
+ default:
+ pr_err("[%s] error: wwan_ioct called for unsupported cmd[%d]",
+ dev->name, cmd);
+ return -EINVAL;
+ }
+ return rc;
+}
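The RMNET_IOCTL_* commands above are private device ioctls, dispatched to wwan_ioctl() through the ndo_do_ioctl hook, so user space reaches them with an ordinary socket and an ifreq naming the interface. A minimal user-space sketch, assuming the RMNET_IOCTL_OPEN/RMNET_IOCTL_CLOSE codes are the SIOCDEVPRIVATE-based values exported by the platform's msm_rmnet.h (header path and interface name are illustrative, not taken from this patch):

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/msm_rmnet.h>	/* assumed to define RMNET_IOCTL_OPEN/CLOSE */

	static int rmnet_port_open(const char *ifname)
	{
		struct ifreq ifr;
		int fd, rc;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		/* routed by the kernel to wwan_ioctl() on this device */
		rc = ioctl(fd, RMNET_IOCTL_OPEN, &ifr);
		close(fd);
		return rc;
	}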
+
+static const struct net_device_ops wwan_ops_ip = {
+ .ndo_open = wwan_open,
+ .ndo_stop = wwan_stop,
+ .ndo_start_xmit = wwan_xmit,
+ .ndo_get_stats = wwan_get_stats,
+ .ndo_tx_timeout = wwan_tx_timeout,
+ .ndo_do_ioctl = wwan_ioctl,
+ .ndo_change_mtu = wwan_change_mtu,
+ .ndo_set_mac_address = 0,
+ .ndo_validate_addr = 0,
+};
+
+/**
+ * wwan_setup() - Sets up the WWAN network device.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void wwan_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &wwan_ops_ip;
+ ether_setup(dev);
+ /* set this after calling ether_setup */
+ dev->header_ops = 0; /* No header */
+ dev->type = ARPHRD_RAWIP;
+ dev->hard_header_len = 0;
+ dev->mtu = WWAN_DATA_LEN;
+ dev->addr_len = 0;
+ dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ dev->needed_headroom = HEADROOM_FOR_A2_MUX;
+ dev->needed_tailroom = TAILROOM;
+ dev->watchdog_timeo = 1000;
+}
+
+/**
+ * wwan_init() - Initializes the module and registers the WWAN
+ * devices as network interfaces with the network stack
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: No memory available
+ * -EFAULT: Internal error
+ */
+static int __init wwan_init(void)
+{
+ int ret;
+ struct net_device *dev;
+ struct wwan_private *wwan_ptr;
+ unsigned n;
+ struct ipa_rm_create_params ipa_rm_params;
+
+ pr_info("%s: WWAN devices[%d]\n", __func__, WWAN_DEVICE_COUNT);
+ for (n = 0; n < WWAN_DEVICE_COUNT; n++) {
+ dev = alloc_netdev(sizeof(struct wwan_private),
+ WWAN_DEV_NAME, wwan_setup);
+ if (!dev) {
+ pr_err("%s: no memory for netdev %d\n", __func__, n);
+ ret = -ENOMEM;
+ goto fail;
+ }
+ netdevs[n] = dev;
+ wwan_ptr = netdev_priv(dev);
+ wwan_ptr->ch_id = n;
+ spin_lock_init(&wwan_ptr->lock);
+ init_completion(&wwan_ptr->resource_granted_completion);
+ memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
+ ipa_rm_params.name = ipa_rm_resource_by_ch_id[n];
+ ipa_rm_params.reg_params.user_data = dev;
+ ipa_rm_params.reg_params.notify_cb = ipa_rm_notify;
+ ret = ipa_rm_create_resource(&ipa_rm_params);
+ if (ret) {
+ pr_err("%s: unable to create resourse %d in IPA RM\n",
+ __func__, ipa_rm_resource_by_ch_id[n]);
+ goto fail;
+ }
+ ret = ipa_rm_inactivity_timer_init(ipa_rm_resource_by_ch_id[n],
+ IPA_RM_INACTIVITY_TIMER);
+ if (ret) {
+ pr_err("%s: ipa rm timer init failed %d on ins %d\n",
+ __func__, ret, n);
+ goto fail;
+ }
+ ret = ipa_rm_add_dependency(ipa_rm_resource_by_ch_id[n],
+ IPA_RM_RESOURCE_A2_CONS);
+ if (ret) {
+ pr_err("%s: unable to add dependency %d rc=%d\n",
+ __func__, n, ret);
+ goto fail;
+ }
+ ret = register_netdev(dev);
+ if (ret) {
+ pr_err("%s: unable to register netdev %d rc=%d\n",
+ __func__, n, ret);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ for (n = 0; n < WWAN_DEVICE_COUNT; n++) {
+ if (!netdevs[n])
+ break;
+ unregister_netdev(netdevs[n]);
+ ipa_rm_inactivity_timer_destroy(ipa_rm_resource_by_ch_id[n]);
+ free_netdev(netdevs[n]);
+ netdevs[n] = NULL;
+ }
+ return ret;
+}
+late_initcall(wwan_init);
+
+void wwan_cleanup(void)
+{
+ unsigned n;
+
+ pr_info("%s: WWAN devices[%d]\n", __func__, WWAN_DEVICE_COUNT);
+ for (n = 0; n < WWAN_DEVICE_COUNT; n++) {
+ unregister_netdev(netdevs[n]);
+ ipa_rm_inactivity_timer_destroy(ipa_rm_resource_by_ch_id[n]);
+ free_netdev(netdevs[n]);
+ netdevs[n] = NULL;
+ }
+}
+
+MODULE_DESCRIPTION("WWAN Network Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/ipa/a2_service.c b/drivers/platform/msm/ipa/a2_service.c
index 2c5245c..60cb6bd 100644
--- a/drivers/platform/msm/ipa/a2_service.c
+++ b/drivers/platform/msm/ipa/a2_service.c
@@ -53,6 +53,8 @@
spinlock_t lock;
int num_tx_pkts;
int use_wm;
+ u32 v4_hdr_hdl;
+ u32 v6_hdr_hdl;
};
struct tx_pkt_info {
struct sk_buff *skb;
@@ -70,6 +72,7 @@
u8 ch_id;
u16 pkt_len;
};
+
struct a2_mux_context_type {
u32 tethered_prod;
u32 tethered_cons;
@@ -515,6 +518,9 @@
goto bridge_tethered_dl_failed;
}
memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+ connect_params.ipa_ep_cfg.hdr.hdr_len = sizeof(struct bam_mux_hdr);
+ connect_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
+ connect_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 6;
connect_params.client = IPA_CLIENT_A2_EMBEDDED_CONS;
connect_params.notify = ipa_embedded_notify;
connect_params.desc_fifo_sz = 0x800;
@@ -527,6 +533,9 @@
goto bridge_embedded_ul_failed;
}
memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+ connect_params.ipa_ep_cfg.hdr.hdr_len = sizeof(struct bam_mux_hdr);
+ connect_params.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
+ connect_params.ipa_ep_cfg.hdr.hdr_ofst_metadata = 4;
connect_params.client = IPA_CLIENT_A2_EMBEDDED_PROD;
connect_params.notify = ipa_embedded_notify;
connect_params.desc_fifo_sz = 0x800;
@@ -1006,6 +1015,176 @@
}
/**
+ * a2_mux_add_hdr() - called when MUX header should
+ * be added
+ * @lcid: logical channel ID
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int a2_mux_add_hdr(enum a2_mux_logical_channel_id lcid)
+{
+ struct ipa_ioc_add_hdr *hdrs;
+ struct ipa_hdr_add *ipv4_hdr;
+ struct ipa_hdr_add *ipv6_hdr;
+ struct bam_mux_hdr *dmux_hdr;
+ int rc;
+
+ IPADBG("%s: ch %d\n", __func__, lcid);
+
+ if (lcid < A2_MUX_WWAN_0 || lcid > A2_MUX_WWAN_7) {
+ IPAERR("%s: non valid lcid passed: %d\n", __func__, lcid);
+ return -EINVAL;
+ }
+
+ hdrs = kzalloc(sizeof(struct ipa_ioc_add_hdr) +
+ 2 * sizeof(struct ipa_hdr_add), GFP_KERNEL);
+ if (!hdrs) {
+ IPAERR("%s: hdr allocation fail for ch %d\n", __func__, lcid);
+ return -ENOMEM;
+ }
+
+ ipv4_hdr = &hdrs->hdr[0];
+ ipv6_hdr = &hdrs->hdr[1];
+
+ dmux_hdr = (struct bam_mux_hdr *)ipv4_hdr->hdr;
+ snprintf(ipv4_hdr->name, IPA_RESOURCE_NAME_MAX, "%s%d",
+ A2_MUX_HDR_NAME_V4_PREF, lcid);
+ dmux_hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+ dmux_hdr->cmd = BAM_MUX_HDR_CMD_DATA;
+ dmux_hdr->reserved = 0;
+ dmux_hdr->ch_id = lcid;
+
+ /* Packet length is filled in by IPA */
+ dmux_hdr->pkt_len = 0;
+ dmux_hdr->pad_len = 0;
+
+ dmux_hdr->magic_num = htons(dmux_hdr->magic_num);
+ IPADBG("converted to network order magic_num=%d\n",
+ dmux_hdr->magic_num);
+
+ ipv4_hdr->hdr_len = sizeof(struct bam_mux_hdr);
+ ipv4_hdr->is_partial = 0;
+
+ dmux_hdr = (struct bam_mux_hdr *)ipv6_hdr->hdr;
+ snprintf(ipv6_hdr->name, IPA_RESOURCE_NAME_MAX, "%s%d",
+ A2_MUX_HDR_NAME_V6_PREF, lcid);
+ dmux_hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+ dmux_hdr->cmd = BAM_MUX_HDR_CMD_DATA;
+ dmux_hdr->reserved = 0;
+ dmux_hdr->ch_id = lcid;
+
+ /* Packet length is filled in by IPA */
+ dmux_hdr->pkt_len = 0;
+ dmux_hdr->pad_len = 0;
+
+ dmux_hdr->magic_num = htons(dmux_hdr->magic_num);
+ IPADBG("converted to network order magic_num=%d\n",
+ dmux_hdr->magic_num);
+
+ ipv6_hdr->hdr_len = sizeof(struct bam_mux_hdr);
+ ipv6_hdr->is_partial = 0;
+
+ hdrs->commit = 1;
+ hdrs->num_hdrs = 2;
+
+ rc = ipa_add_hdr(hdrs);
+ if (rc) {
+ IPAERR("Fail on Header-Insertion(%d)\n", rc);
+ goto bail;
+ }
+
+ if (ipv4_hdr->status) {
+ IPAERR("Fail on Header-Insertion ipv4(%d)\n",
+ ipv4_hdr->status);
+ rc = ipv4_hdr->status;
+ goto bail;
+ }
+
+ if (ipv6_hdr->status) {
+ IPAERR("%s: Fail on Header-Insertion ipv4(%d)\n", __func__,
+ ipv6_hdr->status);
+ rc = ipv6_hdr->status;
+ goto bail;
+ }
+
+ a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl = ipv4_hdr->hdr_hdl;
+ a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl = ipv6_hdr->hdr_hdl;
+
+ rc = 0;
+bail:
+ kfree(hdrs);
+ return rc;
+}
+
+/**
+ * a2_mux_del_hdr() - called when MUX header should
+ * be removed
+ * @lcid: logical channel ID
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int a2_mux_del_hdr(enum a2_mux_logical_channel_id lcid)
+{
+ struct ipa_ioc_del_hdr *hdrs;
+ struct ipa_hdr_del *ipv4_hdl;
+ struct ipa_hdr_del *ipv6_hdl;
+ int rc;
+
+ IPADBG("%s: ch %d\n", __func__, lcid);
+
+ if (lcid < A2_MUX_WWAN_0 || lcid > A2_MUX_WWAN_7) {
+ IPAERR("invalid lcid passed: %d\n", lcid);
+ return -EINVAL;
+ }
+
+ hdrs = kzalloc(sizeof(struct ipa_ioc_del_hdr) +
+ 2 * sizeof(struct ipa_hdr_del), GFP_KERNEL);
+ if (!hdrs) {
+ IPAERR("hdr alloc fail for ch %d\n", lcid);
+ return -ENOMEM;
+ }
+
+ ipv4_hdl = &hdrs->hdl[0];
+ ipv6_hdl = &hdrs->hdl[1];
+
+ ipv4_hdl->hdl = a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl;
+ ipv6_hdl->hdl = a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl;
+
+ hdrs->commit = 1;
+ hdrs->num_hdls = 2;
+
+ rc = ipa_del_hdr(hdrs);
+ if (rc) {
+ IPAERR("Fail on Del Header-Insertion(%d)\n", rc);
+ goto bail;
+ }
+
+ if (ipv4_hdl->status) {
+ IPAERR("Fail on Del Header-Insertion ipv4(%d)\n",
+ ipv4_hdl->status);
+ rc = ipv4_hdl->status;
+ goto bail;
+ }
+ a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl = 0;
+
+ if (ipv6_hdl->status) {
+ IPAERR("Fail on Del Header-Insertion ipv4(%d)\n",
+ ipv6_hdl->status);
+ rc = ipv6_hdl->status;
+ goto bail;
+ }
+ a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl = 0;
+
+ rc = 0;
+bail:
+ kfree(hdrs);
+ return rc;
+}
+
+/**
* a2_mux_open_channel() - opens logical channel
* to A2
* @lcid: logical channel ID
@@ -1090,6 +1269,12 @@
kfree(hdr);
return rc;
}
+ rc = a2_mux_add_hdr(lcid);
+ if (rc) {
+ IPAERR("a2_mux_add_hdr failed %d; ch: %d\n",
+ rc, lcid);
+ return rc;
+ }
}
open_done:
@@ -1154,6 +1339,13 @@
kfree(hdr);
return rc;
}
+
+ rc = a2_mux_del_hdr(lcid);
+ if (rc) {
+ IPAERR("a2_mux_del_hdr failed %d; ch: %d\n",
+ rc, lcid);
+ return rc;
+ }
}
IPADBG("%s: closed ch %d\n", __func__, lcid);
return 0;
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
index b07c653..b8e0ce7 100644
--- a/drivers/platform/msm/ipa/ipa.c
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -806,7 +806,7 @@
/* check all the system pipes for tx comp and rx avail */
if (ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep->valid)
- cnt |= ipa_handle_rx_core(false);
+ cnt |= ipa_handle_rx_core(false, true);
for (i = 0; i < num_tx_pipes; i++)
if (ipa_ctx->sys[tx_pipes[i]].ep->valid)
@@ -1578,6 +1578,10 @@
IPADBG("polling_mode=%u delay_ms=%u\n", polling_mode, polling_delay_ms);
ipa_ctx->polling_mode = polling_mode;
+ if (ipa_ctx->polling_mode)
+ atomic_set(&ipa_ctx->curr_polling_state, 1);
+ else
+ atomic_set(&ipa_ctx->curr_polling_state, 0);
IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n",
hdr_tbl_lcl, ip4_rt_tbl_lcl, ip6_rt_tbl_lcl, ip4_flt_tbl_lcl,
ip6_flt_tbl_lcl);
diff --git a/drivers/platform/msm/ipa/ipa_bridge.c b/drivers/platform/msm/ipa/ipa_bridge.c
index 56e9b0d..0227ee4 100644
--- a/drivers/platform/msm/ipa/ipa_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_bridge.c
@@ -42,9 +42,6 @@
struct sps_connect connection;
struct sps_mem_buffer desc_mem_buf;
struct sps_register_event register_event;
- spinlock_t spinlock;
- u32 len;
- u32 free_len;
struct list_head free_desc_list;
};
@@ -162,12 +159,10 @@
goto fail_dma;
}
- info->len = ~0;
-
list_add_tail(&info->link, &sys_rx->head_desc_list);
ret = sps_transfer_one(sys_rx->pipe, info->dma_address,
IPA_RX_SKB_SIZE, info,
- SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
+ SPS_IOVEC_FLAG_INT);
if (ret) {
list_del(&info->link);
dma_unmap_single(NULL, info->dma_address, IPA_RX_SKB_SIZE,
@@ -176,7 +171,6 @@
type, dir);
goto fail_dma;
}
- sys_rx->len++;
return 0;
fail_dma:
@@ -206,9 +200,6 @@
link);
list_move_tail(&tx_pkt->link,
&sys_tx->free_desc_list);
- sys_tx->len--;
- sys_tx->free_len++;
- tx_pkt->len = ~0;
cnt++;
}
} while (all);
@@ -245,7 +236,6 @@
struct ipa_pkt_info,
link);
list_del(&rx_pkt->link);
- sys_rx->len--;
rx_pkt->len = iov.size;
retry_alloc_tx:
@@ -285,15 +275,12 @@
list_add_tail(&tmp_pkt->link,
&sys_tx->free_desc_list);
- sys_tx->free_len++;
- tmp_pkt->len = ~0;
}
tx_pkt = list_first_entry(&sys_tx->free_desc_list,
struct ipa_pkt_info,
link);
list_del(&tx_pkt->link);
- sys_tx->free_len--;
retry_add_rx:
list_add_tail(&tx_pkt->link,
@@ -302,8 +289,7 @@
tx_pkt->dma_address,
IPA_RX_SKB_SIZE,
tx_pkt,
- SPS_IOVEC_FLAG_INT |
- SPS_IOVEC_FLAG_EOT);
+ SPS_IOVEC_FLAG_INT);
if (ret) {
list_del(&tx_pkt->link);
pr_debug_ratelimited("%s: sps_transfer_one failed %d type=%d dir=%d\n",
@@ -312,7 +298,6 @@
polling_max_sleep[dir]);
goto retry_add_rx;
}
- sys_rx->len++;
retry_add_tx:
list_add_tail(&rx_pkt->link,
@@ -332,7 +317,6 @@
polling_max_sleep[dir]);
goto retry_add_tx;
}
- sys_tx->len++;
IPA_STATS_INC_BRIDGE_CNT(ctx->type, dir,
ipa_ctx->stats.bridged_pkts);
}
@@ -444,7 +428,6 @@
INIT_LIST_HEAD(&sys->head_desc_list);
INIT_LIST_HEAD(&sys->free_desc_list);
- spin_lock_init(&sys->spinlock);
memset(&ipa_ctx->ep[ipa_ep_idx], 0,
sizeof(struct ipa_ep_context));
@@ -614,7 +597,6 @@
INIT_LIST_HEAD(&sys->head_desc_list);
INIT_LIST_HEAD(&sys->free_desc_list);
- spin_lock_init(&sys->spinlock);
if (dir == IPA_BRIDGE_DIR_DL) {
sys->register_event.options = SPS_O_EOT;
@@ -663,32 +645,32 @@
int ret;
int i;
- bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq = alloc_workqueue("ipa_ul_teth",
- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq =
+ create_singlethread_workqueue("ipa_ul_teth");
if (!bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq) {
IPAERR("ipa ul teth wq alloc failed\n");
ret = -ENOMEM;
goto fail_ul_teth;
}
- bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq = alloc_workqueue("ipa_dl_teth",
- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq =
+ create_singlethread_workqueue("ipa_dl_teth");
if (!bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq) {
IPAERR("ipa dl teth wq alloc failed\n");
ret = -ENOMEM;
goto fail_dl_teth;
}
- bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq = alloc_workqueue("ipa_ul_emb",
- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq =
+ create_singlethread_workqueue("ipa_ul_emb");
if (!bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq) {
IPAERR("ipa ul emb wq alloc failed\n");
ret = -ENOMEM;
goto fail_ul_emb;
}
- bridge[IPA_BRIDGE_TYPE_EMBEDDED].dl_wq = alloc_workqueue("ipa_dl_emb",
- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ bridge[IPA_BRIDGE_TYPE_EMBEDDED].dl_wq =
+ create_singlethread_workqueue("ipa_dl_emb");
if (!bridge[IPA_BRIDGE_TYPE_EMBEDDED].dl_wq) {
IPAERR("ipa dl emb wq alloc failed\n");
ret = -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_debugfs.c
index ec83653..1605ed2 100644
--- a/drivers/platform/msm/ipa/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_debugfs.c
@@ -45,6 +45,37 @@
__stringify(IPA_CLIENT_MAX),
};
+const char *ipa_ic_name[] = {
+ __stringify_1(IPA_IP_CMD_INVALID),
+ __stringify_1(IPA_DECIPH_INIT),
+ __stringify_1(IPA_PPP_FRM_INIT),
+ __stringify_1(IPA_IP_V4_FILTER_INIT),
+ __stringify_1(IPA_IP_V6_FILTER_INIT),
+ __stringify_1(IPA_IP_V4_NAT_INIT),
+ __stringify_1(IPA_IP_V6_NAT_INIT),
+ __stringify_1(IPA_IP_V4_ROUTING_INIT),
+ __stringify_1(IPA_IP_V6_ROUTING_INIT),
+ __stringify_1(IPA_HDR_INIT_LOCAL),
+ __stringify_1(IPA_HDR_INIT_SYSTEM),
+ __stringify_1(IPA_DECIPH_SETUP),
+ __stringify_1(IPA_INSERT_NAT_RULE),
+ __stringify_1(IPA_DELETE_NAT_RULE),
+ __stringify_1(IPA_NAT_DMA),
+ __stringify_1(IPA_IP_PACKET_TAG),
+ __stringify_1(IPA_IP_PACKET_INIT),
+};
+
+const char *ipa_excp_name[] = {
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD0),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD1),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IHL),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_TAG),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_NAT),
+ __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP),
+};
+
static struct dentry *dent;
static struct dentry *dfile_gen_reg;
static struct dentry *dfile_ep_reg;
@@ -489,33 +520,39 @@
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"sw_tx=%u\n"
"hw_tx=%u\n"
- "rx=%u\n",
+ "rx=%u\n"
+ "rx_repl_repost=%u\n"
+ "x_intr_repost=%u\n"
+ "rx_q_len=%u\n",
ipa_ctx->stats.tx_sw_pkts,
ipa_ctx->stats.tx_hw_pkts,
- ipa_ctx->stats.rx_pkts);
+ ipa_ctx->stats.rx_pkts,
+ ipa_ctx->stats.rx_repl_repost,
+ ipa_ctx->stats.x_intr_repost,
+ ipa_ctx->stats.rx_q_len);
cnt += nbytes;
for (i = 0; i < MAX_NUM_EXCP; i++) {
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
- "rx_excp[%u]=%u\n", i,
+ "rx_excp[%u:%35s]=%u\n", i, ipa_excp_name[i],
ipa_ctx->stats.rx_excp_pkts[i]);
cnt += nbytes;
}
for (i = 0; i < IPA_BRIDGE_TYPE_MAX; i++) {
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
- "bridged_pkt[%u][dl]=%u\n"
- "bridged_pkt[%u][ul]=%u\n",
- i,
+ "brg_pkt[%u:%s][dl]=%u\n"
+ "brg_pkt[%u:%s][ul]=%u\n",
+ i, (i == 0) ? "teth" : "embd",
ipa_ctx->stats.bridged_pkts[i][0],
- i,
+ i, (i == 0) ? "teth" : "embd",
ipa_ctx->stats.bridged_pkts[i][1]);
cnt += nbytes;
}
for (i = 0; i < MAX_NUM_IMM_CMD; i++) {
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
- "IC[%u]=%u\n", i,
+ "IC[%2u:%22s]=%u\n", i, ipa_ic_name[i],
ipa_ctx->stats.imm_cmds[i]);
cnt += nbytes;
}
diff --git a/drivers/platform/msm/ipa/ipa_dp.c b/drivers/platform/msm/ipa/ipa_dp.c
index 52ed428..38690e9 100644
--- a/drivers/platform/msm/ipa/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_dp.c
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/list.h>
@@ -19,6 +20,17 @@
#define list_next_entry(pos, member) \
list_entry(pos->member.next, typeof(*pos), member)
#define IPA_LAST_DESC_CNT 0xFFFF
+#define POLLING_INACTIVITY 40
+#define POLLING_MIN_SLEEP 950
+#define POLLING_MAX_SLEEP 1050
+
+static void replenish_rx_work_func(struct work_struct *work);
+static struct delayed_work replenish_rx_work;
+static void switch_to_intr_work_func(struct work_struct *work);
+static struct delayed_work switch_to_intr_work;
+static void ipa_wq_handle_rx(struct work_struct *work);
+static DECLARE_WORK(rx_work, ipa_wq_handle_rx);
+
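Taken together these constants bound the idle-polling window used later in ipa_handle_rx(): each pass that finds no packets sleeps roughly 950-1050 us (POLLING_MIN_SLEEP..POLLING_MAX_SLEEP), and after POLLING_INACTIVITY (40) consecutive empty passes the driver drops back to interrupt mode, so the data path keeps polling for on the order of 40 ms after the last received packet before re-arming the interrupt.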
/**
* ipa_write_done() - this function will be (eventually) called when a Tx
* operation is complete
@@ -40,7 +52,7 @@
unsigned long irq_flags;
struct ipa_mem_buffer mult = { 0 };
int i;
- u16 cnt;
+ u32 cnt;
tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
cnt = tx_pkt->cnt;
@@ -66,9 +78,8 @@
}
next_pkt = list_next_entry(tx_pkt, link);
list_del(&tx_pkt->link);
- tx_pkt->sys->len--;
spin_unlock_irqrestore(&tx_pkt->sys->spinlock, irq_flags);
- if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
+ if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
dma_pool_free(ipa_ctx->one_kb_no_straddle_pool,
tx_pkt->bounce,
tx_pkt->mem.phys_base);
@@ -114,10 +125,10 @@
u16 sps_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
dma_addr_t dma_address;
u16 len;
- u32 mem_flag = GFP_KERNEL;
+ u32 mem_flag = GFP_ATOMIC;
- if (in_atomic)
- mem_flag = GFP_ATOMIC;
+ if (unlikely(!in_atomic))
+ mem_flag = GFP_KERNEL;
tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, mem_flag);
if (!tx_pkt) {
@@ -125,7 +136,7 @@
goto fail_mem_alloc;
}
- if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
+ if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
WARN_ON(desc->len > 512);
/*
@@ -173,19 +184,15 @@
if (desc->type == IPA_IMM_CMD_DESC) {
sps_flags |= SPS_IOVEC_FLAG_IMME;
len = desc->opcode;
+ IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
+ desc->opcode, desc->len, sps_flags);
+ IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
} else {
len = desc->len;
}
- if (desc->type == IPA_IMM_CMD_DESC) {
- IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
- desc->opcode, desc->len, sps_flags);
- IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
- }
-
spin_lock_irqsave(&sys->spinlock, irq_flags);
list_add_tail(&tx_pkt->link, &sys->head_desc_list);
- sys->len++;
result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
sps_flags);
if (result) {
@@ -200,7 +207,7 @@
fail_sps_send:
list_del(&tx_pkt->link);
spin_unlock_irqrestore(&sys->spinlock, irq_flags);
- if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
+ if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
dma_address);
else
@@ -233,7 +240,7 @@
*
* Return codes: 0: success, -EFAULT: failure
*/
-int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc,
+int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
bool in_atomic)
{
struct ipa_tx_pkt_wrapper *tx_pkt;
@@ -247,17 +254,18 @@
int result;
int fail_dma_wrap = 0;
uint size = num_desc * sizeof(struct sps_iovec);
- u32 mem_flag = GFP_KERNEL;
+ u32 mem_flag = GFP_ATOMIC;
- if (likely(in_atomic))
- mem_flag = GFP_ATOMIC;
+ if (unlikely(!in_atomic))
+ mem_flag = GFP_KERNEL;
transfer.iovec = dma_alloc_coherent(NULL, size, &dma_addr, 0);
transfer.iovec_phys = dma_addr;
transfer.iovec_count = num_desc;
+ spin_lock_irqsave(&sys->spinlock, irq_flags);
if (!transfer.iovec) {
IPAERR("fail to alloc DMA mem for sps xfr buff\n");
- goto failure;
+ goto failure_coherent;
}
for (i = 0; i < num_desc; i++) {
@@ -274,24 +282,23 @@
*/
if (i == 0) {
transfer.user = tx_pkt;
-
tx_pkt->mult.phys_base = dma_addr;
tx_pkt->mult.base = transfer.iovec;
tx_pkt->mult.size = size;
tx_pkt->cnt = num_desc;
+ INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
}
iovec = &transfer.iovec[i];
iovec->flags = 0;
INIT_LIST_HEAD(&tx_pkt->link);
- INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
tx_pkt->type = desc[i].type;
tx_pkt->mem.base = desc[i].pyld;
tx_pkt->mem.size = desc[i].len;
- if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
+ if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
WARN_ON(tx_pkt->mem.size > 512);
/*
@@ -334,10 +341,7 @@
* add this packet to system pipe context.
*/
iovec->addr = tx_pkt->mem.phys_base;
- spin_lock_irqsave(&sys->spinlock, irq_flags);
list_add_tail(&tx_pkt->link, &sys->head_desc_list);
- sys->len++;
- spin_unlock_irqrestore(&sys->spinlock, irq_flags);
/*
* Special treatment for immediate commands, where the structure
@@ -364,16 +368,15 @@
goto failure;
}
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
return 0;
failure:
tx_pkt = transfer.user;
for (j = 0; j < i; j++) {
- spin_lock_irqsave(&sys->spinlock, irq_flags);
next_pkt = list_next_entry(tx_pkt, link);
list_del(&tx_pkt->link);
- spin_unlock_irqrestore(&sys->spinlock, irq_flags);
- if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
+ if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
dma_pool_free(ipa_ctx->one_kb_no_straddle_pool,
tx_pkt->bounce,
tx_pkt->mem.phys_base);
@@ -391,7 +394,8 @@
if (transfer.iovec_phys)
dma_free_coherent(NULL, size, transfer.iovec,
transfer.iovec_phys);
-
+failure_coherent:
+ spin_unlock_irqrestore(&sys->spinlock, irq_flags);
return -EFAULT;
}
@@ -512,15 +516,14 @@
* - Call the endpoints notify function, passing the skb in the parameters
* - Replenish the rx cache
*/
-int ipa_handle_rx_core(bool process_all)
+int ipa_handle_rx_core(bool process_all, bool in_poll_state)
{
struct ipa_a5_mux_hdr *mux_hdr;
struct ipa_rx_pkt_wrapper *rx_pkt;
struct sk_buff *rx_skb;
struct sps_iovec iov;
- unsigned long irq_flags;
- u16 pull_len;
- u16 padding;
+ unsigned int pull_len;
+ unsigned int padding;
int ret;
struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
struct ipa_ep_context *ep;
@@ -528,35 +531,35 @@
struct completion *compl;
struct ipa_tree_node *node;
- do {
+ while ((in_poll_state ? atomic_read(&ipa_ctx->curr_polling_state) :
+ !atomic_read(&ipa_ctx->curr_polling_state))) {
+ if (cnt && !process_all)
+ break;
+
ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
if (ret) {
IPAERR("sps_get_iovec failed %d\n", ret);
break;
}
- /* Break the loop when there are no more packets to receive */
if (iov.addr == 0)
break;
- spin_lock_irqsave(&sys->spinlock, irq_flags);
- if (list_empty(&sys->head_desc_list))
- WARN_ON(1);
+ if (unlikely(list_empty(&sys->head_desc_list)))
+ continue;
+
rx_pkt = list_first_entry(&sys->head_desc_list,
struct ipa_rx_pkt_wrapper, link);
- if (!rx_pkt)
- WARN_ON(1);
+
rx_pkt->len = iov.size;
sys->len--;
list_del(&rx_pkt->link);
- spin_unlock_irqrestore(&sys->spinlock, irq_flags);
IPADBG("--curr_cnt=%d\n", sys->len);
rx_skb = rx_pkt->skb;
dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
DMA_FROM_DEVICE);
- kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
/*
* make it look like a real skb, "data" was already set at
@@ -565,6 +568,7 @@
rx_skb->tail = rx_skb->data + rx_pkt->len;
rx_skb->len = rx_pkt->len;
rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
@@ -601,20 +605,20 @@
}
mutex_unlock(&ipa_ctx->lock);
}
- dev_kfree_skb_any(rx_skb);
+ dev_kfree_skb(rx_skb);
ipa_replenish_rx_cache();
++cnt;
continue;
}
- if (mux_hdr->src_pipe_index >= IPA_NUM_PIPES ||
+ if (unlikely(mux_hdr->src_pipe_index >= IPA_NUM_PIPES ||
!ipa_ctx->ep[mux_hdr->src_pipe_index].valid ||
- !ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify) {
+ !ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify)) {
IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
mux_hdr->src_pipe_index,
ipa_ctx->ep[mux_hdr->src_pipe_index].valid,
ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify);
- dev_kfree_skb_any(rx_skb);
+ dev_kfree_skb(rx_skb);
ipa_replenish_rx_cache();
++cnt;
continue;
@@ -634,11 +638,11 @@
IPADBG("pulling %d bytes from skb\n", pull_len);
skb_pull(rx_skb, pull_len);
+ ipa_replenish_rx_cache();
ep->client_notify(ep->priv, IPA_RECEIVE,
(unsigned long)(rx_skb));
- ipa_replenish_rx_cache();
cnt++;
- } while (process_all);
+ }
return cnt;
}
@@ -652,9 +656,9 @@
struct ipa_sys_context *sys;
IPADBG("Enter");
- if (!ipa_ctx->curr_polling_state) {
+ if (!atomic_read(&ipa_ctx->curr_polling_state)) {
IPAERR("already in intr mode\n");
- return;
+ goto fail;
}
sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
@@ -662,49 +666,28 @@
ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
if (ret) {
IPAERR("sps_get_config() failed %d\n", ret);
- return;
+ goto fail;
}
sys->event.options = SPS_O_EOT;
ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
if (ret) {
IPAERR("sps_register_event() failed %d\n", ret);
- return;
+ goto fail;
}
sys->ep->connect.options =
SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
if (ret) {
IPAERR("sps_set_config() failed %d\n", ret);
- return;
+ goto fail;
}
- ipa_handle_rx_core(true);
- ipa_ctx->curr_polling_state = 0;
-}
+ atomic_set(&ipa_ctx->curr_polling_state, 0);
+ ipa_handle_rx_core(true, false);
+ return;
-/**
- * ipa_rx_switch_to_poll_mode() - Operate the Rx data path in polling mode
- */
-static void ipa_rx_switch_to_poll_mode(void)
-{
- int ret;
- struct ipa_ep_context *ep;
-
- IPADBG("Enter");
- ep = ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep;
-
- ret = sps_get_config(ep->ep_hdl, &ep->connect);
- if (ret) {
- IPAERR("sps_get_config() failed %d\n", ret);
- return;
- }
- ep->connect.options =
- SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
- ret = sps_set_config(ep->ep_hdl, &ep->connect);
- if (ret) {
- IPAERR("sps_set_config() failed %d\n", ret);
- return;
- }
- ipa_ctx->curr_polling_state = 1;
+fail:
+ IPA_STATS_INC_CNT(ipa_ctx->stats.x_intr_repost);
+ schedule_delayed_work(&switch_to_intr_work, msecs_to_jiffies(1));
}
/**
@@ -722,16 +705,30 @@
*/
static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
{
- struct ipa_rx_pkt_wrapper *rx_pkt;
+ struct ipa_ep_context *ep;
+ int ret;
IPADBG("event %d notified\n", notify->event_id);
switch (notify->event_id) {
case SPS_EVENT_EOT:
- if (!ipa_ctx->curr_polling_state) {
- ipa_rx_switch_to_poll_mode();
- rx_pkt = notify->data.transfer.user;
- queue_work(ipa_ctx->rx_wq, &rx_pkt->work);
+ if (!atomic_read(&ipa_ctx->curr_polling_state)) {
+ ep = ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep;
+
+ ret = sps_get_config(ep->ep_hdl, &ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ break;
+ }
+ ep->connect.options = SPS_O_AUTO_ENABLE |
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(ep->ep_hdl, &ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ break;
+ }
+ atomic_set(&ipa_ctx->curr_polling_state, 1);
+ queue_work(ipa_ctx->rx_wq, &rx_work);
}
break;
default:
@@ -861,6 +858,9 @@
/* fall through */
case 3:
sys_idx = ipa_ep_idx;
+ INIT_DELAYED_WORK(&replenish_rx_work, replenish_rx_work_func);
+ INIT_DELAYED_WORK(&switch_to_intr_work,
+ switch_to_intr_work_func);
break;
case WLAN_AMPDU_TX_EP:
sys_idx = IPA_A5_WLAN_AMPDU_OUT;
@@ -954,7 +954,7 @@
ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
IPA_WRITE_DONE, (unsigned long)skb);
else
- dev_kfree_skb_any(skb);
+ dev_kfree_skb(skb);
}
static void ipa_tx_cmd_comp(void *user1, void *user2)
@@ -1066,6 +1066,24 @@
}
EXPORT_SYMBOL(ipa_tx_dp);
+static void ipa_handle_rx(void)
+{
+ int inactive_cycles = 0;
+ int cnt;
+
+ do {
+ cnt = ipa_handle_rx_core(true, true);
+ if (cnt == 0) {
+ inactive_cycles++;
+ usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
+ } else {
+ inactive_cycles = 0;
+ }
+ } while (inactive_cycles <= POLLING_INACTIVITY);
+
+ ipa_rx_switch_to_intr_mode();
+}
+
/**
* ipa_handle_rx() - handle packet reception. This function is executed in the
* context of a work queue.
@@ -1074,10 +1092,9 @@
 * ipa_handle_rx_core() is run in polling mode. After all packets have been
* received, the driver switches back to interrupt mode.
*/
-void ipa_wq_handle_rx(struct work_struct *work)
+static void ipa_wq_handle_rx(struct work_struct *work)
{
- ipa_handle_rx_core(true);
- ipa_rx_switch_to_intr_mode();
+ ipa_handle_rx();
}
/**
@@ -1099,26 +1116,23 @@
void *ptr;
struct ipa_rx_pkt_wrapper *rx_pkt;
int ret;
- int rx_len_cached;
- unsigned long irq_flags;
+ int rx_len_cached = 0;
struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+ gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
- spin_lock_irqsave(&sys->spinlock, irq_flags);
rx_len_cached = sys->len;
- spin_unlock_irqrestore(&sys->spinlock, irq_flags);
while (rx_len_cached < IPA_RX_POOL_CEIL) {
rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
- GFP_KERNEL);
+ flag);
if (!rx_pkt) {
IPAERR("failed to alloc rx wrapper\n");
- return;
+ goto fail_kmem_cache_alloc;
}
INIT_LIST_HEAD(&rx_pkt->link);
- INIT_WORK(&rx_pkt->work, ipa_wq_handle_rx);
- rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, GFP_KERNEL);
+ rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, flag);
if (rx_pkt->skb == NULL) {
IPAERR("failed to alloc skb\n");
goto fail_skb_alloc;
@@ -1133,10 +1147,8 @@
goto fail_dma_mapping;
}
- spin_lock_irqsave(&sys->spinlock, irq_flags);
list_add_tail(&rx_pkt->link, &sys->head_desc_list);
rx_len_cached = ++sys->len;
- spin_unlock_irqrestore(&sys->spinlock, irq_flags);
ret = sps_transfer_one(sys->ep->ep_hdl, rx_pkt->dma_address,
IPA_RX_SKB_SIZE, rx_pkt,
@@ -1146,27 +1158,41 @@
IPAERR("sps_transfer_one failed %d\n", ret);
goto fail_sps_transfer;
}
-
- IPADBG("++curr_cnt=%d\n", sys->len);
}
+ ipa_ctx->stats.rx_q_len = sys->len;
+
return;
fail_sps_transfer:
- spin_lock_irqsave(&sys->spinlock, irq_flags);
list_del(&rx_pkt->link);
- --sys->len;
- spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+ rx_len_cached = --sys->len;
dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
DMA_FROM_DEVICE);
fail_dma_mapping:
- dev_kfree_skb_any(rx_pkt->skb);
+ dev_kfree_skb(rx_pkt->skb);
fail_skb_alloc:
kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
-
+fail_kmem_cache_alloc:
+ if (rx_len_cached == 0) {
+ IPA_STATS_INC_CNT(ipa_ctx->stats.rx_repl_repost);
+ schedule_delayed_work(&replenish_rx_work,
+ msecs_to_jiffies(100));
+ }
+ ipa_ctx->stats.rx_q_len = sys->len;
return;
}
+static void replenish_rx_work_func(struct work_struct *work)
+{
+ ipa_replenish_rx_cache();
+}
+
+static void switch_to_intr_work_func(struct work_struct *work)
+{
+ ipa_handle_rx();
+}
+
/**
* ipa_cleanup_rx() - release RX queue resources
*
@@ -1175,18 +1201,15 @@
{
struct ipa_rx_pkt_wrapper *rx_pkt;
struct ipa_rx_pkt_wrapper *r;
- unsigned long irq_flags;
struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
- spin_lock_irqsave(&sys->spinlock, irq_flags);
list_for_each_entry_safe(rx_pkt, r,
&sys->head_desc_list, link) {
list_del(&rx_pkt->link);
dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
DMA_FROM_DEVICE);
- dev_kfree_skb_any(rx_pkt->skb);
+ dev_kfree_skb(rx_pkt->skb);
kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
}
- spin_unlock_irqrestore(&sys->spinlock, irq_flags);
}
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
index 14195d7..a7d1efc 100644
--- a/drivers/platform/msm/ipa/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -110,7 +110,7 @@
#define IPA_EVENT_THRESHOLD 0x10
-#define IPA_RX_POOL_CEIL 24
+#define IPA_RX_POOL_CEIL 32
#define IPA_RX_SKB_SIZE 2048
#define IPA_DFLT_HDR_NAME "ipa_excp_hdr"
@@ -422,7 +422,7 @@
void *user2;
struct ipa_sys_context *sys;
struct ipa_mem_buffer mult;
- u16 cnt;
+ u32 cnt;
void *bounce;
};
@@ -453,16 +453,14 @@
* struct ipa_rx_pkt_wrapper - IPA Rx packet wrapper
* @skb: skb
* @dma_address: DMA address of this Rx packet
- * @work: work struct for current Rx packet
* @link: linked to the Rx packets on that pipe
* @len: how many bytes are copied into skb's flat buffer
*/
struct ipa_rx_pkt_wrapper {
struct sk_buff *skb;
dma_addr_t dma_address;
- struct work_struct work;
struct list_head link;
- u16 len;
+ u32 len;
};
/**
@@ -527,6 +525,9 @@
u32 rx_pkts;
u32 rx_excp_pkts[MAX_NUM_EXCP];
u32 bridged_pkts[IPA_BRIDGE_TYPE_MAX][IPA_BRIDGE_DIR_MAX];
+ u32 rx_repl_repost;
+ u32 x_intr_repost;
+ u32 rx_q_len;
};
/**
@@ -629,7 +630,7 @@
uint aggregation_type;
uint aggregation_byte_limit;
uint aggregation_time_limit;
- uint curr_polling_state;
+ atomic_t curr_polling_state;
struct delayed_work poll_work;
bool hdr_tbl_lcl;
struct ipa_mem_buffer hdr_mem;
@@ -742,7 +743,7 @@
u32 *consumer_handle);
int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
bool in_atomic);
-int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc,
+int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
bool in_atomic);
int ipa_get_ep_mapping(enum ipa_operating_mode mode,
enum ipa_client_type client);
@@ -783,8 +784,7 @@
void ipa_cleanup_rx(void);
int ipa_cfg_filter(u32 disable);
void ipa_wq_write_done(struct work_struct *work);
-void ipa_wq_handle_rx(struct work_struct *work);
-int ipa_handle_rx_core(bool process_all);
+int ipa_handle_rx_core(bool process_all, bool in_poll_state);
int ipa_pipe_mem_init(u32 start_ofst, u32 size);
int ipa_pipe_mem_alloc(u32 *ofst, u32 size);
int ipa_pipe_mem_free(u32 ofst, u32 size);
@@ -823,4 +823,7 @@
int a2_mux_init(void);
int a2_mux_exit(void);
+void wwan_cleanup(void);
+
+
#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ssbi.c b/drivers/platform/msm/ssbi.c
index a08eb48..e0bbdd1 100644
--- a/drivers/platform/msm/ssbi.c
+++ b/drivers/platform/msm/ssbi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
* Copyright (c) 2010, Google Inc.
*
* Original authors: Code Aurora Forum
@@ -362,7 +362,7 @@
ssbi->base = ioremap(mem_res->start, resource_size(mem_res));
if (!ssbi->base) {
- pr_err("ioremap of 0x%p failed\n", (void *)mem_res->start);
+ pr_err("ioremap failed: %pr\n", mem_res);
ret = -EINVAL;
goto err_ioremap;
}
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index f87a443..03b3e0d 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -255,12 +255,9 @@
struct dentry *dent;
struct bms_notify bms_notify;
int *usb_trim_table;
- struct regulator *vreg_xoadc;
bool ext_charging;
bool ext_charge_done;
bool iusb_fine_res;
- bool final_kickstart;
- bool lockup_lpm_wrkarnd;
DECLARE_BITMAP(enabled_irqs, PM_CHG_MAX_INTS);
struct work_struct battery_id_valid_work;
int64_t batt_id_min;
@@ -296,6 +293,7 @@
int stop_chg_upon_expiry;
bool disable_aicl;
int usb_type;
+ bool disable_chg_rmvl_wrkarnd;
};
/* user space parameter to limit usb current */
@@ -311,7 +309,6 @@
static struct pm8921_chg_chip *the_chip;
-static DEFINE_SPINLOCK(lpm_lock);
#define LPM_ENABLE_BIT BIT(2)
static int pm8921_chg_set_lpm(struct pm8921_chg_chip *chip, int enable)
{
@@ -340,66 +337,11 @@
static int pm_chg_write(struct pm8921_chg_chip *chip, u16 addr, u8 reg)
{
int rc;
- unsigned long flags = 0;
- u8 temp;
- /* Disable LPM */
- if (chip->lockup_lpm_wrkarnd) {
- spin_lock_irqsave(&lpm_lock, flags);
+ rc = pm8xxx_writeb(chip->dev->parent, addr, reg);
+ if (rc)
+ pr_err("failed: addr=%03X, rc=%d\n", addr, rc);
- /*
- * This delay is to prevent exit out of 32khz mode within
- * 200uS. It could be that chg was removed just few uS before
- * this gets called.
- */
- udelay(200);
- /* no clks */
- temp = 0xD1;
- rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (rc) {
- pr_err("Error %d writing %d to CHG_TEST\n", rc, temp);
- goto release_lpm_lock;
- }
-
- /* force 19.2Mhz before reading */
- temp = 0xD3;
- rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (rc) {
- pr_err("Error %d writing %d to CHG_TEST\n", rc, temp);
- goto release_lpm_lock;
- }
-
- rc = pm8xxx_writeb(chip->dev->parent, addr, reg);
- if (rc) {
- pr_err("failed: addr=%03X, rc=%d\n", addr, rc);
- goto release_lpm_lock;
- }
-
- /* no clks */
- temp = 0xD1;
- rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (rc) {
- pr_err("Error %d writing %d to CHG_TEST\n", rc, temp);
- goto release_lpm_lock;
- }
-
- /* switch to hw clk selection */
- temp = 0xD0;
- rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (rc) {
- pr_err("Error %d writing %d to CHG_TEST\n", rc, temp);
- goto release_lpm_lock;
- }
-
- udelay(200);
-
-release_lpm_lock:
- spin_unlock_irqrestore(&lpm_lock, flags);
- } else {
- rc = pm8xxx_writeb(chip->dev->parent, addr, reg);
- if (rc)
- pr_err("failed: addr=%03X, rc=%d\n", addr, rc);
- }
return rc;
}
@@ -430,23 +372,6 @@
chip->pmic_chg_irq[irq_id]);
}
-static int is_chg_on_bat(struct pm8921_chg_chip *chip)
-{
- return !(pm_chg_get_rt_status(chip, DCIN_VALID_IRQ)
- || pm_chg_get_rt_status(chip, USBIN_VALID_IRQ));
-}
-
-static void pm8921_chg_bypass_bat_gone_debounce(struct pm8921_chg_chip *chip,
- int bypass)
-{
- int rc;
-
- rc = pm_chg_write(chip, COMPARATOR_OVERRIDE, bypass ? 0x89 : 0x88);
- if (rc) {
- pr_err("Failed to set bypass bit to %d rc=%d\n", bypass, rc);
- }
-}
-
/* Treat OverVoltage/UnderVoltage as source missing */
static int is_usb_chg_plugged_in(struct pm8921_chg_chip *chip)
{
@@ -469,35 +394,8 @@
static int pm_chg_get_fsm_state(struct pm8921_chg_chip *chip)
{
u8 temp;
- unsigned long flags = 0;
int err = 0, ret = 0;
- if (chip->lockup_lpm_wrkarnd) {
- spin_lock_irqsave(&lpm_lock, flags);
-
- /*
- * This delay is to prevent exit out of 32khz mode within
- * 200uS. It could be that chg was removed just few uS before
- * this gets called.
- */
- udelay(200);
- /* no clks */
- temp = 0xD1;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
-
- /* force 19.2Mhz before reading */
- temp = 0xD3;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
- }
-
temp = CAPTURE_FSM_STATE_CMD;
err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
if (err) {
@@ -535,29 +433,7 @@
/* get the upper 1 bit */
ret |= (temp & 0x1) << 4;
- if (chip->lockup_lpm_wrkarnd) {
- /* no clks */
- temp = 0xD1;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
-
- /* switch to hw clk selection */
- temp = 0xD0;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
-
- udelay(200);
- }
-
err_out:
- if (chip->lockup_lpm_wrkarnd)
- spin_unlock_irqrestore(&lpm_lock, flags);
if (err)
return err;
@@ -568,35 +444,8 @@
static int pm_chg_get_regulation_loop(struct pm8921_chg_chip *chip)
{
u8 temp, data;
- unsigned long flags = 0;
int err = 0;
- if (chip->lockup_lpm_wrkarnd) {
- spin_lock_irqsave(&lpm_lock, flags);
-
- /*
- * This delay is to prevent exit out of 32khz mode within
- * 200uS. It could be that chg was removed just few uS before
- * this gets called.
- */
- udelay(200);
- /* no clks */
- temp = 0xD1;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
-
- /* force 19.2Mhz before reading */
- temp = 0xD3;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
- }
-
temp = READ_BANK_6;
err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
if (err) {
@@ -610,29 +459,7 @@
goto err_out;
}
- if (chip->lockup_lpm_wrkarnd) {
- /* no clks */
- temp = 0xD1;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
-
- /* switch to hw clk selection */
- temp = 0xD0;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
- goto err_out;
- }
-
- udelay(200);
- }
-
err_out:
- if (chip->lockup_lpm_wrkarnd)
- spin_unlock_irqrestore(&lpm_lock, flags);
if (err)
return err;
@@ -2099,10 +1926,10 @@
* This would also apply when the battery has been
* removed from the running system.
*/
- if (the_chip && !get_prop_batt_present(the_chip)
+ if (mA == 0 && the_chip && !get_prop_batt_present(the_chip)
&& !is_dc_chg_plugged_in(the_chip)) {
if (!the_chip->has_dc_supply) {
- pr_err("rejected: no other power source connected\n");
+ pr_err("rejected: no other power source mA = %d\n", mA);
return;
}
}
@@ -2377,96 +2204,9 @@
return get_prop_batt_temp(the_chip);
}
-static int __pm8921_apply_19p2mhz_kickstart(struct pm8921_chg_chip *chip)
-{
- int err;
- u8 temp;
-
-
- temp = 0xD1;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
- return err;
- }
-
- temp = 0xD3;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
- return err;
- }
-
- temp = 0xD1;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
- return err;
- }
-
- temp = 0xD5;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
- return err;
- }
-
- /* Wait a few clock cycles before re-enabling hw clock switching */
- udelay(183);
-
- temp = 0xD1;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
- return err;
- }
-
- temp = 0xD0;
- err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
- if (err) {
- pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
- return err;
- }
-
- /* Wait for few clock cycles before re-enabling LPM */
- udelay(32);
-
- return 0;
-}
-
-static int pm8921_apply_19p2mhz_kickstart(struct pm8921_chg_chip *chip)
-{
- int err;
- unsigned long flags = 0;
-
- spin_lock_irqsave(&lpm_lock, flags);
- err = pm8921_chg_set_lpm(chip, 0);
- if (err) {
- pr_err("Error settig LPM rc=%d\n", err);
- goto kick_err;
- }
-
- __pm8921_apply_19p2mhz_kickstart(chip);
-
-kick_err:
- err = pm8921_chg_set_lpm(chip, 1);
- if (err)
- pr_err("Error settig LPM rc=%d\n", err);
-
- spin_unlock_irqrestore(&lpm_lock, flags);
-
- return err;
-}
-
static void handle_usb_insertion_removal(struct pm8921_chg_chip *chip)
{
- int usb_present, rc = 0;
-
- if (chip->lockup_lpm_wrkarnd) {
- rc = pm8921_apply_19p2mhz_kickstart(chip);
- if (rc)
- pr_err("Failed to apply kickstart rc=%d\n", rc);
- }
+ int usb_present;
pm_chg_failed_clear(chip, 1);
usb_present = is_usb_chg_plugged_in(chip);
@@ -2476,11 +2216,6 @@
power_supply_changed(&chip->usb_psy);
power_supply_changed(&chip->batt_psy);
pm8921_bms_calibrate_hkadc();
-
- /* Enable/disable bypass if charger is on battery */
- if (chip->lockup_lpm_wrkarnd)
- pm8921_chg_bypass_bat_gone_debounce(chip,
- is_chg_on_bat(chip));
}
if (usb_present) {
schedule_delayed_work(&chip->unplug_check_work,
@@ -2496,10 +2231,6 @@
static void handle_stop_ext_chg(struct pm8921_chg_chip *chip)
{
- if (chip->lockup_lpm_wrkarnd)
- /* Enable bypass if charger is on battery */
- pm8921_chg_bypass_bat_gone_debounce(chip, is_chg_on_bat(chip));
-
if (!chip->ext_psy) {
pr_debug("external charger not registered.\n");
return;
@@ -2529,10 +2260,6 @@
unsigned long delay =
round_jiffies_relative(msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
- /* Disable bypass if charger connected and not running on bat */
- if (chip->lockup_lpm_wrkarnd)
- pm8921_chg_bypass_bat_gone_debounce(chip, is_chg_on_bat(chip));
-
if (!chip->ext_psy) {
pr_debug("external charger not registered.\n");
return;
@@ -3014,28 +2741,12 @@
pm_chg_get_fsm_state(chip),
get_prop_batt_current(chip)
);
- if (chip->lockup_lpm_wrkarnd) {
- rc = pm8921_apply_19p2mhz_kickstart(chip);
- if (rc)
- pr_err("Failed kickstart rc=%d\n", rc);
-
- /*
- * Make sure kickstart happens at least 200 ms
- * after charger has been removed.
- */
- if (chip->final_kickstart) {
- chip->final_kickstart = false;
- goto check_again_later;
- }
- }
return;
} else {
goto check_again_later;
}
}
- chip->final_kickstart = true;
-
/* AICL only for usb wall charger */
if ((active_path & USB_ACTIVE_BIT) && usb_target_ma > 0 &&
!chip->disable_aicl) {
@@ -3057,7 +2768,7 @@
pr_debug("reg_loop=0x%x usb_ma = %d\n", reg_loop, usb_ma);
ibat = get_prop_batt_current(chip);
- if (reg_loop & VIN_ACTIVE_BIT) {
+ if ((reg_loop & VIN_ACTIVE_BIT) && !chip->disable_chg_rmvl_wrkarnd) {
if (ibat > 0) {
pr_debug("revboost ibat = %d fsm = %d loop = 0x%x\n",
ibat, pm_chg_get_fsm_state(chip), reg_loop);
@@ -3077,7 +2788,8 @@
active_path, active_chg_plugged_in);
chg_gone = pm_chg_get_rt_status(chip, CHG_GONE_IRQ);
- if (chg_gone == 1 && active_chg_plugged_in == 1) {
+ if (chg_gone == 1 && active_chg_plugged_in == 1 &&
+ !chip->disable_chg_rmvl_wrkarnd) {
pr_debug("chg_gone=%d, active_chg_plugged_in = %d\n",
chg_gone, active_chg_plugged_in);
unplug_ovp_fet_open(chip);
@@ -3328,11 +3040,6 @@
else
handle_stop_ext_chg(chip);
} else {
- if (chip->lockup_lpm_wrkarnd)
- /* if no external supply call bypass debounce here */
- pm8921_chg_bypass_bat_gone_debounce(chip,
- is_chg_on_bat(chip));
-
if (dc_present)
schedule_delayed_work(&chip->unplug_check_work,
msecs_to_jiffies(UNPLUG_CHECK_WAIT_PERIOD_MS));
@@ -4164,6 +3871,91 @@
return -EINVAL;
}
+static void pm8921_chg_force_19p2mhz_clk(struct pm8921_chg_chip *chip)
+{
+ int err;
+ u8 temp;
+
+ temp = 0xD1;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+
+ temp = 0xD3;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+
+ temp = 0xD1;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+
+ temp = 0xD5;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+
+ udelay(183);
+
+ temp = 0xD1;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+
+ temp = 0xD0;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+ udelay(32);
+
+ temp = 0xD1;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+
+ temp = 0xD3;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+}
+
+static void pm8921_chg_set_hw_clk_switching(struct pm8921_chg_chip *chip)
+{
+ int err;
+ u8 temp;
+
+ temp = 0xD1;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+
+ temp = 0xD0;
+ err = pm_chg_write(chip, CHG_TEST, temp);
+ if (err) {
+ pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+ return;
+ }
+}
+
#define VREF_BATT_THERM_FORCE_ON BIT(7)
static void detect_battery_removal(struct pm8921_chg_chip *chip)
{
@@ -4195,15 +3987,8 @@
u8 subrev;
int rc, vdd_safe, fcc_uah, safety_time = DEFAULT_SAFETY_MINUTES;
- spin_lock_init(&lpm_lock);
-
- if (pm8xxx_get_version(chip->dev->parent) == PM8XXX_VERSION_8921) {
- rc = __pm8921_apply_19p2mhz_kickstart(chip);
- if (rc) {
- pr_err("Failed to apply kickstart rc=%d\n", rc);
- return rc;
- }
- }
+ /* forcing 19p2mhz before accessing any charger registers */
+ pm8921_chg_force_19p2mhz_clk(chip);
detect_battery_removal(chip);
@@ -4451,45 +4236,6 @@
return rc;
}
- if (pm8xxx_get_version(chip->dev->parent) == PM8XXX_VERSION_8921) {
- /* Clear kickstart */
- rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, 0xD0);
- if (rc) {
- pr_err("Failed to clear kickstart rc=%d\n", rc);
- return rc;
- }
-
- /* From here the lpm_workaround will be active */
- chip->lockup_lpm_wrkarnd = true;
-
- /* Enable LPM */
- pm8921_chg_set_lpm(chip, 1);
- }
-
- if (chip->lockup_lpm_wrkarnd) {
- chip->vreg_xoadc = regulator_get(chip->dev, "vreg_xoadc");
- if (IS_ERR(chip->vreg_xoadc))
- return -ENODEV;
-
- rc = regulator_set_optimum_mode(chip->vreg_xoadc, 10000);
- if (rc < 0) {
- pr_err("Failed to set configure HPM rc=%d\n", rc);
- return rc;
- }
-
- rc = regulator_set_voltage(chip->vreg_xoadc, 1800000, 1800000);
- if (rc) {
- pr_err("Failed to set L14 voltage rc=%d\n", rc);
- return rc;
- }
-
- rc = regulator_enable(chip->vreg_xoadc);
- if (rc) {
- pr_err("Failed to enable L14 rc=%d\n", rc);
- return rc;
- }
- }
-
return 0;
}
@@ -4740,19 +4486,16 @@
int rc;
struct pm8921_chg_chip *chip = dev_get_drvdata(dev);
- if (chip->lockup_lpm_wrkarnd) {
- rc = regulator_disable(chip->vreg_xoadc);
- if (rc)
- pr_err("Failed to disable L14 rc=%d\n", rc);
-
- rc = pm8921_apply_19p2mhz_kickstart(chip);
- if (rc)
- pr_err("Failed to apply kickstart rc=%d\n", rc);
- }
-
rc = pm_chg_masked_write(chip, CHG_CNTRL, VREF_BATT_THERM_FORCE_ON, 0);
if (rc)
pr_err("Failed to Force Vref therm off rc=%d\n", rc);
+
+ rc = pm8921_chg_set_lpm(chip, 1);
+ if (rc)
+ pr_err("Failed to set lpm rc=%d\n", rc);
+
+ pm8921_chg_set_hw_clk_switching(chip);
+
return 0;
}
@@ -4761,15 +4504,11 @@
int rc;
struct pm8921_chg_chip *chip = dev_get_drvdata(dev);
- if (chip->lockup_lpm_wrkarnd) {
- rc = regulator_enable(chip->vreg_xoadc);
- if (rc)
- pr_err("Failed to enable L14 rc=%d\n", rc);
+ pm8921_chg_force_19p2mhz_clk(chip);
- rc = pm8921_apply_19p2mhz_kickstart(chip);
- if (rc)
- pr_err("Failed to apply kickstart rc=%d\n", rc);
- }
+ rc = pm8921_chg_set_lpm(chip, 0);
+ if (rc)
+ pr_err("Failed to set lpm rc=%d\n", rc);
rc = pm_chg_masked_write(chip, CHG_CNTRL, VREF_BATT_THERM_FORCE_ON,
VREF_BATT_THERM_FORCE_ON);
@@ -4869,6 +4608,7 @@
chip->vin_min = pdata->vin_min;
chip->thermal_mitigation = pdata->thermal_mitigation;
chip->thermal_levels = pdata->thermal_levels;
+ chip->disable_chg_rmvl_wrkarnd = pdata->disable_chg_rmvl_wrkarnd;
chip->cold_thr = pdata->cold_thr;
chip->hot_thr = pdata->hot_thr;
@@ -5000,7 +4740,6 @@
{
struct pm8921_chg_chip *chip = platform_get_drvdata(pdev);
- regulator_put(chip->vreg_xoadc);
free_irqs(chip);
platform_set_drvdata(pdev, NULL);
the_chip = NULL;
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index 85a310a..ec0b0e7 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -670,10 +670,18 @@
#define SLEEP_CLK_HZ 32764
#define SECONDS_PER_HOUR 3600
-static s64 cc_uv_to_uvh(s64 cc_uv)
+static s64 cc_uv_to_pvh(s64 cc_uv)
{
- return div_s64(cc_uv * CC_READING_TICKS,
- SLEEP_CLK_HZ * SECONDS_PER_HOUR);
+ /* Note that it is necessary to multiply by 1000000 to convert
+ * from uVh to pVh here.
+ * However, the maximum Coulomb Counter value is 2^35, which can
+ * cause an overflow.
+ * Multiply by 100000 first to preserve as much precision as
+ * possible, then multiply by 10 after doing the division in order
+ * to avoid overflow on the maximum Coulomb Counter value.
+ */
+ return div_s64(cc_uv * CC_READING_TICKS * 100000,
+ SLEEP_CLK_HZ * SECONDS_PER_HOUR) * 10;
}
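Spelled out, the quantity being computed is

	cc_pvh = cc_uv * CC_READING_TICKS * 10^6 / (SLEEP_CLK_HZ * 3600)

i.e. the 10^6 scale factor that turns uVh into pVh is split as 10^5 * 10: the 10^5 is applied before div_s64() so no integer precision is lost, and the remaining factor of 10 is applied after the division so that, with cc_uv as large as 2^35, the intermediate product stays a decimal order of magnitude further from the s64 limit than it would with the full 10^6 multiplier.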
/**
@@ -688,7 +696,7 @@
*/
static int calculate_cc(struct qpnp_bms_chip *chip, int64_t cc)
{
- int64_t cc_voltage_uv, cc_uvh, cc_uah;
+ int64_t cc_voltage_uv, cc_pvh, cc_uah;
struct qpnp_iadc_calib calibration;
qpnp_iadc_get_gain_and_offset(&calibration);
@@ -702,9 +710,9 @@
calibration.gain_raw
- calibration.offset_raw);
pr_debug("cc_voltage_uv = %lld uv\n", cc_voltage_uv);
- cc_uvh = cc_uv_to_uvh(cc_voltage_uv);
- pr_debug("cc_uvh = %lld micro_volt_hour\n", cc_uvh);
- cc_uah = div_s64(cc_uvh * 1000000LL, chip->r_sense_uohm);
+ cc_pvh = cc_uv_to_pvh(cc_voltage_uv);
+ pr_debug("cc_pvh = %lld pvh\n", cc_pvh);
+ cc_uah = div_s64(cc_pvh, chip->r_sense_uohm);
/* cc_raw had 4 bits of extra precision.
By now it should be within 32 bit range */
return (int)cc_uah;
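The switch to picovolt-hours also makes the final division land directly in the units the rest of the driver expects: dividing pVh by a sense resistance in micro-ohms gives

	pVh / uohm = (10^-12 V*h) / (10^-6 ohm) = 10^-6 A*h = uAh

so cc_uah needs no further scaling after div_s64().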
@@ -1087,60 +1095,6 @@
return 1;
}
-#define BMS_OVERRIDE_MODE_EN_BIT BIT(7)
-#define EN_VBAT_BIT BIT(0)
-#define OVERRIDE_MODE_DELAY_MS 20
-static int override_mode_batt_v_and_i(
- struct qpnp_bms_chip *chip, int *ibat_ua, int *vbat_uv)
-{
- int16_t vsense_raw, vbat_raw;
- int vsense_uv, rc;
- u8 delay;
-
- mutex_lock(&chip->bms_output_lock);
-
- delay = 0x00;
- rc = qpnp_write_wrapper(chip, &delay,
- chip->base + BMS1_S1_DELAY_CTL, 1);
- if (rc)
- pr_err("unable to write into BMS1_S1_DELAY, rc: %d\n", rc);
-
- rc = qpnp_masked_write(chip, BMS1_MODE_CTL,
- BMS_OVERRIDE_MODE_EN_BIT | EN_VBAT_BIT,
- BMS_OVERRIDE_MODE_EN_BIT | EN_VBAT_BIT);
- if (rc)
- pr_err("unable to write into BMS1_MODE_CTL, rc: %d\n", rc);
-
- msleep(OVERRIDE_MODE_DELAY_MS);
-
- lock_output_data(chip);
- qpnp_read_wrapper(chip, (u8 *)&vsense_raw,
- chip->base + BMS1_VSENSE_AVG_DATA0, 2);
- qpnp_read_wrapper(chip, (u8 *)&vbat_raw,
- chip->base + BMS1_VBAT_AVG_DATA0, 2);
- unlock_output_data(chip);
-
- rc = qpnp_masked_write(chip, BMS1_MODE_CTL,
- BMS_OVERRIDE_MODE_EN_BIT | EN_VBAT_BIT, 0);
-
- delay = 0x0B;
- rc = qpnp_write_wrapper(chip, &delay,
- chip->base + BMS1_S1_DELAY_CTL, 1);
- if (rc)
- pr_err("unable to write into BMS1_S1_DELAY, rc: %d\n", rc);
-
- mutex_unlock(&chip->bms_output_lock);
-
- *vbat_uv = convert_vbatt_raw_to_uv(chip, vbat_raw);
- vsense_uv = convert_vsense_to_uv(chip, vsense_raw);
- *ibat_ua = div_s64(vsense_uv * 1000000LL, (int)chip->r_sense_uohm);
-
- pr_debug("vsense_raw = 0x%x vbat_raw = 0x%x ibat_ua = %d vbat_uv = %d\n",
- (uint16_t)vsense_raw, (uint16_t)vbat_raw,
- *ibat_ua, *vbat_uv);
- return 0;
-}
-
static bool is_battery_charging(struct qpnp_bms_chip *chip)
{
union power_supply_propval ret = {0,};
@@ -1180,23 +1134,21 @@
static int get_simultaneous_batt_v_and_i(struct qpnp_bms_chip *chip,
int *ibat_ua, int *vbat_uv)
{
+ struct qpnp_iadc_result i_result;
+ struct qpnp_vadc_result v_result;
+ enum qpnp_iadc_channels iadc_channel;
int rc;
- if (is_batfet_open(chip)) {
- pr_debug("batfet is open using separate vbat and ibat meas\n");
- rc = get_battery_voltage(vbat_uv);
- if (rc < 0) {
- pr_err("adc vbat failed err = %d\n", rc);
- return rc;
- }
- rc = get_battery_current(chip, ibat_ua);
- if (rc < 0) {
- pr_err("bms ibat failed err = %d\n", rc);
- return rc;
- }
- } else {
- return override_mode_batt_v_and_i(chip, ibat_ua, vbat_uv);
+ iadc_channel = chip->use_external_rsense ?
+ EXTERNAL_RSENSE : INTERNAL_RSENSE;
+ rc = qpnp_iadc_vadc_sync_read(iadc_channel, &i_result,
+ VBAT_SNS, &v_result);
+ if (rc) {
+ pr_err("vadc read failed with rc: %d\n", rc);
+ return rc;
}
+ *ibat_ua = (int)i_result.result_ua;
+ *vbat_uv = (int)v_result.physical;
return 0;
}
@@ -1223,7 +1175,7 @@
static int reset_bms_for_test(struct qpnp_bms_chip *chip)
{
- int ibat_ua, vbat_uv, rc;
+ int ibat_ua = 0, vbat_uv = 0, rc;
int ocv_est_uv;
if (!chip) {
@@ -1474,16 +1426,12 @@
static int clamp_soc_based_on_voltage(struct qpnp_bms_chip *chip, int soc)
{
int rc, vbat_uv;
- struct qpnp_vadc_result result;
- rc = qpnp_vadc_read(VBAT_SNS, &result);
- if (rc) {
- pr_err("error reading vbat_sns adc channel = %d, rc = %d\n",
- VBAT_SNS, rc);
- return rc;
+ rc = get_battery_voltage(&vbat_uv);
+ if (rc < 0) {
+ pr_err("adc vbat failed err = %d\n", rc);
+ return soc;
}
-
- vbat_uv = (int)result.physical;
if (soc == 0 && vbat_uv > chip->v_cutoff_uv) {
pr_debug("clamping soc to 1, vbat (%d) > cutoff (%d)\n",
vbat_uv, chip->v_cutoff_uv);
@@ -1612,28 +1560,16 @@
return chip->calculated_soc;
}
-static int read_vbat(struct qpnp_bms_chip *chip)
-{
- int rc;
- struct qpnp_vadc_result result;
-
- rc = qpnp_vadc_read(VBAT_SNS, &result);
- if (rc) {
- pr_err("error reading vadc VBAT_SNS = %d, rc = %d\n",
- VBAT_SNS, rc);
- return rc;
- }
- pr_debug("read %duv from vadc\n", (int)result.physical);
- return (int)result.physical;
-}
-
static int calculate_soc_from_voltage(struct qpnp_bms_chip *chip)
{
int voltage_range_uv, voltage_remaining_uv, voltage_based_soc;
- int vbat_uv;
+ int rc, vbat_uv;
- vbat_uv = read_vbat(chip);
-
+ rc = get_battery_voltage(&vbat_uv);
+ if (rc < 0) {
+ pr_err("adc vbat failed err = %d\n", rc);
+ return rc;
+ }
voltage_range_uv = chip->max_voltage_uv - chip->v_cutoff_uv;
voltage_remaining_uv = vbat_uv - chip->v_cutoff_uv;
voltage_based_soc = voltage_remaining_uv * 100 / voltage_range_uv;
@@ -2467,7 +2403,12 @@
}
vbatt = 0;
- get_battery_voltage(&vbatt);
+ rc = get_battery_voltage(&vbatt);
+ if (rc) {
+ pr_err("error reading vbat_sns adc channel = %d, rc = %d\n",
+ VBAT_SNS, rc);
+ goto unregister_dc;
+ }
pr_info("probe success: soc =%d vbatt = %d ocv = %d r_sense_uohm = %u\n",
get_prop_bms_capacity(chip),
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index e2ba042..7833afa 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -86,8 +86,8 @@
#define USB_OVP_CTL 0x42
#define SEC_ACCESS 0xD0
-/* SMBB peripheral subtype values */
#define REG_OFFSET_PERP_SUBTYPE 0x05
+/* SMBB peripheral subtype values */
#define SMBB_CHGR_SUBTYPE 0x01
#define SMBB_BUCK_SUBTYPE 0x02
#define SMBB_BAT_IF_SUBTYPE 0x03
@@ -96,6 +96,14 @@
#define SMBB_BOOST_SUBTYPE 0x06
#define SMBB_MISC_SUBTYPE 0x07
+/* SMBB lite (SMBBP) peripheral subtype values */
+#define SMBBP_CHGR_SUBTYPE 0x31
+#define SMBBP_BUCK_SUBTYPE 0x32
+#define SMBBP_BAT_IF_SUBTYPE 0x33
+#define SMBBP_USB_CHGPTH_SUBTYPE 0x34
+#define SMBBP_BOOST_SUBTYPE 0x36
+#define SMBBP_MISC_SUBTYPE 0x37
+
#define QPNP_CHARGER_DEV_NAME "qcom,qpnp-charger"
/* Status bits and masks */
@@ -341,6 +349,9 @@
u8 dcin_valid_rt_sts;
int rc;
+ if (!chip->dc_chgpth_base)
+ return 0;
+
rc = qpnp_chg_read(chip, &dcin_valid_rt_sts,
INT_RT_STS(chip->dc_chgpth_base), 1);
if (rc) {
@@ -1212,6 +1223,7 @@
switch (subtype) {
case SMBB_CHGR_SUBTYPE:
+ case SMBBP_CHGR_SUBTYPE:
chip->chg_done_irq = spmi_get_irq_byname(chip->spmi,
spmi_resource, "chg-done");
if (chip->chg_done_irq < 0) {
@@ -1289,6 +1301,7 @@
enable_irq_wake(chip->chg_done_irq);
break;
case SMBB_BUCK_SUBTYPE:
+ case SMBBP_BUCK_SUBTYPE:
rc = qpnp_chg_masked_write(chip,
chip->chgr_base + CHGR_BUCK_BCK_VBAT_REG_MODE,
BUCK_VBAT_REG_NODE_SEL_BIT,
@@ -1299,8 +1312,10 @@
}
break;
case SMBB_BAT_IF_SUBTYPE:
+ case SMBBP_BAT_IF_SUBTYPE:
break;
case SMBB_USB_CHGPTH_SUBTYPE:
+ case SMBBP_USB_CHGPTH_SUBTYPE:
chip->usbin_valid_irq = spmi_get_irq_byname(chip->spmi,
spmi_resource, "usbin-valid");
if (chip->usbin_valid_irq < 0) {
@@ -1361,8 +1376,10 @@
enable_irq_wake(chip->dcin_valid_irq);
break;
case SMBB_BOOST_SUBTYPE:
+ case SMBBP_BOOST_SUBTYPE:
break;
case SMBB_MISC_SUBTYPE:
+ case SMBBP_MISC_SUBTYPE:
pr_debug("Setting BOOT_DONE\n");
rc = qpnp_chg_masked_write(chip,
chip->misc_base + CHGR_MISC_BOOT_DONE,
@@ -1397,10 +1414,6 @@
return -ENOMEM;
}
- rc = qpnp_vadc_is_ready();
- if (rc)
- goto fail_chg_enable;
-
chip->dev = &(spmi->dev);
chip->spmi = spmi;
@@ -1557,6 +1570,7 @@
switch (subtype) {
case SMBB_CHGR_SUBTYPE:
+ case SMBBP_CHGR_SUBTYPE:
chip->chgr_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1566,6 +1580,7 @@
}
break;
case SMBB_BUCK_SUBTYPE:
+ case SMBBP_BUCK_SUBTYPE:
chip->buck_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1575,6 +1590,7 @@
}
break;
case SMBB_BAT_IF_SUBTYPE:
+ case SMBBP_BAT_IF_SUBTYPE:
chip->bat_if_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1584,6 +1600,7 @@
}
break;
case SMBB_USB_CHGPTH_SUBTYPE:
+ case SMBBP_USB_CHGPTH_SUBTYPE:
chip->usb_chgpth_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1602,6 +1619,7 @@
}
break;
case SMBB_BOOST_SUBTYPE:
+ case SMBBP_BOOST_SUBTYPE:
chip->boost_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1611,6 +1629,7 @@
}
break;
case SMBB_MISC_SUBTYPE:
+ case SMBBP_MISC_SUBTYPE:
chip->misc_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1628,34 +1647,44 @@
dev_set_drvdata(&spmi->dev, chip);
device_init_wakeup(&spmi->dev, 1);
- chip->dc_psy.name = "qpnp-dc";
- chip->dc_psy.type = POWER_SUPPLY_TYPE_MAINS;
- chip->dc_psy.supplied_to = pm_power_supplied_to;
- chip->dc_psy.num_supplicants = ARRAY_SIZE(pm_power_supplied_to);
- chip->dc_psy.properties = pm_power_props_mains;
- chip->dc_psy.num_properties = ARRAY_SIZE(pm_power_props_mains);
- chip->dc_psy.get_property = qpnp_power_get_property_mains;
+ if (chip->bat_if_base) {
+ rc = qpnp_vadc_is_ready();
+ if (rc)
+ goto fail_chg_enable;
- chip->batt_psy.name = "battery";
- chip->batt_psy.type = POWER_SUPPLY_TYPE_BATTERY;
- chip->batt_psy.properties = msm_batt_power_props;
- chip->batt_psy.num_properties = ARRAY_SIZE(msm_batt_power_props);
- chip->batt_psy.get_property = qpnp_batt_power_get_property;
- chip->batt_psy.set_property = qpnp_batt_power_set_property;
- chip->batt_psy.property_is_writeable = qpnp_batt_property_is_writeable;
- chip->batt_psy.external_power_changed =
+ chip->batt_psy.name = "battery";
+ chip->batt_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->batt_psy.properties = msm_batt_power_props;
+ chip->batt_psy.num_properties =
+ ARRAY_SIZE(msm_batt_power_props);
+ chip->batt_psy.get_property = qpnp_batt_power_get_property;
+ chip->batt_psy.set_property = qpnp_batt_power_set_property;
+ chip->batt_psy.property_is_writeable =
+ qpnp_batt_property_is_writeable;
+ chip->batt_psy.external_power_changed =
qpnp_batt_external_power_changed;
- rc = power_supply_register(chip->dev, &chip->batt_psy);
- if (rc < 0) {
- pr_err("power_supply_register batt failed rc = %d\n", rc);
- goto fail_chg_enable;
+ rc = power_supply_register(chip->dev, &chip->batt_psy);
+ if (rc < 0) {
+ pr_err("batt failed to register rc = %d\n", rc);
+ goto fail_chg_enable;
+ }
}
- rc = power_supply_register(chip->dev, &chip->dc_psy);
- if (rc < 0) {
- pr_err("power_supply_register usb failed rc = %d\n", rc);
- goto unregister_batt;
+ if (chip->dc_chgpth_base) {
+ chip->dc_psy.name = "qpnp-dc";
+ chip->dc_psy.type = POWER_SUPPLY_TYPE_MAINS;
+ chip->dc_psy.supplied_to = pm_power_supplied_to;
+ chip->dc_psy.num_supplicants = ARRAY_SIZE(pm_power_supplied_to);
+ chip->dc_psy.properties = pm_power_props_mains;
+ chip->dc_psy.num_properties = ARRAY_SIZE(pm_power_props_mains);
+ chip->dc_psy.get_property = qpnp_power_get_property_mains;
+
+ rc = power_supply_register(chip->dev, &chip->dc_psy);
+ if (rc < 0) {
+ pr_err("power_supply_register dc failed rc=%d\n", rc);
+ goto unregister_batt;
+ }
}
/* Turn on appropriate workaround flags */
@@ -1664,11 +1693,11 @@
power_supply_set_present(chip->usb_psy,
qpnp_chg_is_usb_chg_plugged_in(chip));
- if (chip->maxinput_dc_ma) {
+ if (chip->maxinput_dc_ma && chip->dc_chgpth_base) {
rc = qpnp_chg_idcmax_set(chip, chip->maxinput_dc_ma);
if (rc) {
pr_err("Error setting idcmax property %d\n", rc);
- goto fail_chg_enable;
+ goto unregister_batt;
}
}
@@ -1684,7 +1713,8 @@
return 0;
unregister_batt:
- power_supply_unregister(&chip->batt_psy);
+ if (chip->bat_if_base)
+ power_supply_unregister(&chip->batt_psy);
fail_chg_enable:
kfree(chip->thermal_mitigation);
kfree(chip);
diff --git a/drivers/usb/host/ehci-msm2.c b/drivers/usb/host/ehci-msm2.c
index 40e1eea..faa5625 100644
--- a/drivers/usb/host/ehci-msm2.c
+++ b/drivers/usb/host/ehci-msm2.c
@@ -45,6 +45,7 @@
struct ehci_hcd ehci;
spinlock_t wakeup_lock;
struct device *dev;
+ struct clk *xo_clk;
struct clk *iface_clk;
struct clk *core_clk;
struct clk *alt_core_clk;
@@ -659,10 +660,14 @@
clk_disable_unprepare(mhcd->core_clk);
/* usb phy does not require TCXO clock, hence vote for TCXO disable */
- ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
- if (ret)
- dev_err(mhcd->dev, "%s failed to devote for "
- "TCXO D0 buffer%d\n", __func__, ret);
+ if (!IS_ERR(mhcd->xo_clk)) {
+ clk_disable_unprepare(mhcd->xo_clk);
+ } else {
+ ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
+ if (ret)
+ dev_err(mhcd->dev, "%s failed to devote for TCXO %d\n",
+ __func__, ret);
+ }
msm_ehci_config_vddcx(mhcd, 0);
@@ -714,10 +719,14 @@
wake_lock(&mhcd->wlock);
/* Vote for TCXO when waking up the phy */
- ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
- if (ret)
- dev_err(mhcd->dev, "%s failed to vote for "
- "TCXO D0 buffer%d\n", __func__, ret);
+ if (!IS_ERR(mhcd->xo_clk)) {
+ clk_prepare_enable(mhcd->xo_clk);
+ } else {
+ ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
+ if (ret)
+ dev_err(mhcd->dev, "%s failed to vote for TCXO D0 %d\n",
+ __func__, ret);
+ }
clk_prepare_enable(mhcd->core_clk);
clk_prepare_enable(mhcd->iface_clk);
@@ -1091,18 +1100,23 @@
}
snprintf(pdev_name, PDEV_NAME_LEN, "%s.%d", pdev->name, pdev->id);
- mhcd->xo_handle = msm_xo_get(MSM_XO_TCXO_D0, pdev_name);
- if (IS_ERR(mhcd->xo_handle)) {
- dev_err(&pdev->dev, "%s not able to get the handle "
- "to vote for TCXO D0 buffer\n", __func__);
- ret = PTR_ERR(mhcd->xo_handle);
- goto free_async_irq;
+ mhcd->xo_clk = clk_get(&pdev->dev, "xo");
+ if (!IS_ERR(mhcd->xo_clk)) {
+ ret = clk_prepare_enable(mhcd->xo_clk);
+ } else {
+ mhcd->xo_handle = msm_xo_get(MSM_XO_TCXO_D0, pdev_name);
+ if (IS_ERR(mhcd->xo_handle)) {
+			dev_err(&pdev->dev, "%s failed to get handle for TCXO D0\n",
+ __func__);
+ ret = PTR_ERR(mhcd->xo_handle);
+ goto free_async_irq;
+ } else {
+ ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
+ }
}
-
- ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
if (ret) {
- dev_err(&pdev->dev, "%s failed to vote for TCXO "
- "D0 buffer%d\n", __func__, ret);
+ dev_err(&pdev->dev, "%s failed to vote for TCXO %d\n",
+ __func__, ret);
goto free_xo_handle;
}
@@ -1202,9 +1216,15 @@
deinit_clocks:
msm_ehci_init_clocks(mhcd, 0);
devote_xo_handle:
- msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
+ if (!IS_ERR(mhcd->xo_clk))
+ clk_disable_unprepare(mhcd->xo_clk);
+ else
+ msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
free_xo_handle:
- msm_xo_put(mhcd->xo_handle);
+ if (!IS_ERR(mhcd->xo_clk))
+ clk_put(mhcd->xo_clk);
+ else
+ msm_xo_put(mhcd->xo_handle);
free_async_irq:
if (mhcd->async_irq)
free_irq(mhcd->async_irq, mhcd);
@@ -1236,7 +1256,12 @@
usb_remove_hcd(hcd);
- msm_xo_put(mhcd->xo_handle);
+ if (!IS_ERR(mhcd->xo_clk)) {
+ clk_disable_unprepare(mhcd->xo_clk);
+ clk_put(mhcd->xo_clk);
+ } else {
+ msm_xo_put(mhcd->xo_handle);
+ }
msm_ehci_vbus_power(mhcd, 0);
msm_ehci_init_vbus(mhcd, 0);
msm_ehci_ldo_enable(mhcd, 0);
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index ed0a385..a3d8d7e 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -276,6 +276,7 @@
struct mdp4_overlay_pipe *solidfill_pipe;
};
+
struct mdp4_overlay_pipe {
uint32 pipe_used;
uint32 pipe_type; /* rgb, video/graphic */
@@ -983,6 +984,8 @@
void mdp4_overlay_mdp_perf_upd(struct msm_fb_data_type *mfd, int flag);
int mdp4_update_base_blend(struct msm_fb_data_type *mfd,
struct mdp_blend_cfg *mdp_blend_cfg);
+int mdp4_update_writeback_format(struct msm_fb_data_type *mfd,
+ struct mdp_mixer_cfg *mdp_mixer_cfg);
u32 mdp4_get_mixer_num(u32 panel_type);
#ifndef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index fbae011..bfd8238 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -49,6 +49,7 @@
struct mdp4_overlay_pipe *baselayer[MDP4_MIXER_MAX];
struct blend_cfg blend[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
struct mdp4_overlay_pipe sf_plist[MDP4_MIXER_MAX][OVERLAY_PIPE_MAX];
+ struct mdp_mixer_cfg mdp_mixer_cfg[MDP4_MIXER_MAX];
uint32 mixer_cfg[MDP4_MIXER_MAX];
uint32 flush[MDP4_MIXER_MAX];
struct iommu_free_list iommu_free[MDP4_MIXER_MAX];
@@ -1456,6 +1457,87 @@
(pipe->element1 << 8) | pipe->element0;
}
+static uint32 mdp4_overlayproc_cfg_wb_panel(struct mdp4_overlay_pipe *pipe,
+ char *overlay_base, uint32 curr)
+{
+ int off, bpp;
+ uint32 flag;
+ bool is_rgb = false;
+ struct mdp_mixer_cfg *mixer_cfg;
+
+ off = 0;
+ mixer_cfg = &ctrl->mdp_mixer_cfg[MDP4_MIXER2];
+
+ switch (mixer_cfg->writeback_format) {
+ case WB_FORMAT_RGB_888:
+ bpp = 3; /* RGB888 */
+ flag = 0x0;
+ is_rgb = true;
+ break;
+ case WB_FORMAT_RGB_565:
+ bpp = 2; /* RGB565 */
+ flag = 0x1;
+ is_rgb = true;
+ break;
+ case WB_FORMAT_xRGB_8888:
+ bpp = 4; /* xRGB8888 */
+ flag = 0x3;
+ is_rgb = true;
+ break;
+ case WB_FORMAT_ARGB_8888:
+ bpp = 4; /* ARGB8888 */
+ flag = 0x80000003;
+ is_rgb = true;
+ break;
+ case WB_FORMAT_ARGB_8888_INPUT_ALPHA:
+		pr_warn("ARGB_8888_INPUT_ALPHA not supported, falling back to NV12\n");
+ default:
+ bpp = 1; /* NV12 */
+ is_rgb = false;
+ break;
+ }
+
+	if (is_rgb) {
+ if (pipe->ov_cnt & 0x01)
+ off = pipe->src_height * pipe->src_width * bpp;
+
+ outpdw(overlay_base + 0x000c, pipe->ov_blt_addr + off);
+		/* overlay output is RGB888 */
+ outpdw(overlay_base + 0x0010, pipe->src_width * bpp);
+ outpdw(overlay_base + 0x001c, pipe->ov_blt_addr + off);
+ /* MDDI - BLT + on demand */
+ outpdw(overlay_base + 0x0004, 0x08);
+
+ curr = inpdw(overlay_base + 0x0014);
+ curr &= 0x4;
+
+ outpdw(overlay_base + 0x0014, curr | flag);
+ } else {
+ if (pipe->ov_cnt & 0x01)
+ off = pipe->src_height * pipe->src_width * bpp;
+
+ outpdw(overlay_base + 0x000c, pipe->ov_blt_addr + off);
+		/* overlay output is RGB888 */
+ outpdw(overlay_base + 0x0010, ((pipe->src_width << 16) |
+ pipe->src_width));
+ outpdw(overlay_base + 0x001c, pipe->ov_blt_addr + off);
+ off = pipe->src_height * pipe->src_width;
+ /* align chroma to 2k address */
+ off = (off + 2047) & ~2047;
+		/* UV plane address */
+ outpdw(overlay_base + 0x0020, pipe->ov_blt_addr + off);
+ /* MDDI - BLT + on demand */
+ outpdw(overlay_base + 0x0004, 0x08);
+ /* pseudo planar + writeback */
+ curr = inpdw(overlay_base + 0x0014);
+ curr &= 0x4;
+ outpdw(overlay_base + 0x0014, curr | 0x012);
+ /* rgb->yuv */
+ outpdw(overlay_base + 0x0200, 0x05);
+ }
+ return curr;
+}
+
/*
* mdp4_overlayproc_cfg: only be called from base layer
*/
@@ -1515,34 +1597,8 @@
#endif
} else if (pipe->mixer_num == MDP4_MIXER2) {
if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK) {
- off = 0;
- bpp = 1;
- if (pipe->ov_cnt & 0x01)
- off = pipe->src_height *
- pipe->src_width * bpp;
-
- outpdw(overlay_base + 0x000c,
- pipe->ov_blt_addr + off);
- /* overlay ouput is RGB888 */
- outpdw(overlay_base + 0x0010,
- ((pipe->src_width << 16) |
- pipe->src_width));
- outpdw(overlay_base + 0x001c,
- pipe->ov_blt_addr + off);
- off = pipe->src_height * pipe->src_width;
- /* align chroma to 2k address */
- off = (off + 2047) & ~2047;
- /* UV plane adress */
- outpdw(overlay_base + 0x0020,
- pipe->ov_blt_addr + off);
- /* MDDI - BLT + on demand */
- outpdw(overlay_base + 0x0004, 0x08);
- /* pseudo planar + writeback */
- curr = inpdw(overlay_base + 0x0014);
- curr &= 0x4;
- outpdw(overlay_base + 0x0014, curr | 0x012);
- /* rgb->yuv */
- outpdw(overlay_base + 0x0200, 0x05);
+ curr = mdp4_overlayproc_cfg_wb_panel(pipe,
+ overlay_base, curr);
}
}
} else {
@@ -3865,6 +3921,42 @@
mutex_unlock(&mfd->dma->ov_mutex);
return err;
}
+
+int mdp4_update_writeback_format(struct msm_fb_data_type *mfd,
+ struct mdp_mixer_cfg *mdp_mixer_cfg)
+{
+ int ret = 0;
+ u32 mixer_num;
+ struct mdp_mixer_cfg *mixer;
+
+ mixer_num = mdp4_get_mixer_num(mfd->panel_info.type);
+ if (!ctrl) {
+ pr_warn("mdp4_overlay_ctrl is NULL\n");
+ return -EPERM;
+ }
+ mixer = &ctrl->mdp_mixer_cfg[mixer_num];
+
+ switch (mdp_mixer_cfg->writeback_format) {
+ case WB_FORMAT_RGB_888:
+ case WB_FORMAT_RGB_565:
+ case WB_FORMAT_NV12:
+ case WB_FORMAT_xRGB_8888:
+ case WB_FORMAT_ARGB_8888:
+ mixer->writeback_format = mdp_mixer_cfg->writeback_format;
+ break;
+ case WB_FORMAT_ARGB_8888_INPUT_ALPHA:
+ mixer->writeback_format = mdp_mixer_cfg->writeback_format;
+ mixer->alpha = mdp_mixer_cfg->alpha;
+ break;
+ default:
+ mixer->writeback_format = WB_FORMAT_NV12;
+ pr_warn("Unsupported format request, setting to NV12\n");
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
int mdp4_update_base_blend(struct msm_fb_data_type *mfd,
struct mdp_blend_cfg *mdp_blend_cfg)
{
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 4c70770..e4f78ad 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -323,6 +323,7 @@
struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
struct msm_fb_data_type *mfd);
+int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl);
int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl,
struct mdss_panel_data *pdata);
int mdss_mdp_ctl_destroy(struct mdss_mdp_ctl *ctl);
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index c640c73..cabb183 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -443,7 +443,7 @@
return NULL;
}
-static int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
+int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_ctl *split_ctl;
u32 width, height;
@@ -566,12 +566,6 @@
ctl->opmode |= (ctl->intf_num << 4);
- ret = mdss_mdp_ctl_setup(ctl);
- if (ret) {
- pr_err("unable to setup control path %d\n", ctl->num);
- goto ctl_init_fail;
- }
-
if (ctl->intf_num == MDSS_MDP_NO_INTF) {
ctl->dst_format = pdata->panel_info.out_format;
} else {
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index c1dcc18..daa2499 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -1017,6 +1017,12 @@
return;
}
+ ret = mdss_mdp_overlay_start(mfd);
+ if (ret) {
+ pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+ return;
+ }
+
if (is_mdss_iommu_attached())
data.p[0].addr = mfd->iova;
else
@@ -1454,6 +1460,10 @@
rc = mdss_mdp_overlay_start(mfd);
if (!IS_ERR_VALUE(rc))
rc = mdss_mdp_overlay_kickoff(mfd->ctl);
+ } else {
+ rc = mdss_mdp_ctl_setup(mfd->ctl);
+ if (rc)
+ return rc;
}
if (!IS_ERR_VALUE(rc) && mfd->vsync_pending) {
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 5f994a0..b96e093 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -3301,6 +3301,10 @@
ret = mdp4_update_base_blend(mfd,
&metadata_ptr->data.blend_cfg);
break;
+ case metadata_op_wb_format:
+ ret = mdp4_update_writeback_format(mfd,
+ &metadata_ptr->data.mixer_cfg);
+ break;
#endif
default:
pr_warn("Unsupported request to MDP META IOCTL.\n");
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 06772d9..f6ca334 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -385,6 +385,7 @@
header-y += types.h
header-y += udf_fs_i.h
header-y += udp.h
+header-y += uhid.h
header-y += uinput.h
header-y += uio.h
header-y += ultrasound.h
@@ -450,3 +451,4 @@
header-y += ci-bridge-spi.h
header-y += msm_audio_amrwbplus.h
header-y += avtimer.h
+header-y += msm_ipa.h
diff --git a/include/linux/bif/consumer.h b/include/linux/bif/consumer.h
new file mode 100644
index 0000000..e4c190e
--- /dev/null
+++ b/include/linux/bif/consumer.h
@@ -0,0 +1,613 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BIF_CONSUMER_H_
+#define _LINUX_BIF_CONSUMER_H_
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+#define BIF_DEVICE_ID_BYTE_LENGTH 8
+#define BIF_UNIQUE_ID_BYTE_LENGTH 10
+#define BIF_UNIQUE_ID_BIT_LENGTH 80
+
+#define BIF_PRIMARY_SLAVE_DEV_ADR 0x01
+
+/**
+ * enum bif_transaction - BIF master bus transaction types
+ * %BIF_TRANS_WD: Write data
+ * %BIF_TRANS_ERA: Extended register address
+ * %BIF_TRANS_WRA: Write register address
+ * %BIF_TRANS_RRA: Read register address
+ * %BIF_TRANS_BC: Bus command
+ * %BIF_TRANS_EDA: Extended device address
+ * %BIF_TRANS_SDA: Slave device address
+ *
+ * These values correspond to BIF word bits: BCF, bit 9, bit 8.
+ * BCF_n bit is inserted automatically.
+ */
+enum bif_transaction {
+ BIF_TRANS_WD = 0x00,
+ BIF_TRANS_ERA = 0x01,
+ BIF_TRANS_WRA = 0x02,
+ BIF_TRANS_RRA = 0x03,
+ BIF_TRANS_BC = 0x04,
+ BIF_TRANS_EDA = 0x05,
+ BIF_TRANS_SDA = 0x06,
+};
+
+/* BIF slave response components */
+#define BIF_SLAVE_RD_ACK 0x200
+#define BIF_SLAVE_RD_EOT 0x100
+#define BIF_SLAVE_RD_DATA 0x0FF
+#define BIF_SLAVE_RD_ERR 0x0FF
+#define BIF_SLAVE_TACK_ACK 0x200
+#define BIF_SLAVE_TACK_WCNT 0x0FF
+#define BIF_SLAVE_TACK_ERR 0x0FF
+
+/**
+ * enum bif_bus_command - MIPI defined bus commands to use in BC transaction
+ * %BIF_CMD_BRES: Bus reset of all slaves
+ * %BIF_CMD_PDWN: Put all slaves into power down mode
+ * %BIF_CMD_STBY: Put all slaves into standby mode
+ * %BIF_CMD_EINT: Enable interrupts for all slaves
+ * %BIF_CMD_ISTS: Poll interrupt status for all slaves. Expects BQ
+ * response if any slave has a pending interrupt.
+ * %BIF_CMD_RBL: Specify the burst read length for the next read
+ * transaction. Bits 3 to 0 should also be ORed on in
+ * order to specify the number of bytes to read.
+ * %BIF_CMD_RBE: Specify the extended burst read length for the next read
+ * transaction. Bits 3 to 0 should also be ORed on in
+ * order to specify the number of bytes to read. The burst
+ * read length for RBEy and RBLx = 16 * y + x.
+ * %BIF_CMD_DASM: Device activation stick mode. This keeps a slave
+ * selected if it would otherwise become unselected by the
+ * next transaction.
+ * %BIF_CMD_DISS: UID search start
+ * %BIF_CMD_DILC: UID length check. Expects BQ response if all 80 UID
+ * bits for a given slave have been entered.
+ * %BIF_CMD_DIE0: UID search enter 0
+ * %BIF_CMD_DIE1: UID search enter 1
+ * %BIF_CMD_DIP0: UID search probe 0
+ * %BIF_CMD_DIP1: UID search probe 1
+ * %BIF_CMD_DRES: Device reset of selected slaves
+ * %BIF_CMD_TQ: Transaction query; expects TACK response
+ * %BIF_CMD_AIO: Address increment off for the next transaction
+ *
+ * These values correspond to BIF word bits 7 to 0.
+ */
+enum bif_bus_command {
+ BIF_CMD_BRES = 0x00,
+ BIF_CMD_PDWN = 0x02,
+ BIF_CMD_STBY = 0x03,
+ BIF_CMD_EINT = 0x10,
+ BIF_CMD_ISTS = 0x11,
+ BIF_CMD_RBL = 0x20,
+ BIF_CMD_RBE = 0x30,
+ BIF_CMD_DASM = 0x40,
+ BIF_CMD_DISS = 0x80,
+ BIF_CMD_DILC = 0x81,
+ BIF_CMD_DIE0 = 0x84,
+ BIF_CMD_DIE1 = 0x85,
+ BIF_CMD_DIP0 = 0x86,
+ BIF_CMD_DIP1 = 0x87,
+ BIF_CMD_DRES = 0xC0,
+ BIF_CMD_TQ = 0xC2,
+ BIF_CMD_AIO = 0xC4,
+};
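+
+/*
+ * Example: to burst read 20 bytes in the next read transaction, a master
+ * would issue BC(BIF_CMD_RBE | 1) followed by BC(BIF_CMD_RBL | 4); the
+ * resulting burst read length is 16 * y + x = 16 * 1 + 4 = 20.
+ */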
+
+/**
+ * struct bif_ddb_l1_data - MIPI defined L1 DDB data structure
+ * @revision: DDB version; should be 0x10 for DDB v1.0
+ * @level: DDB level support; should be 0x03 for DDB L1 and L2
+ * @device_class: MIPI device class; should be 0x0800
+ * @manufacturer_id: Manufacturer ID number allocated by MIPI
+ * @product_id: Manufacturer specified product ID number
+ * @length: Size of L2 function directory in bytes
+ */
+struct bif_ddb_l1_data {
+ u8 revision;
+ u8 level;
+ u16 device_class;
+ u16 manufacturer_id;
+ u16 product_id;
+ u16 length;
+};
+
+/**
+ * struct bif_ddb_l2_data - MIPI defined L2 DDB function data structure
+ * @function_type: Defines the type of the function. The type may be
+ * either MIPI or manufacturer defined.
+ * @function_version: Defines the version of the function. The version may
+ * be either MIPI or manufacturer defined.
+ * @function_pointer: Address in BIF slave memory where the register map for
+ * the function begins.
+ */
+struct bif_ddb_l2_data {
+ u8 function_type;
+ u8 function_version;
+ u16 function_pointer;
+};
+
+/**
+ * enum bif_mipi_function_type - MIPI defined DDB L2 function types
+ * %BIF_FUNC_PROTOCOL: Protocol function which provides access to core
+ * BIF communication features.
+ * %BIF_FUNC_SLAVE_CONTROL: Slave control function which provides control
+ * for BIF slave interrupts and tasks.
+ * %BIF_FUNC_TEMPERATURE: Temperature sensor function which provides a
+ * means to accurately read the battery temperature
+ * in a single-shot or periodic fashion.
+ * %BIF_FUNC_NVM: Non-volatile memory function which provides a
+ * means to store data onto a BIF slave that is
+ * non-volatile. Secondary slave objects are also
+ * found through the NVM function.
+ * %BIF_FUNC_AUTHENTICATION: Authentication function which provides a means
+ * to authenticate batteries. This function does
+ *				not have a MIPI defined implementation. Instead,
+ * all aspects of the authentication function are
+ * left to the discretion of the manufacturer.
+ */
+enum bif_mipi_function_type {
+ BIF_FUNC_PROTOCOL = 0x01,
+ BIF_FUNC_SLAVE_CONTROL = 0x02,
+ BIF_FUNC_TEMPERATURE = 0x03,
+ BIF_FUNC_NVM = 0x04,
+ BIF_FUNC_AUTHENTICATION = 0x05,
+};
+
+#define BIF_DDB_L1_BASE_ADDR 0x0000
+#define BIF_DDB_L2_BASE_ADDR 0x000A
+
+/**
+ * enum bif_slave_error_code - MIPI defined BIF slave error codes
+ * %BIF_ERR_NONE: No error occurred
+ * %BIF_ERR_GENERAL: An unenumerated error occurred
+ * %BIF_ERR_PARITY: A Hamming-15 parity check failed for a word
+ * sent on the bus
+ * %BIF_ERR_INVERSION: More than 8 bits in a word were 1
+ * %BIF_ERR_BAD_LENGTH: Word had more or less than 17 bits
+ * %BIF_ERR_TIMING: Bit timing was violated in a word
+ * %BIF_ERR_UNKNOWN_CMD: Bus command was unknown to the slave
+ * %BIF_ERR_CMD_SEQ: Commands with ordering dependency were not
+ * sent in the right order
+ * %BIF_ERR_BUS_COLLISION: BCL was already low at the beginning of a new
+ * transaction
+ * %BIF_ERR_SLAVE_BUSY: Slave is busy and cannot respond
+ * %BIF_ERR_FATAL: Slave is in an unrecoverable error state and
+ * must be reset
+ *
+ * These values are present in the ERR portion of an RD or TACK slave response
+ * word. These values can also be found in the ERR_CODE register of the
+ * protocol function.
+ */
+enum bif_slave_error_code {
+ BIF_ERR_NONE = 0x00,
+ BIF_ERR_GENERAL = 0x10,
+ BIF_ERR_PARITY = 0x11,
+ BIF_ERR_INVERSION = 0x12,
+ BIF_ERR_BAD_LENGTH = 0x13,
+ BIF_ERR_TIMING = 0x14,
+ BIF_ERR_UNKNOWN_CMD = 0x15,
+ BIF_ERR_CMD_SEQ = 0x16,
+ BIF_ERR_BUS_COLLISION = 0x1F,
+ BIF_ERR_SLAVE_BUSY = 0x20,
+ BIF_ERR_FATAL = 0x7F,
+};
+
+/**
+ * struct bif_protocol_function - constant data present in protocol function
+ * @l2_entry: Pointer to protocol function L2 DDB data struct
+ * @protocol_pointer: BIF slave address where protocol registers begin
+ * @device_id_pointer: BIF slave address where device ID begins
+ * @device_id: The 8-byte unique device ID in MSB to LSB order
+ */
+struct bif_protocol_function {
+ struct bif_ddb_l2_data *l2_entry;
+ u16 protocol_pointer;
+ u16 device_id_pointer;
+ u8 device_id[BIF_DEVICE_ID_BYTE_LENGTH]; /* Unique ID */
+};
+
+#define PROTOCOL_FUNC_DEV_ADR_ADDR(protocol_pointer) ((protocol_pointer) + 0)
+#define PROTOCOL_FUNC_ERR_CODE_ADDR(protocol_pointer) ((protocol_pointer) + 2)
+#define PROTOCOL_FUNC_ERR_CNT_ADDR(protocol_pointer) ((protocol_pointer) + 3)
+#define PROTOCOL_FUNC_WORD_CNT_ADDR(protocol_pointer) ((protocol_pointer) + 4)
+
+/**
+ * struct bif_slave_control_function - constant data present in slave control
+ *					function as well as internal software state parameters
+ * @l2_entry: Pointer to slave control function L2 DDB data struct
+ * @slave_ctrl_pointer: BIF slave address where slave control registers begin
+ * @task_count: Number of tasks supported by the slave
+ * @irq_notifier_list:	List of notifiers for consumer drivers that wish to be
+ * notified when any given interrupt triggers. This list
+ * is dynamically allocated with length task_count.
+ */
+struct bif_slave_control_function {
+ struct bif_ddb_l2_data *l2_entry;
+ u16 slave_ctrl_pointer;
+ unsigned int task_count;
+ struct blocking_notifier_head *irq_notifier_list;
+};
+
+#define SLAVE_CTRL_TASKS_PER_SET 8
+
+/**
+ * bif_slave_control_task_is_valid() - returns true if the specified task
+ * is supported by the slave or false if it isn't
+ * @func: Pointer to slave's slave control function structure
+ * @task: Slave task number to check
+ */
+static inline bool
+bif_slave_control_task_is_valid(struct bif_slave_control_function *func,
+ unsigned int task)
+{
+ return func ? task < func->task_count : false;
+}
+
+#define SLAVE_CTRL_FUNC_IRQ_EN_ADDR(slave_ctrl_pointer, task) \
+ ((slave_ctrl_pointer) + 4 * ((task) / SLAVE_CTRL_TASKS_PER_SET) + 0)
+
+#define SLAVE_CTRL_FUNC_IRQ_STATUS_ADDR(slave_ctrl_pointer, task) \
+ ((slave_ctrl_pointer) + 4 * ((task) / SLAVE_CTRL_TASKS_PER_SET) + 1)
+#define SLAVE_CTRL_FUNC_IRQ_CLEAR_ADDR(slave_ctrl_pointer, task) \
+ SLAVE_CTRL_FUNC_IRQ_STATUS_ADDR(slave_ctrl_pointer, task)
+
+#define SLAVE_CTRL_FUNC_TASK_TRIGGER_ADDR(slave_ctrl_pointer, task) \
+ ((slave_ctrl_pointer) + 4 * ((task) / SLAVE_CTRL_TASKS_PER_SET) + 2)
+#define SLAVE_CTRL_FUNC_TASK_BUSY_ADDR(slave_ctrl_pointer, task) \
+ SLAVE_CTRL_FUNC_TASK_TRIGGER_ADDR(slave_ctrl_pointer, task)
+
+#define SLAVE_CTRL_FUNC_TASK_AUTO_TRIGGER_ADDR(slave_ctrl_pointer, task) \
+ ((slave_ctrl_pointer) + 4 * ((task) / SLAVE_CTRL_TASKS_PER_SET) + 3)
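+
+/*
+ * Example: for task 10, SLAVE_CTRL_FUNC_IRQ_EN_ADDR() resolves to
+ * slave_ctrl_pointer + 4 * (10 / 8) + 0 = slave_ctrl_pointer + 4, i.e. the
+ * interrupt enable register of the second set of eight tasks.
+ */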
+
+/**
+ * struct bif_temperature_function - constant data present in temperature
+ * sensor function
+ * @temperature_pointer:	BIF slave address where temperature sensor
+ * control registers begin
+ * @slave_control_channel: Slave control channel associated with the
+ * temperature sensor function. This channel is
+ * also the task number.
+ * @accuracy_pointer: BIF slave address where temperature accuracy
+ * registers begin
+ */
+struct bif_temperature_function {
+ u16 temperature_pointer;
+ u8 slave_control_channel;
+ u16 accuracy_pointer;
+};
+
+/**
+ * enum bif_mipi_object_type - MIPI defined BIF object types
+ * %BIF_OBJ_END_OF_LIST: Indicates that the end of the object list in
+ * NVM has been reached
+ * %BIF_OBJ_SEC_SLAVE: Specifies the UIDs of secondary slaves found
+ * inside of the battery pack
+ * %BIF_OBJ_BATT_PARAM: Specifies some variety of battery parameter.
+ * There is no MIPI defined format for this object
+ * type so parsing is manufacturer specific.
+ */
+enum bif_mipi_object_type {
+ BIF_OBJ_END_OF_LIST = 0x00,
+ BIF_OBJ_SEC_SLAVE = 0x01,
+ BIF_OBJ_BATT_PARAM = 0x02,
+};
+
+/**
+ * struct bif_object - contains all header and data information for a slave
+ * data object
+ * @type: Object type
+ * @version: Object version
+ * @manufacturer_id: Manufacturer ID number allocated by MIPI
+ * @length: Length of the entire object including header and CRC
+ * @data: Raw byte data found in the object
+ * @crc: CRC of the object calculated using CRC-CCITT
+ * @list: Linked-list connection parameter
+ * @addr:		BIF slave address corresponding to the start of the object
+ *
+ * manufacturer_id == 0x0000 if MIPI type and version.
+ */
+struct bif_object {
+ u8 type;
+ u8 version;
+ u16 manufacturer_id;
+ u16 length;
+ u8 *data;
+ u16 crc;
+ struct list_head list;
+ u16 addr;
+};
+
+/**
+ * struct bif_nvm_function - constant data present in non-volatile memory
+ *				function as well as internal software state
+ * parameters
+ * @nvm_pointer: BIF slave address where NVM registers begin
+ * @slave_control_channel: Slave control channel associated with the
+ * NVM function. This channel is also the task
+ * number.
+ * @write_buffer_size: Size in bytes of the NVM write buffer. 0x00
+ * is used to denote a 256 byte buffer.
+ * @nvm_base_address: BIF slave address where NVM begins
+ * @nvm_size: NVM size in bytes
+ * @object_count: Number of BIF objects read from NVM
+ * @object_list: List of BIF objects read from NVM
+ */
+struct bif_nvm_function {
+ u16 nvm_pointer;
+ u8 slave_control_channel;
+ u8 write_buffer_size;
+ u16 nvm_base_address;
+ u16 nvm_size;
+ int object_count;
+ struct list_head object_list;
+};
+
+/**
+ * struct bif_ctrl - Opaque handle for a BIF controller to be used in bus
+ * oriented BIF function calls.
+ */
+struct bif_ctrl;
+
+/**
+ * struct bif_slave - Opaque handle for a BIF slave to be used in slave oriented
+ * BIF function calls.
+ */
+struct bif_slave;
+
+/**
+ * enum bif_bus_state - indicates the current or desired state of the BIF bus
+ * %BIF_BUS_STATE_MASTER_DISABLED: BIF host hardware is disabled
+ * %BIF_BUS_STATE_POWER_DOWN: BIF bus is in power down state and
+ * BCL is not being pulled high
+ * %BIF_BUS_STATE_STANDBY: BIF slaves are in standby state in which
+ * less power is drawn
+ * %BIF_BUS_STATE_ACTIVE: BIF slaves are ready for immediate
+ * communications
+ * %BIF_BUS_STATE_INTERRUPT: BIF bus is active, but no communication
+ * is possible. Instead, either one of the
+ * slaves or the master must transition to
+ * active state by pulling BCL low for 1
+ * tau bif period.
+ */
+enum bif_bus_state {
+ BIF_BUS_STATE_MASTER_DISABLED,
+ BIF_BUS_STATE_POWER_DOWN,
+ BIF_BUS_STATE_STANDBY,
+ BIF_BUS_STATE_ACTIVE,
+ BIF_BUS_STATE_INTERRUPT,
+};
+
+/**
+ * enum bif_bus_event - events that the BIF framework may send to BIF consumers
+ * %BIF_BUS_EVENT_BATTERY_INSERTED: Indicates that a battery was just
+ * inserted physically or that the BIF
+ * host controller for the battery just
+ * probed and a battery was already
+ * present.
+ * %BIF_BUS_EVENT_BATTERY_REMOVED: Indicates that a battery was just
+ * removed and thus its slaves are no
+ * longer accessible.
+ */
+enum bif_bus_event {
+ BIF_BUS_EVENT_BATTERY_INSERTED,
+ BIF_BUS_EVENT_BATTERY_REMOVED,
+};
+
+/* Mask values to be ORed together for use in bif_match_criteria.match_mask. */
+#define BIF_MATCH_MANUFACTURER_ID BIT(0)
+#define BIF_MATCH_PRODUCT_ID BIT(1)
+#define BIF_MATCH_FUNCTION_TYPE BIT(2)
+#define BIF_MATCH_FUNCTION_VERSION BIT(3)
+#define BIF_MATCH_IGNORE_PRESENCE BIT(4)
+
+/**
+ * struct bif_match_criteria - specifies the matching criteria that a BIF
+ * consumer uses to find an appropriate BIF slave
+ * @match_mask: Mask value specifying which parameters to match upon.
+ * This value should be some ORed combination of
+ * BIF_MATCH_* specified above.
+ * @manufacturer_id: Manufacturer ID number allocated by MIPI
+ * @product_id: Manufacturer specified product ID number
+ * @function_type: Defines the type of the function. The type may be
+ * either MIPI or manufacturer defined.
+ * @function_version: Defines the version of the function. The version may
+ * be either MIPI or manufacturer defined.
+ * @ignore_presence: If true, then slaves that are currently not present
+ * will be successfully matched against. By default, only
+ * present slaves can be matched.
+ */
+struct bif_match_criteria {
+ u32 match_mask;
+ u16 manufacturer_id;
+ u16 product_id;
+ u8 function_type;
+ u8 function_version;
+ bool ignore_presence;
+};
+
+/**
+ * bif_battery_rid_ranges - MIPI-BIF defined Rid battery pack resistance ranges
+ * %BIF_BATT_RID_SPECIAL1_MIN: Minimum Rid for special case 1
+ * %BIF_BATT_RID_SPECIAL1_MAX: Maximum Rid for special case 1
+ * %BIF_BATT_RID_SPECIAL2_MIN: Minimum Rid for special case 2
+ * %BIF_BATT_RID_SPECIAL2_MAX: Maximum Rid for special case 2
+ * %BIF_BATT_RID_SPECIAL3_MIN: Minimum Rid for special case 3
+ * %BIF_BATT_RID_SPECIAL3_MAX: Maximum Rid for special case 3
+ * %BIF_BATT_RID_LOW_COST_MIN: Minimum Rid for a low cost battery pack
+ * %BIF_BATT_RID_LOW_COST_MAX: Maximum Rid for a low cost battery pack
+ * %BIF_BATT_RID_SMART_MIN: Minimum Rid for a smart battery pack
+ * %BIF_BATT_RID_SMART_MAX: Maximum Rid for a smart battery pack
+ */
+enum bif_battery_rid_ranges {
+ BIF_BATT_RID_SPECIAL1_MIN = 0,
+ BIF_BATT_RID_SPECIAL1_MAX = 1,
+ BIF_BATT_RID_SPECIAL2_MIN = 7350,
+ BIF_BATT_RID_SPECIAL2_MAX = 7650,
+ BIF_BATT_RID_SPECIAL3_MIN = 12740,
+ BIF_BATT_RID_SPECIAL3_MAX = 13260,
+ BIF_BATT_RID_LOW_COST_MIN = 19600,
+ BIF_BATT_RID_LOW_COST_MAX = 140000,
+ BIF_BATT_RID_SMART_MIN = 240000,
+ BIF_BATT_RID_SMART_MAX = 450000,
+};
+
+#ifdef CONFIG_BIF
+
+int bif_request_irq(struct bif_slave *slave, unsigned int task,
+ struct notifier_block *nb);
+int bif_free_irq(struct bif_slave *slave, unsigned int task,
+ struct notifier_block *nb);
+
+int bif_trigger_task(struct bif_slave *slave, unsigned int task);
+int bif_task_is_busy(struct bif_slave *slave, unsigned int task);
+
+int bif_ctrl_count(void);
+struct bif_ctrl *bif_ctrl_get_by_id(unsigned int id);
+struct bif_ctrl *bif_ctrl_get(struct device *consumer_dev);
+void bif_ctrl_put(struct bif_ctrl *ctrl);
+
+int bif_ctrl_signal_battery_changed(struct bif_ctrl *ctrl);
+
+int bif_slave_match_count(const struct bif_ctrl *ctrl,
+ const struct bif_match_criteria *match_criteria);
+
+struct bif_slave *bif_slave_match_get(const struct bif_ctrl *ctrl,
+ unsigned int id, const struct bif_match_criteria *match_criteria);
+
+void bif_slave_put(struct bif_slave *slave);
+
+int bif_ctrl_notifier_register(struct bif_ctrl *ctrl,
+ struct notifier_block *nb);
+
+int bif_ctrl_notifier_unregister(struct bif_ctrl *ctrl,
+ struct notifier_block *nb);
+
+struct bif_ctrl *bif_get_ctrl_handle(struct bif_slave *slave);
+
+int bif_slave_find_function(struct bif_slave *slave, u8 function, u8 *version,
+ u16 *function_pointer);
+
+int bif_slave_read(struct bif_slave *slave, u16 addr, u8 *buf, int len);
+int bif_slave_write(struct bif_slave *slave, u16 addr, u8 *buf, int len);
+
+int bif_slave_is_present(struct bif_slave *slave);
+
+int bif_slave_is_selected(struct bif_slave *slave);
+int bif_slave_select(struct bif_slave *slave);
+
+int bif_ctrl_raw_transaction(struct bif_ctrl *ctrl, int transaction, u8 data);
+int bif_ctrl_raw_transaction_read(struct bif_ctrl *ctrl, int transaction,
+ u8 data, int *response);
+int bif_ctrl_raw_transaction_query(struct bif_ctrl *ctrl, int transaction,
+ u8 data, bool *query_response);
+
+void bif_ctrl_bus_lock(struct bif_ctrl *ctrl);
+void bif_ctrl_bus_unlock(struct bif_ctrl *ctrl);
+
+u16 bif_crc_ccitt(const u8 *buffer, unsigned int len);
+
+int bif_ctrl_measure_rid(struct bif_ctrl *ctrl);
+int bif_ctrl_get_bus_period(struct bif_ctrl *ctrl);
+int bif_ctrl_set_bus_period(struct bif_ctrl *ctrl, int period_ns);
+int bif_ctrl_get_bus_state(struct bif_ctrl *ctrl);
+int bif_ctrl_set_bus_state(struct bif_ctrl *ctrl, enum bif_bus_state state);
+
+#else
+
+static inline int bif_request_irq(struct bif_slave *slave, unsigned int task,
+ struct notifier_block *nb) { return -EPERM; }
+static inline int bif_free_irq(struct bif_slave *slave, unsigned int task,
+ struct notifier_block *nb) { return -EPERM; }
+
+static inline int bif_trigger_task(struct bif_slave *slave, unsigned int task)
+{ return -EPERM; }
+static inline int bif_task_is_busy(struct bif_slave *slave, unsigned int task)
+{ return -EPERM; }
+
+static inline int bif_ctrl_count(void) { return -EPERM; }
+static inline struct bif_ctrl *bif_ctrl_get_by_id(unsigned int id)
+{ return ERR_PTR(-EPERM); }
+static inline struct bif_ctrl *bif_ctrl_get(struct device *consumer_dev)
+{ return ERR_PTR(-EPERM); }
+static inline void bif_ctrl_put(struct bif_ctrl *ctrl) { return; }
+
+static inline int bif_ctrl_signal_battery_changed(struct bif_ctrl *ctrl)
+{ return -EPERM; }
+
+static inline int bif_slave_match_count(const struct bif_ctrl *ctrl,
+ const struct bif_match_criteria *match_criteria)
+{ return -EPERM; }
+
+static inline struct bif_slave *bif_slave_match_get(const struct bif_ctrl *ctrl,
+ unsigned int id, const struct bif_match_criteria *match_criteria)
+{ return ERR_PTR(-EPERM); }
+
+static inline void bif_slave_put(struct bif_slave *slave) { return; }
+
+static inline int bif_ctrl_notifier_register(struct bif_ctrl *ctrl,
+ struct notifier_block *nb)
+{ return -EPERM; }
+
+static inline int bif_ctrl_notifier_unregister(struct bif_ctrl *ctrl,
+ struct notifier_block *nb)
+{ return -EPERM; }
+
+static inline struct bif_ctrl *bif_get_ctrl_handle(struct bif_slave *slave)
+{ return ERR_PTR(-EPERM); }
+
+static inline int bif_slave_find_function(struct bif_slave *slave, u8 function,
+ u8 *version, u16 *function_pointer)
+{ return -EPERM; }
+
+static inline int bif_slave_read(struct bif_slave *slave, u16 addr, u8 *buf,
+ int len)
+{ return -EPERM; }
+static inline int bif_slave_write(struct bif_slave *slave, u16 addr, u8 *buf,
+ int len)
+{ return -EPERM; }
+
+static inline int bif_slave_is_present(struct bif_slave *slave)
+{ return -EPERM; }
+
+static inline int bif_slave_is_selected(struct bif_slave *slave)
+{ return -EPERM; }
+static inline int bif_slave_select(struct bif_slave *slave)
+{ return -EPERM; }
+
+static inline int bif_ctrl_raw_transaction(struct bif_ctrl *ctrl,
+						int transaction, u8 data)
+{ return -EPERM; }
+static inline int bif_ctrl_raw_transaction_read(struct bif_ctrl *ctrl,
+					int transaction, u8 data, int *response)
+{ return -EPERM; }
+static inline int bif_ctrl_raw_transaction_query(struct bif_ctrl *ctrl,
+				int transaction, u8 data, bool *query_response)
+{ return -EPERM; }
+
+static inline void bif_ctrl_bus_lock(struct bif_ctrl *ctrl)
+{ }
+static inline void bif_ctrl_bus_unlock(struct bif_ctrl *ctrl)
+{ }
+
+static inline u16 bif_crc_ccitt(const u8 *buffer, unsigned int len)
+{ return 0; }
+
+static inline int bif_ctrl_measure_rid(struct bif_ctrl *ctrl) { return -EPERM; }
+static inline int bif_ctrl_get_bus_period(struct bif_ctrl *ctrl)
+{ return -EPERM; }
+static inline int bif_ctrl_set_bus_period(struct bif_ctrl *ctrl, int period_ns)
+{ return -EPERM; }
+static inline int bif_ctrl_get_bus_state(struct bif_ctrl *ctrl)
+{ return -EPERM; }
+static inline int bif_ctrl_set_bus_state(struct bif_ctrl *ctrl,
+						enum bif_bus_state state)
+{ return -EPERM; }
+
+#endif
+
+#endif
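
As a rough illustration of how the consumer API above fits together, the sketch below shows a hypothetical consumer locating a slave that exposes the MIPI temperature function and reading one byte from its register map. Everything prefixed with my_ is invented for the example, the register read is arbitrary, and error handling is abbreviated; it is illustrative only and not taken from this patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/bif/consumer.h>

static int my_read_temp_function_byte(struct device *dev, u8 *out)
{
	struct bif_ctrl *ctrl;
	struct bif_slave *slave;
	struct bif_match_criteria criteria = {
		.match_mask	= BIF_MATCH_FUNCTION_TYPE,
		.function_type	= BIF_FUNC_TEMPERATURE,
	};
	u8 version;
	u16 func_ptr;
	int rc;

	/* controller associated with this consumer device (e.g. via DT) */
	ctrl = bif_ctrl_get(dev);
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	slave = bif_slave_match_get(ctrl, 0, &criteria);
	if (IS_ERR(slave)) {
		rc = PTR_ERR(slave);
		goto put_ctrl;
	}

	rc = bif_slave_find_function(slave, BIF_FUNC_TEMPERATURE, &version,
					&func_ptr);
	if (rc)
		goto put_slave;

	/* read the first byte of the temperature function's register map */
	rc = bif_slave_read(slave, func_ptr, out, 1);

put_slave:
	bif_slave_put(slave);
put_ctrl:
	bif_ctrl_put(ctrl);
	return rc;
}
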
diff --git a/include/linux/bif/driver.h b/include/linux/bif/driver.h
new file mode 100644
index 0000000..184d46f
--- /dev/null
+++ b/include/linux/bif/driver.h
@@ -0,0 +1,161 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BIF_DRIVER_H_
+#define _LINUX_BIF_DRIVER_H_
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/bif/consumer.h>
+
+/**
+ * struct bif_ctrl_dev - opaque handle used to identify a given BIF controller
+ * device
+ */
+struct bif_ctrl_dev;
+
+/**
+ * struct bif_ctrl_ops - BIF operations which may be implemented by BIF
+ * controller drivers
+ * @bus_transaction: Perform the specified BIF transaction which does
+ * not result in any slave response.
+ * @bus_transaction_query: Perform the specified BIF transaction which
+ * expects a BQ response in the case of slave
+ * positive acknowledgement.
+ * @bus_transaction_read: Perform the specified BIF transaction which
+ * expects an RD or TACK response from the selected
+ * slave.
+ * @read_slave_registers: Perform all BIF transactions necessary to read
+ * the specified set of contiguous registers from
+ * the previously selected slave. This operation
+ * is used to optimize the common case of slave
+ *				register reads since a BIF controller driver
+ * can take advantage of BIF burst reads while the
+ *				BIF core driver cannot due to the inherently
+ * tight timing requirements.
+ * @write_slave_registers: Perform all BIF transactions necessary to write
+ * the specified set of contiguous registers to
+ * the previously selected slave. This operation
+ * is used to optimize the common case of slave
+ *				register writes since a BIF controller
+ * driver can remove redundant steps when
+ * performing several WD commands in a row.
+ * @get_bus_period: Return the tau_bif BIF bus clock period in
+ * nanoseconds.
+ * @set_bus_period: Set the tau_bif BIF bus clock period in
+ * nanoseconds. If the exact period is not
+ * supported by the BIF controller hardware, then
+ * the next larger supported period should be used.
+ * @get_battery_presence: Return the current state of the battery pack.
+ * If a battery pack is present, then return >= 1.
+ * If a battery pack is not present, then return 0.
+ * If an error occurs during presence detection,
+ * then return errno.
+ * @get_battery_rid: Return the measured value of the Rid battery
+ * pack pull-down resistor in ohms.
+ * @get_bus_state: Return the current bus state as defined by one
+ * of the enum bif_bus_state values.
+ * @set_bus_state: Set the BIF bus state to the specified enum
+ * bif_bus_state value.
+ *
+ * The following operations must be defined by every BIF controller driver in
+ * order to ensure baseline functionality:
+ * bus_transaction, bus_transaction_query, get_bus_state, and set_bus_state.
+ *
+ * The BIF core driver is unaware of BIF transaction timing constraints. A
+ * given BIF controller driver must ensure that all timing constraints in the
+ * MIPI-BIF specification are met as transactions are carried out.
+ *
+ * Conversion between 11-bit and 17-bit BIF words (i.e. the insertion of BCF_n,
+ * parity bits, and the inversion bit) must be handled inside of the BIF
+ * controller driver (either in software or hardware). This guarantees maximum
+ * performance if hardware support is available.
+ *
+ * The bus_transaction_read operation must return -ETIMEDOUT in the case of no
+ * RD or TACK word received. This allows the transaction query, TQ, command
+ * to be used for slave selection verification.
+ *
+ * It is acceptable for the BIF bus state to be changed autonomously by a BIF
+ * controller driver in response to low level bus actions without a call to
+ * set_bus_state. One example is the case of receiving a slave interrupt
+ * while in interrupt state as this intrinsically causes the bus to enter the
+ * active communication state.
+ */
+struct bif_ctrl_ops {
+ int (*bus_transaction) (struct bif_ctrl_dev *bdev, int transaction,
+ u8 data);
+ int (*bus_transaction_query) (struct bif_ctrl_dev *bdev,
+ int transaction, u8 data,
+ bool *query_response);
+ int (*bus_transaction_read) (struct bif_ctrl_dev *bdev,
+ int transaction, u8 data,
+ int *response);
+ int (*read_slave_registers) (struct bif_ctrl_dev *bdev, u16 addr,
+ u8 *data, int len);
+ int (*write_slave_registers) (struct bif_ctrl_dev *bdev, u16 addr,
+ const u8 *data, int len);
+ int (*get_bus_period) (struct bif_ctrl_dev *bdev);
+ int (*set_bus_period) (struct bif_ctrl_dev *bdev, int period_ns);
+ int (*get_battery_presence) (struct bif_ctrl_dev *bdev);
+ int (*get_battery_rid) (struct bif_ctrl_dev *bdev);
+ int (*get_bus_state) (struct bif_ctrl_dev *bdev);
+ int (*set_bus_state) (struct bif_ctrl_dev *bdev, int state);
+};
+
+/**
+ * struct bif_ctrl_desc - BIF bus controller descriptor
+ * @name: Name used to identify the BIF controller
+ * @ops: BIF operations supported by the BIF controller
+ * @bus_clock_min_ns: Minimum tau_bif BIF bus clock period supported by the
+ * BIF controller
+ * @bus_clock_max_ns: Maximum tau_bif BIF bus clock period supported by the
+ * BIF controller
+ *
+ * Each BIF controller registered with the BIF core is described with a
+ * structure of this type.
+ */
+struct bif_ctrl_desc {
+ const char *name;
+ struct bif_ctrl_ops *ops;
+ int bus_clock_min_ns;
+ int bus_clock_max_ns;
+};
+
+#ifdef CONFIG_BIF
+
+struct bif_ctrl_dev *bif_ctrl_register(struct bif_ctrl_desc *bif_desc,
+ struct device *dev, void *driver_data, struct device_node *of_node);
+
+void bif_ctrl_unregister(struct bif_ctrl_dev *bdev);
+
+void *bdev_get_drvdata(struct bif_ctrl_dev *bdev);
+
+int bif_ctrl_notify_battery_changed(struct bif_ctrl_dev *bdev);
+int bif_ctrl_notify_slave_irq(struct bif_ctrl_dev *bdev);
+
+#else
+
+static inline struct bif_ctrl_dev *bif_ctrl_register(
+ struct bif_ctrl_desc *bif_desc, struct device *dev, void *driver_data,
+ struct device_node *of_node)
+{ return ERR_PTR(-EINVAL); }
+
+static inline void bif_ctrl_unregister(struct bif_ctrl_dev *bdev) { }
+
+static inline void *bdev_get_drvdata(struct bif_ctrl_dev *bdev) { return NULL; }
+
+static inline int bif_ctrl_notify_battery_changed(struct bif_ctrl_dev *bdev)
+{ return -EINVAL; }
+static inline int bif_ctrl_notify_slave_irq(struct bif_ctrl_dev *bdev)
+{ return -EINVAL; }
+
+#endif
+
+#endif
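
For the controller side, a minimal hypothetical driver might register with the core as sketched below. Only the four mandatory ops are filled in, the transaction callbacks are stubs standing in for real hardware accessors, and the my_ names and bus-clock limits are placeholder values; this is a sketch, not code from this patch.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/bif/driver.h>

struct my_bif_hw {
	struct bif_ctrl_dev *bdev;
	int bus_state;
};

static int my_bus_transaction(struct bif_ctrl_dev *bdev, int transaction,
				u8 data)
{
	/* write the 17-bit word (with BCF_n/parity) to the hardware here */
	return 0;
}

static int my_bus_transaction_query(struct bif_ctrl_dev *bdev,
				int transaction, u8 data, bool *query_response)
{
	*query_response = false;	/* no BQ pulse observed */
	return 0;
}

static int my_get_bus_state(struct bif_ctrl_dev *bdev)
{
	struct my_bif_hw *hw = bdev_get_drvdata(bdev);

	return hw->bus_state;
}

static int my_set_bus_state(struct bif_ctrl_dev *bdev, int state)
{
	struct my_bif_hw *hw = bdev_get_drvdata(bdev);

	hw->bus_state = state;
	return 0;
}

static struct bif_ctrl_ops my_bif_ops = {
	.bus_transaction	= my_bus_transaction,
	.bus_transaction_query	= my_bus_transaction_query,
	.get_bus_state		= my_get_bus_state,
	.set_bus_state		= my_set_bus_state,
};

static struct bif_ctrl_desc my_bif_desc = {
	.name			= "my-bif-ctrl",
	.ops			= &my_bif_ops,
	.bus_clock_min_ns	= 2000,		/* placeholder limits */
	.bus_clock_max_ns	= 150000,
};

static int my_bif_probe(struct platform_device *pdev)
{
	struct my_bif_hw *hw;

	hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return -ENOMEM;

	hw->bus_state = BIF_BUS_STATE_MASTER_DISABLED;
	hw->bdev = bif_ctrl_register(&my_bif_desc, &pdev->dev, hw,
					pdev->dev.of_node);
	return IS_ERR(hw->bdev) ? PTR_ERR(hw->bdev) : 0;
}
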
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index d3ee879..7a5ab0d 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -112,10 +112,10 @@
/* This needs to be modified manually now, when we add
a new RANGE of SSIDs to the msg_mask_tbl */
#define MSG_MASK_TBL_CNT 24
-#define EVENT_LAST_ID 0x099F
+#define EVENT_LAST_ID 0x09AB
#define MSG_SSID_0 0
-#define MSG_SSID_0_LAST 93
+#define MSG_SSID_0_LAST 94
#define MSG_SSID_1 500
#define MSG_SSID_1_LAST 506
#define MSG_SSID_2 1000
@@ -278,6 +278,9 @@
MSG_LVL_LOW,
MSG_LVL_MED,
MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_HIGH,
MSG_LVL_LOW
};
@@ -713,7 +716,7 @@
/* LOG CODES */
#define LOG_0 0x0
-#define LOG_1 0x1750
+#define LOG_1 0x1755
#define LOG_2 0x0
#define LOG_3 0x0
#define LOG_4 0x4910
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 285b593..8a1b3a1 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -70,7 +70,7 @@
void dma_contiguous_reserve(phys_addr_t addr_limit);
int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
- phys_addr_t limit);
+ phys_addr_t limit, const char *name);
int dma_contiguous_add_device(struct device *dev, phys_addr_t base);
@@ -91,7 +91,7 @@
phys_addr_t base, phys_addr_t limit)
{
int ret;
- ret = dma_contiguous_reserve_area(size, &base, limit);
+ ret = dma_contiguous_reserve_area(size, &base, limit, NULL);
if (ret == 0)
ret = dma_contiguous_add_device(dev, base);
return ret;
diff --git a/include/linux/mfd/pm8xxx/pm8921-charger.h b/include/linux/mfd/pm8xxx/pm8921-charger.h
index 785a33a..1c67b1e 100644
--- a/include/linux/mfd/pm8xxx/pm8921-charger.h
+++ b/include/linux/mfd/pm8xxx/pm8921-charger.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -188,6 +188,7 @@
int btc_delay_ms;
int btc_panic_if_cant_stop_chg;
int stop_chg_upon_expiry;
+ bool disable_chg_rmvl_wrkarnd;
};
enum pm8921_charger_source {
diff --git a/include/linux/msm_ipa.h b/include/linux/msm_ipa.h
index 1b869b1..c6f0730 100644
--- a/include/linux/msm_ipa.h
+++ b/include/linux/msm_ipa.h
@@ -155,6 +155,10 @@
* wlan client normal: wlan client moved out of power save
* sw routing enable: ipa routing is disabled
* sw routing disable: ipa routing is enabled
+ * wlan ap connect: wlan AP(access point) is up
+ * wlan ap disconnect: wlan AP(access point) is down
+ * wlan sta connect: wlan STA(station) is up
+ * wlan sta disconnect: wlan STA(station) is down
*/
enum ipa_wlan_event {
WLAN_CLIENT_CONNECT,
@@ -163,6 +167,10 @@
WLAN_CLIENT_NORMAL_MODE,
SW_ROUTING_ENABLE,
SW_ROUTING_DISABLE,
+ WLAN_AP_CONNECT,
+ WLAN_AP_DISCONNECT,
+ WLAN_STA_CONNECT,
+ WLAN_STA_DISCONNECT,
};
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index 404ea52..45bc0ea 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -564,6 +564,15 @@
mdp_op_max,
};
+enum {
+ WB_FORMAT_NV12,
+ WB_FORMAT_RGB_565,
+ WB_FORMAT_RGB_888,
+ WB_FORMAT_xRGB_8888,
+ WB_FORMAT_ARGB_8888,
+	WB_FORMAT_ARGB_8888_INPUT_ALPHA /* not yet supported */
+};
+
struct msmfb_mdp_pp {
uint32_t op;
union {
@@ -585,6 +594,7 @@
metadata_op_base_blend,
metadata_op_frame_rate,
metadata_op_vic,
+ metadata_op_wb_format,
metadata_op_max
};
@@ -592,11 +602,17 @@
uint32_t is_premultiplied;
};
+struct mdp_mixer_cfg {
+ uint32_t writeback_format;
+ uint32_t alpha;
+};
+
struct msmfb_metadata {
uint32_t op;
uint32_t flags;
union {
struct mdp_blend_cfg blend_cfg;
+ struct mdp_mixer_cfg mixer_cfg;
uint32_t panel_frame_rate;
uint32_t video_info_code;
} data;
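
To exercise the new metadata op from user space, a client could fill struct msmfb_metadata as sketched below. This is illustrative only; it assumes the frame buffer node for the writeback mixer is already open and that MSMFB_METADATA_SET is the ioctl carrying struct msmfb_metadata in this tree.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_mdp.h>

static int set_wb_format_rgb888(int fb_fd)
{
	struct msmfb_metadata metadata;

	memset(&metadata, 0, sizeof(metadata));
	metadata.op = metadata_op_wb_format;
	metadata.data.mixer_cfg.writeback_format = WB_FORMAT_RGB_888;

	return ioctl(fb_fd, MSMFB_METADATA_SET, &metadata);
}
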
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index c6ee4f0..0683296 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -365,8 +365,8 @@
* requests to connect to a specified network but without separating
* auth and assoc steps. For this, you need to specify the SSID in a
* %NL80211_ATTR_SSID attribute, and can optionally specify the association
- * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC,
- * %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
+ * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_USE_MFP,
+ * %NL80211_ATTR_MAC, %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
* %NL80211_ATTR_CONTROL_PORT_ETHERTYPE and
* %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT.
* Background scan period can optionally be
@@ -906,7 +906,7 @@
* @NL80211_ATTR_USE_MFP: Whether management frame protection (IEEE 802.11w) is
* used for the association (&enum nl80211_mfp, represented as a u32);
* this attribute can be used
- * with %NL80211_CMD_ASSOCIATE request
+ * with %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests
*
* @NL80211_ATTR_STA_FLAGS2: Attribute containing a
* &struct nl80211_sta_flag_update.
diff --git a/include/linux/regulator/krait-regulator.h b/include/linux/regulator/krait-regulator.h
index 836f9d6..b784531 100644
--- a/include/linux/regulator/krait-regulator.h
+++ b/include/linux/regulator/krait-regulator.h
@@ -13,7 +13,8 @@
#ifndef __KRAIT_REGULATOR_H__
#define __KRAIT_REGULATOR_H__
-#define KRAIT_REGULATOR_DRIVER_NAME "krait-power-regulator"
+#define KRAIT_REGULATOR_DRIVER_NAME "krait-power-regulator"
+#define KRAIT_PDN_DRIVER_NAME "krait-pdn"
/**
* krait_power_init - driver initialization function
diff --git a/include/linux/uhid.h b/include/linux/uhid.h
new file mode 100644
index 0000000..16b786a
--- /dev/null
+++ b/include/linux/uhid.h
@@ -0,0 +1,33 @@
+#ifndef __UHID_H_
+#define __UHID_H_
+
+/*
+ * User-space I/O driver support for HID subsystem
+ * Copyright (c) 2012 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Public header for user-space communication. We try to keep every structure
+ * aligned but to be safe we also use __attribute__((__packed__)). Therefore,
+ * the communication should be ABI compatible even between architectures.
+ */
+
+#include <linux/input.h>
+#include <linux/types.h>
+
+enum uhid_event_type {
+ UHID_DUMMY,
+};
+
+struct uhid_event {
+ __u32 type;
+} __attribute__((__packed__));
+
+#endif /* __UHID_H_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 5e32ff7..6666c69 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1199,6 +1199,7 @@
* @ie: IEs for association request
* @ie_len: Length of assoc_ie in octets
* @privacy: indicates whether privacy-enabled APs should be used
+ * @mfp: indicates whether management frame protection is used
* @crypto: crypto settings
* @key_len: length of WEP key for shared key authentication
* @key_idx: index of WEP key for shared key authentication
@@ -1219,6 +1220,7 @@
u8 *ie;
size_t ie_len;
bool privacy;
+ enum nl80211_mfp mfp;
struct cfg80211_crypto_settings crypto;
const u8 *key;
u8 key_len, key_idx;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5c2e805..1ccc69e 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5173,6 +5173,15 @@
connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
}
+ if (info->attrs[NL80211_ATTR_USE_MFP]) {
+ connect.mfp = nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]);
+ if (connect.mfp != NL80211_MFP_REQUIRED &&
+ connect.mfp != NL80211_MFP_NO)
+ return -EINVAL;
+ } else {
+ connect.mfp = NL80211_MFP_NO;
+ }
+
if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
connect.channel =
ieee80211_get_channel(wiphy,
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index bbbed73..ab91446 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -190,7 +190,8 @@
prev_bssid,
params->ssid, params->ssid_len,
params->ie, params->ie_len,
- false, ¶ms->crypto,
+ params->mfp != NL80211_MFP_NO,
+ ¶ms->crypto,
params->flags, ¶ms->ht_capa,
¶ms->ht_capa_mask);
if (err)
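
With the cfg80211/nl80211 changes above, a connect request can now carry NL80211_ATTR_USE_MFP. A libnl-style user-space sketch, assuming msg has already been prepared with genlmsg_put() for NL80211_CMD_CONNECT plus the SSID and frequency attributes:

#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

/* only NL80211_MFP_REQUIRED and NL80211_MFP_NO are accepted by the kernel */
static int connect_require_mfp(struct nl_msg *msg)
{
	return nla_put_u32(msg, NL80211_ATTR_USE_MFP, NL80211_MFP_REQUIRED);
}
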
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index 0b26a56..25d3f56 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -487,16 +487,16 @@
SOC_ENUM_SINGLE(TAPAN_A_CDC_TX4_MUX_CTL, 4, 3, cf_text);
static const struct soc_enum cf_rxmix1_enum =
- SOC_ENUM_SINGLE(TAPAN_A_CDC_RX1_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAPAN_A_CDC_RX1_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix2_enum =
- SOC_ENUM_SINGLE(TAPAN_A_CDC_RX2_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAPAN_A_CDC_RX2_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix3_enum =
- SOC_ENUM_SINGLE(TAPAN_A_CDC_RX3_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAPAN_A_CDC_RX3_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix4_enum =
- SOC_ENUM_SINGLE(TAPAN_A_CDC_RX4_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAPAN_A_CDC_RX4_B4_CTL, 0, 3, cf_text);
static const struct snd_kcontrol_new tapan_snd_controls[] = {
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index f48dbf1..7f17eef 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -829,25 +829,25 @@
SOC_ENUM_SINGLE(TAIKO_A_CDC_TX10_MUX_CTL, 4, 3, cf_text);
static const struct soc_enum cf_rxmix1_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_RX1_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_RX1_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix2_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_RX2_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_RX2_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix3_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_RX3_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_RX3_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix4_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_RX4_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_RX4_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix5_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_RX5_B4_CTL, 1, 3, cf_text)
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_RX5_B4_CTL, 0, 3, cf_text)
;
static const struct soc_enum cf_rxmix6_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_RX6_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_RX6_B4_CTL, 0, 3, cf_text);
static const struct soc_enum cf_rxmix7_enum =
- SOC_ENUM_SINGLE(TAIKO_A_CDC_RX7_B4_CTL, 1, 3, cf_text);
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_RX7_B4_CTL, 0, 3, cf_text);
static const char * const class_h_dsm_text[] = {
"ZERO", "DSM_HPHL_RX1", "DSM_SPKR_RX7"
diff --git a/sound/soc/msm/mdm9625.c b/sound/soc/msm/mdm9625.c
index eb7366c..2bef1b7 100644
--- a/sound/soc/msm/mdm9625.c
+++ b/sound/soc/msm/mdm9625.c
@@ -749,7 +749,7 @@
.name = "MDM9625 Media1",
.stream_name = "MultiMedia1",
.cpu_dai_name = "MultiMedia1",
- .platform_name = "msm-pcm-dsp",
+ .platform_name = "msm-pcm-dsp.0",
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},