Merge "ASoC: msm: Modify platform device name for Multimedia1"
diff --git a/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt b/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
index c71b190..24dbb4b 100644
--- a/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
+++ b/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
@@ -1,14 +1,27 @@
Qualcomm Interprocessor Communication Spinlock
+--Dedicated Hardware Implementation--
Required properties:
-- compatible : should be "qcom,ipc-spinlock"
+- compatible : should be "qcom,ipc-spinlock-sfpb"
- reg : the location and size of the spinlock hardware
- qcom,num-locks : the number of locks supported
Example:
qcom,ipc-spinlock@fd484000 {
- compatible = "qcom,ipc-spinlock";
+ compatible = "qcom,ipc-spinlock-sfpb";
reg = <0xfd484000 0x1000>;
qcom,num-locks = <32>;
};
+
+--LDREX Implementation--
+Required properties:
+- compatible : should be "qcom,ipc-spinlock-ldrex"
+- reg : the location and size of the shared lock memory
+
+Example:
+
+ qcom,ipc-spinlock@fa00000 {
+ compatible = "qcom,ipc-spinlock-ldrex";
+ reg = <0xfa00000 0x200000>;
+ };
diff --git a/Documentation/devicetree/bindings/bif/bif.txt b/Documentation/devicetree/bindings/bif/bif.txt
new file mode 100644
index 0000000..c4ff08b
--- /dev/null
+++ b/Documentation/devicetree/bindings/bif/bif.txt
@@ -0,0 +1,22 @@
+BIF (Battery Interface) Controllers
+
+Optional properties:
+- qcom,known-device-addresses: Specifies a list of integers which correspond to
+ the 8-bit BIF bus device addresses of BIF slaves
+ found on the target.
+
+BIF Consumers
+
+Optional properties:
+- qcom,bif-ctrl: phandle of parent BIF controller device node
+
+Example:
+ foo_ctrl: foo-controller {
+ ...
+		qcom,known-device-addresses = <0x80 0x81>;
+ };
+
+ bar-consumer {
+ ...
+ qcom,bif-ctrl = <&foo_ctrl>;
+ };
diff --git a/Documentation/devicetree/bindings/bt-fm/fm.txt b/Documentation/devicetree/bindings/bt-fm/fm.txt
new file mode 100644
index 0000000..6bb3599
--- /dev/null
+++ b/Documentation/devicetree/bindings/bt-fm/fm.txt
@@ -0,0 +1,29 @@
+Qualcomm radio iris device
+
+-FM RX playback with no RDS
+
+ FM samples are filtered by external RF chips at baseband, then sent to the Riva-FM core through a serial link.
+ The FM signal is demodulated and the audio L/R samples are stored in memory.
+ The received FM Rx sample data is routed to an external audio codec.
+
+-Audio playback to FM TX
+
+ Used to play an audio source over FM TX.
+ The FM TX module reads the audio samples from memory; the modulated samples are then sent over the serial interface to the external RF chip.
+
+-RX playback with RDS
+
+ FM Rx receives audio data along with RDS.
+
+-FM TX with RDS
+
+ Used to send RDS messages to an external FM receiver.
+
+Required Properties:
+- compatible: "qcom,iris_fm"
+
+Example:
+ qcom,iris-fm {
+ compatible = "qcom,iris_fm";
+ };
+
diff --git a/Documentation/devicetree/bindings/coresight/coresight.txt b/Documentation/devicetree/bindings/coresight/coresight.txt
index 0519aef..48f25de 100644
--- a/Documentation/devicetree/bindings/coresight/coresight.txt
+++ b/Documentation/devicetree/bindings/coresight/coresight.txt
@@ -32,10 +32,14 @@
component
- coresight-child-ports : list of input port numbers of the children
- coresight-default-sink : represents the default compile time CoreSight sink
+- coresight-ctis : list of ctis that this component interacts with
- qcom,pc-save : program counter save implemented
- qcom,blk-size : block size for tmc-etr to usb transfers
- qcom,round-robin : indicates if per core etms are allowed round-robin access
by the funnel
+- qcom,reset-flush-race : indicates if a race exists between flushing and DDR
+  being put into self-refresh during watchdog reset
+- qcom,write-64bit : indicates that only 64-bit data writes are supported by the STM
Examples:
@@ -118,3 +122,24 @@
qcom,pc-save;
qcom,round-robin;
};
+
+4. Miscellaneous
+ cti0: cti@fc308000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc308000 0x1000>;
+ reg-names = "cti0-base";
+
+ coresight-id = <15>;
+ coresight-name = "coresight-cti0";
+ coresight-nr-inports = <0>;
+ };
+
+ cti1: cti@fc309000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc309000 0x1000>;
+ reg-names = "cti1-base";
+
+ coresight-id = <16>;
+ coresight-name = "coresight-cti1";
+ coresight-nr-inports = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index 1e47c02..0004302 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -24,7 +24,11 @@
KGSL_CLK_AXI 0x00000020
Bus Scaling Data:
-- qcom,grp3d-vectors: A series of 4 cell properties, format of which is:
+- qcom,msm-bus,name: String property to describe the name of the 3D graphics processor.
+- qcom,msm-bus,num-cases: This is the number of Bus Scaling use cases defined in the vectors property.
+- qcom,msm-bus,active-only: A boolean flag indicating whether the bus vote applies to the active context only.
+- qcom,msm-bus,num-paths: This represents the number of paths in each Bus Scaling Usecase.
+- qcom,msm-bus,vectors-KBps: A series of 4 cell properties, format of which is:
<src dst ab ib>, <src dst ab ib>, // For Bus Scaling Usecase 1
<src dst ab ib>, <src dst ab ib>, // For Bus Scaling Usecase 2
<.. .. .. ..>, <.. .. .. ..>; // For Bus Scaling Usecase n
@@ -41,8 +45,6 @@
1 = MSM_BUS_SLAVE_OCMEM
ab: Represents aggregated bandwidth. This value is 0 for Graphics.
ib: Represents instantaneous bandwidth. This value has a range <0 8000 MB/s>
-- qcom,grp3d-num-vectors-per-usecase: This represents the number of vectors in each Bus Scaling Usecase.
-- qcom,grp3d-num-bus-scale-usecases: This is the the number of Bus Scaling use cases defined in the vectors property
GDSC Oxili Regulators:
- vddcx-supply: Phandle for vddcx regulator device node.
@@ -93,12 +95,17 @@
qcom,clk-map = <0x00000016>; //KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE
/* Bus Scale Settings */
- qcom,grp3d-vectors = <0 0 0 0>, <2 1 0 0>,
- <0 0 0 2000>, <2 1 0 3000>,
- <0 0 0 4000>, <2 1 0 5000>,
- <0 0 0 6400>, <2 1 0 7600>;
- qcom,grp3d-num-vectors-per-usecase = <2>;
- qcom,grp3d-num-bus-scale-usecases = <4>;
+ qcom,msm-bus,name = "grp3d";
+ qcom,msm-bus,num-cases = <6>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,vectors-KBps =
+ <26 512 0 0>, <89 604 0 0>,
+ <26 512 0 2200000>, <89 604 0 3000000>,
+ <26 512 0 4000000>, <89 604 0 3000000>,
+ <26 512 0 4000000>, <89 604 0 4500000>,
+ <26 512 0 6400000>, <89 604 0 4500000>,
+ <26 512 0 6400000>, <89 604 0 7600000>;
/* GDSC oxili regulators */
vddcx-supply = <&gdsc_oxili_cx>;
diff --git a/Documentation/devicetree/bindings/input/qpnp-keypad.txt b/Documentation/devicetree/bindings/input/qpnp-keypad.txt
new file mode 100644
index 0000000..8f7fbe7
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/qpnp-keypad.txt
@@ -0,0 +1,57 @@
+Qualcomm QPNP keypad controller
+
+The qpnp-keypad driver supports the PMIC keypad controller module
+in the Qualcomm PMICs. This controller supports a 10 x 8 (row x col)
+configuration and is connected to the host processor on the
+SPMI interface.
+
+Required properties:
+- compatible: Must be "qcom,qpnp-keypad"
+- reg: Specifies the SPMI address and size for the keypad controller
+- interrupts: Specifies the interrupt associated with keypad controller
+- interrupt-names: The names of the 2 interrupts associated with the keypad
+ controller. They are - "kp-sense" and "kp-stuck".
+- keypad,num-rows: Number of rows used in the keypad configuration. These
+ rows are the number of PMIC gpios configured as drive
+ lines. Possible values: Max = 10, Min = 2.
+- keypad,num-columns: Number of columns used in the keypad configuration. These
+ cols are number of PMIC gpios configured as sense lines.
+ Possible values: Max = 8, Min = 1.
+- linux,keymap:		Row-column-keycode mapping. It is an array of packed
+			entries containing the equivalent of row, column and
+			linux key-code. Each value is represented as
+			(row << 24 | column << 16 | key-code); see the example below.
+
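+For instance, a key at row 1, column 2 that reports key-code 30 (KEY_A) would
+be encoded as (1 << 24 | 2 << 16 | 30) = 0x0102001e.
+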
+Optional Properties:
+- qcom,scan-delay-ms: Wait time in milliseconds before each keypad scan.
+ This is used to determine if the key has been stuck.
+ Possible values: 1, 2, 4, 8, 16, 32, 64, 128ms.
+- qcom,row-hold-ns: Wait time in nanoseconds between each row assertion.
+ Configured based on last-row scan delay.
+ Possible values: 31250, 62500, 125000, 250000ns.
+- qcom,debounce-ms: Wait time in milliseconds before the column data is
+ sampled for key press detection.
+ Possible values: 5, 10, 15, 20ms.
+- qcom,wakeup: Configure the keypad as a wakeup source. This is a
+ boolean property.
+- linux,keypad-no-autorepeat:
+ Disables the auto-repeat feature for the keys. This
+ is a boolean property.
+
+Example:
+
+ qcom,keypad@a800 {
+ compatible = "qcom,qpnp-keypad";
+ reg = <0xA800 0x100>;
+ interrupts = <0x1 0xA8 0x0>,
+ <0x1 0xA8 0x1>;
+ interrupt-names = "kp-sense", "kp-stuck";
+ keypad,num-rows = <2>;
+ keypad,num-cols = <2>;
+ qcom,scan-delay-ms = <128>;
+ qcom,row-hold-ns = <31250>;
+ qcom,debounce-ms = <20>;
+ qcom,wakeup;
+ linux,keymap = <0x00000001 0x00010002
+ 0x01000003 0x01010004>;
+	};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/atmel-mxt-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/atmel-mxt-ts.txt
index bcea355..6fe88a9 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/atmel-mxt-ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/atmel-mxt-ts.txt
@@ -17,11 +17,15 @@
- atmel,family-id : family identification of the controller
- atmel,variant-id : variant identification of the controller
- atmel,version : firmware version of the controller
- - atmel,build i : firmware build number of the controller
- - atmel,bootldr-id : bootloader identification of the controller
- - atmel,fw-name : firmware name to used for flashing firmware
+ - atmel,build : firmware build number of the controller
+
+Required for firmware update only:
+ - atmel,fw-name : firmware name to use for flashing firmware
+ - atmel,bootldr-id : bootloader identification of the controller
Optional property:
+ - atmel,bl-addr : bootloader address; by default it is looked up
+		   in the mxt_slave_addresses structure
- atmel,config : configuration parameter for the controller
- atmel,i2c-pull-up : specify to indicate pull up is needed
- vcc_i2c-supply : Power source required to pull up i2c bus
diff --git a/Documentation/devicetree/bindings/memory.txt b/Documentation/devicetree/bindings/memory.txt
new file mode 100644
index 0000000..74e0476
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory.txt
@@ -0,0 +1,101 @@
+* Memory binding
+
+The /memory node provides basic information about the address and size
+of the physical memory. This node is usually filled or updated by the
+bootloader, depending on the actual memory configuration of the given
+hardware.
+
+The memory layout is described by the following node:
+
+memory {
+ reg = <(baseaddr1) (size1)
+ (baseaddr2) (size2)
+ ...
+ (baseaddrN) (sizeN)>;
+};
+
+baseaddrX: the base address of the defined memory bank
+sizeX: the size of the defined memory bank
+
+More than one memory bank can be defined.
+
+
+* Memory regions
+
+In the /memory node one can create additional nodes describing particular
+memory regions, usually for special usage by various device drivers. Good
+examples are contiguous memory allocations or memory sharing with another
+operating system on the same hardware board. Those special memory
+regions might depend on the board configuration and devices used on the
+target system.
+
+Parameters for each memory region can be encoded into the device tree
+with the following convention:
+
+(name): region@(base-address) {
+ reg = <(baseaddr) (size)>;
+ (linux,contiguous-region);
+ (linux,default-contiguous-region);
+};
+
+name: a name given to the defined region.
+base-address: the base address of the defined region.
+size: the size of the memory region.
+linux,contiguous-region: property indicating that the defined memory
+ region is used for contiguous memory allocations,
+ Linux specific (optional)
+linux,default-contiguous-region: property indicating that the region
+ is the default region for all contiguous memory
+ allocations, Linux specific (optional)
+
+
+* Device nodes
+
+Once the regions in the /memory node are defined, they can be assigned
+to some device nodes for their special use. The following
+properties are defined:
+
+linux,contiguous-region = <&phandle>;
+ This property indicates that the device driver should use the
+	memory region pointed to by the given phandle.
+
+
+* Example:
+
+This example defines a memory layout consisting of 4 memory banks. Two
+contiguous regions are defined for the Linux kernel, one default region for
+all device drivers (named contig_mem, placed at 0x72000000, 64MiB) and one dedicated to the
+framebuffer device (named display_mem, placed at 0x78000000, 16MiB). The
+display_mem region is then assigned to fb@12300000 device for contiguous
+memory allocation with Linux kernel drivers.
+
+The reason for creating a separate region for the framebuffer device is to
+match the framebuffer address configured by the bootloader, so that once
+the Linux kernel drivers start, no glitches appear in the displayed boot
+logo.
+
+/ {
+ /* ... */
+ memory {
+ reg = <0x40000000 0x10000000
+ 0x50000000 0x10000000
+ 0x60000000 0x10000000
+ 0x70000000 0x10000000>;
+
+ contig_mem: region@72000000 {
+ linux,contiguous-region;
+ linux,default-contiguous-region;
+ reg = <0x72000000 0x4000000>;
+ };
+
+ display_mem: region@78000000 {
+ linux,contiguous-region;
+ reg = <0x78000000 0x1000000>;
+ };
+ };
+
+ fb@12300000 {
+ linux,contiguous-region = <&display_mem>;
+ status = "okay";
+ };
+};
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index 5e311be..3cd29e4 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -13,9 +13,11 @@
registers.
- reg-names: "ipa-base" - string to identify the IPA CORE base registers.
"bam-base" - string to identify the IPA BAM base registers.
+ "a2-bam-base" - string to identify the A2 BAM base registers.
- interrupts: Specifies the interrupt associated with IPA.
- interrupt-names: "ipa-irq" - string to identify the IPA core interrupt.
"bam-irq" - string to identify the IPA BAM interrupt.
+ "a2-bam-irq" - string to identify the A2 BAM interrupt.
- qcom,ipa-hw-ver: Specifies the IPA hardware version.
IPA pipe sub nodes (A2 static pipes configurations):
@@ -49,10 +51,12 @@
compatible = "qcom,ipa";
reg = <0xfd4c0000 0x26000>,
<0xfd4c4000 0x14818>;
- reg-names = "ipa-base", "bam-base";
+ <0xfc834000 0x7000>;
+	reg-names = "ipa-base", "bam-base", "a2-bam-base";
interrupts = <0 252 0>,
<0 253 0>;
- interrupt-names = "ipa-irq", "bam-irq";
+ <0 29 1>;
+	interrupt-names = "ipa-irq", "bam-irq", "a2-bam-irq";
qcom,ipa-hw-ver = <1>;
qcom,pipe1 {
diff --git a/Documentation/devicetree/bindings/platform/msm/ssm.txt b/Documentation/devicetree/bindings/platform/msm/ssm.txt
new file mode 100644
index 0000000..8fb3356
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/ssm.txt
@@ -0,0 +1,30 @@
+* Qualcomm Secure Service Module (SSM)
+
+SSM provides an interface for OEM drivers to communicate with the Modem and
+TrustZone.
+
+This module provides the following features:
+ - Key exchange between the Modem and TrustZone for encryption/decryption
+   of mode information
+ - Interface for third-party drivers to send mode updates to the Modem
+ - Interface for loading the TrustZone application
+
+Required properties:
+- compatible: Must be "qcom,ssm"
+
+Optional properties:
+- qcom,channel-name: Name of the SMD channel used for communication
+ between MODEM and SSM driver.
+- qcom,need-keyexhg:	This property controls the initial key exchange
+			between APPS (application processor) and the MODEM.
+			If not mentioned, the initial key exchange is
+			not required.
+			If this property is mentioned, it is mandatory
+			for the MODEM to perform the initial key exchange with APPS.
+
+Example:
+ qcom,ssm {
+ compatible = "qcom,ssm";
+ qcom,channel-name = "SSM_RTR";
+ qcom,need-keyexhg;
+	};
diff --git a/Documentation/devicetree/bindings/regulator/krait-regulator.txt b/Documentation/devicetree/bindings/regulator/krait-regulator.txt
index f057834..c783ac8 100644
--- a/Documentation/devicetree/bindings/regulator/krait-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/krait-regulator.txt
@@ -1,5 +1,20 @@
Krait Voltage regulators
+The CPUs are powered by a single supply sourced from PMIC ganged regulators operating in
+different phases. Each Krait can additionally draw power from the single supply via
+an LDO or a head switch (BHS). The first level node represents the PMIC ganged regulator
+and its properties, and it encompasses second level nodes that represent the individual
+Krait LDO/BHS control regulators.
+
+[First Level Nodes]
+Required properties:
+- compatible: Must be "qcom,krait-pdn"
+
+Optional properties:
+- qcom,use-phase-switching: indicates whether the driver should add/shed phases on the PMIC
+	ganged regulator as CPUs are hotplugged.
+
+[Second Level Nodes]
Required properties:
- compatible: Must be "qcom,krait-regulator"
- reg: Specifies the address and size for this regulator device,
@@ -27,19 +42,26 @@
binding, defined in regulator.txt, can also be used.
Example:
- krait0_vreg: regulator@f9088000 {
- compatible = "qcom,krait-regulator";
- regulator-name = "krait0";
- reg = <0xf9088000 0x1000>, /* APCS_ALIAS0_KPSS_ACS */
- <0xf908a800 0x1000>; /* APCS_ALIAS0_KPSS_MDD */
- reg-names = "acs", "mdd";
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1100000>;
- qcom,headroom-voltage = <150000>;
- qcom,retention-voltage = <745000>;
- qcom,ldo-default-voltage = <745000>;
- qcom,ldo-threshold-voltage = <750000>;
- qcom,ldo-delta-voltage = <50000>;
- qcom,cpu-num = 0;
- };
+ krait_pdn: krait-pdn {
+ compatible = "qcom,krait-pdn";
+ qcom,use-phase-switching;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ krait0_vreg: regulator@f9088000 {
+ compatible = "qcom,krait-regulator";
+ regulator-name = "krait0";
+ reg = <0xf9088000 0x1000>, /* APCS_ALIAS0_KPSS_ACS */
+ <0xf908a800 0x1000>; /* APCS_ALIAS0_KPSS_MDD */
+ reg-names = "acs", "mdd";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,headroom-voltage = <150000>;
+ qcom,retention-voltage = <675000>;
+ qcom,ldo-default-voltage = <750000>;
+ qcom,ldo-threshold-voltage = <850000>;
+ qcom,ldo-delta-voltage = <50000>;
+ qcom,cpu-num = <0>;
+ };
+ };
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 5df176e..6f53742 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -53,6 +53,14 @@
For printing struct resources. The 'R' and 'r' specifiers result in a
printed resource with ('R') or without ('r') a decoded flags member.
+Physical addresses:
+
+ %pa 0x01234567 or 0x0123456789abcdef
+
+ For printing a phys_addr_t type (and its derivatives, such as
+ resource_size_t) which can vary based on build options, regardless of
+ the width of the CPU data path. Passed by reference.
+
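+	A minimal usage sketch (here 'res' is assumed to be a pointer to a
+	struct resource):
+
+		phys_addr_t start = res->start;
+		printk("region start: %pa\n", &start);
+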
MAC/FDDI addresses:
%pM 00:01:02:03:04:05
@@ -134,9 +142,9 @@
printk("%lld", (long long)s64_var);
If <type> is dependent on a config option for its size (e.g., sector_t,
-blkcnt_t, phys_addr_t, resource_size_t) or is architecture-dependent
-for its size (e.g., tcflag_t), use a format specifier of its largest
-possible type and explicitly cast to it. Example:
+blkcnt_t) or is architecture-dependent for its size (e.g., tcflag_t), use a
+format specifier of its largest possible type and explicitly cast to it.
+Example:
printk("test: sector number/total blocks: %llu/%llu\n",
(unsigned long long)sector, (unsigned long long)blockcount);
diff --git a/arch/arm/boot/dts/msm-iommu-v0.dtsi b/arch/arm/boot/dts/msm-iommu-v0.dtsi
index 6ddeb68..0c44fb5 100644
--- a/arch/arm/boot/dts/msm-iommu-v0.dtsi
+++ b/arch/arm/boot/dts/msm-iommu-v0.dtsi
@@ -186,14 +186,14 @@
qcom,iommu-ctx@fd870000 {
reg = <0xfd870000 0x1000>;
- interrupts = <0 247 0>;
+ interrupts = <0 47 0>;
qcom,iommu-ctx-mids = <0>;
label = "mdps_0";
};
qcom,iommu-ctx@fd871000 {
reg = <0xfd871000 0x1000>;
- interrupts = <0 247 0>;
+ interrupts = <0 47 0>;
qcom,iommu-ctx-mids = <1>;
label = "mdps_1";
};
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index c1d8664..54f603d 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -632,7 +632,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <1>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
@@ -643,7 +643,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <0>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
@@ -654,7 +654,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <4>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
@@ -665,7 +665,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <2>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
@@ -676,7 +676,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <2>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
@@ -687,7 +687,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <2>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
@@ -698,7 +698,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <2>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
@@ -709,7 +709,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <2>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
@@ -720,7 +720,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <0>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
};
@@ -770,7 +770,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <2>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
qcom,btm-channel-number = <0x48>;
};
@@ -782,7 +782,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <1>;
- qcom,hw-settle-time = <0xf>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
qcom,btm-channel-number = <0x68>;
};
@@ -794,7 +794,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "absolute";
qcom,scale-function = <2>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
qcom,btm-channel-number = <0x70>;
};
@@ -806,7 +806,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <2>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
qcom,btm-channel-number = <0x78>;
};
@@ -818,7 +818,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <2>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
qcom,btm-channel-number = <0x80>;
};
diff --git a/arch/arm/boot/dts/msm8226-cdp.dts b/arch/arm/boot/dts/msm8226-cdp.dts
index 7263e42..1c431e8 100644
--- a/arch/arm/boot/dts/msm8226-cdp.dts
+++ b/arch/arm/boot/dts/msm8226-cdp.dts
@@ -19,6 +19,6 @@
qcom,msm-id = <145 1 0>;
serial@f991f000 {
- status = "disabled";
+ status = "ok";
};
};
diff --git a/arch/arm/boot/dts/msm8226-gpu.dtsi b/arch/arm/boot/dts/msm8226-gpu.dtsi
index aa174bf..2734726 100644
--- a/arch/arm/boot/dts/msm8226-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8226-gpu.dtsi
@@ -17,6 +17,11 @@
qcom,clk-map = <0x00000016>; /* KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE */
+ /* Bus Scale Settings */
+ qcom,msm-bus,name = "grp3d";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,active-only = <0>;
+ qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
<26 512 0 0>, <89 604 0 0>,
<26 512 0 1600000>, <89 604 0 6400000>,
diff --git a/arch/arm/boot/dts/msm8226-iommu.dtsi b/arch/arm/boot/dts/msm8226-iommu.dtsi
index d23d324..51c2f38 100644
--- a/arch/arm/boot/dts/msm8226-iommu.dtsi
+++ b/arch/arm/boot/dts/msm8226-iommu.dtsi
@@ -14,24 +14,216 @@
&jpeg_iommu {
status = "ok";
+
+ qcom,iommu-bfb-regs = <0x604c
+ 0x6050
+ 0x6514
+ 0x6540
+ 0x656c
+ 0x6314
+ 0x6394
+ 0x6414
+ 0x60ac
+ 0x615c
+ 0x620c
+ 0x6008
+ 0x600c
+ 0x6010
+ 0x6014>;
+
+ qcom,iommu-bfb-data = <0x0000ffff
+ 0x00000000
+ 0x4
+ 0x4
+ 0x0
+ 0x0
+ 0x10
+ 0x50
+ 0x0
+ 0x10
+ 0x20
+ 0x0
+ 0x0
+ 0x0
+ 0x0>;
};
&mdp_iommu {
status = "ok";
/* HACK: set to -1 during pre-si due to lack of TZ */
qcom,iommu-secure-id = <0xFFFFFFFF>;
+
+ qcom,iommu-bfb-regs = <0x604c
+ 0x6050
+ 0x6514
+ 0x6540
+ 0x656c
+ 0x60ac
+ 0x615c
+ 0x620c
+ 0x6314
+ 0x6394
+ 0x6414
+ 0x6008
+ 0x600c
+ 0x6010
+ 0x6014
+ 0x6018
+ 0x601c
+ 0x6020>;
+
+ qcom,iommu-bfb-data = <0xffffffff
+ 0x00000000
+ 0x00000004
+ 0x00000010
+ 0x00000000
+ 0x00000000
+ 0x00000013
+ 0x00000017
+ 0x0
+ 0x13
+ 0x23
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0>;
};
&venus_iommu {
status = "ok";
/* HACK: set to -1 during pre-si due to lack of TZ */
qcom,iommu-secure-id = <0xFFFFFFFF>;
+
+ qcom,iommu-bfb-regs = <0x604c
+ 0x6050
+ 0x6514
+ 0x6540
+ 0x656c
+ 0x60ac
+ 0x615c
+ 0x620c
+ 0x6314
+ 0x6394
+ 0x6414
+ 0x6008
+ 0x600c
+ 0x6010
+ 0x6014
+ 0x6018
+ 0x601c
+ 0x6020
+ 0x6024
+ 0x6028
+ 0x602c
+ 0x6030
+ 0x6034
+ 0x6038>;
+
+ qcom,iommu-bfb-data = <0xffffffff
+ 0xffffffff
+ 0x00000004
+ 0x00000008
+ 0x00000000
+ 0x00000000
+ 0x00000094
+ 0x000000b4
+ 0x0
+ 0x94
+ 0x114
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0>;
+};
+
+&venus_ns {
+ qcom,iommu-ctx-sids = <0 1 2 3 4 5 7>;
+};
+
+&venus_cp {
+ qcom,iommu-ctx-sids = <0x80 0x81 0x82 0x83 0x84>;
};
&kgsl_iommu {
status = "ok";
+
+ qcom,iommu-bfb-regs = <0x604c
+ 0x6050
+ 0x6514
+ 0x6540
+ 0x656c
+ 0x60ac
+ 0x615c
+ 0x620c
+ 0x6314
+ 0x6394
+ 0x6414
+ 0x6008>;
+
+ qcom,iommu-bfb-data = <0x00000003
+ 0x0
+ 0x00000004
+ 0x00000010
+ 0x00000000
+ 0x00000000
+ 0x00000001
+ 0x00000011
+ 0x0
+ 0x1
+ 0x41
+ 0x0>;
};
&vfe_iommu {
status = "ok";
+
+ qcom,iommu-bfb-regs = <0x604c
+ 0x6050
+ 0x6514
+ 0x6540
+ 0x656c
+ 0x6314
+ 0x6394
+ 0x6414
+ 0x60ac
+ 0x615c
+ 0x620c
+ 0x6008
+ 0x600c
+ 0x6010
+ 0x6014
+ 0x6018
+ 0x601c
+ 0x6020>;
+
+ qcom,iommu-bfb-data = <0xffffffff
+ 0x00000000
+ 0x4
+ 0x8
+ 0x0
+ 0x0
+ 0x1b
+ 0x5b
+ 0x0
+ 0x1b
+ 0x2b
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0
+ 0x0>;
};
diff --git a/arch/arm/boot/dts/msm8226-mtp.dts b/arch/arm/boot/dts/msm8226-mtp.dts
index dddb44b..ef0fdc0 100644
--- a/arch/arm/boot/dts/msm8226-mtp.dts
+++ b/arch/arm/boot/dts/msm8226-mtp.dts
@@ -16,9 +16,9 @@
/ {
model = "Qualcomm MSM 8226 MTP";
compatible = "qcom,msm8226-mtp", "qcom,msm8226";
- qcom,msm-id = <145 7 0>;
+ qcom,msm-id = <145 8 0>;
serial@f991f000 {
- status = "disabled";
+ status = "ok";
};
-};
\ No newline at end of file
+};
diff --git a/arch/arm/boot/dts/msm8226-qrd.dts b/arch/arm/boot/dts/msm8226-qrd.dts
index 14bf60b..7909435 100644
--- a/arch/arm/boot/dts/msm8226-qrd.dts
+++ b/arch/arm/boot/dts/msm8226-qrd.dts
@@ -16,9 +16,9 @@
/ {
model = "Qualcomm MSM 8226 QRD";
compatible = "qcom,msm8226-qrd", "qcom,msm8226";
- qcom,msm-id = <145 1 0>;
+ qcom,msm-id = <145 11 0>;
serial@f991f000 {
- status = "disabled";
+ status = "ok";
};
-};
\ No newline at end of file
+};
diff --git a/arch/arm/boot/dts/msm8226-regulator.dtsi b/arch/arm/boot/dts/msm8226-regulator.dtsi
index 3c0dd1e..8168826 100644
--- a/arch/arm/boot/dts/msm8226-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8226-regulator.dtsi
@@ -45,7 +45,7 @@
qcom,enable-time = <500>;
qcom,system-load = <100000>;
regulator-always-on;
- regulator-min-microvolt = <1150000>;
+ regulator-min-microvolt = <1050000>;
regulator-max-microvolt = <1150000>;
};
diff --git a/arch/arm/boot/dts/msm8226-sim.dts b/arch/arm/boot/dts/msm8226-sim.dts
index f9ab957..b6590b3 100644
--- a/arch/arm/boot/dts/msm8226-sim.dts
+++ b/arch/arm/boot/dts/msm8226-sim.dts
@@ -17,7 +17,7 @@
/ {
model = "Qualcomm MSM 8226 Simulator";
compatible = "qcom,msm8226-sim", "qcom,msm8226";
- qcom,msm-id = <145 1 0>;
+ qcom,msm-id = <145 16 0>;
serial@f991f000 {
status = "ok";
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index b5f2dd0..3533d19 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -541,8 +541,8 @@
<0xfc4b8000 0x1000>;
reg-names = "tsens_physical", "tsens_eeprom_physical";
interrupts = <0 184 0>;
- qcom,sensors = <7>;
- qcom,slope = <3200 3200 3200 3200 3200 3200 3200>;
+ qcom,sensors = <6>;
+ qcom,slope = <3200 3200 3200 3200 3200 3200>;
qcom,calib-mode = "fuse_map2";
};
@@ -670,7 +670,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <1>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
@@ -692,7 +692,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <4>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
};
diff --git a/arch/arm/boot/dts/msm8610-rumi.dts b/arch/arm/boot/dts/msm8610-rumi.dts
index af8ce2e..a4507e3 100644
--- a/arch/arm/boot/dts/msm8610-rumi.dts
+++ b/arch/arm/boot/dts/msm8610-rumi.dts
@@ -17,7 +17,7 @@
/ {
model = "Qualcomm MSM 8610 Rumi";
compatible = "qcom,msm8610-rumi", "qcom,msm8610";
- qcom,msm-id = <147 1 0>;
+ qcom,msm-id = <147 15 0>;
serial@f991f000 {
status = "ok";
diff --git a/arch/arm/boot/dts/msm8610-sim.dts b/arch/arm/boot/dts/msm8610-sim.dts
index 73ba807..2268daf 100644
--- a/arch/arm/boot/dts/msm8610-sim.dts
+++ b/arch/arm/boot/dts/msm8610-sim.dts
@@ -17,7 +17,7 @@
/ {
model = "Qualcomm MSM 8610 Simulator";
compatible = "qcom,msm8610-sim", "qcom,msm8610";
- qcom,msm-id = <147 1 0>;
+ qcom,msm-id = <147 16 0>;
serial@f991f000 {
status = "ok";
diff --git a/arch/arm/boot/dts/msm8974-coresight.dtsi b/arch/arm/boot/dts/msm8974-coresight.dtsi
index 91de30e..5df8f10 100644
--- a/arch/arm/boot/dts/msm8974-coresight.dtsi
+++ b/arch/arm/boot/dts/msm8974-coresight.dtsi
@@ -23,6 +23,7 @@
coresight-id = <0>;
coresight-name = "coresight-tmc-etr";
coresight-nr-inports = <1>;
+ coresight-ctis = <&cti0 &cti8>;
};
tpiu: tpiu@fc318000 {
@@ -60,6 +61,7 @@
coresight-child-list = <&replicator>;
coresight-child-ports = <0>;
coresight-default-sink;
+ coresight-ctis = <&cti0 &cti8>;
};
funnel_merg: funnel@fc31b000 {
@@ -217,4 +219,144 @@
qcom,blk-size = <3>;
};
+
+ cti0: cti@fc308000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc308000 0x1000>;
+ reg-names = "cti0-base";
+
+ coresight-id = <15>;
+ coresight-name = "coresight-cti0";
+ coresight-nr-inports = <0>;
+ };
+
+ cti1: cti@fc309000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc309000 0x1000>;
+ reg-names = "cti1-base";
+
+ coresight-id = <16>;
+ coresight-name = "coresight-cti1";
+ coresight-nr-inports = <0>;
+ };
+
+ cti2: cti@fc30a000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30a000 0x1000>;
+ reg-names = "cti2-base";
+
+ coresight-id = <17>;
+ coresight-name = "coresight-cti2";
+ coresight-nr-inports = <0>;
+ };
+
+ cti3: cti@fc30b000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30b000 0x1000>;
+ reg-names = "cti3-base";
+
+ coresight-id = <18>;
+ coresight-name = "coresight-cti3";
+ coresight-nr-inports = <0>;
+ };
+
+ cti4: cti@fc30c000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30c000 0x1000>;
+ reg-names = "cti4-base";
+
+ coresight-id = <19>;
+ coresight-name = "coresight-cti4";
+ coresight-nr-inports = <0>;
+ };
+
+ cti5: cti@fc30d000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30d000 0x1000>;
+ reg-names = "cti5-base";
+
+ coresight-id = <20>;
+ coresight-name = "coresight-cti5";
+ coresight-nr-inports = <0>;
+ };
+
+ cti6: cti@fc30e000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30e000 0x1000>;
+ reg-names = "cti6-base";
+
+ coresight-id = <21>;
+ coresight-name = "coresight-cti6";
+ coresight-nr-inports = <0>;
+ };
+
+ cti7: cti@fc30f000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30f000 0x1000>;
+ reg-names = "cti7-base";
+
+ coresight-id = <22>;
+ coresight-name = "coresight-cti7";
+ coresight-nr-inports = <0>;
+ };
+
+ cti8: cti@fc310000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc310000 0x1000>;
+ reg-names = "cti8-base";
+
+ coresight-id = <23>;
+ coresight-name = "coresight-cti8";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_l2: cti@fc340000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc340000 0x1000>;
+ reg-names = "cti-l2-base";
+
+ coresight-id = <24>;
+ coresight-name = "coresight-cti-l2";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_cpu0: cti@fc341000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc341000 0x1000>;
+ reg-names = "cti-cpu0-base";
+
+ coresight-id = <25>;
+ coresight-name = "coresight-cti-cpu0";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_cpu1: cti@fc342000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc342000 0x1000>;
+ reg-names = "cti-cpu1-base";
+
+ coresight-id = <26>;
+ coresight-name = "coresight-cti-cpu1";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_cpu2: cti@fc343000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc343000 0x1000>;
+ reg-names = "cti-cpu2-base";
+
+ coresight-id = <27>;
+ coresight-name = "coresight-cti-cpu2";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_cpu3: cti@fc344000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc344000 0x1000>;
+ reg-names = "cti-cpu3-base";
+
+ coresight-id = <28>;
+ coresight-name = "coresight-cti-cpu3";
+ coresight-nr-inports = <0>;
+ };
};
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index 08e4236..6e2719b 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -199,6 +199,90 @@
00 00
];
};
+ atmel,cfg_2 {
+ atmel,family-id = <0xa2>;
+ atmel,variant-id = <0x00>;
+ atmel,version = <0x11>;
+ atmel,build = <0xaa>;
+ atmel,config = [
+ /* Object 6, Instance = 0 */
+ 00 00 00 00 00 00
+ /* Object 38, Instance = 0 */
+ 19 01 00 0D 02 0D 00 00 00 00
+ 00 00 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00 00 00
+ 00 00 00 00
+ /* Object 7, Instance = 0 */
+ 20 08 32 C3
+ /* Object 8, Instance = 0 */
+ 41 00 14 14 00 00 00 01 00 00
+ /* Object 9, Instance = 0 */
+ 8F 00 00 20 34 00 87 4B 02 03
+ 00 05 03 40 0A 14 14 0A 80 07
+ 38 04 03 03 03 03 08 28 02 3C
+ 0F 0F 2E 33 01 00
+ /* Object 15, Instance = 0 */
+ 00 00 00 00 00 00 00 00 00 00
+ 00
+ /* Object 18, Instance = 0 */
+ 04 00
+ /* Object 24, Instance = 0 */
+ 00 00 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00 00
+ /* Object 25, Instance = 0 */
+ 00 00 54 6F F0 55 00 00 00 00
+ 00 00 00 00 00
+ /* Object 27, Instance = 0 */
+ 00 00 00 00 00 00 00
+ /* Object 40, Instance = 0 */
+ 00 14 14 14 14
+ /* Object 42, Instance = 0 */
+ 23 32 14 14 80 00 0A 00 05 05
+ /* Object 43, Instance = 0 */
+ 08 00 01 01 91 00 80 00 00 00
+ 00 00
+ /* Object 46, Instance = 0 */
+ 00 00 18 18 00 00 01 00 00 0F
+ 0A
+ /* Object 47, Instance = 0 */
+ 00 14 28 02 05 28 01 78 03 10
+ 00 00 0C 00 00 00 00 00 00 00
+ 00 00
+ /* Object 55, Instance = 0 */
+ 00 00 00 00 00 00 00
+ /* Object 56, Instance = 0 */
+ 01 00 00 30 13 14 14 14 15 15
+ 15 15 15 15 15 16 16 16 16 16
+ 16 16 16 16 16 15 14 14 14 14
+ 15 14 14 14 14 13 03 20 03 01
+ 0A 04 00 00 00 00 00 00 00 00
+ 1A
+ /* Object 57, Instance = 0 */
+ 00 00 00
+ /* Object 61, Instance = 0 */
+ 00 00 00 00 00
+ /* Object 62, Instance = 0 */
+ 00 03 00 07 02 00 00 00 00 00
+ 0F 17 23 2D 05 00 05 03 03 69
+ 14 14 34 11 64 06 06 04 40 00
+ 00 00 00 00 69 3C 02 04 01 00
+ 0A 14 14 03 03 03 03 00 00 00
+ 00 64 1E 01 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00 00 00
+ 00 00 00 00
+ /* Object 63, Instance = 0 */
+ 00 00 00 00 00 00 00 00 00 00
+ 00 00
+ /* Object 65, Instance = 0 */
+ 00 00 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00
+ /* Object 66, Instance = 0 */
+ 00 00 00 00 00
+ ];
+ };
};
};
@@ -296,7 +380,7 @@
};
&usb3 {
- qcom,charging-disabled;
+ qcom,otg-capability;
};
&pm8941_mvs1 {
@@ -624,3 +708,33 @@
};
};
};
+
+&pm8941_chg {
+ status = "ok";
+
+ qcom,chg-charging-disabled;
+
+ qcom,chg-chgr@1000 {
+ status = "ok";
+ };
+
+ qcom,chg-buck@1100 {
+ status = "ok";
+ };
+
+ qcom,chg-usb-chgpth@1300 {
+ status = "ok";
+ };
+
+ qcom,chg-dc-chgpth@1400 {
+ status = "ok";
+ };
+
+ qcom,chg-boost@1500 {
+ status = "ok";
+ };
+
+ qcom,chg-misc@1600 {
+ status = "ok";
+ };
+};
diff --git a/arch/arm/boot/dts/msm8974-v1-pm.dtsi b/arch/arm/boot/dts/msm8974-pm.dtsi
similarity index 100%
rename from arch/arm/boot/dts/msm8974-v1-pm.dtsi
rename to arch/arm/boot/dts/msm8974-pm.dtsi
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index 1a6d9ba..2dad8e7 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -423,68 +423,75 @@
};
/ {
- krait0_vreg: regulator@f9088000 {
- compatible = "qcom,krait-regulator";
- regulator-name = "krait0";
- reg = <0xf9088000 0x1000>, /* APCS_ALIAS0_KPSS_ACS */
- <0xf908a800 0x1000>; /* APCS_ALIAS0_KPSS_MDD */
- reg-names = "acs", "mdd";
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1100000>;
- qcom,headroom-voltage = <150000>;
- qcom,retention-voltage = <675000>;
- qcom,ldo-default-voltage = <750000>;
- qcom,ldo-threshold-voltage = <850000>;
- qcom,ldo-delta-voltage = <50000>;
- qcom,cpu-num = <0>;
- };
+ krait_pdn: krait-pdn {
+ compatible = "qcom,krait-pdn";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
- krait1_vreg: regulator@f9098000 {
- compatible = "qcom,krait-regulator";
- regulator-name = "krait1";
- reg = <0xf9098000 0x1000>, /* APCS_ALIAS1_KPSS_ACS */
- <0xf909a800 0x1000>; /* APCS_ALIAS1_KPSS_MDD */
- reg-names = "acs", "mdd";
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1100000>;
- qcom,headroom-voltage = <150000>;
- qcom,retention-voltage = <675000>;
- qcom,ldo-default-voltage = <750000>;
- qcom,ldo-threshold-voltage = <850000>;
- qcom,ldo-delta-voltage = <50000>;
- qcom,cpu-num = <1>;
- };
+ krait0_vreg: regulator@f9088000 {
+ compatible = "qcom,krait-regulator";
+ regulator-name = "krait0";
+ reg = <0xf9088000 0x1000>, /* APCS_ALIAS0_KPSS_ACS */
+ <0xf908a800 0x1000>; /* APCS_ALIAS0_KPSS_MDD */
+ reg-names = "acs", "mdd";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,headroom-voltage = <150000>;
+ qcom,retention-voltage = <675000>;
+ qcom,ldo-default-voltage = <750000>;
+ qcom,ldo-threshold-voltage = <850000>;
+ qcom,ldo-delta-voltage = <50000>;
+ qcom,cpu-num = <0>;
+ };
- krait2_vreg: regulator@f90a8000 {
- compatible = "qcom,krait-regulator";
- regulator-name = "krait2";
- reg = <0xf90a8000 0x1000>, /* APCS_ALIAS2_KPSS_ACS */
- <0xf90aa800 0x1000>; /* APCS_ALIAS2_KPSS_MDD */
- reg-names = "acs", "mdd";
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1100000>;
- qcom,headroom-voltage = <150000>;
- qcom,retention-voltage = <675000>;
- qcom,ldo-default-voltage = <750000>;
- qcom,ldo-threshold-voltage = <850000>;
- qcom,ldo-delta-voltage = <50000>;
- qcom,cpu-num = <2>;
- };
+ krait1_vreg: regulator@f9098000 {
+ compatible = "qcom,krait-regulator";
+ regulator-name = "krait1";
+ reg = <0xf9098000 0x1000>, /* APCS_ALIAS1_KPSS_ACS */
+ <0xf909a800 0x1000>; /* APCS_ALIAS1_KPSS_MDD */
+ reg-names = "acs", "mdd";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,headroom-voltage = <150000>;
+ qcom,retention-voltage = <675000>;
+ qcom,ldo-default-voltage = <750000>;
+ qcom,ldo-threshold-voltage = <850000>;
+ qcom,ldo-delta-voltage = <50000>;
+ qcom,cpu-num = <1>;
+ };
- krait3_vreg: regulator@f90b8000 {
- compatible = "qcom,krait-regulator";
- regulator-name = "krait3";
- reg = <0xf90b8000 0x1000>, /* APCS_ALIAS3_KPSS_ACS */
- <0xf90ba800 0x1000>; /* APCS_ALIAS3_KPSS_MDD */
- reg-names = "acs", "mdd";
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1100000>;
- qcom,headroom-voltage = <150000>;
- qcom,retention-voltage = <675000>;
- qcom,ldo-default-voltage = <750000>;
- qcom,ldo-threshold-voltage = <850000>;
- qcom,ldo-delta-voltage = <50000>;
- qcom,cpu-num = <3>;
+ krait2_vreg: regulator@f90a8000 {
+ compatible = "qcom,krait-regulator";
+ regulator-name = "krait2";
+ reg = <0xf90a8000 0x1000>, /* APCS_ALIAS2_KPSS_ACS */
+ <0xf90aa800 0x1000>; /* APCS_ALIAS2_KPSS_MDD */
+ reg-names = "acs", "mdd";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,headroom-voltage = <150000>;
+ qcom,retention-voltage = <675000>;
+ qcom,ldo-default-voltage = <750000>;
+ qcom,ldo-threshold-voltage = <850000>;
+ qcom,ldo-delta-voltage = <50000>;
+ qcom,cpu-num = <2>;
+ };
+
+ krait3_vreg: regulator@f90b8000 {
+ compatible = "qcom,krait-regulator";
+ regulator-name = "krait3";
+ reg = <0xf90b8000 0x1000>, /* APCS_ALIAS3_KPSS_ACS */
+ <0xf90ba800 0x1000>; /* APCS_ALIAS3_KPSS_MDD */
+ reg-names = "acs", "mdd";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,headroom-voltage = <150000>;
+ qcom,retention-voltage = <675000>;
+ qcom,ldo-default-voltage = <750000>;
+ qcom,ldo-threshold-voltage = <850000>;
+ qcom,ldo-delta-voltage = <50000>;
+ qcom,cpu-num = <3>;
+ };
};
spi_eth_vreg: spi_eth_phy_vreg {
diff --git a/arch/arm/boot/dts/msm8974-v1-iommu-domains.dtsi b/arch/arm/boot/dts/msm8974-v1-iommu-domains.dtsi
new file mode 100644
index 0000000..6ea5b9e
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-v1-iommu-domains.dtsi
@@ -0,0 +1,31 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+ qcom,iommu-domains {
+ compatible = "qcom,iommu-domains";
+
+ venus_domain_ns: qcom,iommu-domain1 {
+ label = "venus_ns";
+ qcom,iommu-contexts = <&venus_ns>;
+ qcom,virtual-addr-pool = <0x40000000 0x3f000000
+ 0x7f000000 0x1000000>;
+ };
+
+ venus_domain_cp: qcom,iommu-domain2 {
+ label = "venus_cp";
+ qcom,iommu-contexts = <&venus_cp>;
+ qcom,virtual-addr-pool = <0x1000000 0x3f000000>;
+ qcom,secure-domain;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/msm8974-v1.dtsi b/arch/arm/boot/dts/msm8974-v1.dtsi
index f4f387f..fc3a1d3 100644
--- a/arch/arm/boot/dts/msm8974-v1.dtsi
+++ b/arch/arm/boot/dts/msm8974-v1.dtsi
@@ -18,7 +18,7 @@
/include/ "msm8974.dtsi"
/include/ "msm8974-v1-iommu.dtsi"
-/include/ "msm8974-v1-pm.dtsi"
+/include/ "msm8974-v1-iommu-domains.dtsi"
/ {
android_usb@fc42b0c8 {
@@ -46,6 +46,15 @@
qcom,i2c-src-freq = <19200000>;
};
+/* CoreSight */
+&tmc_etr {
+ qcom,reset-flush-race;
+};
+
+&stm {
+ qcom,write-64bit;
+};
+
&msm_vidc {
qcom,vidc-cp-map = <0x1000000 0x3f000000>;
qcom,vidc-ns-map = <0x40000000 0x40000000>;
@@ -102,3 +111,11 @@
<1616000 2908800>,
<2020000 6400000>;
};
+
+&sfpb_spinlock {
+ status = "disable";
+};
+
+&ldrex_spinlock {
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/msm8974-v2-iommu-domains.dtsi b/arch/arm/boot/dts/msm8974-v2-iommu-domains.dtsi
new file mode 100644
index 0000000..a83815e
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-v2-iommu-domains.dtsi
@@ -0,0 +1,45 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+ qcom,iommu-domains {
+ compatible = "qcom,iommu-domains";
+
+ venus_domain_ns: qcom,iommu-domain1 {
+ label = "venus_ns";
+ qcom,iommu-contexts = <&venus_ns>;
+ qcom,virtual-addr-pool = <0x5dc00000 0x7f000000
+ 0xdcc00000 0x1000000>;
+ };
+
+ venus_domain_sec_bitstream: qcom,iommu-domain2 {
+ label = "venus_sec_bitstream";
+ qcom,iommu-contexts = <&venus_sec_bitstream>;
+ qcom,virtual-addr-pool = <0x4b000000 0x12c00000>;
+ qcom,secure-domain;
+ };
+
+ venus_domain_sec_pixel: qcom,iommu-domain3 {
+ label = "venus_sec_pixel";
+ qcom,iommu-contexts = <&venus_sec_pixel>;
+ qcom,virtual-addr-pool = <0x25800000 0x25800000>;
+ qcom,secure-domain;
+ };
+
+ venus_domain_sec_non_pixel: qcom,iommu-domain4 {
+ label = "venus_sec_non_pixel";
+ qcom,iommu-contexts = <&venus_sec_non_pixel>;
+ qcom,virtual-addr-pool = <0x1000000 0x24800000>;
+ qcom,secure-domain;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/msm8974-v2-pm.dtsi b/arch/arm/boot/dts/msm8974-v2-pm.dtsi
deleted file mode 100644
index 0ed55ff..0000000
--- a/arch/arm/boot/dts/msm8974-v2-pm.dtsi
+++ /dev/null
@@ -1,426 +0,0 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/include/ "skeleton.dtsi"
-
-/ {
- qcom,spm@f9089000 {
- compatible = "qcom,spm-v2";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xf9089000 0x1000>;
- qcom,core-id = <0>;
- qcom,saw2-ver-reg = <0xfd0>;
- qcom,saw2-cfg = <0x01>;
- qcom,saw2-avs-ctl = <0>;
- qcom,saw2-avs-hysteresis = <0>;
- qcom,saw2-avs-limit = <0>;
- qcom,saw2-avs-dly= <0>;
- qcom,saw2-spm-dly= <0x20000400>;
- qcom,saw2-spm-ctl = <0x1>;
- qcom,saw2-spm-cmd-wfi = [03 0b 0f];
- qcom,saw2-spm-cmd-ret = [42 1b 00 d0 03 d4 5b 0b 00 42 1b 0f];
- qcom,saw2-spm-cmd-spc = [00 20 50 80 60 70 10 E0 03 6E 70 3B
- E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
- qcom,saw2-spm-cmd-pc = [00 20 50 80 60 70 10 E0 07 6E 70 3B
- E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
- };
-
- qcom,spm@f9099000 {
- compatible = "qcom,spm-v2";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xf9099000 0x1000>;
- qcom,core-id = <1>;
- qcom,saw2-ver-reg = <0xfd0>;
- qcom,saw2-cfg = <0x01>;
- qcom,saw2-avs-ctl = <0>;
- qcom,saw2-avs-hysteresis = <0>;
- qcom,saw2-avs-limit = <0>;
- qcom,saw2-avs-dly= <0>;
- qcom,saw2-spm-dly= <0x20000400>;
- qcom,saw2-spm-ctl = <0x1>;
- qcom,saw2-spm-cmd-wfi = [03 0b 0f];
- qcom,saw2-spm-cmd-ret = [42 1b 00 d0 03 d4 5b 0b 00 42 1b 0f];
- qcom,saw2-spm-cmd-spc = [00 20 50 80 60 70 10 E0 03 6E 70 3B
- E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
- qcom,saw2-spm-cmd-pc = [00 20 50 80 60 70 10 E0 07 6E 70 3B
- E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
- };
-
- qcom,spm@f90a9000 {
- compatible = "qcom,spm-v2";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xf90a9000 0x1000>;
- qcom,core-id = <2>;
- qcom,saw2-ver-reg = <0xfd0>;
- qcom,saw2-cfg = <0x01>;
- qcom,saw2-avs-ctl = <0>;
- qcom,saw2-avs-hysteresis = <0>;
- qcom,saw2-avs-limit = <0>;
- qcom,saw2-avs-dly= <0>;
- qcom,saw2-spm-dly= <0x20000400>;
- qcom,saw2-spm-ctl = <0x1>;
- qcom,saw2-spm-cmd-wfi = [03 0b 0f];
- qcom,saw2-spm-cmd-ret = [42 1b 00 d0 03 d4 5b 0b 00 42 1b 0f];
- qcom,saw2-spm-cmd-spc = [00 20 50 80 60 70 10 E0 03 6E 70 3B
- E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
- qcom,saw2-spm-cmd-pc = [00 20 50 80 60 70 10 E0 07 6E 70 3B
- E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
- };
-
- qcom,spm@f90b9000 {
- compatible = "qcom,spm-v2";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xf90b9000 0x1000>;
- qcom,core-id = <3>;
- qcom,saw2-ver-reg = <0xfd0>;
- qcom,saw2-cfg = <0x01>;
- qcom,saw2-avs-ctl = <0>;
- qcom,saw2-avs-hysteresis = <0>;
- qcom,saw2-avs-limit = <0>;
- qcom,saw2-avs-dly= <0>;
- qcom,saw2-spm-dly= <0x20000400>;
- qcom,saw2-spm-ctl = <0x1>;
- qcom,saw2-spm-cmd-wfi = [03 0b 0f];
- qcom,saw2-spm-cmd-ret = [42 1b 00 d0 03 d4 5b 0b 00 42 1b 0f];
- qcom,saw2-spm-cmd-spc = [00 20 50 80 60 70 10 E0 03 6E 70 3B
- E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
- qcom,saw2-spm-cmd-pc = [00 20 50 80 60 70 10 E0 07 6E 70 3B
- E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
- };
-
- qcom,spm@f9012000 {
- compatible = "qcom,spm-v2";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xf9012000 0x1000>;
- qcom,core-id = <0xffff>; /* L2/APCS SAW */
- qcom,saw2-ver-reg = <0xfd0>;
- qcom,saw2-cfg = <0x14>;
- qcom,saw2-avs-ctl = <0>;
- qcom,saw2-avs-hysteresis = <0>;
- qcom,saw2-avs-limit = <0>;
- qcom,saw2-avs-dly= <0>;
- qcom,saw2-spm-dly= <0x20000400>;
- qcom,saw2-spm-ctl = <0x1>;
- qcom,saw2-pmic-data0 = <0x02030080>;
- qcom,saw2-pmic-data1 = <0x00030000>;
- qcom,vctl-timeout-us = <50>;
- qcom,vctl-port = <0x0>;
- qcom,phase-port = <0x1>;
- qcom,pfm-port = <0x2>;
- qcom,saw2-spm-cmd-ret = [1f 00 20 03 22 00 0f];
- qcom,saw2-spm-cmd-gdhs = [00 20 32 60 70 80 42 07 78 80 44 22 50
- 3b 60 02 32 50 0f];
- qcom,saw2-spm-cmd-pc = [00 10 32 60 70 80 b0 11 42 07 01 b0 78
- 80 12 44 50 3b 60 02 32 50 0f];
- };
-
- qcom,lpm-resources {
- compatible = "qcom,lpm-resources";
- #address-cells = <1>;
- #size-cells = <0>;
-
- qcom,lpm-resources@0 {
- reg = <0x0>;
- qcom,name = "vdd-dig";
- qcom,resource-type = <0>;
- qcom,type = <0x62706d73>; /* "smpb" */
- qcom,id = <0x02>;
- qcom,key = <0x6e726f63>; /* "corn" */
- qcom,init-value = <5>; /* Super Turbo */
- };
-
- qcom,lpm-resources@1 {
- reg = <0x1>;
- qcom,name = "vdd-mem";
- qcom,resource-type = <0>;
- qcom,type = <0x62706d73>; /* "smpb" */
- qcom,id = <0x01>;
- qcom,key = <0x7675>; /* "uv" */
- qcom,init-value = <1050000>; /* Super Turbo */
- };
-
- qcom,lpm-resources@2 {
- reg = <0x2>;
- qcom,name = "pxo";
- qcom,resource-type = <0>;
- qcom,type = <0x306b6c63>; /* "clk0" */
- qcom,id = <0x00>;
- qcom,key = <0x62616e45>; /* "Enab" */
- qcom,init-value = <1>; /* On */
- };
-
- qcom,lpm-resources@3 {
- reg = <0x3>;
- qcom,name = "l2";
- qcom,resource-type = <1>;
- qcom,init-value = <2>; /* Retention */
- };
- };
-
- qcom,lpm-levels {
- compatible = "qcom,lpm-levels";
- #address-cells = <1>;
- #size-cells = <0>;
-
- qcom,lpm-level@0 {
- reg = <0x0>;
- qcom,mode = <0>; /* MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT */
- qcom,xo = <1>; /* ON */
- qcom,l2 = <2>; /* Retention */
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom.gpios-detectable;
- qcom,latency-us = <1>;
- qcom,ss-power = <784>;
- qcom,energy-overhead = <190000>;
- qcom,time-overhead = <100>;
- };
-
- qcom,lpm-level@1 {
- reg = <0x1>;
- qcom,mode = <4>; /* MSM_PM_SLEEP_MODE_RETENTION*/
- qcom,xo = <1>; /* ON */
- qcom,l2 = <2>; /* Retention */
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom.gpios-detectable;
- qcom,latency-us = <75>;
- qcom,ss-power = <735>;
- qcom,energy-overhead = <77341>;
- qcom,time-overhead = <105>;
- };
-
- qcom,lpm-level@2 {
- reg = <0x2>;
- qcom,mode = <2>; /* MSM_PM_SLEEP_MODE_STANDALONE_POWER_COLLAPSE */
- qcom,xo = <1>; /* ON */
- qcom,l2 = <2>; /* Retention */
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom.gpios-detectable;
- qcom,latency-us = <95>;
- qcom,ss-power = <725>;
- qcom,energy-overhead = <99500>;
- qcom,time-overhead = <130>;
- };
-
- qcom,lpm-level@3 {
- reg = <0x3>;
- qcom,mode = <3>; /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
- qcom,xo = <1>; /* ON */
- qcom,l2 = <1>; /* GDHS */
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,irqs-detectable;
- qcom.gpios-detectable;
- qcom,latency-us = <2000>;
- qcom,ss-power = <138>;
- qcom,energy-overhead = <1208400>;
- qcom,time-overhead = <3200>;
- };
-
- qcom,lpm-level@4 {
- reg = <0x4>;
- qcom,mode = <3>; /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
- qcom,xo = <1>; /* ON */
- qcom,l2 = <1>; /* GDHS */
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* SVS SOC */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
- qcom,irqs-detectable;
- qcom.gpios-detectable;
- qcom,latency-us = <3000>;
- qcom,ss-power = <110>;
- qcom,energy-overhead = <1250300>;
- qcom,time-overhead = <3500>;
- };
-
- qcom,lpm-level@5 {
- reg = <0x5>;
- qcom,mode = <3>; /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
- qcom,xo = <0>; /* OFF */
- qcom,l2 = <1>; /* GDHS */
- qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
- qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
- qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
- qcom,vdd-dig-lower-bound = <4>; /* NORMAL */
- qcom,latency-us = <3000>;
- qcom,ss-power = <68>;
- qcom,energy-overhead = <1350200>;
- qcom,time-overhead = <4000>;
- };
-
- qcom,lpm-level@6 {
- reg = <0x6>;
- qcom,mode= <3>; /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
- qcom,xo = <0>; /* OFF */
- qcom,l2 = <1>; /* GDHS */
- qcom,vdd-mem-upper-bound = <950000>; /* NORMAL */
- qcom,vdd-mem-lower-bound = <950000>; /* SVS SOC */
- qcom,vdd-dig-upper-bound = <4>; /* NORMAL */
- qcom,vdd-dig-lower-bound = <3>; /* SVS SOC */
- qcom,latency-us = <18000>;
- qcom,ss-power = <10>;
- qcom,energy-overhead = <3202600>;
- qcom,time-overhead = <27000>;
- };
-
- qcom,lpm-level@7 {
- reg = <0x7>;
- qcom,mode= <3>; /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
- qcom,xo = <0>; /* OFF */
- qcom,l2 = <0>; /* OFF */
- qcom,vdd-mem-upper-bound = <950000>; /* SVS SOC */
- qcom,vdd-mem-lower-bound = <675000>; /* RETENTION */
- qcom,vdd-dig-upper-bound = <3>; /* SVS SOC */
- qcom,vdd-dig-lower-bound = <1>; /* RETENTION */
- qcom,latency-us = <20000>;
- qcom,ss-power = <2>;
- qcom,energy-overhead = <4252000>;
- qcom,time-overhead = <32000>;
- };
- };
-
- qcom,pm-boot {
- compatible = "qcom,pm-boot";
- qcom,mode = <0>; /* MSM_PM_BOOT_CONFIG_TZ */
- };
-
- qcom,mpm@fc4281d0 {
- compatible = "qcom,mpm-v2";
- reg = <0xfc4281d0 0x1000>, /* MSM_RPM_MPM_BASE 4K */
- <0xf9011008 0x4>; /* MSM_APCS_GCC_BASE 4K */
- reg-names = "vmpm", "ipc";
- interrupts = <0 171 1>;
-
- qcom,ipc-bit-offset = <1>;
-
- qcom,gic-parent = <&intc>;
- qcom,gic-map = <47 172>, /* usb2_hsic_async_wakeup_irq */
- <53 104>, /* mdss_irq */
- <62 222>, /* ee0_krait_hlos_spmi_periph_irq */
- <0xff 57>, /* mss_to_apps_irq(0) */
- <0xff 58>, /* mss_to_apps_irq(1) */
- <0xff 59>, /* mss_to_apps_irq(2) */
- <0xff 60>, /* mss_to_apps_irq(3) */
- <0xff 173>, /* o_wcss_apss_smd_hi */
- <0xff 174>, /* o_wcss_apss_smd_med */
- <0xff 175>, /* o_wcss_apss_smd_low */
- <0xff 176>, /* o_wcss_apss_smsm_irq */
- <0xff 177>, /* o_wcss_apss_wlan_data_xfer_done */
- <0xff 178>, /* o_wcss_apss_wlan_rx_data_avail */
- <0xff 179>, /* o_wcss_apss_asic_intr
-
- <0xff 188>, /* lpass_irq_out_apcs(0) */
- <0xff 189>, /* lpass_irq_out_apcs(1) */
- <0xff 190>, /* lpass_irq_out_apcs(2) */
- <0xff 191>, /* lpass_irq_out_apcs(3) */
- <0xff 192>, /* lpass_irq_out_apcs(4) */
- <0xff 193>, /* lpass_irq_out_apcs(5) */
- <0xff 194>, /* lpass_irq_out_apcs(6) */
- <0xff 195>, /* lpass_irq_out_apcs(7) */
- <0xff 196>, /* lpass_irq_out_apcs(8) */
- <0xff 197>, /* lpass_irq_out_apcs(9) */
- <0xff 200>, /* rpm_ipc(4) */
- <0xff 201>, /* rpm_ipc(5) */
- <0xff 202>, /* rpm_ipc(6) */
- <0xff 203>, /* rpm_ipc(7) */
- <0xff 204>, /* rpm_ipc(24) */
- <0xff 205>, /* rpm_ipc(25) */
- <0xff 206>, /* rpm_ipc(26) */
- <0xff 207>, /* rpm_ipc(27) */
- <0xff 240>; /* summary_irq_kpss */
-
- qcom,gpio-parent = <&msmgpio>;
- qcom,gpio-map = <3 102>,
- <4 1 >,
- <5 5 >,
- <6 9 >,
- <7 18>,
- <8 20>,
- <9 24>,
- <10 27>,
- <11 28>,
- <12 34>,
- <13 35>,
- <14 37>,
- <15 42>,
- <16 44>,
- <17 46>,
- <18 50>,
- <19 54>,
- <20 59>,
- <21 61>,
- <22 62>,
- <23 64>,
- <24 65>,
- <25 66>,
- <26 67>,
- <27 68>,
- <28 71>,
- <29 72>,
- <30 73>,
- <31 74>,
- <32 75>,
- <33 77>,
- <34 79>,
- <35 80>,
- <36 82>,
- <37 86>,
- <38 92>,
- <39 93>,
- <40 95>;
- };
-
- qcom,pm-8x60@fe805664 {
- compatible = "qcom,pm-8x60";
- reg = <0xfe805664 0x40>;
- qcom,pc-mode = <0>; /*MSM_PC_TZ_L2_INT */
- qcom,use-sync-timer;
- qcom,saw-turns-off-pll;
- };
-
- qcom,rpm-log@fc19dc00 {
- compatible = "qcom,rpm-log";
- reg = <0xfc19dc00 0x4000>;
- qcom,rpm-addr-phys = <0xfc000000>;
- qcom,offset-version = <4>;
- qcom,offset-page-buffer-addr = <36>;
- qcom,offset-log-len = <40>;
- qcom,offset-log-len-mask = <44>;
- qcom,offset-page-indices = <56>;
- };
-
- qcom,rpm-stats@0xfc19dbd0{
- compatible = "qcom,rpm-stats";
- reg = <0xfc19dbd0 0x1000>;
- reg-names = "phys_addr_base";
- qcom,sleep-stats-version = <2>;
- };
-};
diff --git a/arch/arm/boot/dts/msm8974-v2.dtsi b/arch/arm/boot/dts/msm8974-v2.dtsi
index a1afda1..7e6c0bf 100644
--- a/arch/arm/boot/dts/msm8974-v2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.dtsi
@@ -18,7 +18,7 @@
/include/ "msm8974.dtsi"
/include/ "msm8974-v2-iommu.dtsi"
-/include/ "msm8974-v2-pm.dtsi"
+/include/ "msm8974-v2-iommu-domains.dtsi"
/ {
android_usb@fe8050c8 {
@@ -111,3 +111,7 @@
<2024000 1212000>,
<2132000 1279000>;
};
+
+&krait_pdn {
+ qcom,use-phase-switching;
+};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index b342fd8..2c4d5d9 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -11,6 +11,7 @@
*/
/include/ "skeleton.dtsi"
+/include/ "msm8974-pm.dtsi"
/include/ "msm8974-camera.dtsi"
/include/ "msm8974-coresight.dtsi"
/include/ "msm-gdsc.dtsi"
@@ -858,6 +859,10 @@
qcom,firmware-name = "wcnss";
};
+ qcom,iris-fm {
+ compatible = "qcom,iris_fm";
+ };
+
qcom,wcnss-wlan@fb000000 {
compatible = "qcom,wcnss_wlan";
reg = <0xfb000000 0x280000>;
@@ -1134,11 +1139,10 @@
};
uart7: uart@f995d000 { /*BLSP #2, UART #7 */
- cell-index = <0>;
compatible = "qcom,msm-hsuart-v14";
status = "disabled";
reg = <0xf995d000 0x1000>,
- <0xf9944000 0x5000>;
+ <0xf9944000 0x19000>;
reg-names = "core_mem", "bam_mem";
interrupts = <0 113 0>, <0 239 0>;
interrupt-names = "core_irq", "bam_irq";
@@ -1225,6 +1229,23 @@
qcom,bcl {
compatible = "qcom,bcl";
};
+
+ qcom,ssm {
+ compatible = "qcom,ssm";
+ qcom,channel-name = "SSM_RTR";
+ };
+
+ sfpb_spinlock: qcom,ipc-spinlock@fd484000 {
+ compatible = "qcom,ipc-spinlock-sfpb";
+ reg = <0xfd484000 0x1000>;
+ qcom,num-locks = <32>;
+ };
+
+ ldrex_spinlock: qcom,ipc-spinlock@fa00000 {
+ compatible = "qcom,ipc-spinlock-ldrex";
+ reg = <0xfa00000 0x200000>;
+ status = "disable";
+ };
};
&gdsc_venus {
diff --git a/arch/arm/boot/dts/msm9625-coresight.dtsi b/arch/arm/boot/dts/msm9625-coresight.dtsi
index 6a52361..0af8fa5 100644
--- a/arch/arm/boot/dts/msm9625-coresight.dtsi
+++ b/arch/arm/boot/dts/msm9625-coresight.dtsi
@@ -23,6 +23,7 @@
coresight-id = <0>;
coresight-name = "coresight-tmc-etr";
coresight-nr-inports = <1>;
+ coresight-ctis = <&cti0 &cti8>;
};
tpiu: tpiu@fc318000 {
@@ -60,6 +61,7 @@
coresight-child-list = <&replicator>;
coresight-child-ports = <0>;
coresight-default-sink;
+ coresight-ctis = <&cti0 &cti8>;
};
funnel_merg: funnel@fc31b000 {
@@ -141,4 +143,104 @@
qcom,blk-size = <1>;
};
+
+ cti0: cti@fc308000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc308000 0x1000>;
+ reg-names = "cti0-base";
+
+ coresight-id = <10>;
+ coresight-name = "coresight-cti0";
+ coresight-nr-inports = <0>;
+ };
+
+ cti1: cti@fc309000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc309000 0x1000>;
+ reg-names = "cti1-base";
+
+ coresight-id = <11>;
+ coresight-name = "coresight-cti1";
+ coresight-nr-inports = <0>;
+ };
+
+ cti2: cti@fc30a000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30a000 0x1000>;
+ reg-names = "cti2-base";
+
+ coresight-id = <12>;
+ coresight-name = "coresight-cti2";
+ coresight-nr-inports = <0>;
+ };
+
+ cti3: cti@fc30b000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30b000 0x1000>;
+ reg-names = "cti3-base";
+
+ coresight-id = <13>;
+ coresight-name = "coresight-cti3";
+ coresight-nr-inports = <0>;
+ };
+
+ cti4: cti@fc30c000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30c000 0x1000>;
+ reg-names = "cti4-base";
+
+ coresight-id = <14>;
+ coresight-name = "coresight-cti4";
+ coresight-nr-inports = <0>;
+ };
+
+ cti5: cti@fc30d000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30d000 0x1000>;
+ reg-names = "cti5-base";
+
+ coresight-id = <15>;
+ coresight-name = "coresight-cti5";
+ coresight-nr-inports = <0>;
+ };
+
+ cti6: cti@fc30e000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30e000 0x1000>;
+ reg-names = "cti6-base";
+
+ coresight-id = <16>;
+ coresight-name = "coresight-cti6";
+ coresight-nr-inports = <0>;
+ };
+
+ cti7: cti@fc30f000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc30f000 0x1000>;
+ reg-names = "cti7-base";
+
+ coresight-id = <17>;
+ coresight-name = "coresight-cti7";
+ coresight-nr-inports = <0>;
+ };
+
+ cti8: cti@fc310000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc310000 0x1000>;
+ reg-names = "cti8-base";
+
+ coresight-id = <18>;
+ coresight-name = "coresight-cti8";
+ coresight-nr-inports = <0>;
+ };
+
+ cti_cpu: cti@fc333000 {
+ compatible = "arm,coresight-cti";
+ reg = <0xfc333000 0x1000>;
+ reg-names = "cti-cpu-base";
+
+ coresight-id = <19>;
+ coresight-name = "coresight-cti-cpu";
+ coresight-nr-inports = <0>;
+ };
};
diff --git a/arch/arm/boot/dts/msm9625-v1.dtsi b/arch/arm/boot/dts/msm9625-v1.dtsi
index 3e88158..54fe443 100644
--- a/arch/arm/boot/dts/msm9625-v1.dtsi
+++ b/arch/arm/boot/dts/msm9625-v1.dtsi
@@ -29,8 +29,23 @@
reg = <0xfc42a8c8 0xc8>;
qcom,android-usb-swfi-latency = <100>;
};
+
+ qcom,bam_dmux@fc834000 {
+ compatible = "qcom,bam_dmux";
+ reg = <0xfc834000 0x7000>;
+ interrupts = <0 29 1>;
+ };
};
&ipa_hw {
qcom,ipa-hw-ver = <1>; /* IPA h-w revision */
};
+
+/* CoreSight */
+&tmc_etr {
+ qcom,reset-flush-race;
+};
+
+&stm {
+ qcom,write-64bit;
+};
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index 9247826..f22fc28 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -317,20 +317,16 @@
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
};
- qcom,bam_dmux@fc834000 {
- compatible = "qcom,bam_dmux";
- reg = <0xfc834000 0x7000>;
- interrupts = <0 29 1>;
- };
-
ipa_hw: qcom,ipa@fd4c0000 {
compatible = "qcom,ipa";
reg = <0xfd4c0000 0x26000>,
- <0xfd4c4000 0x14818>;
- reg-names = "ipa-base", "bam-base";
+ <0xfd4c4000 0x14818>,
+ <0xfc834000 0x7000>;
+ reg-names = "ipa-base", "bam-base", "a2-bam-base";
interrupts = <0 252 0>,
- <0 253 0>;
- interrupt-names = "ipa-irq", "bam-irq";
+ <0 253 0>,
+ <0 29 1>;
+ interrupt-names = "ipa-irq", "bam-irq", "a2-bam-irq";
qcom,pipe1 {
label = "a2-to-ipa";
@@ -735,7 +731,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <0>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
@@ -746,7 +742,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <2>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
@@ -757,7 +753,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <2>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
@@ -768,7 +764,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <4>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
@@ -779,7 +775,7 @@
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
qcom,scale-function = <4>;
- qcom,hw-settle-time = <0>;
+ qcom,hw-settle-time = <2>;
qcom,fast-avg-setup = <0>;
};
};
diff --git a/arch/arm/boot/dts/skeleton.dtsi b/arch/arm/boot/dts/skeleton.dtsi
index b41d241..f9988cd 100644
--- a/arch/arm/boot/dts/skeleton.dtsi
+++ b/arch/arm/boot/dts/skeleton.dtsi
@@ -9,5 +9,10 @@
#size-cells = <1>;
chosen { };
aliases { };
- memory { device_type = "memory"; reg = <0 0>; };
+ memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "memory";
+ reg = <0 0>;
+ };
};
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index 896055d..2e4f84d 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -61,6 +61,7 @@
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_MSM_DIRECT_SCLK_ACCESS=y
CONFIG_MSM_WATCHDOG_V2=y
+CONFIG_MSM_DLOAD_MODE=y
CONFIG_MSM_ADSP_LOADER=m
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -77,6 +78,7 @@
CONFIG_VFP=y
CONFIG_NEON=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_WAKELOCK=y
CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -180,6 +182,9 @@
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_MMC_MSM_SPS_SUPPORT=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_DRV_MSM is not set
+CONFIG_RTC_DRV_QPNP=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 952171c..5c03630 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -356,10 +356,6 @@
CONFIG_SND_USB_AUDIO=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSM8974=y
-CONFIG_UHID=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
CONFIG_USB_SUSPEND=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index a3a4487..faa0471 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -364,10 +364,6 @@
CONFIG_SND_USB_AUDIO=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSM8974=y
-CONFIG_UHID=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
CONFIG_USB_SUSPEND=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
diff --git a/arch/arm/configs/msm9625-perf_defconfig b/arch/arm/configs/msm9625-perf_defconfig
new file mode 100644
index 0000000..2070f46
--- /dev/null
+++ b/arch/arm/configs/msm9625-perf_defconfig
@@ -0,0 +1,324 @@
+# CONFIG_ARM_PATCH_PHYS_VIRT is not set
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+# CONFIG_FAIR_GROUP_SCHED is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_MSM=y
+CONFIG_ARCH_MSM9625=y
+# CONFIG_MSM_STACKED_MEMORY is not set
+CONFIG_CPU_HAS_L2_PMU=y
+# CONFIG_MSM_FIQ_SUPPORT is not set
+# CONFIG_MSM_PROC_COMM is not set
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_PKG4=y
+CONFIG_MSM_BAM_DMUX=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_IPC_LOGGING=y
+CONFIG_MSM_IPC_ROUTER=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_RPM_REGULATOR_SMD=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_MSM_DIRECT_SCLK_ACCESS=y
+CONFIG_MSM_BUS_SCALING=y
+CONFIG_MSM_WATCHDOG_V2=y
+CONFIG_MSM_DLOAD_MODE=y
+CONFIG_MSM_ADSP_LOADER=m
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_VMALLOC_RESERVE=0x19000000
+CONFIG_USE_OF=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_RUNTIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IPV6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_DEBUG=y
+CONFIG_NETFILTER_NETLINK_QUEUE=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_SNMP=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_IP_SET=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP_NF_TARGET_ULOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NATTYPE_MODULE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_CLS_FW=y
+CONFIG_CFG80211=m
+CONFIG_NL80211_TESTMODE=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_MTD_MSM_NAND is not set
+CONFIG_MTD_MSM_QPIC_NAND=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+# CONFIG_ANDROID_PMEM is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+CONFIG_KS8851=y
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_MSM_RMNET is not set
+CONFIG_MSM_RMNET_BAM=y
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+CONFIG_ATH6K_LEGACY_EXT=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=m
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM_HSL=y
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_MSM_UARTDM_Core_v14=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QUP=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB=y
+CONFIG_MSM_QPNP_INT=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_GPIO_QPNP_PIN_DEBUG=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_SMB137C_CHARGER=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_TSENS8974=y
+CONFIG_WCD9320_CODEC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MDM9625=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CI13XXX_MSM=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM_HSIC=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_EMBEDDED_SDIO=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=m
+CONFIG_MMC_MSM=y
+CONFIG_MMC_MSM_SPS_SUPPORT=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_DRV_MSM is not set
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_SPS=y
+CONFIG_USB_BAM=y
+CONFIG_SPS_SUPPORT_BAMDMA=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_POWER_ON=y
+CONFIG_IPA=y
+CONFIG_ECM_IPA=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_TMC=y
+CONFIG_CORESIGHT_TPIU=y
+CONFIG_CORESIGHT_FUNNEL=y
+CONFIG_CORESIGHT_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_ETM=y
+CONFIG_CORESIGHT_EVENT=m
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_YAFFS_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_YAFFS_DISABLE_TAGS_ECC=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_USER=y
+CONFIG_KEYS=y
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=m
+CONFIG_CRYPTO_DEV_QCE=m
+CONFIG_CRYPTO_DEV_QCEDEV=m
+CONFIG_CRC_CCITT=y
+CONFIG_LIBCRC32C=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EHSET=y
+CONFIG_USB_EHCI_MSM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DEBUG=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_MSM_RTB=y
+CONFIG_MSM_MEMORY_DUMP=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index ecf43bb..9a1f872 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -247,6 +247,7 @@
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_POWER_ON=y
CONFIG_IPA=y
+CONFIG_ECM_IPA=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_TMC=y
CONFIG_CORESIGHT_TPIU=y
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index e35a806..f0b706a 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -411,6 +411,8 @@
select CPU_FREQ_GOV_USERSPACE
select CPU_FREQ_GOV_ONDEMAND
select MSM_PIL
+ select MSM_RUN_QUEUE_STATS
+ select ARM_HAS_SG_CHAIN
config ARCH_MSM8226
bool "MSM8226"
@@ -437,6 +439,7 @@
select MEMORY_HOLE_CARVEOUT
select DONT_MAP_HOLE_AFTER_MEMBANK0
select MSM_BUS_SCALING
+ select ARM_HAS_SG_CHAIN
endmenu
choice
@@ -1036,8 +1039,8 @@
default "0x80200000" if ARCH_MSM8930
default "0x00000000" if ARCH_MSM8974
default "0x00000000" if ARCH_MPQ8092
- default "0x00000000" if ARCH_MSM8226
- default "0x00000000" if ARCH_MSM8610
+ default "0x00100000" if ARCH_MSM8226
+ default "0x00100000" if ARCH_MSM8610
default "0x10000000" if ARCH_FSM9XXX
default "0x00200000" if ARCH_MSM9625
default "0x00200000" if !MSM_STACKED_MEMORY
@@ -2312,7 +2315,6 @@
config MSM_DLOAD_MODE
bool "Enable download mode on crashes"
- depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_MSM9615 || ARCH_MSM8974 || ARCH_MSM9625
default n
help
This makes the SoC enter download mode when it resets
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index 202b8dd..f683b33 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -72,8 +72,11 @@
dtb-$(CONFIG_ARCH_MSM9625) += msm9625-v2-1-cdp.dtb
# MSM8226
- zreladdr-$(CONFIG_ARCH_MSM8226) := 0x00008000
+ zreladdr-$(CONFIG_ARCH_MSM8226) := 0x00108000
dtb-$(CONFIG_ARCH_MSM8226) += msm8226-sim.dtb
+ dtb-$(CONFIG_ARCH_MSM8226) += msm8226-cdp.dtb
+ dtb-$(CONFIG_ARCH_MSM8226) += msm8226-mtp.dtb
+ dtb-$(CONFIG_ARCH_MSM8226) += msm8226-qrd.dtb
# FSM9XXX
zreladdr-$(CONFIG_ARCH_FSM9XXX) := 0x10008000
@@ -84,6 +87,6 @@
zreladdr-$(CONFIG_ARCH_MPQ8092) := 0x00008000
# MSM8610
- zreladdr-$(CONFIG_ARCH_MSM8610) := 0x00008000
+ zreladdr-$(CONFIG_ARCH_MSM8610) := 0x00108000
dtb-$(CONFIG_ARCH_MSM8610) += msm8610-rumi.dtb
dtb-$(CONFIG_ARCH_MSM8610) += msm8610-sim.dtb
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index beb064b..9ed71da 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -2887,6 +2887,7 @@
#ifdef CONFIG_MSM_ROTATOR
&msm_rotator_device,
#endif
+ &msm8064_cpu_slp_status,
};
static struct platform_device
diff --git a/arch/arm/mach-msm/board-8930-pmic.c b/arch/arm/mach-msm/board-8930-pmic.c
index cd292e0..4f398f4 100644
--- a/arch/arm/mach-msm/board-8930-pmic.c
+++ b/arch/arm/mach-msm/board-8930-pmic.c
@@ -374,7 +374,7 @@
.op_fdbck = true,
.ovp_val = WLED_OVP_32V,
.boost_curr_lim = WLED_CURR_LIMIT_525mA,
- .num_strings = 1,
+ .strings = WLED_SECOND_STRING,
};
static int pm8038_led0_pwm_duty_pcts[56] = {
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index 25ba1aa..fbcc6f1 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -2473,6 +2473,7 @@
&msm8930_iommu_domain_device,
&msm_tsens_device,
&msm8930_cache_dump_device,
+ &msm8930_cpu_slp_status,
};
static struct platform_device *cdp_devices[] __initdata = {
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 95f618a..819ccc5 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -2955,6 +2955,7 @@
&msm8960_cache_dump_device,
&msm8960_iommu_domain_device,
&msm_tsens_device,
+ &msm8960_cpu_slp_status,
};
static struct platform_device *cdp_devices[] __initdata = {
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index 1de83a7..f864583 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -85,21 +85,6 @@
of_scan_flat_dt(dt_scan_for_memory_hole, msm8974_reserve_table);
}
-static struct platform_device msm_fm_platform_init = {
- .name = "iris_fm",
- .id = -1,
-};
-
-static struct platform_device *msm_bus_8974_devices[] = {
- &msm_fm_platform_init,
-};
-
-static void __init msm8974_init_buses(void)
-{
- platform_add_devices(msm_bus_8974_devices,
- ARRAY_SIZE(msm_bus_8974_devices));
-};
-
/*
* Used to satisfy dependencies for devices that need to be
* run early or in a particular order. Most likely your device doesn't fall
@@ -119,7 +104,6 @@
msm_clock_init(&msm8974_rumi_clock_init_data);
else
msm_clock_init(&msm8974_clock_init_data);
- msm8974_init_buses();
msm_thermal_device_init();
}
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index 11ad9d9..a963c19 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -3102,35 +3102,36 @@
CLK_LOOKUP("bus_a_clk", mmss_s0_axi_clk.c, "msm_mmss_noc"),
CLK_LOOKUP("iface_clk", gcc_mmss_noc_cfg_ahb_clk.c, ""),
- CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tmc-etr"),
- CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tpiu"),
- CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-replicator"),
- CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tmc-etf"),
- CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-merg"),
- CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-in0"),
- CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-in1"),
- CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-kpss"),
- CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-mmss"),
- CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-stm"),
- CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm0"),
- CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm1"),
- CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm2"),
- CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm3"),
+ /* CoreSight clocks */
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc322000.tmc"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc318000.tpiu"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc31c000.replicator"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc307000.tmc"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc31b000.funnel"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc319000.funnel"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc31a000.funnel"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc345000.funnel"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc364000.funnel"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc321000.stm"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc33c000.etm"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc33d000.etm"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc33e000.etm"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc33f000.etm"),
- CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-tmc-etr"),
- CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-tpiu"),
- CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-replicator"),
- CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-tmc-etf"),
- CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-merg"),
- CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-in0"),
- CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-in1"),
- CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-kpss"),
- CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-mmss"),
- CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-stm"),
- CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm0"),
- CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm1"),
- CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm2"),
- CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm3"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc322000.tmc"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc318000.tpiu"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc31c000.replicator"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc307000.tmc"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc31b000.funnel"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc319000.funnel"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc31a000.funnel"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc345000.funnel"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc364000.funnel"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc321000.stm"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33c000.etm"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33d000.etm"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33e000.etm"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33f000.etm"),
/* HSUSB-OTG Clocks */
CLK_LOOKUP("xo", xo.c, "f9a55000.usb"),
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index b7693ae..bfa9ec0 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -3037,24 +3037,14 @@
static struct clk_ops clk_ops_pixel;
#define CFG_RCGR_DIV_MASK BM(4, 0)
+#define CMD_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x0)
#define CFG_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x4)
#define M_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x8)
#define N_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0xC)
#define MND_MODE_MASK BM(13, 12)
#define MND_DUAL_EDGE_MODE_BVAL BVAL(13, 12, 0x2)
#define CFG_RCGR_SRC_SEL_MASK BM(10, 8)
-
-static struct clk *get_parent_byte(struct clk *clk)
-{
- struct rcg_clk *rcg = to_rcg_clk(clk);
-
- /* The byte clock has only one known parent. */
- if ((readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_SRC_SEL_MASK)
- == BVAL(10, 8, dsipll0_byte_mm_source_val))
- return &dsipll0_byte_clk_src;
-
- return NULL;
-}
+#define CMD_RCGR_ROOT_STATUS_BIT BIT(31)
static enum handoff byte_rcg_handoff(struct clk *clk)
{
@@ -3071,6 +3061,9 @@
clk->rate = pre_div_rate;
+ if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+ return HANDOFF_DISABLED_CLK;
+
return HANDOFF_ENABLED_CLK;
}
@@ -3103,18 +3096,6 @@
return 0;
}
-static struct clk *get_parent_pixel(struct clk *clk)
-{
- struct rcg_clk *rcg = to_rcg_clk(clk);
-
- /* The pixel clock has one known parent. */
- if ((readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_SRC_SEL_MASK)
- == BVAL(10, 8, dsipll0_pixel_mm_source_val))
- return &dsipll0_pixel_clk_src;
-
- return NULL;
-}
-
static enum handoff pixel_rcg_handoff(struct clk *clk)
{
struct rcg_clk *rcg = to_rcg_clk(clk);
@@ -3142,6 +3123,9 @@
clk->rate = (pre_div_rate * mval) / nval;
}
+ if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+ return HANDOFF_DISABLED_CLK;
+
return HANDOFF_ENABLED_CLK;
}
@@ -5323,6 +5307,20 @@
CLK_LOOKUP("core_clk", qdss_clk.c, "fc33d000.etm"),
CLK_LOOKUP("core_clk", qdss_clk.c, "fc33e000.etm"),
CLK_LOOKUP("core_clk", qdss_clk.c, "fc33f000.etm"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc308000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc309000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30a000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30b000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30c000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30d000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30e000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30f000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc310000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc340000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc341000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc342000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc343000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc344000.cti"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc322000.tmc"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc318000.tpiu"),
@@ -5338,6 +5336,20 @@
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33d000.etm"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33e000.etm"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33f000.etm"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc308000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc309000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30a000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30b000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30c000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30d000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30e000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30f000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc310000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc340000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc341000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc342000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc343000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc344000.cti"),
CLK_LOOKUP("l2_m_clk", l2_m_clk, ""),
CLK_LOOKUP("krait0_m_clk", krait0_m_clk, ""),
@@ -5513,13 +5525,13 @@
{
clk_ops_byte = clk_ops_rcg;
clk_ops_byte.set_rate = set_rate_byte;
- clk_ops_byte.get_parent = get_parent_byte;
clk_ops_byte.handoff = byte_rcg_handoff;
+ clk_ops_byte.get_parent = NULL;
clk_ops_pixel = clk_ops_rcg_mnd;
clk_ops_pixel.set_rate = set_rate_pixel;
- clk_ops_pixel.get_parent = get_parent_pixel;
clk_ops_pixel.handoff = pixel_rcg_handoff;
+ clk_ops_pixel.get_parent = NULL;
clk_ops_rcg_hdmi = clk_ops_rcg;
clk_ops_rcg_hdmi.set_rate = rcg_clk_set_rate_hdmi;
diff --git a/arch/arm/mach-msm/clock-9625.c b/arch/arm/mach-msm/clock-9625.c
index 2bfb323..9648320 100644
--- a/arch/arm/mach-msm/clock-9625.c
+++ b/arch/arm/mach-msm/clock-9625.c
@@ -33,7 +33,6 @@
enum {
GCC_BASE,
- LPASS_BASE,
APCS_BASE,
APCS_PLL_BASE,
N_BASES,
@@ -42,7 +41,6 @@
static void __iomem *virt_bases[N_BASES];
#define GCC_REG_BASE(x) (void __iomem *)(virt_bases[GCC_BASE] + (x))
-#define LPASS_REG_BASE(x) (void __iomem *)(virt_bases[LPASS_BASE] + (x))
#define APCS_REG_BASE(x) (void __iomem *)(virt_bases[APCS_BASE] + (x))
#define APCS_PLL_REG_BASE(x) (void __iomem *)(virt_bases[APCS_PLL_BASE] + (x))
@@ -203,54 +201,11 @@
#define IPA_CNOC_CBCR 0x1A88
#define IPA_SLEEP_CBCR 0x1A8C
-/* LPASS registers */
-/* TODO: Needs to double check lpass regiserts after get the SWI for hw */
-#define LPAPLL_MODE_REG 0x0000
-#define LPAPLL_L_REG 0x0004
-#define LPAPLL_M_REG 0x0008
-#define LPAPLL_N_REG 0x000C
-#define LPAPLL_USER_CTL_REG 0x0010
-#define LPAPLL_CONFIG_CTL_REG 0x0014
-#define LPAPLL_TEST_CTL_REG 0x0018
-#define LPAPLL_STATUS_REG 0x001C
-
-#define LPASS_DEBUG_CLK_CTL_REG 0x29000
-#define LPASS_LPA_PLL_VOTE_APPS_REG 0x2000
-
-#define LPAIF_PRI_CMD_RCGR 0xB000
-#define LPAIF_SEC_CMD_RCGR 0xC000
-#define LPAIF_PCM0_CMD_RCGR 0xF000
-#define LPAIF_PCM1_CMD_RCGR 0x10000
-#define SLIMBUS_CMD_RCGR 0x12000
-#define LPAIF_PCMOE_CMD_RCGR 0x13000
-
-#define AUDIO_CORE_BCR 0x4000
-
-#define AUDIO_CORE_GDSCR 0x7000
-#define AUDIO_CORE_LPAIF_PRI_OSR_CBCR 0xB014
-#define AUDIO_CORE_LPAIF_PRI_IBIT_CBCR 0xB018
-#define AUDIO_CORE_LPAIF_PRI_EBIT_CBCR 0xB01C
-#define AUDIO_CORE_LPAIF_SEC_OSR_CBCR 0xC014
-#define AUDIO_CORE_LPAIF_SEC_IBIT_CBCR 0xC018
-#define AUDIO_CORE_LPAIF_SEC_EBIT_CBCR 0xC01C
-#define AUDIO_CORE_LPAIF_PCM0_IBIT_CBCR 0xF014
-#define AUDIO_CORE_LPAIF_PCM0_EBIT_CBCR 0xF018
-#define AUDIO_CORE_LPAIF_PCM1_IBIT_CBCR 0x10014
-#define AUDIO_CORE_LPAIF_PCM1_EBIT_CBCR 0x10018
-#define AUDIO_CORE_RESAMPLER_CORE_CBCR 0x11014
-#define AUDIO_CORE_RESAMPLER_LFABIF_CBCR 0x11018
-#define AUDIO_CORE_SLIMBUS_CORE_CBCR 0x12014
-#define AUDIO_CORE_SLIMBUS_LFABIF_CBCR 0x12018
-#define AUDIO_CORE_LPAIF_PCM_DATA_OE_CBCR 0x13014
-
/* Mux source select values */
#define cxo_source_val 0
#define gpll0_source_val 1
#define gpll1_hsic_source_val 4
#define gnd_source_val 5
-#define cxo_lpass_source_val 0
-#define lpapll0_lpass_source_val 1
-#define gpll0_lpass_source_val 5
#define F_GCC_GND \
{ \
@@ -282,17 +237,6 @@
| BVAL(10, 8, s##_hsic_source_val), \
}
-#define F_LPASS(f, s, div, m, n) \
- { \
- .freq_hz = (f), \
- .src_clk = &s##_clk_src.c, \
- .m_val = (m), \
- .n_val = ~((n)-(m)) * !!(n), \
- .d_val = ~(n),\
- .div_src_val = BVAL(4, 0, (int)(2*(div) - 1)) \
- | BVAL(10, 8, s##_lpass_source_val), \
- }
-
#define F_APCS_PLL(f, l, m, n, pre_div, post_div, vco) \
{ \
.freq_hz = (f), \
@@ -429,21 +373,6 @@
},
};
-static struct pll_vote_clk lpapll0_clk_src = {
- .en_reg = (void __iomem *)LPASS_LPA_PLL_VOTE_APPS_REG,
- .en_mask = BIT(0),
- .status_reg = (void __iomem *)LPAPLL_STATUS_REG,
- .status_mask = BIT(17),
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .parent = &cxo_clk_src.c,
- .rate = 393216000,
- .dbg_name = "lpapll0_clk_src",
- .ops = &clk_ops_pll_vote,
- CLK_INIT(lpapll0_clk_src.c),
- },
-};
-
static struct pll_vote_clk gpll1_clk_src = {
.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
.en_mask = BIT(1),
@@ -1619,274 +1548,6 @@
},
};
-/* LPASS clock data */
-static struct clk_freq_tbl ftbl_audio_core_lpaif_clock[] = {
- F_LPASS( 512000, lpapll0, 16, 1, 48),
- F_LPASS( 768000, lpapll0, 16, 1, 32),
- F_LPASS( 1024000, lpapll0, 16, 1, 24),
- F_LPASS( 1536000, lpapll0, 16, 1, 16),
- F_LPASS( 2048000, lpapll0, 16, 1, 12),
- F_LPASS( 3072000, lpapll0, 16, 1, 8),
- F_LPASS( 4096000, lpapll0, 16, 1, 6),
- F_LPASS( 6144000, lpapll0, 16, 1, 4),
- F_LPASS( 8192000, lpapll0, 16, 1, 3),
- F_LPASS(12288000, lpapll0, 16, 1, 2),
- F_END
-};
-
-static struct clk_freq_tbl ftbl_audio_core_lpaif_pcm_clock[] = {
- F_LPASS( 512000, lpapll0, 16, 1, 48),
- F_LPASS( 768000, lpapll0, 16, 1, 32),
- F_LPASS( 1024000, lpapll0, 16, 1, 24),
- F_LPASS( 1536000, lpapll0, 16, 1, 16),
- F_LPASS( 2048000, lpapll0, 16, 1, 12),
- F_LPASS( 3072000, lpapll0, 16, 1, 8),
- F_LPASS( 4096000, lpapll0, 16, 1, 6),
- F_LPASS( 6144000, lpapll0, 16, 1, 4),
- F_LPASS( 8192000, lpapll0, 16, 1, 3),
- F_END
-};
-
-static struct rcg_clk audio_core_lpaif_pcmoe_clk_src = {
- .cmd_rcgr_reg = LPAIF_PCMOE_CMD_RCGR,
- .set_rate = set_rate_mnd,
- .freq_tbl = ftbl_audio_core_lpaif_clock,
- .current_freq = &rcg_dummy_freq,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .dbg_name = "audio_core_lpaif_pcmoe_clk_src",
- .ops = &clk_ops_rcg_mnd,
- VDD_DIG_FMAX_MAP1(LOW, 12288000),
- CLK_INIT(audio_core_lpaif_pcmoe_clk_src.c)
- },
-};
-
-static struct rcg_clk audio_core_lpaif_pri_clk_src = {
- .cmd_rcgr_reg = LPAIF_PRI_CMD_RCGR,
- .set_rate = set_rate_mnd,
- .freq_tbl = ftbl_audio_core_lpaif_clock,
- .current_freq = &rcg_dummy_freq,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .dbg_name = "audio_core_lpaif_pri_clk_src",
- .ops = &clk_ops_rcg_mnd,
- VDD_DIG_FMAX_MAP2(LOW, 12288000, NOMINAL, 24576000),
- CLK_INIT(audio_core_lpaif_pri_clk_src.c)
- },
-};
-
-static struct rcg_clk audio_core_lpaif_sec_clk_src = {
- .cmd_rcgr_reg = LPAIF_SEC_CMD_RCGR,
- .set_rate = set_rate_mnd,
- .freq_tbl = ftbl_audio_core_lpaif_clock,
- .current_freq = &rcg_dummy_freq,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .dbg_name = "audio_core_lpaif_sec_clk_src",
- .ops = &clk_ops_rcg_mnd,
- VDD_DIG_FMAX_MAP2(LOW, 12288000, NOMINAL, 24576000),
- CLK_INIT(audio_core_lpaif_sec_clk_src.c)
- },
-};
-
-static struct clk_freq_tbl ftbl_audio_core_slimbus_core_clock[] = {
- F_LPASS(26041000, lpapll0, 1, 10, 151),
- F_END
-};
-
-static struct rcg_clk audio_core_slimbus_core_clk_src = {
- .cmd_rcgr_reg = SLIMBUS_CMD_RCGR,
- .set_rate = set_rate_mnd,
- .freq_tbl = ftbl_audio_core_slimbus_core_clock,
- .current_freq = &rcg_dummy_freq,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .dbg_name = "audio_core_slimbus_core_clk_src",
- .ops = &clk_ops_rcg_mnd,
- VDD_DIG_FMAX_MAP2(LOW, 13107000, NOMINAL, 26214000),
- CLK_INIT(audio_core_slimbus_core_clk_src.c)
- },
-};
-
-static struct rcg_clk audio_core_lpaif_pcm0_clk_src = {
- .cmd_rcgr_reg = LPAIF_PCM0_CMD_RCGR,
- .set_rate = set_rate_mnd,
- .freq_tbl = ftbl_audio_core_lpaif_pcm_clock,
- .current_freq = &rcg_dummy_freq,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .dbg_name = "audio_core_lpaif_pcm0_clk_src",
- .ops = &clk_ops_rcg_mnd,
- VDD_DIG_FMAX_MAP2(LOW, 4096000, NOMINAL, 8192000),
- CLK_INIT(audio_core_lpaif_pcm0_clk_src.c)
- },
-};
-
-static struct rcg_clk audio_core_lpaif_pcm1_clk_src = {
- .cmd_rcgr_reg = LPAIF_PCM1_CMD_RCGR,
- .set_rate = set_rate_mnd,
- .freq_tbl = ftbl_audio_core_lpaif_pcm_clock,
- .current_freq = &rcg_dummy_freq,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .dbg_name = "audio_core_lpaif_pcm1_clk_src",
- .ops = &clk_ops_rcg_mnd,
- VDD_DIG_FMAX_MAP2(LOW, 4096000, NOMINAL, 8192000),
- CLK_INIT(audio_core_lpaif_pcm1_clk_src.c)
- },
-};
-
-static struct branch_clk audio_core_slimbus_lfabif_clk = {
- .cbcr_reg = AUDIO_CORE_SLIMBUS_LFABIF_CBCR,
- .has_sibling = 1,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .dbg_name = "audio_core_slimbus_lfabif_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(audio_core_slimbus_lfabif_clk.c),
- },
-};
-
-static struct branch_clk audio_core_lpaif_pcm_data_oe_clk = {
- .cbcr_reg = AUDIO_CORE_LPAIF_PCM_DATA_OE_CBCR,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .parent = &audio_core_lpaif_pcmoe_clk_src.c,
- .dbg_name = "audio_core_lpaif_pcm_data_oe_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(audio_core_lpaif_pcm_data_oe_clk.c),
- },
-};
-
-static struct branch_clk audio_core_slimbus_core_clk = {
- .cbcr_reg = AUDIO_CORE_SLIMBUS_CORE_CBCR,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .parent = &audio_core_slimbus_core_clk_src.c,
- .dbg_name = "audio_core_slimbus_core_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(audio_core_slimbus_core_clk.c),
- },
-};
-
-static struct branch_clk audio_core_lpaif_pri_ebit_clk = {
- .cbcr_reg = AUDIO_CORE_LPAIF_PRI_EBIT_CBCR,
- .has_sibling = 0,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .dbg_name = "audio_core_lpaif_pri_ebit_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(audio_core_lpaif_pri_ebit_clk.c),
- },
-};
-
-static struct branch_clk audio_core_lpaif_pri_ibit_clk = {
- .cbcr_reg = AUDIO_CORE_LPAIF_PRI_IBIT_CBCR,
- .has_sibling = 1,
- .max_div = 15,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .parent = &audio_core_lpaif_pri_clk_src.c,
- .dbg_name = "audio_core_lpaif_pri_ibit_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(audio_core_lpaif_pri_ibit_clk.c),
- },
-};
-
-static struct branch_clk audio_core_lpaif_pri_osr_clk = {
- .cbcr_reg = AUDIO_CORE_LPAIF_PRI_OSR_CBCR,
- .has_sibling = 1,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .parent = &audio_core_lpaif_pri_clk_src.c,
- .dbg_name = "audio_core_lpaif_pri_osr_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(audio_core_lpaif_pri_osr_clk.c),
- },
-};
-
-static struct branch_clk audio_core_lpaif_pcm0_ebit_clk = {
- .cbcr_reg = AUDIO_CORE_LPAIF_PCM0_EBIT_CBCR,
- .has_sibling = 0,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .dbg_name = "audio_core_lpaif_pcm0_ebit_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(audio_core_lpaif_pcm0_ebit_clk.c),
- },
-};
-
-static struct branch_clk audio_core_lpaif_pcm0_ibit_clk = {
- .cbcr_reg = AUDIO_CORE_LPAIF_PCM0_IBIT_CBCR,
- .has_sibling = 0,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .parent = &audio_core_lpaif_pcm0_clk_src.c,
- .dbg_name = "audio_core_lpaif_pcm0_ibit_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(audio_core_lpaif_pcm0_ibit_clk.c),
- },
-};
-
-static struct branch_clk audio_core_lpaif_sec_ebit_clk = {
- .cbcr_reg = AUDIO_CORE_LPAIF_SEC_EBIT_CBCR,
- .has_sibling = 0,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .dbg_name = "audio_core_lpaif_sec_ebit_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(audio_core_lpaif_sec_ebit_clk.c),
- },
-};
-
-static struct branch_clk audio_core_lpaif_sec_ibit_clk = {
- .cbcr_reg = AUDIO_CORE_LPAIF_SEC_IBIT_CBCR,
- .has_sibling = 1,
- .max_div = 15,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .parent = &audio_core_lpaif_sec_clk_src.c,
- .dbg_name = "audio_core_lpaif_sec_ibit_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(audio_core_lpaif_sec_ibit_clk.c),
- },
-};
-
-static struct branch_clk audio_core_lpaif_sec_osr_clk = {
- .cbcr_reg = AUDIO_CORE_LPAIF_SEC_OSR_CBCR,
- .has_sibling = 1,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .parent = &audio_core_lpaif_sec_clk_src.c,
- .dbg_name = "audio_core_lpaif_sec_osr_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(audio_core_lpaif_sec_osr_clk.c),
- },
-};
-
-static struct branch_clk audio_core_lpaif_pcm1_ebit_clk = {
- .cbcr_reg = AUDIO_CORE_LPAIF_PCM1_EBIT_CBCR,
- .has_sibling = 0,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .dbg_name = "audio_core_lpaif_pcm1_ebit_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(audio_core_lpaif_pcm1_ebit_clk.c),
- },
-};
-
-static struct branch_clk audio_core_lpaif_pcm1_ibit_clk = {
- .cbcr_reg = AUDIO_CORE_LPAIF_PCM1_IBIT_CBCR,
- .has_sibling = 0,
- .base = &virt_bases[LPASS_BASE],
- .c = {
- .parent = &audio_core_lpaif_pcm1_clk_src.c,
- .dbg_name = "audio_core_lpaif_pcm1_ibit_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(audio_core_lpaif_pcm1_ibit_clk.c),
- },
-};
-
static DEFINE_CLK_MEASURE(a5_m_clk);
#ifdef CONFIG_DEBUG_FS
@@ -1897,7 +1558,7 @@
u32 debug_mux;
};
-struct measure_mux_entry measure_mux[] = {
+struct measure_mux_entry measure_mux_common[] __initdata = {
{&gcc_pdm_ahb_clk.c, GCC_BASE, 0x00d0},
{&gcc_usb_hsic_xcvr_fs_clk.c, GCC_BASE, 0x005d},
{&gcc_usb_hsic_system_clk.c, GCC_BASE, 0x0059},
@@ -1943,21 +1604,22 @@
{&gcc_qpic_clk.c, GCC_BASE, 0x01D8},
{&gcc_qpic_ahb_clk.c, GCC_BASE, 0x01D9},
- {&audio_core_lpaif_pcm_data_oe_clk.c, LPASS_BASE, 0x0030},
- {&audio_core_slimbus_core_clk.c, LPASS_BASE, 0x003d},
- {&audio_core_lpaif_pri_clk_src.c, LPASS_BASE, 0x0017},
- {&audio_core_lpaif_sec_clk_src.c, LPASS_BASE, 0x0016},
- {&audio_core_slimbus_core_clk_src.c, LPASS_BASE, 0x0011},
- {&audio_core_lpaif_pcm1_clk_src.c, LPASS_BASE, 0x0012},
- {&audio_core_lpaif_pcm0_clk_src.c, LPASS_BASE, 0x0013},
- {&audio_core_lpaif_pcmoe_clk_src.c, LPASS_BASE, 0x000f},
- {&audio_core_slimbus_lfabif_clk.c, LPASS_BASE, 0x003e},
-
{&a5_m_clk, APCS_BASE, 0x3},
{&dummy_clk, N_BASES, 0x0000},
};
+struct measure_mux_entry measure_mux_v2_only[] __initdata = {
+ {&gcc_ipa_clk.c, GCC_BASE, 0x01E0},
+ {&gcc_ipa_cnoc_clk.c, GCC_BASE, 0x01E1},
+ {&gcc_ipa_sleep_clk.c, GCC_BASE, 0x01E2},
+ {&gcc_qpic_clk.c, GCC_BASE, 0x01D8},
+ {&gcc_qpic_ahb_clk.c, GCC_BASE, 0x01D9},
+};
+
+struct measure_mux_entry measure_mux[ARRAY_SIZE(measure_mux_common)
+ + ARRAY_SIZE(measure_mux_v2_only)];
+
static int measure_clk_set_parent(struct clk *c, struct clk *parent)
{
struct measure_clk *clk = to_measure_clk(c);
@@ -1982,7 +1644,6 @@
clk->sample_ticks = 0x10000;
clk->multiplier = 1;
- writel_relaxed(0, LPASS_REG_BASE(LPASS_DEBUG_CLK_CTL_REG));
writel_relaxed(0, GCC_REG_BASE(GCC_DEBUG_CLK_CTL_REG));
switch (measure_mux[i].base) {
@@ -1991,16 +1652,6 @@
clk_sel = measure_mux[i].debug_mux;
break;
- case LPASS_BASE:
- clk_sel = 0x161;
- regval = BVAL(15, 0, measure_mux[i].debug_mux);
- writel_relaxed(regval, LPASS_REG_BASE(LPASS_DEBUG_CLK_CTL_REG));
-
- /* Activate debug clock output */
- regval |= BIT(20);
- writel_relaxed(regval, LPASS_REG_BASE(LPASS_DEBUG_CLK_CTL_REG));
- break;
-
case APCS_BASE:
clk_sel = 0x16A;
regval = BVAL(5, 3, measure_mux[i].debug_mux);
@@ -2208,35 +1859,6 @@
CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "fd400000.qcom,qcrypto"),
CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "fd400000.qcom,qcrypto"),
- /* LPASS clocks */
- CLK_LOOKUP("core_clk", audio_core_slimbus_core_clk.c, "fe12f000.slim"),
- CLK_LOOKUP("iface_clk", audio_core_slimbus_lfabif_clk.c, ""),
-
- CLK_LOOKUP("core_clk", audio_core_lpaif_pri_clk_src.c,
- "msm-dai-q6-mi2s.0"),
- CLK_LOOKUP("osr_clk", audio_core_lpaif_pri_osr_clk.c,
- "msm-dai-q6-mi2s.0"),
- CLK_LOOKUP("ebit_clk", audio_core_lpaif_pri_ebit_clk.c,
- "msm-dai-q6-mi2s.0"),
- CLK_LOOKUP("ibit_clk", audio_core_lpaif_pri_ibit_clk.c,
- "msm-dai-q6-mi2s.0"),
- CLK_LOOKUP("core_clk", audio_core_lpaif_sec_clk_src.c,
- "msm-dai-q6-mi2s.1"),
- CLK_LOOKUP("osr_clk", audio_core_lpaif_sec_osr_clk.c,
- "msm-dai-q6-mi2s.1"),
- CLK_LOOKUP("ebit_clk", audio_core_lpaif_sec_ebit_clk.c,
- "msm-dai-q6-mi2s.1"),
- CLK_LOOKUP("ibit_clk", audio_core_lpaif_sec_ibit_clk.c,
- "msm-dai-q6-mi2s.1"),
- CLK_LOOKUP("core_clk", audio_core_lpaif_pcm0_clk_src.c, ""),
- CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm0_ebit_clk.c, ""),
- CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm0_ibit_clk.c, ""),
- CLK_LOOKUP("core_clk", audio_core_lpaif_pcm1_clk_src.c, ""),
- CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm1_ebit_clk.c, ""),
- CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm1_ibit_clk.c, ""),
- CLK_LOOKUP("core_oe_src_clk", audio_core_lpaif_pcmoe_clk_src.c, ""),
- CLK_LOOKUP("core_oe_clk", audio_core_lpaif_pcm_data_oe_clk.c, ""),
-
/* RPM and voter clocks */
CLK_LOOKUP("bus_clk", snoc_clk.c, ""),
CLK_LOOKUP("bus_clk", pnoc_clk.c, ""),
@@ -2271,6 +1893,16 @@
CLK_LOOKUP("core_clk", qdss_clk.c, "fc321000.stm"),
CLK_LOOKUP("core_clk", qdss_clk.c, "fc332000.etm"),
CLK_LOOKUP("core_clk", qdss_clk.c, "fc332000.jtagmm"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc308000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc309000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30a000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30b000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30c000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30d000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30e000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc30f000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc310000.cti"),
+ CLK_LOOKUP("core_clk", qdss_clk.c, "fc333000.cti"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc322000.tmc"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc318000.tpiu"),
@@ -2282,85 +1914,19 @@
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc321000.stm"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc332000.etm"),
CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc332000.jtagmm"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc308000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc309000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30a000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30b000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30c000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30d000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30e000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30f000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc310000.cti"),
+ CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc333000.cti"),
};
-static struct pll_config_regs gpll0_regs __initdata = {
- .l_reg = (void __iomem *)GPLL0_L_REG,
- .m_reg = (void __iomem *)GPLL0_M_REG,
- .n_reg = (void __iomem *)GPLL0_N_REG,
- .config_reg = (void __iomem *)GPLL0_USER_CTL_REG,
- .mode_reg = (void __iomem *)GPLL0_MODE_REG,
- .base = &virt_bases[GCC_BASE],
-};
-
-/* GPLL0 at 600 MHz, main output enabled. */
-static struct pll_config gpll0_config __initdata = {
- .l = 0x1f,
- .m = 0x1,
- .n = 0x4,
- .vco_val = 0x0,
- .vco_mask = BM(21, 20),
- .pre_div_val = 0x0,
- .pre_div_mask = BM(14, 12),
- .post_div_val = 0x0,
- .post_div_mask = BM(9, 8),
- .mn_ena_val = BIT(24),
- .mn_ena_mask = BIT(24),
- .main_output_val = BIT(0),
- .main_output_mask = BIT(0),
-};
-
-static struct pll_config_regs gpll1_regs __initdata = {
- .l_reg = (void __iomem *)GPLL1_L_REG,
- .m_reg = (void __iomem *)GPLL1_M_REG,
- .n_reg = (void __iomem *)GPLL1_N_REG,
- .config_reg = (void __iomem *)GPLL1_USER_CTL_REG,
- .mode_reg = (void __iomem *)GPLL1_MODE_REG,
- .base = &virt_bases[GCC_BASE],
-};
-
-/* GPLL1 at 480 MHz, main output enabled. */
-static struct pll_config gpll1_config __initdata = {
- .l = 0x19,
- .m = 0x0,
- .n = 0x1,
- .vco_val = 0x0,
- .vco_mask = BM(21, 20),
- .pre_div_val = 0x0,
- .pre_div_mask = BM(14, 12),
- .post_div_val = 0x0,
- .post_div_mask = BM(9, 8),
- .main_output_val = BIT(0),
- .main_output_mask = BIT(0),
-};
-
-static struct pll_config_regs lpapll0_regs __initdata = {
- .l_reg = (void __iomem *)LPAPLL_L_REG,
- .m_reg = (void __iomem *)LPAPLL_M_REG,
- .n_reg = (void __iomem *)LPAPLL_N_REG,
- .config_reg = (void __iomem *)LPAPLL_USER_CTL_REG,
- .mode_reg = (void __iomem *)LPAPLL_MODE_REG,
- .base = &virt_bases[LPASS_BASE],
-};
-
-/* LPAPLL0 at 393.216 MHz, main output enabled. */
-static struct pll_config lpapll0_config __initdata = {
- .l = 0x28,
- .m = 0x18,
- .n = 0x19,
- .vco_val = 0x0,
- .vco_mask = BM(21, 20),
- .pre_div_val = 0x0,
- .pre_div_mask = BM(14, 12),
- .post_div_val = BVAL(9, 8, 0x1),
- .post_div_mask = BM(9, 8),
- .mn_ena_val = BIT(24),
- .mn_ena_mask = BIT(24),
- .main_output_val = BIT(0),
- .main_output_mask = BIT(0),
-};
-
#define PLL_AUX_OUTPUT_BIT 1
#define PLL_AUX2_OUTPUT_BIT 2
@@ -2402,64 +1968,9 @@
return 0;
}
-static void __init configure_apcs_pll(void)
-{
- u32 regval;
-
- clk_set_rate(&apcspll_clk_src.c, 998400000);
-
- writel_relaxed(0x00141200,
- APCS_PLL_REG_BASE(APCS_CPU_PLL_CONFIG_CTL_REG));
-
- /* Enable AUX and AUX2 output */
- regval = readl_relaxed(APCS_PLL_REG_BASE(APCS_CPU_PLL_USER_CTL_REG));
- regval |= BIT(PLL_AUX_OUTPUT_BIT) | BIT(PLL_AUX2_OUTPUT_BIT);
- writel_relaxed(regval, APCS_PLL_REG_BASE(APCS_CPU_PLL_USER_CTL_REG));
-}
-
-#define PWR_ON_MASK BIT(31)
-#define EN_REST_WAIT_MASK (0xF << 20)
-#define EN_FEW_WAIT_MASK (0xF << 16)
-#define CLK_DIS_WAIT_MASK (0xF << 12)
-#define SW_OVERRIDE_MASK BIT(2)
-#define HW_CONTROL_MASK BIT(1)
-#define SW_COLLAPSE_MASK BIT(0)
-
-/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
-#define EN_REST_WAIT_VAL (0x2 << 20)
-#define EN_FEW_WAIT_VAL (0x2 << 16)
-#define CLK_DIS_WAIT_VAL (0x2 << 12)
-#define GDSC_TIMEOUT_US 50000
-
static void __init reg_init(void)
{
- u32 regval, status;
- int ret;
-
- if (!(readl_relaxed(GCC_REG_BASE(GPLL0_STATUS_REG))
- & gpll0_clk_src.status_mask))
- configure_sr_hpm_lp_pll(&gpll0_config, &gpll0_regs, 1);
-
- if (!(readl_relaxed(GCC_REG_BASE(GPLL1_STATUS_REG))
- & gpll1_clk_src.status_mask))
- configure_sr_hpm_lp_pll(&gpll1_config, &gpll1_regs, 1);
-
- configure_sr_hpm_lp_pll(&lpapll0_config, &lpapll0_regs, 1);
-
- /* TODO: Remove A5 pll configuration once the bootloader is avaiable */
- regval = readl_relaxed(APCS_PLL_REG_BASE(APCS_CPU_PLL_MODE_REG));
- if ((regval & BM(2, 0)) != 0x7)
- configure_apcs_pll();
-
- /* TODO:
- * 1) do we need to turn on AUX2 output too?
- * 2) if need to vote off all sleep clocks
- */
-
- /* Enable GPLL0's aux outputs. */
- regval = readl_relaxed(GCC_REG_BASE(GPLL0_USER_CTL_REG));
- regval |= BIT(PLL_AUX_OUTPUT_BIT) | BIT(PLL_AUX2_OUTPUT_BIT);
- writel_relaxed(regval, GCC_REG_BASE(GPLL0_USER_CTL_REG));
+ u32 regval;
/* Vote for GPLL0 to turn on. Needed by acpuclock. */
regval = readl_relaxed(GCC_REG_BASE(APCS_GPLL_ENA_VOTE_REG));
@@ -2471,31 +1982,6 @@
* register.
*/
writel_relaxed(0x0, GCC_REG_BASE(APCS_CLOCK_SLEEP_ENA_VOTE));
-
- /*
- * TODO: The following sequence enables the LPASS audio core GDSC.
- * Remove when this becomes unnecessary.
- */
-
- /*
- * Disable HW trigger: collapse/restore occur based on registers writes.
- * Disable SW override: Use hardware state-machine for sequencing.
- */
- regval = readl_relaxed(LPASS_REG_BASE(AUDIO_CORE_GDSCR));
- regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);
-
- /* Configure wait time between states. */
- regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
- regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
- writel_relaxed(regval, LPASS_REG_BASE(AUDIO_CORE_GDSCR));
-
- regval = readl_relaxed(LPASS_REG_BASE(AUDIO_CORE_GDSCR));
- regval &= ~BIT(0);
- writel_relaxed(regval, LPASS_REG_BASE(AUDIO_CORE_GDSCR));
-
- ret = readl_poll_timeout(LPASS_REG_BASE(AUDIO_CORE_GDSCR), status,
- status & PWR_ON_MASK, 50, GDSC_TIMEOUT_US);
- WARN(ret, "LPASS Audio Core GDSC did not power on.\n");
}
static void __init msm9625_clock_post_init(void)
@@ -2524,8 +2010,6 @@
clk_set_rate(&usb_hsic_xcvr_fs_clk_src.c,
usb_hsic_xcvr_fs_clk_src.freq_tbl[0].freq_hz);
clk_set_rate(&pdm2_clk_src.c, pdm2_clk_src.freq_tbl[0].freq_hz);
- clk_set_rate(&audio_core_slimbus_core_clk_src.c,
- audio_core_slimbus_core_clk_src.freq_tbl[0].freq_hz);
/*
* TODO: set rate on behalf of the i2c driver until the i2c driver
* distinguish v1/v2 and call set rate accordingly.
@@ -2538,9 +2022,6 @@
#define GCC_CC_PHYS 0xFC400000
#define GCC_CC_SIZE SZ_16K
-#define LPASS_CC_PHYS 0xFE000000
-#define LPASS_CC_SIZE SZ_256K
-
#define APCS_GCC_CC_PHYS 0xF9011000
#define APCS_GCC_CC_SIZE SZ_4K
@@ -2562,10 +2043,6 @@
if (!virt_bases[GCC_BASE])
panic("clock-9625: Unable to ioremap GCC memory!");
- virt_bases[LPASS_BASE] = ioremap(LPASS_CC_PHYS, LPASS_CC_SIZE);
- if (!virt_bases[LPASS_BASE])
- panic("clock-9625: Unable to ioremap LPASS_CC memory!");
-
virt_bases[APCS_BASE] = ioremap(APCS_GCC_CC_PHYS, APCS_GCC_CC_SIZE);
if (!virt_bases[APCS_BASE])
panic("clock-9625: Unable to ioremap APCS_GCC_CC memory!");
@@ -2593,6 +2070,16 @@
enable_rpm_scaling();
reg_init();
+
+ /* Construct measurement mux array */
+ if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 2) {
+ memcpy(measure_mux,
+ measure_mux_v2_only, sizeof(measure_mux_v2_only));
+ memcpy(measure_mux + ARRAY_SIZE(measure_mux_v2_only),
+ measure_mux_common, sizeof(measure_mux_common));
+ } else
+ memcpy(measure_mux,
+ measure_mux_common, sizeof(measure_mux_common));
}
static int __init msm9625_clock_late_init(void)
diff --git a/arch/arm/mach-msm/clock-mdss-8974.c b/arch/arm/mach-msm/clock-mdss-8974.c
index b752aeb..91e96b7 100644
--- a/arch/arm/mach-msm/clock-mdss-8974.c
+++ b/arch/arm/mach-msm/clock-mdss-8974.c
@@ -27,6 +27,9 @@
#define REG_R(addr) readl_relaxed(addr)
#define REG_W(data, addr) writel_relaxed(data, addr)
+#define GDSC_PHYS 0xFD8C2304
+#define GDSC_SIZE 0x4
+
#define DSI_PHY_PHYS 0xFD922800
#define DSI_PHY_SIZE 0x00000800
@@ -99,6 +102,7 @@
#define VCO_CLK 424000000
static unsigned char *mdss_dsi_base;
+static unsigned char *gdsc_base;
static int pll_byte_clk_rate;
static int pll_pclk_rate;
static int pll_initialized;
@@ -112,6 +116,11 @@
void __init mdss_clk_ctrl_pre_init(struct clk *ahb_clk)
{
BUG_ON(ahb_clk == NULL);
+
+ gdsc_base = ioremap(GDSC_PHYS, GDSC_SIZE);
+ if (!gdsc_base)
+ pr_err("%s: unable to remap gdsc base", __func__);
+
mdss_dsi_base = ioremap(DSI_PHY_PHYS, DSI_PHY_SIZE);
if (!mdss_dsi_base)
pr_err("%s: unable to remap dsi base", __func__);
@@ -130,6 +139,14 @@
#define PLL_POLL_MAX_READS 10
#define PLL_POLL_TIMEOUT_US 50
+static int mdss_gdsc_enabled(void)
+{
+ if (!gdsc_base)
+ return 0;
+
+ return !!(readl_relaxed(gdsc_base) & BIT(31));
+}
+
static int mdss_dsi_check_pll_lock(void)
{
u32 status;
@@ -392,7 +409,7 @@
static enum handoff mdss_dsi_pll_byte_handoff(struct clk *c)
{
- if (mdss_dsi_check_pll_lock()) {
+ if (mdss_gdsc_enabled() && mdss_dsi_check_pll_lock()) {
c->rate = 53000000;
dsi_pll_rate = 53000000;
pll_byte_clk_rate = 53000000;
@@ -406,7 +423,7 @@
static enum handoff mdss_dsi_pll_pixel_handoff(struct clk *c)
{
- if (mdss_dsi_check_pll_lock()) {
+ if (mdss_gdsc_enabled() && mdss_dsi_check_pll_lock()) {
c->rate = 105000000;
dsipll_refcount++;
return HANDOFF_ENABLED_CLK;
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 10ee1e3..b7707d7 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -41,6 +41,7 @@
#include <mach/msm_rtb.h>
#include <linux/msm_ion.h>
#include "clock.h"
+#include "pm.h"
#include "devices.h"
#include "footswitch.h"
#include "msm_watchdog.h"
@@ -141,6 +142,19 @@
},
};
+static struct msm_pm_sleep_status_data msm_pm_slp_sts_data = {
+ .base_addr = MSM_ACC0_BASE + 0x08,
+ .cpu_offset = MSM_ACC1_BASE - MSM_ACC0_BASE,
+ .mask = 1UL << 13,
+};
+struct platform_device msm8064_cpu_slp_status = {
+ .name = "cpu_slp_status",
+ .id = -1,
+ .dev = {
+ .platform_data = &msm_pm_slp_sts_data,
+ },
+};
+
static struct msm_watchdog_pdata msm_watchdog_pdata = {
.pet_time = 10000,
.bark_time = 11000,
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index 6fe8ccb..2f8f547 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -53,6 +53,20 @@
.retention_calls_tz = true,
};
+static struct msm_pm_sleep_status_data msm_pm_slp_sts_data = {
+ .base_addr = MSM_ACC0_BASE + 0x08,
+ .cpu_offset = MSM_ACC1_BASE - MSM_ACC0_BASE,
+ .mask = 1UL << 13,
+};
+
+struct platform_device msm8930_cpu_slp_status = {
+ .name = "cpu_slp_status",
+ .id = -1,
+ .dev = {
+ .platform_data = &msm_pm_slp_sts_data,
+ },
+};
+
struct platform_device msm8930_pm_8x60 = {
.name = "pm-8x60",
.id = -1,
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 6a344be..2bd9dfe 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -1703,6 +1703,19 @@
.id = -1,
};
+static struct msm_pm_sleep_status_data msm_pm_slp_sts_data = {
+ .base_addr = MSM_ACC0_BASE + 0x08,
+ .cpu_offset = MSM_ACC1_BASE - MSM_ACC0_BASE,
+ .mask = 1UL << 13,
+};
+struct platform_device msm8960_cpu_slp_status = {
+ .name = "cpu_slp_status",
+ .id = -1,
+ .dev = {
+ .platform_data = &msm_pm_slp_sts_data,
+ },
+};
+
static struct msm_watchdog_pdata msm_watchdog_pdata = {
.pet_time = 10000,
.bark_time = 11000,
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index 53eca3e..327c11d 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -124,6 +124,10 @@
extern struct platform_device msm_device_hsusb_host2;
extern struct platform_device msm_device_hsic_host;
+extern struct platform_device msm8960_cpu_slp_status;
+extern struct platform_device msm8064_cpu_slp_status;
+extern struct platform_device msm8930_cpu_slp_status;
+
extern struct platform_device msm_device_otg;
extern struct platform_device msm_android_usb_device;
extern struct platform_device msm_android_usb_hsic_device;
diff --git a/arch/arm/mach-msm/include/mach/ecm_ipa.h b/arch/arm/mach-msm/include/mach/ecm_ipa.h
new file mode 100644
index 0000000..008a659
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/ecm_ipa.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ECM_IPA_H_
+#define _ECM_IPA_H_
+
+#include <mach/ipa.h>
+
+/*
+ * @priv: private data given upon ipa_connect
+ * @evt: event enum, should be IPA_WRITE_DONE
+ * @data: for tx path the data field is the sent socket buffer.
+ */
+typedef void (*ecm_ipa_callback)(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data);
+
+
+#ifdef CONFIG_ECM_IPA
+
+int ecm_ipa_init(ecm_ipa_callback * ecm_ipa_rx_dp_notify,
+ ecm_ipa_callback * ecm_ipa_tx_dp_notify,
+ void **priv);
+
+int ecm_ipa_configure(u8 host_ethaddr[], u8 device_ethaddr[],
+ void *priv);
+
+int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+ void *priv);
+
+int ecm_ipa_disconnect(void *priv);
+
+void ecm_ipa_cleanup(void *priv);
+
+#else /* CONFIG_ECM_IPA*/
+
+static inline int ecm_ipa_init(ecm_ipa_callback *ecm_ipa_rx_dp_notify,
+ ecm_ipa_callback *ecm_ipa_tx_dp_notify,
+ void **priv)
+{
+ return 0;
+}
+
+static inline int ecm_ipa_configure(u8 host_ethaddr[], u8 device_ethaddr[],
+ void *priv)
+{
+ return 0;
+}
+
+static inline int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+ void *priv)
+{
+ return 0;
+}
+
+static inline int ecm_ipa_disconnect(void *priv)
+{
+ return 0;
+}
+
+static inline void ecm_ipa_cleanup(void *priv)
+{
+
+}
+#endif /* CONFIG_ECM_IPA*/
+
+#endif /* _ECM_IPA_H_ */
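
For context, a minimal call-order sketch for the ecm_ipa API declared above, assuming a hypothetical USB ECM function driver that already holds the usb_to_ipa/ipa_to_usb pipe handles and both MAC addresses; usb_ecm_bringup and the error handling are illustrative only, not part of this patch:

#include <mach/ecm_ipa.h>

static void *ecm_priv;

/* Hypothetical bring-up order: init -> configure -> connect. */
static int usb_ecm_bringup(u8 host_mac[6], u8 dev_mac[6],
			   u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl)
{
	ecm_ipa_callback rx_notify, tx_notify;
	int rc;

	/* ecm_ipa fills in the rx/tx data-path callbacks and a priv handle;
	 * rx_notify/tx_notify would then be handed to the USB data path
	 * (not shown here).
	 */
	rc = ecm_ipa_init(&rx_notify, &tx_notify, &ecm_priv);
	if (rc)
		return rc;

	rc = ecm_ipa_configure(host_mac, dev_mac, ecm_priv);
	if (rc)
		goto cleanup;

	rc = ecm_ipa_connect(usb_to_ipa_hdl, ipa_to_usb_hdl, ecm_priv);
	if (rc)
		goto cleanup;

	return 0;

cleanup:
	ecm_ipa_cleanup(ecm_priv);
	return rc;
}
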
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
index 26a055d..5ccdf82 100644
--- a/arch/arm/mach-msm/include/mach/ipa.h
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -354,9 +354,134 @@
IPA_BRIDGE_TYPE_MAX
};
+/**
+ * enum ipa_rm_resource_name - IPA RM clients identification names
+ *
+ * Add new mapping to ipa_rm_dep_prod_index() / ipa_rm_dep_cons_index()
+ * when adding new entry to this enum.
+ */
+enum ipa_rm_resource_name {
+ IPA_RM_RESOURCE_PROD = 0,
+ IPA_RM_RESOURCE_BRIDGE_PROD = IPA_RM_RESOURCE_PROD,
+ IPA_RM_RESOURCE_A2_PROD,
+ IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_HSIC_PROD,
+ IPA_RM_RESOURCE_STD_ECM_PROD,
+ IPA_RM_RESOURCE_WWAN_0_PROD,
+ IPA_RM_RESOURCE_WWAN_1_PROD,
+ IPA_RM_RESOURCE_WWAN_2_PROD,
+ IPA_RM_RESOURCE_WWAN_3_PROD,
+ IPA_RM_RESOURCE_WWAN_4_PROD,
+ IPA_RM_RESOURCE_WWAN_5_PROD,
+ IPA_RM_RESOURCE_WWAN_6_PROD,
+ IPA_RM_RESOURCE_WWAN_7_PROD,
+ IPA_RM_RESOURCE_WLAN_PROD,
+ IPA_RM_RESOURCE_PROD_MAX,
+
+ IPA_RM_RESOURCE_A2_CONS = IPA_RM_RESOURCE_PROD_MAX,
+ IPA_RM_RESOURCE_USB_CONS,
+ IPA_RM_RESOURCE_HSIC_CONS,
+ IPA_RM_RESOURCE_MAX
+};
+
+/**
+ * enum ipa_rm_event - IPA RM events
+ *
+ * Indicate the resource state change
+ */
+enum ipa_rm_event {
+ IPA_RM_RESOURCE_GRANTED,
+ IPA_RM_RESOURCE_RELEASED
+};
+
+typedef void (*ipa_rm_notify_cb)(void *user_data,
+ enum ipa_rm_event event,
+ unsigned long data);
+/**
+ * struct ipa_rm_register_params - information needed to
+ * register IPA RM client with IPA RM
+ *
+ * @user_data: IPA RM client provided information
+ * to be passed to notify_cb callback below
+ * @notify_cb: callback which is called by resource
+ * to notify the IPA RM client about its state
+ * change. The IPA RM client is expected to perform
+ * only non-blocking operations in notify_cb and to
+ * release the notification context as soon as
+ * possible.
+ */
+struct ipa_rm_register_params {
+ void *user_data;
+ ipa_rm_notify_cb notify_cb;
+};
+
+/**
+ * struct ipa_rm_create_params - information needed to initialize
+ * the resource
+ * @name: resource name
+ * @reg_params: register parameters; the contents are ignored
+ * for a consumer resource, so NULL should be
+ * provided for consumer resources
+ * @request_resource: function which should be called to request resource,
+ * NULL should be provided for producer resource
+ * @release_resource: function which should be called to release resource,
+ * NULL should be provided for producer resource
+ *
+ * The IPA RM client is expected to perform only non-blocking operations
+ * in the request_resource and release_resource functions and to
+ * release the notification context as soon as possible.
+ */
+struct ipa_rm_create_params {
+ enum ipa_rm_resource_name name;
+ struct ipa_rm_register_params reg_params;
+ int (*request_resource)(void);
+ int (*release_resource)(void);
+};
+
+enum a2_mux_event_type {
+ A2_MUX_RECEIVE,
+ A2_MUX_WRITE_DONE
+};
+
+enum a2_mux_logical_channel_id {
+ A2_MUX_WWAN_0,
+ A2_MUX_WWAN_1,
+ A2_MUX_WWAN_2,
+ A2_MUX_WWAN_3,
+ A2_MUX_WWAN_4,
+ A2_MUX_WWAN_5,
+ A2_MUX_WWAN_6,
+ A2_MUX_WWAN_7,
+ A2_MUX_TETHERED_0,
+ A2_MUX_NUM_CHANNELS
+};
+
+typedef void (*a2_mux_notify_cb)(void *user_data,
+ enum a2_mux_event_type event,
+ unsigned long data);
+
#ifdef CONFIG_IPA
/*
+ * a2 service
+ */
+int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
+ void *user_data,
+ a2_mux_notify_cb notify_cb);
+
+int a2_mux_close_channel(enum a2_mux_logical_channel_id lcid);
+
+int a2_mux_write(enum a2_mux_logical_channel_id lcid, struct sk_buff *skb);
+
+int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid);
+
+int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid);
+
+int a2_mux_get_tethered_client_handles(enum a2_mux_logical_channel_id lcid,
+ unsigned int *clnt_cons_handle,
+ unsigned int *clnt_prod_handle);
+
+/*
* Connect / Disconnect
*/
int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
@@ -489,8 +614,78 @@
int ipa_teardown_sys_pipe(u32 clnt_hdl);
+/*
+ * Resource manager
+ */
+int ipa_rm_create_resource(struct ipa_rm_create_params *create_params);
+
+int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_notify_completion(enum ipa_rm_event event,
+ enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
+ unsigned long msecs);
+
+int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_request_resource(
+ enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_release_resource(
+ enum ipa_rm_resource_name resource_name);
+
#else /* CONFIG_IPA */
+static inline int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
+ void *user_data, a2_mux_notify_cb notify_cb)
+{
+ return -EPERM;
+}
+
+static inline int a2_mux_close_channel(enum a2_mux_logical_channel_id lcid)
+{
+ return -EPERM;
+}
+
+static inline int a2_mux_write(enum a2_mux_logical_channel_id lcid,
+ struct sk_buff *skb)
+{
+ return -EPERM;
+}
+
+static inline int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid)
+{
+ return -EPERM;
+}
+
+static inline int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid)
+{
+ return -EPERM;
+}
+
+static inline int a2_mux_get_tethered_client_handles(
+ enum a2_mux_logical_channel_id lcid, unsigned int *clnt_cons_handle,
+ unsigned int *clnt_prod_handle)
+{
+ return -EPERM;
+}
+
+
/*
* Connect / Disconnect
*/
@@ -778,6 +973,84 @@
return -EPERM;
}
+/*
+ * Resource manager
+ */
+static inline int ipa_rm_create_resource(
+ struct ipa_rm_create_params *create_params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_add_dependency(
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_delete_dependency(
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_request_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_release_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_notify_completion(enum ipa_rm_event event,
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_init(
+ enum ipa_rm_resource_name resource_name,
+ unsigned long msecs)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_destroy(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_request_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_release_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
#endif /* CONFIG_IPA*/
#endif /* _IPA_H_ */
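
A hedged sketch of how a consumer might drive the resource-manager API declared above. The choice of USB_PROD/A2_CONS, the completion-based wait, and the assumption that ipa_rm_request_resource() returns -EINPROGRESS when the grant is delivered asynchronously via notify_cb are all illustrative, not taken from this patch:

#include <linux/completion.h>
#include <mach/ipa.h>

static DECLARE_COMPLETION(usb_prod_granted);

/* notify_cb must be non-blocking, per the documentation above. */
static void usb_prod_notify_cb(void *user_data, enum ipa_rm_event event,
			       unsigned long data)
{
	if (event == IPA_RM_RESOURCE_GRANTED)
		complete(&usb_prod_granted);
}

static int usb_prod_setup(void)
{
	struct ipa_rm_create_params params = {
		.name = IPA_RM_RESOURCE_USB_PROD,
		.reg_params = {
			.notify_cb = usb_prod_notify_cb,
		},
		/* request/release callbacks stay NULL for a producer */
	};
	int rc;

	rc = ipa_rm_create_resource(&params);
	if (rc)
		return rc;

	rc = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
				   IPA_RM_RESOURCE_A2_CONS);
	if (rc)
		return rc;

	rc = ipa_rm_request_resource(IPA_RM_RESOURCE_USB_PROD);
	if (rc == -EINPROGRESS)	/* assumed: grant arrives via notify_cb */
		wait_for_completion(&usb_prod_granted);
	else if (rc)
		return rc;

	return 0;
}
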
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 4f475fe..ebb096e 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -81,7 +81,7 @@
#define MSM_LPASS_CLK_CTL_BASE IOMEM(0xFA015000) /* 4K */
#define MSM_HFPLL_BASE IOMEM(0xFA016000) /* 4K */
#define MSM_TLMM_BASE IOMEM(0xFA017000) /* 16K */
-#define MSM_SHARED_RAM_BASE IOMEM(0xFA300000) /* 2M */
+#define MSM_SHARED_RAM_BASE IOMEM(0xFA400000) /* 2M */
#define MSM_SIC_NON_SECURE_BASE IOMEM(0xFA600000) /* 64K */
#define MSM_HDMI_BASE IOMEM(0xFA800000) /* 4K */
#define MSM_RPM_BASE IOMEM(0xFA801000) /* 4K */
diff --git a/arch/arm/mach-msm/include/mach/msm_ipc_logging.h b/arch/arm/mach-msm/include/mach/msm_ipc_logging.h
index ec9fdb0..b675c00 100644
--- a/arch/arm/mach-msm/include/mach/msm_ipc_logging.h
+++ b/arch/arm/mach-msm/include/mach/msm_ipc_logging.h
@@ -113,7 +113,7 @@
* @ilctxt: Debug Log Context created using ipc_log_context_create()
* @fmt: Data specified using format specifiers
*/
-int ipc_log_string(void *ilctxt, const char *fmt, ...);
+int ipc_log_string(void *ilctxt, const char *fmt, ...) __printf(2, 3);
/*
* Print a string to decode context.
diff --git a/arch/arm/mach-msm/include/mach/qpnp-int.h b/arch/arm/mach-msm/include/mach/qpnp-int.h
index 8818bf2..2b86216 100644
--- a/arch/arm/mach-msm/include/mach/qpnp-int.h
+++ b/arch/arm/mach-msm/include/mach/qpnp-int.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -57,6 +57,14 @@
struct qpnp_local_int *li_cb);
/**
+ * qpnpint_unregister_controller() - Unregister local interrupt callbacks
+ *
+ * Used by the PMIC Arbiter driver or equivalent to unregister
+ * callbacks for interrupt events.
+ */
+int qpnpint_unregister_controller(struct device_node *node);
+
+/**
* qpnpint_handle_irq - Main interrupt handling routine
*
* Pass a PMIC Arbiter interrupt to Linux.
@@ -78,6 +86,12 @@
return -ENXIO;
}
+static inline int qpnpint_unregister_controller(struct device_node *node)
+{
+ return -ENXIO;
+}
+
static inline int qpnpint_handle_irq(struct spmi_controller *spmi_ctrl,
struct qpnp_irq_spec *spec)
{
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index 02272bc..eb44c40 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -19,15 +19,15 @@
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <asm/sizes.h>
#include <asm/page.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/socinfo.h>
#include <mach/msm_subsystem_map.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
struct msm_iova_data {
struct rb_node node;
diff --git a/arch/arm/mach-msm/ipc_socket.c b/arch/arm/mach-msm/ipc_socket.c
index d31af84..2cec5c5 100644
--- a/arch/arm/mach-msm/ipc_socket.c
+++ b/arch/arm/mach-msm/ipc_socket.c
@@ -400,6 +400,14 @@
if (server_arg.num_entries_in_array) {
srv_info_sz = server_arg.num_entries_in_array *
sizeof(*srv_info);
+ if ((srv_info_sz / sizeof(*srv_info)) !=
+ server_arg.num_entries_in_array) {
+ pr_err("%s: Integer Overflow %d * %d\n",
+ __func__, sizeof(*srv_info),
+ server_arg.num_entries_in_array);
+ ret = -EINVAL;
+ break;
+ }
srv_info = kmalloc(srv_info_sz, GFP_KERNEL);
if (!srv_info) {
ret = -ENOMEM;
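
The added check above guards the size multiplication before kmalloc(); a generic sketch of the same divide-back overflow test, with an illustrative element type:

#include <linux/slab.h>
#include <linux/types.h>

struct srv_elem {		/* illustrative element type */
	u32 node;
	u32 port;
};

static void *alloc_srv_array(unsigned int nmemb)
{
	size_t sz = nmemb * sizeof(struct srv_elem);

	/* Divide back to detect wrap-around before allocating. */
	if (nmemb && (sz / sizeof(struct srv_elem)) != nmemb)
		return NULL;

	return kmalloc(sz, GFP_KERNEL);
}
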
diff --git a/arch/arm/mach-msm/krait-regulator.c b/arch/arm/mach-msm/krait-regulator.c
index 0c1e279..dc0b755 100644
--- a/arch/arm/mach-msm/krait-regulator.c
+++ b/arch/arm/mach-msm/krait-regulator.c
@@ -155,6 +155,7 @@
bool pfm_mode;
int pmic_min_uV_for_retention;
bool retention_enabled;
+ bool use_phase_switching;
};
static struct pmic_gang_vreg *the_gang;
@@ -390,13 +391,17 @@
return 0;
}
-static int set_pmic_gang_phases(int phase_count)
+static int set_pmic_gang_phases(struct pmic_gang_vreg *pvreg, int phase_count)
{
- /*
- * TODO : spm writes for phase control,
- * pmic phase control is not working yet
- */
- return 0;
+ pr_debug("programming phase_count = %d\n", phase_count);
+ if (pvreg->use_phase_switching)
+ /*
+ * note the PMIC sets the phase count to one more than
+ * the value in the register - hence subtract 1 from it
+ */
+ return msm_spm_apcs_set_phase(phase_count - 1);
+ else
+ return 0;
}
static int set_pmic_gang_voltage(struct pmic_gang_vreg *pvreg, int uV)
@@ -547,14 +552,19 @@
int load_uA)
{
struct pmic_gang_vreg *pvreg = from->pvreg;
- int phase_count = DIV_ROUND_UP(load_uA, LOAD_PER_PHASE) - 1;
+ int phase_count = DIV_ROUND_UP(load_uA, LOAD_PER_PHASE);
int rc = 0;
- if (phase_count < 0)
- phase_count = 0;
+ if (phase_count <= 0)
+ phase_count = 1;
+
+ /* Increase phases if it is less than the number of cpus online */
+ if (phase_count < num_online_cpus()) {
+ phase_count = num_online_cpus();
+ }
if (phase_count != pvreg->pmic_phase_count) {
- rc = set_pmic_gang_phases(phase_count);
+ rc = set_pmic_gang_phases(pvreg, phase_count);
if (rc < 0) {
dev_err(&from->rdev->dev,
"%s failed set phase %d rc = %d\n",
@@ -577,32 +587,6 @@
return rc;
}
-static int __devinit pvreg_init(struct platform_device *pdev)
-{
- struct pmic_gang_vreg *pvreg;
-
- pvreg = devm_kzalloc(&pdev->dev,
- sizeof(struct pmic_gang_vreg), GFP_KERNEL);
- if (!pvreg) {
- pr_err("kzalloc failed.\n");
- return -ENOMEM;
- }
-
- pvreg->name = "pmic_gang";
- pvreg->pmic_vmax_uV = PMIC_VOLTAGE_MIN;
- pvreg->pmic_phase_count = 1;
- pvreg->retention_enabled = true;
- pvreg->pmic_min_uV_for_retention = INT_MAX;
-
- mutex_init(&pvreg->krait_power_vregs_lock);
- INIT_LIST_HEAD(&pvreg->krait_power_vregs);
- the_gang = pvreg;
-
- pr_debug("name=%s inited\n", pvreg->name);
-
- return 0;
-}
-
static int krait_power_get_voltage(struct regulator_dev *rdev)
{
struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
@@ -925,24 +909,6 @@
int ldo_delta_uV;
int cpu_num;
- /* Initialize the pmic gang if it hasn't been initialized already */
- if (the_gang == NULL) {
- rc = pvreg_init(pdev);
- if (rc < 0) {
- dev_err(&pdev->dev,
- "failed to init pmic gang rc = %d\n", rc);
- return rc;
- }
- /* global initializtion */
- glb_init(pdev);
- }
-
- if (dent == NULL) {
- dent = debugfs_create_dir(KRAIT_REGULATOR_DRIVER_NAME, NULL);
- debugfs_create_file("retention_uV",
- 0644, dent, the_gang, &retention_fops);
- }
-
if (pdev->dev.of_node) {
/* Get init_data from device tree. */
init_data = of_get_regulator_init_data(&pdev->dev,
@@ -1139,14 +1105,93 @@
},
};
+static struct of_device_id krait_pdn_match_table[] = {
+ { .compatible = "qcom,krait-pdn", },
+ {}
+};
+
+static int __devinit krait_pdn_probe(struct platform_device *pdev)
+{
+ int rc;
+ bool use_phase_switching = false;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct pmic_gang_vreg *pvreg;
+
+ if (!dev->of_node) {
+ dev_err(dev, "device tree information missing\n");
+ return -ENODEV;
+ }
+
+ use_phase_switching = of_property_read_bool(node,
+ "qcom,use-phase-switching");
+ pvreg = devm_kzalloc(&pdev->dev,
+ sizeof(struct pmic_gang_vreg), GFP_KERNEL);
+ if (!pvreg) {
+ pr_err("kzalloc failed.\n");
+ return -ENOMEM;
+ }
+
+ pvreg->name = "pmic_gang";
+ pvreg->pmic_vmax_uV = PMIC_VOLTAGE_MIN;
+ pvreg->pmic_phase_count = -EINVAL;
+ pvreg->retention_enabled = true;
+ pvreg->pmic_min_uV_for_retention = INT_MAX;
+ pvreg->use_phase_switching = use_phase_switching;
+
+ mutex_init(&pvreg->krait_power_vregs_lock);
+ INIT_LIST_HEAD(&pvreg->krait_power_vregs);
+ the_gang = pvreg;
+
+ pr_debug("name=%s inited\n", pvreg->name);
+
+ /* global initialization */
+ glb_init(pdev);
+
+ rc = of_platform_populate(node, NULL, NULL, dev);
+ if (rc) {
+ dev_err(dev, "failed to add child nodes, rc=%d\n", rc);
+ return rc;
+ }
+
+ dent = debugfs_create_dir(KRAIT_REGULATOR_DRIVER_NAME, NULL);
+ debugfs_create_file("retention_uV",
+ 0644, dent, the_gang, &retention_fops);
+ return 0;
+}
+
+static int __devexit krait_pdn_remove(struct platform_device *pdev)
+{
+ the_gang = NULL;
+ debugfs_remove_recursive(dent);
+ return 0;
+}
+
+static struct platform_driver krait_pdn_driver = {
+ .probe = krait_pdn_probe,
+ .remove = __devexit_p(krait_pdn_remove),
+ .driver = {
+ .name = KRAIT_PDN_DRIVER_NAME,
+ .of_match_table = krait_pdn_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
int __init krait_power_init(void)
{
- return platform_driver_register(&krait_power_driver);
+ int rc = platform_driver_register(&krait_power_driver);
+ if (rc) {
+ pr_err("failed to add %s driver rc = %d\n",
+ KRAIT_REGULATOR_DRIVER_NAME, rc);
+ return rc;
+ }
+ return platform_driver_register(&krait_pdn_driver);
}
static void __exit krait_power_exit(void)
{
platform_driver_unregister(&krait_power_driver);
+ platform_driver_unregister(&krait_pdn_driver);
}
module_exit(krait_power_exit);
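
A worked sketch of the phase-count arithmetic introduced above; the numbers and the helper name demo_phase_count are illustrative only:

#include <linux/kernel.h>

/*
 * Mirrors the new selection logic: a load-based count, floored at the number
 * of online CPUs, with (count - 1) written to the PMIC since the PMIC applies
 * one phase more than the register value.
 */
static int demo_phase_count(int load_uA, int load_per_phase, int online_cpus)
{
	int phase_count = DIV_ROUND_UP(load_uA, load_per_phase);

	if (phase_count <= 0)
		phase_count = 1;
	if (phase_count < online_cpus)
		phase_count = online_cpus;

	/*
	 * e.g. load_uA = 3300000, load_per_phase = 1000000, 2 CPUs online
	 * -> DIV_ROUND_UP() yields 4, already >= 2, so 4 phases are used
	 * and the register would be written with 4 - 1 = 3.
	 */
	return phase_count;
}
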
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index 90cb49e..806581d 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -1,7 +1,7 @@
/* arch/arm/mach-msm/memory.c
*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -507,11 +507,10 @@
*/
void adjust_meminfo(unsigned long start, unsigned long size)
{
- int i, j;
+ int i;
- for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
- struct membank *bank = &meminfo.bank[j];
- *bank = meminfo.bank[i];
+ for (i = 0; i < meminfo.nr_banks; i++) {
+ struct membank *bank = &meminfo.bank[i];
if (((start + size) <= (bank->start + bank->size)) &&
(start >= bank->start)) {
@@ -519,15 +518,15 @@
(meminfo.nr_banks - i) * sizeof(*bank));
meminfo.nr_banks++;
i++;
- bank[1].size -= (start + size);
- bank[1].start = (start + size);
- bank[1].highmem = 0;
- j++;
+
bank->size = start - bank->start;
+ bank[1].start = (start + size);
+ bank[1].size -= (bank->size + size);
+ bank[1].highmem = 0;
}
- j++;
}
}
+
unsigned long get_ddr_size(void)
{
unsigned int i;
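
A worked example of the rewritten bank split in adjust_meminfo(), with assumed addresses:

/*
 * Assume one bank with start = 0x80000000, size = 0x10000000 and a carve-out
 * of start = 0x88000000, size = 0x00800000.  After the memmove duplicates the
 * bank, the split yields:
 *   bank[0].size  = 0x88000000 - 0x80000000              = 0x08000000
 *   bank[1].start = 0x88000000 + 0x00800000              = 0x88800000
 *   bank[1].size  = 0x10000000 - (0x08000000+0x00800000) = 0x07800000
 * so the two banks cover exactly the original range minus the carved-out hole.
 */
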
diff --git a/arch/arm/mach-msm/msm_ipc_router_security.c b/arch/arm/mach-msm/msm_ipc_router_security.c
index 756e24e..69efd13 100644
--- a/arch/arm/mach-msm/msm_ipc_router_security.c
+++ b/arch/arm/mach-msm/msm_ipc_router_security.c
@@ -37,7 +37,7 @@
uint32_t instance_id;
unsigned reserved;
int num_group_info;
- int *group_id;
+ gid_t *group_id;
};
static DEFINE_MUTEX(security_rules_lock);
@@ -98,6 +98,7 @@
struct config_sec_rules_args sec_rules_arg;
struct security_rule *rule, *temp_rule;
int key;
+ int group_info_sz;
int ret;
if (current_euid())
@@ -111,14 +112,20 @@
if (sec_rules_arg.num_group_info <= 0)
return -EINVAL;
+ group_info_sz = sec_rules_arg.num_group_info * sizeof(gid_t);
+ if ((group_info_sz / sizeof(gid_t)) != sec_rules_arg.num_group_info) {
+ pr_err("%s: Integer Overflow %d * %d\n", __func__,
+ sizeof(gid_t), sec_rules_arg.num_group_info);
+ return -EINVAL;
+ }
+
rule = kzalloc(sizeof(struct security_rule), GFP_KERNEL);
if (!rule) {
pr_err("%s: security_rule alloc failed\n", __func__);
return -ENOMEM;
}
- rule->group_id = kzalloc((sec_rules_arg.num_group_info * sizeof(int)),
- GFP_KERNEL);
+ rule->group_id = kzalloc(group_info_sz, GFP_KERNEL);
if (!rule->group_id) {
pr_err("%s: group_id alloc failed\n", __func__);
kfree(rule);
@@ -131,7 +138,7 @@
rule->num_group_info = sec_rules_arg.num_group_info;
ret = copy_from_user(rule->group_id,
((void *)(arg + sizeof(sec_rules_arg))),
- (rule->num_group_info * sizeof(uint32_t)));
+ group_info_sz);
if (ret) {
kfree(rule->group_id);
kfree(rule);
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index affb451..fc9a0fa 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -656,7 +656,8 @@
void pil_shutdown(struct pil_desc *desc)
{
struct pil_priv *priv = desc->priv;
- desc->ops->shutdown(desc);
+ if (desc->ops->shutdown)
+ desc->ops->shutdown(desc);
if (proxy_timeout_ms == 0 && desc->ops->proxy_unvote)
desc->ops->proxy_unvote(desc);
else
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index c1d4ab4..cd6aaf4 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -462,7 +462,7 @@
if (!drv->is_loadable)
return 0;
- /* MBA doesn't support shutdown */
+ pil_shutdown(&drv->desc);
pil_shutdown(&drv->q6->desc);
return 0;
}
@@ -578,7 +578,7 @@
if (!drv->is_loadable)
return;
- /* MBA doesn't support shutdown */
+ pil_shutdown(&drv->desc);
pil_shutdown(&drv->q6->desc);
}
diff --git a/arch/arm/mach-msm/pm.h b/arch/arm/mach-msm/pm.h
index af0744c..c77304d 100644
--- a/arch/arm/mach-msm/pm.h
+++ b/arch/arm/mach-msm/pm.h
@@ -65,6 +65,12 @@
uint32_t modified_time_us;
};
+struct msm_pm_sleep_status_data {
+ void *base_addr;
+ uint32_t cpu_offset;
+ uint32_t mask;
+};
+
struct msm_pm_platform_data {
u8 idle_supported; /* Allow device to enter mode during idle */
u8 suspend_supported; /* Allow device to enter mode during suspend */
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
index 11b1405..ff7ba33 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -327,6 +327,7 @@
return usc;
fail:
+ kfree(p_mem_handle);
q6usm_us_client_free(usc);
return NULL;
fail_session:
diff --git a/arch/arm/mach-msm/remote_spinlock.c b/arch/arm/mach-msm/remote_spinlock.c
index 4e09a9e..86b068c 100644
--- a/arch/arm/mach-msm/remote_spinlock.c
+++ b/arch/arm/mach-msm/remote_spinlock.c
@@ -196,6 +196,8 @@
/* end swp implementation --------------------------------------------------- */
/* ldrex implementation ----------------------------------------------------- */
+static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";
+
static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
{
unsigned long tmp;
@@ -267,7 +269,7 @@
static void *hw_mutex_reg_base;
static DEFINE_MUTEX(hw_map_init_lock);
-static char *compatible_string = "qcom,ipc-spinlock";
+static char *sfpb_compatible_string = "qcom,ipc-spinlock-sfpb";
static int init_hw_mutex(struct device_node *node)
{
@@ -294,7 +296,7 @@
{
struct device_node *node;
- node = of_find_compatible_node(NULL, NULL, compatible_string);
+ node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
if (node) {
init_hw_mutex(node);
} else {
@@ -397,6 +399,23 @@
}
+static int dt_node_is_valid(const struct device_node *node)
+{
+ const char *status;
+ int statlen;
+
+ status = of_get_property(node, "status", &statlen);
+ if (status == NULL)
+ return 1;
+
+ if (statlen > 0) {
+ if (!strcmp(status, "okay") || !strcmp(status, "ok"))
+ return 1;
+ }
+
+ return 0;
+}
+
static void initialize_ops(void)
{
struct device_node *node;
@@ -435,23 +454,42 @@
is_hw_lock_type = 1;
break;
case AUTO_MODE:
- node = of_find_compatible_node(NULL, NULL, compatible_string);
- if (node) {
+ /*
+ * of_find_compatible_node() returns a valid pointer even if
+ * the status property is "disabled", so the validity needs
+ * to be checked
+ */
+ node = of_find_compatible_node(NULL, NULL,
+ sfpb_compatible_string);
+ if (node && dt_node_is_valid(node)) {
current_ops.lock = __raw_remote_sfpb_spin_lock;
current_ops.unlock = __raw_remote_sfpb_spin_unlock;
current_ops.trylock = __raw_remote_sfpb_spin_trylock;
current_ops.release = __raw_remote_gen_spin_release;
current_ops.owner = __raw_remote_gen_spin_owner;
is_hw_lock_type = 1;
- } else {
+ break;
+ }
+
+ node = of_find_compatible_node(NULL, NULL,
+ ldrex_compatible_string);
+ if (node && dt_node_is_valid(node)) {
current_ops.lock = __raw_remote_ex_spin_lock;
current_ops.unlock = __raw_remote_ex_spin_unlock;
current_ops.trylock = __raw_remote_ex_spin_trylock;
current_ops.release = __raw_remote_gen_spin_release;
current_ops.owner = __raw_remote_gen_spin_owner;
is_hw_lock_type = 0;
- pr_warn("Falling back to LDREX remote spinlock implementation");
+ break;
}
+
+ current_ops.lock = __raw_remote_ex_spin_lock;
+ current_ops.unlock = __raw_remote_ex_spin_unlock;
+ current_ops.trylock = __raw_remote_ex_spin_trylock;
+ current_ops.release = __raw_remote_gen_spin_release;
+ current_ops.owner = __raw_remote_gen_spin_owner;
+ is_hw_lock_type = 0;
+ pr_warn("Falling back to LDREX remote spinlock implementation");
break;
default:
BUG();
diff --git a/arch/arm/mach-msm/smp2p_debug.c b/arch/arm/mach-msm/smp2p_debug.c
index 1a5c96e..a493cbe 100644
--- a/arch/arm/mach-msm/smp2p_debug.c
+++ b/arch/arm/mach-msm/smp2p_debug.c
@@ -233,7 +233,7 @@
if (in_ptr) {
in_entries = (struct smp2p_entry_v1 *)((void *)in_ptr +
sizeof(struct smp2p_smem));
- in_valid = SMP2P_GET_ENT_VALID(out_ptr->valid_total_ent);
+ in_valid = SMP2P_GET_ENT_VALID(in_ptr->valid_total_ent);
}
for (entry = 0; out_entries || in_entries; ++entry) {
diff --git a/drivers/Kconfig b/drivers/Kconfig
index a73d713..adead10 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -150,4 +150,6 @@
source "drivers/coresight/Kconfig"
+source "drivers/bif/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index f461e83..d55b035 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -142,3 +142,5 @@
obj-$(CONFIG_MOBICORE_SUPPORT) += gud/
obj-$(CONFIG_CORESIGHT) += coresight/
+
+obj-$(CONFIG_BIF) += bif/
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 81409b0..cd341e8 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -24,6 +24,9 @@
#include <linux/memblock.h>
#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
@@ -43,7 +46,33 @@
unsigned long *bitmap;
};
-struct cma *dma_contiguous_default_area;
+static DEFINE_MUTEX(cma_mutex);
+
+struct cma *dma_contiguous_def_area;
+phys_addr_t dma_contiguous_def_base;
+
+static struct cma_area {
+ phys_addr_t base;
+ unsigned long size;
+ struct cma *cma;
+} cma_areas[MAX_CMA_AREAS] __initdata;
+static unsigned cma_area_count __initdata;
+
+
+static struct cma_map {
+ phys_addr_t base;
+ struct device *dev;
+} cma_maps[MAX_CMA_AREAS] __initdata;
+static unsigned cma_map_count __initdata;
+
+static struct cma *cma_get_area(phys_addr_t base)
+{
+ int i;
+ for (i = 0; i < cma_area_count; i++)
+ if (cma_areas[i].base == base)
+ return cma_areas[i].cma;
+ return NULL;
+}
#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
@@ -61,8 +90,8 @@
* Users, who want to set the size of global CMA area for their system
* should use cma= kernel parameter.
*/
-static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
-static long size_cmdline = -1;
+static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static phys_addr_t size_cmdline = -1;
static int __init early_cma(char *p)
{
@@ -74,7 +103,7 @@
#ifdef CONFIG_CMA_SIZE_PERCENTAGE
-static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
struct memblock_region *reg;
unsigned long total_pages = 0;
@@ -92,52 +121,13 @@
#else
-static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
return 0;
}
#endif
-/**
- * dma_contiguous_reserve() - reserve area for contiguous memory handling
- * @limit: End address of the reserved memory (optional, 0 for any).
- *
- * This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
- * has been activated and all other subsystems have already allocated/reserved
- * memory.
- */
-void __init dma_contiguous_reserve(phys_addr_t limit)
-{
- unsigned long selected_size = 0;
-
- pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
-
- if (size_cmdline != -1) {
- selected_size = size_cmdline;
- } else {
-#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
- selected_size = size_bytes;
-#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
- selected_size = cma_early_percent_memory();
-#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
- selected_size = min(size_bytes, cma_early_percent_memory());
-#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
- selected_size = max(size_bytes, cma_early_percent_memory());
-#endif
- }
-
- if (selected_size) {
- pr_debug("%s: reserving %ld MiB for global area\n", __func__,
- selected_size / SZ_1M);
-
- dma_declare_contiguous(NULL, selected_size, 0, limit);
- }
-};
-
-static DEFINE_MUTEX(cma_mutex);
-
static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
unsigned long pfn = base_pfn;
@@ -194,55 +184,105 @@
return ERR_PTR(ret);
}
-static struct cma_reserved {
- phys_addr_t start;
- unsigned long size;
- struct device *dev;
-} cma_reserved[MAX_CMA_AREAS] __initdata;
-static unsigned cma_reserved_count __initdata;
+/*****************************************************************************/
-static int __init cma_init_reserved_areas(void)
+#ifdef CONFIG_OF
+int __init cma_fdt_scan(unsigned long node, const char *uname,
+ int depth, void *data)
{
- struct cma_reserved *r = cma_reserved;
- unsigned i = cma_reserved_count;
+ phys_addr_t base, size;
+ unsigned long len;
+ __be32 *prop;
- pr_debug("%s()\n", __func__);
+ if (strncmp(uname, "region@", 7) != 0 || depth != 2 ||
+ !of_get_flat_dt_prop(node, "contiguous-region", NULL))
+ return 0;
- for (; i; --i, ++r) {
- struct cma *cma;
- cma = cma_create_area(PFN_DOWN(r->start),
- r->size >> PAGE_SHIFT);
- if (!IS_ERR(cma))
- dev_set_cma_area(r->dev, cma);
- }
+ prop = of_get_flat_dt_prop(node, "reg", &len);
+ if (!prop || (len != 2 * sizeof(unsigned long)))
+ return 0;
+
+ base = be32_to_cpu(prop[0]);
+ size = be32_to_cpu(prop[1]);
+
+ pr_info("Found %s, memory base %lx, size %ld MiB\n", uname,
+ (unsigned long)base, (unsigned long)size / SZ_1M);
+ dma_contiguous_reserve_area(size, &base, 0);
+
return 0;
}
-core_initcall(cma_init_reserved_areas);
+#endif
/**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- * for particular device
- * @dev: Pointer to device structure.
- * @size: Size of the reserved memory.
- * @base: Start address of the reserved memory (optional, 0 for any).
+ * dma_contiguous_reserve() - reserve area for contiguous memory handling
* @limit: End address of the reserved memory (optional, 0 for any).
*
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. It reserves contiguous areas for global, device independent
+ * allocations and (optionally) all areas defined in device tree structures.
*/
-int __init dma_declare_contiguous(struct device *dev, unsigned long size,
- phys_addr_t base, phys_addr_t limit)
+void __init dma_contiguous_reserve(phys_addr_t limit)
{
- struct cma_reserved *r = &cma_reserved[cma_reserved_count];
- unsigned long alignment;
+ phys_addr_t sel_size = 0;
+
+ pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+ if (size_cmdline != -1) {
+ sel_size = size_cmdline;
+ } else {
+#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+ sel_size = size_bytes;
+#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
+ sel_size = cma_early_percent_memory();
+#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
+ sel_size = min(size_bytes, cma_early_percent_memory());
+#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
+ sel_size = max(size_bytes, cma_early_percent_memory());
+#endif
+ }
+
+ if (sel_size) {
+ phys_addr_t base = 0;
+ pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+ (unsigned long)sel_size / SZ_1M);
+
+ if (dma_contiguous_reserve_area(sel_size, &base, limit) == 0)
+ dma_contiguous_def_base = base;
+ }
+#ifdef CONFIG_OF
+ of_scan_flat_dt(cma_fdt_scan, NULL);
+#endif
+};
+
+/**
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes),
+ * @base: Pointer to the base address of the reserved area, also used to return
+ * base address of the actually reserved area, optional, use pointer to
+ * 0 for any
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows to create custom reserved areas for specific
+ * devices.
+ */
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
+ phys_addr_t limit)
+{
+ phys_addr_t base = *res_base;
+ phys_addr_t alignment;
+ int ret = 0;
pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
(unsigned long)size, (unsigned long)base,
(unsigned long)limit);
/* Sanity checks */
- if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+ if (cma_area_count == ARRAY_SIZE(cma_areas)) {
pr_err("Not enough slots for CMA reserved regions!\n");
return -ENOSPC;
}
@@ -251,7 +291,7 @@
return -EINVAL;
/* Sanitise input arguments */
- alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+ alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
base = ALIGN(base, alignment);
size = ALIGN(size, alignment);
limit &= ~(alignment - 1);
@@ -260,7 +300,7 @@
if (base) {
if (memblock_is_region_reserved(base, size) ||
memblock_reserve(base, size) < 0) {
- base = -EBUSY;
+ ret = -EBUSY;
goto err;
}
} else {
@@ -270,11 +310,7 @@
*/
phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
if (!addr) {
- base = -ENOMEM;
- goto err;
- } else if (addr + size > ~(unsigned long)0) {
- memblock_free(addr, size);
- base = -EINVAL;
+ ret = -ENOMEM;
goto err;
} else {
base = addr;
@@ -285,22 +321,106 @@
* Each reserved area must be initialised later, when more kernel
* subsystems (like slab allocator) are available.
*/
- r->start = base;
- r->size = size;
- r->dev = dev;
- cma_reserved_count++;
- pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+ cma_areas[cma_area_count].base = base;
+ cma_areas[cma_area_count].size = size;
+ cma_area_count++;
+ *res_base = base;
+
+ pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
(unsigned long)base);
/* Architecture specific contiguous memory fixup. */
dma_contiguous_early_fixup(base, size);
return 0;
err:
- pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
- return base;
+ pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
+ return ret;
}
/**
+ * dma_contiguous_add_device() - add device to custom contiguous reserved area
+ * @dev: Pointer to device structure.
+ * @base: Base address of the reserved area, as returned by the
+ * dma_contiguous_reserve_area() function
+ *
+ * This function assigns the given device to the contiguous memory area
+ * reserved earlier by dma_contiguous_reserve_area() function.
+ */
+int __init dma_contiguous_add_device(struct device *dev, phys_addr_t base)
+{
+ if (cma_map_count == ARRAY_SIZE(cma_maps)) {
+ pr_err("Not enough slots for CMA reserved regions!\n");
+ return -ENOSPC;
+ }
+ cma_maps[cma_map_count].dev = dev;
+ cma_maps[cma_map_count].base = base;
+ cma_map_count++;
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static void cma_assign_device_from_dt(struct device *dev)
+{
+ struct device_node *node;
+ struct cma *cma;
+ u32 value;
+
+ node = of_parse_phandle(dev->of_node, "linux,contiguous-region", 0);
+ if (!node)
+ return;
+ if (of_property_read_u32(node, "reg", &value) && !value)
+ return;
+ cma = cma_get_area(value);
+ if (!cma)
+ return;
+
+ dev_set_cma_area(dev, cma);
+ pr_info("Assigned CMA region at %lx to %s device\n", (unsigned long)value, dev_name(dev));
+}
+
+static int cma_device_init_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct device *dev = data;
+ if (event == BUS_NOTIFY_ADD_DEVICE && dev->of_node)
+ cma_assign_device_from_dt(dev);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cma_dev_init_nb = {
+ .notifier_call = cma_device_init_notifier_call,
+};
+#endif
+
+static int __init cma_init_reserved_areas(void)
+{
+ struct cma *cma;
+ int i;
+
+ for (i = 0; i < cma_area_count; i++) {
+ phys_addr_t base = PFN_DOWN(cma_areas[i].base);
+ unsigned int count = cma_areas[i].size >> PAGE_SHIFT;
+
+ cma = cma_create_area(base, count);
+ if (!IS_ERR(cma))
+ cma_areas[i].cma = cma;
+ }
+
+ dma_contiguous_def_area = cma_get_area(dma_contiguous_def_base);
+
+ for (i = 0; i < cma_map_count; i++) {
+ cma = cma_get_area(cma_maps[i].base);
+ dev_set_cma_area(cma_maps[i].dev, cma);
+ }
+
+#ifdef CONFIG_OF
+ bus_register_notifier(&platform_bus_type, &cma_dev_init_nb);
+#endif
+ return 0;
+}
+core_initcall(cma_init_reserved_areas);
+
+/**
* dma_alloc_from_contiguous() - allocate pages from contiguous area
* @dev: Pointer to device for which the allocation is performed.
* @count: Requested number of pages.
@@ -316,6 +436,7 @@
{
unsigned long mask, pfn, pageno, start = 0;
struct cma *cma = dev_get_cma_area(dev);
+ struct page *page = NULL;
int ret;
int tries = 0;
@@ -338,18 +459,17 @@
for (;;) {
pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
start, count, mask);
- if (pageno >= cma->count) {
- ret = -ENOMEM;
- goto error;
- }
+ if (pageno >= cma->count)
+ break;
pfn = cma->base_pfn + pageno;
ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
if (ret == 0) {
bitmap_set(cma->bitmap, pageno, count);
+ page = pfn_to_page(pfn);
break;
} else if (ret != -EBUSY) {
- goto error;
+ break;
}
tries++;
trace_dma_alloc_contiguous_retry(tries);
@@ -361,12 +481,8 @@
}
mutex_unlock(&cma_mutex);
-
- pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
- return pfn_to_page(pfn);
-error:
- mutex_unlock(&cma_mutex);
- return NULL;
+ pr_debug("%s(): returned %p\n", __func__, page);
+ return page;
}
/**
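
A hedged sketch of how board code could use the two new entry points; foo_device is a hypothetical consumer, the 16 MiB size is arbitrary, and it is assumed here that the prototypes are exported through <linux/dma-contiguous.h>:

#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/sizes.h>

extern struct platform_device foo_device;	/* hypothetical consumer */

static phys_addr_t foo_cma_base;		/* 0 lets memblock pick a base */

void __init board_foo_reserve(void)
{
	/* Reserve a 16 MiB region anywhere (limit 0), then bind it to foo. */
	if (dma_contiguous_reserve_area(SZ_16M, &foo_cma_base, 0) == 0)
		dma_contiguous_add_device(&foo_device.dev, foo_cma_base);
}
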
diff --git a/drivers/bif/Kconfig b/drivers/bif/Kconfig
new file mode 100644
index 0000000..502b92b
--- /dev/null
+++ b/drivers/bif/Kconfig
@@ -0,0 +1,12 @@
+#
+# BIF framework and drivers
+#
+menuconfig BIF
+ bool "MIPI-BIF support"
+ select CRC_CCITT
+ select BITREVERSE
+ help
+ MIPI-BIF (battery interface) is a one-wire serial interface between a
+ host master device and one or more slave devices which are located in
+ a battery pack or on the host itself. Enabling this option allows
+ BIF consumer drivers to issue transactions via BIF controller drivers.
diff --git a/drivers/bif/Makefile b/drivers/bif/Makefile
new file mode 100644
index 0000000..02528c1
--- /dev/null
+++ b/drivers/bif/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for kernel BIF framework.
+#
+obj-$(CONFIG_BIF) += bif-core.o
diff --git a/drivers/bif/bif-core.c b/drivers/bif/bif-core.c
new file mode 100644
index 0000000..e11e6ba4
--- /dev/null
+++ b/drivers/bif/bif-core.c
@@ -0,0 +1,2934 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitrev.h>
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/bif/consumer.h>
+#include <linux/bif/driver.h>
+
+/**
+ * struct bif_ctrl_dev - holds controller device specific information
+ * @list: Doubly-linked list parameter linking to other
+ * BIF controllers registered in the system
+ * @desc: Description structure for this BIF controller
+ * @mutex: Mutex lock that is used to ensure mutual
+ * exclusion between transactions performed on the
+ * BIF bus for this controller
+ * @ctrl_dev: Device pointer to the BIF controller device
+ * @driver_data: Private data used by the BIF controller
+ * @selected_sdev: Slave device that is currently selected on
+ * the BIF bus of this controller
+ * @bus_change_notifier: Head of a notifier list containing notifier
+ * blocks that are notified when the battery
+ * presence changes
+ * @enter_irq_mode_work: Work task that is scheduled after a transaction
+ * completes when there are consumers that are
+ * actively monitoring BIF slave interrupts
+ * @irq_count: This is a count of the total number of BIF slave
+ * interrupts that are currently being monitored
+ * for the BIF slaves connected to this BIF
+ * controller
+ * @irq_mode_delay_jiffies: Number of jiffies to wait before scheduling the
+ * enter IRQ mode task. Using a larger value
+ * helps to improve the performance of BIF
+ * consumers that perform many BIF transactions.
+ * Using a smaller value reduces the latency of
+ * BIF slave interrupts.
+ * @battery_present: Cached value of the battery presence. This is
+ * used to filter out spurious presence update
+ * calls when the battery presence state has not
+ * changed.
+ */
+struct bif_ctrl_dev {
+ struct list_head list;
+ struct bif_ctrl_desc *desc;
+ struct mutex mutex;
+ struct device *ctrl_dev;
+ void *driver_data;
+ struct bif_slave_dev *selected_sdev;
+ struct blocking_notifier_head bus_change_notifier;
+ struct delayed_work enter_irq_mode_work;
+ int irq_count;
+ int irq_mode_delay_jiffies;
+ bool battery_present;
+};
+
+/**
+ * struct bif_ctrl - handle used by BIF consumers for bus oriented BIF
+ * operations
+ * @bdev: Pointer to BIF controller device
+ * @exclusive_lock: Flag which indicates that the BIF consumer responsible
+ * for this handle has locked the BIF bus of this
+ * controller. BIF transactions from other consumers are
+ * blocked until the bus is unlocked.
+ */
+struct bif_ctrl {
+ struct bif_ctrl_dev *bdev;
+ bool exclusive_lock;
+};
+
+/**
+ * struct bif_slave_dev - holds BIF slave device information
+ * @list: Doubly-linked list parameter linking to other
+ * BIF slaves that have been enumerated
+ * @bdev: Pointer to the BIF controller device that this
+ * slave is physically connected to
+ * @slave_addr: 8-bit BIF DEV_ADR assigned to this slave
+ * @unique_id: 80-bit BIF unique ID of the slave
+ * @unique_id_bits_known: Number of bits of the UID that are currently
+ * known. This number starts at 0, is incremented
+ * during a UID search, and must reach 80 if the
+ * slave responds to the search properly.
+ * @present: Boolean value showing if this slave is
+ * physically present in the system at a given
+ * point in time. The value is set to false if the
+ * battery pack containing the slave is
+ * disconnected.
+ * @l1_data: BIF DDB L1 data of the slave as read from the
+ * slave's memory
+ * @function_directory: Pointer to the BIF DDB L2 function directory
+ * list as read from the slave's memory
+ * @protocol_function: Pointer to constant protocol function data as
+ * well as software state information if the slave
+ * has a protocol function
+ * @slave_ctrl_function: Pointer to constant slave control function data
+ * as well as software state information if the
+ * slave has a slave control function
+ * @nvm_function: Pointer to constant non-volatile memory function
+ * data as well as software state information if
+ * the slave has a non-volatile memory function
+ *
+ * bif_slave_dev objects are stored indefinitely after enumeration in order to
+ * speed up battery reinsertion. Only a UID check is needed after inserting a
+ * battery assuming it has been enumerated before.
+ *
+ * unique_id bytes are stored such that unique_id[0] = MSB and
+ * unique_id[BIF_UNIQUE_ID_BYTE_LENGTH - 1] = LSB
+ */
+struct bif_slave_dev {
+ struct list_head list;
+ struct bif_ctrl_dev *bdev;
+ u8 slave_addr;
+ u8 unique_id[BIF_UNIQUE_ID_BYTE_LENGTH];
+ int unique_id_bits_known;
+ bool present;
+ struct bif_ddb_l1_data l1_data;
+ struct bif_ddb_l2_data *function_directory;
+ struct bif_protocol_function *protocol_function;
+ struct bif_slave_control_function *slave_ctrl_function;
+ struct bif_nvm_function *nvm_function;
+};
+
+/**
+ * struct bif_slave - handle used by BIF consumers for slave oriented BIF
+ * operations
+ * @ctrl: Consumer BIF controller handle data
+ * @sdev: Pointer to BIF slave device
+ */
+struct bif_slave {
+ struct bif_ctrl ctrl;
+ struct bif_slave_dev *sdev;
+};
+
+/* Number of times to retry a full BIF transaction before returning an error. */
+#define BIF_TRANSACTION_RETRY_COUNT 5
+
+static DEFINE_MUTEX(bif_ctrl_list_mutex);
+static LIST_HEAD(bif_ctrl_list);
+static DEFINE_MUTEX(bif_sdev_list_mutex);
+static LIST_HEAD(bif_sdev_list);
+
+static u8 next_dev_addr = 0x02;
+
+#define DEBUG_PRINT_BUFFER_SIZE 256
+static void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
+{
+ int pos = 0;
+ int i;
+
+ for (i = 0; i < buf_len; i++) {
+ pos += scnprintf(str + pos, str_len - pos, "0x%02X", buf[i]);
+ if (i < buf_len - 1)
+ pos += scnprintf(str + pos, str_len - pos, ", ");
+ }
+}
+
+static void bif_print_slave_data(struct bif_slave_dev *sdev)
+{
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+ u8 *uid;
+ int i, j;
+ struct bif_object *object;
+
+ if (sdev->unique_id_bits_known != BIF_UNIQUE_ID_BIT_LENGTH)
+ return;
+
+ uid = sdev->unique_id;
+ pr_debug("BIF slave: 0x%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ uid[0], uid[1], uid[2], uid[3], uid[4], uid[5], uid[6],
+ uid[7], uid[8], uid[9]);
+ pr_debug(" present=%d, dev_adr=0x%02X\n", sdev->present,
+ sdev->slave_addr);
+ pr_debug(" revision=0x%02X, level=0x%02X, device class=0x%04X\n",
+ sdev->l1_data.revision, sdev->l1_data.level,
+ sdev->l1_data.device_class);
+ pr_debug(" manufacturer ID=0x%04X, product ID=0x%04X\n",
+ sdev->l1_data.manufacturer_id, sdev->l1_data.product_id);
+ pr_debug(" function directory length=%d\n", sdev->l1_data.length);
+
+ for (i = 0; i < sdev->l1_data.length / 4; i++) {
+ pr_debug(" Function %d: type=0x%02X, version=0x%02X, pointer=0x%04X\n",
+ i, sdev->function_directory[i].function_type,
+ sdev->function_directory[i].function_version,
+ sdev->function_directory[i].function_pointer);
+ }
+
+ if (sdev->nvm_function) {
+ pr_debug(" NVM function: pointer=0x%04X, task=%d, wr_buf_size=%d, nvm_base=0x%04X, nvm_size=%d\n",
+ sdev->nvm_function->nvm_pointer,
+ sdev->nvm_function->slave_control_channel,
+ (sdev->nvm_function->write_buffer_size
+ ? sdev->nvm_function->write_buffer_size : 0),
+ sdev->nvm_function->nvm_base_address,
+ sdev->nvm_function->nvm_size);
+ if (sdev->nvm_function->object_count)
+ pr_debug(" NVM objects:\n");
+ i = 0;
+ list_for_each_entry(object, &sdev->nvm_function->object_list,
+ list) {
+ pr_debug(" Object %d - addr=0x%04X, data len=%d, type=0x%02X, version=0x%02X, manufacturer ID=0x%04X, crc=0x%04X\n",
+ i, object->addr, object->length - 8,
+ object->type, object->version,
+ object->manufacturer_id, object->crc);
+ for (j = 0; j < DIV_ROUND_UP(object->length - 8, 16);
+ j++) {
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE,
+ object->data + j * 16,
+ min(16, object->length - 8 - (j * 16)));
+ pr_debug(" data(0x%04X): %s\n", j * 16,
+ str);
+ }
+ i++;
+ }
+ }
+}
+
+static void bif_print_slaves(void)
+{
+ struct bif_slave_dev *sdev;
+
+ mutex_lock(&bif_sdev_list_mutex);
+
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ /* Skip slaves without fully known UIDs. */
+ if (sdev->unique_id_bits_known != BIF_UNIQUE_ID_BIT_LENGTH)
+ continue;
+ bif_print_slave_data(sdev);
+ }
+
+ mutex_unlock(&bif_sdev_list_mutex);
+}
+
+static struct bif_slave_dev *bif_add_slave(struct bif_ctrl_dev *bdev)
+{
+ struct bif_slave_dev *sdev;
+
+ sdev = kzalloc(sizeof(struct bif_slave_dev), GFP_KERNEL);
+ if (sdev == NULL) {
+ pr_err("Memory allocation failed for bif_slave_dev\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sdev->bdev = bdev;
+ INIT_LIST_HEAD(&sdev->list);
+ list_add_tail(&sdev->list, &bif_sdev_list);
+
+ return sdev;
+}
+
+static void bif_remove_slave(struct bif_slave_dev *sdev)
+{
+ list_del(&sdev->list);
+ if (sdev->bdev->selected_sdev == sdev)
+ sdev->bdev->selected_sdev = NULL;
+
+ if (sdev->slave_ctrl_function)
+ kfree(sdev->slave_ctrl_function->irq_notifier_list);
+ kfree(sdev->slave_ctrl_function);
+ kfree(sdev->protocol_function);
+ kfree(sdev->function_directory);
+
+ kfree(sdev);
+}
+
+/* This function assumes that the uid array is all 0 to start with. */
+static void set_uid_bit(u8 uid[BIF_UNIQUE_ID_BYTE_LENGTH], unsigned int bit,
+ unsigned int value)
+{
+ u8 mask;
+
+ if (bit >= BIF_UNIQUE_ID_BIT_LENGTH)
+ return;
+
+ mask = 1 << (7 - (bit % 8));
+
+ uid[bit / 8] &= ~mask;
+ uid[bit / 8] |= value << (7 - (bit % 8));
+}
+
+static unsigned int get_uid_bit(u8 uid[BIF_UNIQUE_ID_BYTE_LENGTH],
+ unsigned int bit)
+{
+ if (bit >= BIF_UNIQUE_ID_BIT_LENGTH)
+ return 0;
+
+ return (uid[bit / 8] & (1 << (7 - (bit % 8)))) ? 1 : 0;
+}
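+
+/*
+ * Worked example (illustrative): UID bit 0 is the overall MSB, so
+ * set_uid_bit(uid, 0, 1) computes mask = 1 << (7 - 0) = 0x80 and sets the
+ * top bit of uid[0], while set_uid_bit(uid, 79, 1) sets the bottom bit of
+ * uid[9]; get_uid_bit() reads the same positions back.
+ */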
+
+static void bif_enter_irq_mode_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct bif_ctrl_dev *bdev
+ = container_of(dwork, struct bif_ctrl_dev, enter_irq_mode_work);
+ int rc, i;
+
+ mutex_lock(&bdev->mutex);
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ rc = bdev->desc->ops->set_bus_state(bdev,
+ BIF_BUS_STATE_INTERRUPT);
+ if (rc == 0)
+ break;
+ }
+ mutex_unlock(&bdev->mutex);
+
+ /* Reschedule the task if the transaction failed. */
+ if (rc) {
+ pr_err("Could not set BIF bus to interrupt mode, rc=%d\n", rc);
+ schedule_delayed_work(&bdev->enter_irq_mode_work,
+ bdev->irq_mode_delay_jiffies);
+ }
+}
+
+static void bif_cancel_irq_mode_work(struct bif_ctrl_dev *bdev)
+{
+ cancel_delayed_work(&bdev->enter_irq_mode_work);
+}
+
+static void bif_schedule_irq_mode_work(struct bif_ctrl_dev *bdev)
+{
+ if (bdev->irq_count > 0 &&
+ bdev->desc->ops->get_bus_state(bdev) != BIF_BUS_STATE_INTERRUPT)
+ schedule_delayed_work(&bdev->enter_irq_mode_work,
+ bdev->irq_mode_delay_jiffies);
+}
+
+static int _bif_select_slave_no_retry(struct bif_slave_dev *sdev)
+{
+ struct bif_ctrl_dev *bdev = sdev->bdev;
+ int rc = 0;
+ int i;
+
+ /* Check if the slave is already selected. */
+ if (sdev->bdev->selected_sdev == sdev)
+ return 0;
+
+ if (sdev->slave_addr) {
+ /* Select using DEV_ADR. */
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_SDA,
+ sdev->slave_addr);
+ if (!rc)
+ sdev->bdev->selected_sdev = sdev;
+ } else if (sdev->unique_id_bits_known == BIF_UNIQUE_ID_BIT_LENGTH) {
+ /* Select using full UID. */
+ for (i = 0; i < BIF_UNIQUE_ID_BYTE_LENGTH - 1; i++) {
+ rc = bdev->desc->ops->bus_transaction(bdev,
+ BIF_TRANS_EDA, sdev->unique_id[i]);
+ if (rc)
+ goto out;
+ }
+
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_SDA,
+ sdev->unique_id[BIF_UNIQUE_ID_BYTE_LENGTH - 1]);
+ if (rc)
+ goto out;
+ } else {
+ pr_err("Cannot select slave because it has neither UID nor DEV_ADR.\n");
+ return -EINVAL;
+ }
+
+ sdev->bdev->selected_sdev = sdev;
+
+ return 0;
+out:
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+}
+
+static int bif_select_slave(struct bif_slave_dev *sdev)
+{
+ int rc = -EPERM;
+ int i;
+
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ rc = _bif_select_slave_no_retry(sdev);
+ if (rc == 0)
+ break;
+ /* Force slave reselection. */
+ sdev->bdev->selected_sdev = NULL;
+ }
+
+ return rc;
+}
+
+/*
+ * Returns 1 if slave is selected, 0 if slave is not selected, or errno if
+ * error.
+ */
+static int bif_is_slave_selected(struct bif_ctrl_dev *bdev)
+{
+ int rc = -EPERM;
+ int tack, i;
+
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ /* Attempt a transaction query. */
+ rc = bdev->desc->ops->bus_transaction_read(bdev, BIF_TRANS_BC,
+ BIF_CMD_TQ, &tack);
+ if (rc == 0 || rc == -ETIMEDOUT)
+ break;
+ }
+
+ if (rc == 0)
+ rc = 1;
+ else if (rc == -ETIMEDOUT)
+ rc = 0;
+ else
+ pr_err("BIF bus_transaction_read failed, rc=%d\n", rc);
+
+ return rc;
+}
+
+/* Read from a specified number of consecutive registers. */
+static int _bif_slave_read_no_retry(struct bif_slave_dev *sdev, u16 addr,
+ u8 *buf, int len)
+{
+ struct bif_ctrl_dev *bdev = sdev->bdev;
+ int rc = 0;
+ int i, response;
+
+ rc = bif_select_slave(sdev);
+ if (rc)
+ return rc;
+
+ if (bdev->desc->ops->read_slave_registers) {
+ /*
+ * Use low level slave register read implementation in order to
+ * receive the benefits of BIF burst reads.
+ */
+ rc = bdev->desc->ops->read_slave_registers(bdev, addr, buf,
+ len);
+ if (rc)
+ pr_err("read_slave_registers failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < len; i++) {
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_ERA,
+ addr >> 8);
+ if (rc) {
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = bdev->desc->ops->bus_transaction_read(bdev, BIF_TRANS_RRA,
+ addr & 0xFF, &response);
+ if (rc) {
+ pr_err("bus_transaction_read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (!(response & BIF_SLAVE_RD_ACK)) {
+ pr_err("BIF register read error=0x%02X\n",
+ response & BIF_SLAVE_RD_ERR);
+ return -EIO;
+ }
+
+ buf[i] = response & BIF_SLAVE_RD_DATA;
+ addr++;
+ }
+
+ return rc;
+}
+
+/*
+ * Read from a specified number of consecutive registers. Retry the transaction
+ * several times in case of communication failures.
+ */
+static int _bif_slave_read(struct bif_slave_dev *sdev, u16 addr, u8 *buf,
+ int len)
+{
+ int rc = -EPERM;
+ int i;
+
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ rc = _bif_slave_read_no_retry(sdev, addr, buf, len);
+ if (rc == 0)
+ break;
+ /* Force slave reselection. */
+ sdev->bdev->selected_sdev = NULL;
+ }
+
+ return rc;
+}
+
+/* Write to a specified number of consecutive registers. */
+static int _bif_slave_write_no_retry(struct bif_slave_dev *sdev, u16 addr,
+ u8 *buf, int len)
+{
+ struct bif_ctrl_dev *bdev = sdev->bdev;
+ int rc = 0;
+ int i;
+
+ rc = bif_select_slave(sdev);
+ if (rc)
+ return rc;
+
+ if (bdev->desc->ops->write_slave_registers) {
+ /*
+ * Use low level slave register write implementation in order to
+ * receive the benefits of BIF burst writes.
+ */
+ rc = bdev->desc->ops->write_slave_registers(bdev, addr, buf,
+ len);
+ if (rc)
+ pr_err("write_slave_registers failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_ERA, addr >> 8);
+ if (rc)
+ goto out;
+
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_WRA, addr & 0xFF);
+ if (rc)
+ goto out;
+
+ for (i = 0; i < len; i++) {
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_WD,
+ buf[i]);
+ if (rc)
+ goto out;
+ }
+
+ return 0;
+out:
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Write to a specified number of consecutive registers. Retry the transaction
+ * several times in case of communication failures.
+ */
+static int _bif_slave_write(struct bif_slave_dev *sdev, u16 addr, u8 *buf,
+ int len)
+{
+ int rc = -EPERM;
+ int i;
+
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ rc = _bif_slave_write_no_retry(sdev, addr, buf, len);
+ if (rc == 0)
+ break;
+ /* Force slave reselection. */
+ sdev->bdev->selected_sdev = NULL;
+ }
+
+ return rc;
+}
+
+/* Takes a mutex if this consumer is not an exclusive bus user. */
+static void bif_ctrl_lock(struct bif_ctrl *ctrl)
+{
+ if (!ctrl->exclusive_lock) {
+ mutex_lock(&ctrl->bdev->mutex);
+ bif_cancel_irq_mode_work(ctrl->bdev);
+ }
+}
+
+/* Releases a mutex if this consumer is not an exclusive bus user. */
+static void bif_ctrl_unlock(struct bif_ctrl *ctrl)
+{
+ if (!ctrl->exclusive_lock) {
+ bif_schedule_irq_mode_work(ctrl->bdev);
+ mutex_unlock(&ctrl->bdev->mutex);
+ }
+}
+
+static void bif_slave_ctrl_lock(struct bif_slave *slave)
+{
+ bif_ctrl_lock(&slave->ctrl);
+}
+
+static void bif_slave_ctrl_unlock(struct bif_slave *slave)
+{
+ bif_ctrl_unlock(&slave->ctrl);
+}
+
+static int bif_check_task(struct bif_slave *slave, unsigned int task)
+{
+ if (IS_ERR_OR_NULL(slave)) {
+ pr_err("Invalid slave handle.\n");
+ return -EINVAL;
+ } else if (!slave->sdev->bdev) {
+ pr_err("BIF controller has been removed.\n");
+ return -ENXIO;
+ } else if (!slave->sdev->slave_ctrl_function
+ || slave->sdev->slave_ctrl_function->task_count == 0) {
+ pr_err("BIF slave does not support slave control.\n");
+ return -ENODEV;
+ } else if (task >= slave->sdev->slave_ctrl_function->task_count) {
+ pr_err("Requested task: %u greater than max: %u for this slave\n",
+ task, slave->sdev->slave_ctrl_function->task_count);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * bif_request_irq() - request a BIF slave IRQ by slave task number
+ * @slave: BIF slave handle
+ * @task: BIF task number of the IRQ inside of the slave. This
+ * corresponds to the slave control channel specified for a given
+ * BIF function inside of the slave.
+ * @nb: Notifier block to call when the IRQ fires
+ *
+ * This function registers a notifier block to call when the BIF slave interrupt
+ * is triggered and also enables the interrupt. The interrupt is enabled inside
+ * of the BIF slave's slave control function and also the BIF bus is put into
+ * interrupt mode.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_request_irq(struct bif_slave *slave, unsigned int task,
+ struct notifier_block *nb)
+{
+ int rc;
+ u16 addr;
+ u8 reg, mask;
+
+ rc = bif_check_task(slave, task);
+ if (rc) {
+ pr_err("Invalid slave or task, rc=%d\n", rc);
+ return rc;
+ }
+
+ bif_slave_ctrl_lock(slave);
+
+ rc = blocking_notifier_chain_register(
+ &slave->sdev->slave_ctrl_function->irq_notifier_list[task], nb);
+ if (rc) {
+ pr_err("Notifier registration failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ /* Enable the interrupt within the slave */
+ mask = BIT(task % SLAVE_CTRL_TASKS_PER_SET);
+ addr = SLAVE_CTRL_FUNC_IRQ_EN_ADDR(
+ slave->sdev->slave_ctrl_function->slave_ctrl_pointer, task);
+ if (task / SLAVE_CTRL_TASKS_PER_SET == 0) {
+ /* Set global interrupt enable. */
+ mask |= BIT(0);
+ }
+ rc = _bif_slave_read(slave->sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register read failed, rc=%d\n", rc);
+ goto notifier_unregister;
+ }
+ reg |= mask;
+ rc = _bif_slave_write(slave->sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register write failed, rc=%d\n", rc);
+ goto notifier_unregister;
+ }
+
+ /* Set global interrupt enable if task not in set 0. */
+ if (task / SLAVE_CTRL_TASKS_PER_SET != 0) {
+ mask = BIT(0);
+ addr = SLAVE_CTRL_FUNC_IRQ_EN_ADDR(
+ slave->sdev->slave_ctrl_function->slave_ctrl_pointer, 0);
+ rc = _bif_slave_read(slave->sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register read failed, rc=%d\n", rc);
+ goto notifier_unregister;
+ }
+ reg |= mask;
+ rc = _bif_slave_write(slave->sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register write failed, rc=%d\n", rc);
+ goto notifier_unregister;
+ }
+ }
+
+ rc = slave->sdev->bdev->desc->ops->set_bus_state(slave->sdev->bdev,
+ BIF_BUS_STATE_INTERRUPT);
+ if (rc) {
+ pr_err("Could not set BIF bus to interrupt mode, rc=%d\n", rc);
+ goto notifier_unregister;
+ }
+
+ slave->sdev->bdev->irq_count++;
+done:
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+
+notifier_unregister:
+ blocking_notifier_chain_unregister(
+ &slave->sdev->slave_ctrl_function->irq_notifier_list[task],
+ nb);
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_request_irq);
+
+/**
+ * bif_free_irq() - free a BIF slave IRQ by slave task number
+ * @slave: BIF slave handle
+ * @task: BIF task number of the IRQ inside of the slave. This
+ * corresponds to the slave control channel specified for a given
+ * BIF function inside of the slave.
+ * @nb: Notifier block previously registered with this interrupt
+ *
+ * This function unregisters a notifier block that was previously registered
+ * with bif_request_irq().
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_free_irq(struct bif_slave *slave, unsigned int task,
+ struct notifier_block *nb)
+{
+ int rc;
+ u16 addr;
+ u8 reg;
+
+ rc = bif_check_task(slave, task);
+ if (rc) {
+ pr_err("Invalid slave or task, rc=%d\n", rc);
+ return rc;
+ }
+
+ bif_slave_ctrl_lock(slave);
+
+ /* Disable the interrupt within the slave */
+ reg = BIT(task % SLAVE_CTRL_TASKS_PER_SET);
+ addr = SLAVE_CTRL_FUNC_IRQ_CLEAR_ADDR(
+ slave->sdev->slave_ctrl_function->slave_ctrl_pointer, task);
+ rc = _bif_slave_write(slave->sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register write failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ rc = blocking_notifier_chain_unregister(
+ &slave->sdev->slave_ctrl_function->irq_notifier_list[task], nb);
+ if (rc) {
+ pr_err("Notifier unregistration failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ slave->sdev->bdev->irq_count--;
+
+ if (slave->sdev->bdev->irq_count == 0) {
+ bif_cancel_irq_mode_work(slave->sdev->bdev);
+ } else if (slave->sdev->bdev->irq_count < 0) {
+ pr_err("Unbalanced IRQ free.\n");
+ rc = -EINVAL;
+ slave->sdev->bdev->irq_count = 0;
+ }
+done:
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_free_irq);
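+
+/*
+ * Usage sketch (hypothetical consumer code, not part of this driver): a
+ * consumer holding a BIF slave handle could hook a slave task interrupt
+ * roughly as follows; FOO_TASK and the foo_* names are placeholders.
+ *
+ *   static int foo_task_notifier_cb(struct notifier_block *nb,
+ *                                   unsigned long task, void *data)
+ *   {
+ *           pr_info("BIF slave task %lu fired\n", task);
+ *           return NOTIFY_OK;
+ *   }
+ *
+ *   static struct notifier_block foo_task_nb = {
+ *           .notifier_call = foo_task_notifier_cb,
+ *   };
+ *
+ *   rc = bif_request_irq(slave, FOO_TASK, &foo_task_nb);
+ *   ...
+ *   rc = bif_free_irq(slave, FOO_TASK, &foo_task_nb);
+ */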
+
+/**
+ * bif_trigger_task() - trigger a task within a BIF slave
+ * @slave: BIF slave handle
+ * @task: BIF task inside of the slave to trigger. This corresponds to
+ * the slave control channel specified for a given BIF function
+ * inside of the slave.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_trigger_task(struct bif_slave *slave, unsigned int task)
+{
+ int rc;
+ u16 addr;
+ u8 reg;
+
+ rc = bif_check_task(slave, task);
+ if (rc) {
+ pr_err("Invalid slave or task, rc=%d\n", rc);
+ return rc;
+ }
+
+ bif_slave_ctrl_lock(slave);
+
+ /* Trigger the task within the slave. */
+ reg = BIT(task % SLAVE_CTRL_TASKS_PER_SET);
+ addr = SLAVE_CTRL_FUNC_TASK_TRIGGER_ADDR(
+ slave->sdev->slave_ctrl_function->slave_ctrl_pointer, task);
+ rc = _bif_slave_write(slave->sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register write failed, rc=%d\n", rc);
+ goto done;
+ }
+
+done:
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_trigger_task);
+
+/**
+ * bif_task_is_busy() - checks the state of a BIF slave task
+ * @slave: BIF slave handle
+ * @task: BIF task inside of the slave to check. This corresponds to
+ * the slave control channel specified for a given BIF function
+ * inside of the slave.
+ *
+ * Returns 1 if the task is busy, 0 if it is not busy, and errno on error.
+ */
+int bif_task_is_busy(struct bif_slave *slave, unsigned int task)
+{
+ int rc;
+ u16 addr;
+ u8 reg;
+
+ rc = bif_check_task(slave, task);
+ if (rc) {
+ pr_err("Invalid slave or task, rc=%d\n", rc);
+ return rc;
+ }
+
+ bif_slave_ctrl_lock(slave);
+
+ /* Check the task busy state. */
+ addr = SLAVE_CTRL_FUNC_TASK_BUSY_ADDR(
+ slave->sdev->slave_ctrl_function->slave_ctrl_pointer, task);
+ rc = _bif_slave_read(slave->sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register read failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ rc = (reg & BIT(task % SLAVE_CTRL_TASKS_PER_SET)) ? 1 : 0;
+done:
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_task_is_busy);
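+
+/*
+ * Usage sketch (hypothetical consumer code): trigger a slave task and poll
+ * until it completes; FOO_TASK and the 10 ms poll interval are placeholders.
+ *
+ *   rc = bif_trigger_task(slave, FOO_TASK);
+ *   if (rc)
+ *           return rc;
+ *
+ *   do {
+ *           msleep(10);
+ *           rc = bif_task_is_busy(slave, FOO_TASK);
+ *   } while (rc == 1);
+ *
+ *   if (rc < 0)
+ *           return rc;
+ */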
+
+static int bif_slave_notify_irqs(struct bif_slave_dev *sdev, int set, u8 val)
+{
+ int rc = 0;
+ int i, task;
+
+ for (i = 0; i < SLAVE_CTRL_TASKS_PER_SET; i++) {
+ if (val & (1 << i)) {
+ task = set * SLAVE_CTRL_TASKS_PER_SET + i;
+
+ rc = blocking_notifier_call_chain(
+ &sdev->slave_ctrl_function->irq_notifier_list[task],
+ task, sdev->bdev);
+ rc = notifier_to_errno(rc);
+ if (rc)
+ pr_err("Notification failed for task %d\n",
+ task);
+ }
+ }
+
+ return rc;
+}
+
+static int bif_slave_handle_irq(struct bif_slave_dev *sdev)
+{
+ struct bif_ctrl_dev *bdev = sdev->bdev;
+ bool resp = false;
+ int rc = 0;
+ int i;
+ u16 addr;
+ u8 reg;
+
+ mutex_lock(&sdev->bdev->mutex);
+ bif_cancel_irq_mode_work(sdev->bdev);
+
+ rc = bif_select_slave(sdev);
+ if (rc) {
+ pr_err("Could not select slave, rc=%d\n", rc);
+ goto done;
+ }
+
+ /* Check overall slave interrupt status. */
+ rc = bdev->desc->ops->bus_transaction_query(bdev, BIF_TRANS_BC,
+ BIF_CMD_ISTS, &resp);
+ if (rc) {
+ pr_err("Could not query slave interrupt status, rc=%d\n", rc);
+ goto done;
+ }
+
+ if (resp) {
+ for (i = 0; i < sdev->slave_ctrl_function->task_count
+ / SLAVE_CTRL_TASKS_PER_SET; i++) {
+ addr = sdev->slave_ctrl_function->slave_ctrl_pointer
+ + 4 * i + 1;
+ rc = _bif_slave_read(sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register read failed, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ /* Ensure that interrupts are pending in the set. */
+ if (reg != 0x00) {
+ /*
+ * Release mutex before notifying consumers so
+ * that they can use the bus.
+ */
+ mutex_unlock(&sdev->bdev->mutex);
+ rc = bif_slave_notify_irqs(sdev, i, reg);
+ if (rc) {
+ pr_err("BIF slave irq notification failed, rc=%d\n",
+ rc);
+ goto notification_failed;
+ }
+ mutex_lock(&sdev->bdev->mutex);
+
+ rc = bif_select_slave(sdev);
+ if (rc) {
+ pr_err("Could not select slave, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ /* Clear all interrupts in this set. */
+ rc = _bif_slave_write(sdev, addr, &reg, 1);
+ if (rc) {
+ pr_err("BIF slave register write failed, rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+ }
+ }
+
+done:
+ bif_schedule_irq_mode_work(sdev->bdev);
+ mutex_unlock(&sdev->bdev->mutex);
+notification_failed:
+ if (rc == 0)
+ rc = resp;
+ return rc;
+}
+
+/**
+ * bif_ctrl_notify_slave_irq() - notify the BIF framework that a slave interrupt
+ * was received by a BIF controller
+ * @bdev: BIF controller device pointer
+ *
+ * This function should only be called from a BIF controller driver.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_notify_slave_irq(struct bif_ctrl_dev *bdev)
+{
+ struct bif_slave_dev *sdev;
+ int rc = 0, handled = 0;
+
+ if (IS_ERR_OR_NULL(bdev))
+ return -EINVAL;
+
+ mutex_lock(&bif_sdev_list_mutex);
+
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ if (sdev->bdev == bdev && sdev->present) {
+ rc = bif_slave_handle_irq(sdev);
+ if (rc < 0) {
+ pr_err("Could not handle BIF slave irq, rc=%d\n",
+ rc);
+ break;
+ }
+ handled += rc;
+ }
+ }
+
+ mutex_unlock(&bif_sdev_list_mutex);
+
+ if (handled == 0)
+ pr_info("Spurious BIF slave interrupt detected.\n");
+
+ if (rc > 0)
+ rc = 0;
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_notify_slave_irq);
+
+/**
+ * bif_ctrl_notify_battery_changed() - notify the BIF framework that a battery
+ * pack has been inserted or removed
+ * @bdev: BIF controller device pointer
+ *
+ * This function should only be called from a BIF controller driver.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_notify_battery_changed(struct bif_ctrl_dev *bdev)
+{
+ int rc = 0;
+ int present;
+
+ if (IS_ERR_OR_NULL(bdev))
+ return -EINVAL;
+
+ if (bdev->desc->ops->get_battery_presence) {
+ present = bdev->desc->ops->get_battery_presence(bdev);
+ if (present < 0) {
+ pr_err("Could not determine battery presence, rc=%d\n",
+ present);
+ return present;
+ }
+
+ if (bdev->battery_present == !!present)
+ return 0;
+
+ bdev->battery_present = present;
+
+ rc = blocking_notifier_call_chain(&bdev->bus_change_notifier,
+ present ? BIF_BUS_EVENT_BATTERY_INSERTED
+ : BIF_BUS_EVENT_BATTERY_REMOVED, bdev);
+ if (rc)
+ pr_err("Call chain noification failed, rc=%d\n", rc);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_notify_battery_changed);
+
+/**
+ * bif_ctrl_signal_battery_changed() - notify the BIF framework that a battery
+ * pack has been inserted or removed
+ * @ctrl: BIF controller consumer handle
+ *
+ * This function should only be called by a BIF consumer driver on systems where
+ * the BIF controller driver is unable to determine when a battery is inserted
+ * or removed.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_signal_battery_changed(struct bif_ctrl *ctrl)
+{
+ if (IS_ERR_OR_NULL(ctrl))
+ return -EINVAL;
+
+ return bif_ctrl_notify_battery_changed(ctrl->bdev);
+}
+EXPORT_SYMBOL(bif_ctrl_signal_battery_changed);
+
+/**
+ * bif_ctrl_notifier_register() - register a notifier block to be called when
+ * a battery pack is inserted or removed
+ * @ctrl: BIF controller consumer handle
+ *
+ * The value passed into the notifier when it is called is one of
+ * enum bif_bus_event.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_notifier_register(struct bif_ctrl *ctrl, struct notifier_block *nb)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl))
+ return -EINVAL;
+
+ rc = blocking_notifier_chain_register(&ctrl->bdev->bus_change_notifier,
+ nb);
+ if (rc)
+ pr_err("Notifier registration failed, rc=%d\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_notifier_register);
+
+/**
+ * bif_ctrl_notifier_unregister() - unregister a battery status change notifier
+ * block that was previously registered
+ * @ctrl: BIF controller consumer handle
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_notifier_unregister(struct bif_ctrl *ctrl,
+ struct notifier_block *nb)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl))
+ return -EINVAL;
+
+ rc = blocking_notifier_chain_unregister(
+ &ctrl->bdev->bus_change_notifier, nb);
+ if (rc)
+ pr_err("Notifier unregistration failed, rc=%d\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_notifier_unregister);
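+
+/*
+ * Usage sketch (hypothetical consumer code): listen for battery pack insertion
+ * and removal. The notifier action is one of enum bif_bus_event and the data
+ * pointer is the BIF controller device; the foo_* names are placeholders.
+ *
+ *   static int foo_battery_notifier_cb(struct notifier_block *nb,
+ *                                      unsigned long event, void *data)
+ *   {
+ *           if (event == BIF_BUS_EVENT_BATTERY_INSERTED)
+ *                   pr_info("battery inserted\n");
+ *           else if (event == BIF_BUS_EVENT_BATTERY_REMOVED)
+ *                   pr_info("battery removed\n");
+ *           return NOTIFY_OK;
+ *   }
+ *
+ *   static struct notifier_block foo_battery_nb = {
+ *           .notifier_call = foo_battery_notifier_cb,
+ *   };
+ *
+ *   rc = bif_ctrl_notifier_register(ctrl, &foo_battery_nb);
+ */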
+
+/**
+ * bif_get_bus_handle() - returns the BIF controller consumer handle associated
+ * with a BIF slave handle
+ * @slave: BIF slave handle
+ *
+ * Note, bif_ctrl_put() should never be called for the pointer output by
+ * bif_get_bus_handle().
+ */
+struct bif_ctrl *bif_get_bus_handle(struct bif_slave *slave)
+{
+ if (IS_ERR_OR_NULL(slave))
+ return ERR_PTR(-EINVAL);
+
+ return &slave->ctrl;
+}
+EXPORT_SYMBOL(bif_get_bus_handle);
+
+/**
+ * bif_ctrl_count() - returns the number of registered BIF controllers
+ */
+int bif_ctrl_count(void)
+{
+ struct bif_ctrl_dev *bdev;
+ int count = 0;
+
+ mutex_lock(&bif_ctrl_list_mutex);
+
+ list_for_each_entry(bdev, &bif_ctrl_list, list) {
+ count++;
+ }
+ mutex_unlock(&bif_ctrl_list_mutex);
+
+ return count;
+}
+EXPORT_SYMBOL(bif_ctrl_count);
+
+/**
+ * bif_ctrl_get_by_id() - get a handle for the id'th BIF controller registered
+ * in the system
+ * @id: Arbitrary number associated with the BIF bus in the system
+ *
+ * id must be in the range [0, bif_ctrl_count() - 1]. This function should only
+ * need to be called by a BIF consumer that is unable to link to a given BIF
+ * controller via a device tree binding.
+ *
+ * Returns a BIF controller consumer handle if successful or an ERR_PTR if not.
+ */
+struct bif_ctrl *bif_ctrl_get_by_id(unsigned int id)
+{
+ struct bif_ctrl_dev *bdev;
+ struct bif_ctrl_dev *bdev_found = NULL;
+ struct bif_ctrl *ctrl = ERR_PTR(-ENODEV);
+
+ mutex_lock(&bif_ctrl_list_mutex);
+
+ list_for_each_entry(bdev, &bif_ctrl_list, list) {
+ if (id == 0) {
+ bdev_found = bdev;
+ break;
+ }
+ id--;
+ }
+ mutex_unlock(&bif_ctrl_list_mutex);
+
+ if (bdev_found) {
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl) {
+ pr_err("Bus handle allocation failed\n");
+ ctrl = ERR_PTR(-ENOMEM);
+ } else {
+ ctrl->bdev = bdev_found;
+ }
+ }
+
+ return ctrl;
+}
+EXPORT_SYMBOL(bif_ctrl_get_by_id);
+
+/**
+ * bif_ctrl_get() - get a handle for the BIF controller that is linked to the
+ * consumer device in the device tree
+ * @consumer_dev: Pointer to the consumer's device
+ *
+ * In order to use this function, the BIF consumer's device must specify the
+ * "qcom,bif-ctrl" property in its device tree node which points to a BIF
+ * controller device node.
+ *
+ * Returns a BIF controller consumer handle if successful or an ERR_PTR if not.
+ * If the BIF controller linked to the consumer device has not yet probed, then
+ * ERR_PTR(-EPROBE_DEFER) is returned.
+ */
+struct bif_ctrl *bif_ctrl_get(struct device *consumer_dev)
+{
+ struct device_node *ctrl_node = NULL;
+ struct bif_ctrl_dev *bdev_found = NULL;
+ struct bif_ctrl *ctrl = ERR_PTR(-EPROBE_DEFER);
+ struct bif_ctrl_dev *bdev = NULL;
+
+ if (!consumer_dev || !consumer_dev->of_node) {
+ pr_err("Invalid device node\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctrl_node = of_parse_phandle(consumer_dev->of_node, "qcom,bif-ctrl", 0);
+ if (!ctrl_node) {
+ pr_err("Could not find qcom,bif-ctrl property in %s\n",
+ consumer_dev->of_node->full_name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ mutex_lock(&bif_ctrl_list_mutex);
+ list_for_each_entry(bdev, &bif_ctrl_list, list) {
+ if (bdev->ctrl_dev && bdev->ctrl_dev->of_node == ctrl_node) {
+ bdev_found = bdev;
+ break;
+ }
+ }
+ mutex_unlock(&bif_ctrl_list_mutex);
+
+ if (bdev_found) {
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl) {
+ pr_err("Bus handle allocation failed\n");
+ ctrl = ERR_PTR(-ENOMEM);
+ } else {
+ ctrl->bdev = bdev_found;
+ }
+ }
+
+ return ctrl;
+}
+EXPORT_SYMBOL(bif_ctrl_get);
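+
+/*
+ * Usage sketch (hypothetical consumer probe): acquire a controller handle via
+ * the "qcom,bif-ctrl" device tree property. The returned ERR_PTR may be
+ * -EPROBE_DEFER, in which case returning it defers the consumer probe until
+ * the controller registers; pdev is a placeholder platform device.
+ *
+ *   struct bif_ctrl *ctrl;
+ *
+ *   ctrl = bif_ctrl_get(&pdev->dev);
+ *   if (IS_ERR(ctrl))
+ *           return PTR_ERR(ctrl);
+ *   ...
+ *   bif_ctrl_put(ctrl);
+ */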
+
+/**
+ * bif_ctrl_put() - frees a BIF controller handle
+ * @ctrl: BIF controller consumer handle
+ */
+void bif_ctrl_put(struct bif_ctrl *ctrl)
+{
+ if (!IS_ERR_OR_NULL(ctrl) && ctrl->exclusive_lock)
+ mutex_unlock(&ctrl->bdev->mutex);
+ kfree(ctrl);
+}
+EXPORT_SYMBOL(bif_ctrl_put);
+
+/*
+ * Returns true if all parameters are matched, otherwise false.
+ * function_type and function_version match if there exists some function in
+ * the slave which has the specified type and version. ctrl == NULL is treated
+ * as a wildcard.
+ */
+static bool bif_slave_match(const struct bif_ctrl *ctrl,
+ struct bif_slave_dev *sdev, const struct bif_match_criteria *criteria)
+{
+ int i, type, version;
+
+ if (ctrl && (ctrl->bdev != sdev->bdev))
+ return false;
+
+ if (!sdev->present
+ && (!(criteria->match_mask & BIF_MATCH_IGNORE_PRESENCE)
+ || ((criteria->match_mask & BIF_MATCH_IGNORE_PRESENCE)
+ && !criteria->ignore_presence)))
+ return false;
+
+ if ((criteria->match_mask & BIF_MATCH_MANUFACTURER_ID)
+ && sdev->l1_data.manufacturer_id != criteria->manufacturer_id)
+ return false;
+
+ if ((criteria->match_mask & BIF_MATCH_PRODUCT_ID)
+ && sdev->l1_data.product_id != criteria->product_id)
+ return false;
+
+ if (criteria->match_mask & BIF_MATCH_FUNCTION_TYPE) {
+ if (!sdev->function_directory)
+ return false;
+ for (i = 0; i < sdev->l1_data.length / 4; i++) {
+ type = sdev->function_directory[i].function_type;
+ version = sdev->function_directory[i].function_version;
+ if (type == criteria->function_type &&
+ (version == criteria->function_version
+ || !(criteria->match_mask
+ & BIF_MATCH_FUNCTION_VERSION)))
+ return true;
+ }
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * bif_slave_match_count() - returns the number of slaves associated with the
+ * specified BIF controller which fit the matching
+ * criteria
+ * @ctrl: BIF controller consumer handle
+ * @match_criteria: Matching criteria used to filter slaves
+ */
+int bif_slave_match_count(const struct bif_ctrl *ctrl,
+ const struct bif_match_criteria *match_criteria)
+{
+ struct bif_slave_dev *sdev;
+ int count = 0;
+
+ mutex_lock(&bif_sdev_list_mutex);
+
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ if (bif_slave_match(ctrl, sdev, match_criteria))
+ count++;
+ }
+
+ mutex_unlock(&bif_sdev_list_mutex);
+
+ return count;
+}
+EXPORT_SYMBOL(bif_slave_match_count);
+
+/**
+ * bif_slave_match_get() - get a slave handle for the id'th slave associated
+ * with the specified BIF controller which fits the
+ * matching criteria
+ * @ctrl: BIF controller consumer handle
+ * @id: Index into the set of matching slaves
+ * @match_criteria: Matching criteria used to filter slaves
+ *
+ * id must be in the range [0, bif_slave_match_count(ctrl, match_criteria) - 1].
+ *
+ * Returns a BIF slave handle if successful or an ERR_PTR if not.
+ */
+struct bif_slave *bif_slave_match_get(const struct bif_ctrl *ctrl,
+ unsigned int id, const struct bif_match_criteria *match_criteria)
+{
+ struct bif_slave_dev *sdev;
+ struct bif_slave *slave = ERR_PTR(-ENODEV);
+ struct bif_slave_dev *sdev_found = NULL;
+ int count = 0;
+
+ mutex_lock(&bif_sdev_list_mutex);
+
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ if (bif_slave_match(ctrl, sdev, match_criteria))
+ count++;
+ if (count == id + 1) {
+ sdev_found = sdev;
+ break;
+ }
+ }
+
+ mutex_unlock(&bif_sdev_list_mutex);
+
+ if (sdev_found) {
+ slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ if (!slave) {
+ pr_err("Slave allocation failed\n");
+ slave = ERR_PTR(-ENOMEM);
+ } else {
+ slave->sdev = sdev_found;
+ slave->ctrl.bdev = sdev_found->bdev;
+ }
+ }
+
+ return slave;
+}
+EXPORT_SYMBOL(bif_slave_match_get);
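+
+/*
+ * Usage sketch (hypothetical consumer code): find the first slave advertising
+ * an NVM function. The criteria fields mirror those checked in
+ * bif_slave_match() above.
+ *
+ *   struct bif_match_criteria criteria = {
+ *           .match_mask    = BIF_MATCH_FUNCTION_TYPE,
+ *           .function_type = BIF_FUNC_NVM,
+ *   };
+ *   struct bif_slave *slave;
+ *
+ *   if (bif_slave_match_count(ctrl, &criteria) == 0)
+ *           return -ENODEV;
+ *   slave = bif_slave_match_get(ctrl, 0, &criteria);
+ *   if (IS_ERR(slave))
+ *           return PTR_ERR(slave);
+ *   ...
+ *   bif_slave_put(slave);
+ */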
+
+/**
+ * bif_slave_put() - frees a BIF slave handle
+ * @slave: BIF slave handle
+ */
+void bif_slave_put(struct bif_slave *slave)
+{
+ if (!IS_ERR_OR_NULL(slave) && slave->ctrl.exclusive_lock)
+ mutex_unlock(&slave->sdev->bdev->mutex);
+ kfree(slave);
+}
+EXPORT_SYMBOL(bif_slave_put);
+
+/**
+ * bif_slave_find_function() - get the function pointer and version of a
+ * BIF function if it is present on the specified slave
+ * @slave: BIF slave handle
+ * @function: BIF function to search for inside of the slave
+ * @version: If the function is found, then 'version' is set to the
+ * version value of the function
+ * @function_pointer: If the function is found, then 'function_pointer' is set
+ * to the BIF slave address of the function
+ *
+ * Returns 0 for success or errno if an error occurred. If the function is not
+ * found in the slave, then -ENODEV is returned.
+ */
+int bif_slave_find_function(struct bif_slave *slave, u8 function, u8 *version,
+ u16 *function_pointer)
+{
+ int rc = -ENODEV;
+ struct bif_ddb_l2_data *func;
+ int i;
+
+ if (IS_ERR_OR_NULL(slave) || IS_ERR_OR_NULL(version)
+ || IS_ERR_OR_NULL(function_pointer)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ func = slave->sdev->function_directory;
+
+ for (i = 0; i < slave->sdev->l1_data.length / 4; i++) {
+ if (function == func[i].function_type) {
+ *version = func[i].function_version;
+ *function_pointer = func[i].function_pointer;
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_slave_find_function);
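+
+/*
+ * Usage sketch (hypothetical consumer code): locate the protocol function
+ * within a slave and retrieve its version and function pointer.
+ *
+ *   u8 version;
+ *   u16 func_ptr;
+ *
+ *   rc = bif_slave_find_function(slave, BIF_FUNC_PROTOCOL, &version,
+ *                                &func_ptr);
+ *   if (rc == -ENODEV)
+ *           pr_info("slave has no protocol function\n");
+ */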
+
+/**
+ * bif_slave_read() - read contiguous memory values from a BIF slave
+ * @slave: BIF slave handle
+ * @addr: BIF slave address to begin reading at
+ * @buf: Buffer to fill with memory values
+ * @len: Number of bytes to read
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_slave_read(struct bif_slave *slave, u16 addr, u8 *buf, int len)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(slave) || IS_ERR_OR_NULL(buf)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ bif_slave_ctrl_lock(slave);
+
+ rc = _bif_slave_read(slave->sdev, addr, buf, len);
+ if (rc)
+ pr_err("BIF slave read failed, rc=%d\n", rc);
+
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_slave_read);
+
+/**
+ * bif_slave_write() - write contiguous memory values to a BIF slave
+ * @slave: BIF slave handle
+ * @addr: BIF slave address to begin writing at
+ * @buf: Buffer containing values to write
+ * @len: Number of bytes to write
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_slave_write(struct bif_slave *slave, u16 addr, u8 *buf, int len)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(slave) || IS_ERR_OR_NULL(buf)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ bif_slave_ctrl_lock(slave);
+
+ rc = _bif_slave_write(slave->sdev, addr, buf, len);
+ if (rc)
+ pr_err("BIF slave write failed, rc=%d\n", rc);
+
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_slave_write);
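+
+/*
+ * Usage sketch (hypothetical consumer code): read a few bytes of slave memory
+ * and write one byte back; FOO_ADDR is a placeholder address.
+ *
+ *   u8 buf[4];
+ *
+ *   rc = bif_slave_read(slave, FOO_ADDR, buf, sizeof(buf));
+ *   if (rc)
+ *           return rc;
+ *   buf[0] |= BIT(0);
+ *   rc = bif_slave_write(slave, FOO_ADDR, buf, 1);
+ */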
+
+/**
+ * bif_slave_is_present() - check if a slave is currently physically present
+ * in the system
+ * @slave: BIF slave handle
+ *
+ * Returns 1 if the slave is present, 0 if the slave is not present, or errno
+ * if an error occurred.
+ *
+ * This function can be used by BIF consumer drivers to check if their slave
+ * handles are still meaningful after battery reinsertion.
+ */
+int bif_slave_is_present(struct bif_slave *slave)
+{
+ if (IS_ERR_OR_NULL(slave)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ return slave->sdev->present;
+}
+EXPORT_SYMBOL(bif_slave_is_present);
+
+/**
+ * bif_slave_is_selected() - check if a slave is currently selected on the BIF
+ * bus
+ * @slave: BIF slave handle
+ *
+ * Returns 1 if the slave is selected, 0 if the slave is not selected, or errno
+ * if an error occurred.
+ *
+ * This function should not be required under normal circumstances since the
+ * bif-core framework ensures that slaves are always selected when needed.
+ * It would be most useful when used as a helper in conjunction with
+ * bif_ctrl_bus_lock() and the raw transaction functions.
+ */
+int bif_slave_is_selected(struct bif_slave *slave)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(slave)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ if (slave->sdev->bdev->selected_sdev != slave->sdev)
+ return false;
+
+ bif_slave_ctrl_lock(slave);
+ rc = bif_is_slave_selected(slave->sdev->bdev);
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_slave_is_selected);
+
+/**
+ * bif_slave_select() - select a slave on the BIF bus
+ * @slave: BIF slave handle
+ *
+ * Returns 0 on success or errno if an error occurred.
+ *
+ * This function should not be required under normal circumstances since the
+ * bif-core framework ensures that slaves are always selected when needed.
+ * It would be most useful when used as a helper in conjunction with
+ * bif_ctrl_bus_lock() and the raw transaction functions.
+ */
+int bif_slave_select(struct bif_slave *slave)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(slave)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ bif_slave_ctrl_lock(slave);
+ slave->sdev->bdev->selected_sdev = NULL;
+ rc = bif_select_slave(slave->sdev);
+ bif_slave_ctrl_unlock(slave);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_slave_select);
+
+/**
+ * bif_ctrl_raw_transaction() - perform a raw BIF transaction on the bus which
+ * expects no slave response
+ * @ctrl: BIF controller consumer handle
+ * @transaction: BIF transaction to carry out. This should be one of the
+ * values in enum bif_transaction.
+ * @data: 8-bit data to use in the transaction. The meaning of
+ * this data depends upon the transaction that is to be
+ * performed.
+ *
+ * When performing a bus command (BC) transaction, values in enum
+ * bif_bus_command may be used for the data parameter. Additional manufacturer
+ * specific values may also be used in a BC transaction.
+ *
+ * Returns 0 on success or errno if an error occurred.
+ *
+ * This function should only need to be used when BIF transactions are required
+ * that are not handled by the bif-core directly.
+ */
+int bif_ctrl_raw_transaction(struct bif_ctrl *ctrl, int transaction, u8 data)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ bif_ctrl_lock(ctrl);
+
+ rc = ctrl->bdev->desc->ops->bus_transaction(ctrl->bdev, transaction,
+ data);
+ if (rc)
+ pr_err("BIF bus transaction failed, rc=%d\n", rc);
+
+ bif_ctrl_unlock(ctrl);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_raw_transaction);
+
+/**
+ * bif_ctrl_raw_transaction_read() - perform a raw BIF transaction on the bus
+ * which expects an RD or TACK slave response word
+ * @ctrl: BIF controller consumer handle
+ * @transaction: BIF transaction to carry out. This should be one of the
+ * values in enum bif_transaction.
+ * @data: 8-bit data to use in the transaction. The meaning of
+ * this data depends upon the transaction that is to be
+ * performed.
+ * @response: Pointer to an integer which is filled with the 11-bit
+ * slave response word upon success. The 11-bit format is
+ * (MSB to LSB) BCF, ACK, EOT, D7-D0.
+ *
+ * When performing a bus command (BC) transaction, values in enum
+ * bif_bus_command may be used for the data parameter. Additional manufacturer
+ * specific values may also be used in a BC transaction.
+ *
+ * Returns 0 on success or errno if an error occurred.
+ *
+ * This function should only need to be used when BIF transactions are required
+ * that are not handled by the bif-core directly.
+ */
+int bif_ctrl_raw_transaction_read(struct bif_ctrl *ctrl, int transaction,
+ u8 data, int *response)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl) || IS_ERR_OR_NULL(response)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ bif_ctrl_lock(ctrl);
+
+ rc = ctrl->bdev->desc->ops->bus_transaction_read(ctrl->bdev,
+ transaction, data, response);
+ if (rc)
+ pr_err("BIF bus transaction failed, rc=%d\n", rc);
+
+ bif_ctrl_unlock(ctrl);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_raw_transaction_read);
+
+/**
+ * bif_ctrl_raw_transaction_query() - perform a raw BIF transaction on the bus
+ * which expects a BQ slave response
+ * @ctrl: BIF controller consumer handle
+ * @transaction: BIF transaction to carry out. This should be one of the
+ * values in enum bif_transaction.
+ * @data: 8-bit data to use in the transaction. The meaning of
+ * this data depends upon the transaction that is to be
+ * performed.
+ * @query_response: Pointer to boolean which is set to true if a BQ pulse
+ * is received, or false if no BQ pulse is received before
+ * timing out.
+ *
+ * When performing a bus command (BC) transaction, values in enum
+ * bif_bus_command may be used for the data parameter. Additional manufacturer
+ * specific values may also be used in a BC transaction.
+ *
+ * Returns 0 on success or errno if an error occurred.
+ *
+ * This function should only need to be used when BIF transactions are required
+ * that are not handled by the bif-core directly.
+ */
+int bif_ctrl_raw_transaction_query(struct bif_ctrl *ctrl, int transaction,
+ u8 data, bool *query_response)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl) || IS_ERR_OR_NULL(query_response)) {
+ pr_err("Invalid pointer input.\n");
+ return -EINVAL;
+ }
+
+ bif_ctrl_lock(ctrl);
+
+ rc = ctrl->bdev->desc->ops->bus_transaction_query(ctrl->bdev,
+ transaction, data, query_response);
+ if (rc)
+ pr_err("BIF bus transaction failed, rc=%d\n", rc);
+
+ bif_ctrl_unlock(ctrl);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_raw_transaction_query);
+
+/**
+ * bif_ctrl_bus_lock() - lock the BIF bus of a controller for exclusive access
+ * @ctrl: BIF controller consumer handle
+ *
+ * This function should only need to be called in circumstances where a BIF
+ * consumer is issuing special BIF bus commands that have strict ordering
+ * requirements.
+ */
+void bif_ctrl_bus_lock(struct bif_ctrl *ctrl)
+{
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid controller handle.\n");
+ return;
+ }
+
+ if (ctrl->exclusive_lock) {
+ pr_err("BIF bus exclusive lock already held\n");
+ return;
+ }
+
+ mutex_lock(&ctrl->bdev->mutex);
+ ctrl->exclusive_lock = true;
+ bif_cancel_irq_mode_work(ctrl->bdev);
+}
+EXPORT_SYMBOL(bif_ctrl_bus_lock);
+
+/**
+ * bif_ctrl_bus_unlock() - unlock the BIF bus of a controller that was
+ * previously locked for exclusive access
+ * @ctrl: BIF controller consumer handle
+ *
+ * This function must only be called after first calling bif_ctrl_bus_lock().
+ */
+void bif_ctrl_bus_unlock(struct bif_ctrl *ctrl)
+{
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid controller handle.\n");
+ return;
+ }
+
+ if (!ctrl->exclusive_lock) {
+ pr_err("BIF bus exclusive lock not already held\n");
+ return;
+ }
+
+ ctrl->exclusive_lock = false;
+ bif_schedule_irq_mode_work(ctrl->bdev);
+ mutex_unlock(&ctrl->bdev->mutex);
+}
+EXPORT_SYMBOL(bif_ctrl_bus_unlock);
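+
+/*
+ * Usage sketch (hypothetical consumer code): issue a raw bus command while
+ * holding the bus exclusively so no other transaction can be interleaved.
+ *
+ *   bool resp;
+ *
+ *   bif_ctrl_bus_lock(ctrl);
+ *   rc = bif_ctrl_raw_transaction_query(ctrl, BIF_TRANS_BC, BIF_CMD_ISTS,
+ *                                       &resp);
+ *   bif_ctrl_bus_unlock(ctrl);
+ */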
+
+/**
+ * bif_ctrl_measure_rid() - measure the battery pack Rid pull-down resistance
+ * in ohms
+ * @ctrl: BIF controller consumer handle
+ *
+ * Returns the resistance of the Rid resistor in ohms if successful or errno
+ * if an error occurred.
+ */
+int bif_ctrl_measure_rid(struct bif_ctrl *ctrl)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid controller handle.\n");
+ return -ENODEV;
+ }
+
+ if (!ctrl->bdev->desc->ops->get_battery_rid) {
+ pr_err("Cannot measure Rid.\n");
+ return -ENXIO;
+ }
+
+ bif_ctrl_lock(ctrl);
+
+ rc = ctrl->bdev->desc->ops->get_battery_rid(ctrl->bdev);
+ if (rc < 0)
+ pr_err("Error during Rid measurement, rc=%d\n", rc);
+
+ bif_ctrl_unlock(ctrl);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_measure_rid);
+
+/**
+ * bif_ctrl_get_bus_period() - get the BIF bus period (tau_bif) in nanoseconds
+ * @ctrl: BIF controller consumer handle
+ *
+ * Returns the currently configured bus period in nanoseconds if successful or
+ * errno if an error occurred.
+ */
+int bif_ctrl_get_bus_period(struct bif_ctrl *ctrl)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid controller handle.\n");
+ return -ENODEV;
+ }
+
+ if (!ctrl->bdev->desc->ops->get_bus_period) {
+ pr_err("Cannot get the BIF bus period.\n");
+ return -ENXIO;
+ }
+
+ rc = ctrl->bdev->desc->ops->get_bus_period(ctrl->bdev);
+ if (rc < 0)
+ pr_err("Error during bus period retrieval, rc=%d\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_get_bus_period);
+
+/**
+ * bif_ctrl_set_bus_period() - set the BIF bus period (tau_bif) in nanoseconds
+ * @ctrl: BIF controller consumer handle
+ * @period_ns: BIF bus period in nanoseconds to use
+ *
+ * If the exact period is not supported by the BIF controller hardware, then the
+ * next larger supported period will be used.
+ *
+ * Returns 0 on success or errno if an error occurred.
+ */
+int bif_ctrl_set_bus_period(struct bif_ctrl *ctrl, int period_ns)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid controller handle.\n");
+ return -ENODEV;
+ }
+
+ if (!ctrl->bdev->desc->ops->set_bus_period) {
+ pr_err("Cannot set the BIF bus period.\n");
+ return -ENXIO;
+ }
+
+ bif_ctrl_lock(ctrl);
+ rc = ctrl->bdev->desc->ops->set_bus_period(ctrl->bdev, period_ns);
+ if (rc)
+ pr_err("Error during bus period configuration, rc=%d\n", rc);
+ bif_ctrl_unlock(ctrl);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_set_bus_period);
+
+/**
+ * bif_ctrl_get_bus_state() - get the current state of the BIF bus
+ * @ctrl: BIF controller consumer handle
+ *
+ * Returns a bus state from enum bif_bus_state if successful or errno if an
+ * error occurred.
+ */
+int bif_ctrl_get_bus_state(struct bif_ctrl *ctrl)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid controller handle.\n");
+ return -ENODEV;
+ }
+
+ rc = ctrl->bdev->desc->ops->get_bus_state(ctrl->bdev);
+ if (rc < 0)
+ pr_err("Error during bus state retrieval, rc=%d\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_get_bus_state);
+
+/**
+ * bif_ctrl_set_bus_state() - set the state of the BIF bus
+ * @ctrl: BIF controller consumer handle
+ * @state: State for the BIF bus to enter
+ *
+ * Returns 0 on success or errno if an error occurred.
+ */
+int bif_ctrl_set_bus_state(struct bif_ctrl *ctrl, enum bif_bus_state state)
+{
+ int rc;
+
+ if (IS_ERR_OR_NULL(ctrl)) {
+ pr_err("Invalid controller handle.\n");
+ return -ENODEV;
+ }
+
+ bif_ctrl_lock(ctrl);
+
+ rc = ctrl->bdev->desc->ops->set_bus_state(ctrl->bdev, state);
+ if (rc < 0)
+ pr_err("Error during bus state configuration, rc=%d\n", rc);
+
+ /*
+ * Uncache the selected slave if the new bus state results in the slave
+ * becoming unselected.
+ */
+ if (state == BIF_BUS_STATE_MASTER_DISABLED
+ || state == BIF_BUS_STATE_POWER_DOWN
+ || state == BIF_BUS_STATE_STANDBY)
+ ctrl->bdev->selected_sdev = NULL;
+
+ bif_ctrl_unlock(ctrl);
+
+ return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_set_bus_state);
+
+/*
+ * Check if the specified function is a protocol function and if it is, then
+ * instantiate protocol function data for the slave.
+ */
+static int bif_initialize_protocol_function(struct bif_slave_dev *sdev,
+ struct bif_ddb_l2_data *func)
+{
+ int rc = 0;
+ u8 buf[4];
+
+ /* Ensure that this is a protocol function. */
+ if (func->function_type != BIF_FUNC_PROTOCOL)
+ return 0;
+
+ if (sdev->protocol_function) {
+ pr_err("Duplicate protocol function found for BIF slave; DEV_ADR=0x%02X\n",
+ sdev->slave_addr);
+ return -EPERM;
+ }
+
+ sdev->protocol_function = kzalloc(sizeof(struct bif_protocol_function),
+ GFP_KERNEL);
+ if (!sdev->protocol_function) {
+ pr_err("out of memory\n");
+ return -ENOMEM;
+ }
+
+ rc = _bif_slave_read(sdev, func->function_pointer, buf, 4);
+ if (rc) {
+ pr_err("Protocol function data read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ sdev->protocol_function->protocol_pointer = buf[0] << 8 | buf[1];
+ sdev->protocol_function->device_id_pointer = buf[2] << 8 | buf[3];
+ sdev->protocol_function->l2_entry = func;
+
+ rc = _bif_slave_read(sdev, sdev->protocol_function->device_id_pointer,
+ sdev->protocol_function->device_id, BIF_DEVICE_ID_BYTE_LENGTH);
+ if (rc) {
+ pr_err("Device ID read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Check if this slave does not have a UID value stored. */
+ if (sdev->unique_id_bits_known == 0) {
+ sdev->unique_id_bits_known = BIF_UNIQUE_ID_BIT_LENGTH;
+ /* Fill in UID using manufacturer ID and device ID. */
+ sdev->unique_id[0] = sdev->l1_data.manufacturer_id >> 8;
+ sdev->unique_id[1] = sdev->l1_data.manufacturer_id;
+ memcpy(&sdev->unique_id[2],
+ sdev->protocol_function->device_id,
+ BIF_DEVICE_ID_BYTE_LENGTH);
+ }
+
+ return rc;
+}
+
+/*
+ * Check if the specified function is a slave control function and if it is,
+ * then instantiate slave control function data for the slave.
+ */
+static int bif_initialize_slave_control_function(struct bif_slave_dev *sdev,
+ struct bif_ddb_l2_data *func)
+{
+ int rc = 0;
+ int i;
+ u8 buf[3];
+
+ /* Ensure that this is a slave control function. */
+ if (func->function_type != BIF_FUNC_SLAVE_CONTROL)
+ return 0;
+
+ if (sdev->slave_ctrl_function) {
+ pr_err("Duplicate slave control function found for BIF slave; DEV_ADR=0x%02X\n",
+ sdev->slave_addr);
+ return -EPERM;
+ }
+
+ sdev->slave_ctrl_function
+ = kzalloc(sizeof(*sdev->slave_ctrl_function), GFP_KERNEL);
+ if (!sdev->slave_ctrl_function) {
+ pr_err("out of memory\n");
+ return -ENOMEM;
+ }
+
+ rc = _bif_slave_read(sdev, func->function_pointer, buf, 3);
+ if (rc) {
+ pr_err("Slave control function data read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ sdev->slave_ctrl_function->slave_ctrl_pointer = buf[0] << 8 | buf[1];
+ sdev->slave_ctrl_function->task_count
+ = buf[2] * SLAVE_CTRL_TASKS_PER_SET;
+ sdev->slave_ctrl_function->l2_entry = func;
+
+ if (sdev->slave_ctrl_function->task_count > 0) {
+ sdev->slave_ctrl_function->irq_notifier_list =
+ kzalloc(sizeof(struct blocking_notifier_head)
+ * sdev->slave_ctrl_function->task_count,
+ GFP_KERNEL);
+ if (!sdev->slave_ctrl_function->irq_notifier_list) {
+ pr_err("out of memory\n");
+ kfree(sdev->slave_ctrl_function);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < sdev->slave_ctrl_function->task_count; i++) {
+ BLOCKING_INIT_NOTIFIER_HEAD(
+ &sdev->slave_ctrl_function->irq_notifier_list[i]);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * bif_crc_ccitt() - calculate the CRC-CCITT CRC value of the data specified
+ * @buffer: Data to calculate the CRC of
+ * @len: Length of the data buffer in bytes
+ *
+ * MIPI-BIF specifies the usage of CRC-CCITT for BIF data objects. This
+ * function performs the CRC calculation while taking into account the bit
+ * ordering used by BIF.
+ */
+u16 bif_crc_ccitt(const u8 *buffer, unsigned int len)
+{
+ u16 crc = 0xFFFF;
+
+ while (len--) {
+ crc = crc_ccitt_byte(crc, bitrev8(*buffer));
+ buffer++;
+ }
+ return bitrev16(crc);
+}
+EXPORT_SYMBOL(bif_crc_ccitt);
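+
+/*
+ * Usage sketch (hypothetical buffer): the CRC stored in the last two bytes of
+ * a BIF data object should equal bif_crc_ccitt() computed over the preceding
+ * header and data bytes, as bif_object_crc_ccitt() below does field by field.
+ *
+ *   u16 crc = bif_crc_ccitt(obj_buf, obj_len - 2);
+ *
+ *   if (crc != stored_crc)
+ *           pr_err("BIF object CRC mismatch\n");
+ */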
+
+static u16 bif_object_crc_ccitt(const struct bif_object *object)
+{
+ u16 crc = 0xFFFF;
+ int i;
+
+ crc = crc_ccitt_byte(crc, bitrev8(object->type));
+ crc = crc_ccitt_byte(crc, bitrev8(object->version));
+ crc = crc_ccitt_byte(crc, bitrev8(object->manufacturer_id >> 8));
+ crc = crc_ccitt_byte(crc, bitrev8(object->manufacturer_id));
+ crc = crc_ccitt_byte(crc, bitrev8(object->length >> 8));
+ crc = crc_ccitt_byte(crc, bitrev8(object->length));
+
+ for (i = 0; i < object->length - 8; i++)
+ crc = crc_ccitt_byte(crc, bitrev8(object->data[i]));
+
+ return bitrev16(crc);
+}
+
+/*
+ * Check if the specified function is an NVM function and if it is, then
+ * instantiate NVM function data for the slave and read all objects.
+ */
+static int bif_initialize_nvm_function(struct bif_slave_dev *sdev,
+ struct bif_ddb_l2_data *func)
+{
+ int rc = 0;
+ int data_len;
+ u8 buf[8], object_type;
+ struct bif_object *object;
+ struct bif_object *temp;
+ u16 addr;
+ u16 crc;
+
+ /* Ensure that this is an NVM function. */
+ if (func->function_type != BIF_FUNC_NVM)
+ return 0;
+
+ if (sdev->nvm_function) {
+ pr_err("Duplicate NVM function found for BIF slave; DEV_ADR=0x%02X\n",
+ sdev->slave_addr);
+ return -EPERM;
+ }
+
+ sdev->nvm_function
+ = kzalloc(sizeof(*sdev->nvm_function), GFP_KERNEL);
+ if (!sdev->nvm_function) {
+ pr_err("out of memory\n");
+ return -ENOMEM;
+ }
+
+ rc = _bif_slave_read(sdev, func->function_pointer, buf, 8);
+ if (rc) {
+ pr_err("NVM function data read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ sdev->nvm_function->nvm_pointer = buf[0] << 8 | buf[1];
+ sdev->nvm_function->slave_control_channel = buf[2];
+ sdev->nvm_function->write_buffer_size = buf[3];
+ sdev->nvm_function->nvm_base_address = buf[4] << 8 | buf[5];
+ sdev->nvm_function->nvm_size = buf[6] << 8 | buf[7];
+
+ INIT_LIST_HEAD(&sdev->nvm_function->object_list);
+
+ /* Read object list */
+ addr = sdev->nvm_function->nvm_base_address;
+ rc = _bif_slave_read(sdev, addr, &object_type, 1);
+ if (rc) {
+ pr_err("Slave memory read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Object type == 0x00 corresponds to the end of the object list. */
+ while (object_type != 0x00) {
+ object = kzalloc(sizeof(*object), GFP_KERNEL);
+ if (!object) {
+ pr_err("out of memory\n");
+ rc = -ENOMEM;
+ goto free_data;
+ }
+ list_add_tail(&object->list, &sdev->nvm_function->object_list);
+
+ rc = _bif_slave_read(sdev, addr + 1, buf + 1, 5);
+ if (rc) {
+ pr_err("Slave memory read of object header failed; addr=0x%04X, len=%d, rc=%d\n",
+ addr + 1, 5, rc);
+ goto free_data;
+ }
+
+ object->addr = addr;
+ object->type = object_type;
+ object->version = buf[1];
+ object->manufacturer_id = buf[2] << 8 | buf[3];
+ object->length = buf[4] << 8 | buf[5];
+
+ if ((object->addr + object->length)
+ > (sdev->nvm_function->nvm_base_address
+ + sdev->nvm_function->nvm_size)) {
+ pr_warn("warning: BIF slave object is not formatted correctly; NVM base=0x%04X, NVM len=%d, object addr=0x%04X, object len=%d\n",
+ sdev->nvm_function->nvm_base_address,
+ sdev->nvm_function->nvm_size,
+ object->addr,
+ object->length);
+ /* Limit object size to remaining NVM size. */
+ object->length = sdev->nvm_function->nvm_size
+ + sdev->nvm_function->nvm_base_address
+ - object->addr;
+ }
+
+ /* Object header + CRC takes up 8 bytes. */
+ data_len = object->length - 8;
+ object->data = kmalloc(data_len, GFP_KERNEL);
+ if (!object->data) {
+ pr_err("out of memory\n");
+ rc = -ENOMEM;
+ goto free_data;
+ }
+
+ rc = _bif_slave_read(sdev, addr + 6, object->data, data_len);
+ if (rc) {
+ pr_err("Slave memory read of object data failed; addr=0x%04X, len=%d, rc=%d\n",
+ addr + 6, data_len, rc);
+ goto free_data;
+ }
+
+ rc = _bif_slave_read(sdev, addr + 6 + data_len, buf, 3);
+ if (rc) {
+ pr_err("Slave memory read of object CRC failed; addr=0x%04X, len=%d, rc=%d\n",
+ addr + 6 + data_len, 3, rc);
+ goto free_data;
+ }
+
+ object->crc = buf[0] << 8 | buf[1];
+ object_type = buf[2];
+ sdev->nvm_function->object_count++;
+
+ crc = bif_object_crc_ccitt(object);
+ if (crc != object->crc)
+ pr_info("BIF object at addr=0x%04X has invalid CRC; crc calc=0x%04X, crc exp=0x%04X\n",
+ object->addr, crc, object->crc);
+
+ addr += object->length;
+ }
+
+ return rc;
+
+free_data:
+ list_for_each_entry_safe(object, temp,
+ &sdev->nvm_function->object_list, list) {
+ list_del(&object->list);
+ kfree(object->data);
+ kfree(object);
+ }
+ kfree(sdev->nvm_function);
+ sdev->nvm_function = NULL;
+ return rc;
+}
+
+static int bif_parse_slave_data(struct bif_slave_dev *sdev)
+{
+ int rc = 0;
+ u8 buf[10];
+ u8 *func_buf;
+ struct bif_ddb_l2_data *func;
+ int function_count, i;
+
+ rc = _bif_slave_read(sdev, BIF_DDB_L1_BASE_ADDR, buf, 10);
+ if (rc) {
+ pr_err("DDB L1 data read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ sdev->l1_data.revision = buf[0];
+ sdev->l1_data.level = buf[1];
+ sdev->l1_data.device_class = buf[2] << 8 | buf[3];
+ sdev->l1_data.manufacturer_id = buf[4] << 8 | buf[5];
+ sdev->l1_data.product_id = buf[6] << 8 | buf[7];
+ sdev->l1_data.length = buf[8] << 8 | buf[9];
+
+ function_count = sdev->l1_data.length / 4;
+ if (sdev->l1_data.length % 4) {
+ pr_err("Function directory length=%d is invalid\n",
+ sdev->l1_data.length);
+ return -EPROTO;
+ }
+
+ /* No DDB L2 function directory */
+ if (function_count == 0)
+ return 0;
+
+ func_buf = kmalloc(sdev->l1_data.length, GFP_KERNEL);
+ if (!func_buf) {
+ pr_err("out of memory\n");
+ return -ENOMEM;
+ }
+
+ sdev->function_directory = kzalloc(
+ function_count * sizeof(struct bif_ddb_l2_data), GFP_KERNEL);
+ if (!sdev->function_directory) {
+ pr_err("out of memory\n");
+ kfree(func_buf);
+ return -ENOMEM;
+ }
+
+ rc = _bif_slave_read(sdev, BIF_DDB_L2_BASE_ADDR, func_buf,
+ sdev->l1_data.length);
+ if (rc) {
+ pr_err("DDB L2 data read failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ for (i = 0; i < function_count; i++) {
+ func = &sdev->function_directory[i];
+ func->function_type = func_buf[i * 4];
+ func->function_version = func_buf[i * 4 + 1];
+ func->function_pointer = func_buf[i * 4 + 2] << 8
+ | func_buf[i * 4 + 3];
+ rc = bif_initialize_protocol_function(sdev, func);
+ if (rc)
+ goto done;
+ rc = bif_initialize_slave_control_function(sdev, func);
+ if (rc)
+ goto done;
+ rc = bif_initialize_nvm_function(sdev, func);
+ if (rc)
+ goto done;
+ }
+done:
+ kfree(func_buf);
+ return rc;
+}
+
+static int bif_add_secondary_slaves(struct bif_slave_dev *primary_slave)
+{
+ int rc = 0;
+ int data_len, i;
+ u16 crc;
+ struct bif_slave_dev *sdev;
+ struct bif_object *object;
+
+ list_for_each_entry(object, &primary_slave->nvm_function->object_list,
+ list) {
+ if (object->type != BIF_OBJ_SEC_SLAVE)
+ continue;
+
+ data_len = object->length - 8;
+ if (data_len % BIF_UNIQUE_ID_BYTE_LENGTH) {
+ pr_info("Invalid secondary slave object found, addr=0x%04X, data len=%d\n",
+ object->addr, data_len);
+ continue;
+ }
+
+ crc = bif_object_crc_ccitt(object);
+ if (crc != object->crc) {
+ pr_info("BIF object at addr=0x%04X has invalid CRC; crc calc=0x%04X, crc exp=0x%04X\n",
+ object->addr, crc, object->crc);
+ continue;
+ }
+
+ for (i = 0; i < data_len / BIF_UNIQUE_ID_BYTE_LENGTH; i++) {
+ sdev = bif_add_slave(primary_slave->bdev);
+ if (IS_ERR(sdev)) {
+ rc = PTR_ERR(sdev);
+ pr_err("bif_add_slave failed, rc=%d\n", rc);
+ return rc;
+ }
+ memcpy(sdev->unique_id,
+ &object->data[i * BIF_UNIQUE_ID_BYTE_LENGTH],
+ BIF_UNIQUE_ID_BYTE_LENGTH);
+ sdev->unique_id_bits_known = BIF_UNIQUE_ID_BIT_LENGTH;
+
+ rc = bif_select_slave(sdev);
+ if (rc) {
+ pr_err("Could not select slave, rc=%d\n", rc);
+ goto free_slave;
+ }
+
+ rc = bif_is_slave_selected(sdev->bdev);
+ if (rc < 0) {
+ pr_err("Transaction failed, rc=%d\n", rc);
+ goto free_slave;
+ } else if (rc == 1) {
+ sdev->present = true;
+ sdev->bdev->selected_sdev = sdev;
+ } else {
+ sdev->present = false;
+ sdev->bdev->selected_sdev = NULL;
+ }
+ }
+ }
+
+ return rc;
+
+free_slave:
+ bif_remove_slave(sdev);
+ return rc;
+}
+
+/*
+ * Performs UID search to identify all slaves attached to the bus. Assumes that
+ * all necessary locks are held.
+ */
+static int bif_perform_uid_search(struct bif_ctrl_dev *bdev)
+{
+ struct bif_slave_dev *sdev;
+ struct bif_slave_dev *new_slave;
+ bool resp[2], resp_dilc;
+ int i;
+ int rc = 0;
+ u8 cmd_probe[2] = {BIF_CMD_DIP0, BIF_CMD_DIP1};
+ u8 cmd_enter[2] = {BIF_CMD_DIE0, BIF_CMD_DIE1};
+
+ /*
+ * Iterate over all partially known UIDs adding new ones as they are
+ * found.
+ */
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ /* Skip slaves with fully known UIDs. */
+ if (sdev->unique_id_bits_known == BIF_UNIQUE_ID_BIT_LENGTH
+ || sdev->bdev != bdev)
+ continue;
+
+ /* Begin a new UID search. */
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_BC,
+ BIF_CMD_DISS);
+ if (rc) {
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Step through all known UID bits (MSB to LSB). */
+ for (i = 0; i < sdev->unique_id_bits_known; i++) {
+ rc = bdev->desc->ops->bus_transaction(bdev,
+ BIF_TRANS_BC,
+ cmd_enter[get_uid_bit(sdev->unique_id, i)]);
+ if (rc) {
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Step through unknown UID bits. */
+ for (i = sdev->unique_id_bits_known;
+ i < BIF_UNIQUE_ID_BIT_LENGTH; i++) {
+ rc = bdev->desc->ops->bus_transaction_query(bdev,
+ BIF_TRANS_BC, cmd_probe[0], &resp[0]);
+ if (rc) {
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = bdev->desc->ops->bus_transaction_query(bdev,
+ BIF_TRANS_BC, cmd_probe[1], &resp[1]);
+ if (rc) {
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (resp[0] && resp[1]) {
+ /* Create an entry for the new UID branch. */
+ new_slave = bif_add_slave(bdev);
+ if (IS_ERR(new_slave)) {
+ rc = PTR_ERR(new_slave);
+ pr_err("bif_add_slave failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ memcpy(new_slave->unique_id, sdev->unique_id,
+ BIF_UNIQUE_ID_BYTE_LENGTH);
+ new_slave->bdev = sdev->bdev;
+
+ set_uid_bit(sdev->unique_id, i, 0);
+ sdev->unique_id_bits_known = i + 1;
+
+ set_uid_bit(new_slave->unique_id, i, 1);
+ new_slave->unique_id_bits_known = i + 1;
+ } else if (resp[0]) {
+ set_uid_bit(sdev->unique_id, i, 0);
+ sdev->unique_id_bits_known = i + 1;
+ } else if (resp[1]) {
+ set_uid_bit(sdev->unique_id, i, 1);
+ sdev->unique_id_bits_known = i + 1;
+ } else {
+ pr_debug("no bus query response received\n");
+ rc = -ENXIO;
+ return rc;
+ }
+
+ rc = bdev->desc->ops->bus_transaction(bdev,
+ BIF_TRANS_BC, cmd_enter[resp[0] ? 0 : 1]);
+ if (rc) {
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ rc = bdev->desc->ops->bus_transaction_query(bdev,
+ BIF_TRANS_BC, BIF_CMD_DILC, &resp_dilc);
+ if (rc) {
+ pr_err("bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (resp_dilc) {
+ sdev->present = true;
+ sdev->bdev->selected_sdev = sdev;
+ rc = bif_parse_slave_data(sdev);
+ } else {
+ pr_err("Slave failed to respond to DILC bus command; its UID is thus unverified.\n");
+ sdev->unique_id_bits_known = 0;
+ rc = -ENXIO;
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Removes slaves from the bif_sdev_list which have the same UID as previous
+ * slaves in the list.
+ */
+static int bif_remove_duplicate_slaves(struct bif_ctrl_dev *bdev)
+{
+ struct bif_slave_dev *sdev;
+ struct bif_slave_dev *last_slave;
+ struct bif_slave_dev *temp;
+
+ list_for_each_entry_safe(last_slave, temp, &bif_sdev_list, list) {
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ if (last_slave == sdev) {
+ break;
+ } else if (memcmp(last_slave->unique_id,
+ sdev->unique_id,
+ BIF_UNIQUE_ID_BYTE_LENGTH) == 0) {
+ bif_remove_slave(last_slave);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int bif_add_all_slaves(struct bif_ctrl_dev *bdev)
+{
+ struct bif_slave_dev *sdev;
+ int rc = 0;
+ int i;
+ bool has_slave = false, is_primary_slave = false;
+
+ mutex_lock(&bif_sdev_list_mutex);
+ mutex_lock(&bdev->mutex);
+
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ if (sdev->bdev == bdev) {
+ has_slave = true;
+ break;
+ }
+ }
+
+ if (!has_slave) {
+ /* Create a single empty slave to start the search algorithm. */
+ sdev = bif_add_slave(bdev);
+ if (IS_ERR(sdev)) {
+ rc = PTR_ERR(sdev);
+ pr_err("bif_add_slave failed, rc=%d\n", rc);
+ goto out;
+ }
+
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ /* Attempt to select primary slave in battery pack. */
+ rc = bdev->desc->ops->bus_transaction(bdev,
+ BIF_TRANS_SDA, BIF_PRIMARY_SLAVE_DEV_ADR);
+ if (rc == 0)
+ break;
+ }
+ if (rc) {
+ pr_err("BIF bus_transaction failed, rc=%d\n", rc);
+ goto out;
+ }
+
+ /* Check if a slave is selected. */
+ rc = bif_is_slave_selected(bdev);
+ if (rc < 0) {
+ pr_err("BIF bus_transaction failed, rc=%d\n", rc);
+ goto out;
+ } else {
+ is_primary_slave = rc;
+ }
+ }
+
+ if (is_primary_slave) {
+ pr_debug("Using primary slave at DEV_ADR==0x%02X\n",
+ BIF_PRIMARY_SLAVE_DEV_ADR);
+ sdev->bdev->selected_sdev = sdev;
+ sdev->present = true;
+ sdev->slave_addr = BIF_PRIMARY_SLAVE_DEV_ADR;
+ rc = bif_parse_slave_data(sdev);
+ if (rc) {
+ pr_err("Failed to parse primary slave data, rc=%d\n",
+ rc);
+ goto out;
+ }
+ rc = bif_add_secondary_slaves(sdev);
+ if (rc) {
+ pr_err("Failed to add secondary slaves, rc=%d\n", rc);
+ goto out;
+ }
+ } else {
+ pr_debug("Falling back on full UID search.\n");
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ rc = bif_perform_uid_search(bdev);
+ if (rc == 0)
+ break;
+ }
+ if (rc) {
+ pr_debug("BIF UID search failed, rc=%d\n", rc);
+ goto out;
+ }
+ }
+
+ bif_remove_duplicate_slaves(bdev);
+
+ mutex_unlock(&bdev->mutex);
+ mutex_unlock(&bif_sdev_list_mutex);
+
+ return rc;
+
+out:
+ mutex_unlock(&bdev->mutex);
+ mutex_unlock(&bif_sdev_list_mutex);
+ pr_debug("BIF slave search failed, rc=%d\n", rc);
+ return rc;
+}
+
+static int bif_add_known_slave(struct bif_ctrl_dev *bdev, u8 slave_addr)
+{
+ struct bif_slave_dev *sdev;
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+ /* Attempt to select the slave. */
+ rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_SDA,
+ slave_addr);
+ if (rc == 0)
+ break;
+ }
+ if (rc) {
+ pr_err("BIF bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Check if a slave is selected. */
+ rc = bif_is_slave_selected(bdev);
+ if (rc < 0) {
+ pr_err("BIF bus_transaction failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ sdev = bif_add_slave(bdev);
+ if (IS_ERR(sdev)) {
+ rc = PTR_ERR(sdev);
+ pr_err("bif_add_slave failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ sdev->bdev->selected_sdev = sdev;
+ sdev->present = true;
+ sdev->slave_addr = slave_addr;
+ rc = bif_parse_slave_data(sdev);
+ if (rc) {
+ pr_err("Failed to parse slave data, addr=0x%02X, rc=%d\n",
+ slave_addr, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int bif_add_known_slaves_from_dt(struct bif_ctrl_dev *bdev,
+ struct device_node *of_node)
+{
+ int len = 0;
+ int rc = 0, i;
+ u32 addr;
+ const __be32 *val;
+
+ mutex_lock(&bif_sdev_list_mutex);
+ mutex_lock(&bdev->mutex);
+
+ val = of_get_property(of_node, "qcom,known-device-addresses", &len);
+ len /= sizeof(u32);
+ if (val && len == 0) {
+ pr_err("qcom,known-device-addresses property is invalid\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < len; i++) {
+ addr = be32_to_cpup(val++);
+ if (addr == 0x00 || addr > 0xFF) {
+ rc = -EINVAL;
+ pr_err("qcom,known-device-addresses property contains invalid address=0x%X\n",
+ addr);
+ goto out;
+ }
+ rc = bif_add_known_slave(bdev, addr);
+ if (rc) {
+ pr_err("bif_add_known_slave() failed, rc=%d\n", rc);
+ goto out;
+ }
+ }
+
+out:
+ if (len > 0)
+ bif_remove_duplicate_slaves(bdev);
+
+ mutex_unlock(&bdev->mutex);
+ mutex_unlock(&bif_sdev_list_mutex);
+
+ return rc;
+}
+
+/*
+ * Programs a device address for the specified slave in order to simplify
+ * slave selection in the future.
+ */
+static int bif_assign_slave_dev_addr(struct bif_slave_dev *sdev, u8 dev_addr)
+{
+ int rc;
+ u16 addr;
+
+ if (!sdev->protocol_function) {
+ pr_err("Protocol function not present; cannot set device address.\n");
+ return -ENODEV;
+ }
+
+ addr = PROTOCOL_FUNC_DEV_ADR_ADDR(
+ sdev->protocol_function->protocol_pointer);
+
+ rc = _bif_slave_write(sdev, addr, &dev_addr, 1);
+ if (rc)
+ pr_err("Failed to set slave device address.\n");
+ else
+ sdev->slave_addr = dev_addr;
+
+ return rc;
+}
+
+/* Assigns a unique device address to all slaves which do not have one. */
+static int bif_assign_all_slaves_dev_addr(struct bif_ctrl_dev *bdev)
+{
+ struct bif_slave_dev *sdev;
+ struct bif_slave_dev *sibling;
+ bool duplicate;
+ int rc = 0;
+ u8 dev_addr, first_dev_addr;
+
+ mutex_lock(&bif_sdev_list_mutex);
+ mutex_lock(&bdev->mutex);
+
+ first_dev_addr = next_dev_addr;
+ /*
+ * Assign a device address to each present slave that has a fully known
+ * UID but does not yet have an address.
+ */
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ /*
+ * Skip slaves whose UIDs aren't fully known, which already have a
+ * device address, or which aren't present.
+ */
+ if (sdev->unique_id_bits_known != BIF_UNIQUE_ID_BIT_LENGTH
+ || sdev->slave_addr != 0x00 || !sdev->present)
+ continue;
+
+ do {
+ dev_addr = next_dev_addr;
+ duplicate = false;
+ list_for_each_entry(sibling, &bif_sdev_list, list) {
+ if (sibling->slave_addr == dev_addr) {
+ duplicate = true;
+ break;
+ }
+ }
+
+ next_dev_addr = dev_addr + 1;
+ } while (duplicate && (next_dev_addr != first_dev_addr));
+
+ if (next_dev_addr == first_dev_addr) {
+ pr_err("No more BIF slave device addresses available.\n");
+ rc = -ENODEV;
+ goto out;
+ }
+
+ rc = bif_assign_slave_dev_addr(sdev, dev_addr);
+ if (rc) {
+ pr_err("Failed to set slave address.\n");
+ goto out;
+ }
+ }
+
+ mutex_unlock(&bdev->mutex);
+ mutex_unlock(&bif_sdev_list_mutex);
+
+ return rc;
+
+out:
+ mutex_unlock(&bdev->mutex);
+ mutex_unlock(&bif_sdev_list_mutex);
+ pr_err("BIF slave device address setting failed, rc=%d\n", rc);
+ return rc;
+}
+
+/**
+ * bdev_get_drvdata() - get the private BIF controller driver data
+ * @bdev: BIF controller device pointer
+ */
+void *bdev_get_drvdata(struct bif_ctrl_dev *bdev)
+{
+ return bdev->driver_data;
+}
+EXPORT_SYMBOL(bdev_get_drvdata);
+
+static const char * const battery_label[] = {
+ "unknown",
+ "none",
+ "special 1",
+ "special 2",
+ "special 3",
+ "low cost",
+ "smart",
+};
+
+static const char *bif_get_battery_pack_type(int rid_ohm)
+{
+ const char *label = battery_label[0];
+
+ if (rid_ohm > BIF_BATT_RID_SMART_MAX)
+ label = battery_label[1];
+ else if (rid_ohm >= BIF_BATT_RID_SMART_MIN)
+ label = battery_label[6];
+ else if (rid_ohm >= BIF_BATT_RID_LOW_COST_MIN
+ && rid_ohm <= BIF_BATT_RID_LOW_COST_MAX)
+ label = battery_label[5];
+ else if (rid_ohm >= BIF_BATT_RID_SPECIAL3_MIN
+ && rid_ohm <= BIF_BATT_RID_SPECIAL3_MAX)
+ label = battery_label[4];
+ else if (rid_ohm >= BIF_BATT_RID_SPECIAL2_MIN
+ && rid_ohm <= BIF_BATT_RID_SPECIAL2_MAX)
+ label = battery_label[3];
+ else if (rid_ohm >= BIF_BATT_RID_SPECIAL1_MIN
+ && rid_ohm <= BIF_BATT_RID_SPECIAL1_MAX)
+ label = battery_label[2];
+
+ return label;
+}
+
+/**
+ * bif_ctrl_register() - register a BIF controller with the BIF framework
+ * @bif_desc: Pointer to BIF controller descriptor
+ * @dev: Device pointer of the BIF controller
+ * @driver_data: Private driver data to associate with the BIF controller
+ * @of_node: Pointer to the device tree node of the BIF controller
+ *
+ * Returns a BIF controller device pointer for the controller if registration
+ * is successful or an ERR_PTR if an error occurred.
+ */
+struct bif_ctrl_dev *bif_ctrl_register(struct bif_ctrl_desc *bif_desc,
+ struct device *dev, void *driver_data, struct device_node *of_node)
+{
+ struct bif_ctrl_dev *bdev = ERR_PTR(-EINVAL);
+ struct bif_slave_dev *sdev;
+ bool battery_present = false;
+ int rc, rid_ohm;
+
+ if (!bif_desc) {
+ pr_err("Invalid bif_desc specified\n");
+ return bdev;
+ } else if (!bif_desc->name) {
+ pr_err("BIF name missing\n");
+ return bdev;
+ } else if (!bif_desc->ops) {
+ pr_err("BIF operations missing\n");
+ return bdev;
+ } else if (!bif_desc->ops->bus_transaction
+ || !bif_desc->ops->bus_transaction_query
+ || !bif_desc->ops->bus_transaction_read
+ || !bif_desc->ops->get_bus_state
+ || !bif_desc->ops->set_bus_state) {
+ pr_err("BIF operation callback function(s) missing\n");
+ return bdev;
+ }
+
+ bdev = kzalloc(sizeof(struct bif_ctrl_dev), GFP_KERNEL);
+ if (bdev == NULL) {
+ pr_err("Memory allocation failed for bif_ctrl_dev\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mutex_init(&bdev->mutex);
+ INIT_LIST_HEAD(&bdev->list);
+ INIT_DELAYED_WORK(&bdev->enter_irq_mode_work, bif_enter_irq_mode_work);
+ bdev->desc = bif_desc;
+ bdev->ctrl_dev = dev;
+ bdev->driver_data = driver_data;
+ bdev->irq_mode_delay_jiffies = 2;
+
+ mutex_lock(&bif_ctrl_list_mutex);
+ list_add_tail(&bdev->list, &bif_ctrl_list);
+ mutex_unlock(&bif_ctrl_list_mutex);
+
+ rc = bif_add_all_slaves(bdev);
+ if (rc)
+ pr_debug("Search for all slaves failed, rc=%d\n", rc);
+ rc = bif_add_known_slaves_from_dt(bdev, of_node);
+ if (rc)
+ pr_err("Adding slaves based on device tree addressed failed, rc=%d.\n",
+ rc);
+ rc = bif_assign_all_slaves_dev_addr(bdev);
+ if (rc)
+ pr_err("Failed to set slave device address, rc=%d\n", rc);
+
+ bif_print_slaves();
+
+ if (bdev->desc->ops->get_battery_presence) {
+ rc = bdev->desc->ops->get_battery_presence(bdev);
+ if (rc < 0) {
+ pr_err("Could not determine battery presence, rc=%d\n",
+ rc);
+ } else {
+ battery_present = rc;
+ pr_info("Battery pack present = %c\n", rc ? 'Y' : 'N');
+ }
+ }
+
+ if (bdev->desc->ops->get_battery_rid) {
+ rid_ohm = bdev->desc->ops->get_battery_rid(bdev);
+ if (rid_ohm >= 0)
+ pr_info("Battery pack type = %s (Rid=%d ohm)\n",
+ bif_get_battery_pack_type(rid_ohm), rid_ohm);
+ else
+ pr_err("Could not read Rid, rc=%d\n", rid_ohm);
+ }
+
+ list_for_each_entry(sdev, &bif_sdev_list, list) {
+ if (sdev->present) {
+ battery_present = true;
+ break;
+ }
+ }
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&bdev->bus_change_notifier);
+
+ if (battery_present) {
+ bdev->battery_present = true;
+ rc = blocking_notifier_call_chain(&bdev->bus_change_notifier,
+ BIF_BUS_EVENT_BATTERY_INSERTED, bdev);
+ if (rc)
+ pr_err("Call chain noification failed, rc=%d\n", rc);
+ }
+
+ return bdev;
+}
+EXPORT_SYMBOL(bif_ctrl_register);
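+
+/*
+ * Usage sketch (illustrative only, not part of this change): a
+ * hypothetical BIF controller driver fills in a descriptor and the
+ * mandatory bus callbacks checked above, then registers from its
+ * probe().  All "foo" names and the ops structure tag are assumptions.
+ *
+ *	static struct bif_ctrl_ops foo_bif_ops = {
+ *		.bus_transaction	= foo_bus_transaction,
+ *		.bus_transaction_query	= foo_bus_transaction_query,
+ *		.bus_transaction_read	= foo_bus_transaction_read,
+ *		.get_bus_state		= foo_get_bus_state,
+ *		.set_bus_state		= foo_set_bus_state,
+ *	};
+ *
+ *	static struct bif_ctrl_desc foo_bif_desc = {
+ *		.name	= "foo-bif",
+ *		.ops	= &foo_bif_ops,
+ *	};
+ *
+ *	bdev = bif_ctrl_register(&foo_bif_desc, &pdev->dev, foo_chip,
+ *				 pdev->dev.of_node);
+ *	if (IS_ERR(bdev))
+ *		return PTR_ERR(bdev);
+ */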
+
+/**
+ * bif_ctrl_unregister() - unregisters a BIF controller
+ * @bdev: BIF controller device pointer
+ */
+void bif_ctrl_unregister(struct bif_ctrl_dev *bdev)
+{
+ if (bdev) {
+ mutex_lock(&bif_ctrl_list_mutex);
+ list_del(&bdev->list);
+ mutex_unlock(&bif_ctrl_list_mutex);
+ }
+}
+EXPORT_SYMBOL(bif_ctrl_unregister);
diff --git a/drivers/char/diag/diagfwd_hsic.c b/drivers/char/diag/diagfwd_hsic.c
index aa55578..616c498 100644
--- a/drivers/char/diag/diagfwd_hsic.c
+++ b/drivers/char/diag/diagfwd_hsic.c
@@ -45,6 +45,7 @@
struct diag_hsic_dev *hsic_struct = container_of(work,
struct diag_hsic_dev, diag_read_hsic_work);
int index = hsic_struct->id;
+ static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
if (!diag_hsic[index].hsic_ch) {
pr_err("DIAG in %s: diag_hsic[index].hsic_ch == 0\n", __func__);
@@ -103,7 +104,8 @@
diagmem_free(driver, buf_in_hsic,
index+POOL_TYPE_HSIC);
- pr_err_ratelimited("diag: Error initiating HSIC read, err: %d\n",
+ if (__ratelimit(&rl))
+ pr_err("diag: Error initiating HSIC read, err: %d\n",
err);
/*
* An error occurred, discontinue queuing
@@ -132,6 +134,7 @@
{
int err = -2;
int index = (int)ctxt;
+ static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
if (!diag_hsic[index].hsic_ch) {
/*
@@ -164,7 +167,8 @@
if (err) {
diagmem_free(driver, buf, index +
POOL_TYPE_HSIC);
- pr_err_ratelimited("diag: In %s, error calling diag_device_write, err: %d\n",
+ if (__ratelimit(&rl))
+ pr_err("diag: In %s, error calling diag_device_write, err: %d\n",
__func__, err);
}
}
diff --git a/drivers/coresight/Kconfig b/drivers/coresight/Kconfig
index c77df95..5e00570 100644
--- a/drivers/coresight/Kconfig
+++ b/drivers/coresight/Kconfig
@@ -24,6 +24,14 @@
config HAVE_CORESIGHT_SINK
bool
+config CORESIGHT_CTI
+ bool "CoreSight Cross Trigger Interface driver"
+ help
+ This driver provides support for the Cross Trigger Interface, which
+ is used to input or output (i.e. pass) cross trigger events from one
+ hardware component to another. It can also be used to pass
+ software generated events.
+
config CORESIGHT_CSR
bool "CoreSight Slave Register driver"
help
@@ -32,6 +40,7 @@
config CORESIGHT_TMC
bool "CoreSight Trace Memory Controller driver"
+ select CORESIGHT_CTI
select CORESIGHT_CSR
select HAVE_CORESIGHT_SINK
help
diff --git a/drivers/coresight/Makefile b/drivers/coresight/Makefile
index 8c73794..0595064 100644
--- a/drivers/coresight/Makefile
+++ b/drivers/coresight/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_CORESIGHT) += coresight.o
obj-$(CONFIG_OF) += of_coresight.o
+obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
obj-$(CONFIG_CORESIGHT_CSR) += coresight-csr.o
obj-$(CONFIG_CORESIGHT_TMC) += coresight-tmc.o
obj-$(CONFIG_CORESIGHT_TPIU) += coresight-tpiu.o
diff --git a/drivers/coresight/coresight-cti.c b/drivers/coresight/coresight-cti.c
new file mode 100644
index 0000000..e077edf
--- /dev/null
+++ b/drivers/coresight/coresight-cti.c
@@ -0,0 +1,481 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/of_coresight.h>
+#include <linux/coresight.h>
+#include <linux/coresight-cti.h>
+
+#include "coresight-priv.h"
+
+#define cti_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
+#define cti_readl(drvdata, off) __raw_readl(drvdata->base + off)
+
+#define CTI_LOCK(drvdata) \
+do { \
+ mb(); \
+ cti_writel(drvdata, 0x0, CORESIGHT_LAR); \
+} while (0)
+#define CTI_UNLOCK(drvdata) \
+do { \
+ cti_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
+ mb(); \
+} while (0)
+
+#define CTICONTROL (0x000)
+#define CTIINTACK (0x010)
+#define CTIAPPSET (0x014)
+#define CTIAPPCLEAR (0x018)
+#define CTIAPPPULSE (0x01C)
+#define CTIINEN(n) (0x020 + (n * 4))
+#define CTIOUTEN(n) (0x0A0 + (n * 4))
+#define CTITRIGINSTATUS (0x130)
+#define CTITRIGOUTSTATUS (0x134)
+#define CTICHINSTATUS (0x138)
+#define CTICHOUTSTATUS (0x13C)
+#define CTIGATE (0x140)
+#define ASICCTL (0x144)
+#define ITCHINACK (0xEDC)
+#define ITTRIGINACK (0xEE0)
+#define ITCHOUT (0xEE4)
+#define ITTRIGOUT (0xEE8)
+#define ITCHOUTACK (0xEEC)
+#define ITTRIGOUTACK (0xEF0)
+#define ITCHIN (0xEF4)
+#define ITTRIGIN (0xEF8)
+
+#define CTI_MAX_TRIGGERS (8)
+#define CTI_MAX_CHANNELS (4)
+
+#define to_cti_drvdata(c) container_of(c, struct cti_drvdata, cti)
+
+struct cti_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct clk *clk;
+ struct mutex mutex;
+ struct coresight_cti cti;
+ int refcnt;
+};
+
+static LIST_HEAD(cti_list);
+static DEFINE_MUTEX(cti_lock);
+
+static int cti_verify_bounds(int trig, int ch)
+{
+ if (trig >= CTI_MAX_TRIGGERS)
+ return -EINVAL;
+
+ if (ch >= CTI_MAX_CHANNELS)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cti_enable(struct cti_drvdata *drvdata)
+{
+ int ret;
+
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ CTI_UNLOCK(drvdata);
+
+ cti_writel(drvdata, 0x1, CTICONTROL);
+
+ CTI_LOCK(drvdata);
+ return 0;
+}
+
+static void __cti_map_trigin(struct cti_drvdata *drvdata, int trig, int ch)
+{
+ uint32_t ctien;
+
+ CTI_UNLOCK(drvdata);
+
+ ctien = cti_readl(drvdata, CTIINEN(trig));
+ cti_writel(drvdata, (ctien | 0x1 << ch), CTIINEN(trig));
+
+ CTI_LOCK(drvdata);
+}
+
+int coresight_cti_map_trigin(struct coresight_cti *cti, int trig, int ch)
+{
+ struct cti_drvdata *drvdata;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(cti))
+ return -EINVAL;
+
+ ret = cti_verify_bounds(trig, ch);
+ if (ret)
+ return ret;
+
+ drvdata = to_cti_drvdata(cti);
+
+ mutex_lock(&drvdata->mutex);
+ if (drvdata->refcnt == 0) {
+ ret = cti_enable(drvdata);
+ if (ret)
+ goto err;
+ }
+ drvdata->refcnt++;
+
+ __cti_map_trigin(drvdata, trig, ch);
+err:
+ mutex_unlock(&drvdata->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(coresight_cti_map_trigin);
+
+static void __cti_map_trigout(struct cti_drvdata *drvdata, int trig, int ch)
+{
+ uint32_t ctien;
+
+ CTI_UNLOCK(drvdata);
+
+ ctien = cti_readl(drvdata, CTIOUTEN(trig));
+ cti_writel(drvdata, (ctien | 0x1 << ch), CTIOUTEN(trig));
+
+ CTI_LOCK(drvdata);
+}
+
+int coresight_cti_map_trigout(struct coresight_cti *cti, int trig, int ch)
+{
+ struct cti_drvdata *drvdata;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(cti))
+ return -EINVAL;
+
+ ret = cti_verify_bounds(trig, ch);
+ if (ret)
+ return ret;
+
+ drvdata = to_cti_drvdata(cti);
+
+ mutex_lock(&drvdata->mutex);
+ if (drvdata->refcnt == 0) {
+ ret = cti_enable(drvdata);
+ if (ret)
+ goto err;
+ }
+ drvdata->refcnt++;
+
+ __cti_map_trigout(drvdata, trig, ch);
+err:
+ mutex_unlock(&drvdata->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(coresight_cti_map_trigout);
+
+static void cti_disable(struct cti_drvdata *drvdata)
+{
+ CTI_UNLOCK(drvdata);
+
+ cti_writel(drvdata, 0x0, CTICONTROL);
+
+ CTI_LOCK(drvdata);
+}
+
+static void __cti_unmap_trigin(struct cti_drvdata *drvdata, int trig, int ch)
+{
+ uint32_t ctien;
+
+ CTI_UNLOCK(drvdata);
+
+ ctien = cti_readl(drvdata, CTIINEN(trig));
+ cti_writel(drvdata, (ctien & ~(0x1 << ch)), CTIINEN(trig));
+
+ CTI_LOCK(drvdata);
+}
+
+void coresight_cti_unmap_trigin(struct coresight_cti *cti, int trig, int ch)
+{
+ struct cti_drvdata *drvdata;
+
+ if (IS_ERR_OR_NULL(cti))
+ return;
+
+ if (cti_verify_bounds(trig, ch))
+ return;
+
+ drvdata = to_cti_drvdata(cti);
+
+ mutex_lock(&drvdata->mutex);
+ __cti_unmap_trigin(drvdata, trig, ch);
+
+ if (drvdata->refcnt == 1)
+ cti_disable(drvdata);
+ drvdata->refcnt--;
+ mutex_unlock(&drvdata->mutex);
+
+ clk_disable_unprepare(drvdata->clk);
+}
+EXPORT_SYMBOL(coresight_cti_unmap_trigin);
+
+static void __cti_unmap_trigout(struct cti_drvdata *drvdata, int trig, int ch)
+{
+ uint32_t ctien;
+
+ CTI_UNLOCK(drvdata);
+
+ ctien = cti_readl(drvdata, CTIOUTEN(trig));
+ cti_writel(drvdata, (ctien & ~(0x1 << ch)), CTIOUTEN(trig));
+
+ CTI_LOCK(drvdata);
+}
+
+void coresight_cti_unmap_trigout(struct coresight_cti *cti, int trig, int ch)
+{
+ struct cti_drvdata *drvdata;
+
+ if (IS_ERR_OR_NULL(cti))
+ return;
+
+ if (cti_verify_bounds(trig, ch))
+ return;
+
+ drvdata = to_cti_drvdata(cti);
+
+ mutex_lock(&drvdata->mutex);
+ __cti_unmap_trigout(drvdata, trig, ch);
+
+ if (drvdata->refcnt == 1)
+ cti_disable(drvdata);
+ drvdata->refcnt--;
+ mutex_unlock(&drvdata->mutex);
+
+ clk_disable_unprepare(drvdata->clk);
+}
+EXPORT_SYMBOL(coresight_cti_unmap_trigout);
+
+struct coresight_cti *coresight_cti_get(const char *name)
+{
+ struct coresight_cti *cti;
+
+ mutex_lock(&cti_lock);
+ list_for_each_entry(cti, &cti_list, link) {
+ if (!strncmp(cti->name, name, strlen(cti->name) + 1)) {
+ mutex_unlock(&cti_lock);
+ return cti;
+ }
+ }
+ mutex_unlock(&cti_lock);
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL(coresight_cti_get);
+
+void coresight_cti_put(struct coresight_cti *cti)
+{
+}
+EXPORT_SYMBOL(coresight_cti_put);
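+
+/*
+ * Usage sketch (illustrative only, not part of this change): a client
+ * driver looks a CTI up by name and routes trigger 1 onto channel 0,
+ * mirroring what the TMC driver in this series does.  The CTI name
+ * below is an assumption.
+ *
+ *	struct coresight_cti *cti;
+ *	int ret;
+ *
+ *	cti = coresight_cti_get("cti_example");
+ *	if (IS_ERR(cti))
+ *		return PTR_ERR(cti);
+ *
+ *	ret = coresight_cti_map_trigout(cti, 1, 0);
+ *	if (ret)
+ *		return ret;
+ *
+ *	... use the traced component ...
+ *
+ *	coresight_cti_unmap_trigout(cti, 1, 0);
+ *	coresight_cti_put(cti);
+ */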
+
+static ssize_t cti_store_map_trigin(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val1, val2;
+ int ret;
+
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+
+ ret = coresight_cti_map_trigin(&drvdata->cti, val1, val2);
+
+ if (ret)
+ return ret;
+ return size;
+}
+static DEVICE_ATTR(map_trigin, S_IWUSR, NULL, cti_store_map_trigin);
+
+static ssize_t cti_store_map_trigout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val1, val2;
+ int ret;
+
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+
+ ret = coresight_cti_map_trigout(&drvdata->cti, val1, val2);
+
+ if (ret)
+ return ret;
+ return size;
+}
+static DEVICE_ATTR(map_trigout, S_IWUSR, NULL, cti_store_map_trigout);
+
+static ssize_t cti_store_unmap_trigin(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val1, val2;
+
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+
+ coresight_cti_unmap_trigin(&drvdata->cti, val1, val2);
+
+ return size;
+}
+static DEVICE_ATTR(unmap_trigin, S_IWUSR, NULL, cti_store_unmap_trigin);
+
+static ssize_t cti_store_unmap_trigout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val1, val2;
+
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+
+ coresight_cti_unmap_trigout(&drvdata->cti, val1, val2);
+
+ return size;
+}
+static DEVICE_ATTR(unmap_trigout, S_IWUSR, NULL, cti_store_unmap_trigout);
+
+static struct attribute *cti_attrs[] = {
+ &dev_attr_map_trigin.attr,
+ &dev_attr_map_trigout.attr,
+ &dev_attr_unmap_trigin.attr,
+ &dev_attr_unmap_trigout.attr,
+ NULL,
+};
+
+static struct attribute_group cti_attr_grp = {
+ .attrs = cti_attrs,
+};
+
+static const struct attribute_group *cti_attr_grps[] = {
+ &cti_attr_grp,
+ NULL,
+};
+
+static int __devinit cti_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+ struct coresight_platform_data *pdata;
+ struct cti_drvdata *drvdata;
+ struct resource *res;
+ struct coresight_desc *desc;
+
+ if (pdev->dev.of_node) {
+ pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ pdev->dev.platform_data = pdata;
+ }
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+ /* Store the driver data pointer for use in exported functions */
+ drvdata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drvdata);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!drvdata->base)
+ return -ENOMEM;
+
+ mutex_init(&drvdata->mutex);
+
+ drvdata->clk = devm_clk_get(dev, "core_clk");
+ if (IS_ERR(drvdata->clk))
+ return PTR_ERR(drvdata->clk);
+
+ ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ return ret;
+
+ mutex_lock(&cti_lock);
+ drvdata->cti.name = ((struct coresight_platform_data *)
+ (pdev->dev.platform_data))->name;
+ list_add_tail(&drvdata->cti.link, &cti_list);
+ mutex_unlock(&cti_lock);
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ desc->type = CORESIGHT_DEV_TYPE_NONE;
+ desc->pdata = pdev->dev.platform_data;
+ desc->dev = &pdev->dev;
+ desc->groups = cti_attr_grps;
+ desc->owner = THIS_MODULE;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ dev_info(dev, "CTI initialized\n");
+ return 0;
+}
+
+static int __devexit cti_remove(struct platform_device *pdev)
+{
+ struct cti_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ coresight_unregister(drvdata->csdev);
+ return 0;
+}
+
+static struct of_device_id cti_match[] = {
+ {.compatible = "arm,coresight-cti"},
+ {}
+};
+
+static struct platform_driver cti_driver = {
+ .probe = cti_probe,
+ .remove = __devexit_p(cti_remove),
+ .driver = {
+ .name = "coresight-cti",
+ .owner = THIS_MODULE,
+ .of_match_table = cti_match,
+ },
+};
+
+static int __init cti_init(void)
+{
+ return platform_driver_register(&cti_driver);
+}
+module_init(cti_init);
+
+static void __exit cti_exit(void)
+{
+ platform_driver_unregister(&cti_driver);
+}
+module_exit(cti_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight CTI driver");
diff --git a/drivers/coresight/coresight-etm.c b/drivers/coresight/coresight-etm.c
index 73c1499..2ae54ea 100644
--- a/drivers/coresight/coresight-etm.c
+++ b/drivers/coresight/coresight-etm.c
@@ -173,6 +173,8 @@
#define ETM_REG_DUMP_VER_OFF (4)
#define ETM_REG_DUMP_VER (1)
+#define CPMR_ETMCLKEN (8)
+
enum etm_addr_type {
ETM_ADDR_TYPE_NONE,
ETM_ADDR_TYPE_SINGLE,
@@ -318,11 +320,21 @@
static void etm_set_pwrup(struct etm_drvdata *drvdata)
{
+ uint32_t cpmr;
uint32_t etmpdcr;
- etmpdcr = etm_readl_mm(drvdata, ETMPDCR);
- etmpdcr |= BIT(3);
- etm_writel_mm(drvdata, etmpdcr, ETMPDCR);
+ /* For Krait, use cp15 CPMR_ETMCLKEN instead of ETMPDCR since ETMPDCR
+ * is not supported for this purpose on Krait v4.
+ */
+ if (cpu_is_krait()) {
+ asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (cpmr));
+ cpmr |= CPMR_ETMCLKEN;
+ asm volatile("mcr p15, 7, %0, c15, c0, 5" : : "r" (cpmr));
+ } else {
+ etmpdcr = etm_readl_mm(drvdata, ETMPDCR);
+ etmpdcr |= BIT(3);
+ etm_writel_mm(drvdata, etmpdcr, ETMPDCR);
+ }
/* ensure pwrup completes before subsequent cp14 accesses */
mb();
isb();
@@ -330,14 +342,24 @@
static void etm_clr_pwrup(struct etm_drvdata *drvdata)
{
+ uint32_t cpmr;
uint32_t etmpdcr;
/* ensure pending cp14 accesses complete before clearing pwrup */
mb();
isb();
- etmpdcr = etm_readl_mm(drvdata, ETMPDCR);
- etmpdcr &= ~BIT(3);
- etm_writel_mm(drvdata, etmpdcr, ETMPDCR);
+ /* For Krait, use cp15 CPMR_ETMCLKEN instead of ETMPDCR since ETMPDCR
+ * is not supported for this purpose on Krait v4.
+ */
+ if (cpu_is_krait()) {
+ asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (cpmr));
+ cpmr &= ~CPMR_ETMCLKEN;
+ asm volatile("mcr p15, 7, %0, c15, c0, 5" : : "r" (cpmr));
+ } else {
+ etmpdcr = etm_readl_mm(drvdata, ETMPDCR);
+ etmpdcr &= ~BIT(3);
+ etm_writel_mm(drvdata, etmpdcr, ETMPDCR);
+ }
}
static void etm_set_prog(struct etm_drvdata *drvdata)
diff --git a/drivers/coresight/coresight-stm.c b/drivers/coresight/coresight-stm.c
index bc72e02..1db499b 100644
--- a/drivers/coresight/coresight-stm.c
+++ b/drivers/coresight/coresight-stm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,7 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/bitmap.h>
+#include <linux/of.h>
#include <linux/of_coresight.h>
#include <linux/coresight.h>
#include <linux/coresight-stm.h>
@@ -35,6 +36,10 @@
#define stm_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
#define stm_readl(drvdata, off) __raw_readl(drvdata->base + off)
+#define stm_data_writeb(val, addr) __raw_writeb_no_log(val, addr)
+#define stm_data_writew(val, addr) __raw_writew_no_log(val, addr)
+#define stm_data_writel(val, addr) __raw_writel_no_log(val, addr)
+
#define STM_LOCK(drvdata) \
do { \
mb(); \
@@ -85,8 +90,10 @@
#define STM_USERSPACE_MAGIC1_VAL (0xf0)
#define STM_USERSPACE_MAGIC2_VAL (0xf1)
-#define OST_START_TOKEN (0x30)
-#define OST_VERSION (0x1)
+#define OST_TOKEN_STARTSIMPLE (0x10)
+#define OST_TOKEN_STARTBASE (0x30)
+#define OST_VERSION_PROP (1)
+#define OST_VERSION_MIPI1 (16)
enum stm_pkt_type {
STM_PKT_TYPE_DATA = 0x98,
@@ -133,6 +140,7 @@
struct channel_space chs;
bool enable;
DECLARE_BITMAP(entities, OST_ENTITY_MAX);
+ bool write_64bit;
};
static struct stm_drvdata *stmdrvdata;
@@ -342,7 +350,7 @@
clear_bit(ch, drvdata->chs.bitmap);
}
-static int stm_send(void *addr, const void *data, uint32_t size)
+static int stm_send_64bit(void *addr, const void *data, uint32_t size)
{
uint64_t prepad = 0;
uint64_t postpad = 0;
@@ -376,7 +384,10 @@
size -= 8;
}
+ endoff = 0;
+
if (size) {
+ endoff = 8 - (uint8_t)size;
pad = (char *)&postpad;
while (size) {
@@ -386,12 +397,13 @@
*(volatile uint64_t __force *)addr = postpad;
}
- return roundup(len + off, 8);
+ return len + off + endoff;
}
-static int stm_trace_ost_header(unsigned long ch_addr, uint32_t options,
- uint8_t entity_id, uint8_t proto_id,
- const void *payload_data, uint32_t payload_size)
+static int stm_trace_ost_header_64bit(unsigned long ch_addr, uint32_t options,
+ uint8_t entity_id, uint8_t proto_id,
+ const void *payload_data,
+ uint32_t payload_size)
{
void *addr;
uint8_t prepad_size;
@@ -400,14 +412,92 @@
hdr = (char *)&header;
- hdr[0] = OST_START_TOKEN;
- hdr[1] = OST_VERSION;
+ hdr[0] = OST_TOKEN_STARTBASE;
+ hdr[1] = OST_VERSION_PROP;
hdr[2] = entity_id;
hdr[3] = proto_id;
prepad_size = (unsigned long)payload_data & 0x7;
*(uint32_t *)(hdr + 4) = (prepad_size << 24) | payload_size;
- /* for 64bit writes, header is expected to be of the D32M, D32M */
+ /* for 64bit writes, header is expected to be D32M, D32M type */
+ options |= STM_OPTION_MARKED;
+ options &= ~STM_OPTION_TIMESTAMPED;
+ addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_DATA, options));
+
+ return stm_send_64bit(addr, &header, sizeof(header));
+}
+
+static int stm_trace_data_64bit(unsigned long ch_addr, uint32_t options,
+ const void *data, uint32_t size)
+{
+ void *addr;
+
+ options &= ~STM_OPTION_TIMESTAMPED;
+ addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_DATA, options));
+
+ return stm_send_64bit(addr, data, size);
+}
+
+static int stm_trace_ost_tail_64bit(unsigned long ch_addr, uint32_t options)
+{
+ void *addr;
+ uint64_t tail = 0x0;
+
+ addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_FLAG, options));
+
+ return stm_send_64bit(addr, &tail, sizeof(tail));
+}
+
+static int stm_send(void *addr, const void *data, uint32_t size)
+{
+ uint32_t len = size;
+
+ if (((unsigned long)data & 0x1) && (size >= 1)) {
+ stm_data_writeb(*(uint8_t *)data, addr);
+ data++;
+ size--;
+ }
+ if (((unsigned long)data & 0x2) && (size >= 2)) {
+ stm_data_writew(*(uint16_t *)data, addr);
+ data += 2;
+ size -= 2;
+ }
+
+ /* now we are 32bit aligned */
+ while (size >= 4) {
+ stm_data_writel(*(uint32_t *)data, addr);
+ data += 4;
+ size -= 4;
+ }
+
+ if (size >= 2) {
+ stm_data_writew(*(uint16_t *)data, addr);
+ data += 2;
+ size -= 2;
+ }
+ if (size >= 1) {
+ stm_data_writeb(*(uint8_t *)data, addr);
+ data++;
+ size--;
+ }
+
+ return len;
+}
+
+static int stm_trace_ost_header(unsigned long ch_addr, uint32_t options,
+ uint8_t entity_id, uint8_t proto_id,
+ const void *payload_data, uint32_t payload_size)
+{
+ void *addr;
+ uint32_t header;
+ char *hdr;
+
+ hdr = (char *)&header;
+
+ hdr[0] = OST_TOKEN_STARTSIMPLE;
+ hdr[1] = OST_VERSION_MIPI1;
+ hdr[2] = entity_id;
+ hdr[3] = proto_id;
+
+ /* header is expected to be D32M type */
options |= STM_OPTION_MARKED;
options &= ~STM_OPTION_TIMESTAMPED;
addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_DATA, options));
@@ -429,7 +519,7 @@
static int stm_trace_ost_tail(unsigned long ch_addr, uint32_t options)
{
void *addr;
- uint64_t tail = 0x0;
+ uint32_t tail = 0x0;
addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_FLAG, options));
@@ -448,15 +538,27 @@
ch = stm_channel_alloc(0);
ch_addr = (unsigned long)stm_channel_addr(drvdata, ch);
- /* send the ost header */
- len += stm_trace_ost_header(ch_addr, options, entity_id, proto_id, data,
- size);
+ if (drvdata->write_64bit) {
+ /* send the ost header */
+ len += stm_trace_ost_header_64bit(ch_addr, options, entity_id,
+ proto_id, data, size);
- /* send the payload data */
- len += stm_trace_data(ch_addr, options, data, size);
+ /* send the payload data */
+ len += stm_trace_data_64bit(ch_addr, options, data, size);
- /* send the ost tail */
- len += stm_trace_ost_tail(ch_addr, options);
+ /* send the ost tail */
+ len += stm_trace_ost_tail_64bit(ch_addr, options);
+ } else {
+ /* send the ost header */
+ len += stm_trace_ost_header(ch_addr, options, entity_id,
+ proto_id, data, size);
+
+ /* send the payload data */
+ len += stm_trace_data(ch_addr, options, data, size);
+
+ /* send the ost tail */
+ len += stm_trace_ost_tail(ch_addr, options);
+ }
/* we are done, free the channel */
stm_channel_free(ch);
@@ -744,6 +846,10 @@
bitmap_fill(drvdata->entities, OST_ENTITY_MAX);
+ if (pdev->dev.of_node)
+ drvdata->write_64bit = of_property_read_bool(pdev->dev.of_node,
+ "qcom,write-64bit");
+
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
diff --git a/drivers/coresight/coresight-tmc.c b/drivers/coresight/coresight-tmc.c
index 10eabca..0afb5a2 100644
--- a/drivers/coresight/coresight-tmc.c
+++ b/drivers/coresight/coresight-tmc.c
@@ -29,6 +29,7 @@
#include <linux/of.h>
#include <linux/of_coresight.h>
#include <linux/coresight.h>
+#include <linux/coresight-cti.h>
#include <linux/usb/usb_qdss.h>
#include <mach/memory.h>
#include <mach/sps.h>
@@ -136,6 +137,9 @@
struct miscdevice miscdev;
struct clk *clk;
spinlock_t spinlock;
+ bool reset_flush_race;
+ struct coresight_cti *cti_flush;
+ struct coresight_cti *cti_reset;
struct mutex read_lock;
int read_count;
bool reading;
@@ -372,7 +376,7 @@
TMC_UNLOCK(drvdata);
tmc_writel(drvdata, TMC_MODE_CIRCULAR_BUFFER, TMC_MODE);
- tmc_writel(drvdata, 0x133, TMC_FFCR);
+ tmc_writel(drvdata, 0x1133, TMC_FFCR);
tmc_writel(drvdata, drvdata->trigger_cntr, TMC_TRG);
__tmc_enable(drvdata);
@@ -401,7 +405,7 @@
tmc_writel(drvdata, drvdata->paddr, TMC_DBALO);
tmc_writel(drvdata, 0x0, TMC_DBAHI);
- tmc_writel(drvdata, 0x133, TMC_FFCR);
+ tmc_writel(drvdata, 0x1133, TMC_FFCR);
tmc_writel(drvdata, drvdata->trigger_cntr, TMC_TRG);
__tmc_enable(drvdata);
@@ -430,8 +434,15 @@
return ret;
mutex_lock(&drvdata->usb_lock);
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+ coresight_cti_map_trigout(drvdata->cti_flush, 1, 0);
+ coresight_cti_map_trigin(drvdata->cti_reset, 0, 0);
+ } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM &&
+ !drvdata->reset_flush_race) {
+ coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+ coresight_cti_map_trigin(drvdata->cti_reset, 0, 0);
+ } else if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
drvdata->usbch = usb_qdss_open("qdss", drvdata,
usb_notifier);
if (IS_ERR(drvdata->usbch)) {
@@ -440,6 +451,11 @@
goto err0;
}
}
+ } else {
+ if (mode == TMC_MODE_CIRCULAR_BUFFER) {
+ coresight_cti_map_trigout(drvdata->cti_flush, 1, 0);
+ coresight_cti_map_trigin(drvdata->cti_reset, 0, 0);
+ }
}
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -632,7 +648,6 @@
static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
{
unsigned long flags;
- bool etr_bam_disable = false;
mutex_lock(&drvdata->usb_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -645,27 +660,32 @@
if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
__tmc_etr_disable_to_mem(drvdata);
else if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
- etr_bam_disable = true;
+ __tmc_etr_disable_to_bam(drvdata);
} else {
if (mode == TMC_MODE_CIRCULAR_BUFFER)
__tmc_etb_disable(drvdata);
else
__tmc_etf_disable(drvdata);
}
-out:
drvdata->enable = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (etr_bam_disable) {
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
- spin_lock_irqsave(&drvdata->spinlock, flags);
- __tmc_etr_disable_to_bam(drvdata);
- spin_unlock_irqrestore(&drvdata->spinlock,
- flags);
- tmc_etr_bam_disable(drvdata);
- usb_qdss_close(drvdata->usbch);
- }
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 0, 0);
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 1, 0);
+ } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM &&
+ !drvdata->reset_flush_race) {
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 0, 0);
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+ } else if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+ tmc_etr_bam_disable(drvdata);
+ usb_qdss_close(drvdata->usbch);
+ }
+ } else {
+ if (mode == TMC_MODE_CIRCULAR_BUFFER) {
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 0, 0);
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 1, 0);
}
}
mutex_unlock(&drvdata->usb_lock);
@@ -673,6 +693,15 @@
clk_disable_unprepare(drvdata->clk);
dev_info(drvdata->dev, "TMC disabled\n");
+ return;
+out:
+ drvdata->enable = false;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->usb_lock);
+
+ clk_disable_unprepare(drvdata->clk);
+
+ dev_info(drvdata->dev, "TMC disabled\n");
}
static void tmc_disable_sink(struct coresight_device *csdev)
@@ -707,6 +736,8 @@
} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
__tmc_etr_disable_to_mem(drvdata);
+ else if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
+ __tmc_etr_disable_to_bam(drvdata);
} else {
mode = tmc_readl(drvdata, TMC_MODE);
if (mode == TMC_MODE_CIRCULAR_BUFFER)
@@ -940,7 +971,6 @@
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
char str[10] = "";
unsigned long flags;
- bool etr_bam_flag = false;
int ret;
if (strlen(buf) >= 10)
@@ -954,42 +984,52 @@
goto out;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->enable) {
- __tmc_etr_disable_to_bam(drvdata);
- __tmc_etr_enable_to_mem(drvdata);
- etr_bam_flag = true;
+ if (!drvdata->enable) {
+ drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ goto out;
}
+ __tmc_etr_disable_to_bam(drvdata);
+ __tmc_etr_enable_to_mem(drvdata);
drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (etr_bam_flag) {
- tmc_etr_bam_disable(drvdata);
- usb_qdss_close(drvdata->usbch);
+ if (!drvdata->reset_flush_race) {
+ coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+ coresight_cti_map_trigin(drvdata->cti_reset, 0, 0);
}
+
+ tmc_etr_bam_disable(drvdata);
+ usb_qdss_close(drvdata->usbch);
} else if (!strcmp(str, "usb")) {
if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
goto out;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->enable) {
- if (drvdata->reading) {
- ret = -EBUSY;
- goto err1;
- }
- __tmc_etr_disable_to_mem(drvdata);
- etr_bam_flag = true;
+ if (!drvdata->enable) {
+ drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ goto out;
}
+ if (drvdata->reading) {
+ ret = -EBUSY;
+ goto err1;
+ }
+ __tmc_etr_disable_to_mem(drvdata);
drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (etr_bam_flag) {
- drvdata->usbch = usb_qdss_open("qdss", drvdata,
- usb_notifier);
- if (IS_ERR(drvdata->usbch)) {
- dev_err(drvdata->dev, "usb_qdss_open failed\n");
- ret = PTR_ERR(drvdata->usbch);
- goto err0;
- }
+ if (!drvdata->reset_flush_race) {
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 0, 0);
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+ }
+
+ drvdata->usbch = usb_qdss_open("qdss", drvdata,
+ usb_notifier);
+ if (IS_ERR(drvdata->usbch)) {
+ dev_err(drvdata->dev, "usb_qdss_open failed\n");
+ ret = PTR_ERR(drvdata->usbch);
+ goto err0;
}
}
out:
@@ -1091,6 +1131,7 @@
static int count;
void *baddr;
struct msm_client_dump dump;
+ struct coresight_cti_data *ctidata;
struct coresight_desc *desc;
if (pdev->dev.of_node) {
@@ -1209,6 +1250,27 @@
}
count++;
+ if (pdev->dev.of_node) {
+ drvdata->reset_flush_race = of_property_read_bool(
+ pdev->dev.of_node,
+ "qcom,reset-flush-race");
+
+ ctidata = of_get_coresight_cti_data(dev, pdev->dev.of_node);
+ if (IS_ERR(ctidata)) {
+ dev_err(dev, "invalid cti data\n");
+ } else if (ctidata && ctidata->nr_ctis == 2) {
+ drvdata->cti_flush = coresight_cti_get(
+ ctidata->names[0]);
+ if (IS_ERR(drvdata->cti_flush))
+ dev_err(dev, "failed to get flush cti\n");
+
+ drvdata->cti_reset = coresight_cti_get(
+ ctidata->names[1]);
+ if (IS_ERR(drvdata->cti_reset))
+ dev_err(dev, "failed to get reset cti\n");
+ }
+ }
+
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
if (!desc) {
ret = -ENOMEM;
diff --git a/drivers/coresight/of_coresight.c b/drivers/coresight/of_coresight.c
index a9d0182..1eccd09 100644
--- a/drivers/coresight/of_coresight.c
+++ b/drivers/coresight/of_coresight.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/coresight.h>
+#include <linux/coresight-cti.h>
struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node)
@@ -97,3 +98,45 @@
return pdata;
}
EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);
+
+struct coresight_cti_data *of_get_coresight_cti_data(
+ struct device *dev, struct device_node *node)
+{
+ int i, ret;
+ uint32_t ctis_len;
+ struct device_node *child_node;
+ struct coresight_cti_data *ctidata;
+
+ ctidata = devm_kzalloc(dev, sizeof(*ctidata), GFP_KERNEL);
+ if (!ctidata)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_get_property(node, "coresight-ctis", &ctis_len))
+ ctidata->nr_ctis = ctis_len/sizeof(uint32_t);
+ else
+ return ERR_PTR(-EINVAL);
+
+ if (ctidata->nr_ctis) {
+ ctidata->names = devm_kzalloc(dev, ctidata->nr_ctis *
+ sizeof(*ctidata->names),
+ GFP_KERNEL);
+ if (!ctidata->names)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ctidata->nr_ctis; i++) {
+ child_node = of_parse_phandle(node, "coresight-ctis",
+ i);
+ if (!child_node)
+ return ERR_PTR(-EINVAL);
+
+ ret = of_property_read_string(child_node,
+ "coresight-name",
+ &ctidata->names[i]);
+ of_node_put(child_node);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ }
+ return ctidata;
+}
+EXPORT_SYMBOL(of_get_coresight_cti_data);
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 78a666d..a76b689 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -18,3 +18,6 @@
bool
depends on CPU_IDLE && NO_HZ
default y
+
+config ARCH_NEEDS_CPU_IDLE_COUPLED
+ def_bool n
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 5634f88..38c8f69 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -3,3 +3,4 @@
#
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
+obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
new file mode 100644
index 0000000..c24dda0
--- /dev/null
+++ b/drivers/cpuidle/coupled.c
@@ -0,0 +1,727 @@
+/*
+ * coupled.c - helper functions to enter the same idle state on multiple cpus
+ *
+ * Copyright (c) 2011 Google, Inc.
+ *
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "cpuidle.h"
+
+/**
+ * DOC: Coupled cpuidle states
+ *
+ * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
+ * cpus cannot be independently powered down, either due to
+ * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
+ * power down), or due to HW bugs (on OMAP4460, a cpu powering up
+ * will corrupt the gic state unless the other cpu runs a
+ * workaround). Each cpu has a power state that it can enter without
+ * coordinating with the other cpu (usually Wait For Interrupt, or
+ * WFI), and one or more "coupled" power states that affect blocks
+ * shared between the cpus (L2 cache, interrupt controller, and
+ * sometimes the whole SoC). Entering a coupled power state must
+ * be tightly controlled on both cpus.
+ *
+ * This file implements a solution, where each cpu will wait in the
+ * WFI state until all cpus are ready to enter a coupled state, at
+ * which point the coupled state function will be called on all
+ * cpus at approximately the same time.
+ *
+ * Once all cpus are ready to enter idle, they are woken by an smp
+ * cross call. At this point, there is a chance that one of the
+ * cpus will find work to do, and choose not to enter idle. A
+ * final pass is needed to guarantee that all cpus will call the
+ * power state enter function at the same time. During this pass,
+ * each cpu will increment the ready counter, and continue once the
+ * ready counter matches the number of online coupled cpus. If any
+ * cpu exits idle, the other cpus will decrement their counter and
+ * retry.
+ *
+ * requested_state stores the deepest coupled idle state each cpu
+ * is ready for. It is assumed that the states are indexed from
+ * shallowest (highest power, lowest exit latency) to deepest
+ * (lowest power, highest exit latency). The requested_state
+ * variable is not locked. It is only written from the cpu that
+ * it stores (or by the on/offlining cpu if that cpu is offline),
+ * and only read after all the cpus that are ready for the coupled
+ * idle state are no longer updating it.
+ *
+ * Three atomic counters are used. alive_count tracks the number
+ * of cpus in the coupled set that are currently or soon will be
+ * online. waiting_count tracks the number of cpus that are in
+ * the waiting loop, in the ready loop, or in the coupled idle state.
+ * ready_count tracks the number of cpus that are in the ready loop
+ * or in the coupled idle state.
+ *
+ * To use coupled cpuidle states, a cpuidle driver must:
+ *
+ * Set struct cpuidle_device.coupled_cpus to the mask of all
+ * coupled cpus, usually the same as cpu_possible_mask if all cpus
+ * are part of the same cluster. The coupled_cpus mask must be
+ * set in the struct cpuidle_device for each cpu.
+ *
+ * Set struct cpuidle_device.safe_state_index to the index of a state
+ * that is not a coupled state. This is usually WFI.
+ *
+ * Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
+ * state that affects multiple cpus.
+ *
+ * Provide a struct cpuidle_state.enter function for each state
+ * that affects multiple cpus. This function is guaranteed to be
+ * called on all cpus at approximately the same time. The driver
+ * should ensure that the cpus all abort together if any cpu tries
+ * to abort once the function is called. The function should return
+ * with interrupts still disabled.
+ */
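+
+/*
+ * Illustrative sketch of the driver-side setup described above.  This is
+ * not functional code from this patch; the "example_*" names are
+ * assumptions.  State 0 is the per-cpu safe state (WFI) and state 1 is
+ * the coupled state that affects shared blocks:
+ *
+ *	static struct cpuidle_driver example_idle_driver = {
+ *		.name		= "example_idle",
+ *		.states = {
+ *			[0] = {
+ *				.enter		= example_enter_wfi,
+ *				.exit_latency	= 1,
+ *			},
+ *			[1] = {
+ *				.enter		= example_enter_cluster_off,
+ *				.exit_latency	= 300,
+ *				.flags		= CPUIDLE_FLAG_COUPLED,
+ *			},
+ *		},
+ *		.state_count	= 2,
+ *	};
+ *
+ * and, per cpu, before registering the struct cpuidle_device:
+ *
+ *	cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
+ *	dev->safe_state_index = 0;
+ */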
+
+/**
+ * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
+ * @coupled_cpus: mask of cpus that are part of the coupled set
+ * @requested_state: array of requested states for cpus in the coupled set
+ * @ready_waiting_counts: combined count of cpus in ready or waiting loops
+ * @online_count: count of cpus that are online
+ * @refcnt: reference count of cpuidle devices that are using this struct
+ * @prevent: flag to prevent coupled idle while a cpu is hotplugging
+ */
+struct cpuidle_coupled {
+ cpumask_t coupled_cpus;
+ int requested_state[NR_CPUS];
+ atomic_t ready_waiting_counts;
+ int online_count;
+ int refcnt;
+ int prevent;
+};
+
+#define WAITING_BITS 16
+#define MAX_WAITING_CPUS (1 << WAITING_BITS)
+#define WAITING_MASK (MAX_WAITING_CPUS - 1)
+#define READY_MASK (~WAITING_MASK)
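+
+/*
+ * With WAITING_BITS == 16, ready_waiting_counts packs both counters into
+ * one atomic_t: bits [15:0] hold the waiting count and bits [31:16] hold
+ * the ready count.  Worked example (added for clarity): a value of
+ * 0x00020003 means three cpus have reached at least the waiting loop and
+ * two of those have also reached the ready loop.
+ */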
+
+#define CPUIDLE_COUPLED_NOT_IDLE (-1)
+
+static DEFINE_MUTEX(cpuidle_coupled_lock);
+static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
+
+/*
+ * The cpuidle_coupled_poked_mask mask is used to avoid calling
+ * __smp_call_function_single with the per cpu call_single_data struct already
+ * in use. This prevents a deadlock where two cpus are waiting for each
+ * other's call_single_data struct to be available.
+ */
+static cpumask_t cpuidle_coupled_poked_mask;
+
+/**
+ * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
+ * @dev: cpuidle_device of the calling cpu
+ * @a: atomic variable to hold the barrier
+ *
+ * No caller to this function will return from this function until all online
+ * cpus in the same coupled group have called this function. Once any caller
+ * has returned from this function, the barrier is immediately available for
+ * reuse.
+ *
+ * The atomic variable a must be initialized to 0 before any cpu calls
+ * this function, and will be reset to 0 before any cpu returns from it.
+ *
+ * Must only be called from within a coupled idle state handler
+ * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
+ *
+ * Provides full smp barrier semantics before and after calling.
+ */
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
+{
+ int n = dev->coupled->online_count;
+
+ smp_mb__before_atomic_inc();
+ atomic_inc(a);
+
+ while (atomic_read(a) < n)
+ cpu_relax();
+
+ if (atomic_inc_return(a) == n * 2) {
+ atomic_set(a, 0);
+ return;
+ }
+
+ while (atomic_read(a) > n)
+ cpu_relax();
+}
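+
+/*
+ * Usage sketch (illustrative only, not part of this change): a coupled
+ * state enter callback can use the barrier so that one cpu programs
+ * shared power-control hardware while the others wait.  The "example_*"
+ * names are assumptions.
+ *
+ *	static atomic_t example_barrier = ATOMIC_INIT(0);
+ *
+ *	static int example_enter_cluster_off(struct cpuidle_device *dev,
+ *				struct cpuidle_driver *drv, int index)
+ *	{
+ *		cpuidle_coupled_parallel_barrier(dev, &example_barrier);
+ *
+ *		if (dev->cpu == 0)
+ *			example_program_cluster_power_down();
+ *
+ *		cpuidle_coupled_parallel_barrier(dev, &example_barrier);
+ *
+ *		cpu_do_idle();
+ *		return index;
+ *	}
+ */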
+
+/**
+ * cpuidle_state_is_coupled - check if a state is part of a coupled set
+ * @dev: struct cpuidle_device for the current cpu
+ * @drv: struct cpuidle_driver for the platform
+ * @state: index of the target state in drv->states
+ *
+ * Returns true if the target state is coupled with cpus besides this one
+ */
+bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int state)
+{
+ return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
+}
+
+/**
+ * cpuidle_coupled_set_ready - mark a cpu as ready
+ * @coupled: the struct coupled that contains the current cpu
+ */
+static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
+{
+ atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
+}
+
+/**
+ * cpuidle_coupled_set_not_ready - mark a cpu as not ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Decrements the ready counter, unless the ready (and thus the waiting) counter
+ * is equal to the number of online cpus. Prevents a race where one cpu
+ * decrements the waiting counter and then re-increments it just before another
+ * cpu has decremented its ready counter, leading to the ready counter going
+ * down from the number of online cpus without going through the coupled idle
+ * state.
+ *
+ * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
+ * counter was equal to the number of online cpus.
+ */
+static
+inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
+{
+ int all;
+ int ret;
+
+ all = coupled->online_count | (coupled->online_count << WAITING_BITS);
+ ret = atomic_add_unless(&coupled->ready_waiting_counts,
+ -MAX_WAITING_CPUS, all);
+
+ return ret ? 0 : -EINVAL;
+}
+
+/**
+ * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all of the cpus in a coupled set are out of the ready loop.
+ */
+static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
+{
+ int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
+ return r == 0;
+}
+
+/**
+ * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all cpus coupled to this target state are in the ready loop
+ */
+static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
+{
+ int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
+ return r == coupled->online_count;
+}
+
+/**
+ * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all cpus coupled to this target state are in the wait loop
+ */
+static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
+{
+ int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
+ return w == coupled->online_count;
+}
+
+/**
+ * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all of the cpus in a coupled set are out of the waiting loop.
+ */
+static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
+{
+ int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
+ return w == 0;
+}
+
+/**
+ * cpuidle_coupled_get_state - determine the deepest idle state
+ * @dev: struct cpuidle_device for this cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns the deepest idle state that all coupled cpus can enter
+ */
+static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
+ struct cpuidle_coupled *coupled)
+{
+ int i;
+ int state = INT_MAX;
+
+ /*
+ * Read barrier ensures that read of requested_state is ordered after
+ * reads of ready_count. Matches the write barriers
+ * cpuidle_set_state_waiting.
+ */
+ smp_rmb();
+
+ for_each_cpu_mask(i, coupled->coupled_cpus)
+ if (cpu_online(i) && coupled->requested_state[i] < state)
+ state = coupled->requested_state[i];
+
+ return state;
+}
+
+static void cpuidle_coupled_poked(void *info)
+{
+ int cpu = (unsigned long)info;
+ cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
+}
+
+/**
+ * cpuidle_coupled_poke - wake up a cpu that may be waiting
+ * @cpu: target cpu
+ *
+ * Ensures that the target cpu exits its waiting idle state (if it is in it)
+ * and will see updates to waiting_count before it re-enters its waiting idle
+ * state.
+ *
+ * If cpuidle_coupled_poked_mask is already set for the target cpu, that cpu
+ * either has or will soon have a pending IPI that will wake it out of idle,
+ * or it is currently processing the IPI and is not in idle.
+ */
+static void cpuidle_coupled_poke(int cpu)
+{
+ struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
+
+ if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
+ __smp_call_function_single(cpu, csd, 0);
+}
+
+/**
+ * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
+ * @dev: struct cpuidle_device for this cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Calls cpuidle_coupled_poke on all other online cpus.
+ */
+static void cpuidle_coupled_poke_others(int this_cpu,
+ struct cpuidle_coupled *coupled)
+{
+ int cpu;
+
+ for_each_cpu_mask(cpu, coupled->coupled_cpus)
+ if (cpu != this_cpu && cpu_online(cpu))
+ cpuidle_coupled_poke(cpu);
+}
+
+/**
+ * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
+ * @dev: struct cpuidle_device for this cpu
+ * @coupled: the struct coupled that contains the current cpu
+ * @next_state: the index in drv->states of the requested state for this cpu
+ *
+ * Updates the requested idle state for the specified cpuidle device,
+ * poking all coupled cpus out of idle if necessary to let them see the new
+ * state.
+ */
+static void cpuidle_coupled_set_waiting(int cpu,
+ struct cpuidle_coupled *coupled, int next_state)
+{
+ int w;
+
+ coupled->requested_state[cpu] = next_state;
+
+ /*
+ * If this is the last cpu to enter the waiting state, poke
+ * all the other cpus out of their waiting state so they can
+ * enter a deeper state. This can race with one of the cpus
+ * exiting the waiting state due to an interrupt and
+ * decrementing waiting_count, see comment below.
+ *
+ * The atomic_inc_return provides a write barrier to order the write
+ * to requested_state with the later write that increments ready_count.
+ */
+ w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
+ if (w == coupled->online_count)
+ cpuidle_coupled_poke_others(cpu, coupled);
+}
+
+/**
+ * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
+ * @dev: struct cpuidle_device for this cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Removes the requested idle state for the specified cpuidle device.
+ */
+static void cpuidle_coupled_set_not_waiting(int cpu,
+ struct cpuidle_coupled *coupled)
+{
+ /*
+ * Decrementing waiting count can race with incrementing it in
+ * cpuidle_coupled_set_waiting, but that's OK. Worst case, some
+ * cpus will increment ready_count and then spin until they
+ * notice that this cpu has cleared its requested_state.
+ */
+ atomic_dec(&coupled->ready_waiting_counts);
+
+ coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
+}
+
+/**
+ * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
+ * @cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Marks this cpu as no longer in the ready and waiting loops. Decrements
+ * the waiting count first to prevent another cpu looping back in and seeing
+ * this cpu as waiting just before it exits idle.
+ */
+static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
+{
+ cpuidle_coupled_set_not_waiting(cpu, coupled);
+ atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
+}
+
+/**
+ * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
+ * @cpu - this cpu
+ *
+ * Turns on interrupts and spins until any outstanding poke interrupts have
+ * been processed and the poke bit has been cleared.
+ *
+ * Other interrupts may also be processed while interrupts are enabled, so
+ * need_resched() must be tested after turning interrupts off again to make sure
+ * the interrupt didn't schedule work that should take the cpu out of idle.
+ *
+ * Returns 0 if need_resched was false, -EINTR if need_resched was true.
+ */
+static int cpuidle_coupled_clear_pokes(int cpu)
+{
+ local_irq_enable();
+ while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask))
+ cpu_relax();
+ local_irq_disable();
+
+ return need_resched() ? -EINTR : 0;
+}
+
+/**
+ * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
+ * @dev: struct cpuidle_device for the current cpu
+ * @drv: struct cpuidle_driver for the platform
+ * @next_state: index of the requested state in drv->states
+ *
+ * Coordinate with coupled cpus to enter the target state. This is a two
+ * stage process. In the first stage, the cpus are operating independently,
+ * and may call into cpuidle_enter_state_coupled at completely different times.
+ * To save as much power as possible, the first cpus to call this function will
+ * go to an intermediate state (the cpuidle_device's safe state), and wait for
+ * all the other cpus to call this function. Once all coupled cpus are idle,
+ * the second stage will start. Each coupled cpu will spin until all cpus have
+ * guaranteed that they will call the target_state.
+ *
+ * This function must be called with interrupts disabled. It may enable
+ * interrupts while preparing for idle, and it will always return with
+ * interrupts enabled.
+ */
+int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state)
+{
+ int entered_state = -1;
+ struct cpuidle_coupled *coupled = dev->coupled;
+
+ if (!coupled)
+ return -EINVAL;
+
+ while (coupled->prevent) {
+ if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ local_irq_enable();
+ return entered_state;
+ }
+ entered_state = cpuidle_enter_state(dev, drv,
+ dev->safe_state_index);
+ }
+
+ /* Read barrier ensures online_count is read after prevent is cleared */
+ smp_rmb();
+
+ cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
+
+retry:
+ /*
+ * Wait for all coupled cpus to be idle, using the deepest state
+ * allowed for a single cpu.
+ */
+ while (!cpuidle_coupled_cpus_waiting(coupled)) {
+ if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+ }
+
+ if (coupled->prevent) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+ }
+
+ entered_state = cpuidle_enter_state(dev, drv,
+ dev->safe_state_index);
+ }
+
+ if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+ }
+
+ /*
+ * All coupled cpus are probably idle. There is a small chance that
+ * one of the other cpus just became active. Increment the ready count,
+ * and spin until all coupled cpus have incremented the counter. Once a
+ * cpu has incremented the ready counter, it cannot abort idle and must
+ * spin until either all cpus have incremented the ready counter, or
+ * another cpu leaves idle and decrements the waiting counter.
+ */
+
+ cpuidle_coupled_set_ready(coupled);
+ while (!cpuidle_coupled_cpus_ready(coupled)) {
+ /* Check if any other cpus bailed out of idle. */
+ if (!cpuidle_coupled_cpus_waiting(coupled))
+ if (!cpuidle_coupled_set_not_ready(coupled))
+ goto retry;
+
+ cpu_relax();
+ }
+
+ /* all cpus have acked the coupled state */
+ next_state = cpuidle_coupled_get_state(dev, coupled);
+
+ entered_state = cpuidle_enter_state(dev, drv, next_state);
+
+ cpuidle_coupled_set_done(dev->cpu, coupled);
+
+out:
+ /*
+ * Normal cpuidle states are expected to return with irqs enabled.
+ * That leads to an inefficiency where a cpu receiving an interrupt
+ * that brings it out of idle will process that interrupt before
+ * exiting the idle enter function and decrementing ready_count. All
+ * other cpus will need to spin waiting for the cpu that is processing
+ * the interrupt. If the driver returns with interrupts disabled,
+ * all other cpus will loop back into the safe idle state instead of
+ * spinning, saving power.
+ *
+ * Calling local_irq_enable here allows coupled states to return with
+ * interrupts disabled, but won't cause problems for drivers that
+ * exit with interrupts enabled.
+ */
+ local_irq_enable();
+
+ /*
+ * Wait until all coupled cpus have exited idle. There is no risk that
+ * a cpu exits and re-enters the ready state because this cpu has
+ * already decremented its waiting_count.
+ */
+ while (!cpuidle_coupled_no_cpus_ready(coupled))
+ cpu_relax();
+
+ return entered_state;
+}
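
As an aside, the two-stage handshake described in the kernel-doc above can be illustrated outside the kernel. The sketch below is a standalone userspace program (pthreads plus C11 atomics), not the kernel implementation: the 16-bit waiting/ready split of the counter, the bare busy-wait loops, and the single-shot use of the counter are simplifications for illustration, and the counter unwinding done by cpuidle_coupled_set_done() is omitted.

/*
 * Standalone sketch of the two-stage coupled-idle handshake (NOT kernel
 * code): stage 1 waits until every "cpu" is idle, stage 2 waits until
 * every cpu has committed to the deep state.  The 16-bit split below is
 * an assumption for illustration only.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS		4
#define READY_ONE	(1 << 16)
#define WAITING_MASK	0xffff

static atomic_int ready_waiting_counts;

static void *coupled_idle(void *arg)
{
	long cpu = (long)arg;

	/* Stage 1: announce that this cpu is waiting. */
	atomic_fetch_add(&ready_waiting_counts, 1);
	while ((atomic_load(&ready_waiting_counts) & WAITING_MASK) != NCPUS)
		;	/* the kernel runs the per-cpu "safe" state here */

	/* Stage 2: commit to the shared deep state. */
	atomic_fetch_add(&ready_waiting_counts, READY_ONE);
	while ((atomic_load(&ready_waiting_counts) >> 16) != NCPUS)
		;	/* spin until every cpu has committed */

	printf("cpu %ld: entering coupled deep state\n", cpu);
	/* (the kernel then unwinds both counts via cpuidle_coupled_set_done) */
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, coupled_idle, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}
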
+
+static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
+{
+ cpumask_t cpus;
+ cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
+ coupled->online_count = cpumask_weight(&cpus);
+}
+
+/**
+ * cpuidle_coupled_register_device - register a coupled cpuidle device
+ * @dev: struct cpuidle_device for the current cpu
+ *
+ * Called from cpuidle_register_device to handle coupled idle init. Finds the
+ * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
+ * exists yet.
+ */
+int cpuidle_coupled_register_device(struct cpuidle_device *dev)
+{
+ int cpu;
+ struct cpuidle_device *other_dev;
+ struct call_single_data *csd;
+ struct cpuidle_coupled *coupled;
+
+ if (cpumask_empty(&dev->coupled_cpus))
+ return 0;
+
+ for_each_cpu_mask(cpu, dev->coupled_cpus) {
+ other_dev = per_cpu(cpuidle_devices, cpu);
+ if (other_dev && other_dev->coupled) {
+ coupled = other_dev->coupled;
+ goto have_coupled;
+ }
+ }
+
+ /* No existing coupled info found, create a new one */
+ coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
+ if (!coupled)
+ return -ENOMEM;
+
+ coupled->coupled_cpus = dev->coupled_cpus;
+
+have_coupled:
+ dev->coupled = coupled;
+ if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
+ coupled->prevent++;
+
+ cpuidle_coupled_update_online_cpus(coupled);
+
+ coupled->refcnt++;
+
+ csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
+ csd->func = cpuidle_coupled_poked;
+ csd->info = (void *)(unsigned long)dev->cpu;
+
+ return 0;
+}
+
+/**
+ * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
+ * @dev: struct cpuidle_device for the current cpu
+ *
+ * Called from cpuidle_unregister_device to tear down coupled idle. Removes the
+ * cpu from the coupled idle set, and frees the cpuidle_coupled_info struct if
+ * this was the last cpu in the set.
+ */
+void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
+{
+ struct cpuidle_coupled *coupled = dev->coupled;
+
+ if (cpumask_empty(&dev->coupled_cpus))
+ return;
+
+ if (--coupled->refcnt)
+ kfree(coupled);
+ dev->coupled = NULL;
+}
+
+/**
+ * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
+ * @coupled: the struct coupled that contains the cpu that is changing state
+ *
+ * Disables coupled cpuidle on a coupled set of cpus. Used to ensure that
+ * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
+ */
+static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
+{
+ int cpu = get_cpu();
+
+ /* Force all cpus out of the waiting loop. */
+ coupled->prevent++;
+ cpuidle_coupled_poke_others(cpu, coupled);
+ put_cpu();
+ while (!cpuidle_coupled_no_cpus_waiting(coupled))
+ cpu_relax();
+}
+
+/**
+ * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
+ * @coupled: the struct coupled that contains the cpu that is changing state
+ *
+ * Enables coupled cpuidle on a coupled set of cpus. Used to ensure that
+ * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
+ */
+static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
+{
+ int cpu = get_cpu();
+
+ /*
+ * Write barrier ensures readers see the new online_count when they
+ * see prevent == 0.
+ */
+ smp_wmb();
+ coupled->prevent--;
+ /* Force cpus out of the prevent loop. */
+ cpuidle_coupled_poke_others(cpu, coupled);
+ put_cpu();
+}
+
+/**
+ * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
+ * @nb: notifier block
+ * @action: hotplug transition
+ * @hcpu: target cpu number
+ *
+ * Called when a cpu is brought on or offline using hotplug. Updates the
+ * coupled cpu set appropriately
+ */
+static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
+ unsigned long action, void *hcpu)
+{
+ int cpu = (unsigned long)hcpu;
+ struct cpuidle_device *dev;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ case CPU_DOWN_PREPARE:
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ case CPU_DOWN_FAILED:
+ break;
+ default:
+ return NOTIFY_OK;
+ }
+
+ mutex_lock(&cpuidle_lock);
+
+ dev = per_cpu(cpuidle_devices, cpu);
+ if (!dev->coupled)
+ goto out;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ case CPU_DOWN_PREPARE:
+ cpuidle_coupled_prevent_idle(dev->coupled);
+ break;
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ cpuidle_coupled_update_online_cpus(dev->coupled);
+ /* Fall through */
+ case CPU_UP_CANCELED:
+ case CPU_DOWN_FAILED:
+ cpuidle_coupled_allow_idle(dev->coupled);
+ break;
+ }
+
+out:
+ mutex_unlock(&cpuidle_lock);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpuidle_coupled_cpu_notifier = {
+ .notifier_call = cpuidle_coupled_cpu_notify,
+};
+
+static int __init cpuidle_coupled_init(void)
+{
+ return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
+}
+core_initcall(cpuidle_coupled_init);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 2f0083a..e81cfda 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -103,6 +103,34 @@
}
/**
+ * cpuidle_enter_state - enter the state and update stats
+ * @dev: cpuidle device for this cpu
+ * @drv: cpuidle driver for this cpu
+ * @next_state: index into drv->states of the state to enter
+ */
+int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+ int next_state)
+{
+ int entered_state;
+
+ entered_state = cpuidle_enter_ops(dev, drv, next_state);
+
+ if (entered_state >= 0) {
+ /* Update cpuidle counters */
+ /* This can be moved to within driver enter routine
+ * but that results in multiple copies of same code.
+ */
+ dev->states_usage[entered_state].time +=
+ (unsigned long long)dev->last_residency;
+ dev->states_usage[entered_state].usage++;
+ } else {
+ dev->last_residency = 0;
+ }
+
+ return entered_state;
+}
+
+/**
* cpuidle_idle_call - the main idle loop
*
* NOTE: no locks or semaphores should be used here
@@ -143,23 +171,15 @@
trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
trace_cpu_idle_rcuidle(next_state, dev->cpu);
- entered_state = cpuidle_enter_ops(dev, drv, next_state);
+ if (cpuidle_state_is_coupled(dev, drv, next_state))
+ entered_state = cpuidle_enter_state_coupled(dev, drv,
+ next_state);
+ else
+ entered_state = cpuidle_enter_state(dev, drv, next_state);
trace_power_end_rcuidle(dev->cpu);
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
- if (entered_state >= 0) {
- /* Update cpuidle counters */
- /* This can be moved to within driver enter routine
- * but that results in multiple copies of same code.
- */
- dev->states_usage[entered_state].time +=
- (unsigned long long)dev->last_residency;
- dev->states_usage[entered_state].usage++;
- } else {
- dev->last_residency = 0;
- }
-
/* give the governor an opportunity to reflect on the outcome */
if (cpuidle_curr_governor->reflect)
cpuidle_curr_governor->reflect(dev, entered_state);
@@ -387,13 +407,25 @@
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
- if ((ret = cpuidle_add_sysfs(cpu_dev))) {
- module_put(cpuidle_driver->owner);
- return ret;
- }
+ ret = cpuidle_add_sysfs(cpu_dev);
+ if (ret)
+ goto err_sysfs;
+
+ ret = cpuidle_coupled_register_device(dev);
+ if (ret)
+ goto err_coupled;
dev->registered = 1;
return 0;
+
+err_coupled:
+ cpuidle_remove_sysfs(cpu_dev);
+ wait_for_completion(&dev->kobj_unregister);
+err_sysfs:
+ list_del(&dev->device_list);
+ per_cpu(cpuidle_devices, dev->cpu) = NULL;
+ module_put(cpuidle_driver->owner);
+ return ret;
}
/**
@@ -443,6 +475,8 @@
wait_for_completion(&dev->kobj_unregister);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
+ cpuidle_coupled_unregister_device(dev);
+
cpuidle_resume_and_unlock();
module_put(cpuidle_driver->owner);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 7db1866..76e7f69 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -14,6 +14,8 @@
extern struct mutex cpuidle_lock;
extern spinlock_t cpuidle_driver_lock;
extern int cpuidle_disabled(void);
+extern int cpuidle_enter_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state);
/* idle loop */
extern void cpuidle_install_idle_handler(void);
@@ -30,4 +32,34 @@
extern int cpuidle_add_sysfs(struct device *dev);
extern void cpuidle_remove_sysfs(struct device *dev);
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int state);
+int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state);
+int cpuidle_coupled_register_device(struct cpuidle_device *dev);
+void cpuidle_coupled_unregister_device(struct cpuidle_device *dev);
+#else
+static inline bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int state)
+{
+ return false;
+}
+
+static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state)
+{
+ return -1;
+}
+
+static inline int cpuidle_coupled_register_device(struct cpuidle_device *dev)
+{
+ return 0;
+}
+
+static inline void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
+{
+}
+#endif
+
#endif /* __DRIVER_CPUIDLE_H */
diff --git a/drivers/gpu/ion/msm/ion_cp_common.c b/drivers/gpu/ion/msm/ion_cp_common.c
index fa4bad5..8c9b95d 100644
--- a/drivers/gpu/ion/msm/ion_cp_common.c
+++ b/drivers/gpu/ion/msm/ion_cp_common.c
@@ -176,9 +176,9 @@
buffer, ret_value);
atomic_dec(&buf->secure_cnt);
} else {
- pr_debug("Protected buffer %p from %x-%x\n",
- buffer, buf->buffer,
- buf->buffer + buffer->size);
+ pr_debug("Protected buffer %p from %pa (size %x)\n",
+ buffer, &buf->buffer,
+ buffer->size);
buf->want_delayed_unsecure |=
flags & ION_UNSECURE_DELAYED ? 1 : 0;
buf->data = data;
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index bbe97de..a3739a2 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2764,6 +2764,11 @@
};
static struct a3xx_vbif_data a305b_vbif[] = {
+ { A3XX_VBIF_IN_RD_LIM_CONF0, 0x00181818 },
+ { A3XX_VBIF_IN_WR_LIM_CONF0, 0x00181818 },
+ { A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000018 },
+ { A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000018 },
+ { A3XX_VBIF_DDR_OUT_MAX_BURST, 0x00000303 },
{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
{0, 0},
};
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 5280cd8..ca0a439 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -129,6 +129,8 @@
#define QUP_MAX_CLK_STATE_RETRIES 300
#define DEFAULT_CLK_RATE (19200000)
+#define I2C_STATUS_CLK_STATE 13
+#define QUP_OUT_FIFO_NOT_EMPTY 0x10
static char const * const i2c_rsrcs[] = {"i2c_clk", "i2c_sda"};
@@ -380,6 +382,7 @@
static int qup_i2c_poll_clock_ready(struct qup_i2c_dev *dev)
{
uint32_t retries = 0;
+ uint32_t op_flgs = -1, clk_state = -1;
/*
* Wait for the clock state to transition to either IDLE or FORCED
@@ -388,16 +391,32 @@
while (retries++ < QUP_MAX_CLK_STATE_RETRIES) {
uint32_t status = readl_relaxed(dev->base + QUP_I2C_STATUS);
- uint32_t clk_state = (status >> 13) & 0x7;
+ clk_state = (status >> I2C_STATUS_CLK_STATE) & 0x7;
+ /* Read the operational register */
+ op_flgs = readl_relaxed(dev->base +
+ QUP_OPERATIONAL) & QUP_OUT_FIFO_NOT_EMPTY;
- if (clk_state == I2C_CLK_RESET_BUSIDLE_STATE ||
- clk_state == I2C_CLK_FORCED_LOW_STATE)
+ /*
+ * In a rare corner case the slave stretches the clock while the
+ * output FIFO has exactly one block of space free. The QUP then
+ * raises an output service interrupt even though there is no more
+ * data to write, which can leave the output FIFO not empty.
+ */
+ if (op_flgs == 0 &&
+ (clk_state == I2C_CLK_RESET_BUSIDLE_STATE ||
+ clk_state == I2C_CLK_FORCED_LOW_STATE)) {
+ dev_dbg(dev->dev, "clk_state 0x%x op_flgs [%x]\n",
+ clk_state, op_flgs);
return 0;
+ }
+
/* 1-bit delay before we check again */
udelay(dev->one_bit_t);
}
- dev_err(dev->dev, "Error waiting for clk ready\n");
+ dev_err(dev->dev, "Error waiting for clk ready clk_state: 0x%x op_flgs: 0x%x\n",
+ clk_state, op_flgs);
return -ETIMEDOUT;
}
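
The loop above follows a common poll-with-timeout pattern: sample the status, test a predicate, wait one bit time, and give up after a bounded number of retries. The standalone sketch below shows the same shape with the register access and delay stubbed out; it folds the two register reads into one for brevity, and the clock-state values and bit positions it uses are illustrative placeholders, not the real QUP definitions.

/* Generic poll-with-retry sketch (not the QUP driver; register layout and
 * state values are stand-ins chosen for the demo). */
#include <stdint.h>
#include <stdio.h>

#define MAX_RETRIES	300
#define CLK_STATE_SHIFT	13	/* assumed bit position */
#define FIFO_NOT_EMPTY	0x10	/* assumed flag */

/* stand-ins for readl_relaxed() and udelay(one_bit_t) */
static uint32_t read_status(void) { static int n; return n++ < 5 ? FIFO_NOT_EMPTY : 0; }
static void one_bit_delay(void) { }

static int poll_clock_ready(void)
{
	uint32_t clk_state = 0, fifo_busy = 0;

	for (unsigned retries = 0; retries < MAX_RETRIES; retries++) {
		uint32_t status = read_status();

		clk_state = (status >> CLK_STATE_SHIFT) & 0x7;
		fifo_busy = status & FIFO_NOT_EMPTY;

		/* done only when the FIFO has drained AND the clock FSM is idle */
		if (!fifo_busy && clk_state == 0)
			return 0;

		one_bit_delay();
	}

	fprintf(stderr, "timeout: clk_state=0x%x fifo_busy=0x%x\n",
		clk_state, fifo_busy);
	return -1;
}

int main(void) { return poll_clock_ready() ? 1 : 0; }
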
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index badbc2b..4c72b65 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -447,6 +447,17 @@
To compile this driver as a module, choose M here: the module will
be called pmic8xxx-keypad.
+config KEYBOARD_QPNP
+ tristate "Qualcomm QPNP PMIC keypad support"
+ depends on OF_SPMI && SPMI && MSM_QPNP_INT
+ help
+ Say Y here if you want to enable the driver for the QPNP PMIC
+ keypad provided as a reference design from Qualcomm. It is intended
+ to support up to a 10 x 8 matrix-based keypad design.
+
+ To compile this driver as a module, choose M here: the module will
+ be called qpnp-keypad.
+
config KEYBOARD_SAMSUNG
tristate "Samsung keypad support"
depends on HAVE_CLK
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 61b57ef..833904a 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -36,6 +36,7 @@
obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o
obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o
obj-$(CONFIG_KEYBOARD_PMIC8XXX) += pmic8xxx-keypad.o
+obj-$(CONFIG_KEYBOARD_QPNP) += qpnp-keypad.o
obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
obj-$(CONFIG_KEYBOARD_QT1070) += qt1070.o
diff --git a/drivers/input/keyboard/qpnp-keypad.c b/drivers/input/keyboard/qpnp-keypad.c
new file mode 100644
index 0000000..a46e3b5
--- /dev/null
+++ b/drivers/input/keyboard/qpnp-keypad.c
@@ -0,0 +1,852 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/spmi.h>
+
+#define QPNP_MAX_ROWS 10
+#define QPNP_MAX_COLS 8
+#define QPNP_MIN_ROWS 2
+#define QPNP_MIN_COLS 1
+#define QPNP_ROW_SHIFT 3
+#define QPNP_MATRIX_MAX_SIZE (QPNP_MAX_ROWS * QPNP_MAX_COLS)
+
+/* in ms */
+#define MAX_SCAN_DELAY 128
+#define MIN_SCAN_DELAY 1
+#define KEYP_DEFAULT_SCAN_DELAY 32
+
+/* in ns */
+#define MAX_ROW_HOLD_DELAY 250000
+#define MIN_ROW_HOLD_DELAY 31250
+
+/* in ms */
+#define MAX_DEBOUNCE_TIME 20
+#define MIN_DEBOUNCE_TIME 5
+#define KEYP_DEFAULT_DEBOUNCE 15
+
+/* register offsets */
+#define KEYP_STATUS(base) (base + 0x08)
+#define KEYP_SIZE_CTRL(base) (base + 0x40)
+#define KEYP_SCAN_CTRL(base) (base + 0x42)
+#define KEYP_FSM_CNTL(base) (base + 0x44)
+#define KEYP_EN_CTRL(base) (base + 0x46)
+
+#define KEYP_CTRL_KEYP_EN BIT(7)
+#define KEYP_CTRL_EVNTS BIT(0)
+#define KEYP_CTRL_EVNTS_MASK 0x3
+
+#define KEYP_SIZE_COLS_SHIFT 4
+#define KEYP_SIZE_COLS_MASK 0x70
+#define KEYP_SIZE_ROWS_MASK 0x0F
+
+#define KEYP_SCAN_DBC_MASK 0x03
+#define KEYP_SCAN_SCNP_MASK 0x38
+#define KEYP_SCAN_ROWP_MASK 0xC0
+#define KEYP_SCAN_SCNP_SHIFT 3
+#define KEYP_SCAN_ROWP_SHIFT 6
+
+#define KEYP_CTRL_SCAN_ROWS_BITS 0x7
+
+#define KEYP_SCAN_DBOUNCE_SHIFT 1
+#define KEYP_SCAN_PAUSE_SHIFT 3
+#define KEYP_SCAN_ROW_HOLD_SHIFT 6
+
+#define KEYP_FSM_READ_EN BIT(0)
+
+/* bits of these registers represent
+ * '0' for key press
+ * '1' for key release
+ */
+#define KEYP_RECENT_DATA(base) (base + 0x7C)
+#define KEYP_OLD_DATA(base) (base + 0x5C)
+
+#define KEYP_CLOCK_FREQ 32768
+
+struct qpnp_kp {
+ const struct matrix_keymap_data *keymap_data;
+ struct input_dev *input;
+ struct spmi_device *spmi;
+
+ int key_sense_irq;
+ int key_stuck_irq;
+ u16 base;
+
+ u32 num_rows;
+ u32 num_cols;
+ u32 debounce_ms;
+ u32 row_hold_ns;
+ u32 scan_delay_ms;
+ bool wakeup;
+ bool rep;
+
+ unsigned short keycodes[QPNP_MATRIX_MAX_SIZE];
+
+ u16 keystate[QPNP_MAX_ROWS];
+ u16 stuckstate[QPNP_MAX_ROWS];
+};
+
+static int qpnp_kp_write_u8(struct qpnp_kp *kp, u8 data, u16 reg)
+{
+ int rc;
+
+ rc = spmi_ext_register_writel(kp->spmi->ctrl, kp->spmi->sid,
+ reg, &data, 1);
+ if (rc < 0)
+ dev_err(&kp->spmi->dev,
+ "Error writing to address: %X - ret %d\n", reg, rc);
+
+ return rc;
+}
+
+static int qpnp_kp_read(struct qpnp_kp *kp,
+ u8 *data, u16 reg, unsigned num_bytes)
+{
+ int rc;
+
+ rc = spmi_ext_register_readl(kp->spmi->ctrl, kp->spmi->sid,
+ reg, data, num_bytes);
+ if (rc < 0)
+ dev_err(&kp->spmi->dev,
+ "Error reading from address : %X - ret %d\n", reg, rc);
+
+ return rc;
+}
+
+static int qpnp_kp_read_u8(struct qpnp_kp *kp, u8 *data, u16 reg)
+{
+ int rc;
+
+ rc = qpnp_kp_read(kp, data, reg, 1);
+ if (rc < 0)
+ dev_err(&kp->spmi->dev, "Error reading qpnp: %X - ret %d\n",
+ reg, rc);
+ return rc;
+}
+
+static u8 qpnp_col_state(struct qpnp_kp *kp, u8 col)
+{
+ /* all keys pressed on that particular row? */
+ if (col == 0x00)
+ return 1 << kp->num_cols;
+ else
+ return col & ((1 << kp->num_cols) - 1);
+}
+
+/*
+ * Synchronous read protocol
+ *
+ * 1. Write '1' to ReadState bit in KEYP_FSM_CNTL register
+ * 2. Wait 2*32KHz clocks, so that HW can successfully enter read mode
+ * synchronously
+ * 3. Read rows in old array first if events are more than one
+ * 4. Read rows in recent array
+ * 5. Wait 4*32KHz clocks
+ * 6. Write '0' to ReadState bit of KEYP_FSM_CNTL register so that hw can
+ * synchronously exit read mode.
+ */
+static int qpnp_sync_read(struct qpnp_kp *kp, bool enable)
+{
+ int rc;
+ u8 fsm_ctl;
+
+ rc = qpnp_kp_read_u8(kp, &fsm_ctl, KEYP_FSM_CNTL(kp->base));
+ if (rc < 0) {
+ dev_err(&kp->spmi->dev,
+ "Error reading KEYP_FSM_CNTL reg, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (enable)
+ fsm_ctl |= KEYP_FSM_READ_EN;
+ else
+ fsm_ctl &= ~KEYP_FSM_READ_EN;
+
+ rc = qpnp_kp_write_u8(kp, fsm_ctl, KEYP_FSM_CNTL(kp->base));
+ if (rc < 0) {
+ dev_err(&kp->spmi->dev,
+ "Error writing KEYP_FSM_CNTL reg, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* 2 * 32KHz clocks */
+ udelay((2 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);
+
+ return rc;
+}
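
The numbered protocol above boils down to bracketing the snapshot reads with the ReadState bit and two fixed delays. The sketch below replays that ordering as a standalone program; reg_read()/reg_write() and delay_us() are placeholder stubs (not SPMI APIs), and the register offsets are only those used for the demo.

/* Host-side sketch of the synchronous read sequence (not driver code). */
#include <stdint.h>
#include <stdio.h>

#define KEYP_CLOCK_FREQ   32768
#define KEYP_FSM_READ_EN  0x01

static uint8_t regs[256];                              /* fake register file */
static uint8_t reg_read(uint16_t r)            { return regs[r & 0xff]; }
static void    reg_write(uint16_t r, uint8_t v) { regs[r & 0xff] = v; }
static void    delay_us(unsigned us)            { (void)us; /* udelay() stand-in */ }

static unsigned clocks_to_us(unsigned clocks)
{
	/* one 32 kHz cycle is roughly 30.5 us; round up */
	return (clocks * 1000000u + KEYP_CLOCK_FREQ - 1) / KEYP_CLOCK_FREQ;
}

static void sync_read(uint16_t fsm_reg, uint16_t data_reg, uint8_t *row, int nrows)
{
	reg_write(fsm_reg, reg_read(fsm_reg) | KEYP_FSM_READ_EN);   /* step 1 */
	delay_us(clocks_to_us(2) + 1);                              /* step 2 */

	for (int i = 0; i < nrows; i++)                             /* steps 3-4 */
		row[i] = reg_read(data_reg + 2 * i);

	delay_us(clocks_to_us(4) + 1);                              /* step 5 */
	reg_write(fsm_reg, reg_read(fsm_reg) & ~KEYP_FSM_READ_EN);  /* step 6 */
}

int main(void)
{
	uint8_t rows[10];

	sync_read(0x44, 0x7c, rows, 10);
	printf("2 clocks = %u us, 4 clocks = %u us\n",
	       clocks_to_us(2), clocks_to_us(4));
	return 0;
}
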
+
+static int qpnp_kp_read_data(struct qpnp_kp *kp, u16 *state,
+ u16 data_reg, int read_rows)
+{
+ int rc, row;
+ u8 new_data[QPNP_MAX_ROWS];
+
+ /*
+ * Check if last row will be scanned. If not, scan to clear key event
+ * counter
+ */
+ if (kp->num_rows < QPNP_MAX_ROWS) {
+ rc = qpnp_kp_read_u8(kp, &new_data[QPNP_MAX_ROWS - 1],
+ data_reg + (QPNP_MAX_ROWS - 1) * 2);
+ if (rc)
+ return rc;
+ }
+
+ for (row = 0; row < kp->num_rows; row++) {
+ rc = qpnp_kp_read_u8(kp, &new_data[row], data_reg + row * 2);
+ if (rc)
+ return rc;
+
+ dev_dbg(&kp->spmi->dev, "new_data[%d] = %d\n", row,
+ new_data[row]);
+ state[row] = qpnp_col_state(kp, new_data[row]);
+ }
+
+ return 0;
+}
+
+static int qpnp_kp_read_matrix(struct qpnp_kp *kp, u16 *new_state,
+ u16 *old_state)
+{
+ int rc, read_rows;
+
+ read_rows = kp->num_rows;
+
+ rc = qpnp_sync_read(kp, true);
+ if (rc < 0) {
+ dev_err(&kp->spmi->dev,
+ "Error setting the FSM read enable bit rc=%d\n", rc);
+ return rc;
+ }
+
+ if (old_state) {
+ rc = qpnp_kp_read_data(kp, old_state, KEYP_OLD_DATA(kp->base),
+ read_rows);
+ if (rc < 0) {
+ dev_err(&kp->spmi->dev,
+ "Error reading KEYP_OLD_DATA, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ rc = qpnp_kp_read_data(kp, new_state, KEYP_RECENT_DATA(kp->base),
+ read_rows);
+ if (rc < 0) {
+ dev_err(&kp->spmi->dev,
+ "Error reading KEYP_RECENT_DATA, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* 4 * 32KHz clocks */
+ udelay((4 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);
+
+ rc = qpnp_sync_read(kp, false);
+ if (rc < 0) {
+ dev_err(&kp->spmi->dev,
+ "Error resetting the FSM read enable bit rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static void __qpnp_kp_scan_matrix(struct qpnp_kp *kp, u16 *new_state,
+ u16 *old_state)
+{
+ int row, col, code;
+
+ for (row = 0; row < kp->num_rows; row++) {
+ int bits_changed = new_state[row] ^ old_state[row];
+
+ if (!bits_changed)
+ continue;
+
+ for (col = 0; col < kp->num_cols; col++) {
+ if (!(bits_changed & (1 << col)))
+ continue;
+
+ dev_dbg(&kp->spmi->dev, "key [%d:%d] %s\n", row, col,
+ !(new_state[row] & (1 << col)) ?
+ "pressed" : "released");
+ code = MATRIX_SCAN_CODE(row, col, QPNP_ROW_SHIFT);
+ input_event(kp->input, EV_MSC, MSC_SCAN, code);
+ input_report_key(kp->input,
+ kp->keycodes[code],
+ !(new_state[row] & (1 << col)));
+ input_sync(kp->input);
+ }
+ }
+}
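
The per-row scan above relies on XOR to isolate the columns that toggled and on MATRIX_SCAN_CODE to pack (row, col) into a keymap index. The short standalone program below (not driver code) demonstrates that arithmetic with one hypothetical key press; the row/column values are invented for the example.

/* Illustration of the changed-key scan: '0' = pressed, as in the hardware. */
#include <stdio.h>
#include <stdint.h>

#define ROW_SHIFT 3
#define SCAN_CODE(row, col) (((row) << ROW_SHIFT) + (col))

int main(void)
{
	uint16_t old_state = 0xff;              /* nothing pressed */
	uint16_t new_state = 0xff & ~(1 << 2);  /* column 2 goes down */
	int row = 1;

	uint16_t changed = old_state ^ new_state;
	for (int col = 0; col < 8; col++) {
		if (!(changed & (1 << col)))
			continue;
		int pressed = !(new_state & (1 << col));
		printf("key [%d:%d] scancode %d %s\n", row, col,
		       SCAN_CODE(row, col), pressed ? "pressed" : "released");
	}
	return 0;
}
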
+
+static bool qpnp_detect_ghost_keys(struct qpnp_kp *kp, u16 *new_state)
+{
+ int row, found_first = -1;
+ u16 check, row_state;
+
+ check = 0;
+ for (row = 0; row < kp->num_rows; row++) {
+ row_state = (~new_state[row]) &
+ ((1 << kp->num_cols) - 1);
+
+ if (hweight16(row_state) > 1) {
+ if (found_first == -1)
+ found_first = row;
+ if (check & row_state) {
+ dev_dbg(&kp->spmi->dev,
+ "detected ghost key row[%d],row[%d]\n",
+ found_first, row);
+ return true;
+ }
+ }
+ check |= row_state;
+ }
+ return false;
+}
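
The rule applied above is the standard passive-matrix ghosting check: once a row with more than one key down shares a pressed column with an earlier row, a phantom contact cannot be ruled out and the whole event is dropped. A minimal standalone demonstration (not driver code, and using '1' = pressed for readability, the opposite of the hardware convention):

#include <stdio.h>
#include <stdint.h>

static int popcount16(uint16_t v) { int n = 0; while (v) { n += v & 1; v >>= 1; } return n; }

int main(void)
{
	/* three real keys at (0,0), (0,1), (1,0) produce a phantom at (1,1) */
	uint16_t rows[2] = { 0x03, 0x03 };
	uint16_t seen = 0;
	int ghost = 0;

	for (int r = 0; r < 2; r++) {
		if (popcount16(rows[r]) > 1 && (seen & rows[r]))
			ghost = 1;
		seen |= rows[r];
	}
	printf("ghosting %s\n", ghost ? "possible: drop the event" : "not detected");
	return 0;
}
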
+
+static int qpnp_kp_scan_matrix(struct qpnp_kp *kp, unsigned int events)
+{
+ u16 new_state[QPNP_MAX_ROWS];
+ u16 old_state[QPNP_MAX_ROWS];
+ int rc;
+ switch (events) {
+ case 0x1:
+ rc = qpnp_kp_read_matrix(kp, new_state, NULL);
+ if (rc < 0)
+ return rc;
+
+ /* detecting ghost key is not an error */
+ if (qpnp_detect_ghost_keys(kp, new_state))
+ return 0;
+ __qpnp_kp_scan_matrix(kp, new_state, kp->keystate);
+ memcpy(kp->keystate, new_state, sizeof(new_state));
+ break;
+ case 0x3: /* two events - the event counter is Gray-coded */
+ rc = qpnp_kp_read_matrix(kp, new_state, old_state);
+ if (rc < 0)
+ return rc;
+
+ __qpnp_kp_scan_matrix(kp, old_state, kp->keystate);
+ __qpnp_kp_scan_matrix(kp, new_state, old_state);
+ memcpy(kp->keystate, new_state, sizeof(new_state));
+ break;
+ case 0x2:
+ dev_dbg(&kp->spmi->dev, "Some key events were lost\n");
+ rc = qpnp_kp_read_matrix(kp, new_state, old_state);
+ if (rc < 0)
+ return rc;
+ __qpnp_kp_scan_matrix(kp, old_state, kp->keystate);
+ __qpnp_kp_scan_matrix(kp, new_state, old_state);
+ memcpy(kp->keystate, new_state, sizeof(new_state));
+ break;
+ default:
+ rc = -EINVAL;
+ }
+ return rc;
+}
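
For context on the case values above: a 2-bit Gray counter advances 00 -> 01 -> 11 -> 10, so one pending event reads as 0x1, two as 0x3, and 0x2 indicates the counter has gone further, i.e. events were lost. This is an illustrative note, not driver behaviour beyond what the switch already encodes; the snippet below just prints the binary-to-Gray mapping.

#include <stdio.h>

int main(void)
{
	for (unsigned n = 0; n < 4; n++)
		printf("%u events pending -> counter reads 0x%x\n",
		       n, n ^ (n >> 1));	/* binary-to-Gray conversion */
	return 0;
}
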
+
+/*
+ * NOTE: We read the recent and old data registers blindly whenever the
+ * key-stuck interrupt happens, because the event counter is not updated
+ * for this interrupt: a stuck key is not treated as a key state change.
+ *
+ * The old data register contents are read but not used, because they
+ * could report a key that was pressed before the key became stuck as
+ * the stuck key, since its pressed status is still stored in the old
+ * data register.
+ */
+static irqreturn_t qpnp_kp_stuck_irq(int irq, void *data)
+{
+ u16 new_state[QPNP_MAX_ROWS];
+ u16 old_state[QPNP_MAX_ROWS];
+ int rc;
+ struct qpnp_kp *kp = data;
+
+ rc = qpnp_kp_read_matrix(kp, new_state, old_state);
+ if (rc < 0) {
+ dev_err(&kp->spmi->dev, "failed to read keypad matrix\n");
+ return IRQ_HANDLED;
+ }
+
+ __qpnp_kp_scan_matrix(kp, new_state, kp->stuckstate);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_kp_irq(int irq, void *data)
+{
+ struct qpnp_kp *kp = data;
+ u8 ctrl_val, events;
+ int rc;
+
+ rc = qpnp_kp_read_u8(kp, &ctrl_val, KEYP_STATUS(kp->base));
+ if (rc < 0) {
+ dev_err(&kp->spmi->dev,
+ "Error reading KEYP_STATUS register\n");
+ return IRQ_HANDLED;
+ }
+
+ events = ctrl_val & KEYP_CTRL_EVNTS_MASK;
+
+ rc = qpnp_kp_scan_matrix(kp, events);
+ if (rc < 0)
+ dev_err(&kp->spmi->dev, "failed to scan matrix\n");
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit qpnp_kpd_init(struct qpnp_kp *kp)
+{
+ int bits, rc, cycles;
+ u8 kpd_scan_cntl, kpd_size_cntl;
+
+ /* Configure the SIZE register, #rows and #columns */
+ rc = qpnp_kp_read_u8(kp, &kpd_size_cntl, KEYP_SIZE_CTRL(kp->base));
+ if (rc < 0) {
+ dev_err(&kp->spmi->dev,
+ "Error reading KEYP_SIZE_CTRL reg, rc=%d\n", rc);
+ return rc;
+ }
+
+ kpd_size_cntl &= ~(KEYP_SIZE_COLS_MASK | KEYP_SIZE_ROWS_MASK);
+ kpd_size_cntl |= (((kp->num_cols - 1) << KEYP_SIZE_COLS_SHIFT) &
+ KEYP_SIZE_COLS_MASK);
+ kpd_size_cntl |= ((kp->num_rows - 1) & KEYP_SIZE_ROWS_MASK);
+
+ rc = qpnp_kp_write_u8(kp, kpd_size_cntl, KEYP_SIZE_CTRL(kp->base));
+ if (rc < 0) {
+ dev_err(&kp->spmi->dev,
+ "Error writing to KEYP_SIZE_CTRL reg, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Configure the SCAN CTL register, debounce, row pause, scan delay */
+ rc = qpnp_kp_read_u8(kp, &kpd_scan_cntl, KEYP_SCAN_CTRL(kp->base));
+ if (rc < 0) {
+ dev_err(&kp->spmi->dev,
+ "Error reading KEYP_SCAN_CTRL reg, rc=%d\n", rc);
+ return rc;
+ }
+
+ kpd_scan_cntl &= ~(KEYP_SCAN_DBC_MASK | KEYP_SCAN_SCNP_MASK |
+ KEYP_SCAN_ROWP_MASK);
+ kpd_scan_cntl |= (((kp->debounce_ms / 5) - 1) & KEYP_SCAN_DBC_MASK);
+
+ bits = fls(kp->scan_delay_ms) - 1;
+ kpd_scan_cntl |= ((bits << KEYP_SCAN_SCNP_SHIFT) & KEYP_SCAN_SCNP_MASK);
+
+ /* Row hold time is a multiple of 32KHz cycles. */
+ cycles = (kp->row_hold_ns * KEYP_CLOCK_FREQ) / NSEC_PER_SEC;
+ if (cycles)
+ cycles = ilog2(cycles);
+ kpd_scan_cntl |= ((cycles << KEYP_SCAN_ROW_HOLD_SHIFT) &
+ KEYP_SCAN_ROWP_MASK);
+
+ rc = qpnp_kp_write_u8(kp, kpd_scan_cntl, KEYP_SCAN_CTRL(kp->base));
+ if (rc)
+ dev_err(&kp->spmi->dev,
+ "Error writing KEYP_SCAN reg, rc=%d\n", rc);
+
+ return rc;
+}
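
A quick worked check of the row-hold encoding above: hold times from 31.25 us to 250 us correspond to 1..8 cycles of the 32 kHz clock, and ilog2() of the cycle count gives the 2-bit ROWP field value 0..3. The standalone snippet below reproduces that arithmetic (the open-coded loop stands in for the kernel's ilog2()).

#include <stdio.h>

int main(void)
{
	const long long freq = 32768, nsec_per_sec = 1000000000LL;
	long long holds[] = { 31250, 62500, 125000, 250000 };

	for (int i = 0; i < 4; i++) {
		long long cycles = holds[i] * freq / nsec_per_sec;
		int field = 0;
		while (cycles >> (field + 1))	/* ilog2() stand-in */
			field++;
		printf("row_hold_ns=%lld -> cycles=%lld -> ROWP field=%d\n",
		       holds[i], cycles, field);
	}
	return 0;
}
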
+
+static int qpnp_kp_enable(struct qpnp_kp *kp)
+{
+ int rc;
+ u8 kpd_cntl;
+
+ rc = qpnp_kp_read_u8(kp, &kpd_cntl, KEYP_EN_CTRL(kp->base));
+ if (rc < 0) {
+ dev_err(&kp->spmi->dev,
+ "Error reading KEYP_EN_CTRL reg, rc=%d\n", rc);
+ return rc;
+ }
+
+ kpd_cntl |= KEYP_CTRL_KEYP_EN;
+
+ rc = qpnp_kp_write_u8(kp, kpd_cntl, KEYP_EN_CTRL(kp->base));
+ if (rc < 0)
+ dev_err(&kp->spmi->dev,
+ "Error writing KEYP_CTRL reg, rc=%d\n", rc);
+
+ return rc;
+}
+
+static int qpnp_kp_disable(struct qpnp_kp *kp)
+{
+ int rc;
+ u8 kpd_cntl;
+
+ rc = qpnp_kp_read_u8(kp, &kpd_cntl, KEYP_EN_CTRL(kp->base));
+ if (rc < 0) {
+ dev_err(&kp->spmi->dev,
+ "Error reading KEYP_EN_CTRL reg, rc=%d\n", rc);
+ return rc;
+ }
+
+ kpd_cntl &= ~KEYP_CTRL_KEYP_EN;
+
+ rc = qpnp_kp_write_u8(kp, kpd_cntl, KEYP_EN_CTRL(kp->base));
+ if (rc < 0)
+ dev_err(&kp->spmi->dev,
+ "Error writing KEYP_CTRL reg, rc=%d\n", rc);
+
+ return rc;
+}
+
+static int qpnp_kp_open(struct input_dev *dev)
+{
+ struct qpnp_kp *kp = input_get_drvdata(dev);
+
+ return qpnp_kp_enable(kp);
+}
+
+static void qpnp_kp_close(struct input_dev *dev)
+{
+ struct qpnp_kp *kp = input_get_drvdata(dev);
+
+ qpnp_kp_disable(kp);
+}
+
+static int __devinit qpnp_keypad_parse_dt(struct qpnp_kp *kp)
+{
+ struct matrix_keymap_data *keymap_data;
+ int rc, keymap_len, i;
+ u32 *keymap;
+ const __be32 *map;
+
+ rc = of_property_read_u32(kp->spmi->dev.of_node,
+ "keypad,num-rows", &kp->num_rows);
+ if (rc) {
+ dev_err(&kp->spmi->dev, "Unable to parse 'num-rows'\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(kp->spmi->dev.of_node,
+ "keypad,num-cols", &kp->num_cols);
+ if (rc) {
+ dev_err(&kp->spmi->dev, "Unable to parse 'num-cols'\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(kp->spmi->dev.of_node,
+ "qcom,scan-delay-ms", &kp->scan_delay_ms);
+ if (rc && rc != -EINVAL) {
+ dev_err(&kp->spmi->dev, "Unable to parse 'scan-delay-ms'\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(kp->spmi->dev.of_node,
+ "qcom,row-hold-ns", &kp->row_hold_ns);
+ if (rc && rc != -EINVAL) {
+ dev_err(&kp->spmi->dev, "Unable to parse 'row-hold-ns'\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(kp->spmi->dev.of_node,
+ "qcom,debounce-ms", &kp->debounce_ms);
+ if (rc && rc != -EINVAL) {
+ dev_err(&kp->spmi->dev, "Unable to parse 'debounce-ms'\n");
+ return rc;
+ }
+
+ kp->wakeup = of_property_read_bool(kp->spmi->dev.of_node,
+ "qcom,wakeup");
+
+ kp->rep = !of_property_read_bool(kp->spmi->dev.of_node,
+ "linux,keypad-no-autorepeat");
+
+ map = of_get_property(kp->spmi->dev.of_node,
+ "linux,keymap", &keymap_len);
+ if (!map) {
+ dev_err(&kp->spmi->dev, "Keymap not specified\n");
+ return -EINVAL;
+ }
+
+ keymap_data = devm_kzalloc(&kp->spmi->dev,
+ sizeof(*keymap_data), GFP_KERNEL);
+ if (!keymap_data) {
+ dev_err(&kp->spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ keymap_data->keymap_size = keymap_len / sizeof(u32);
+
+ keymap = devm_kzalloc(&kp->spmi->dev,
+ sizeof(uint32_t) * keymap_data->keymap_size, GFP_KERNEL);
+ if (!keymap) {
+ dev_err(&kp->spmi->dev, "could not allocate memory for keymap\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < keymap_data->keymap_size; i++) {
+ unsigned int key = be32_to_cpup(map + i);
+ int keycode, row, col;
+
+ row = (key >> 24) & 0xff;
+ col = (key >> 16) & 0xff;
+ keycode = key & 0xffff;
+ keymap[i] = KEY(row, col, keycode);
+ }
+ keymap_data->keymap = keymap;
+ kp->keymap_data = keymap_data;
+
+ return 0;
+}
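
The keymap parsing above assumes the usual linux,keymap cell format: each 32-bit cell packs the row in bits 31..24, the column in bits 23..16 and the Linux keycode in bits 15..0. The standalone snippet below shows the packing and unpacking for one hypothetical entry (KEY_VOLUMEUP = 115 at row 1, column 2).

#include <stdio.h>
#include <stdint.h>

#define MAKE_CELL(row, col, code) (((uint32_t)(row) << 24) | ((uint32_t)(col) << 16) | ((code) & 0xffff))

int main(void)
{
	uint32_t cell = MAKE_CELL(1, 2, 115);	/* row 1, col 2, KEY_VOLUMEUP */
	unsigned row  = (cell >> 24) & 0xff;
	unsigned col  = (cell >> 16) & 0xff;
	unsigned code = cell & 0xffff;

	printf("cell 0x%08x -> row %u col %u keycode %u\n", cell, row, col, code);
	return 0;
}
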
+
+static int __devinit qpnp_kp_probe(struct spmi_device *spmi)
+{
+ struct qpnp_kp *kp;
+ struct resource *keypad_base;
+ int rc = 0;
+
+ kp = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_kp), GFP_KERNEL);
+ if (!kp) {
+ dev_err(&spmi->dev, "%s: Can't allocate qpnp_kp\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ kp->spmi = spmi;
+
+ rc = qpnp_keypad_parse_dt(kp);
+ if (rc < 0) {
+ dev_err(&spmi->dev, "Error parsing device tree\n");
+ return rc;
+ }
+
+ /* the #rows and #columns are compulsory */
+ if (!kp->num_cols || !kp->num_rows ||
+ kp->num_cols > QPNP_MAX_COLS ||
+ kp->num_rows > QPNP_MAX_ROWS ||
+ kp->num_cols < QPNP_MIN_COLS ||
+ kp->num_rows < QPNP_MIN_ROWS) {
+ dev_err(&spmi->dev, "invalid rows/cols input data\n");
+ return -EINVAL;
+ }
+
+ if (!kp->keymap_data) {
+ dev_err(&spmi->dev, "keymap not specified\n");
+ return -EINVAL;
+ }
+
+ /* the below parameters are optional */
+ if (!kp->scan_delay_ms) {
+ kp->scan_delay_ms = KEYP_DEFAULT_SCAN_DELAY;
+ } else {
+ if (kp->scan_delay_ms > MAX_SCAN_DELAY ||
+ kp->scan_delay_ms < MIN_SCAN_DELAY) {
+ dev_err(&spmi->dev,
+ "invalid keypad scan time supplied\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!kp->row_hold_ns) {
+ kp->row_hold_ns = MIN_ROW_HOLD_DELAY;
+ } else {
+ if (kp->row_hold_ns > MAX_ROW_HOLD_DELAY ||
+ kp->row_hold_ns < MIN_ROW_HOLD_DELAY) {
+ dev_err(&spmi->dev,
+ "invalid keypad row hold time supplied\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!kp->debounce_ms) {
+ kp->debounce_ms = KEYP_DEFAULT_DEBOUNCE;
+ } else {
+ if (kp->debounce_ms > MAX_DEBOUNCE_TIME ||
+ kp->debounce_ms < MIN_DEBOUNCE_TIME ||
+ (kp->debounce_ms % 5 != 0)) {
+ dev_err(&spmi->dev,
+ "invalid debounce time supplied\n");
+ return -EINVAL;
+ }
+ }
+
+ kp->input = input_allocate_device();
+ if (!kp->input) {
+ dev_err(&spmi->dev, "Can't allocate keypad input device\n");
+ return -ENOMEM;
+ }
+
+ kp->key_sense_irq = spmi_get_irq_byname(spmi, NULL, "kp-sense");
+ if (kp->key_sense_irq < 0) {
+ dev_err(&spmi->dev, "Unable to get keypad sense irq\n");
+ return kp->key_sense_irq;
+ }
+
+ kp->key_stuck_irq = spmi_get_irq_byname(spmi, NULL, "kp-stuck");
+ if (kp->key_stuck_irq < 0) {
+ dev_err(&spmi->dev, "Unable to get stuck irq\n");
+ return kp->key_stuck_irq;
+ }
+
+ keypad_base = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+ if (!keypad_base) {
+ dev_err(&spmi->dev, "Unable to get keypad base address\n");
+ return -ENXIO;
+ }
+ kp->base = keypad_base->start;
+
+ kp->input->name = "qpnp_keypad";
+ kp->input->phys = "qpnp_keypad/input0";
+ kp->input->id.version = 0x0001;
+ kp->input->id.product = 0x0001;
+ kp->input->id.vendor = 0x0001;
+
+ kp->input->evbit[0] = BIT_MASK(EV_KEY);
+
+ if (kp->rep)
+ set_bit(EV_REP, kp->input->evbit);
+
+ kp->input->keycode = kp->keycodes;
+ kp->input->keycodemax = QPNP_MATRIX_MAX_SIZE;
+ kp->input->keycodesize = sizeof(kp->keycodes[0]);
+ kp->input->open = qpnp_kp_open;
+ kp->input->close = qpnp_kp_close;
+
+ matrix_keypad_build_keymap(kp->keymap_data, QPNP_ROW_SHIFT,
+ kp->keycodes, kp->input->keybit);
+
+ input_set_capability(kp->input, EV_MSC, MSC_SCAN);
+ input_set_drvdata(kp->input, kp);
+
+ /* initialize keypad state */
+ memset(kp->keystate, 0xff, sizeof(kp->keystate));
+ memset(kp->stuckstate, 0xff, sizeof(kp->stuckstate));
+
+ rc = qpnp_kpd_init(kp);
+ if (rc < 0) {
+ dev_err(&spmi->dev, "unable to initialize keypad controller\n");
+ return rc;
+ }
+
+ rc = input_register_device(kp->input);
+ if (rc < 0) {
+ dev_err(&spmi->dev, "unable to register keypad input device\n");
+ return rc;
+ }
+
+ rc = devm_request_irq(&spmi->dev, kp->key_sense_irq, qpnp_kp_irq,
+ IRQF_TRIGGER_RISING, "qpnp-keypad-sense", kp);
+ if (rc < 0) {
+ dev_err(&spmi->dev, "failed to request keypad sense irq\n");
+ return rc;
+ }
+
+ rc = devm_request_irq(&spmi->dev, kp->key_stuck_irq, qpnp_kp_stuck_irq,
+ IRQF_TRIGGER_RISING, "qpnp-keypad-stuck", kp);
+ if (rc < 0) {
+ dev_err(&spmi->dev, "failed to request keypad stuck irq\n");
+ return rc;
+ }
+
+ dev_set_drvdata(&spmi->dev, kp);
+
+ device_init_wakeup(&spmi->dev, kp->wakeup);
+
+ return rc;
+}
+
+static int qpnp_kp_remove(struct spmi_device *spmi)
+{
+ struct qpnp_kp *kp = dev_get_drvdata(&spmi->dev);
+
+ device_init_wakeup(&spmi->dev, 0);
+ input_unregister_device(kp->input);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int qpnp_kp_suspend(struct device *dev)
+{
+ struct qpnp_kp *kp = dev_get_drvdata(dev);
+ struct input_dev *input_dev = kp->input;
+
+ if (device_may_wakeup(dev)) {
+ enable_irq_wake(kp->key_sense_irq);
+ } else {
+ mutex_lock(&input_dev->mutex);
+
+ if (input_dev->users)
+ qpnp_kp_disable(kp);
+
+ mutex_unlock(&input_dev->mutex);
+ }
+
+ return 0;
+}
+
+static int qpnp_kp_resume(struct device *dev)
+{
+ struct qpnp_kp *kp = dev_get_drvdata(dev);
+ struct input_dev *input_dev = kp->input;
+
+ if (device_may_wakeup(dev)) {
+ disable_irq_wake(kp->key_sense_irq);
+ } else {
+ mutex_lock(&input_dev->mutex);
+
+ if (input_dev->users)
+ qpnp_kp_enable(kp);
+
+ mutex_unlock(&input_dev->mutex);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(qpnp_kp_pm_ops,
+ qpnp_kp_suspend, qpnp_kp_resume);
+
+static struct of_device_id spmi_match_table[] = {
+ { .compatible = "qcom,qpnp-keypad",
+ },
+ {}
+};
+
+static struct spmi_driver qpnp_kp_driver = {
+ .probe = qpnp_kp_probe,
+ .remove = __devexit_p(qpnp_kp_remove),
+ .driver = {
+ .name = "qcom,qpnp-keypad",
+ .of_match_table = spmi_match_table,
+ .owner = THIS_MODULE,
+ .pm = &qpnp_kp_pm_ops,
+ },
+};
+
+static int __init qpnp_kp_init(void)
+{
+ return spmi_driver_register(&qpnp_kp_driver);
+}
+module_init(qpnp_kp_init);
+
+static void __exit qpnp_kp_exit(void)
+{
+ spmi_driver_unregister(&qpnp_kp_driver);
+}
+module_exit(qpnp_kp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QPNP keypad driver");
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 0c20815..0ea230a 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -369,6 +369,7 @@
struct regulator *vcc_ana;
struct regulator *vcc_dig;
struct regulator *vcc_i2c;
+ struct mxt_address_pair addr_pair;
#if defined(CONFIG_FB)
struct notifier_block fb_notif;
#elif defined(CONFIG_HAS_EARLYSUSPEND)
@@ -490,9 +491,27 @@
dev_dbg(dev, "checksum:\t0x%x\n", message->checksum);
}
-static int mxt_switch_to_bootloader_address(struct mxt_data *data)
+static int mxt_lookup_bootloader_address(struct mxt_data *data)
{
int i;
+
+ for (i = 0; mxt_slave_addresses[i].application != 0; i++) {
+ if (mxt_slave_addresses[i].application ==
+ data->client->addr) {
+ data->addr_pair.bootloader =
+ mxt_slave_addresses[i].bootloader;
+ return 0;
+ }
+ }
+
+ dev_err(&data->client->dev, "Address 0x%02x not found in address table",
+ data->client->addr);
+ return -EINVAL;
+
+}
+
+static int mxt_switch_to_bootloader_address(struct mxt_data *data)
+{
struct i2c_client *client = data->client;
if (data->state == BOOTLOADER) {
@@ -500,27 +519,16 @@
return -EINVAL;
}
- for (i = 0; mxt_slave_addresses[i].application != 0; i++) {
- if (mxt_slave_addresses[i].application == client->addr) {
- dev_info(&client->dev, "Changing to bootloader address: "
- "%02x -> %02x",
- client->addr,
- mxt_slave_addresses[i].bootloader);
+ dev_info(&client->dev, "Changing to bootloader address: 0x%02x -> 0x%02x",
+ client->addr, data->addr_pair.bootloader);
- client->addr = mxt_slave_addresses[i].bootloader;
- data->state = BOOTLOADER;
- return 0;
- }
- }
-
- dev_err(&client->dev, "Address 0x%02x not found in address table",
- client->addr);
- return -EINVAL;
+ client->addr = data->addr_pair.bootloader;
+ data->state = BOOTLOADER;
+ return 0;
}
static int mxt_switch_to_appmode_address(struct mxt_data *data)
{
- int i;
struct i2c_client *client = data->client;
if (data->state == APPMODE) {
@@ -528,23 +536,13 @@
return -EINVAL;
}
- for (i = 0; mxt_slave_addresses[i].application != 0; i++) {
- if (mxt_slave_addresses[i].bootloader == client->addr) {
- dev_info(&client->dev,
- "Changing to application mode address: "
- "0x%02x -> 0x%02x",
- client->addr,
- mxt_slave_addresses[i].application);
+ dev_info(&client->dev, "Changing to application mode address: " \
+ "0x%02x -> 0x%02x", client->addr,
+ data->addr_pair.application);
- client->addr = mxt_slave_addresses[i].application;
- data->state = APPMODE;
- return 0;
- }
- }
-
- dev_err(&client->dev, "Address 0x%02x not found in address table",
- client->addr);
- return -EINVAL;
+ client->addr = data->addr_pair.application;
+ data->state = APPMODE;
+ return 0;
}
static int mxt_get_bootloader_version(struct i2c_client *client, u8 val)
@@ -888,6 +886,17 @@
input_sync(input_dev);
}
+static void mxt_release_all(struct mxt_data *data)
+{
+ int id;
+
+ for (id = 0; id < MXT_MAX_FINGER; id++)
+ if (data->finger[id].status)
+ data->finger[id].status = MXT_RELEASE;
+
+ mxt_input_report(data, 0);
+}
+
static void mxt_input_touchevent(struct mxt_data *data,
struct mxt_message *message, int id)
{
@@ -899,6 +908,10 @@
int area;
int pressure;
+ if (status & MXT_SUPPRESS) {
+ mxt_release_all(data);
+ return;
+ }
/* Check the touch is present on the screen */
if (!(status & MXT_DETECT)) {
if (status & MXT_RELEASE) {
@@ -973,18 +986,7 @@
data->keyarray_old = data->keyarray_new;
}
-static void mxt_release_all(struct mxt_data *data)
-{
- int id;
-
- for (id = 0; id < MXT_MAX_FINGER; id++)
- if (data->finger[id].status)
- data->finger[id].status = MXT_RELEASE;
-
- mxt_input_report(data, 0);
-}
-
-static void mxt_handle_touch_supression(struct mxt_data *data, u8 status)
+static void mxt_handle_touch_suppression(struct mxt_data *data, u8 status)
{
dev_dbg(&data->client->dev, "touch suppression\n");
/* release all touches */
@@ -1039,7 +1041,7 @@
id = reportid - data->t9_min_reportid;
- /* check whether report id is part of T9,T15 or T42*/
+ /* check whether report id is part of T9, T15 or T42 */
if (reportid >= data->t9_min_reportid &&
reportid <= data->t9_max_reportid)
mxt_input_touchevent(data, &message, id);
@@ -1047,8 +1049,9 @@
reportid <= data->t15_max_reportid)
mxt_handle_key_array(data, &message);
else if (reportid >= data->t42_min_reportid &&
- reportid <= data->t42_max_reportid)
- mxt_handle_touch_supression(data, message.message[0]);
+ reportid <= data->t42_max_reportid)
+ mxt_handle_touch_suppression(data,
+ message.message[0]);
else
mxt_dump_message(dev, &message);
} while (reportid != 0xff);
@@ -1695,9 +1698,11 @@
switch (data->info.family_id) {
case MXT224_ID:
case MXT224E_ID:
+ case MXT336S_ID:
max_frame_size = MXT_SINGLE_FW_MAX_FRAME_SIZE;
break;
case MXT1386_ID:
+ case MXT1664S_ID:
max_frame_size = MXT_CHIPSET_FW_MAX_FRAME_SIZE;
break;
default:
@@ -2686,6 +2691,12 @@
return -ENOMEM;
}
+ rc = of_property_read_u32(np, "atmel,bl-addr", &temp_val);
+ if (rc && (rc != -EINVAL))
+ dev_err(dev, "Unable to read bootloader address\n");
+ else if (rc != -EINVAL)
+ pdata->bl_addr = (u8) temp_val;
+
pdata->config_array = info;
for_each_child_of_node(np, temp) {
@@ -2724,12 +2735,11 @@
} else
info->build = (u8) temp_val;
- info->bootldr_id = of_property_read_u32(temp,
+ rc = of_property_read_u32(temp,
"atmel,bootldr-id", &temp_val);
- if (rc) {
+ if (rc && (rc != -EINVAL))
dev_err(dev, "Unable to read bootldr-id\n");
- return rc;
- } else
+ else if (rc != -EINVAL)
info->bootldr_id = (u8) temp_val;
rc = mxt_parse_config(dev, temp, info);
@@ -2937,6 +2947,13 @@
mxt_power_on_delay(data);
+ data->addr_pair.application = data->client->addr;
+
+ if (pdata->bl_addr)
+ data->addr_pair.bootloader = pdata->bl_addr;
+ else
+ mxt_lookup_bootloader_address(data);
+
error = mxt_initialize(data);
if (error)
goto err_reset_gpio_req;
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi4.c b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
index 7d6f3dd..899c83b 100644
--- a/drivers/input/touchscreen/synaptics_i2c_rmi4.c
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
* Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -71,6 +72,17 @@
#define NO_SLEEP_OFF (0 << 3)
#define NO_SLEEP_ON (1 << 3)
+#define RMI4_VTG_MIN_UV 2700000
+#define RMI4_VTG_MAX_UV 3300000
+#define RMI4_ACTIVE_LOAD_UA 15000
+#define RMI4_LPM_LOAD_UA 10
+
+#define RMI4_I2C_VTG_MIN_UV 1800000
+#define RMI4_I2C_VTG_MAX_UV 1800000
+#define RMI4_I2C_LOAD_UA 10000
+#define RMI4_I2C_LPM_LOAD_UA 10
+
+
static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
unsigned short addr, unsigned char *data,
unsigned short length);
@@ -1590,6 +1602,164 @@
}
EXPORT_SYMBOL(synaptics_rmi4_new_function);
+
+static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA)
+{
+ return (regulator_count_voltages(reg) > 0) ?
+ regulator_set_optimum_mode(reg, load_uA) : 0;
+}
+
+static int synaptics_rmi4_regulator_configure(struct synaptics_rmi4_data
+ *rmi4_data, bool on)
+{
+ int retval;
+
+ if (on == false)
+ goto hw_shutdown;
+
+ if (rmi4_data->board->regulator_en) {
+ rmi4_data->vdd = regulator_get(&rmi4_data->i2c_client->dev,
+ "vdd");
+ if (IS_ERR(rmi4_data->vdd)) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to get vdd regulator\n",
+ __func__);
+ return PTR_ERR(rmi4_data->vdd);
+ }
+
+ if (regulator_count_voltages(rmi4_data->vdd) > 0) {
+ retval = regulator_set_voltage(rmi4_data->vdd,
+ RMI4_VTG_MIN_UV, RMI4_VTG_MAX_UV);
+ if (retval) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "regulator set_vtg failed retval=%d\n",
+ retval);
+ goto err_set_vtg_vdd;
+ }
+ }
+ }
+
+ if (rmi4_data->board->i2c_pull_up) {
+ rmi4_data->vcc_i2c = regulator_get(&rmi4_data->i2c_client->dev,
+ "vcc_i2c");
+ if (IS_ERR(rmi4_data->vcc_i2c)) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to get i2c regulator\n",
+ __func__);
+ retval = PTR_ERR(rmi4_data->vcc_i2c);
+ goto err_get_vtg_i2c;
+ }
+
+ if (regulator_count_voltages(rmi4_data->vcc_i2c) > 0) {
+ retval = regulator_set_voltage(rmi4_data->vcc_i2c,
+ RMI4_I2C_VTG_MIN_UV, RMI4_I2C_VTG_MAX_UV);
+ if (retval) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "reg set i2c vtg failed retval=%d\n",
+ retval);
+ goto err_set_vtg_i2c;
+ }
+ }
+ }
+
+ return 0;
+
+err_set_vtg_i2c:
+ if (rmi4_data->board->i2c_pull_up)
+ regulator_put(rmi4_data->vcc_i2c);
+err_get_vtg_i2c:
+ if (rmi4_data->board->regulator_en)
+ if (regulator_count_voltages(rmi4_data->vdd) > 0)
+ regulator_set_voltage(rmi4_data->vdd, 0,
+ RMI4_VTG_MAX_UV);
+err_set_vtg_vdd:
+ if (rmi4_data->board->regulator_en)
+ regulator_put(rmi4_data->vdd);
+ return retval;
+
+hw_shutdown:
+ if (rmi4_data->board->regulator_en) {
+ if (regulator_count_voltages(rmi4_data->vdd) > 0)
+ regulator_set_voltage(rmi4_data->vdd, 0,
+ RMI4_VTG_MAX_UV);
+ regulator_put(rmi4_data->vdd);
+ }
+ if (rmi4_data->board->i2c_pull_up) {
+ if (regulator_count_voltages(rmi4_data->vcc_i2c) > 0)
+ regulator_set_voltage(rmi4_data->vcc_i2c, 0,
+ RMI4_I2C_VTG_MAX_UV);
+ regulator_put(rmi4_data->vcc_i2c);
+ }
+ return 0;
+}
+
+static int synaptics_rmi4_power_on(struct synaptics_rmi4_data *rmi4_data,
+ bool on) {
+ int retval;
+
+ if (on == false)
+ goto power_off;
+
+ if (rmi4_data->board->regulator_en) {
+ retval = reg_set_optimum_mode_check(rmi4_data->vdd,
+ RMI4_ACTIVE_LOAD_UA);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "Regulator vdd set_opt failed rc=%d\n",
+ retval);
+ return retval;
+ }
+
+ retval = regulator_enable(rmi4_data->vdd);
+ if (retval) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "Regulator vdd enable failed rc=%d\n",
+ retval);
+ goto error_reg_en_vdd;
+ }
+ }
+
+ if (rmi4_data->board->i2c_pull_up) {
+ retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
+ RMI4_I2C_LOAD_UA);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "Regulator vcc_i2c set_opt failed rc=%d\n",
+ retval);
+ goto error_reg_opt_i2c;
+ }
+
+ retval = regulator_enable(rmi4_data->vcc_i2c);
+ if (retval) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "Regulator vcc_i2c enable failed rc=%d\n",
+ retval);
+ goto error_reg_en_vcc_i2c;
+ }
+ }
+ return 0;
+
+error_reg_en_vcc_i2c:
+ if (rmi4_data->board->i2c_pull_up)
+ reg_set_optimum_mode_check(rmi4_data->vdd, 0);
+error_reg_opt_i2c:
+ if (rmi4_data->board->regulator_en)
+ regulator_disable(rmi4_data->vdd);
+error_reg_en_vdd:
+ if (rmi4_data->board->regulator_en)
+ reg_set_optimum_mode_check(rmi4_data->vdd, 0);
+ return retval;
+
+power_off:
+ if (rmi4_data->board->regulator_en) {
+ reg_set_optimum_mode_check(rmi4_data->vdd, 0);
+ regulator_disable(rmi4_data->vdd);
+ }
+ if (rmi4_data->board->i2c_pull_up) {
+ reg_set_optimum_mode_check(rmi4_data->vcc_i2c, 0);
+ regulator_disable(rmi4_data->vcc_i2c);
+ }
+ return 0;
+}
+
/**
* synaptics_rmi4_probe()
*
@@ -1607,7 +1777,7 @@
static int __devinit synaptics_rmi4_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
- int retval;
+ int retval = 0;
unsigned char ii;
unsigned char attr_count;
struct synaptics_rmi4_f1a_handle *f1a;
@@ -1651,18 +1821,6 @@
goto err_input_device;
}
- if (platform_data->regulator_en) {
- rmi4_data->regulator = regulator_get(&client->dev, "vdd");
- if (IS_ERR(rmi4_data->regulator)) {
- dev_err(&client->dev,
- "%s: Failed to get regulator\n",
- __func__);
- retval = PTR_ERR(rmi4_data->regulator);
- goto err_regulator;
- }
- regulator_enable(rmi4_data->regulator);
- }
-
rmi4_data->i2c_client = client;
rmi4_data->current_page = MASK_8BIT;
rmi4_data->board = platform_data;
@@ -1675,19 +1833,6 @@
rmi4_data->irq_enable = synaptics_rmi4_irq_enable;
rmi4_data->reset_device = synaptics_rmi4_reset_device;
- init_waitqueue_head(&rmi4_data->wait);
- mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex));
-
- retval = synaptics_rmi4_query_device(rmi4_data);
- if (retval < 0) {
- dev_err(&client->dev,
- "%s: Failed to query device\n",
- __func__);
- goto err_query_device;
- }
-
- i2c_set_clientdata(client, rmi4_data);
-
rmi4_data->input_dev->name = DRIVER_NAME;
rmi4_data->input_dev->phys = INPUT_PHYS_NAME;
rmi4_data->input_dev->id.bustype = BUS_I2C;
@@ -1723,6 +1868,31 @@
rmi4_data->num_of_fingers);
#endif
+ retval = synaptics_rmi4_regulator_configure(rmi4_data, true);
+ if (retval < 0) {
+ dev_err(&client->dev, "Failed to configure regulators\n");
+ goto err_input_device;
+ }
+
+ retval = synaptics_rmi4_power_on(rmi4_data, true);
+ if (retval < 0) {
+ dev_err(&client->dev, "Failed to power on\n");
+ goto err_input_device;
+ }
+
+ init_waitqueue_head(&rmi4_data->wait);
+ mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex));
+
+ retval = synaptics_rmi4_query_device(rmi4_data);
+ if (retval < 0) {
+ dev_err(&client->dev,
+ "%s: Failed to query device\n",
+ __func__);
+ goto err_query_device;
+ }
+
+ i2c_set_clientdata(client, rmi4_data);
+
f1a = NULL;
if (!list_empty(&rmi->support_fn_list)) {
list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
@@ -1803,11 +1973,8 @@
err_register_input:
err_query_device:
- if (platform_data->regulator_en) {
- regulator_disable(rmi4_data->regulator);
- regulator_put(rmi4_data->regulator);
- }
-
+ synaptics_rmi4_power_on(rmi4_data, false);
+ synaptics_rmi4_regulator_configure(rmi4_data, false);
if (!list_empty(&rmi->support_fn_list)) {
list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
@@ -1817,11 +1984,8 @@
kfree(fhandler);
}
}
-
-err_regulator:
input_free_device(rmi4_data->input_dev);
rmi4_data->input_dev = NULL;
-
err_input_device:
kfree(rmi4_data);
@@ -1844,8 +2008,6 @@
struct synaptics_rmi4_fn *fhandler;
struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client);
struct synaptics_rmi4_device_info *rmi;
- const struct synaptics_rmi4_platform_data *platform_data =
- rmi4_data->board;
rmi = &(rmi4_data->rmi4_mod_info);
@@ -1865,11 +2027,6 @@
input_unregister_device(rmi4_data->input_dev);
- if (platform_data->regulator_en) {
- regulator_disable(rmi4_data->regulator);
- regulator_put(rmi4_data->regulator);
- }
-
if (!list_empty(&rmi->support_fn_list)) {
list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
@@ -1881,6 +2038,9 @@
}
input_free_device(rmi4_data->input_dev);
+ synaptics_rmi4_power_on(rmi4_data, false);
+ synaptics_rmi4_regulator_configure(rmi4_data, false);
+
kfree(rmi4_data);
return 0;
@@ -2043,8 +2203,6 @@
static int synaptics_rmi4_suspend(struct device *dev)
{
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
- const struct synaptics_rmi4_platform_data *platform_data =
- rmi4_data->board;
if (!rmi4_data->sensor_sleep) {
rmi4_data->touch_stopped = true;
@@ -2053,9 +2211,6 @@
synaptics_rmi4_sensor_sleep(rmi4_data);
}
- if (platform_data->regulator_en)
- regulator_disable(rmi4_data->regulator);
-
return 0;
}
@@ -2072,11 +2227,6 @@
static int synaptics_rmi4_resume(struct device *dev)
{
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
- const struct synaptics_rmi4_platform_data *platform_data =
- rmi4_data->board;
-
- if (platform_data->regulator_en)
- regulator_enable(rmi4_data->regulator);
synaptics_rmi4_sensor_wake(rmi4_data);
rmi4_data->touch_stopped = false;
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi4.h b/drivers/input/touchscreen/synaptics_i2c_rmi4.h
index 3c37e54..d13f172 100644
--- a/drivers/input/touchscreen/synaptics_i2c_rmi4.h
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi4.h
@@ -5,6 +5,7 @@
*
* Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
* Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -187,7 +188,8 @@
struct input_dev *input_dev;
const struct synaptics_rmi4_platform_data *board;
struct synaptics_rmi4_device_info rmi4_mod_info;
- struct regulator *regulator;
+ struct regulator *vdd;
+ struct regulator *vcc_i2c;
struct mutex rmi4_io_ctrl_mutex;
struct delayed_work det_work;
struct workqueue_struct *det_workqueue;
diff --git a/drivers/iommu/msm_iommu_dev-v0.c b/drivers/iommu/msm_iommu_dev-v0.c
index 681d7b2..549800f 100644
--- a/drivers/iommu/msm_iommu_dev-v0.c
+++ b/drivers/iommu/msm_iommu_dev-v0.c
@@ -144,8 +144,7 @@
}
drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!drvdata->base) {
- pr_err("%s: Unable to ioremap address %x size %x\n", __func__,
- r->start, resource_size(r));
+ pr_err("%s: Unable to ioremap %pr\n", __func__, r);
return -ENOMEM;
}
drvdata->glb_base = drvdata->base;
@@ -355,8 +354,7 @@
r2 = request_mem_region(r->start, len, r->name);
if (!r2) {
- pr_err("Could not request memory region: start=%p, len=%d\n",
- (void *) r->start, len);
+ pr_err("Could not request memory region: %pr\n", r);
ret = -EBUSY;
goto fail;
}
@@ -364,8 +362,7 @@
drvdata->base = devm_ioremap(&pdev->dev, r2->start, len);
if (!drvdata->base) {
- pr_err("Could not ioremap: start=%p, len=%d\n",
- (void *) r2->start, len);
+ pr_err("Could not ioremap: %pr\n", r);
ret = -EBUSY;
goto fail;
}
@@ -466,7 +463,7 @@
ret = request_threaded_irq(irq, NULL,
msm_iommu_fault_handler,
IRQF_ONESHOT | IRQF_SHARED,
- "msm_iommu_nonsecure_irq", pdev);
+ "msm_iommu_nonsecure_irq", ctx_drvdata);
if (ret) {
pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
return ret;
diff --git a/drivers/iommu/msm_iommu_dev-v1.c b/drivers/iommu/msm_iommu_dev-v1.c
index f37e619..3f9f1c4 100644
--- a/drivers/iommu/msm_iommu_dev-v1.c
+++ b/drivers/iommu/msm_iommu_dev-v1.c
@@ -118,8 +118,8 @@
drvdata->clk_reg_virt = devm_ioremap(&pdev->dev, r->start,
resource_size(r));
if (!drvdata->clk_reg_virt) {
- pr_err("Failed to map 0x%x for iommu clk\n",
- r->start);
+ pr_err("Failed to map resource for iommu clk: %pr\n",
+ r);
ret = -ENOMEM;
goto fail;
}
diff --git a/drivers/leds/leds-pm8xxx.c b/drivers/leds/leds-pm8xxx.c
index 61b36eb..c3a5564 100644
--- a/drivers/leds/leds-pm8xxx.c
+++ b/drivers/leds/leds-pm8xxx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -53,6 +53,9 @@
#define WLED_BOOST_CFG_REG SSBI_REG_ADDR_WLED_CTRL(14)
#define WLED_HIGH_POLE_CAP_REG SSBI_REG_ADDR_WLED_CTRL(16)
+#define WLED_STRING_ONE 0 /* Rightmost string */
+#define WLED_STRING_TWO 1 /* Middle string */
+#define WLED_STRING_THREE 2 /* Leftmost string */
#define WLED_STRINGS 0x03
#define WLED_OVP_VAL_MASK 0x30
#define WLED_OVP_VAL_BIT_SHFT 0x04
@@ -87,10 +90,6 @@
#define TWO_WLED_STRINGS 2
#define THREE_WLED_STRINGS 3
-#define WLED_CABC_ONE_STRING 0x01
-#define WLED_CABC_TWO_STRING 0x03
-#define WLED_CABC_THREE_STRING 0x07
-
#define WLED_CABC_SHIFT 3
#define SSBI_REG_ADDR_RGB_CNTL1 0x12D
@@ -247,7 +246,7 @@
led_wled_set(struct pm8xxx_led_data *led, enum led_brightness value)
{
int rc, duty;
- u8 val, i, num_wled_strings;
+ u8 val, i;
if (value > WLED_MAX_LEVEL)
value = WLED_MAX_LEVEL;
@@ -272,36 +271,41 @@
duty = (WLED_MAX_DUTY_CYCLE * value) / WLED_MAX_LEVEL;
- num_wled_strings = led->wled_cfg->num_strings;
-
/* program brightness control registers */
- for (i = 0; i < num_wled_strings; i++) {
- rc = pm8xxx_readb(led->dev->parent,
- WLED_BRIGHTNESS_CNTL_REG1(i), &val);
- if (rc) {
- dev_err(led->dev->parent, "can't read wled brightnes ctrl"
- " register1 rc=%d\n", rc);
- return rc;
- }
+ for (i = 0; i < WLED_STRINGS; i++) {
+ if (led->wled_cfg->strings & (1 << i)) {
+ rc = pm8xxx_readb(led->dev->parent,
+ WLED_BRIGHTNESS_CNTL_REG1(i), &val);
+ if (rc) {
+ dev_err(led->dev->parent,
+ "can't read wled brightnes ctrl"
+ " register1 rc=%d\n", rc);
+ return rc;
+ }
- val = (val & ~WLED_BRIGHTNESS_MSB_MASK) |
- (duty >> WLED_8_BIT_SHFT);
- rc = pm8xxx_writeb(led->dev->parent,
- WLED_BRIGHTNESS_CNTL_REG1(i), val);
- if (rc) {
- dev_err(led->dev->parent, "can't write wled brightness ctrl"
- " register1 rc=%d\n", rc);
- return rc;
- }
+ val = (val & ~WLED_MAX_CURR_MASK) |
+ (duty >> WLED_8_BIT_SHFT);
- val = duty & WLED_8_BIT_MASK;
- rc = pm8xxx_writeb(led->dev->parent,
- WLED_BRIGHTNESS_CNTL_REG2(i), val);
- if (rc) {
- dev_err(led->dev->parent, "can't write wled brightness ctrl"
- " register2 rc=%d\n", rc);
- return rc;
- }
+ rc = pm8xxx_writeb(led->dev->parent,
+ WLED_BRIGHTNESS_CNTL_REG1(i), val);
+ if (rc) {
+ dev_err(led->dev->parent,
+ "can't write wled brightness ctrl"
+ " register1 rc=%d\n", rc);
+ return rc;
+ }
+
+ val = duty & WLED_8_BIT_MASK;
+ rc = pm8xxx_writeb(led->dev->parent,
+ WLED_BRIGHTNESS_CNTL_REG2(i), val);
+ if (rc) {
+ dev_err(led->dev->parent,
+ "can't write wled brightness ctrl"
+ " register2 rc=%d\n", rc);
+ return rc;
+ }
+ } else
+ continue;
}
rc = pm8xxx_readb(led->dev->parent, WLED_SYNC_REG, &val);
if (rc) {
@@ -564,9 +568,7 @@
static int __devinit init_wled(struct pm8xxx_led_data *led)
{
int rc, i;
- u8 val, num_wled_strings;
-
- num_wled_strings = led->wled_cfg->num_strings;
+ u8 val, string_max_current;
/* program over voltage protection threshold */
if (led->wled_cfg->ovp_val > WLED_OVP_37V) {
@@ -640,38 +642,61 @@
}
/* program activation delay and maximum current */
- for (i = 0; i < num_wled_strings; i++) {
- rc = pm8xxx_readb(led->dev->parent,
- WLED_MAX_CURR_CFG_REG(i), &val);
- if (rc) {
- dev_err(led->dev->parent, "can't read wled max current"
- " config register rc=%d\n", rc);
- return rc;
- }
+ for (i = 0; i < WLED_STRINGS; i++) {
+ if (led->wled_cfg->strings & (1 << i)) {
+ rc = pm8xxx_readb(led->dev->parent,
+ WLED_MAX_CURR_CFG_REG(i), &val);
+ if (rc) {
+ dev_err(led->dev->parent,
+ "can't read wled max current"
+ " config register rc=%d\n", rc);
+ return rc;
+ }
- if ((led->wled_cfg->ctrl_delay_us % WLED_CTL_DLY_STEP) ||
- (led->wled_cfg->ctrl_delay_us > WLED_CTL_DLY_MAX)) {
- dev_err(led->dev->parent, "Invalid control delay\n");
- return rc;
- }
+ if ((led->wled_cfg->ctrl_delay_us % WLED_CTL_DLY_STEP)
+ || (led->wled_cfg->ctrl_delay_us >
+ WLED_CTL_DLY_MAX)) {
+ dev_err(led->dev->parent,
+ "Invalid control delay\n");
+ return rc;
+ }
- val = val / WLED_CTL_DLY_STEP;
- val = (val & ~WLED_CTL_DLY_MASK) |
- (led->wled_cfg->ctrl_delay_us << WLED_CTL_DLY_BIT_SHFT);
+ val = val / WLED_CTL_DLY_STEP;
+ val = (val & ~WLED_CTL_DLY_MASK) |
+ (led->wled_cfg->ctrl_delay_us <<
+ WLED_CTL_DLY_BIT_SHFT);
- if ((led->max_current > WLED_MAX_CURR)) {
- dev_err(led->dev->parent, "Invalid max current\n");
- return -EINVAL;
- }
+ if ((led->max_current > WLED_MAX_CURR)) {
+ dev_err(led->dev->parent,
+ "Invalid max current\n");
+ return -EINVAL;
+ }
+ if (led->wled_cfg->max_current_ind) {
+ switch (i) {
+ case WLED_STRING_ONE:
+ string_max_current = led->wled_cfg->max_one;
+ break;
+ case WLED_STRING_TWO:
+ string_max_current = led->wled_cfg->max_two;
+ break;
+ case WLED_STRING_THREE:
+ string_max_current = led->wled_cfg->max_three;
+ break;
+ default:
+ return -EINVAL;
+ }
+ val = (val & ~WLED_MAX_CURR_MASK) | string_max_current;
+ } else
+ val = (val & ~WLED_MAX_CURR_MASK) | led->max_current;
- val = (val & ~WLED_MAX_CURR_MASK) | led->max_current;
-
- rc = pm8xxx_writeb(led->dev->parent,
- WLED_MAX_CURR_CFG_REG(i), val);
- if (rc) {
- dev_err(led->dev->parent, "can't write wled max current"
- " config register rc=%d\n", rc);
- return rc;
+ rc = pm8xxx_writeb(led->dev->parent,
+ WLED_MAX_CURR_CFG_REG(i), val);
+ if (rc) {
+ dev_err(led->dev->parent,
+ "can't write wled max current"
+ " config register rc=%d\n", rc);
+ return rc;
+ }
}
}
@@ -683,19 +708,7 @@
return rc;
}
- switch (num_wled_strings) {
- case ONE_WLED_STRING:
- val |= (WLED_CABC_ONE_STRING << WLED_CABC_SHIFT);
- break;
- case TWO_WLED_STRINGS:
- val |= (WLED_CABC_TWO_STRING << WLED_CABC_SHIFT);
- break;
- case THREE_WLED_STRINGS:
- val |= (WLED_CABC_THREE_STRING << WLED_CABC_SHIFT);
- break;
- default:
- break;
- }
+ val |= (led->wled_cfg->strings << WLED_CABC_SHIFT);
rc = pm8xxx_writeb(led->dev->parent, WLED_SYNC_REG, val);
if (rc) {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index fa0bf18..f08644f 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -369,15 +369,10 @@
msm_isp_send_event(vfe_dev, ISP_EVENT_SOF, &sof_event);
}
-void msm_isp_calculate_framedrop(
- struct msm_vfe_axi_shared_data *axi_data,
- struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
+uint32_t msm_isp_get_framedrop_period(
+ enum msm_vfe_frame_skip_pattern frame_skip_pattern)
{
- struct msm_vfe_axi_stream *stream_info =
- &axi_data->stream_info[
- (stream_cfg_cmd->axi_stream_handle & 0xFF)];
- uint32_t framedrop_period = 1;
- switch (stream_cfg_cmd->frame_skip_pattern) {
+ switch (frame_skip_pattern) {
case NO_SKIP:
case EVERY_2FRAME:
case EVERY_3FRAME:
@@ -386,18 +381,28 @@
case EVERY_6FRAME:
case EVERY_7FRAME:
case EVERY_8FRAME:
- framedrop_period = stream_cfg_cmd->frame_skip_pattern + 1;
- break;
+ return frame_skip_pattern + 1;
case EVERY_16FRAME:
- framedrop_period = 16;
+ return 16;
break;
case EVERY_32FRAME:
- framedrop_period = 32;
+ return 32;
break;
default:
- framedrop_period = 1;
- break;
+ return 1;
}
+ return 1;
+}
+
+void msm_isp_calculate_framedrop(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
+{
+ struct msm_vfe_axi_stream *stream_info =
+ &axi_data->stream_info[
+ (stream_cfg_cmd->axi_stream_handle & 0xFF)];
+ uint32_t framedrop_period = msm_isp_get_framedrop_period(
+ stream_cfg_cmd->frame_skip_pattern);
stream_info->framedrop_pattern = 0x1;
stream_info->framedrop_period = framedrop_period - 1;
@@ -911,8 +916,16 @@
stream_info->bufq_handle,
MSM_ISP_BUFFER_FLUSH_DIVERTED);
break;
- case UPDATE_STREAM_FRAMEDROP_PATTERN:
+ case UPDATE_STREAM_FRAMEDROP_PATTERN: {
+ uint32_t framedrop_period =
+ msm_isp_get_framedrop_period(update_cmd->skip_pattern);
+ stream_info->runtime_init_frame_drop = 0;
+ stream_info->framedrop_pattern = 0x1;
+ stream_info->framedrop_period = framedrop_period - 1;
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_framedrop(vfe_dev, stream_info);
break;
+ }
default:
pr_err("%s: Invalid update type\n", __func__);
return -EINVAL;
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index 7d0f9cb..691edc3 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -94,6 +94,13 @@
if (data > 0x1) {
unsigned long jiffes = msecs_to_jiffies(500);
long lrc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ispif->auto_complete_lock, flags);
+ ispif->wait_timeout = 0;
+ init_completion(&ispif->reset_complete);
+ spin_unlock_irqrestore(&ispif->auto_complete_lock, flags);
+
if (params->vfe_intf == VFE0)
msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR);
else
@@ -104,6 +111,11 @@
if (lrc < 0 || !lrc) {
pr_err("%s: wait timeout ret = %ld\n", __func__, lrc);
rc = -EIO;
+
+ spin_lock_irqsave(&ispif->auto_complete_lock, flags);
+ ispif->wait_timeout = 1;
+ spin_unlock_irqrestore(
+ &ispif->auto_complete_lock, flags);
}
}
return rc;
@@ -114,6 +126,12 @@
int rc = 0;
long lrc = 0;
unsigned long jiffes = msecs_to_jiffies(500);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ispif->auto_complete_lock, flags);
+ ispif->wait_timeout = 0;
+ init_completion(&ispif->reset_complete);
+ spin_unlock_irqrestore(&ispif->auto_complete_lock, flags);
BUG_ON(!ispif);
@@ -125,14 +143,17 @@
msm_camera_io_w_mb(ISPIF_RST_CMD_1_MASK, ispif->base +
ISPIF_RST_CMD_1_ADDR);
- CDBG("%s: Sending reset\n", __func__);
lrc = wait_for_completion_interruptible_timeout(
&ispif->reset_complete, jiffes);
+
if (lrc < 0 || !lrc) {
pr_err("%s: wait timeout ret = %ld\n", __func__, lrc);
rc = -EIO;
+
+ spin_lock_irqsave(&ispif->auto_complete_lock, flags);
+ ispif->wait_timeout = 1;
+ spin_unlock_irqrestore(&ispif->auto_complete_lock, flags);
}
- CDBG("%s: reset returned\n", __func__);
return rc;
}
@@ -571,8 +592,14 @@
ispif->base + ISPIF_IRQ_CLEAR_2_ADDR);
if (out[VFE0].ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
- if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ)
- complete(&ispif->reset_complete);
+ if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ) {
+ unsigned long flags;
+ spin_lock_irqsave(&ispif->auto_complete_lock, flags);
+ if (ispif->wait_timeout == 0)
+ complete(&ispif->reset_complete);
+ spin_unlock_irqrestore(
+ &ispif->auto_complete_lock, flags);
+ }
if (out[VFE0].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
pr_err("%s: VFE0 pix0 overflow.\n", __func__);
@@ -709,8 +736,6 @@
goto error_irq;
}
- init_completion(&ispif->reset_complete);
-
rc = msm_ispif_reset(ispif);
if (rc == 0) {
ispif->ispif_state = ISPIF_POWER_UP;
@@ -830,13 +855,8 @@
struct ispif_device *ispif = v4l2_get_subdevdata(sd);
mutex_lock(&ispif->mutex);
- if (ispif->open_cnt > 0) {
- CDBG("%s: dev already open\n", __func__);
- goto end;
- }
/* mem remap is done in init when the clock is on */
ispif->open_cnt++;
-end:
mutex_unlock(&ispif->mutex);
return 0;
}
@@ -940,7 +960,8 @@
ispif->pdev = pdev;
ispif->ispif_state = ISPIF_POWER_DOWN;
ispif->open_cnt = 0;
-
+ spin_lock_init(&ispif->auto_complete_lock);
+ ispif->wait_timeout = 0;
return 0;
error:
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
index c4418c1..f8c3cce 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
@@ -48,6 +48,8 @@
struct mutex mutex;
uint8_t start_ack_pending;
struct completion reset_complete;
+ spinlock_t auto_complete_lock;
+ uint8_t wait_timeout;
uint32_t csid_version;
int enb_dump_reg;
uint32_t open_cnt;
diff --git a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
index 9af6674..8a21512 100644
--- a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
@@ -35,7 +35,9 @@
}
new_entry->session_id = buf_info->session_id;
new_entry->stream_id = buf_info->stream_id;
+ mutex_lock(&buf_mngr_dev->buf_q_lock);
list_add_tail(&new_entry->entry, &buf_mngr_dev->buf_qhead);
+ mutex_unlock(&buf_mngr_dev->buf_q_lock);
buf_info->index = new_entry->vb2_buf->v4l2_buf.index;
return 0;
}
@@ -45,6 +47,8 @@
{
struct msm_get_bufs *bufs, *save;
int ret = -EINVAL;
+
+ mutex_lock(&buf_mngr_dev->buf_q_lock);
list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
if ((bufs->session_id == buf_info->session_id) &&
(bufs->stream_id == buf_info->stream_id) &&
@@ -60,6 +64,7 @@
break;
}
}
+ mutex_unlock(&buf_mngr_dev->buf_q_lock);
return ret;
}
@@ -70,6 +75,7 @@
struct msm_get_bufs *bufs, *save;
int ret = -EINVAL;
+ mutex_lock(&buf_mngr_dev->buf_q_lock);
list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
if ((bufs->session_id == buf_info->session_id) &&
(bufs->stream_id == buf_info->stream_id) &&
@@ -81,6 +87,7 @@
break;
}
}
+ mutex_unlock(&buf_mngr_dev->buf_q_lock);
return ret;
}
@@ -156,12 +163,14 @@
&msm_buf_mngr_dev->vb2_ops);
INIT_LIST_HEAD(&msm_buf_mngr_dev->buf_qhead);
+ mutex_init(&msm_buf_mngr_dev->buf_q_lock);
end:
return rc;
}
static void __exit msm_buf_mngr_exit(void)
{
+ mutex_destroy(&msm_buf_mngr_dev->buf_q_lock);
kfree(msm_buf_mngr_dev);
}
diff --git a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.h b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.h
index 7e588cc..a2b3a7e 100644
--- a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.h
+++ b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.h
@@ -33,6 +33,7 @@
struct msm_buf_mngr_device {
struct list_head buf_qhead;
+ struct mutex buf_q_lock;
struct msm_sd_subdev subdev;
struct msm_sd_req_vb2_q vb2_ops;
};
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 9f0ad19..637bce3 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -553,8 +553,6 @@
struct msm_cpp_frame_info_t *new_frame =
kzalloc(sizeof(struct msm_cpp_frame_info_t), GFP_KERNEL);
uint32_t *cpp_frame_msg;
- struct ion_handle *src_ion_handle = NULL;
- struct ion_handle *dest_ion_handle = NULL;
unsigned long len;
unsigned long in_phyaddr, out_phyaddr;
uint16_t num_stripes = 0;
@@ -595,41 +593,41 @@
CPP_DBG("CPP in_fd: %d out_fd: %d\n", new_frame->src_fd,
new_frame->dst_fd);
- src_ion_handle = ion_import_dma_buf(cpp_dev->client,
+ new_frame->src_ion_handle = ion_import_dma_buf(cpp_dev->client,
new_frame->src_fd);
- if (IS_ERR_OR_NULL(src_ion_handle)) {
+ if (IS_ERR_OR_NULL(new_frame->src_ion_handle)) {
pr_err("ION import failed\n");
- rc = PTR_ERR(src_ion_handle);
+ rc = PTR_ERR(new_frame->src_ion_handle);
goto ERROR2;
}
- rc = ion_map_iommu(cpp_dev->client, src_ion_handle,
+
+ rc = ion_map_iommu(cpp_dev->client, new_frame->src_ion_handle,
cpp_dev->domain_num, 0, SZ_4K, 0,
(unsigned long *)&in_phyaddr, &len, 0, 0);
if (rc < 0) {
pr_err("ION import failed\n");
- rc = PTR_ERR(src_ion_handle);
+ rc = PTR_ERR(new_frame->src_ion_handle);
goto ERROR3;
}
CPP_DBG("in phy addr: 0x%x len: %ld\n", (uint32_t) in_phyaddr, len);
-
- dest_ion_handle = ion_import_dma_buf(cpp_dev->client,
+ new_frame->dest_ion_handle = ion_import_dma_buf(cpp_dev->client,
new_frame->dst_fd);
- if (IS_ERR_OR_NULL(dest_ion_handle)) {
+ if (IS_ERR_OR_NULL(new_frame->dest_ion_handle)) {
pr_err("ION import failed\n");
- rc = PTR_ERR(dest_ion_handle);
+ rc = PTR_ERR(new_frame->dest_ion_handle);
goto ERROR4;
}
- rc = ion_map_iommu(cpp_dev->client, dest_ion_handle,
+
+ rc = ion_map_iommu(cpp_dev->client, new_frame->dest_ion_handle,
cpp_dev->domain_num, 0, SZ_4K, 0,
(unsigned long *)&out_phyaddr, &len, 0, 0);
if (rc < 0) {
- rc = PTR_ERR(dest_ion_handle);
+ rc = PTR_ERR(new_frame->dest_ion_handle);
goto ERROR5;
}
CPP_DBG("out phy addr: 0x%x len: %ld\n", (uint32_t)out_phyaddr, len);
-
num_stripes = ((cpp_frame_msg[12] >> 20) & 0x3FF) +
((cpp_frame_msg[12] >> 10) & 0x3FF) +
(cpp_frame_msg[12] & 0x3FF);
@@ -667,15 +665,17 @@
ERROR7:
kfree(frame_qcmd);
ERROR6:
- ion_unmap_iommu(cpp_dev->client, dest_ion_handle,
+ ion_unmap_iommu(cpp_dev->client, new_frame->dest_ion_handle,
cpp_dev->domain_num, 0);
ERROR5:
- ion_free(cpp_dev->client, dest_ion_handle);
+ ion_free(cpp_dev->client, new_frame->dest_ion_handle);
+ new_frame->dest_ion_handle = NULL;
ERROR4:
- ion_unmap_iommu(cpp_dev->client, src_ion_handle,
+ ion_unmap_iommu(cpp_dev->client, new_frame->src_ion_handle,
cpp_dev->domain_num, 0);
ERROR3:
- ion_free(cpp_dev->client, src_ion_handle);
+ ion_free(cpp_dev->client, new_frame->src_ion_handle);
+ new_frame->src_ion_handle = NULL;
ERROR2:
kfree(cpp_frame_msg);
ERROR1:
@@ -719,6 +719,24 @@
mutex_unlock(&cpp_dev->mutex);
return -EINVAL;
}
+ if (process_frame->dest_ion_handle) {
+ ion_unmap_iommu(cpp_dev->client,
+ process_frame->dest_ion_handle,
+ cpp_dev->domain_num, 0);
+ ion_free(cpp_dev->client,
+ process_frame->dest_ion_handle);
+ process_frame->dest_ion_handle = NULL;
+ }
+
+ if (process_frame->src_ion_handle) {
+ ion_unmap_iommu(cpp_dev->client,
+ process_frame->src_ion_handle,
+ cpp_dev->domain_num, 0);
+ ion_free(cpp_dev->client,
+ process_frame->src_ion_handle);
+ process_frame->src_ion_handle = NULL;
+ }
+
kfree(process_frame->cpp_cmd_msg);
kfree(process_frame);
kfree(event_qcmd);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
index 2999a23..2c8c8b8 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -147,7 +147,6 @@
{
uint32_t irq;
struct csid_device *csid_dev = data;
- uint32_t val = 0;
void __iomem *csidbase;
csidbase = csid_dev->base;
@@ -164,12 +163,6 @@
pr_debug("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n",
__func__, csid_dev->pdev->id, irq);
irq_count++;
- if (irq_count >= 5) {
- msm_camera_io_w(0x7f010800 | val,
- csidbase + CSID_IRQ_MASK_ADDR);
- msm_camera_io_w(0x7f010800 | val,
- csidbase + CSID_IRQ_CLEAR_CMD_ADDR);
- }
}
msm_camera_io_w(irq, csid_dev->base + CSID_IRQ_CLEAR_CMD_ADDR);
return IRQ_HANDLED;
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index 3d2fe4f..9d89a7e 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -18,7 +18,7 @@
#include "mpq_dmx_plugin_common.h"
#include "mpq_sdmx.h"
-#define SDMX_MAJOR_VERSION_MATCH (2)
+#define SDMX_MAJOR_VERSION_MATCH (3)
#define TS_PACKET_HEADER_LENGTH (4)
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
index 0bd04e8..d292992 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
+++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
@@ -43,6 +43,7 @@
/* Filter-level status indicators */
#define SDMX_FILTER_STATUS_EOS BIT(0)
+#define SDMX_FILTER_STATUS_WR_PTR_CHANGED BIT(1)
/* Filter-level flags */
#define SDMX_FILTER_FLAG_VERIFY_SECTION_CRC BIT(0)
@@ -91,10 +92,9 @@
SDMX_STATUS_SINGLE_PID_RAW_FILTER = -11,
SDMX_STATUS_INP_BUF_INVALID_PARAMS = -12,
SDMX_STATUS_INVALID_FILTER_CFG = -13,
- SDMX_STATUS_ILLEGAL_WR_PTR_CHANGE = -14,
- SDMX_STATUS_STALLED_IN_PULL_MODE = -15,
- SDMX_STATUS_SECURITY_FAULT = -16,
- SDMX_STATUS_NS_BUFFER_ERROR = -17,
+ SDMX_STATUS_STALLED_IN_PULL_MODE = -14,
+ SDMX_STATUS_SECURITY_FAULT = -15,
+ SDMX_STATUS_NS_BUFFER_ERROR = -16,
};
enum sdmx_filter {
diff --git a/drivers/media/platform/msm/vcap/vcap_vp.c b/drivers/media/platform/msm/vcap/vcap_vp.c
index abc4e7e..aba7095 100644
--- a/drivers/media/platform/msm/vcap/vcap_vp.c
+++ b/drivers/media/platform/msm/vcap/vcap_vp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -472,7 +472,7 @@
int rc;
struct vcap_dev *dev = c_data->dev;
struct ion_handle *handle = NULL;
- unsigned long paddr, len, ionflag = 0;
+ unsigned long paddr, len;
void *vaddr;
size_t size = ((c_data->vp_out_fmt.width + 63) >> 6) *
((c_data->vp_out_fmt.height + 7) >> 3) * 16;
@@ -489,13 +489,6 @@
return -ENOMEM;
}
- rc = ion_handle_get_flags(dev->ion_client, handle, &ionflag);
- if (rc) {
- pr_err("%s: get flags ion handle failed\n", __func__);
- ion_free(dev->ion_client, handle);
- return rc;
- }
-
vaddr = ion_map_kernel(dev->ion_client, handle);
if (IS_ERR(vaddr)) {
pr_err("%s: Map motion buffer failed\n", __func__);
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 6402437..cee48c7 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -544,7 +544,19 @@
(1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_I_FRAME)
),
.cluster = 0,
- }
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE,
+ .name = "Secure mode",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+ .minimum = 0,
+ .maximum = 0,
+ .default_value = 0,
+ .step = 0,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ .cluster = 0,
+ },
};
#define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
@@ -1434,6 +1446,10 @@
}
pdata = &enable;
break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
+ inst->mode = VIDC_SECURE;
+ dprintk(VIDC_INFO, "Setting secure mode to: %d\n", inst->mode);
+ break;
default:
rc = -ENOTSUPP;
break;
diff --git a/drivers/media/platform/msm/wfd/enc-subdev.h b/drivers/media/platform/msm/wfd/enc-subdev.h
index 93c0079..8bfb884 100644
--- a/drivers/media/platform/msm/wfd/enc-subdev.h
+++ b/drivers/media/platform/msm/wfd/enc-subdev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -76,6 +76,8 @@
(a->offset == b->offset);
else if (a->kvaddr || b->kvaddr)
return a->kvaddr == b->kvaddr;
+ else if (a->paddr || b->paddr)
+ return a->paddr == b->paddr;
else
return false;
}
@@ -107,6 +109,7 @@
#define ENC_MMAP _IOWR('V', 25, struct mem_region_map *)
#define ENC_MUNMAP _IOWR('V', 26, struct mem_region_map *)
#define SET_FRAMERATE_MODE _IO('V', 27)
+#define ENC_SECURE _IO('V', 28)
extern int venc_init(struct v4l2_subdev *sd, u32 val);
extern int venc_load_fw(struct v4l2_subdev *sd);
diff --git a/drivers/media/platform/msm/wfd/enc-venus-subdev.c b/drivers/media/platform/msm/wfd/enc-venus-subdev.c
index 00d0d07..b41ece6 100644
--- a/drivers/media/platform/msm/wfd/enc-venus-subdev.c
+++ b/drivers/media/platform/msm/wfd/enc-venus-subdev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -252,12 +252,23 @@
return msm_vidc_s_ctrl(inst->vidc_context, &ctrl);
}
+static long get_iommu_domain(struct venc_inst *inst)
+{
+ struct msm_vidc_iommu_info maps[MAX_MAP];
+ int rc = msm_vidc_get_iommu_maps(inst->vidc_context, maps);
+ if (rc) {
+ WFD_MSG_ERR("Failed to retreive domain mappings\n");
+ return rc;
+ }
+
+ return maps[inst->secure ? CP_MAP : NS_MAP].domain;
+}
+
static long venc_open(struct v4l2_subdev *sd, void *arg)
{
struct venc_inst *inst = NULL;
struct venc_msg_ops *vmops = arg;
struct v4l2_event_subscription event = {0};
- struct msm_vidc_iommu_info maps[MAX_MAP];
int rc = 0;
if (!vmops) {
@@ -305,15 +316,12 @@
goto vidc_subscribe_fail;
}
- rc = msm_vidc_get_iommu_maps(inst->vidc_context, maps);
- if (rc) {
- WFD_MSG_ERR("Failed to retreive domain mappings\n");
- rc = -ENODATA;
+ inst->domain = get_iommu_domain(inst);
+ if (inst->domain < 0) {
+ WFD_MSG_ERR("Failed to get domain\n");
goto vidc_subscribe_fail;
}
- inst->domain = maps[inst->secure ? CP_MAP : NS_MAP].domain;
-
inst->callback_thread = kthread_run(venc_vidc_callback_thread, inst,
"venc_vidc_callback_thread");
if (IS_ERR(inst->callback_thread)) {
@@ -477,7 +485,8 @@
}
bufreq->count = v4l2_bufreq.count;
- bufreq->size = v4l2_format.fmt.pix_mp.plane_fmt[0].sizeimage;
+ bufreq->size = ALIGN(v4l2_format.fmt.pix_mp.plane_fmt[0].sizeimage,
+ inst->secure ? SZ_1M : SZ_4K);
inst->free_input_indices.size_bits = bufreq->count;
inst->free_input_indices.size = roundup(bufreq->count,
@@ -632,12 +641,19 @@
struct mem_region *mregion)
{
int rc = 0;
- unsigned long flags = 0, size = 0;
+ unsigned long size = 0, align_req = 0;
if (!mregion) {
rc = -EINVAL;
goto venc_map_fail;
}
+ align_req = inst->secure ? SZ_1M : SZ_4K;
+ if (mregion->size % align_req != 0) {
+ WFD_MSG_ERR("Memregion not aligned to %ld\n", align_req);
+ rc = -EINVAL;
+ goto venc_map_fail;
+ }
+
mregion->ion_handle = ion_import_dma_buf(venc_ion_client, mregion->fd);
if (IS_ERR_OR_NULL(mregion->ion_handle)) {
rc = PTR_ERR(mregion->ion_handle);
@@ -647,25 +663,31 @@
goto venc_map_fail;
}
- rc = ion_handle_get_flags(venc_ion_client, mregion->ion_handle, &flags);
- if (rc) {
- WFD_MSG_ERR("Failed to get ion flags %d\n", rc);
- goto venc_map_fail;
+ if (!inst->secure) {
+ mregion->kvaddr = ion_map_kernel(venc_ion_client,
+ mregion->ion_handle);
+ if (IS_ERR_OR_NULL(mregion->kvaddr)) {
+ WFD_MSG_ERR("Failed to map buffer into kernel\n");
+ rc = PTR_ERR(mregion->kvaddr);
+ mregion->kvaddr = NULL;
+ goto venc_map_fail;
+ }
+ } else {
+ mregion->kvaddr = NULL;
}
- mregion->kvaddr = ion_map_kernel(venc_ion_client,
- mregion->ion_handle);
-
- if (IS_ERR_OR_NULL(mregion->kvaddr)) {
- WFD_MSG_ERR("Failed to map buffer into kernel\n");
- rc = PTR_ERR(mregion->kvaddr);
- mregion->kvaddr = NULL;
- goto venc_map_fail;
+ if (inst->secure) {
+ rc = msm_ion_secure_buffer(venc_ion_client,
+ mregion->ion_handle, VIDEO_BITSTREAM, 0);
+ if (rc) {
+ WFD_MSG_ERR("Failed to secure output buffer\n");
+ goto venc_map_iommu_map_fail;
+ }
}
rc = ion_map_iommu(venc_ion_client, mregion->ion_handle,
- inst->domain, 0, SZ_4K, 0,
- (unsigned long *)&mregion->paddr, &size, flags, 0);
+ inst->domain, 0, align_req, 0,
+ (unsigned long *)&mregion->paddr, &size, 0, 0);
if (rc) {
WFD_MSG_ERR("Failed to map into iommu\n");
@@ -679,8 +701,12 @@
venc_map_iommu_size_fail:
ion_unmap_iommu(venc_ion_client, mregion->ion_handle,
inst->domain, 0);
+
+ if (inst->secure)
+ msm_ion_unsecure_buffer(venc_ion_client, mregion->ion_handle);
venc_map_iommu_map_fail:
- ion_unmap_kernel(venc_ion_client, mregion->ion_handle);
+ if (!inst->secure)
+ ion_unmap_kernel(venc_ion_client, mregion->ion_handle);
venc_map_fail:
return rc;
}
@@ -702,6 +728,8 @@
mregion->kvaddr = NULL;
}
+ if (inst->secure)
+ msm_ion_unsecure_buffer(venc_ion_client, mregion->ion_handle);
return 0;
}
@@ -787,7 +815,7 @@
{
struct venc_inst *inst = NULL;
struct v4l2_format *fmt = arg, temp;
- int rc = 0;
+ int rc = 0, align_req = 0;
if (!sd) {
WFD_MSG_ERR("Subdevice required for %s\n", __func__);
@@ -823,7 +851,10 @@
rc = -EINVAL;
goto venc_set_format_fail;
}
- fmt->fmt.pix.sizeimage = temp.fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ align_req = inst->secure ? SZ_1M : SZ_4K;
+ fmt->fmt.pix.sizeimage = ALIGN(temp.fmt.pix_mp.plane_fmt[0].sizeimage,
+ align_req);
inst->num_output_planes = temp.fmt.pix_mp.num_planes;
temp.type = BUF_TYPE_INPUT;
@@ -995,7 +1026,6 @@
WFD_MSG_ERR("Trying to free a buffer of unknown type\n");
return -EINVAL;
}
-
mregion = get_registered_mregion(buf_list, to_free);
if (!mregion) {
@@ -1115,7 +1145,7 @@
{
struct mem_region_map *mmap = arg;
struct mem_region *mregion = NULL;
- unsigned long rc = 0, size = 0;
+ unsigned long rc = 0, size = 0, align_req = 0;
void *paddr = NULL;
struct venc_inst *inst = NULL;
@@ -1129,24 +1159,47 @@
inst = (struct venc_inst *)sd->dev_priv;
mregion = mmap->mregion;
- if (mregion->size % SZ_4K != 0) {
- WFD_MSG_ERR("Memregion not aligned to %d\n", SZ_4K);
- return -EINVAL;
+
+ align_req = inst->secure ? SZ_1M : SZ_4K;
+ if (mregion->size % align_req != 0) {
+ WFD_MSG_ERR("Memregion not aligned to %ld\n", align_req);
+ rc = -EINVAL;
+ goto venc_map_bad_align;
+ }
+
+ if (inst->secure) {
+ rc = msm_ion_secure_buffer(mmap->ion_client,
+ mregion->ion_handle, VIDEO_PIXEL, 0);
+ if (rc) {
+ WFD_MSG_ERR("Failed to secure input buffer\n");
+ goto venc_map_bad_align;
+ }
}
rc = ion_map_iommu(mmap->ion_client, mregion->ion_handle,
- inst->domain, 0, SZ_4K, 0, (unsigned long *)&paddr,
+ inst->domain, 0, align_req, 0, (unsigned long *)&paddr,
&size, 0, 0);
if (rc) {
- WFD_MSG_ERR("Failed to get physical addr\n");
+ WFD_MSG_ERR("Failed to get physical addr %ld\n", rc);
paddr = NULL;
+ goto venc_map_bad_align;
} else if (size < mregion->size) {
WFD_MSG_ERR("Failed to map enough memory\n");
rc = -ENOMEM;
+ goto venc_map_iommu_size_fail;
}
mregion->paddr = paddr;
+ return 0;
+
+venc_map_iommu_size_fail:
+ ion_unmap_iommu(venc_ion_client, mregion->ion_handle,
+ inst->domain, 0);
+
+ if (inst->secure)
+ msm_ion_unsecure_buffer(mmap->ion_client, mregion->ion_handle);
+venc_map_bad_align:
return rc;
}
@@ -1167,8 +1220,13 @@
inst = (struct venc_inst *)sd->dev_priv;
mregion = mmap->mregion;
- ion_unmap_iommu(mmap->ion_client, mregion->ion_handle,
+ if (mregion->paddr)
+ ion_unmap_iommu(mmap->ion_client, mregion->ion_handle,
inst->domain, 0);
+
+ if (inst->secure)
+ msm_ion_unsecure_buffer(mmap->ion_client, mregion->ion_handle);
+
return 0;
}
@@ -1181,6 +1239,55 @@
return 0;
}
+static long secure_toggle(struct venc_inst *inst, bool secure)
+{
+ if (inst->secure == secure)
+ return 0;
+
+ if (!list_empty(&inst->registered_input_bufs.list) ||
+ !list_empty(&inst->registered_output_bufs.list)) {
+ WFD_MSG_ERR(
+ "Attempt to (un)secure encoder not allowed after registering buffers"
+ );
+ return -EEXIST;
+ }
+
+ inst->secure = secure;
+ inst->domain = get_iommu_domain(inst);
+ return 0;
+}
+
+static long venc_secure(struct v4l2_subdev *sd)
+{
+ struct venc_inst *inst = NULL;
+ struct v4l2_control ctrl;
+ int rc = 0;
+
+ if (!sd) {
+ WFD_MSG_ERR("Subdevice required for %s\n", __func__);
+ return -EINVAL;
+ }
+
+ inst = sd->dev_priv;
+ rc = secure_toggle(inst, true);
+ if (rc) {
+ WFD_MSG_ERR("Failed to toggle into secure mode\n");
+ goto secure_fail;
+ }
+
+ ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE;
+ rc = msm_vidc_s_ctrl(inst->vidc_context, &ctrl);
+ if (rc) {
+ WFD_MSG_ERR("Failed to move vidc into secure mode\n");
+ goto secure_fail;
+ }
+
+ return 0;
+secure_fail:
+ secure_toggle(sd->dev_priv, false);
+ return rc;
+}
+
long venc_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
long rc = 0;
@@ -1253,6 +1360,9 @@
case SET_FRAMERATE_MODE:
rc = venc_set_framerate_mode(sd, arg);
break;
+ case ENC_SECURE:
+ rc = venc_secure(sd);
+ break;
default:
WFD_MSG_ERR("Unknown ioctl %d to enc-subdev\n", cmd);
rc = -ENOTSUPP;
diff --git a/drivers/media/platform/msm/wfd/mdp-5-subdev.c b/drivers/media/platform/msm/wfd/mdp-5-subdev.c
index 218bbe5..4089a99 100644
--- a/drivers/media/platform/msm/wfd/mdp-5-subdev.c
+++ b/drivers/media/platform/msm/wfd/mdp-5-subdev.c
@@ -47,6 +47,10 @@
WFD_MSG_ERR("Invalid arguments\n");
rc = -EINVAL;
goto mdp_open_fail;
+ } else if (mops->secure) {
+ /* Deprecated API; use MDP_SECURE ioctl */
+ WFD_MSG_ERR("Deprecated API for securing subdevice\n");
+ return -ENOTSUPP;
}
fbi = msm_fb_get_writeback_fb();
@@ -120,6 +124,8 @@
struct fb_info *fbi = NULL;
if (inst) {
fbi = (struct fb_info *)inst->mdp;
+ if (inst->secure)
+ msm_fb_writeback_set_secure(inst->mdp, false);
msm_fb_writeback_terminate(fbi);
kfree(inst);
/* Unregister wfd node from switch driver */
@@ -193,10 +199,10 @@
int mdp_mmap(struct v4l2_subdev *sd, void *arg)
{
- int rc = 0;
+ int rc = 0, align = 0;
struct mem_region_map *mmap = arg;
struct mem_region *mregion;
- bool domain = -1;
+ int domain = -1;
struct mdp_instance *inst = NULL;
if (!mmap || !mmap->mregion || !mmap->cookie) {
@@ -206,17 +212,41 @@
inst = mmap->cookie;
mregion = mmap->mregion;
- if (mregion->size % SZ_4K != 0) {
- WFD_MSG_ERR("Memregion not aligned to %d\n", SZ_4K);
+ align = inst->secure ? SZ_1M : SZ_4K;
+ if (mregion->size % align != 0) {
+ WFD_MSG_ERR("Memregion not aligned to %d\n", align);
return -EINVAL;
}
- domain = msm_fb_get_iommu_domain(inst->mdp, MDP_IOMMU_DOMAIN_NS);
+ if (inst->secure) {
+ rc = msm_ion_secure_buffer(mmap->ion_client,
+ mregion->ion_handle, VIDEO_PIXEL, 0);
+ if (rc) {
+ WFD_MSG_ERR("Failed to secure input buffer\n");
+ goto secure_fail;
+ }
+ }
+
+ domain = msm_fb_get_iommu_domain(inst->mdp,
+ inst->secure ? MDP_IOMMU_DOMAIN_CP :
+ MDP_IOMMU_DOMAIN_NS);
+
rc = ion_map_iommu(mmap->ion_client, mregion->ion_handle,
- domain, 0, SZ_4K, 0,
+ domain, 0, align, 0,
(unsigned long *)&mregion->paddr,
(unsigned long *)&mregion->size,
0, 0);
+ if (rc) {
+ WFD_MSG_ERR("Failed to map into %ssecure domain: %d\n",
+ !inst->secure ? "non" : "", rc);
+ goto iommu_fail;
+ }
+
+ return 0;
+iommu_fail:
+ if (inst->secure)
+ msm_ion_unsecure_buffer(mmap->ion_client, mregion->ion_handle);
+secure_fail:
return rc;
}
@@ -224,7 +254,7 @@
{
struct mem_region_map *mmap = arg;
struct mem_region *mregion;
- bool domain = -1;
+ int domain = -1;
struct mdp_instance *inst = NULL;
if (!mmap || !mmap->mregion || !mmap->cookie) {
@@ -235,13 +265,37 @@
inst = mmap->cookie;
mregion = mmap->mregion;
- domain = msm_fb_get_iommu_domain(inst->mdp, MDP_IOMMU_DOMAIN_NS);
+ domain = msm_fb_get_iommu_domain(inst->mdp,
+ inst->secure ? MDP_IOMMU_DOMAIN_CP :
+ MDP_IOMMU_DOMAIN_NS);
ion_unmap_iommu(mmap->ion_client,
mregion->ion_handle,
domain, 0);
+
+ if (inst->secure)
+ msm_ion_unsecure_buffer(mmap->ion_client, mregion->ion_handle);
+
return 0;
}
+int mdp_secure(struct v4l2_subdev *sd, void *arg)
+{
+ struct mdp_instance *inst = NULL;
+ int rc = 0;
+
+ if (!arg) {
+ WFD_MSG_ERR("Invalid argument\n");
+ return -EINVAL;
+ }
+
+ inst = arg;
+ rc = msm_fb_writeback_set_secure(inst->mdp, true);
+ if (!rc)
+ inst->secure = true;
+
+ return rc;
+}
+
long mdp_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
int rc = 0;
@@ -277,6 +331,9 @@
case MDP_MUNMAP:
rc = mdp_munmap(sd, arg);
break;
+ case MDP_SECURE:
+ rc = mdp_secure(sd, arg);
+ break;
default:
WFD_MSG_ERR("IOCTL: %u not supported\n", cmd);
rc = -EINVAL;
diff --git a/drivers/media/platform/msm/wfd/mdp-dummy-subdev.c b/drivers/media/platform/msm/wfd/mdp-dummy-subdev.c
index b2db208..2242c76 100644
--- a/drivers/media/platform/msm/wfd/mdp-dummy-subdev.c
+++ b/drivers/media/platform/msm/wfd/mdp-dummy-subdev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,11 +28,12 @@
struct mutex mutex;
};
-int mdp_init(struct v4l2_subdev *sd, u32 val)
+static int mdp_init(struct v4l2_subdev *sd, u32 val)
{
return 0;
}
-int mdp_open(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_open(struct v4l2_subdev *sd, void *arg)
{
struct mdp_instance *inst = kzalloc(sizeof(struct mdp_instance),
GFP_KERNEL);
@@ -50,49 +51,54 @@
return rc;
}
-int mdp_start(struct v4l2_subdev *sd, void *arg)
+static int mdp_start(struct v4l2_subdev *sd, void *arg)
{
return 0;
}
-int mdp_stop(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_stop(struct v4l2_subdev *sd, void *arg)
{
return 0;
}
-int mdp_close(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_close(struct v4l2_subdev *sd, void *arg)
{
return 0;
}
-int mdp_q_buffer(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_q_buffer(struct v4l2_subdev *sd, void *arg)
{
static int foo;
int rc = 0;
struct mdp_buf_info *binfo = arg;
struct mdp_instance *inst = NULL;
+ struct mdp_buf_queue *new_entry = NULL;
if (!binfo || !binfo->inst || !binfo->cookie) {
WFD_MSG_ERR("Invalid argument\n");
return -EINVAL;
}
-
inst = binfo->inst;
- if (binfo->kvaddr) {
- struct mdp_buf_queue *new_entry = kzalloc(sizeof(*new_entry),
- GFP_KERNEL);
- memset((void *)binfo->kvaddr, foo++, 1024);
- new_entry->mdp_buf_info = *binfo;
- mutex_lock(&inst->mutex);
- list_add_tail(&new_entry->node, &inst->mdp_bufs.node);
- mutex_unlock(&inst->mutex);
- WFD_MSG_DBG("Queue %p with cookie %p\n",
- (void *)binfo->paddr, (void *)binfo->cookie);
- } else {
- rc = -EINVAL;
- }
+ new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+ if (!new_entry)
+ return -ENOMEM;
+ new_entry->mdp_buf_info = *binfo;
+ if (binfo->kvaddr)
+ memset((void *)binfo->kvaddr, foo++, 1024);
+
+ mutex_lock(&inst->mutex);
+ list_add_tail(&new_entry->node, &inst->mdp_bufs.node);
+ mutex_unlock(&inst->mutex);
+
+ WFD_MSG_DBG("Queue %p with cookie %p\n",
+ (void *)binfo->paddr, (void *)binfo->cookie);
return rc;
}
-int mdp_dq_buffer(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_dq_buffer(struct v4l2_subdev *sd, void *arg)
{
struct mdp_buf_info *binfo = arg;
struct mdp_buf_queue *head = NULL;
@@ -121,12 +127,13 @@
return 0;
}
-int mdp_set_prop(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_set_prop(struct v4l2_subdev *sd, void *arg)
{
return 0;
}
-int mdp_mmap(struct v4l2_subdev *sd, void *arg)
+static int mdp_mmap(struct v4l2_subdev *sd, void *arg)
{
int rc = 0;
struct mem_region_map *mmap = arg;
@@ -137,12 +144,17 @@
return rc;
}
-int mdp_munmap(struct v4l2_subdev *sd, void *arg)
+static int mdp_munmap(struct v4l2_subdev *sd, void *arg)
{
/* Whatever */
return 0;
}
+static int mdp_secure(struct v4l2_subdev *sd)
+{
+ return 0;
+}
+
long mdp_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
int rc = 0;
@@ -178,6 +190,9 @@
case MDP_MUNMAP:
rc = mdp_munmap(sd, arg);
break;
+ case MDP_SECURE:
+ rc = mdp_secure(sd);
+ break;
default:
WFD_MSG_ERR("IOCTL: %u not supported\n", cmd);
rc = -EINVAL;
diff --git a/drivers/media/platform/msm/wfd/mdp-subdev.h b/drivers/media/platform/msm/wfd/mdp-subdev.h
index b04d448..f2c6fb1 100644
--- a/drivers/media/platform/msm/wfd/mdp-subdev.h
+++ b/drivers/media/platform/msm/wfd/mdp-subdev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -59,6 +59,7 @@
#define MDP_STOP _IOR(MDP_MAGIC_IOCTL, 7, void *)
#define MDP_MMAP _IOR(MDP_MAGIC_IOCTL, 8, struct mem_region_map *)
#define MDP_MUNMAP _IOR(MDP_MAGIC_IOCTL, 9, struct mem_region_map *)
+#define MDP_SECURE _IO(MDP_MAGIC_IOCTL, 9)
extern int mdp_init(struct v4l2_subdev *sd, u32 val);
diff --git a/drivers/media/platform/msm/wfd/wfd-ioctl.c b/drivers/media/platform/msm/wfd/wfd-ioctl.c
index 3b732ae..9fb7c6d 100644
--- a/drivers/media/platform/msm/wfd/wfd-ioctl.c
+++ b/drivers/media/platform/msm/wfd/wfd-ioctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -52,7 +52,7 @@
struct v4l2_subdev enc_sdev;
struct v4l2_subdev vsg_sdev;
struct ion_client *ion_client;
- bool secure_device;
+ bool secure;
bool in_use;
bool mdp_iommu_split_domain;
};
@@ -154,16 +154,16 @@
{
struct ion_handle *handle = NULL;
void *kvaddr = NULL;
- unsigned int alloc_regions = 0;
- unsigned int ion_flags = 0;
+ unsigned int alloc_regions = 0, ion_flags = 0, align = 0;
int rc = 0;
alloc_regions = ION_HEAP(ION_CP_MM_HEAP_ID);
alloc_regions |= secure ? 0 :
ION_HEAP(ION_IOMMU_HEAP_ID);
ion_flags |= secure ? ION_SECURE : 0;
- handle = ion_alloc(client,
- mregion->size, SZ_4K, alloc_regions, ion_flags);
+ align = secure ? SZ_1M : SZ_4K;
+ handle = ion_alloc(client, mregion->size, align,
+ alloc_regions, ion_flags);
if (IS_ERR_OR_NULL(handle)) {
WFD_MSG_ERR("Failed to allocate input buffer\n");
@@ -171,12 +171,16 @@
goto alloc_fail;
}
- kvaddr = ion_map_kernel(client, handle);
+ if (!secure) {
+ kvaddr = ion_map_kernel(client, handle);
- if (IS_ERR_OR_NULL(kvaddr)) {
- WFD_MSG_ERR("Failed to get virtual addr\n");
- rc = PTR_ERR(kvaddr);
- goto alloc_fail;
+ if (IS_ERR_OR_NULL(kvaddr)) {
+ WFD_MSG_ERR("Failed to get virtual addr\n");
+ rc = PTR_ERR(kvaddr);
+ goto alloc_fail;
+ }
+ } else {
+ kvaddr = NULL;
}
mregion->kvaddr = kvaddr;
@@ -206,7 +210,8 @@
"Invalid client or region");
return -EINVAL;
}
- ion_unmap_kernel(client, mregion->ion_handle);
+ if (mregion->kvaddr)
+ ion_unmap_kernel(client, mregion->ion_handle);
ion_free(client, mregion->ion_handle);
return 0;
}
@@ -256,7 +261,7 @@
enc_mregion->size = ALIGN(inst->input_buf_size, SZ_4K);
rc = wfd_allocate_ion_buffer(wfd_dev->ion_client,
- wfd_dev->secure_device, enc_mregion);
+ wfd_dev->secure, enc_mregion);
if (rc) {
WFD_MSG_ERR("Failed to allocate input memory\n");
goto alloc_fail;
@@ -391,6 +396,7 @@
&inst->input_mem_list) {
mpair = list_entry(ptr, struct mem_region_pair,
list);
+
rc = v4l2_subdev_call(&wfd_dev->enc_sdev,
core, ioctl, FREE_INPUT_BUFFER,
(void *)mpair->enc);
@@ -1004,8 +1010,31 @@
{
int rc = 0;
struct wfd_device *wfd_dev = video_drvdata(filp);
- rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
- ioctl, SET_PROP, a);
+ struct wfd_inst *inst = filp->private_data;
+
+ switch (a->id) {
+ case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
+ rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
+ ioctl, ENC_SECURE, NULL);
+ if (rc) {
+ WFD_MSG_ERR("Couldn't secure encoder");
+ break;
+ }
+
+ rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core,
+ ioctl, MDP_SECURE, (void *)inst->mdp_inst);
+ if (rc) {
+ WFD_MSG_ERR("Couldn't secure MDP");
+ break;
+ }
+
+ wfd_dev->secure = true;
+ break;
+ default:
+ rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
+ ioctl, SET_PROP, a);
+ }
+
if (rc)
WFD_MSG_ERR("Failed to set encoder property\n");
return rc;
@@ -1355,7 +1384,7 @@
wfd_stats_init(&inst->stats, MINOR(filp->f_dentry->d_inode->i_rdev));
- mdp_mops.secure = wfd_dev->secure_device;
+ mdp_mops.secure = wfd_dev->secure;
mdp_mops.iommu_split_domain = wfd_dev->mdp_iommu_split_domain;
rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_OPEN,
(void *)&mdp_mops);
@@ -1373,7 +1402,7 @@
enc_mops.op_buffer_done = venc_op_buffer_done;
enc_mops.ip_buffer_done = venc_ip_buffer_done;
enc_mops.cbdata = filp;
- enc_mops.secure = wfd_dev->secure_device;
+ enc_mops.secure = wfd_dev->secure;
rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, OPEN,
(void *)&enc_mops);
if (rc || !enc_mops.cookie) {
@@ -1421,22 +1450,21 @@
inst = filp->private_data;
if (inst) {
wfdioc_streamoff(filp, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ vb2_queue_release(&inst->vid_bufq);
+ wfd_free_input_buffers(wfd_dev, inst);
+
rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
MDP_CLOSE, (void *)inst->mdp_inst);
if (rc)
WFD_MSG_ERR("Failed to CLOSE mdp subdevice: %d\n", rc);
- vb2_queue_release(&inst->vid_bufq);
- wfd_free_input_buffers(wfd_dev, inst);
rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
CLOSE, (void *)inst->venc_inst);
-
if (rc)
WFD_MSG_ERR("Failed to CLOSE enc subdev: %d\n", rc);
rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl,
VSG_CLOSE, NULL);
-
if (rc)
WFD_MSG_ERR("Failed to CLOSE vsg subdev: %d\n", rc);
@@ -1604,7 +1632,7 @@
switch (WFD_DEVICE_NUMBER_BASE + c) {
case WFD_DEVICE_SECURE:
- wfd_dev[c].secure_device = true;
+ wfd_dev[c].secure = true;
break;
default:
break;
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index afb40be..11a8f4d 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3956,10 +3956,16 @@
return 0;
}
+static const struct of_device_id iris_fm_match[] = {
+ {.compatible = "qcom,iris_fm"},
+ {}
+};
+
static struct platform_driver iris_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "iris_fm",
+ .of_match_table = iris_fm_match,
},
.remove = __devexit_p(iris_remove),
};
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 8aa4758..9e22ffb 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -663,6 +663,9 @@
&resp, sizeof(resp));
if (ret) {
pr_err("scm_call to load app failed\n");
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(qseecom.ion_clnt, ihandle);
+ qsee_disable_clock_vote(data, CLK_SFPB);
return -EINVAL;
}
@@ -1524,8 +1527,12 @@
app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
memcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
ret = __qseecom_check_app_exists(app_ireq);
- if (ret < 0)
+ if (ret < 0) {
+ kzfree(data);
+ kfree(*handle);
+ *handle = NULL;
return -EINVAL;
+ }
if (ret > 0) {
pr_warn("App id %d for [%s] app exists\n", ret,
@@ -1554,6 +1561,7 @@
if (ret < 0) {
kfree(*handle);
+ kfree(data);
*handle = NULL;
return ret;
}
@@ -1563,6 +1571,9 @@
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
pr_err("kmalloc failed\n");
+ kfree(data);
+ kfree(*handle);
+ *handle = NULL;
return -ENOMEM;
}
entry->app_id = ret;
diff --git a/drivers/misc/smsc_hub.c b/drivers/misc/smsc_hub.c
index bc338a4..41d9ff8 100644
--- a/drivers/misc/smsc_hub.c
+++ b/drivers/misc/smsc_hub.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -471,7 +471,8 @@
}
pm_runtime_disable(&pdev->dev);
- regulator_disable(smsc_hub->hub_vbus_reg);
+ if (!IS_ERR(smsc_hub->hub_vbus_reg))
+ regulator_disable(smsc_hub->hub_vbus_reg);
msm_hsic_hub_init_gpio(smsc_hub, 0);
msm_hsic_hub_init_clock(smsc_hub, 0);
msm_hsic_hub_init_vdd(smsc_hub, 0);
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 2ca585d..b81af11 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -55,7 +55,7 @@
for (i = 0; i < nr_strings; i++) {
buffer[i] = string;
- strlcpy(string, buf, sizeof(string));
+ strlcpy(string, buf, strlen(buf) + 1);
string += strlen(string) + 1;
buf += strlen(buf) + 1;
}
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 9edb20d..7669ea3 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -3366,20 +3366,7 @@
writel_relaxed(clk, host->base + MMCICLOCK);
msmsdcc_sync_reg_wr(host);
- /*
- * Make sure that we don't double the clock if
- * doubled clock rate is already set
- */
- if (!host->ddr_doubled_clk_rate ||
- (host->ddr_doubled_clk_rate &&
- (host->ddr_doubled_clk_rate != ios->clock))) {
- host->ddr_doubled_clk_rate =
- msmsdcc_get_sup_clk_rate(
- host, (ios->clock * 2));
- clock = host->ddr_doubled_clk_rate;
- }
- } else {
- host->ddr_doubled_clk_rate = 0;
+ clock = msmsdcc_get_sup_clk_rate(host, ios->clock * 2);
}
if (clock != host->clk_rate) {
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index b5522fb..4ed2d96 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -368,7 +368,6 @@
unsigned int clk_rate; /* Current clock rate */
unsigned int pclk_rate;
- unsigned int ddr_doubled_clk_rate;
u32 pwr;
struct mmc_platform_data *plat;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6451d62..8c2bea09 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -68,51 +68,51 @@
static void sdhci_dumpregs(struct sdhci_host *host)
{
- printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+ pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
mmc_hostname(host->mmc));
- printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
sdhci_readl(host, SDHCI_DMA_ADDRESS),
sdhci_readw(host, SDHCI_HOST_VERSION));
- printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
sdhci_readw(host, SDHCI_BLOCK_SIZE),
sdhci_readw(host, SDHCI_BLOCK_COUNT));
- printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
sdhci_readl(host, SDHCI_ARGUMENT),
sdhci_readw(host, SDHCI_TRANSFER_MODE));
- printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
sdhci_readl(host, SDHCI_PRESENT_STATE),
sdhci_readb(host, SDHCI_HOST_CONTROL));
- printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
sdhci_readb(host, SDHCI_POWER_CONTROL),
sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
- printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
sdhci_readw(host, SDHCI_CLOCK_CONTROL));
- printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
sdhci_readl(host, SDHCI_INT_STATUS));
- printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
sdhci_readl(host, SDHCI_INT_ENABLE),
sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
- printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
sdhci_readw(host, SDHCI_ACMD12_ERR),
sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
- printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
sdhci_readl(host, SDHCI_CAPABILITIES),
sdhci_readl(host, SDHCI_CAPABILITIES_1));
- printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
sdhci_readw(host, SDHCI_COMMAND),
sdhci_readl(host, SDHCI_MAX_CURRENT));
- printk(KERN_DEBUG DRIVER_NAME ": Host ctl2: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
sdhci_readw(host, SDHCI_HOST_CONTROL2));
if (host->flags & SDHCI_USE_ADMA)
- printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
+ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
readl(host->ioaddr + SDHCI_ADMA_ERROR),
readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
- printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
+ pr_debug(DRIVER_NAME ": ===========================================\n");
}
/*****************************************************************************\
@@ -144,14 +144,15 @@
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
- u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
+ u32 present, irqs;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
(host->mmc->caps & MMC_CAP_NONREMOVABLE))
return;
- if (host->quirks2 & SDHCI_QUIRK2_OWN_CARD_DETECTION)
- return;
+ present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT;
+ irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
if (enable)
sdhci_unmask_irqs(host, irqs);
@@ -194,10 +195,14 @@
/* Wait max 100 ms */
timeout = 100;
+ if (host->ops->check_power_status && host->pwr &&
+ (mask & SDHCI_RESET_ALL))
+ host->ops->check_power_status(host);
+
/* hw clears the bit when it's done */
while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
if (timeout == 0) {
- printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
+ pr_err("%s: Reset 0x%x never completed.\n",
mmc_hostname(host->mmc), (int)mask);
sdhci_dumpregs(host);
return;
@@ -211,6 +216,11 @@
if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
+
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+ if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL))
+ host->ops->enable_dma(host);
+ }
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
@@ -648,12 +658,11 @@
/* timeout in us */
if (!data)
target_timeout = cmd->cmd_timeout_ms * 1000;
- else
- target_timeout = data->timeout_ns / 1000 +
- data->timeout_clks / host->clock;
-
- if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
- host->timeout_clk = host->clock / 1000;
+ else {
+ target_timeout = data->timeout_ns / 1000;
+ if (host->clock)
+ target_timeout += data->timeout_clks / host->clock;
+ }
/*
* Figure out needed cycles.
@@ -665,7 +674,6 @@
* =>
* (1) / (2) > 2^6
*/
- BUG_ON(!host->timeout_clk);
count = 0;
current_timeout = (1 << 13) * 1000 / host->timeout_clk;
while (current_timeout < target_timeout) {
@@ -675,8 +683,11 @@
break;
}
- if (count >= 0xF)
+ if (count >= 0xF) {
+ DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+ mmc_hostname(host->mmc), count, cmd->opcode);
count = 0xE;
+ }
return count;
}
@@ -884,8 +895,13 @@
}
}
- if (data->flags & MMC_DATA_READ)
+ if (data->flags & MMC_DATA_READ) {
mode |= SDHCI_TRNS_READ;
+ if (host->ops->toggle_cdr)
+ host->ops->toggle_cdr(host, true);
+ }
+ if (host->ops->toggle_cdr && (data->flags & MMC_DATA_WRITE))
+ host->ops->toggle_cdr(host, false);
if (host->flags & SDHCI_REQ_USE_DMA)
mode |= SDHCI_TRNS_DMA;
@@ -968,7 +984,7 @@
while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
if (timeout == 0) {
- printk(KERN_ERR "%s: Controller never released "
+ pr_err("%s: Controller never released "
"inhibit bit(s).\n", mmc_hostname(host->mmc));
sdhci_dumpregs(host);
cmd->error = -EIO;
@@ -990,7 +1006,7 @@
sdhci_set_transfer_mode(host, cmd);
if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
- printk(KERN_ERR "%s: Unsupported response type!\n",
+ pr_err("%s: Unsupported response type!\n",
mmc_hostname(host->mmc));
cmd->error = -EINVAL;
tasklet_schedule(&host->finish_tasklet);
@@ -1063,12 +1079,15 @@
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
int div = 0; /* Initialized for compiler warning */
+ int real_div = div, clk_mul = 1;
u16 clk = 0;
unsigned long timeout;
if (clock && clock == host->clock)
return;
+ host->mmc->actual_clock = 0;
+
if (host->ops->set_clock) {
host->ops->set_clock(host, clock);
if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
@@ -1106,6 +1125,8 @@
* Control register.
*/
clk = SDHCI_PROG_CLOCK_MODE;
+ real_div = div;
+ clk_mul = host->clk_mul;
div--;
}
} else {
@@ -1119,6 +1140,7 @@
break;
}
}
+ real_div = div;
div >>= 1;
}
} else {
@@ -1127,9 +1149,13 @@
if ((host->max_clk / div) <= clock)
break;
}
+ real_div = div;
div >>= 1;
}
+ if (real_div)
+ host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
+
clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
<< SDHCI_DIVIDER_HI_SHIFT;
@@ -1141,7 +1167,7 @@
while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
& SDHCI_CLOCK_INT_STABLE)) {
if (timeout == 0) {
- printk(KERN_ERR "%s: Internal clock never "
+ pr_err("%s: Internal clock never "
"stabilised.\n", mmc_hostname(host->mmc));
sdhci_dumpregs(host);
return;
@@ -1157,7 +1183,7 @@
host->clock = clock;
}
-static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
+static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
u8 pwr = 0;
@@ -1180,32 +1206,42 @@
}
if (host->pwr == pwr)
- return;
+ return -1;
host->pwr = pwr;
if (pwr == 0) {
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
- return;
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host);
+ return 0;
}
/*
* Spec says that we should clear the power reg before setting
* a new value. Some controllers don't seem to like this though.
*/
- if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
+ if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) {
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host);
+ }
/*
* At least the Marvell CaFe chip gets confused if we set the voltage
* and turn on the power at the same time, so set the voltage first.
*/
- if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
+ if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) {
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host);
+ }
pwr |= SDHCI_POWER_ON;
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host);
/*
* Some controllers need an extra 10ms delay before they
@@ -1213,6 +1249,8 @@
*/
if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
mdelay(10);
+
+ return power;
}
/*****************************************************************************\
@@ -1294,12 +1332,14 @@
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
unsigned long flags;
+ int vdd_bit = -1;
u8 ctrl;
- spin_lock_irqsave(&host->lock, flags);
-
- if (host->flags & SDHCI_DEVICE_DEAD)
- goto out;
+ if (host->flags & SDHCI_DEVICE_DEAD) {
+ if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
+ mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
+ return;
+ }
/*
* Reset the chip on each power off.
@@ -1313,9 +1353,14 @@
sdhci_set_clock(host, ios->clock);
if (ios->power_mode == MMC_POWER_OFF)
- sdhci_set_power(host, -1);
+ vdd_bit = sdhci_set_power(host, -1);
else
- sdhci_set_power(host, ios->vdd);
+ vdd_bit = sdhci_set_power(host, ios->vdd);
+
+ if (host->vmmc && vdd_bit != -1)
+ mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
+
+ spin_lock_irqsave(&host->lock, flags);
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -1442,7 +1487,6 @@
if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
-out:
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -1572,6 +1616,8 @@
/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
ctrl &= ~SDHCI_CTRL_VDD_180;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host);
/* Wait for 5ms */
usleep_range(5000, 5500);
@@ -1581,7 +1627,7 @@
if (!(ctrl & SDHCI_CTRL_VDD_180))
return 0;
else {
- printk(KERN_INFO DRIVER_NAME ": Switching to 3.3V "
+ pr_info(DRIVER_NAME ": Switching to 3.3V "
"signalling voltage failed\n");
return -EIO;
}
@@ -1602,6 +1648,8 @@
*/
ctrl |= SDHCI_CTRL_VDD_180;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host);
/* Wait for 5ms */
usleep_range(5000, 5500);
@@ -1634,13 +1682,17 @@
pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
pwr &= ~SDHCI_POWER_ON;
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host);
/* Wait for 1ms as per the spec */
usleep_range(1000, 1500);
pwr |= SDHCI_POWER_ON;
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host);
- printk(KERN_INFO DRIVER_NAME ": Switching to 1.8V signalling "
+ pr_info(DRIVER_NAME ": Switching to 1.8V signalling "
"voltage failed, retrying with S18R set to 0\n");
return -EAGAIN;
} else
@@ -1666,7 +1718,7 @@
{
struct sdhci_host *host;
u16 ctrl;
- u32 ier;
+ u32 ier = 0;
int tuning_loop_counter = MAX_TUNING_LOOP;
unsigned long timeout;
int err = 0;
@@ -1687,9 +1739,9 @@
* If the Host Controller supports the HS200 mode then the
* tuning function has to be executed.
*/
- if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
- (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
- host->flags & SDHCI_HS200_NEEDS_TUNING))
+ if ((((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
+ (host->flags & SDHCI_SDR50_NEEDS_TUNING)) ||
+ (host->flags & SDHCI_HS200_NEEDS_TUNING))
requires_tuning_nonuhs = true;
if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
@@ -1702,6 +1754,14 @@
return 0;
}
+ if (host->ops->execute_tuning) {
+ spin_unlock(&host->lock);
+ enable_irq(host->irq);
+ host->ops->execute_tuning(host, opcode);
+ disable_irq(host->irq);
+ spin_lock(&host->lock);
+ goto out;
+ }
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
/*
@@ -1780,7 +1840,7 @@
spin_lock(&host->lock);
if (!host->tuning_done) {
- printk(KERN_INFO DRIVER_NAME ": Timeout waiting for "
+ pr_info(DRIVER_NAME ": Timeout waiting for "
"Buffer Read Ready interrupt during tuning "
"procedure, falling back to fixed sampling "
"clock\n");
@@ -1810,7 +1870,7 @@
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
} else {
if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
- printk(KERN_INFO DRIVER_NAME ": Tuning procedure"
+ pr_info(DRIVER_NAME ": Tuning procedure"
" failed, falling back to fixed sampling"
" clock\n");
err = -EIO;
@@ -1925,9 +1985,9 @@
/* Check host->mrq first in case we are runtime suspended */
if (host->mrq &&
!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
- printk(KERN_ERR "%s: Card removed during transfer!\n",
+ pr_err("%s: Card removed during transfer!\n",
mmc_hostname(host->mmc));
- printk(KERN_ERR "%s: Resetting controller.\n",
+ pr_err("%s: Resetting controller.\n",
mmc_hostname(host->mmc));
sdhci_reset(host, SDHCI_RESET_CMD);
@@ -2016,7 +2076,7 @@
spin_lock_irqsave(&host->lock, flags);
if (host->mrq) {
- printk(KERN_ERR "%s: Timeout waiting for hardware "
+ pr_err("%s: Timeout waiting for hardware "
"interrupt.\n", mmc_hostname(host->mmc));
sdhci_dumpregs(host);
@@ -2062,7 +2122,7 @@
BUG_ON(intmask == 0);
if (!host->cmd) {
- printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
+ pr_err("%s: Got command interrupt 0x%08x even "
"though no command operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
sdhci_dumpregs(host);
@@ -2164,7 +2224,7 @@
}
}
- printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
+ pr_err("%s: Got data interrupt 0x%08x even "
"though no data operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
sdhci_dumpregs(host);
@@ -2181,7 +2241,7 @@
!= MMC_BUS_TEST_R)
host->data->error = -EILSEQ;
else if (intmask & SDHCI_INT_ADMA_ERROR) {
- printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
+ pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
sdhci_show_adma_error(host);
host->data->error = -EIO;
}
@@ -2245,7 +2305,7 @@
if (host->runtime_suspended) {
spin_unlock(&host->lock);
- printk(KERN_WARNING "%s: got irq while runtime suspended\n",
+ pr_warning("%s: got irq while runtime suspended\n",
mmc_hostname(host->mmc));
return IRQ_HANDLED;
}
@@ -2262,13 +2322,30 @@
mmc_hostname(host->mmc), intmask);
if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+ u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT;
+
+ /*
+ * There is an observation on i.MX eSDHC: the INSERT bit will be
+ * set again immediately after it is cleared if a card is
+ * inserted. We have to mask the irq to prevent an interrupt
+ * storm which would freeze the system. The REMOVE bit gets
+ * into the same situation.
+ *
+ * More testing is needed here to ensure it works for other
+ * platforms though.
+ */
+ sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
+ SDHCI_INT_CARD_REMOVE);
+ sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
+ SDHCI_INT_CARD_INSERT);
+
sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
- SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+ SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+ intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
tasklet_schedule(&host->card_tasklet);
}
- intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
-
if (intmask & SDHCI_INT_CMD_MASK) {
sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
SDHCI_INT_STATUS);
@@ -2286,7 +2363,7 @@
intmask &= ~SDHCI_INT_ERROR;
if (intmask & SDHCI_INT_BUS_POWER) {
- printk(KERN_ERR "%s: Card is consuming too much power!\n",
+ pr_err("%s: Card is consuming too much power!\n",
mmc_hostname(host->mmc));
sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
}
@@ -2299,9 +2376,6 @@
intmask &= ~SDHCI_INT_CARD_INT;
if (intmask) {
- printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
- mmc_hostname(host->mmc), intmask);
- sdhci_dumpregs(host);
unexpected |= intmask;
sdhci_writel(host, intmask, SDHCI_INT_STATUS);
}
@@ -2339,6 +2413,7 @@
int sdhci_suspend_host(struct sdhci_host *host)
{
int ret;
+ bool has_tuning_timer;
if (host->ops->platform_suspend)
host->ops->platform_suspend(host);
@@ -2346,21 +2421,28 @@
sdhci_disable_card_detection(host);
/* Disable tuning since we are suspending */
- if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
- host->tuning_mode == SDHCI_TUNING_MODE_1) {
+ has_tuning_timer = host->version >= SDHCI_SPEC_300 &&
+ host->tuning_count && host->tuning_mode == SDHCI_TUNING_MODE_1;
+ if (has_tuning_timer) {
del_timer_sync(&host->tuning_timer);
host->flags &= ~SDHCI_NEEDS_RETUNING;
}
ret = mmc_suspend_host(host->mmc);
- if (ret)
+ if (ret) {
+ if (has_tuning_timer) {
+ host->flags |= SDHCI_NEEDS_RETUNING;
+ mod_timer(&host->tuning_timer, jiffies +
+ host->tuning_count * HZ);
+ }
+
+ sdhci_enable_card_detection(host);
+
return ret;
+ }
free_irq(host->irq, host);
- if (host->vmmc)
- ret = regulator_disable(host->vmmc);
-
return ret;
}
@@ -2370,12 +2452,6 @@
{
int ret;
- if (host->vmmc) {
- int ret = regulator_enable(host->vmmc);
- if (ret)
- return ret;
- }
-
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
host->ops->enable_dma(host);
@@ -2561,7 +2637,7 @@
host->version = (host->version & SDHCI_SPEC_VER_MASK)
>> SDHCI_SPEC_VER_SHIFT;
if (host->version > SDHCI_SPEC_300) {
- printk(KERN_ERR "%s: Unknown controller version (%d). "
+ pr_err("%s: Unknown controller version (%d). "
"You may experience problems.\n", mmc_hostname(mmc),
host->version);
}
@@ -2598,7 +2674,7 @@
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma) {
if (host->ops->enable_dma(host)) {
- printk(KERN_WARNING "%s: No suitable DMA "
+ pr_warning("%s: No suitable DMA "
"available. Falling back to PIO.\n",
mmc_hostname(mmc));
host->flags &=
@@ -2618,7 +2694,7 @@
if (!host->adma_desc || !host->align_buffer) {
kfree(host->adma_desc);
kfree(host->align_buffer);
- printk(KERN_WARNING "%s: Unable to allocate ADMA "
+ pr_warning("%s: Unable to allocate ADMA "
"buffers. Falling back to standard DMA.\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
@@ -2646,46 +2722,13 @@
if (host->max_clk == 0 || host->quirks &
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
if (!host->ops->get_max_clock) {
- printk(KERN_ERR
- "%s: Hardware doesn't specify base clock "
+ pr_err("%s: Hardware doesn't specify base clock "
"frequency.\n", mmc_hostname(mmc));
return -ENODEV;
}
host->max_clk = host->ops->get_max_clock(host);
}
- host->timeout_clk =
- (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
- if (host->timeout_clk == 0) {
- if (host->ops->get_timeout_clock) {
- host->timeout_clk = host->ops->get_timeout_clock(host);
- } else if (!(host->quirks &
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
- printk(KERN_ERR
- "%s: Hardware doesn't specify timeout clock "
- "frequency.\n", mmc_hostname(mmc));
- return -ENODEV;
- }
- }
- if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
- host->timeout_clk *= 1000;
-
- /*
- * In case of Host Controller v3.00, find out whether clock
- * multiplier is supported.
- */
- host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
- SDHCI_CLOCK_MUL_SHIFT;
-
- /*
- * In case the value in Clock Multiplier is 0, then programmable
- * clock mode is not supported, otherwise the actual clock
- * multiplier is one more than the value of Clock Multiplier
- * in the Capabilities Register.
- */
- if (host->clk_mul)
- host->clk_mul += 1;
-
/*
* In case of Host Controller v3.00, find out whether clock
* multiplier is supported.
@@ -2718,6 +2761,26 @@
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
+ host->timeout_clk =
+ (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
+ if (host->timeout_clk == 0) {
+ if (host->ops->get_timeout_clock) {
+ host->timeout_clk = host->ops->get_timeout_clock(host);
+ } else if (!(host->quirks &
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
+ pr_err("%s: Hardware doesn't specify timeout clock "
+ "frequency.\n", mmc_hostname(mmc));
+ return -ENODEV;
+ }
+ }
+ if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
+ host->timeout_clk *= 1000;
+
+ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
+ host->timeout_clk = mmc->f_max / 1000;
+
+ mmc->max_discard_to = (1 << 27) / host->timeout_clk;
+
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
@@ -2869,7 +2932,7 @@
mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
if (mmc->ocr_avail == 0) {
- printk(KERN_ERR "%s: Hardware doesn't report any "
+ pr_err("%s: Hardware doesn't report any "
"support voltages.\n", mmc_hostname(mmc));
return -ENODEV;
}
@@ -2917,7 +2980,7 @@
mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
SDHCI_MAX_BLOCK_SHIFT;
if (mmc->max_blk_size >= 3) {
- printk(KERN_WARNING "%s: Invalid maximum block size, "
+ pr_warning("%s: Invalid maximum block size, "
"assuming 512 bytes\n", mmc_hostname(mmc));
mmc->max_blk_size = 0;
}
@@ -2956,10 +3019,8 @@
host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
if (IS_ERR(host->vmmc)) {
- printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
+ pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
host->vmmc = NULL;
- } else {
- regulator_enable(host->vmmc);
}
sdhci_init(host, 0);
@@ -2985,7 +3046,7 @@
mmc_add_host(mmc);
- printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
+ pr_info("%s: SDHCI controller on %s [%s] using %s\n",
mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
(host->flags & SDHCI_USE_ADMA) ? "ADMA" :
(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
@@ -3018,7 +3079,7 @@
host->flags |= SDHCI_DEVICE_DEAD;
if (host->mrq) {
- printk(KERN_ERR "%s: Controller removed during "
+ pr_err("%s: Controller removed during "
" transfer!\n", mmc_hostname(host->mmc));
host->mrq->cmd->error = -ENOMEDIUM;
@@ -3048,10 +3109,8 @@
tasklet_kill(&host->card_tasklet);
tasklet_kill(&host->finish_tasklet);
- if (host->vmmc) {
- regulator_disable(host->vmmc);
+ if (host->vmmc)
regulator_put(host->vmmc);
- }
kfree(host->adma_desc);
kfree(host->align_buffer);
@@ -3077,9 +3136,9 @@
static int __init sdhci_drv_init(void)
{
- printk(KERN_INFO DRIVER_NAME
+ pr_info(DRIVER_NAME
": Secure Digital Host Controller Interface driver\n");
- printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
+ pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
return 0;
}
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index f761f23..4f8d01d 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -277,6 +277,9 @@
void (*hw_reset)(struct sdhci_host *host);
void (*platform_suspend)(struct sdhci_host *host);
void (*platform_resume)(struct sdhci_host *host);
+ void (*check_power_status)(struct sdhci_host *host);
+ int (*execute_tuning)(struct sdhci_host *host, u32 opcode);
+ void (*toggle_cdr)(struct sdhci_host *host, bool enable);
};
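/*
 * Hedged sketch (not part of this patch): a platform glue driver could hook
 * the new callbacks roughly as below; the my_* helpers are hypothetical names.
 *
 *	static const struct sdhci_ops my_sdhci_ops = {
 *		.check_power_status = my_check_power_status,
 *		.execute_tuning = my_execute_tuning,
 *		.toggle_cdr = my_toggle_cdr,
 *	};
 */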
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
diff --git a/drivers/net/ethernet/msm/Kconfig b/drivers/net/ethernet/msm/Kconfig
index 3fced2d..e15f4a9 100644
--- a/drivers/net/ethernet/msm/Kconfig
+++ b/drivers/net/ethernet/msm/Kconfig
@@ -50,3 +50,9 @@
This driver supports Ethernet in the FSM9xxx.
To compile this driver as a module, choose M here: the
module will be called qfec.
+
+config ECM_IPA
+ tristate "STD ECM LAN Driver support"
+ depends on IPA
+ help
+ Enables a LAN link between the apps processor and a tethered host over standard ECM.
diff --git a/drivers/net/ethernet/msm/Makefile b/drivers/net/ethernet/msm/Makefile
index 7d9d4c6..e152ec7 100644
--- a/drivers/net/ethernet/msm/Makefile
+++ b/drivers/net/ethernet/msm/Makefile
@@ -7,3 +7,4 @@
obj-$(CONFIG_MSM_RMNET_BAM) += msm_rmnet_bam.o
obj-$(CONFIG_MSM_RMNET_SMUX) += msm_rmnet_smux.o
obj-$(CONFIG_QFEC) += qfec.o
+obj-$(CONFIG_ECM_IPA) += ecm_ipa.o
diff --git a/drivers/net/ethernet/msm/ecm_ipa.c b/drivers/net/ethernet/msm/ecm_ipa.c
new file mode 100644
index 0000000..605fd84
--- /dev/null
+++ b/drivers/net/ethernet/msm/ecm_ipa.c
@@ -0,0 +1,1105 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <mach/ecm_ipa.h>
+
+#define DRIVER_NAME "ecm_ipa"
+#define DRIVER_VERSION "19-Feb-2013"
+#define ECM_IPA_IPV4_HDR_NAME "ecm_eth_ipv4"
+#define ECM_IPA_IPV6_HDR_NAME "ecm_eth_ipv6"
+#define IPA_TO_USB_CLIENT IPA_CLIENT_USB_CONS
+#define INACTIVITY_MSEC_DELAY 100
+#define ECM_IPA_ERROR(fmt, args...) \
+ pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\
+ fmt, __func__, __LINE__, current->comm, ## args)
+#ifdef ECM_IPA_DEBUG_ON
+#define ECM_IPA_DEBUG(fmt, args...) \
+ pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\
+ fmt, __func__, __LINE__, current->comm, ## args)
+#else /* ECM_IPA_DEBUG_ON */
+#define ECM_IPA_DEBUG(fmt, args...)
+#endif /* ECM_IPA_DEBUG_ON */
+
+#define NULL_CHECK(ptr) \
+ do { \
+ if (!(ptr)) { \
+ ECM_IPA_ERROR("null pointer " #ptr "\n"); \
+ return -EINVAL; \
+ } \
+ } \
+ while (0)
+
+#define ECM_IPA_LOG_ENTRY() ECM_IPA_DEBUG("begin\n")
+#define ECM_IPA_LOG_EXIT() ECM_IPA_DEBUG("end\n")
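+
+/*
+ * For reference (illustrative, assuming ECM_IPA_DEBUG_ON is defined):
+ * ECM_IPA_LOG_ENTRY() in ecm_ipa_init() would emit a line of the form
+ *	ecm_ipa@ecm_ipa_init@<line>@ctx:<task>: begin
+ * where <line> and <task> are the call site line number and current->comm.
+ */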
+
+/**
+ * struct ecm_ipa_dev - main driver context parameters
+ * @ack_spinlock: protect last sent skb
+ * @last_out_skb: last sent skb saved until Tx notify is received from IPA
+ * @net: network interface struct implemented by this driver
+ * @folder: debugfs folder for various debugging switches
+ * @tx_enable: flag that enables/disables the Tx path to IPA
+ * @rx_enable: flag that enables/disables the Rx path from IPA
+ * @rm_enable: flag that enables/disables the resource manager request prior to Tx
+ * @dma_enable: flag that allows on-the-fly DMA mode for IPA
+ * @tx_file: saved debugfs entry to allow cleanup
+ * @rx_file: saved debugfs entry to allow cleanup
+ * @rm_file: saved debugfs entry to allow cleanup
+ * @dma_file: saved debugfs entry to allow cleanup
+ * @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table
+ * @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table
+ * @usb_to_ipa_hdl: saved handle for IPA pipe operations
+ * @ipa_to_usb_hdl: saved handle for IPA pipe operations
+ */
+struct ecm_ipa_dev {
+ spinlock_t ack_spinlock;
+ struct sk_buff *last_out_skb;
+ struct net_device *net;
+ bool tx_enable;
+ bool rx_enable;
+ bool rm_enable;
+ bool dma_enable;
+ struct dentry *folder;
+ struct dentry *tx_file;
+ struct dentry *rx_file;
+ struct dentry *rm_file;
+ struct dentry *dma_file;
+ uint32_t eth_ipv4_hdr_hdl;
+ uint32_t eth_ipv6_hdr_hdl;
+ u32 usb_to_ipa_hdl;
+ u32 ipa_to_usb_hdl;
+};
+
+/**
+ * ecm_ipa_ctx - saved pointer to the STD ECM network device context,
+ * which allows ecm_ipa to be a singleton
+ */
+static struct ecm_ipa_dev *ecm_ipa_ctx;
+
+static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl);
+static void sk_buff_print(struct sk_buff *skb);
+static int ecm_ipa_set_device_ethernet_addr(
+ u8 *dev_ethaddr, u8 device_ethaddr[]);
+static void ecm_ipa_packet_receive_notify(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data);
+static void ecm_ipa_tx_complete_notify(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data);
+static int ecm_ipa_ep_registers_dma_cfg(u32 usb_to_ipa_hdl);
+static int ecm_ipa_open(struct net_device *net);
+static int ecm_ipa_stop(struct net_device *net);
+static netdev_tx_t ecm_ipa_start_xmit(struct sk_buff *skb,
+ struct net_device *net);
+static void ecm_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
+ unsigned long data);
+static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *dev);
+static void ecm_ipa_destory_rm_resource(void);
+static bool rx_filter(struct sk_buff *skb);
+static bool tx_filter(struct sk_buff *skb);
+static bool rm_enabled(struct ecm_ipa_dev *dev);
+
+static int ecm_ipa_rules_cfg(struct ecm_ipa_dev *dev,
+ const void *dst_mac, const void *src_mac);
+static int ecm_ipa_register_tx(struct ecm_ipa_dev *dev);
+static void ecm_ipa_deregister_tx(struct ecm_ipa_dev *dev);
+static int ecm_ipa_debugfs_init(struct ecm_ipa_dev *dev);
+static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *dev);
+static int ecm_ipa_debugfs_tx_open(struct inode *inode, struct file *file);
+static int ecm_ipa_debugfs_rx_open(struct inode *inode, struct file *file);
+static int ecm_ipa_debugfs_rm_open(struct inode *inode, struct file *file);
+static int ecm_ipa_debugfs_dma_open(struct inode *inode, struct file *file);
+static ssize_t ecm_ipa_debugfs_enable_read(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos);
+static ssize_t ecm_ipa_debugfs_enable_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos);
+static ssize_t ecm_ipa_debugfs_enable_write_dma(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos);
+static void eth_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *drv_info);
+
+static const struct net_device_ops ecm_ipa_netdev_ops = {
+ .ndo_open = ecm_ipa_open,
+ .ndo_stop = ecm_ipa_stop,
+ .ndo_start_xmit = ecm_ipa_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+static const struct ethtool_ops ops = {
+ .get_drvinfo = eth_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+const struct file_operations ecm_ipa_debugfs_tx_ops = {
+ .open = ecm_ipa_debugfs_tx_open,
+ .read = ecm_ipa_debugfs_enable_read,
+ .write = ecm_ipa_debugfs_enable_write,
+};
+const struct file_operations ecm_ipa_debugfs_rx_ops = {
+ .open = ecm_ipa_debugfs_rx_open,
+ .read = ecm_ipa_debugfs_enable_read,
+ .write = ecm_ipa_debugfs_enable_write,
+};
+const struct file_operations ecm_ipa_debugfs_rm_ops = {
+ .open = ecm_ipa_debugfs_rm_open,
+ .read = ecm_ipa_debugfs_enable_read,
+ .write = ecm_ipa_debugfs_enable_write,
+};
+const struct file_operations ecm_ipa_debugfs_dma_ops = {
+ .open = ecm_ipa_debugfs_dma_open,
+ .read = ecm_ipa_debugfs_enable_read,
+ .write = ecm_ipa_debugfs_enable_write_dma,
+};
+
+/**
+ * ecm_ipa_init() - initializes internal data structures
+ * @ecm_ipa_rx_dp_notify: supplied callback to be called by the IPA
+ * driver upon data packets received from USB pipe into IPA core.
+ * @ecm_ipa_tx_dp_notify: supplied callback to be called by the IPA
+ * driver upon Tx-completion of packets sent from IPA pipe into USB core.
+ * @priv: should be passed later on to ecm_ipa_configure; holds the network
+ * structure allocated for the STD ECM interface.
+ *
+ * Shall be called prior to pipe connection.
+ * The out parameters (the callbacks) shall be supplied to ipa_connect.
+ * Detailed description:
+ * - set the callbacks to be used by the caller upon ipa_connect
+ * - allocate the network device
+ * - set the priv argument with a reference to the network device
+ *
+ * Returns negative errno, or zero on success
+ */
+int ecm_ipa_init(ecm_ipa_callback *ecm_ipa_rx_dp_notify,
+ ecm_ipa_callback *ecm_ipa_tx_dp_notify,
+ void **priv)
+{
+ int ret = 0;
+ struct net_device *net;
+ struct ecm_ipa_dev *dev;
+ ECM_IPA_LOG_ENTRY();
+ ECM_IPA_DEBUG("%s version %s\n", DRIVER_NAME, DRIVER_VERSION);
+ NULL_CHECK(ecm_ipa_rx_dp_notify);
+ NULL_CHECK(ecm_ipa_tx_dp_notify);
+ NULL_CHECK(priv);
+ net = alloc_etherdev(sizeof(struct ecm_ipa_dev));
+ if (!net) {
+ ret = -ENOMEM;
+ ECM_IPA_ERROR("fail to allocate etherdev\n");
+ goto fail_alloc_etherdev;
+ }
+ ECM_IPA_DEBUG("etherdev was successfully allocated\n");
+ dev = netdev_priv(net);
+ memset(dev, 0, sizeof(*dev));
+ dev->tx_enable = true;
+ dev->rx_enable = true;
+ spin_lock_init(&dev->ack_spinlock);
+ dev->net = net;
+ ecm_ipa_ctx = dev;
+ *priv = (void *)dev;
+ snprintf(net->name, sizeof(net->name), "%s%%d", "ecm");
+ net->netdev_ops = &ecm_ipa_netdev_ops;
+ ECM_IPA_DEBUG("internal data structures were intialized\n");
+ ret = ecm_ipa_debugfs_init(dev);
+ if (ret)
+ goto fail_debugfs;
+ ECM_IPA_DEBUG("debugfs entries were created\n");
+ *ecm_ipa_rx_dp_notify = ecm_ipa_packet_receive_notify;
+ *ecm_ipa_tx_dp_notify = ecm_ipa_tx_complete_notify;
+ ECM_IPA_LOG_EXIT();
+ return 0;
+fail_debugfs:
+ free_netdev(net);
+fail_alloc_etherdev:
+ return ret;
+}
+EXPORT_SYMBOL(ecm_ipa_init);
+
+/**
+ * ecm_ipa_rules_cfg() - set header insertion and register Tx/Rx properties
+ * Headers will be committed to HW
+ * @dev: main driver context parameters
+ * @dst_mac: destination MAC address
+ * @src_mac: source MAC address
+ *
+ * Returns negative errno, or zero on success
+ */
+static int ecm_ipa_rules_cfg(struct ecm_ipa_dev *dev,
+ const void *dst_mac, const void *src_mac)
+{
+ struct ipa_ioc_add_hdr *hdrs;
+ struct ipa_hdr_add *ipv4_hdr;
+ struct ipa_hdr_add *ipv6_hdr;
+ struct ethhdr *eth_ipv4;
+ struct ethhdr *eth_ipv6;
+ int result = 0;
+
+ ECM_IPA_LOG_ENTRY();
+ hdrs = kzalloc(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr),
+ GFP_KERNEL);
+ if (!hdrs) {
+ result = -ENOMEM;
+ goto out;
+ }
+ ipv4_hdr = &hdrs->hdr[0];
+ eth_ipv4 = (struct ethhdr *)ipv4_hdr->hdr;
+ ipv6_hdr = &hdrs->hdr[1];
+ eth_ipv6 = (struct ethhdr *)ipv6_hdr->hdr;
+ strlcpy(ipv4_hdr->name, ECM_IPA_IPV4_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+ memcpy(eth_ipv4->h_dest, dst_mac, ETH_ALEN);
+ memcpy(eth_ipv4->h_source, src_mac, ETH_ALEN);
+ eth_ipv4->h_proto = ETH_P_IP;
+ ipv4_hdr->hdr_len = ETH_HLEN;
+ ipv4_hdr->is_partial = 0;
+ strlcpy(ipv6_hdr->name, ECM_IPA_IPV6_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+ memcpy(eth_ipv6->h_dest, dst_mac, ETH_ALEN);
+ memcpy(eth_ipv6->h_source, src_mac, ETH_ALEN);
+ eth_ipv6->h_proto = ETH_P_IPV6;
+ ipv6_hdr->hdr_len = ETH_HLEN;
+ ipv6_hdr->is_partial = 0;
+ hdrs->commit = 1;
+ hdrs->num_hdrs = 2;
+ result = ipa_add_hdr(hdrs);
+ if (result) {
+ ECM_IPA_ERROR("Fail on Header-Insertion(%d)\n", result);
+ goto out_free_mem;
+ }
+ if (ipv4_hdr->status) {
+ ECM_IPA_ERROR("Fail on Header-Insertion ipv4(%d)\n",
+ ipv4_hdr->status);
+ result = ipv4_hdr->status;
+ goto out_free_mem;
+ }
+ if (ipv6_hdr->status) {
+ ECM_IPA_ERROR("Fail on Header-Insertion ipv6(%d)\n",
+ ipv6_hdr->status);
+ result = ipv6_hdr->status;
+ goto out_free_mem;
+ }
+ dev->eth_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl;
+ dev->eth_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl;
+ ECM_IPA_LOG_EXIT();
+out_free_mem:
+ kfree(hdrs);
+out:
+ return result;
+}
+
+static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *dev)
+{
+ struct ipa_ioc_del_hdr *del_hdr;
+ struct ipa_hdr_del *ipv4;
+ struct ipa_hdr_del *ipv6;
+ int result;
+ del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) +
+ sizeof(*ipv6), GFP_KERNEL);
+ if (!del_hdr)
+ return;
+ del_hdr->commit = 1;
+ del_hdr->num_hdls = 2;
+ ipv4 = &del_hdr->hdl[0];
+ ipv4->hdl = dev->eth_ipv4_hdr_hdl;
+ ipv6 = &del_hdr->hdl[1];
+ ipv6->hdl = dev->eth_ipv6_hdr_hdl;
+ result = ipa_del_hdr(del_hdr);
+ if (result || ipv4->status || ipv6->status)
+ ECM_IPA_ERROR("ipa_del_hdr failed");
+}
+
+static int ecm_ipa_register_tx(struct ecm_ipa_dev *dev)
+{
+ struct ipa_tx_intf tx_properties = {0};
+ struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} };
+ struct ipa_ioc_tx_intf_prop *ipv4_property;
+ struct ipa_ioc_tx_intf_prop *ipv6_property;
+ int result = 0;
+ ECM_IPA_LOG_ENTRY();
+ tx_properties.prop = properties;
+ ipv4_property = &tx_properties.prop[0];
+ ipv4_property->ip = IPA_IP_v4;
+ ipv4_property->dst_pipe = IPA_TO_USB_CLIENT;
+ strlcpy(ipv4_property->hdr_name, ECM_IPA_IPV4_HDR_NAME,
+ IPA_RESOURCE_NAME_MAX);
+ ipv6_property = &tx_properties.prop[1];
+ ipv6_property->ip = IPA_IP_v6;
+ ipv6_property->dst_pipe = IPA_TO_USB_CLIENT;
+ strlcpy(ipv6_property->hdr_name, ECM_IPA_IPV6_HDR_NAME,
+ IPA_RESOURCE_NAME_MAX);
+ tx_properties.num_props = 2;
+ result = ipa_register_intf(dev->net->name, &tx_properties, NULL);
+ if (result)
+ ECM_IPA_ERROR("fail on Tx_prop registration\n");
+ ECM_IPA_LOG_EXIT();
+ return result;
+}
+
+static void ecm_ipa_deregister_tx(struct ecm_ipa_dev *dev)
+{
+ int result;
+ ECM_IPA_LOG_ENTRY();
+ result = ipa_deregister_intf(dev->net->name);
+ if (result)
+ ECM_IPA_DEBUG("Fail on Tx prop deregister\n");
+ ECM_IPA_LOG_EXIT();
+ return;
+}
+
+/**
+ * ecm_ipa_configure() - make IPA core end-point specific configuration
+ * @usb_to_ipa_hdl: handle of usb_to_ipa end-point for IPA driver
+ * @ipa_to_usb_hdl: handle of ipa_to_usb end-point for IPA driver
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ *
+ * Configure the usb_to_ipa and ipa_to_usb end-point registers
+ * - USB->IPA end-point: disable de-aggregation, enable link layer
+ * header removal (Ethernet removal), source NATing and default routing.
+ * - IPA->USB end-point: disable aggregation, add link layer header (Ethernet)
+ * - allocate Ethernet device
+ * - register to Linux network stack
+ *
+ * Returns negative errno, or zero on success
+ */
+int ecm_ipa_configure(u8 host_ethaddr[], u8 device_ethaddr[],
+ void *priv)
+{
+ struct ecm_ipa_dev *dev = priv;
+ struct net_device *net;
+ int result;
+ ECM_IPA_LOG_ENTRY();
+ NULL_CHECK(host_ethaddr);
+ NULL_CHECK(device_ethaddr);
+ NULL_CHECK(dev);
+ net = dev->net;
+ NULL_CHECK(net);
+ ECM_IPA_DEBUG("host_ethaddr=%pM device_ethaddr=%pM\n",
+ host_ethaddr, device_ethaddr);
+ result = ecm_ipa_create_rm_resource(dev);
+ if (result) {
+ ECM_IPA_ERROR("fail on RM create\n");
+ return -EINVAL;
+ }
+ ECM_IPA_DEBUG("RM resource was created\n");
+ netif_carrier_off(dev->net);
+ result = ecm_ipa_set_device_ethernet_addr(net->dev_addr,
+ device_ethaddr);
+ if (result) {
+ ECM_IPA_ERROR("set device MAC failed\n");
+ goto fail_set_device_ethernet;
+ }
+ result = ecm_ipa_rules_cfg(dev, host_ethaddr, device_ethaddr);
+ if (result) {
+ ECM_IPA_ERROR("fail on ipa rules set\n");
+ goto fail_set_device_ethernet;
+ }
+ ECM_IPA_DEBUG("Ethernet header insertion was set\n");
+ result = ecm_ipa_register_tx(dev);
+ if (result) {
+ ECM_IPA_ERROR("fail on properties set\n");
+ goto fail_register_tx;
+ }
+ ECM_IPA_DEBUG("ECM Tx properties were registered\n");
+ result = register_netdev(net);
+ if (result) {
+ ECM_IPA_ERROR("register_netdev failed: %d\n", result);
+ goto fail_register_netdev;
+ }
+ ECM_IPA_DEBUG("register_netdev succeeded\n");
+ ECM_IPA_LOG_EXIT();
+ return 0;
+fail_register_netdev:
+ ecm_ipa_deregister_tx(dev);
+fail_register_tx:
+fail_set_device_ethernet:
+ ecm_ipa_rules_destroy(dev);
+ ecm_ipa_destory_rm_resource();
+ free_netdev(net);
+ return result;
+}
+EXPORT_SYMBOL(ecm_ipa_configure);
+
+int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+ void *priv)
+{
+ struct ecm_ipa_dev *dev = priv;
+ ECM_IPA_LOG_ENTRY();
+ NULL_CHECK(priv);
+ ECM_IPA_DEBUG("usb_to_ipa_hdl = %d, ipa_to_usb_hdl = %d\n",
+ usb_to_ipa_hdl, ipa_to_usb_hdl);
+ if (!usb_to_ipa_hdl || usb_to_ipa_hdl >= IPA_CLIENT_MAX) {
+ ECM_IPA_ERROR("usb_to_ipa_hdl(%d) is not a valid ipa handle\n",
+ usb_to_ipa_hdl);
+ return -EINVAL;
+ }
+ if (!ipa_to_usb_hdl || ipa_to_usb_hdl >= IPA_CLIENT_MAX) {
+ ECM_IPA_ERROR("ipa_to_usb_hdl(%d) is not a valid ipa handle\n",
+ ipa_to_usb_hdl);
+ return -EINVAL;
+ }
+ dev->ipa_to_usb_hdl = ipa_to_usb_hdl;
+ dev->usb_to_ipa_hdl = usb_to_ipa_hdl;
+ ecm_ipa_ep_registers_cfg(usb_to_ipa_hdl, ipa_to_usb_hdl);
+ netif_carrier_on(dev->net);
+ if (!netif_carrier_ok(dev->net)) {
+ ECM_IPA_ERROR("netif_carrier_ok error\n");
+ return -EBUSY;
+ }
+ ECM_IPA_LOG_EXIT();
+ return 0;
+}
+EXPORT_SYMBOL(ecm_ipa_connect);
+
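+/*
+ * Hedged usage sketch (not part of this driver): a USB ECM function driver
+ * would roughly follow the ordering below. ipa_connect() and the pipe
+ * handles it returns live outside this file and are only assumed here;
+ * host_mac/device_mac are placeholder address buffers.
+ *
+ *	ecm_ipa_callback rx_cb, tx_cb;
+ *	void *ecm_priv;
+ *
+ *	ecm_ipa_init(&rx_cb, &tx_cb, &ecm_priv);
+ *	ecm_ipa_configure(host_mac, device_mac, ecm_priv);
+ *	(pass rx_cb/tx_cb to ipa_connect() and obtain the pipe handles)
+ *	ecm_ipa_connect(usb_to_ipa_hdl, ipa_to_usb_hdl, ecm_priv);
+ *	...
+ *	ecm_ipa_disconnect(ecm_priv);
+ *	ecm_ipa_cleanup(ecm_priv);
+ */
+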
+int ecm_ipa_disconnect(void *priv)
+{
+ struct ecm_ipa_dev *dev = priv;
+ ECM_IPA_LOG_ENTRY();
+ NULL_CHECK(dev);
+ netif_carrier_off(dev->net);
+ ECM_IPA_LOG_EXIT();
+ return 0;
+}
+EXPORT_SYMBOL(ecm_ipa_disconnect);
+
+
+static void ecm_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
+ unsigned long data)
+{
+ struct ecm_ipa_dev *dev = user_data;
+ ECM_IPA_LOG_ENTRY();
+ if (event == IPA_RM_RESOURCE_GRANTED &&
+ netif_queue_stopped(dev->net)) {
+ ECM_IPA_DEBUG("Resource Granted - waking queue\n");
+ netif_wake_queue(dev->net);
+ } else {
+ ECM_IPA_DEBUG("Resource released\n");
+ }
+ ECM_IPA_LOG_EXIT();
+}
+
+static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *dev)
+{
+ struct ipa_rm_create_params create_params = {0};
+ int result;
+ ECM_IPA_LOG_ENTRY();
+ create_params.name = IPA_RM_RESOURCE_STD_ECM_PROD;
+ create_params.reg_params.user_data = dev;
+ create_params.reg_params.notify_cb = ecm_ipa_rm_notify;
+ result = ipa_rm_create_resource(&create_params);
+ if (result) {
+ ECM_IPA_ERROR("Fail on ipa_rm_create_resource\n");
+ goto fail_rm_create;
+ }
+ ECM_IPA_DEBUG("rm client was created");
+
+ result = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_STD_ECM_PROD,
+ INACTIVITY_MSEC_DELAY);
+ if (result) {
+ ECM_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n");
+ goto fail_it;
+ }
+ ECM_IPA_DEBUG("rm_it client was created");
+ ECM_IPA_LOG_EXIT();
+ return 0;
+fail_it:
+fail_rm_create:
+ return result;
+}
+
+static void ecm_ipa_destory_rm_resource(void)
+{
+ ECM_IPA_LOG_ENTRY();
+ ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_STD_ECM_PROD);
+ ECM_IPA_LOG_EXIT();
+}
+
+static bool rx_filter(struct sk_buff *skb)
+{
+ struct ecm_ipa_dev *dev = netdev_priv(skb->dev);
+ return !dev->rx_enable;
+}
+
+static bool tx_filter(struct sk_buff *skb)
+{
+ struct ecm_ipa_dev *dev = netdev_priv(skb->dev);
+ return !dev->tx_enable;
+}
+
+static bool rm_enabled(struct ecm_ipa_dev *dev)
+{
+ return dev->rm_enable;
+}
+
+static int ecm_ipa_open(struct net_device *net)
+{
+ ECM_IPA_LOG_ENTRY();
+ netif_start_queue(net);
+ ECM_IPA_LOG_EXIT();
+ return 0;
+}
+
+static int ecm_ipa_stop(struct net_device *net)
+{
+ ECM_IPA_LOG_ENTRY();
+ ECM_IPA_DEBUG("stopping net device\n");
+ netif_stop_queue(net);
+ ECM_IPA_LOG_EXIT();
+ return 0;
+}
+
+/**
+ * ecm_ipa_cleanup() - destroy all ECM information
+ * @priv: main driver context parameters
+ *
+ */
+void ecm_ipa_cleanup(void *priv)
+{
+ struct ecm_ipa_dev *dev = priv;
+ ECM_IPA_LOG_ENTRY();
+ if (!dev) {
+ ECM_IPA_ERROR("dev NULL pointer\n");
+ return;
+ }
+ if (rm_enabled(dev)) {
+ ecm_ipa_destory_rm_resource();
+ ecm_ipa_debugfs_destroy(dev);
+ }
+ if (dev->net) {
+ unregister_netdev(dev->net);
+ free_netdev(dev->net);
+ }
+ ECM_IPA_DEBUG("cleanup done\n");
+ ecm_ipa_ctx = NULL;
+ ECM_IPA_LOG_EXIT();
+ return;
+}
+EXPORT_SYMBOL(ecm_ipa_cleanup);
+
+static int resource_request(struct ecm_ipa_dev *dev)
+{
+ int result = 0;
+ ECM_IPA_LOG_ENTRY();
+ if (!rm_enabled(dev))
+ goto out;
+ result = ipa_rm_inactivity_timer_request_resource(
+ IPA_RM_RESOURCE_STD_ECM_PROD);
+out:
+ ECM_IPA_LOG_EXIT();
+ return result;
+}
+
+static void resource_release(struct ecm_ipa_dev *dev)
+{
+ ECM_IPA_LOG_ENTRY();
+ if (!rm_enabled(dev))
+ goto out;
+ ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_STD_ECM_PROD);
+out:
+ ECM_IPA_LOG_EXIT();
+}
+
+/**
+ * ecm_ipa_start_xmit() - send data from APPs to USB core via IPA core
+ * @skb: packet received from Linux stack
+ * @net: the network device being used to send this packet
+ *
+ * Several conditions must be met in order to send the packet to IPA:
+ * - we are in a valid state where the queue is not stopped
+ * - Filter Tx switch is turned off
+ * - The resources required for actual Tx are all up
+ *
+ */
+static netdev_tx_t ecm_ipa_start_xmit(struct sk_buff *skb,
+ struct net_device *net)
+{
+ int ret;
+ netdev_tx_t status = NETDEV_TX_BUSY;
+ struct ecm_ipa_dev *dev = netdev_priv(net);
+ unsigned long flags;
+ ECM_IPA_LOG_ENTRY();
+ if (unlikely(netif_queue_stopped(net))) {
+ ECM_IPA_ERROR("interface queue is stopped\n");
+ goto out;
+ }
+ ECM_IPA_DEBUG("send (proto=0x%04x)\n", ntohs(skb->protocol));
+ if (unlikely(tx_filter(skb))) {
+ dev_kfree_skb_any(skb);
+ ECM_IPA_ERROR("packet got filtered out on Tx path\n");
+ status = NETDEV_TX_OK;
+ goto out;
+ }
+ ret = resource_request(dev);
+ if (ret) {
+ ECM_IPA_DEBUG("Waiting to resource\n");
+ netif_stop_queue(net);
+ goto resource_busy;
+ }
+ ECM_IPA_DEBUG("taking ack_lock\n");
+ spin_lock_irqsave(&dev->ack_spinlock, flags);
+ ECM_IPA_DEBUG("ack_lock taken\n");
+ if (dev->last_out_skb) {
+ ECM_IPA_DEBUG("No Tx-ack received for previous packet\n");
+ ECM_IPA_DEBUG("releasing ack_lock\n");
+ spin_unlock_irqrestore(&dev->ack_spinlock, flags);
+ ECM_IPA_DEBUG("ack_lock released\n");
+ netif_stop_queue(net);
+ status = NETDEV_TX_BUSY;
+ goto out;
+ } else {
+ dev->last_out_skb = skb;
+ }
+ ECM_IPA_DEBUG("releasing ack_lock\n");
+ spin_unlock_irqrestore(&dev->ack_spinlock, flags);
+ ECM_IPA_DEBUG("ack_lock released\n");
+ sk_buff_print(skb);
+ ECM_IPA_DEBUG("ipa_tx_dp is called (dst_client=%d)\n",
+ IPA_TO_USB_CLIENT);
+ ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL);
+ if (ret) {
+ ECM_IPA_ERROR("ipa transmit failed (%d)\n", ret);
+ goto fail_tx_packet;
+ }
+ net->stats.tx_packets++;
+ net->stats.tx_bytes += skb->len;
+ ECM_IPA_LOG_EXIT();
+ status = NETDEV_TX_OK;
+ goto out;
+fail_tx_packet:
+out:
+ resource_release(dev);
+resource_busy:
+ ECM_IPA_LOG_EXIT();
+ return status;
+}
+
+/**
+ * ecm_ipa_packet_receive_notify() - Rx notify
+ *
+ * @priv: ecm driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * IPA will pass a packet with skb->data pointing to Ethernet packet frame
+ */
+void ecm_ipa_packet_receive_notify(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ struct sk_buff *skb = (struct sk_buff *)data;
+ struct ecm_ipa_dev *dev = priv;
+ int result;
+ ECM_IPA_LOG_ENTRY();
+ if (evt != IPA_RECEIVE) {
+ ECM_IPA_ERROR("A none IPA_RECEIVE event in ecm_ipa_receive\n");
+ return;
+ }
+ ECM_IPA_DEBUG("receive\n");
+ sk_buff_print(skb);
+ skb->dev = dev->net;
+ skb->protocol = eth_type_trans(skb, dev->net);
+ if (rx_filter(skb)) {
+ ECM_IPA_ERROR("packet got filtered out on Rx path\n");
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ ECM_IPA_DEBUG("kernel stack Rx is called\n");
+ result = netif_rx(skb);
+ if (result)
+ ECM_IPA_ERROR("fail on netif_rx\n");
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += skb->len;
+ ECM_IPA_LOG_EXIT();
+ return;
+}
+
+/**
+ * ecm_ipa_tx_complete_notify() - Tx completion notify
+ *
+ * @priv: ecm driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * Check that the packet is the one we sent and release it
+ * This function will be called in deferred context in the IPA wq.
+ */
+void ecm_ipa_tx_complete_notify(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ struct sk_buff *skb = (struct sk_buff *)data;
+ struct ecm_ipa_dev *dev = priv;
+ unsigned long flags;
+ ECM_IPA_LOG_ENTRY();
+
+ if (!dev) {
+ ECM_IPA_ERROR("dev is NULL pointer\n");
+ return;
+ }
+ if (evt != IPA_WRITE_DONE) {
+ ECM_IPA_ERROR("unsupported event on Tx callback\n");
+ return;
+ }
+ ECM_IPA_DEBUG("taking ack_lock\n");
+ spin_lock_irqsave(&dev->ack_spinlock, flags);
+ ECM_IPA_DEBUG("ack_lock taken\n");
+ if (skb != dev->last_out_skb)
+ ECM_IPA_ERROR("ACKed/Sent not the same(FIFO expected)\n");
+ dev->last_out_skb = NULL;
+ ECM_IPA_DEBUG("releasing ack_lock\n");
+ spin_unlock_irqrestore(&dev->ack_spinlock, flags);
+ ECM_IPA_DEBUG("ack_lock released\n");
+ if (netif_queue_stopped(dev->net)) {
+ ECM_IPA_DEBUG("waking up queue\n");
+ netif_wake_queue(dev->net);
+ }
+ dev_kfree_skb_any(skb);
+ ECM_IPA_LOG_EXIT();
+ return;
+}
+
+static int ecm_ipa_debugfs_tx_open(struct inode *inode, struct file *file)
+{
+ struct ecm_ipa_dev *dev = inode->i_private;
+ ECM_IPA_LOG_ENTRY();
+ file->private_data = &(dev->tx_enable);
+ ECM_IPA_LOG_EXIT();
+ return 0;
+}
+
+static int ecm_ipa_debugfs_rx_open(struct inode *inode, struct file *file)
+{
+ struct ecm_ipa_dev *dev = inode->i_private;
+ ECM_IPA_LOG_ENTRY();
+ file->private_data = &(dev->rx_enable);
+ ECM_IPA_LOG_EXIT();
+ return 0;
+}
+
+static int ecm_ipa_debugfs_rm_open(struct inode *inode, struct file *file)
+{
+ struct ecm_ipa_dev *dev = inode->i_private;
+ ECM_IPA_LOG_ENTRY();
+ file->private_data = &(dev->rm_enable);
+ ECM_IPA_LOG_EXIT();
+ return 0;
+}
+
+static ssize_t ecm_ipa_debugfs_enable_write_dma(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct ecm_ipa_dev *dev = file->private_data;
+ int result;
+ ECM_IPA_LOG_ENTRY();
+ file->private_data = &dev->dma_enable;
+ result = ecm_ipa_debugfs_enable_write(file, buf, count, ppos);
+ if (dev->dma_enable)
+ ecm_ipa_ep_registers_dma_cfg(dev->usb_to_ipa_hdl);
+ else
+ ecm_ipa_ep_registers_cfg(dev->usb_to_ipa_hdl,
+ dev->usb_to_ipa_hdl);
+ ECM_IPA_LOG_EXIT();
+ return result;
+}
+
+static int ecm_ipa_debugfs_dma_open(struct inode *inode, struct file *file)
+{
+ struct ecm_ipa_dev *dev = inode->i_private;
+ ECM_IPA_LOG_ENTRY();
+ file->private_data = dev;
+ ECM_IPA_LOG_EXIT();
+ return 0;
+}
+
+static ssize_t ecm_ipa_debugfs_enable_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ char input;
+ bool *enable = file->private_data;
+ if (count != sizeof(input) + 1) {
+ ECM_IPA_ERROR("wrong input length(%zd)\n", count);
+ return -EINVAL;
+ }
+ if (!buf) {
+ ECM_IPA_ERROR("Bad argument\n");
+ return -EINVAL;
+ }
+ missing = copy_from_user(&input, buf, 1);
+ if (missing)
+ return -EFAULT;
+ ECM_IPA_DEBUG("input received %c\n", input);
+ *enable = input - '0';
+ ECM_IPA_DEBUG("value was set to %d\n", *enable);
+ return count;
+}
+
+static ssize_t ecm_ipa_debugfs_enable_read(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ int nbytes;
+ int size = 0;
+ int ret;
+ loff_t pos;
+ u8 enable_str[sizeof(char)*3] = {0};
+ bool *enable = file->private_data;
+ pos = *ppos;
+ nbytes = scnprintf(enable_str, sizeof(enable_str), "%d\n", *enable);
+ ret = simple_read_from_buffer(ubuf, count, ppos, enable_str, nbytes);
+ if (ret < 0) {
+ ECM_IPA_ERROR("simple_read_from_buffer problem");
+ return ret;
+ }
+ size += ret;
+ count -= nbytes;
+ *ppos = pos + size;
+ return size;
+}
+
+static int ecm_ipa_debugfs_init(struct ecm_ipa_dev *dev)
+{
+ const mode_t flags = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP | S_IWOTH;
+ int ret = -EINVAL;
+ ECM_IPA_LOG_ENTRY();
+ if (!dev)
+ return -EINVAL;
+ dev->folder = debugfs_create_dir("ecm_ipa", NULL);
+ if (!dev->folder) {
+ ECM_IPA_ERROR("could not create debugfs folder entry\n");
+ ret = -EFAULT;
+ goto fail_folder;
+ }
+ dev->tx_file = debugfs_create_file("tx_enable", flags, dev->folder, dev,
+ &ecm_ipa_debugfs_tx_ops);
+ if (!dev->tx_file) {
+ ECM_IPA_ERROR("could not create debugfs tx file\n");
+ ret = -EFAULT;
+ goto fail_file;
+ }
+ dev->rx_file = debugfs_create_file("rx_enable", flags, dev->folder, dev,
+ &ecm_ipa_debugfs_rx_ops);
+ if (!dev->rx_file) {
+ ECM_IPA_ERROR("could not create debugfs rx file\n");
+ ret = -EFAULT;
+ goto fail_file;
+ }
+ dev->rm_file = debugfs_create_file("rm_enable", flags, dev->folder, dev,
+ &ecm_ipa_debugfs_rm_ops);
+ if (!dev->rm_file) {
+ ECM_IPA_ERROR("could not create debugfs rm file\n");
+ ret = -EFAULT;
+ goto fail_file;
+ }
+ dev->dma_file = debugfs_create_file("dma_enable", flags, dev->folder,
+ dev, &ecm_ipa_debugfs_dma_ops);
+ if (!dev->dma_file) {
+ ECM_IPA_ERROR("could not create debugfs dma file\n");
+ ret = -EFAULT;
+ goto fail_file;
+ }
+ ECM_IPA_LOG_EXIT();
+ return 0;
+fail_file:
+ debugfs_remove_recursive(dev->folder);
+fail_folder:
+ return ret;
+}
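+
+/*
+ * Usage note (sketch, assuming debugfs is mounted at /sys/kernel/debug):
+ * each switch created above expects a single digit followed by a newline.
+ * Writing '0' to /sys/kernel/debug/ecm_ipa/tx_enable makes tx_filter() drop
+ * packets on the Tx path, and writing '1' restores normal operation; the
+ * rx_enable, rm_enable and dma_enable switches take the same input for
+ * their respective flags.
+ */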
+
+static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *dev)
+{
+ debugfs_remove_recursive(dev->folder);
+}
+
+static void eth_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *drv_info)
+{
+ ECM_IPA_LOG_ENTRY();
+ strlcpy(drv_info->driver, DRIVER_NAME, sizeof(drv_info->driver));
+ strlcpy(drv_info->version, DRIVER_VERSION, sizeof(drv_info->version));
+ ECM_IPA_LOG_EXIT();
+}
+
+
+/**
+ * ecm_ipa_ep_registers_cfg() - configure the USB end-points for ECM
+ *
+ * @usb_to_ipa_hdl: handle received from ipa_connect
+ * @ipa_to_usb_hdl: handle received from ipa_connect
+ *
+ * USB to IPA pipe:
+ * - No de-aggregation
+ * - Remove Ethernet header
+ * - SRC NAT
+ * - Default routing(0)
+ * IPA to USB Pipe:
+ * - No aggregation
+ * - Add Ethernet header
+ */
+int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl)
+{
+ int result = 0;
+ struct ipa_ep_cfg usb_to_ipa_ep_cfg;
+ struct ipa_ep_cfg ipa_to_usb_ep_cfg;
+ ECM_IPA_LOG_ENTRY();
+ memset(&usb_to_ipa_ep_cfg, 0 , sizeof(struct ipa_ep_cfg));
+ usb_to_ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+ usb_to_ipa_ep_cfg.hdr.hdr_len = ETH_HLEN;
+ usb_to_ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT;
+ usb_to_ipa_ep_cfg.route.rt_tbl_hdl = 0;
+ usb_to_ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+ usb_to_ipa_ep_cfg.mode.mode = IPA_BASIC;
+ result = ipa_cfg_ep(usb_to_ipa_hdl, &usb_to_ipa_ep_cfg);
+ if (result) {
+ ECM_IPA_ERROR("failed to configure USB to IPA point\n");
+ goto out;
+ }
+ memset(&ipa_to_usb_ep_cfg, 0 , sizeof(struct ipa_ep_cfg));
+ ipa_to_usb_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+ ipa_to_usb_ep_cfg.hdr.hdr_len = ETH_HLEN;
+ ipa_to_usb_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+ result = ipa_cfg_ep(ipa_to_usb_hdl, &ipa_to_usb_ep_cfg);
+ if (result) {
+ ECM_IPA_ERROR("failed to configure IPA to USB end-point\n");
+ goto out;
+ }
+ ECM_IPA_DEBUG("end-point registers successfully configured\n");
+out:
+ ECM_IPA_LOG_EXIT();
+ return result;
+}
+
+/**
+ * ecm_ipa_ep_registers_dma_cfg() - configure the USB endpoints for ECM
+ * DMA
+ * @usb_to_ipa_hdl: handle received from ipa_connect
+ *
+ * This function will override the previous configuration
+ * which is needed for cores that do not support the blocks logic.
+ * Note that client handles are the actual pipe indices.
+ */
+int ecm_ipa_ep_registers_dma_cfg(u32 usb_to_ipa_hdl)
+{
+ int result = 0;
+ struct ipa_ep_cfg_mode cfg_mode;
+ u32 apps_to_ipa_hdl = 2;
+ ECM_IPA_LOG_ENTRY();
+ /* Apps to IPA - override the configuration made by the IPA driver
+ * in order to allow the data path on older platforms */
+ memset(&cfg_mode, 0 , sizeof(cfg_mode));
+ cfg_mode.mode = IPA_DMA;
+ cfg_mode.dst = IPA_CLIENT_USB_CONS;
+ result = ipa_cfg_ep_mode(apps_to_ipa_hdl, &cfg_mode);
+ if (result) {
+ ECM_IPA_ERROR("failed to configure Apps to IPA\n");
+ goto out;
+ }
+ memset(&cfg_mode, 0 , sizeof(cfg_mode));
+ cfg_mode.mode = IPA_DMA;
+ cfg_mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+ result = ipa_cfg_ep_mode(usb_to_ipa_hdl, &cfg_mode);
+ if (result) {
+ ECM_IPA_ERROR("failed to configure USB to IPA\n");
+ goto out;
+ }
+ ECM_IPA_DEBUG("end-point registers successfully configured\n");
+out:
+ ECM_IPA_LOG_EXIT();
+ return result;
+}
+
+static void ecm_ipa_dump_buff(u8 *buff, u32 byte_size)
+{
+ int i;
+ ECM_IPA_DEBUG("ofst(hex), addr(hex), data(hex), value(char):\n");
+ for (i = 0 ; i < byte_size; i += 4) {
+ ECM_IPA_DEBUG("%2x %p %02x %02x %02x %02x | %c %c %c %c\n",
+ i, &buff[i],
+ buff[i], buff[i+1], buff[i+2], buff[i+3],
+ buff[i], buff[i+1], buff[i+2], buff[i+3]);
+ }
+}
+
+/**
+ * sk_buff_print() - detailed sk_buff printouts
+ * @skb: the socket buff
+ */
+void sk_buff_print(struct sk_buff *skb)
+{
+ ECM_IPA_DEBUG("called by: %s\n", current->comm);
+ ECM_IPA_DEBUG("skb->next=0x%p, skb->prev=0x%p, skb->sk=0x%p\n",
+ skb->next, skb->prev, skb->sk);
+ ECM_IPA_DEBUG("skb->len=0x%x, skb->data_len=0x%x protocol=0x%04x\n",
+ skb->len, skb->data_len, skb->protocol);
+ ECM_IPA_DEBUG("skb->mac_len=0x%x, skb->hdr_len=0x%x, skb->csum=%x\n",
+ skb->mac_len, skb->hdr_len, skb->csum);
+
+ ECM_IPA_DEBUG("mac_header = 0x%p\n", skb_mac_header(skb));
+ ECM_IPA_DEBUG("network_header = 0x%p\n", skb_network_header(skb));
+ ECM_IPA_DEBUG("transport_header=0x%p\n", skb_transport_header(skb));
+
+ ECM_IPA_DEBUG("skb->head=0x%p\n", skb->head);
+ ECM_IPA_DEBUG("skb->data=0x%p\n", skb->data);
+ ECM_IPA_DEBUG("tail=0x%p\n", skb_tail_pointer(skb));
+ ECM_IPA_DEBUG("end =0x%p\n", skb_end_pointer(skb));
+ ECM_IPA_DEBUG("skb->truesize=0x%x (buffer size)\n",
+ skb->truesize);
+ ecm_ipa_dump_buff(skb->data, skb->len);
+}
+
+/**
+ * ecm_ipa_set_device_ethernet_addr() - set the device Ethernet address
+ * @dev_ethaddr: buffer to receive the device Ethernet address (net dev_addr)
+ * @device_ethaddr: device Ethernet address in network order
+ *
+ * Returns 0 for success, negative otherwise
+ */
+int ecm_ipa_set_device_ethernet_addr(u8 *dev_ethaddr, u8 device_ethaddr[])
+{
+ if (!is_valid_ether_addr(device_ethaddr))
+ return -EINVAL;
+ memcpy(dev_ethaddr, device_ethaddr, ETH_ALEN);
+ ECM_IPA_DEBUG("device ethernet address: %pM\n", dev_ethaddr);
+ return 0;
+}
+
+/**
+ * ecm_ipa_init_module() - module initialization
+ *
+ */
+static int ecm_ipa_init_module(void)
+{
+ ECM_IPA_LOG_ENTRY();
+ ECM_IPA_LOG_EXIT();
+ return 0;
+}
+
+/**
+ * ecm_ipa_cleanup_module() - module cleanup
+ *
+ */
+static void ecm_ipa_cleanup_module(void)
+{
+ ECM_IPA_LOG_ENTRY();
+ ECM_IPA_LOG_EXIT();
+ return;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ECM IPA network interface");
+
+late_initcall(ecm_ipa_init_module);
+module_exit(ecm_ipa_cleanup_module);
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 91ecad7..ed4e246 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -976,12 +976,11 @@
if (has_pronto_hw) {
has_48mhz_xo = of_property_read_bool(pdev->dev.of_node,
"qcom,has_48mhz_xo");
- penv->wcnss_hw_type = WCNSS_PRONTO_HW;
} else {
- penv->wcnss_hw_type = WCNSS_RIVA_HW;
has_48mhz_xo = pdata->has_48mhz_xo;
}
}
+ penv->wcnss_hw_type = (has_pronto_hw) ? WCNSS_PRONTO_HW : WCNSS_RIVA_HW;
penv->wlan_config.use_48mhz_xo = has_48mhz_xo;
penv->thermal_mitigation = 0;
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index f4dff66..76e3175 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -101,4 +101,14 @@
This driver gets the Q6 out of power collapsed state and
exposes ioctl control to read avtimer tick.
+config SSM
+ tristate "Qualcomm Secure Service Module"
+ depends on QSEECOM
+ depends on MSM_SMD
+ help
+ Provides an interface for an OEM driver to communicate with Trustzone
+ and the modem for key exchange and mode change.
+ This driver uses the Secure Channel Manager interface for Trustzone
+ communication and communicates with the modem over an SMD channel.
+
endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index a679fb9..289ece9 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -10,3 +10,4 @@
obj-$(CONFIG_QPNP_VIBRATOR) += qpnp-vibrator.o
obj-$(CONFIG_QPNP_CLKDIV) += qpnp-clkdiv.o
obj-$(CONFIG_MSM_AVTIMER) += avtimer.o
+obj-$(CONFIG_SSM) += ssm.o
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
index c541eb7..a25c799 100644
--- a/drivers/platform/msm/ipa/Makefile
+++ b/drivers/platform/msm/ipa/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_IPA) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
- ipa_utils.o ipa_nat.o rmnet_bridge.o a2_service.o ipa_bridge.o ipa_intf.o
+ ipa_utils.o ipa_nat.o rmnet_bridge.o a2_service.o ipa_bridge.o ipa_intf.o \
+ ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o
diff --git a/drivers/platform/msm/ipa/a2_service.c b/drivers/platform/msm/ipa/a2_service.c
index 0ae2552..2c5245c 100644
--- a/drivers/platform/msm/ipa/a2_service.c
+++ b/drivers/platform/msm/ipa/a2_service.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,267 +10,1369 @@
* GNU General Public License for more details.
*/
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <mach/bam_dmux.h>
-#include <mach/ipa.h>
+/*
+ * A2 service component
+ */
+
+#include <net/ip.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/clk.h>
+#include <linux/wakelock.h>
#include <mach/sps.h>
+#include <mach/msm_smsm.h>
+#include <mach/socinfo.h>
+#include <mach/ipa.h>
#include "ipa_i.h"
-static struct a2_service_cb_type {
- void *tx_complete_cb;
- void *rx_cb;
- u32 producer_handle;
- u32 consumer_handle;
-} a2_service_cb;
+#define A2_NUM_PIPES 6
+#define A2_SUMMING_THRESHOLD 4096
+#define BUFFER_SIZE 2048
+#define NUM_BUFFERS 32
+#define BAM_CH_LOCAL_OPEN 0x1
+#define BAM_CH_REMOTE_OPEN 0x2
+#define BAM_CH_IN_RESET 0x4
+#define BAM_MUX_HDR_MAGIC_NO 0x33fc
+#define BAM_MUX_HDR_CMD_DATA 0
+#define BAM_MUX_HDR_CMD_OPEN 1
+#define BAM_MUX_HDR_CMD_CLOSE 2
+#define BAM_MUX_HDR_CMD_STATUS 3
+#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC 4
+#define LOW_WATERMARK 2
+#define HIGH_WATERMARK 4
+#define A2_MUX_COMPLETION_TIMEOUT (60*HZ)
+#define ENABLE_DISCONNECT_ACK 0x1
+#define A2_MUX_PADDING_LENGTH(len) (4 - ((len) & 0x3))
-static struct sps_mem_buffer data_mem_buf[2];
-static struct sps_mem_buffer desc_mem_buf[2];
+struct bam_ch_info {
+ u32 status;
+ a2_mux_notify_cb notify_cb;
+ void *user_data;
+ spinlock_t lock;
+ int num_tx_pkts;
+ int use_wm;
+};
+struct tx_pkt_info {
+ struct sk_buff *skb;
+ char is_cmd;
+ u32 len;
+ struct list_head list_node;
+ unsigned ts_sec;
+ unsigned long ts_nsec;
+};
+struct bam_mux_hdr {
+ u16 magic_num;
+ u8 reserved;
+ u8 cmd;
+ u8 pad_len;
+ u8 ch_id;
+ u16 pkt_len;
+};
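+/*
+ * Illustrative layout (sketch): an OPEN command for logical channel 0 would
+ * be carried in a header such as
+ *	struct bam_mux_hdr hdr = {
+ *		.magic_num = BAM_MUX_HDR_MAGIC_NO,
+ *		.cmd = BAM_MUX_HDR_CMD_OPEN,
+ *		.ch_id = 0,
+ *		.pkt_len = 0,
+ *		.pad_len = 0,
+ *	};
+ * Data frames use BAM_MUX_HDR_CMD_DATA with a non-zero pkt_len and are
+ * padded as computed by A2_MUX_PADDING_LENGTH().
+ */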
+struct a2_mux_context_type {
+ u32 tethered_prod;
+ u32 tethered_cons;
+ u32 embedded_prod;
+ u32 embedded_cons;
+ int a2_mux_apps_pc_enabled;
+ struct work_struct kickoff_ul_wakeup;
+ struct work_struct kickoff_ul_power_down;
+ struct work_struct kickoff_ul_request_resource;
+ struct bam_ch_info bam_ch[A2_MUX_NUM_CHANNELS];
+ struct list_head bam_tx_pool;
+ spinlock_t bam_tx_pool_spinlock;
+ struct workqueue_struct *a2_mux_tx_workqueue;
+ int a2_mux_initialized;
+ bool bam_is_connected;
+ int a2_mux_send_power_vote_on_init_once;
+ int a2_mux_sw_bridge_is_connected;
+ u32 a2_device_handle;
+ struct mutex wakeup_lock;
+ struct completion ul_wakeup_ack_completion;
+ struct completion bam_connection_completion;
+ struct completion request_resource_completion;
+ rwlock_t ul_wakeup_lock;
+ int wait_for_ack;
+ struct wake_lock bam_wakelock;
+ int a2_pc_disabled;
+ spinlock_t wakelock_reference_lock;
+ int wakelock_reference_count;
+ int a2_pc_disabled_wakelock_skipped;
+ int disconnect_ack;
+ struct mutex smsm_cb_lock;
+ int bam_dmux_uplink_vote;
+};
+static struct a2_mux_context_type *a2_mux_ctx;
-static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
- u8 *usb_pipe_idx,
- u32 *clnt_hdl,
- struct sps_pipe *pipe);
+static void handle_bam_mux_cmd(struct sk_buff *rx_skb);
-static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
- struct ipa_sps_params *out_params, u32 *clnt_hdl);
+static bool bam_ch_is_open(int index)
+{
+ return a2_mux_ctx->bam_ch[index].status ==
+ (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN);
+}
+
+static bool bam_ch_is_local_open(int index)
+{
+ return a2_mux_ctx->bam_ch[index].status &
+ BAM_CH_LOCAL_OPEN;
+}
+
+static bool bam_ch_is_remote_open(int index)
+{
+ return a2_mux_ctx->bam_ch[index].status &
+ BAM_CH_REMOTE_OPEN;
+}
+
+static bool bam_ch_is_in_reset(int index)
+{
+ return a2_mux_ctx->bam_ch[index].status &
+ BAM_CH_IN_RESET;
+}
+
+static void set_tx_timestamp(struct tx_pkt_info *pkt)
+{
+ unsigned long long t_now;
+
+ t_now = sched_clock();
+ pkt->ts_nsec = do_div(t_now, 1000000000U);
+ pkt->ts_sec = (unsigned)t_now;
+}
+
+static void verify_tx_queue_is_empty(const char *func)
+{
+ unsigned long flags;
+ struct tx_pkt_info *info;
+ int reported = 0;
+
+ spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+ list_for_each_entry(info, &a2_mux_ctx->bam_tx_pool, list_node) {
+ if (!reported) {
+ IPADBG("%s: tx pool not empty\n", func);
+ reported = 1;
+ }
+ IPADBG("%s: node=%p ts=%u.%09lu\n", __func__,
+ &info->list_node, info->ts_sec, info->ts_nsec);
+ }
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+}
+
+static void grab_wakelock(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a2_mux_ctx->wakelock_reference_lock, flags);
+ IPADBG("%s: ref count = %d\n",
+ __func__,
+ a2_mux_ctx->wakelock_reference_count);
+ if (a2_mux_ctx->wakelock_reference_count == 0)
+ wake_lock(&a2_mux_ctx->bam_wakelock);
+ ++a2_mux_ctx->wakelock_reference_count;
+ spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock, flags);
+}
+
+static void release_wakelock(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a2_mux_ctx->wakelock_reference_lock, flags);
+ if (a2_mux_ctx->wakelock_reference_count == 0) {
+ IPAERR("%s: bam_dmux wakelock not locked\n", __func__);
+ dump_stack();
+ spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock,
+ flags);
+ return;
+ }
+ IPADBG("%s: ref count = %d\n",
+ __func__,
+ a2_mux_ctx->wakelock_reference_count);
+ --a2_mux_ctx->wakelock_reference_count;
+ if (a2_mux_ctx->wakelock_reference_count == 0)
+ wake_unlock(&a2_mux_ctx->bam_wakelock);
+ spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock, flags);
+}
+
+static void toggle_apps_ack(void)
+{
+ static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
+
+ IPADBG("%s: apps ack %d->%d\n", __func__,
+ clear_bit & 0x1, ~clear_bit & 0x1);
+ smsm_change_state(SMSM_APPS_STATE,
+ clear_bit & SMSM_A2_POWER_CONTROL_ACK,
+ ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
+ clear_bit = ~clear_bit;
+}
+
+static void power_vote(int vote)
+{
+ IPADBG("%s: curr=%d, vote=%d\n",
+ __func__,
+ a2_mux_ctx->bam_dmux_uplink_vote, vote);
+ if (a2_mux_ctx->bam_dmux_uplink_vote == vote)
+ IPADBG("%s: warning - duplicate power vote\n", __func__);
+ a2_mux_ctx->bam_dmux_uplink_vote = vote;
+ if (vote)
+ smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
+ else
+ smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
+}
+
+static inline void ul_powerdown(void)
+{
+ IPADBG("%s: powerdown\n", __func__);
+ verify_tx_queue_is_empty(__func__);
+ if (a2_mux_ctx->a2_pc_disabled)
+ release_wakelock();
+ else {
+ a2_mux_ctx->wait_for_ack = 1;
+ INIT_COMPLETION(a2_mux_ctx->ul_wakeup_ack_completion);
+ power_vote(0);
+ }
+ a2_mux_ctx->bam_is_connected = false;
+}
+
+static void ul_wakeup(void)
+{
+ int ret;
+
+ mutex_lock(&a2_mux_ctx->wakeup_lock);
+ if (a2_mux_ctx->bam_is_connected) {
+ IPADBG("%s Already awake\n", __func__);
+ mutex_unlock(&a2_mux_ctx->wakeup_lock);
+ return;
+ }
+ if (a2_mux_ctx->a2_pc_disabled) {
+ /*
+ * don't grab the wakelock the first time because it is
+ * already grabbed when a2 powers on
+ */
+ if (likely(a2_mux_ctx->a2_pc_disabled_wakelock_skipped))
+ grab_wakelock();
+ else
+ a2_mux_ctx->a2_pc_disabled_wakelock_skipped = 1;
+ a2_mux_ctx->bam_is_connected = true;
+ mutex_unlock(&a2_mux_ctx->wakeup_lock);
+ return;
+ }
+ /*
+ * must wait for the previous power down request to have been acked
+ * chances are it already came in and this will just fall through
+ * instead of waiting
+ */
+ if (a2_mux_ctx->wait_for_ack) {
+ IPADBG("%s waiting for previous ack\n", __func__);
+ ret = wait_for_completion_timeout(
+ &a2_mux_ctx->ul_wakeup_ack_completion,
+ A2_MUX_COMPLETION_TIMEOUT);
+ a2_mux_ctx->wait_for_ack = 0;
+ if (unlikely(ret == 0)) {
+ IPADBG("%s timeout previous ack\n", __func__);
+ goto bail;
+ }
+ }
+ INIT_COMPLETION(a2_mux_ctx->ul_wakeup_ack_completion);
+ power_vote(1);
+ IPADBG("%s waiting for wakeup ack\n", __func__);
+ ret = wait_for_completion_timeout(&a2_mux_ctx->ul_wakeup_ack_completion,
+ A2_MUX_COMPLETION_TIMEOUT);
+ if (unlikely(ret == 0)) {
+ IPADBG("%s timeout wakeup ack\n", __func__);
+ goto bail;
+ }
+ INIT_COMPLETION(a2_mux_ctx->bam_connection_completion);
+ if (!a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
+ ret = wait_for_completion_timeout(
+ &a2_mux_ctx->bam_connection_completion,
+ A2_MUX_COMPLETION_TIMEOUT);
+ if (unlikely(ret == 0)) {
+ IPADBG("%s timeout power on\n", __func__);
+ goto bail;
+ }
+ }
+ a2_mux_ctx->bam_is_connected = true;
+ IPADBG("%s complete\n", __func__);
+ mutex_unlock(&a2_mux_ctx->wakeup_lock);
+ return;
+bail:
+ mutex_unlock(&a2_mux_ctx->wakeup_lock);
+ BUG();
+ return;
+}
+
+static void bam_mux_write_done(bool is_tethered, struct sk_buff *skb)
+{
+ struct tx_pkt_info *info;
+ enum a2_mux_logical_channel_id lcid;
+ unsigned long event_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+ info = list_first_entry(&a2_mux_ctx->bam_tx_pool,
+ struct tx_pkt_info, list_node);
+ if (unlikely(info->skb != skb)) {
+ struct tx_pkt_info *errant_pkt;
+
+ IPAERR("tx_pool mismatch next=%p list_node=%p, ts=%u.%09lu\n",
+ a2_mux_ctx->bam_tx_pool.next,
+ &info->list_node,
+ info->ts_sec, info->ts_nsec
+ );
+
+ list_for_each_entry(errant_pkt,
+ &a2_mux_ctx->bam_tx_pool, list_node) {
+ IPAERR("%s: node=%p ts=%u.%09lu\n", __func__,
+ &errant_pkt->list_node, errant_pkt->ts_sec,
+ errant_pkt->ts_nsec);
+ if (errant_pkt->skb == skb)
+ info = errant_pkt;
+
+ }
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+ flags);
+ BUG();
+ }
+ list_del(&info->list_node);
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+ if (info->is_cmd) {
+ dev_kfree_skb_any(info->skb);
+ kfree(info);
+ return;
+ }
+ skb = info->skb;
+ kfree(info);
+ event_data = (unsigned long)(skb);
+ if (is_tethered)
+ lcid = A2_MUX_TETHERED_0;
+ else {
+ struct bam_mux_hdr *hdr = (struct bam_mux_hdr *)skb->data;
+ lcid = (enum a2_mux_logical_channel_id) hdr->ch_id;
+ }
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ a2_mux_ctx->bam_ch[lcid].num_tx_pkts--;
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ if (a2_mux_ctx->bam_ch[lcid].notify_cb)
+ a2_mux_ctx->bam_ch[lcid].notify_cb(
+ a2_mux_ctx->bam_ch[lcid].user_data, A2_MUX_WRITE_DONE,
+ event_data);
+ else
+ dev_kfree_skb_any(skb);
+}
+
+static void kickoff_ul_power_down_func(struct work_struct *work)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&a2_mux_ctx->ul_wakeup_lock, flags);
+ if (a2_mux_ctx->bam_is_connected) {
+ IPADBG("%s: UL active - forcing powerdown\n", __func__);
+ ul_powerdown();
+ }
+ write_unlock_irqrestore(&a2_mux_ctx->ul_wakeup_lock, flags);
+ ipa_rm_notify_completion(IPA_RM_RESOURCE_RELEASED,
+ IPA_RM_RESOURCE_A2_CONS);
+}
+
+static void kickoff_ul_wakeup_func(struct work_struct *work)
+{
+ if (!a2_mux_ctx->bam_is_connected)
+ ul_wakeup();
+ ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
+ IPA_RM_RESOURCE_A2_CONS);
+}
+
+static void kickoff_ul_request_resource_func(struct work_struct *work)
+{
+ int ret;
+
+ INIT_COMPLETION(a2_mux_ctx->request_resource_completion);
+ ret = ipa_rm_request_resource(IPA_RM_RESOURCE_A2_PROD);
+ if (ret < 0 && ret != -EINPROGRESS) {
+ IPAERR("%s: ipa_rm_request_resource failed %d\n", __func__,
+ ret);
+ return;
+ }
+ if (ret == -EINPROGRESS) {
+ ret = wait_for_completion_timeout(
+ &a2_mux_ctx->request_resource_completion,
+ A2_MUX_COMPLETION_TIMEOUT);
+ if (unlikely(ret == 0)) {
+ IPADBG("%s timeout request A2 PROD resource\n",
+ __func__);
+ BUG();
+ return;
+ }
+ }
+ toggle_apps_ack();
+}
+
+static bool msm_bam_dmux_kickoff_ul_wakeup(void)
+{
+ bool is_connected;
+
+ read_lock(&a2_mux_ctx->ul_wakeup_lock);
+ is_connected = a2_mux_ctx->bam_is_connected;
+ read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+ if (!is_connected)
+ queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
+ &a2_mux_ctx->kickoff_ul_wakeup);
+ return is_connected;
+}
+
+static bool msm_bam_dmux_kickoff_ul_power_down(void)
+
+{
+ bool is_connected;
+
+ read_lock(&a2_mux_ctx->ul_wakeup_lock);
+ is_connected = a2_mux_ctx->bam_is_connected;
+ read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+ if (is_connected)
+ queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
+ &a2_mux_ctx->kickoff_ul_power_down);
+ return is_connected;
+}
+
+static void ipa_embedded_notify(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ switch (evt) {
+ case IPA_RECEIVE:
+ handle_bam_mux_cmd((struct sk_buff *)data);
+ break;
+ case IPA_WRITE_DONE:
+ bam_mux_write_done(false, (struct sk_buff *)data);
+ break;
+ default:
+ IPAERR("%s: Unknown event %d\n", __func__, evt);
+ break;
+ }
+}
+
+static void ipa_tethered_notify(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ IPADBG("%s: event = %d\n", __func__, evt);
+ switch (evt) {
+ case IPA_RECEIVE:
+ if (a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].notify_cb)
+ a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].notify_cb(
+ a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].user_data,
+ A2_MUX_RECEIVE,
+ data);
+ break;
+ case IPA_WRITE_DONE:
+ bam_mux_write_done(true, (struct sk_buff *)data);
+ break;
+ default:
+ IPAERR("%s: Unknown event %d\n", __func__, evt);
+ break;
+ }
+}
+
+static int connect_to_bam(void)
+{
+ int ret;
+ struct ipa_sys_connect_params connect_params;
+
+ IPAERR("%s:\n", __func__);
+ if (a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
+ IPAERR("%s: SW bridge is already UP\n",
+ __func__);
+ return -EFAULT;
+ }
+ ret = sps_device_reset(a2_mux_ctx->a2_device_handle);
+ if (ret)
+ IPAERR("%s: device reset failed ret = %d\n",
+ __func__, ret);
+ memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+ connect_params.client = IPA_CLIENT_A2_TETHERED_CONS;
+ connect_params.notify = ipa_tethered_notify;
+ connect_params.desc_fifo_sz = 0x800;
+ ret = ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
+ &connect_params,
+ &a2_mux_ctx->tethered_prod);
+ if (ret) {
+ IPAERR("%s: IPA bridge tethered UL failed to connect: %d\n",
+ __func__, ret);
+ goto bridge_tethered_ul_failed;
+ }
+ memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+ connect_params.ipa_ep_cfg.mode.mode = IPA_DMA;
+ connect_params.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
+ connect_params.client = IPA_CLIENT_A2_TETHERED_PROD;
+ connect_params.notify = ipa_tethered_notify;
+ connect_params.desc_fifo_sz = 0x800;
+ ret = ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
+ &connect_params,
+ &a2_mux_ctx->tethered_cons);
+ if (ret) {
+ IPAERR("%s: IPA bridge tethered DL failed to connect: %d\n",
+ __func__, ret);
+ goto bridge_tethered_dl_failed;
+ }
+ memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+ connect_params.client = IPA_CLIENT_A2_EMBEDDED_CONS;
+ connect_params.notify = ipa_embedded_notify;
+ connect_params.desc_fifo_sz = 0x800;
+ ret = ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
+ &connect_params,
+ &a2_mux_ctx->embedded_prod);
+ if (ret) {
+ IPAERR("%s: IPA bridge embedded UL failed to connect: %d\n",
+ __func__, ret);
+ goto bridge_embedded_ul_failed;
+ }
+ memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+ connect_params.client = IPA_CLIENT_A2_EMBEDDED_PROD;
+ connect_params.notify = ipa_embedded_notify;
+ connect_params.desc_fifo_sz = 0x800;
+ ret = ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_EMBEDDED,
+ &connect_params,
+ &a2_mux_ctx->embedded_cons);
+ if (ret) {
+ IPAERR("%s: IPA bridge embedded DL failed to connect: %d\n",
+ __func__, ret);
+ goto bridge_embedded_dl_failed;
+ }
+ a2_mux_ctx->a2_mux_sw_bridge_is_connected = 1;
+ complete_all(&a2_mux_ctx->bam_connection_completion);
+ return 0;
+
+bridge_embedded_dl_failed:
+ ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
+ a2_mux_ctx->embedded_prod);
+bridge_embedded_ul_failed:
+ ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
+ a2_mux_ctx->tethered_cons);
+bridge_tethered_dl_failed:
+ ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
+ a2_mux_ctx->tethered_prod);
+bridge_tethered_ul_failed:
+ return ret;
+}
+
+static int disconnect_to_bam(void)
+{
+ int ret;
+
+ IPAERR("%s\n", __func__);
+ if (!a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
+ IPAERR("%s: SW bridge is already DOWN\n",
+ __func__);
+ return -EFAULT;
+ }
+ ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
+ a2_mux_ctx->tethered_prod);
+ if (ret) {
+ IPAERR("%s: IPA bridge tethered UL failed to disconnect: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
+ a2_mux_ctx->tethered_cons);
+ if (ret) {
+ IPAERR("%s: IPA bridge tethered DL failed to disconnect: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
+ a2_mux_ctx->embedded_prod);
+ if (ret) {
+ IPAERR("%s: IPA bridge embedded UL failed to disconnect: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_EMBEDDED,
+ a2_mux_ctx->embedded_cons);
+ if (ret) {
+ IPAERR("%s: IPA bridge embedded DL failed to disconnect: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ ret = sps_device_reset(a2_mux_ctx->a2_device_handle);
+ if (ret) {
+ IPAERR("%s: device reset failed ret = %d\n",
+ __func__, ret);
+ return ret;
+ }
+ verify_tx_queue_is_empty(__func__);
+ (void) ipa_rm_release_resource(IPA_RM_RESOURCE_A2_PROD);
+ if (a2_mux_ctx->disconnect_ack)
+ toggle_apps_ack();
+ a2_mux_ctx->a2_mux_sw_bridge_is_connected = 0;
+ complete_all(&a2_mux_ctx->bam_connection_completion);
+ return 0;
+}
+
+static void bam_dmux_smsm_cb(void *priv,
+ u32 old_state,
+ u32 new_state)
+{
+ static int last_processed_state;
+
+ mutex_lock(&a2_mux_ctx->smsm_cb_lock);
+ IPADBG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
+ new_state);
+ if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
+ IPADBG("%s: already processed this state\n", __func__);
+ mutex_unlock(&a2_mux_ctx->smsm_cb_lock);
+ return;
+ }
+ last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
+ if (new_state & SMSM_A2_POWER_CONTROL) {
+ IPADBG("%s: MODEM PWR CTRL 1\n", __func__);
+ grab_wakelock();
+ (void) connect_to_bam();
+ queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
+ &a2_mux_ctx->kickoff_ul_request_resource);
+ } else if (!(new_state & SMSM_A2_POWER_CONTROL)) {
+ IPADBG("%s: MODEM PWR CTRL 0\n", __func__);
+ (void) disconnect_to_bam();
+ release_wakelock();
+ } else {
+ IPAERR("%s: unsupported state change\n", __func__);
+ }
+ mutex_unlock(&a2_mux_ctx->smsm_cb_lock);
+}
+
+static void bam_dmux_smsm_ack_cb(void *priv, u32 old_state,
+ u32 new_state)
+{
+ IPADBG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
+ new_state);
+ complete_all(&a2_mux_ctx->ul_wakeup_ack_completion);
+}
+
+static int a2_mux_pm_rm_request_resource(void)
+{
+ int result = 0;
+ bool is_connected;
+
+ is_connected = msm_bam_dmux_kickoff_ul_wakeup();
+ if (!is_connected)
+ result = -EINPROGRESS;
+ return result;
+}
+
+static int a2_mux_pm_rm_release_resource(void)
+{
+ int result = 0;
+ bool is_connected;
+
+ is_connected = msm_bam_dmux_kickoff_ul_power_down();
+ if (is_connected)
+ result = -EINPROGRESS;
+ return result;
+}
+
+static void a2_mux_pm_rm_notify_cb(void *user_data,
+ enum ipa_rm_event event,
+ unsigned long data)
+{
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ IPADBG("%s: PROD GRANTED CB\n", __func__);
+ complete_all(&a2_mux_ctx->request_resource_completion);
+ break;
+ case IPA_RM_RESOURCE_RELEASED:
+ IPADBG("%s: PROD RELEASED CB\n", __func__);
+ break;
+ default:
+ return;
+ }
+}
+static int a2_mux_pm_initialize_rm(void)
+{
+ struct ipa_rm_create_params create_params;
+ int result;
+
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.name = IPA_RM_RESOURCE_A2_PROD;
+ create_params.reg_params.notify_cb = &a2_mux_pm_rm_notify_cb;
+ result = ipa_rm_create_resource(&create_params);
+ if (result)
+ goto bail;
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.name = IPA_RM_RESOURCE_A2_CONS;
+ create_params.release_resource = &a2_mux_pm_rm_release_resource;
+ create_params.request_resource = &a2_mux_pm_rm_request_resource;
+ result = ipa_rm_create_resource(&create_params);
+bail:
+ return result;
+}
+
+static void bam_mux_process_data(struct sk_buff *rx_skb)
+{
+ unsigned long flags;
+ struct bam_mux_hdr *rx_hdr;
+ unsigned long event_data;
+
+ rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
+ rx_skb->data = (unsigned char *)(rx_hdr + 1);
+ rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
+ rx_skb->len = rx_hdr->pkt_len;
+ rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);
+ event_data = (unsigned long)(rx_skb);
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
+ if (a2_mux_ctx->bam_ch[rx_hdr->ch_id].notify_cb)
+ a2_mux_ctx->bam_ch[rx_hdr->ch_id].notify_cb(
+ a2_mux_ctx->bam_ch[rx_hdr->ch_id].user_data,
+ A2_MUX_RECEIVE,
+ event_data);
+ else
+ dev_kfree_skb_any(rx_skb);
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
+ flags);
+}
+
+static void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
+ a2_mux_ctx->bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
+ a2_mux_ctx->bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
+ flags);
+}
+
+static void handle_bam_mux_cmd(struct sk_buff *rx_skb)
+{
+ unsigned long flags;
+ struct bam_mux_hdr *rx_hdr;
+
+ rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
+ IPADBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n",
+ __func__,
+ rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
+ rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
+ rx_hdr->magic_num = ntohs(rx_hdr->magic_num);
+ rx_hdr->pkt_len = ntohs(rx_hdr->pkt_len);
+ IPADBG("%s: converted to host order magic_num=%d, pkt_len=%d\n",
+ __func__, rx_hdr->magic_num, rx_hdr->pkt_len);
+ if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
+ IPAERR("bad hdr magic %x rvd %d cmd %d pad %d ch %d len %d\n",
+ rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
+ rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
+ dev_kfree_skb_any(rx_skb);
+ return;
+ }
+ if (rx_hdr->ch_id >= A2_MUX_NUM_CHANNELS) {
+ IPAERR("bad LCID %d rsvd %d cmd %d pad %d ch %d len %d\n",
+ rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
+ rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
+ dev_kfree_skb_any(rx_skb);
+ return;
+ }
+ switch (rx_hdr->cmd) {
+ case BAM_MUX_HDR_CMD_DATA:
+ bam_mux_process_data(rx_skb);
+ break;
+ case BAM_MUX_HDR_CMD_OPEN:
+ IPADBG("%s: opening cid %d PC enabled\n", __func__,
+ rx_hdr->ch_id);
+ handle_bam_mux_cmd_open(rx_hdr);
+ if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
+ IPADBG("%s: deactivating disconnect ack\n",
+ __func__);
+ a2_mux_ctx->disconnect_ack = 0;
+ }
+ dev_kfree_skb_any(rx_skb);
+ if (a2_mux_ctx->a2_mux_send_power_vote_on_init_once) {
+ kickoff_ul_wakeup_func(NULL);
+ a2_mux_ctx->a2_mux_send_power_vote_on_init_once = 0;
+ }
+ break;
+ case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
+ IPADBG("%s: opening cid %d PC disabled\n", __func__,
+ rx_hdr->ch_id);
+ if (!a2_mux_ctx->a2_pc_disabled) {
+ a2_mux_ctx->a2_pc_disabled = 1;
+ ul_wakeup();
+ }
+ handle_bam_mux_cmd_open(rx_hdr);
+ dev_kfree_skb_any(rx_skb);
+ break;
+ case BAM_MUX_HDR_CMD_CLOSE:
+ /* probably should drop pending write */
+ IPADBG("%s: closing cid %d\n", __func__,
+ rx_hdr->ch_id);
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
+ flags);
+ a2_mux_ctx->bam_ch[rx_hdr->ch_id].status &=
+ ~BAM_CH_REMOTE_OPEN;
+ spin_unlock_irqrestore(
+ &a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
+ dev_kfree_skb_any(rx_skb);
+ break;
+ default:
+ IPAERR("bad hdr.magic %x rvd %d cmd %d pad %d ch %d len %d\n",
+ rx_hdr->magic_num, rx_hdr->reserved,
+ rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
+ rx_hdr->pkt_len);
+ dev_kfree_skb_any(rx_skb);
+ return;
+ }
+}
+
+static int bam_mux_write_cmd(void *data, u32 len)
+{
+ int rc;
+ struct tx_pkt_info *pkt;
+ unsigned long flags;
+
+ pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
+ if (pkt == NULL) {
+ IPAERR("%s: mem alloc for tx_pkt_info failed\n", __func__);
+ return -ENOMEM;
+ }
+ pkt->skb = __dev_alloc_skb(len, GFP_NOWAIT | __GFP_NOWARN);
+ if (pkt->skb == NULL) {
+ IPAERR("%s: unable to alloc skb\n\n", __func__);
+ kfree(pkt);
+ return -ENOMEM;
+ }
+ memcpy(skb_put(pkt->skb, len), data, len);
+ kfree(data);
+ pkt->len = len;
+ pkt->is_cmd = 1;
+ set_tx_timestamp(pkt);
+ spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+ list_add_tail(&pkt->list_node, &a2_mux_ctx->bam_tx_pool);
+ rc = ipa_tx_dp(IPA_CLIENT_A2_EMBEDDED_CONS, pkt->skb, NULL);
+ if (rc) {
+ IPAERR("%s ipa_tx_dp failed rc=%d\n",
+ __func__, rc);
+ list_del(&pkt->list_node);
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+ flags);
+ dev_kfree_skb_any(pkt->skb);
+ kfree(pkt);
+ } else {
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+ flags);
+ }
+ return rc;
+}
/**
- * a2_mux_initialize() - initialize A2 MUX module
+ * a2_mux_get_tethered_client_handles() - provides the tethered
+ * pipe handles for post-setup configuration
+ * @lcid: logical channel ID
+ * @clnt_cons_handle: [out] consumer pipe handle
+ * @clnt_prod_handle: [out] producer pipe handle
*
- * Return codes:
- * 0: success
+ * Returns: 0 on success, negative on failure
*/
-int a2_mux_initialize(void)
+int a2_mux_get_tethered_client_handles(enum a2_mux_logical_channel_id lcid,
+ unsigned int *clnt_cons_handle,
+ unsigned int *clnt_prod_handle)
{
- (void) msm_bam_dmux_ul_power_vote();
-
+ if (!a2_mux_ctx->a2_mux_initialized || lcid != A2_MUX_TETHERED_0)
+ return -ENODEV;
+ if (!clnt_cons_handle || !clnt_prod_handle)
+ return -EINVAL;
+ *clnt_prod_handle = a2_mux_ctx->tethered_prod;
+ *clnt_cons_handle = a2_mux_ctx->tethered_cons;
return 0;
}
/**
- * a2_mux_close() - close A2 MUX module
+ * a2_mux_write() - sends the packet to A2,
+ * adding a MUX header according to the lcid provided
+ * @id: logical channel ID
+ * @skb: SKB to write
*
- * Return codes:
- * 0: success
- * -EINVAL: invalid parameters
+ * Returns: 0 on success, negative on failure
*/
-int a2_mux_close(void)
+int a2_mux_write(enum a2_mux_logical_channel_id id, struct sk_buff *skb)
{
- int ret = 0;
+ int rc = 0;
+ struct bam_mux_hdr *hdr;
+ unsigned long flags;
+ struct sk_buff *new_skb = NULL;
+ struct tx_pkt_info *pkt;
+ bool is_connected;
- (void) msm_bam_dmux_ul_power_unvote();
+ if (id >= A2_MUX_NUM_CHANNELS)
+ return -EINVAL;
+ if (!skb)
+ return -EINVAL;
+ if (!a2_mux_ctx->a2_mux_initialized)
+ return -ENODEV;
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[id].lock, flags);
+ if (!bam_ch_is_open(id)) {
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
+ IPAERR("%s: port not open: %d\n",
+ __func__,
+ a2_mux_ctx->bam_ch[id].status);
+ return -ENODEV;
+ }
+ if (a2_mux_ctx->bam_ch[id].use_wm &&
+ (a2_mux_ctx->bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
+ IPAERR("%s: watermark exceeded: %d\n", __func__, id);
+ return -EAGAIN;
+ }
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
+ read_lock(&a2_mux_ctx->ul_wakeup_lock);
+ is_connected = a2_mux_ctx->bam_is_connected;
+ read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+ if (!is_connected)
+ return -ENODEV;
+ if (id != A2_MUX_TETHERED_0) {
+ /*
+ * if the skb does not have any tailroom for padding,
+ * copy the skb into a new expanded skb
+ */
+ if ((skb->len & 0x3) &&
+ (skb_tailroom(skb) < A2_MUX_PADDING_LENGTH(skb->len))) {
+ new_skb = skb_copy_expand(skb, skb_headroom(skb),
+ A2_MUX_PADDING_LENGTH(skb->len),
+ GFP_ATOMIC);
+ if (new_skb == NULL) {
+ IPAERR("%s: cannot allocate skb\n", __func__);
+ rc = -ENOMEM;
+ goto write_fail;
+ }
+ dev_kfree_skb_any(skb);
+ skb = new_skb;
+ }
+ hdr = (struct bam_mux_hdr *)skb_push(
+ skb, sizeof(struct bam_mux_hdr));
+ /*
+ * the caller should leave room for the hdr and padding;
+ * headroom for the hdr is assumed, tailroom for padding
+ * is handled by the copy-expand above
+ */
+ hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+ hdr->cmd = BAM_MUX_HDR_CMD_DATA;
+ hdr->reserved = 0;
+ hdr->ch_id = id;
+ hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
+ if (skb->len & 0x3)
+ skb_put(skb, A2_MUX_PADDING_LENGTH(skb->len));
+ hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) +
+ hdr->pkt_len);
+ IPADBG("data %p, tail %p skb len %d pkt len %d pad len %d\n",
+ skb->data, skb->tail, skb->len,
+ hdr->pkt_len, hdr->pad_len);
+ hdr->magic_num = htons(hdr->magic_num);
+ hdr->pkt_len = htons(hdr->pkt_len);
+ IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
+ hdr->magic_num, hdr->pkt_len);
+ }
+ pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
+ if (pkt == NULL) {
+ IPAERR("%s: mem alloc for tx_pkt_info failed\n", __func__);
+ rc = -ENOMEM;
+ goto write_fail2;
+ }
+ pkt->skb = skb;
+ pkt->is_cmd = 0;
+ set_tx_timestamp(pkt);
+ spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+ list_add_tail(&pkt->list_node, &a2_mux_ctx->bam_tx_pool);
+ if (id == A2_MUX_TETHERED_0)
+ rc = ipa_tx_dp(IPA_CLIENT_A2_TETHERED_CONS, skb, NULL);
+ else
+ rc = ipa_tx_dp(IPA_CLIENT_A2_EMBEDDED_CONS, skb, NULL);
+ if (rc) {
+ IPAERR("%s ipa_tx_dp failed rc=%d\n",
+ __func__, rc);
+ list_del(&pkt->list_node);
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+ flags);
+ goto write_fail3;
+ } else {
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+ flags);
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[id].lock, flags);
+ a2_mux_ctx->bam_ch[id].num_tx_pkts++;
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
+ }
+ return 0;
- ret = ipa_disconnect(a2_service_cb.consumer_handle);
- if (0 != ret) {
- pr_err("%s: ipa_disconnect failure\n", __func__);
- goto bail;
+write_fail3:
+ kfree(pkt);
+write_fail2:
+ if (new_skb)
+ dev_kfree_skb_any(new_skb);
+write_fail:
+ return rc;
+}
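+
+/*
+ * Minimal usage sketch for a2_mux_write(); "priv_skb" and
+ * "requeue_for_later" are hypothetical caller names used only to show
+ * the call shape. -EAGAIN means the channel is above HIGH_WATERMARK and
+ * the caller should back off and retry; on success the SKB is handed
+ * back later through an A2_MUX_WRITE_DONE notification on the channel's
+ * notify_cb:
+ *
+ *	rc = a2_mux_write(A2_MUX_TETHERED_0, priv_skb);
+ *	if (rc == -EAGAIN)
+ *		requeue_for_later(priv_skb);
+ */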
+
+/**
+ * a2_mux_open_channel() - opens logical channel
+ * to A2
+ * @lcid: logical channel ID
+ * @user_data: user-provided data passed to the notification CB below
+ * @notify_cb: user-provided notification CB
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
+ void *user_data,
+ a2_mux_notify_cb notify_cb)
+{
+ struct bam_mux_hdr *hdr;
+ unsigned long flags;
+ int rc = 0;
+ bool is_connected;
+
+ IPADBG("%s: opening ch %d\n", __func__, lcid);
+ if (!a2_mux_ctx->a2_mux_initialized) {
+ IPAERR("%s: not inititialized\n", __func__);
+ return -ENODEV;
+ }
+ if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0) {
+ IPAERR("%s: invalid channel id %d\n", __func__, lcid);
+ return -EINVAL;
+ }
+ if (notify_cb == NULL) {
+ IPAERR("%s: notify function is NULL\n", __func__);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ if (bam_ch_is_open(lcid)) {
+ IPAERR("%s: Already opened %d\n", __func__, lcid);
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ goto open_done;
+ }
+ if (!bam_ch_is_remote_open(lcid)) {
+ IPAERR("%s: Remote not open; ch: %d\n", __func__, lcid);
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ return -ENODEV;
+ }
+ a2_mux_ctx->bam_ch[lcid].notify_cb = notify_cb;
+ a2_mux_ctx->bam_ch[lcid].user_data = user_data;
+ a2_mux_ctx->bam_ch[lcid].status |= BAM_CH_LOCAL_OPEN;
+ a2_mux_ctx->bam_ch[lcid].num_tx_pkts = 0;
+ a2_mux_ctx->bam_ch[lcid].use_wm = 0;
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ read_lock(&a2_mux_ctx->ul_wakeup_lock);
+ is_connected = a2_mux_ctx->bam_is_connected;
+ read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+ if (!is_connected)
+ return -ENODEV;
+ if (lcid != A2_MUX_TETHERED_0) {
+ hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
+ if (hdr == NULL) {
+ IPAERR("%s: hdr kmalloc failed. ch: %d\n",
+ __func__, lcid);
+ return -ENOMEM;
+ }
+ hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+ if (a2_mux_ctx->a2_mux_apps_pc_enabled) {
+ hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
+ } else {
+ IPAERR("%s: PC DISABLED BY A5 SW BY INTENTION\n",
+ __func__);
+ a2_mux_ctx->a2_pc_disabled = 1;
+ hdr->cmd = BAM_MUX_HDR_CMD_OPEN_NO_A2_PC;
+ }
+ hdr->reserved = 0;
+ hdr->ch_id = lcid;
+ hdr->pkt_len = 0;
+ hdr->pad_len = 0;
+ hdr->magic_num = htons(hdr->magic_num);
+ hdr->pkt_len = htons(hdr->pkt_len);
+ IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
+ hdr->magic_num, hdr->pkt_len);
+ rc = bam_mux_write_cmd((void *)hdr,
+ sizeof(struct bam_mux_hdr));
+ if (rc) {
+ IPAERR("%s: bam_mux_write_cmd failed %d; ch: %d\n",
+ __func__, rc, lcid);
+ kfree(hdr);
+ return rc;
+ }
}
- ret = ipa_disconnect(a2_service_cb.producer_handle);
- if (0 != ret) {
- pr_err("%s: ipa_disconnect failure\n", __func__);
- goto bail;
+open_done:
+ IPADBG("%s: opened ch %d\n", __func__, lcid);
+ return rc;
+}
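+
+/*
+ * Minimal usage sketch for a2_mux_open_channel(); "wan_priv" and
+ * "wan_notify" are hypothetical client names, and the callback must
+ * match the a2_mux_notify_cb prototype. The callback receives
+ * A2_MUX_RECEIVE with an sk_buff in 'data' and A2_MUX_WRITE_DONE when a
+ * previously written SKB is handed back:
+ *
+ *	rc = a2_mux_open_channel(A2_MUX_TETHERED_0, wan_priv, wan_notify);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	rc = a2_mux_close_channel(A2_MUX_TETHERED_0);
+ */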
+
+/**
+ * a2_mux_close_channel() - closes logical channel
+ * to A2
+ * @lcid: logical channel ID
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int a2_mux_close_channel(enum a2_mux_logical_channel_id lcid)
+{
+ struct bam_mux_hdr *hdr;
+ unsigned long flags;
+ int rc = 0;
+ bool is_connected;
+
+ if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0)
+ return -EINVAL;
+ IPADBG("%s: closing ch %d\n", __func__, lcid);
+ if (!a2_mux_ctx->a2_mux_initialized)
+ return -ENODEV;
+ read_lock(&a2_mux_ctx->ul_wakeup_lock);
+ is_connected = a2_mux_ctx->bam_is_connected;
+ read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+ if (!is_connected && !bam_ch_is_in_reset(lcid))
+ return -ENODEV;
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ a2_mux_ctx->bam_ch[lcid].notify_cb = NULL;
+ a2_mux_ctx->bam_ch[lcid].user_data = NULL;
+ a2_mux_ctx->bam_ch[lcid].status &= ~BAM_CH_LOCAL_OPEN;
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ if (bam_ch_is_in_reset(lcid)) {
+ a2_mux_ctx->bam_ch[lcid].status &= ~BAM_CH_IN_RESET;
+ return 0;
}
+ if (lcid != A2_MUX_TETHERED_0) {
+ hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
+ if (hdr == NULL) {
+ IPAERR("%s: hdr kmalloc failed. ch: %d\n",
+ __func__, lcid);
+ return -ENOMEM;
+ }
+ hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+ hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
+ hdr->reserved = 0;
+ hdr->ch_id = lcid;
+ hdr->pkt_len = 0;
+ hdr->pad_len = 0;
+ hdr->magic_num = htons(hdr->magic_num);
+ hdr->pkt_len = htons(hdr->pkt_len);
+ IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
+ hdr->magic_num, hdr->pkt_len);
+ rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
+ if (rc) {
+ IPAERR("%s: bam_mux_write_cmd failed %d; ch: %d\n",
+ __func__, rc, lcid);
+ kfree(hdr);
+ return rc;
+ }
+ }
+ IPADBG("%s: closed ch %d\n", __func__, lcid);
+ return 0;
+}
- ret = 0;
+/**
+ * a2_mux_is_ch_full() - checks if channel is above predefined WM,
+ * used for flow control implementation
+ * @lcid: logical channel ID
+ *
+ * Returns: non-zero if the channel is above the predefined watermark,
+ * 0 otherwise, negative on failure
+ */
+int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid)
+{
+ unsigned long flags;
+ int ret;
-bail:
-
+ if (lcid >= A2_MUX_NUM_CHANNELS ||
+ lcid < 0)
+ return -EINVAL;
+ if (!a2_mux_ctx->a2_mux_initialized)
+ return -ENODEV;
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ a2_mux_ctx->bam_ch[lcid].use_wm = 1;
+ ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts >= HIGH_WATERMARK;
+ IPADBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
+ lcid, a2_mux_ctx->bam_ch[lcid].num_tx_pkts, ret);
+ if (!bam_ch_is_local_open(lcid)) {
+ ret = -ENODEV;
+ IPAERR("%s: port not open: %d\n", __func__,
+ a2_mux_ctx->bam_ch[lcid].status);
+ }
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
return ret;
}
/**
- * a2_mux_open_port() - open connection to A2
- * @wwan_logical_channel_id: WWAN logical channel ID
- * @rx_cb: Rx callback
- * @tx_complete_cb: Tx completed callback
+ * a2_mux_is_ch_low() - checks if channel is below predefined WM,
+ * used for flow control implementation
+ * @lcid: logical channel ID
*
- * Return codes:
- * 0: success
- * -EINVAL: invalid parameters
+ * Returns: non-zero if the channel is below the predefined watermark,
+ * 0 otherwise, negative on failure
*/
-int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
- void *tx_complete_cb)
+int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid)
{
- int ret = 0;
- u8 src_pipe = 0;
- u8 dst_pipe = 0;
- struct sps_pipe *a2_to_ipa_pipe = NULL;
- struct sps_pipe *ipa_to_a2_pipe = NULL;
-
- (void) wwan_logical_channel_id;
-
- a2_service_cb.rx_cb = rx_cb;
- a2_service_cb.tx_complete_cb = tx_complete_cb;
-
- ret = connect_pipe_ipa(A2_TO_IPA,
- &src_pipe,
- &(a2_service_cb.consumer_handle),
- a2_to_ipa_pipe);
- if (ret) {
- pr_err("%s: A2 to IPA pipe connection failure\n", __func__);
- goto bail;
- }
-
- ret = connect_pipe_ipa(IPA_TO_A2,
- &dst_pipe,
- &(a2_service_cb.producer_handle),
- ipa_to_a2_pipe);
- if (ret) {
- pr_err("%s: IPA to A2 pipe connection failure\n", __func__);
- sps_disconnect(a2_to_ipa_pipe);
- sps_free_endpoint(a2_to_ipa_pipe);
- (void) ipa_disconnect(a2_service_cb.consumer_handle);
- goto bail;
- }
-
- ret = 0;
-
-bail:
-
- return ret;
-}
-
-static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
- u8 *usb_pipe_idx,
- u32 *clnt_hdl,
- struct sps_pipe *pipe)
-{
+ unsigned long flags;
int ret;
- struct sps_connect connection = {0, };
- u32 a2_handle = 0;
- u32 a2_phy_addr = 0;
- struct a2_mux_pipe_connection pipe_connection = { 0, };
- struct ipa_connect_params ipa_in_params;
- struct ipa_sps_params sps_out_params;
- memset(&ipa_in_params, 0, sizeof(ipa_in_params));
- memset(&sps_out_params, 0, sizeof(sps_out_params));
-
- if (!usb_pipe_idx || !clnt_hdl) {
- pr_err("connect_pipe_ipa :: null arguments\n");
- ret = -EINVAL;
- goto bail;
+ if (lcid >= A2_MUX_NUM_CHANNELS ||
+ lcid < 0)
+ return -EINVAL;
+ if (!a2_mux_ctx->a2_mux_initialized)
+ return -ENODEV;
+ spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+ a2_mux_ctx->bam_ch[lcid].use_wm = 1;
+ ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts <= LOW_WATERMARK;
+ IPADBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
+ lcid, a2_mux_ctx->bam_ch[lcid].num_tx_pkts, ret);
+ if (!bam_ch_is_local_open(lcid)) {
+ ret = -ENODEV;
+ IPAERR("%s: port not open: %d\n", __func__,
+ a2_mux_ctx->bam_ch[lcid].status);
}
-
- ret = ipa_get_a2_mux_pipe_info(pipe_dir, &pipe_connection);
- if (ret) {
- pr_err("ipa_get_a2_mux_pipe_info failed\n");
- goto bail;
- }
-
- if (pipe_dir == A2_TO_IPA) {
- a2_phy_addr = pipe_connection.src_phy_addr;
- ipa_in_params.client = IPA_CLIENT_A2_TETHERED_PROD;
- ipa_in_params.ipa_ep_cfg.mode.mode = IPA_DMA;
- ipa_in_params.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
- pr_err("-*&- pipe_connection->src_pipe_index = %d\n",
- pipe_connection.src_pipe_index);
- ipa_in_params.client_ep_idx = pipe_connection.src_pipe_index;
- } else {
- a2_phy_addr = pipe_connection.dst_phy_addr;
- ipa_in_params.client = IPA_CLIENT_A2_TETHERED_CONS;
- ipa_in_params.client_ep_idx = pipe_connection.dst_pipe_index;
- }
-
- ret = sps_phy2h(a2_phy_addr, &a2_handle);
- if (ret) {
- pr_err("%s: sps_phy2h failed (A2 BAM) %d\n", __func__, ret);
- goto bail;
- }
-
- ipa_in_params.client_bam_hdl = a2_handle;
- ipa_in_params.desc_fifo_sz = pipe_connection.desc_fifo_size;
- ipa_in_params.data_fifo_sz = pipe_connection.data_fifo_size;
-
- if (pipe_connection.mem_type == IPA_SPS_PIPE_MEM) {
- pr_debug("%s: A2 BAM using SPS pipe memory\n", __func__);
- ret = sps_setup_bam2bam_fifo(&data_mem_buf[pipe_dir],
- pipe_connection.data_fifo_base_offset,
- pipe_connection.data_fifo_size, 1);
- if (ret) {
- pr_err("%s: data fifo setup failure %d\n",
- __func__, ret);
- goto bail;
- }
-
- ret = sps_setup_bam2bam_fifo(&desc_mem_buf[pipe_dir],
- pipe_connection.desc_fifo_base_offset,
- pipe_connection.desc_fifo_size, 1);
- if (ret) {
- pr_err("%s: desc. fifo setup failure %d\n",
- __func__, ret);
- goto bail;
- }
-
- ipa_in_params.data = data_mem_buf[pipe_dir];
- ipa_in_params.desc = desc_mem_buf[pipe_dir];
- }
-
- ret = a2_ipa_connect_pipe(&ipa_in_params,
- &sps_out_params,
- clnt_hdl);
- if (ret) {
- pr_err("-**- USB-IPA info: ipa_connect failed\n");
- pr_err("%s: usb_ipa_connect_pipe failed\n", __func__);
- goto bail;
- }
-
- pipe = sps_alloc_endpoint();
- if (pipe == NULL) {
- pr_err("%s: sps_alloc_endpoint failed\n", __func__);
- ret = -ENOMEM;
- goto a2_ipa_connect_pipe_failed;
- }
-
- ret = sps_get_config(pipe, &connection);
- if (ret) {
- pr_err("%s: tx get config failed %d\n", __func__, ret);
- goto get_config_failed;
- }
-
- if (pipe_dir == A2_TO_IPA) {
- connection.mode = SPS_MODE_SRC;
- *usb_pipe_idx = connection.src_pipe_index;
- connection.source = a2_handle;
- connection.destination = sps_out_params.ipa_bam_hdl;
- connection.src_pipe_index = pipe_connection.src_pipe_index;
- connection.dest_pipe_index = sps_out_params.ipa_ep_idx;
- } else {
- connection.mode = SPS_MODE_DEST;
- *usb_pipe_idx = connection.dest_pipe_index;
- connection.source = sps_out_params.ipa_bam_hdl;
- connection.destination = a2_handle;
- connection.src_pipe_index = sps_out_params.ipa_ep_idx;
- connection.dest_pipe_index = pipe_connection.dst_pipe_index;
- }
-
- connection.event_thresh = 16;
- connection.data = sps_out_params.data;
- connection.desc = sps_out_params.desc;
-
- ret = sps_connect(pipe, &connection);
- if (ret < 0) {
- pr_err("%s: tx connect error %d\n", __func__, ret);
- goto error;
- }
-
- ret = 0;
- goto bail;
-error:
- sps_disconnect(pipe);
-get_config_failed:
- sps_free_endpoint(pipe);
-a2_ipa_connect_pipe_failed:
- (void) ipa_disconnect(*clnt_hdl);
-bail:
+ spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
return ret;
}
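+
+/*
+ * Flow-control sketch built on the two watermark queries above; note
+ * that either call also enables watermark accounting (use_wm) on the
+ * channel. "stop_tx_queue" and "wake_tx_queue" are hypothetical client
+ * hooks (e.g. netif_stop_queue/netif_wake_queue in a netdev client):
+ *
+ *	if (a2_mux_is_ch_full(lcid) > 0)
+ *		stop_tx_queue();
+ *	...
+ *	if (a2_mux_is_ch_low(lcid) > 0)
+ *		wake_tx_queue();
+ */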
-static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
- struct ipa_sps_params *out_params, u32 *clnt_hdl)
+static int a2_mux_initialize_context(int handle)
{
- return ipa_connect(in_params, out_params, clnt_hdl);
+ int i;
+
+ a2_mux_ctx->a2_mux_apps_pc_enabled = 1;
+ a2_mux_ctx->a2_device_handle = handle;
+ INIT_WORK(&a2_mux_ctx->kickoff_ul_wakeup, kickoff_ul_wakeup_func);
+ INIT_WORK(&a2_mux_ctx->kickoff_ul_power_down,
+ kickoff_ul_power_down_func);
+ INIT_WORK(&a2_mux_ctx->kickoff_ul_request_resource,
+ kickoff_ul_request_resource_func);
+ INIT_LIST_HEAD(&a2_mux_ctx->bam_tx_pool);
+ spin_lock_init(&a2_mux_ctx->bam_tx_pool_spinlock);
+ mutex_init(&a2_mux_ctx->wakeup_lock);
+ rwlock_init(&a2_mux_ctx->ul_wakeup_lock);
+ spin_lock_init(&a2_mux_ctx->wakelock_reference_lock);
+ a2_mux_ctx->disconnect_ack = 1;
+ mutex_init(&a2_mux_ctx->smsm_cb_lock);
+ for (i = 0; i < A2_MUX_NUM_CHANNELS; ++i)
+ spin_lock_init(&a2_mux_ctx->bam_ch[i].lock);
+ init_completion(&a2_mux_ctx->ul_wakeup_ack_completion);
+ init_completion(&a2_mux_ctx->bam_connection_completion);
+ init_completion(&a2_mux_ctx->request_resource_completion);
+ wake_lock_init(&a2_mux_ctx->bam_wakelock,
+ WAKE_LOCK_SUSPEND, "a2_mux_wakelock");
+ a2_mux_ctx->a2_mux_initialized = 1;
+ a2_mux_ctx->a2_mux_send_power_vote_on_init_once = 1;
+ a2_mux_ctx->a2_mux_tx_workqueue =
+ create_singlethread_workqueue("a2_mux_tx");
+ if (!a2_mux_ctx->a2_mux_tx_workqueue) {
+ IPAERR("%s: a2_mux_tx_workqueue alloc failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+ return 0;
}
+/**
+ * a2_mux_init() - initialize A2 MUX component
+ *
+ * Returns: 0 on success, negative otherwise
+ */
+int a2_mux_init(void)
+{
+ int rc;
+ u32 h;
+ void *a2_virt_addr;
+ u32 a2_bam_mem_base;
+ u32 a2_bam_mem_size;
+ u32 a2_bam_irq;
+ struct sps_bam_props a2_props;
+
+
+ IPADBG("%s A2 MUX\n", __func__);
+ rc = ipa_get_a2_mux_bam_info(&a2_bam_mem_base,
+ &a2_bam_mem_size,
+ &a2_bam_irq);
+ if (rc) {
+ IPAERR("%s: ipa_get_a2_mux_bam_info failed\n", __func__);
+ rc = -EFAULT;
+ goto bail;
+ }
+ a2_virt_addr = ioremap_nocache((unsigned long)(a2_bam_mem_base),
+ a2_bam_mem_size);
+ if (!a2_virt_addr) {
+ IPAERR("%s: ioremap failed\n", __func__);
+ rc = -ENOMEM;
+ goto bail;
+ }
+ memset(&a2_props, 0, sizeof(a2_props));
+ a2_props.phys_addr = a2_bam_mem_base;
+ a2_props.virt_addr = a2_virt_addr;
+ a2_props.virt_size = a2_bam_mem_size;
+ a2_props.irq = a2_bam_irq;
+ a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
+ a2_props.num_pipes = A2_NUM_PIPES;
+ a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
+ /* need to free on tear down */
+ rc = sps_register_bam_device(&a2_props, &h);
+ if (rc < 0) {
+ IPAERR("%s: register bam error %d\n", __func__, rc);
+ goto register_bam_failed;
+ }
+ a2_mux_ctx = kzalloc(sizeof(*a2_mux_ctx), GFP_KERNEL);
+ if (!a2_mux_ctx) {
+ IPAERR("%s: a2_mux_ctx alloc failed, rc: %d\n", __func__, rc);
+ rc = -ENOMEM;
+ goto register_bam_failed;
+ }
+ rc = a2_mux_initialize_context(h);
+ if (rc) {
+ IPAERR("%s: a2_mux_initialize_context failed, rc: %d\n",
+ __func__, rc);
+ goto ctx_alloc_failed;
+ }
+ rc = a2_mux_pm_initialize_rm();
+ if (rc) {
+ IPAERR("%s: a2_mux_pm_initialize_rm failed, rc: %d\n",
+ __func__, rc);
+ goto ctx_alloc_failed;
+ }
+ rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
+ bam_dmux_smsm_cb, NULL);
+ if (rc) {
+ IPAERR("%s: smsm cb register failed, rc: %d\n", __func__, rc);
+ rc = -ENOMEM;
+ goto ctx_alloc_failed;
+ }
+ rc = smsm_state_cb_register(SMSM_MODEM_STATE,
+ SMSM_A2_POWER_CONTROL_ACK,
+ bam_dmux_smsm_ack_cb, NULL);
+ if (rc) {
+ IPAERR("%s: smsm ack cb register failed, rc: %d\n",
+ __func__, rc);
+ rc = -ENOMEM;
+ goto smsm_ack_cb_reg_failed;
+ }
+ if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
+ bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));
+ rc = 0;
+ goto bail;
+
+smsm_ack_cb_reg_failed:
+ smsm_state_cb_deregister(SMSM_MODEM_STATE,
+ SMSM_A2_POWER_CONTROL,
+ bam_dmux_smsm_cb, NULL);
+ctx_alloc_failed:
+ kfree(a2_mux_ctx);
+register_bam_failed:
+ iounmap(a2_virt_addr);
+bail:
+ return rc;
+}
+
+/**
+ * a2_mux_exit() - destroy A2 MUX component
+ *
+ * Returns: 0 on success, negative otherwise
+ */
+int a2_mux_exit(void)
+{
+ smsm_state_cb_deregister(SMSM_MODEM_STATE,
+ SMSM_A2_POWER_CONTROL_ACK,
+ bam_dmux_smsm_ack_cb,
+ NULL);
+ smsm_state_cb_deregister(SMSM_MODEM_STATE,
+ SMSM_A2_POWER_CONTROL,
+ bam_dmux_smsm_cb,
+ NULL);
+ if (a2_mux_ctx->a2_mux_tx_workqueue)
+ destroy_workqueue(a2_mux_ctx->a2_mux_tx_workqueue);
+ return 0;
+}
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
index 7690b21..b07c653 100644
--- a/drivers/platform/msm/ipa/ipa.c
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -26,6 +26,7 @@
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include "ipa_i.h"
+#include "ipa_rm_i.h"
#define IPA_SUMMING_THRESHOLD (0x10)
#define IPA_PIPE_MEM_START_OFST (0x0)
@@ -1068,6 +1069,33 @@
return 0;
}
+/**
+* ipa_get_a2_mux_bam_info() - Exposes A2 parameters fetched from
+* DTS
+*
+* @a2_bam_mem_base: A2 BAM Memory base
+* @a2_bam_mem_size: A2 BAM Memory size
+* @a2_bam_irq: A2 BAM IRQ
+*
+* Return codes:
+* 0: success
+* -EFAULT: invalid parameters
+*/
+int ipa_get_a2_mux_bam_info(u32 *a2_bam_mem_base, u32 *a2_bam_mem_size,
+ u32 *a2_bam_irq)
+{
+ if (!a2_bam_mem_base || !a2_bam_mem_size || !a2_bam_irq) {
+ IPAERR("ipa_get_a2_mux_bam_info null args\n");
+ return -EFAULT;
+ }
+
+ *a2_bam_mem_base = ipa_res.a2_bam_mem_base;
+ *a2_bam_mem_size = ipa_res.a2_bam_mem_size;
+ *a2_bam_irq = ipa_res.a2_bam_irq;
+
+ return 0;
+}
+
static void ipa_set_aggregation_params(void)
{
struct ipa_ep_cfg_aggr agg_params;
@@ -1416,7 +1444,8 @@
{
void *bam_cnfg_bits;
- if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
+ if ((ipa_ctx->ipa_hw_type == IPA_HW_v1_0) ||
+ (ipa_ctx->ipa_hw_type == IPA_HW_v1_1)) {
bam_cnfg_bits = ioremap(res->ipa_mem_base +
IPA_BAM_REG_BASE_OFST,
IPA_BAM_REMAP_SIZE);
@@ -1528,6 +1557,7 @@
* - Create empty routing table in system memory(no committing)
* - Initialize pipes memory pool with ipa_pipe_mem_init for supported platforms
* - Create a char-device for IPA
+* - Initialize IPA RM (resource manager)
*/
static int ipa_init(const struct ipa_plat_drv_res *resource_p)
{
@@ -1870,10 +1900,22 @@
if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
ipa_disable_clks();
+ /* Initialize IPA RM (resource manager) */
+ result = ipa_rm_initialize();
+ if (result) {
+ IPAERR(":cdev_add err=%d\n", -result);
+ result = -ENODEV;
+ goto fail_ipa_rm_init;
+ }
+
+ a2_mux_init();
+
IPADBG(":IPA driver init OK.\n");
return 0;
+fail_ipa_rm_init:
+ cdev_del(&ipa_ctx->cdev);
fail_cdev_add:
device_destroy(ipa_ctx->class, ipa_ctx->dev_num);
fail_device_create:
@@ -1981,6 +2023,18 @@
ipa_res.bam_mem_size = resource_size(resource_p);
}
+ /* Get IPA A2 BAM address */
+ resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+ "a2-bam-base");
+
+ if (!resource_p) {
+ IPAERR(":get resource failed for a2-bam-base!\n");
+ return -ENODEV;
+ } else {
+ ipa_res.a2_bam_mem_base = resource_p->start;
+ ipa_res.a2_bam_mem_size = resource_size(resource_p);
+ }
+
/* Get IPA pipe mem start ofst */
resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
"ipa-pipe-mem");
@@ -2014,6 +2068,17 @@
ipa_res.bam_irq = resource_p->start;
}
+ /* Get IPA A2 BAM IRQ number */
+ resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
+ "a2-bam-irq");
+
+ if (!resource_p) {
+ IPAERR(":get resource failed for a2-bam-irq!\n");
+ return -ENODEV;
+ } else {
+ ipa_res.a2_bam_irq = resource_p->start;
+ }
+
/* Get IPA HW Version */
result = of_property_read_u32(pdev_p->dev.of_node, "qcom,ipa-hw-ver",
&ipa_res.ipa_hw_type);
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
index 1b5b339..14195d7 100644
--- a/drivers/platform/msm/ipa/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -719,8 +719,11 @@
u32 ipa_mem_size;
u32 bam_mem_base;
u32 bam_mem_size;
+ u32 a2_bam_mem_base;
+ u32 a2_bam_mem_size;
u32 ipa_irq;
u32 bam_irq;
+ u32 a2_bam_irq;
u32 ipa_pipe_mem_start_ofst;
u32 ipa_pipe_mem_size;
enum ipa_hw_type ipa_hw_type;
@@ -733,6 +736,8 @@
int ipa_get_a2_mux_pipe_info(enum a2_mux_pipe_direction pipe_dir,
struct a2_mux_pipe_connection *pipe_connect);
+int ipa_get_a2_mux_bam_info(u32 *a2_bam_mem_base, u32 *a2_bam_mem_size,
+ u32 *a2_bam_irq);
void rmnet_bridge_get_client_handles(u32 *producer_handle,
u32 *consumer_handle);
int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
@@ -815,4 +820,7 @@
int ipa_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx);
int ipa_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx);
+int a2_mux_init(void);
+int a2_mux_exit(void);
+
#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm.c b/drivers/platform/msm/ipa/ipa_rm.c
new file mode 100644
index 0000000..99b19cc
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm.c
@@ -0,0 +1,374 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <mach/ipa.h>
+#include "ipa_i.h"
+#include "ipa_rm_dependency_graph.h"
+#include "ipa_rm_i.h"
+#include "ipa_rm_resource.h"
+
+struct ipa_rm_context_type {
+ struct ipa_rm_dep_graph *dep_graph;
+ struct workqueue_struct *ipa_rm_wq;
+};
+static struct ipa_rm_context_type *ipa_rm_ctx;
+
+/**
+ * ipa_rm_create_resource() - create resource
+ * @create_params: [in] parameters needed
+ * for resource initialization
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * This function is called by IPA RM client to initialize client's resources.
+ * This API should be called before any other IPA RM API
+ * on given resource name.
+ *
+ */
+int ipa_rm_create_resource(struct ipa_rm_create_params *create_params)
+{
+ struct ipa_rm_resource *resource;
+ int result;
+
+ if (!create_params) {
+ result = -EINVAL;
+ goto bail;
+ }
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ create_params->name,
+ &resource) == 0) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = ipa_rm_resource_create(create_params,
+ &resource);
+ if (result)
+ goto bail;
+ result = ipa_rm_dep_graph_add(ipa_rm_ctx->dep_graph, resource);
+ if (result)
+ ipa_rm_resource_delete(resource);
+bail:
+ return result;
+}
+EXPORT_SYMBOL(ipa_rm_create_resource);
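+
+/*
+ * Creation sketch for one producer and one consumer resource; the
+ * callbacks named here ("client_rm_notify", "client_request",
+ * "client_release") are hypothetical and only illustrate which fields
+ * of ipa_rm_create_params each side fills in (this mirrors the A2 MUX
+ * client's a2_mux_pm_initialize_rm()):
+ *
+ *	struct ipa_rm_create_params p;
+ *
+ *	memset(&p, 0, sizeof(p));
+ *	p.name = IPA_RM_RESOURCE_A2_PROD;
+ *	p.reg_params.notify_cb = client_rm_notify;
+ *	rc = ipa_rm_create_resource(&p);
+ *
+ *	memset(&p, 0, sizeof(p));
+ *	p.name = IPA_RM_RESOURCE_A2_CONS;
+ *	p.request_resource = client_request;
+ *	p.release_resource = client_release;
+ *	rc = ipa_rm_create_resource(&p);
+ */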
+
+/**
+ * ipa_rm_add_dependency() - create dependency
+ * between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
+ * in case client registered with IPA RM
+ */
+int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return ipa_rm_dep_graph_add_dependency(
+ ipa_rm_ctx->dep_graph,
+ resource_name,
+ depends_on_name);
+}
+EXPORT_SYMBOL(ipa_rm_add_dependency);
+
+/**
+ * ipa_rm_delete_dependency() - delete dependency
+ * between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
+ * in case client registered with IPA RM
+ */
+int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return ipa_rm_dep_graph_delete_dependency(
+ ipa_rm_ctx->dep_graph,
+ resource_name,
+ depends_on_name);
+}
+EXPORT_SYMBOL(ipa_rm_delete_dependency);
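+
+/*
+ * Dependency sketch: the dependent side must be a producer and the
+ * dependency a consumer. The A2 resource names are reused here purely
+ * to show the call shape, not to suggest this specific pairing:
+ *
+ *	rc = ipa_rm_add_dependency(IPA_RM_RESOURCE_A2_PROD,
+ *				   IPA_RM_RESOURCE_A2_CONS);
+ *	...
+ *	rc = ipa_rm_delete_dependency(IPA_RM_RESOURCE_A2_PROD,
+ *				      IPA_RM_RESOURCE_A2_CONS);
+ */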
+
+/**
+ * ipa_rm_request_resource() - request resource
+ * @resource_name: [in] name of the requested resource
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * All registered callbacks are called with IPA_RM_RESOURCE_GRANTED
+ * on successful completion of this operation.
+ */
+int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name)
+{
+ struct ipa_rm_resource *resource;
+ int result;
+ IPADBG("IPA RM ::ipa_rm_request_resource ENTER\n");
+
+ if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+ result = -EINVAL;
+ goto bail;
+ }
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ resource_name,
+ &resource) != 0) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = ipa_rm_resource_producer_request(
+ (struct ipa_rm_resource_prod *)resource);
+
+bail:
+ IPADBG("IPA RM ::ipa_rm_request_resource EXIT [%d]\n", result);
+
+ return result;
+}
+EXPORT_SYMBOL(ipa_rm_request_resource);
+
+/**
+ * ipa_rm_release_resource() - release resource
+ * @resource_name: [in] name of the requested resource
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * All registered callbacks are called with IPA_RM_RESOURCE_RELEASED
+ * on successful completion of this operation.
+ */
+int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name)
+{
+ struct ipa_rm_resource *resource;
+ int result;
+ IPADBG("IPA RM ::ipa_rm_release_resource ENTER\n");
+
+ if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+ result = -EINVAL;
+ goto bail;
+ }
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ resource_name,
+ &resource) != 0) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = ipa_rm_resource_producer_release(
+ (struct ipa_rm_resource_prod *)resource);
+
+bail:
+ IPADBG("IPA RM ::ipa_rm_release_resource EXIT [%d]\n", result);
+ return result;
+}
+EXPORT_SYMBOL(ipa_rm_release_resource);
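+
+/*
+ * Request/release sketch; "wait_for_grant_event" and "handle_error" are
+ * hypothetical client steps. -EINPROGRESS means the grant is
+ * asynchronous and will arrive as IPA_RM_RESOURCE_GRANTED through the
+ * callback registered for the producer (compare
+ * kickoff_ul_request_resource_func() in the A2 MUX client):
+ *
+ *	rc = ipa_rm_request_resource(IPA_RM_RESOURCE_A2_PROD);
+ *	if (rc == -EINPROGRESS)
+ *		wait_for_grant_event();
+ *	else if (rc)
+ *		handle_error(rc);
+ *	...
+ *	rc = ipa_rm_release_resource(IPA_RM_RESOURCE_A2_PROD);
+ */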
+
+/**
+ * ipa_rm_register() - register for event
+ * @resource_name: resource name
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Registration parameters provided here should be the same
+ * as provided later in ipa_rm_deregister() call.
+ */
+int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params)
+{
+ int result;
+ struct ipa_rm_resource *resource;
+ if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+ result = -EINVAL;
+ goto bail;
+ }
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ resource_name,
+ &resource) != 0) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = ipa_rm_resource_producer_register(
+ (struct ipa_rm_resource_prod *)resource,
+ reg_params);
+bail:
+ return result;
+}
+EXPORT_SYMBOL(ipa_rm_register);
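+
+/*
+ * Registration sketch; "client_rm_notify" is a hypothetical callback
+ * with the same prototype as a2_mux_pm_rm_notify_cb(), and the
+ * notify_cb field name follows create_params.reg_params as used in the
+ * A2 MUX client. The same reg_params must later be passed to
+ * ipa_rm_deregister():
+ *
+ *	struct ipa_rm_register_params reg;
+ *
+ *	memset(&reg, 0, sizeof(reg));
+ *	reg.notify_cb = client_rm_notify;
+ *	rc = ipa_rm_register(IPA_RM_RESOURCE_A2_PROD, &reg);
+ */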
+
+/**
+ * ipa_rm_deregister() - cancel the registration
+ * @resource_name: resource name
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Registration parameters provided here should be the same
+ * as provided in ipa_rm_register() call.
+ */
+int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params)
+{
+ int result;
+ struct ipa_rm_resource *resource;
+ if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+ result = -EINVAL;
+ goto bail;
+ }
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ resource_name,
+ &resource) != 0) {
+ result = -EPERM;
+ goto bail;
+ }
+ result = ipa_rm_resource_producer_deregister(
+ (struct ipa_rm_resource_prod *)resource,
+ reg_params);
+bail:
+ return result;
+}
+EXPORT_SYMBOL(ipa_rm_deregister);
+
+/**
+ * ipa_rm_notify_completion() - notify IPA RM that a consumer
+ * driver has completed a request_resource / release_resource
+ * operation
+ * @event: notified event
+ * @resource_name: resource name
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_notify_completion(enum ipa_rm_event event,
+ enum ipa_rm_resource_name resource_name)
+{
+ int result;
+ if (!IPA_RM_RESORCE_IS_CONS(resource_name)) {
+ result = -EINVAL;
+ goto bail;
+ }
+ ipa_rm_wq_send_cmd(IPA_RM_WQ_RESOURCE_CB,
+ resource_name,
+ event);
+ result = 0;
+bail:
+ return result;
+}
+EXPORT_SYMBOL(ipa_rm_notify_completion);
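+
+/*
+ * Completion sketch for a consumer that answered -EINPROGRESS from its
+ * request_resource callback; "bring_up_hw" is a hypothetical step (the
+ * A2 MUX client does the equivalent in kickoff_ul_wakeup_func()):
+ *
+ *	bring_up_hw();
+ *	ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
+ *				 IPA_RM_RESOURCE_A2_CONS);
+ */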
+
+static void ipa_rm_wq_handler(struct work_struct *work)
+{
+ struct ipa_rm_resource *resource;
+ struct ipa_rm_wq_work_type *ipa_rm_work =
+ container_of(work,
+ struct ipa_rm_wq_work_type,
+ work);
+ switch (ipa_rm_work->wq_cmd) {
+ case IPA_RM_WQ_NOTIFY_PROD:
+ if (!IPA_RM_RESORCE_IS_PROD(ipa_rm_work->resource_name))
+ return;
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ ipa_rm_work->resource_name,
+ &resource) != 0)
+ return;
+ ipa_rm_resource_producer_notify_clients(
+ (struct ipa_rm_resource_prod *)resource,
+ ipa_rm_work->event);
+
+ break;
+ case IPA_RM_WQ_NOTIFY_CONS:
+ break;
+ case IPA_RM_WQ_RESOURCE_CB:
+ if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+ ipa_rm_work->resource_name,
+ &resource) != 0)
+ return;
+ ipa_rm_resource_consumer_handle_cb(
+ (struct ipa_rm_resource_cons *)resource,
+ ipa_rm_work->event);
+ break;
+ default:
+ break;
+ }
+
+ kfree((void *) work);
+}
+
+/**
+ * ipa_rm_wq_send_cmd() - send a command for deferred work
+ * @wq_cmd: command that should be executed
+ * @resource_name: resource on which command should be executed
+ * @event: event to pass to the work handler
+ *
+ * Returns: 0 on success, negative otherwise
+ */
+int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd,
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_event event)
+{
+ int result = -ENOMEM;
+ struct ipa_rm_wq_work_type *work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (work) {
+ INIT_WORK((struct work_struct *)work, ipa_rm_wq_handler);
+ work->wq_cmd = wq_cmd;
+ work->resource_name = resource_name;
+ work->event = event;
+ result = queue_work(ipa_rm_ctx->ipa_rm_wq,
+ (struct work_struct *)work);
+ }
+ return result;
+}
+
+/**
+ * ipa_rm_initialize() - initialize IPA RM component
+ *
+ * Returns: 0 on success, negative otherwise
+ */
+int ipa_rm_initialize(void)
+{
+ int result;
+
+ ipa_rm_ctx = kzalloc(sizeof(*ipa_rm_ctx), GFP_KERNEL);
+ if (!ipa_rm_ctx) {
+ result = -ENOMEM;
+ goto bail;
+ }
+ ipa_rm_ctx->ipa_rm_wq = create_singlethread_workqueue("ipa_rm_wq");
+ if (!ipa_rm_ctx->ipa_rm_wq) {
+ result = -ENOMEM;
+ goto create_wq_fail;
+ }
+ result = ipa_rm_dep_graph_create(&(ipa_rm_ctx->dep_graph));
+ if (result)
+ goto graph_alloc_fail;
+ IPADBG("IPA RM ipa_rm_initialize SUCCESS\n");
+ return 0;
+
+graph_alloc_fail:
+ destroy_workqueue(ipa_rm_ctx->ipa_rm_wq);
+create_wq_fail:
+ kfree(ipa_rm_ctx);
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_exit() - free all IPA RM resources
+ */
+void ipa_rm_exit(void)
+{
+ ipa_rm_dep_graph_delete(ipa_rm_ctx->dep_graph);
+ destroy_workqueue(ipa_rm_ctx->ipa_rm_wq);
+ kfree(ipa_rm_ctx);
+ ipa_rm_ctx = NULL;
+}
diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
new file mode 100644
index 0000000..6afab42
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
@@ -0,0 +1,208 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "ipa_rm_dependency_graph.h"
+#include "ipa_rm_i.h"
+
+static int ipa_rm_dep_get_index(enum ipa_rm_resource_name resource_name)
+{
+ int resource_index = IPA_RM_INDEX_INVALID;
+ if (IPA_RM_RESORCE_IS_PROD(resource_name))
+ resource_index = ipa_rm_prod_index(resource_name);
+ else if (IPA_RM_RESORCE_IS_CONS(resource_name))
+ resource_index = ipa_rm_cons_index(resource_name);
+
+ return resource_index;
+}
+
+/**
+ * ipa_rm_dep_graph_create() - creates graph
+ * @dep_graph: [out] created dependency graph
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph)
+{
+ int result = 0;
+ *dep_graph = kzalloc(sizeof(**dep_graph), GFP_KERNEL);
+ if (!*dep_graph) {
+ result = -ENOMEM;
+ goto bail;
+ }
+ rwlock_init(&((*dep_graph)->lock));
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_dep_graph_delete() - destroys the graph
+ * @graph: [in] dependency graph
+ *
+ * Frees all resources.
+ */
+void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph)
+{
+ int resource_index;
+ if (!graph)
+ return;
+ write_lock(&graph->lock);
+ for (resource_index = 0;
+ resource_index < IPA_RM_RESOURCE_MAX;
+ resource_index++)
+ kfree(graph->resource_table[resource_index]);
+ write_unlock(&graph->lock);
+ memset(graph->resource_table, 0, sizeof(graph->resource_table));
+}
+
+/**
+ * ipa_rm_dep_graph_get_resource() - provides a resource by name
+ * @graph: [in] dependency graph
+ * @resource_name: [in] name of the resource
+ * @resource: [out] resource in case of success
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_get_resource(
+ struct ipa_rm_dep_graph *graph,
+ enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_resource **resource)
+{
+ int result;
+ int resource_index;
+ if (!graph) {
+ result = -EINVAL;
+ goto bail;
+ }
+ resource_index = ipa_rm_dep_get_index(resource_name);
+ if (resource_index == IPA_RM_INDEX_INVALID) {
+ result = -EINVAL;
+ goto bail;
+ }
+ read_lock(&graph->lock);
+ *resource = graph->resource_table[resource_index];
+ read_unlock(&graph->lock);
+ if (!*resource) {
+ result = -EINVAL;
+ goto bail;
+ }
+ result = 0;
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_dep_graph_add() - adds resource to graph
+ * @graph: [in] dependency graph
+ * @resource: [in] resource to add
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph,
+ struct ipa_rm_resource *resource)
+{
+ int result = 0;
+ int resource_index;
+ if (!graph || !resource) {
+ result = -EINVAL;
+ goto bail;
+ }
+ resource_index = ipa_rm_dep_get_index(resource->name);
+ if (resource_index == IPA_RM_INDEX_INVALID) {
+ result = -EINVAL;
+ goto bail;
+ }
+ write_lock(&graph->lock);
+ graph->resource_table[resource_index] = resource;
+ write_unlock(&graph->lock);
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_dep_graph_add_dependency() - adds dependency between
+ * two nodes in graph
+ * @graph: [in] dependency graph
+ * @resource_name: [in] dependent resource (producer)
+ * @depends_on_name: [in] resource to depend on (consumer)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph,
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ struct ipa_rm_resource *dependant = NULL;
+ struct ipa_rm_resource *dependency = NULL;
+ int result;
+ if (!graph ||
+ !IPA_RM_RESORCE_IS_PROD(resource_name) ||
+ !IPA_RM_RESORCE_IS_CONS(depends_on_name)) {
+ result = -EINVAL;
+ goto bail;
+ }
+ if (ipa_rm_dep_graph_get_resource(graph,
+ resource_name,
+ &dependant)) {
+ result = -EINVAL;
+ goto bail;
+ }
+ if (ipa_rm_dep_graph_get_resource(graph,
+ depends_on_name,
+ &dependency)) {
+ result = -EINVAL;
+ goto bail;
+ }
+ result = ipa_rm_resource_add_dependency(dependant, dependency);
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_dep_graph_delete_dependency() - deletes a dependency between
+ * two nodes in graph
+ * @graph: [in] dependency graph
+ * @resource_name: [in] dependent resource (producer)
+ * @depends_on_name: [in] depended-on resource (consumer)
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph,
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ struct ipa_rm_resource *dependant = NULL;
+ struct ipa_rm_resource *dependency = NULL;
+ int result;
+ if (!graph ||
+ !IPA_RM_RESORCE_IS_PROD(resource_name) ||
+ !IPA_RM_RESORCE_IS_CONS(depends_on_name)) {
+ result = -EINVAL;
+ goto bail;
+ }
+ if (ipa_rm_dep_graph_get_resource(graph,
+ resource_name,
+ &dependant)) {
+ result = -EINVAL;
+ goto bail;
+ }
+ if (ipa_rm_dep_graph_get_resource(graph,
+ depends_on_name,
+ &dependency)) {
+ result = -EINVAL;
+ goto bail;
+ }
+ result = ipa_rm_resource_delete_dependency(dependant, dependency);
+bail:
+ return result;
+}
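A brief sketch of how the graph API above is meant to be driven (illustration only, not part of the patch): resources created by ipa_rm_resource_create() are first added to the graph and only then linked producer-to-consumer. Error handling is trimmed, and the resource pointers are assumed to have been created elsewhere.

	#include "ipa_rm_dependency_graph.h"

	static int example_link_usb_to_a2(struct ipa_rm_resource *usb_prod,
			struct ipa_rm_resource *a2_cons)
	{
		struct ipa_rm_dep_graph *graph;
		int ret;

		ret = ipa_rm_dep_graph_create(&graph);
		if (ret)
			return ret;

		ipa_rm_dep_graph_add(graph, usb_prod);
		ipa_rm_dep_graph_add(graph, a2_cons);

		/* USB_PROD will only be granted once A2_CONS is granted */
		ret = ipa_rm_dep_graph_add_dependency(graph,
				IPA_RM_RESOURCE_USB_PROD,
				IPA_RM_RESOURCE_A2_CONS);
		if (ret)
			ipa_rm_dep_graph_delete(graph);
		return ret;
	}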
diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h
new file mode 100644
index 0000000..19d9461
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_DEPENDENCY_GRAPH_H_
+#define _IPA_RM_DEPENDENCY_GRAPH_H_
+
+#include <linux/list.h>
+#include <mach/ipa.h>
+#include "ipa_rm_resource.h"
+
+struct ipa_rm_dep_graph {
+ struct ipa_rm_resource *resource_table[IPA_RM_RESOURCE_MAX];
+ rwlock_t lock;
+};
+
+int ipa_rm_dep_graph_get_resource(
+ struct ipa_rm_dep_graph *graph,
+ enum ipa_rm_resource_name name,
+ struct ipa_rm_resource **resource);
+
+int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph);
+
+void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph);
+
+int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph,
+ struct ipa_rm_resource *resource);
+
+int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph,
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph,
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name);
+
+#endif /* _IPA_RM_DEPENDENCY_GRAPH_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm_i.h b/drivers/platform/msm/ipa/ipa_rm_i.h
new file mode 100644
index 0000000..141a442
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_i.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_I_H_
+#define _IPA_RM_I_H_
+
+#include <linux/workqueue.h>
+#include <mach/ipa.h>
+
+#define IPA_RM_RESOURCE_CONS_MAX \
+ (IPA_RM_RESOURCE_MAX - IPA_RM_RESOURCE_PROD_MAX)
+#define IPA_RM_RESORCE_IS_PROD(x) \
+ (x >= IPA_RM_RESOURCE_PROD && x < IPA_RM_RESOURCE_PROD_MAX)
+#define IPA_RM_RESORCE_IS_CONS(x) \
+ (x >= IPA_RM_RESOURCE_PROD_MAX && x < IPA_RM_RESOURCE_MAX)
+#define IPA_RM_INDEX_INVALID (-1)
+
+int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name);
+int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name);
+
+/**
+ * enum ipa_rm_wq_cmd - workqueue commands
+ */
+enum ipa_rm_wq_cmd {
+ IPA_RM_WQ_NOTIFY_PROD,
+ IPA_RM_WQ_NOTIFY_CONS,
+ IPA_RM_WQ_RESOURCE_CB
+};
+
+/**
+ * struct ipa_rm_wq_work_type - IPA RM workqueue specific
+ * work type
+ * @work: work struct
+ * @wq_cmd: command that should be processed in workqueue context
+ * @resource_name: name of the resource on which this work
+ * should be done
+ * @event: event to notify
+ */
+struct ipa_rm_wq_work_type {
+ struct work_struct work;
+ enum ipa_rm_wq_cmd wq_cmd;
+ enum ipa_rm_resource_name resource_name;
+ enum ipa_rm_event event;
+};
+
+int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd,
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_event event);
+
+int ipa_rm_initialize(void);
+
+void ipa_rm_exit(void);
+
+#endif /* _IPA_RM_I_H_ */
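The classification macros above split enum ipa_rm_resource_name into a producer range and a consumer range. A small assumed-standalone helper makes the intent explicit:

	#include "ipa_rm_i.h"

	static const char *example_classify(enum ipa_rm_resource_name name)
	{
		if (IPA_RM_RESORCE_IS_PROD(name))
			return "producer";	/* [PROD, PROD_MAX) */
		if (IPA_RM_RESORCE_IS_CONS(name))
			return "consumer";	/* [PROD_MAX, RESOURCE_MAX) */
		return "invalid";
	}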
diff --git a/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
new file mode 100644
index 0000000..2a3b8d3
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
@@ -0,0 +1,249 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/unistd.h>
+#include <linux/workqueue.h>
+#include <mach/ipa.h>
+#include "ipa_i.h"
+
+/**
+ * struct ipa_rm_it_private - IPA RM Inactivity Timer private
+ * data
+ * @initied: indicates if the instance was initialized
+ * @lock: spinlock for mutual exclusion
+ * @resource_name: resource name
+ * @work: delayed work object for running the delayed release
+ * function
+ * @release_in_prog: boolean flag indicating whether a resource
+ * release is scheduled to happen in the future
+ * @jiffies: number of jiffies for the timeout
+ */
+struct ipa_rm_it_private {
+ bool initied;
+ enum ipa_rm_resource_name resource_name;
+ spinlock_t lock;
+ struct delayed_work work;
+ bool release_in_prog;
+ unsigned long jiffies;
+};
+
+static struct ipa_rm_it_private ipa_rm_it_handles[IPA_RM_RESOURCE_MAX];
+
+/**
+ * ipa_rm_inactivity_timer_func() - called when the timer expires, in
+ * the context of the shared workqueue. Checks internally whether the
+ * release_in_prog flag is set and, if so, calls
+ * ipa_rm_release_resource(). release_in_prog is cleared when
+ * ipa_rm_inactivity_timer_request_resource() is called. In
+ * that case this function does not call
+ * ipa_rm_release_resource(), since the resource needs to remain
+ * up
+ *
+ * @work: work object provided by the work queue
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_inactivity_timer_func(struct work_struct *work)
+{
+
+ struct ipa_rm_it_private *me = container_of(to_delayed_work(work),
+ struct ipa_rm_it_private,
+ work);
+ unsigned long flags;
+
+ IPADBG("%s: timer expired for resource %d!\n", __func__,
+ me->resource_name);
+
+ /* check that release still needs to be performed */
+ spin_lock_irqsave(
+ &ipa_rm_it_handles[me->resource_name].lock, flags);
+ if (ipa_rm_it_handles[me->resource_name].release_in_prog) {
+ IPADBG("%s: calling release_resource on resource %d!\n",
+ __func__, me->resource_name);
+ ipa_rm_release_resource(me->resource_name);
+ ipa_rm_it_handles[me->resource_name].release_in_prog = false;
+ }
+ spin_unlock_irqrestore(
+ &ipa_rm_it_handles[me->resource_name].lock, flags);
+}
+
+/**
+* ipa_rm_inactivity_timer_init() - Init function for IPA RM
+* inactivity timer. This function shall be called prior to calling
+* any other API of the IPA RM inactivity timer.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+* @msecs: time in milliseconds that the IPA RM inactivity timer
+* shall wait before calling ipa_rm_release_resource().
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
+ unsigned long msecs)
+{
+ IPADBG("%s: resource %d\n", __func__, resource_name);
+
+ if (resource_name < 0 ||
+ resource_name >= IPA_RM_RESOURCE_MAX) {
+ IPAERR("%s: Invalid parameter\n", __func__);
+ return -EINVAL;
+ }
+
+ if (ipa_rm_it_handles[resource_name].initied) {
+ IPAERR("%s: resource %d already inited\n",
+ __func__, resource_name);
+ return -EINVAL;
+ }
+
+ spin_lock_init(&ipa_rm_it_handles[resource_name].lock);
+ ipa_rm_it_handles[resource_name].resource_name = resource_name;
+ ipa_rm_it_handles[resource_name].jiffies = msecs_to_jiffies(msecs);
+ ipa_rm_it_handles[resource_name].release_in_prog = false;
+
+ INIT_DELAYED_WORK(&ipa_rm_it_handles[resource_name].work,
+ ipa_rm_inactivity_timer_func);
+ ipa_rm_it_handles[resource_name].initied = 1;
+
+ return 0;
+}
+
+/**
+* ipa_rm_inactivity_timer_destroy() - De-Init function for IPA
+* RM inactivity timer.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
+{
+ IPADBG("%s: resource %d\n", __func__, resource_name);
+
+ if (resource_name < 0 ||
+ resource_name >= IPA_RM_RESOURCE_MAX) {
+ IPAERR("%s: Invalid parameter\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!ipa_rm_it_handles[resource_name].initied) {
+ IPAERR("%s: resource %d already inited\n",
+ __func__, resource_name);
+ return -EINVAL;
+ }
+
+ memset(&ipa_rm_it_handles[resource_name], 0,
+ sizeof(struct ipa_rm_it_private));
+
+ return 0;
+}
+
+/**
+* ipa_rm_inactivity_timer_request_resource() - Same as
+* ipa_rm_request_resource(), with the difference that calling
+* this function also cancels the inactivity timer, if
+* ipa_rm_inactivity_timer_release_resource() was called earlier.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_request_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ int ret;
+ unsigned long flags;
+ IPADBG("%s: resource %d\n", __func__, resource_name);
+
+ if (resource_name < 0 ||
+ resource_name >= IPA_RM_RESOURCE_MAX) {
+ IPAERR("%s: Invalid parameter\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!ipa_rm_it_handles[resource_name].initied) {
+ IPAERR("%s: Not initialized\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags);
+ cancel_delayed_work(&ipa_rm_it_handles[resource_name].work);
+ ipa_rm_it_handles[resource_name].release_in_prog = false;
+ spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags);
+ ret = ipa_rm_request_resource(resource_name);
+ IPADBG("%s: resource %d: returning %d\n", __func__, resource_name, ret);
+ return ret;
+}
+
+/**
+* ipa_rm_inactivity_timer_release_resource() - Sets the
+* inactivity timer to the timeout set by
+* ipa_rm_inactivity_timer_init(). When the timeout expires, IPA
+* RM inactivity timer will call ipa_rm_release_resource().
+* If a call to ipa_rm_inactivity_timer_request_resource() is
+* made BEFORE the timeout has expired, the timer will be
+* cancelled.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_release_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ unsigned long flags;
+ IPADBG("%s: resource %d\n", __func__, resource_name);
+
+ if (resource_name < 0 ||
+ resource_name >= IPA_RM_RESOURCE_MAX) {
+ IPAERR("%s: Invalid parameter\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!ipa_rm_it_handles[resource_name].initied) {
+ IPAERR("%s: Not initialized\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags);
+ if (ipa_rm_it_handles[resource_name].release_in_prog) {
+ IPADBG("%s: Timer already set, not scheduling again %d\n",
+ __func__, resource_name);
+ spin_unlock_irqrestore(
+ &ipa_rm_it_handles[resource_name].lock, flags);
+ return 0;
+ }
+ ipa_rm_it_handles[resource_name].release_in_prog = true;
+ spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags);
+
+ IPADBG("%s: setting delayed work\n", __func__);
+ schedule_delayed_work(&ipa_rm_it_handles[resource_name].work,
+ ipa_rm_it_handles[resource_name].jiffies);
+
+ return 0;
+}
+
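The API above is intended to be used as a request/arm pair around data activity. The sketch below is an illustration only (the 500 ms timeout, the WWAN resource choice and the example_* wrappers are not from the patch); it assumes the prototypes are exported through <mach/ipa.h>.

	#include <linux/errno.h>
	#include <mach/ipa.h>

	static int example_wwan_probe(void)
	{
		/* release WWAN_0_PROD 500 ms after the last packet */
		return ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD, 500);
	}

	static int example_wwan_xmit(void)
	{
		int ret;

		ret = ipa_rm_inactivity_timer_request_resource(
				IPA_RM_RESOURCE_WWAN_0_PROD);
		if (ret && ret != -EINPROGRESS)
			return ret;

		/* ... hand the packet to IPA here ... */

		/* re-arm the timer; the actual release happens on expiry */
		return ipa_rm_inactivity_timer_release_resource(
				IPA_RM_RESOURCE_WWAN_0_PROD);
	}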
diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.c b/drivers/platform/msm/ipa/ipa_rm_peers_list.c
new file mode 100644
index 0000000..55f8239
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.c
@@ -0,0 +1,247 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "ipa_i.h"
+#include "ipa_rm_i.h"
+#include "ipa_rm_resource.h"
+
+/**
+ * ipa_rm_peers_list_get_resource_index() - resource name to index
+ * of this resource in corresponding peers list
+ * @resource_name: [in] resource name
+ *
+ * Returns: resource index mapping, IPA_RM_INDEX_INVALID
+ * in case provided resource name isn't contained in enum
+ * ipa_rm_resource_name.
+ *
+ */
+static int ipa_rm_peers_list_get_resource_index(
+ enum ipa_rm_resource_name resource_name)
+{
+ int resource_index = IPA_RM_INDEX_INVALID;
+ if (IPA_RM_RESORCE_IS_PROD(resource_name))
+ resource_index = ipa_rm_prod_index(resource_name);
+ else if (IPA_RM_RESORCE_IS_CONS(resource_name)) {
+ resource_index = ipa_rm_cons_index(resource_name);
+ if (resource_index != IPA_RM_INDEX_INVALID)
+ resource_index =
+ resource_index - IPA_RM_RESOURCE_PROD_MAX;
+ }
+
+ return resource_index;
+}
+
+static bool ipa_rm_peers_list_check_index(int index,
+ struct ipa_rm_peers_list *peers_list)
+{
+ return !(index >= peers_list->max_peers || index < 0);
+}
+
+/**
+ * ipa_rm_peers_list_create() - creates the peers list
+ *
+ * @max_peers: maximum number of peers in new list
+ * @peers_list: [out] newly created peers list
+ *
+ * Returns: 0 in case of SUCCESS, negative otherwise
+ */
+int ipa_rm_peers_list_create(int max_peers,
+ struct ipa_rm_peers_list **peers_list)
+{
+ int result;
+ *peers_list = kzalloc(sizeof(**peers_list), GFP_KERNEL);
+ if (!*peers_list) {
+ result = -ENOMEM;
+ goto bail;
+ }
+ rwlock_init(&(*peers_list)->peers_lock);
+ (*peers_list)->max_peers = max_peers;
+ (*peers_list)->peers = kzalloc((*peers_list)->max_peers *
+ sizeof(struct ipa_rm_resource *), GFP_KERNEL);
+ if (!((*peers_list)->peers)) {
+ result = -ENOMEM;
+ goto list_alloc_fail;
+ }
+ return 0;
+
+list_alloc_fail:
+ kfree(*peers_list);
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_peers_list_delete() - deletes the peers list
+ *
+ * @peers_list: peers list
+ *
+ */
+void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list)
+{
+ if (peers_list) {
+ kfree(peers_list->peers);
+ kfree(peers_list);
+ }
+}
+
+/**
+ * ipa_rm_peers_list_remove_peer() - removes peer from the list
+ *
+ * @peers_list: peers list
+ * @resource_name: name of the resource to remove
+ *
+ */
+void ipa_rm_peers_list_remove_peer(
+ struct ipa_rm_peers_list *peers_list,
+ enum ipa_rm_resource_name resource_name)
+{
+ if (!peers_list)
+ return;
+ write_lock(&peers_list->peers_lock);
+ peers_list->peers[ipa_rm_peers_list_get_resource_index(
+ resource_name)] = NULL;
+ peers_list->peers_count--;
+ write_unlock(&peers_list->peers_lock);
+}
+
+/**
+ * ipa_rm_peers_list_add_peer() - adds peer to the list
+ *
+ * @peers_list: peers list
+ * @resource: resource to add
+ *
+ */
+void ipa_rm_peers_list_add_peer(
+ struct ipa_rm_peers_list *peers_list,
+ struct ipa_rm_resource *resource)
+{
+ if (!peers_list || !resource)
+ return;
+ write_lock(&peers_list->peers_lock);
+ peers_list->peers[ipa_rm_peers_list_get_resource_index(
+ resource->name)] =
+ resource;
+ peers_list->peers_count++;
+ write_unlock(&peers_list->peers_lock);
+}
+
+/**
+ * ipa_rm_peers_list_is_empty() - checks
+ * if resource peers list is empty
+ *
+ * @peers_list: peers list
+ *
+ * Returns: true if the list is empty, false otherwise
+ */
+bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list)
+{
+ bool result = true;
+ if (!peers_list)
+ goto bail;
+ read_lock(&peers_list->peers_lock);
+ if (peers_list->peers_count > 0)
+ result = false;
+ read_unlock(&peers_list->peers_lock);
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_peers_list_has_last_peer() - checks
+ * if resource peers list has exactly one peer
+ *
+ * @peers_list: peers list
+ *
+ * Returns: true if the list has exactly one peer, false otherwise
+ */
+bool ipa_rm_peers_list_has_last_peer(
+ struct ipa_rm_peers_list *peers_list)
+{
+ bool result = false;
+ if (!peers_list)
+ goto bail;
+ read_lock(&peers_list->peers_lock);
+ if (peers_list->peers_count == 1)
+ result = true;
+ read_unlock(&peers_list->peers_lock);
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_peers_list_check_dependency() - check dependency
+ * between 2 peer lists
+ * @resource_peers: first peers list
+ * @resource_name: first peers list resource name
+ * @depends_on_peers: second peers list
+ * @depends_on_name: second peers list resource name
+ *
+ * Returns: true if there is dependency, false otherwise
+ *
+ */
+bool ipa_rm_peers_list_check_dependency(
+ struct ipa_rm_peers_list *resource_peers,
+ enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_peers_list *depends_on_peers,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ bool result = false;
+ if (!resource_peers || !depends_on_peers)
+ return result;
+ read_lock(&resource_peers->peers_lock);
+ if (resource_peers->peers[ipa_rm_peers_list_get_resource_index(
+ depends_on_name)] != NULL)
+ result = true;
+ read_unlock(&resource_peers->peers_lock);
+
+ read_lock(&depends_on_peers->peers_lock);
+ if (depends_on_peers->peers[ipa_rm_peers_list_get_resource_index(
+ resource_name)] != NULL)
+ result = true;
+ read_unlock(&depends_on_peers->peers_lock);
+
+ return result;
+}
+
+/**
+ * ipa_rm_peers_list_get_resource() - get resource by
+ * resource index
+ * @resource_index: resource index
+ * @resource_peers: peers list
+ *
+ * Returns: the resource if found, NULL otherwise
+ */
+struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index,
+ struct ipa_rm_peers_list *resource_peers)
+{
+ struct ipa_rm_resource *result = NULL;
+ if (!ipa_rm_peers_list_check_index(resource_index, resource_peers))
+ goto bail;
+ read_lock(&resource_peers->peers_lock);
+ result = resource_peers->peers[resource_index];
+ read_unlock(&resource_peers->peers_lock);
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_peers_list_get_size() - get peers list size
+ *
+ * @peers_list: peers list
+ *
+ * Returns: the size of the peers list
+ */
+int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list)
+{
+ return peers_list->max_peers;
+}
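For completeness, a minimal illustration of the peers-list life cycle (not part of the patch; the resource pointers are assumed to come from ipa_rm_resource_create()): a producer sizes its list for all consumers, records a peer, and the symmetric check is what add/delete dependency rely on.

	#include <linux/kernel.h>
	#include "ipa_rm_i.h"
	#include "ipa_rm_peers_list.h"

	static int example_record_peer(struct ipa_rm_resource *usb_prod,
			struct ipa_rm_resource *usb_cons)
	{
		struct ipa_rm_peers_list *peers;
		int ret;

		ret = ipa_rm_peers_list_create(IPA_RM_RESOURCE_CONS_MAX, &peers);
		if (ret)
			return ret;

		ipa_rm_peers_list_add_peer(peers, usb_cons);

		if (ipa_rm_peers_list_check_dependency(peers, usb_prod->name,
				usb_cons->peers_list, usb_cons->name))
			pr_debug("dependency already recorded\n");

		ipa_rm_peers_list_delete(peers);
		return 0;
	}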
diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.h b/drivers/platform/msm/ipa/ipa_rm_peers_list.h
new file mode 100644
index 0000000..f8fd1ca
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_PEERS_LIST_H_
+#define _IPA_RM_PEERS_LIST_H_
+
+#include "ipa_rm_resource.h"
+
+/**
+ * struct ipa_rm_peers_list - IPA RM resource peers list
+ * @peers: the list of references to resources dependent on this resource
+ * in case of producer or list of dependencies in case of consumer
+ * @max_peers: maximum number of peers for this resource
+ * @peers_count: actual number of peers for this resource
+ * @peers_lock: RW lock for peers container
+ */
+struct ipa_rm_peers_list {
+ struct ipa_rm_resource **peers;
+ int max_peers;
+ int peers_count;
+ rwlock_t peers_lock;
+};
+
+int ipa_rm_peers_list_create(int max_peers,
+ struct ipa_rm_peers_list **peers_list);
+void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list);
+void ipa_rm_peers_list_remove_peer(
+ struct ipa_rm_peers_list *peers_list,
+ enum ipa_rm_resource_name resource_name);
+void ipa_rm_peers_list_add_peer(
+ struct ipa_rm_peers_list *peers_list,
+ struct ipa_rm_resource *resource);
+bool ipa_rm_peers_list_check_dependency(
+ struct ipa_rm_peers_list *resource_peers,
+ enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_peers_list *depends_on_peers,
+ enum ipa_rm_resource_name depends_on_name);
+struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index,
+ struct ipa_rm_peers_list *peers_list);
+int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list);
+bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list);
+bool ipa_rm_peers_list_has_last_peer(
+ struct ipa_rm_peers_list *peers_list);
+
+
+#endif /* _IPA_RM_PEERS_LIST_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c
new file mode 100644
index 0000000..3ba8e84
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.c
@@ -0,0 +1,809 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "ipa_i.h"
+#include "ipa_rm_resource.h"
+#include "ipa_rm_i.h"
+
+/**
+ * ipa_rm_prod_index() - producer name to producer index mapping
+ * @resource_name: [in] resource name (should be a producer)
+ *
+ * Returns: resource index mapping, or IPA_RM_INDEX_INVALID
+ * in case the provided resource name isn't contained
+ * in enum ipa_rm_resource_name or is not a producer.
+ *
+ */
+int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name)
+{
+ int result = resource_name;
+ switch (resource_name) {
+ case IPA_RM_RESOURCE_BRIDGE_PROD:
+ case IPA_RM_RESOURCE_A2_PROD:
+ case IPA_RM_RESOURCE_USB_PROD:
+ case IPA_RM_RESOURCE_HSIC_PROD:
+ case IPA_RM_RESOURCE_STD_ECM_PROD:
+ case IPA_RM_RESOURCE_WWAN_0_PROD:
+ case IPA_RM_RESOURCE_WWAN_1_PROD:
+ case IPA_RM_RESOURCE_WWAN_2_PROD:
+ case IPA_RM_RESOURCE_WWAN_3_PROD:
+ case IPA_RM_RESOURCE_WWAN_4_PROD:
+ case IPA_RM_RESOURCE_WWAN_5_PROD:
+ case IPA_RM_RESOURCE_WWAN_6_PROD:
+ case IPA_RM_RESOURCE_WWAN_7_PROD:
+ case IPA_RM_RESOURCE_WLAN_PROD:
+ break;
+ default:
+ result = IPA_RM_INDEX_INVALID;
+ break;
+ }
+ return result;
+}
+
+/**
+ * ipa_rm_cons_index() - consumer name to consumer index mapping
+ * @resource_name: [in] resource name (should be a consumer)
+ *
+ * Returns: resource index mapping, or IPA_RM_INDEX_INVALID
+ * in case the provided resource name isn't contained
+ * in enum ipa_rm_resource_name or is not a consumer.
+ *
+ */
+int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name)
+{
+ int result = resource_name;
+ switch (resource_name) {
+ case IPA_RM_RESOURCE_A2_CONS:
+ case IPA_RM_RESOURCE_USB_CONS:
+ case IPA_RM_RESOURCE_HSIC_CONS:
+ break;
+ default:
+ result = IPA_RM_INDEX_INVALID;
+ break;
+ }
+ return result;
+}
+
+static int ipa_rm_resource_consumer_request(
+ struct ipa_rm_resource_cons *consumer)
+{
+ int result = 0;
+ int driver_result;
+ unsigned long flags;
+ IPADBG("IPA RM ::ipa_rm_resource_consumer_request ENTER\n");
+ spin_lock_irqsave(&consumer->resource.state_lock, flags);
+ switch (consumer->resource.state) {
+ case IPA_RM_RELEASED:
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ {
+ enum ipa_rm_resource_state prev_state =
+ consumer->resource.state;
+ consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
+ spin_unlock_irqrestore(&consumer->resource.state_lock, flags);
+ driver_result = consumer->request_resource();
+ spin_lock_irqsave(&consumer->resource.state_lock, flags);
+ if (driver_result == 0)
+ consumer->resource.state = IPA_RM_GRANTED;
+ else if (driver_result != -EINPROGRESS) {
+ consumer->resource.state = prev_state;
+ result = driver_result;
+ goto bail;
+ }
+ result = driver_result;
+ break;
+ }
+ case IPA_RM_GRANTED:
+ break;
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ result = -EINPROGRESS;
+ break;
+ default:
+ result = -EPERM;
+ goto bail;
+ }
+ consumer->usage_count++;
+bail:
+ spin_unlock_irqrestore(&consumer->resource.state_lock, flags);
+ IPADBG("IPA RM ::ipa_rm_resource_consumer_request EXIT [%d]\n", result);
+ return result;
+}
+
+static int ipa_rm_resource_consumer_release(
+ struct ipa_rm_resource_cons *consumer)
+{
+ int result = 0;
+ int driver_result;
+ unsigned long flags;
+ enum ipa_rm_resource_state save_state;
+ IPADBG("IPA RM ::ipa_rm_resource_consumer_release ENTER\n");
+ spin_lock_irqsave(&consumer->resource.state_lock, flags);
+ switch (consumer->resource.state) {
+ case IPA_RM_RELEASED:
+ break;
+ case IPA_RM_GRANTED:
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ if (consumer->usage_count > 0)
+ consumer->usage_count--;
+ if (consumer->usage_count == 0) {
+ save_state = consumer->resource.state;
+ consumer->resource.state = IPA_RM_RELEASE_IN_PROGRESS;
+ spin_unlock_irqrestore(&consumer->resource.state_lock,
+ flags);
+ driver_result = consumer->release_resource();
+ spin_lock_irqsave(&consumer->resource.state_lock,
+ flags);
+ if (driver_result == 0)
+ consumer->resource.state = IPA_RM_RELEASED;
+ else if (driver_result != -EINPROGRESS)
+ consumer->resource.state = save_state;
+ result = driver_result;
+ }
+ break;
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ if (consumer->usage_count > 0)
+ consumer->usage_count--;
+ result = -EINPROGRESS;
+ break;
+ default:
+ result = -EPERM;
+ goto bail;
+ }
+bail:
+ spin_unlock_irqrestore(&consumer->resource.state_lock, flags);
+ IPADBG("IPA RM ::ipa_rm_resource_consumer_release EXIT [%d]\n", result);
+ return result;
+}
+
+/**
+ * ipa_rm_resource_producer_notify_clients() - notify
+ * all registered clients of given producer
+ * @producer: producer
+ * @event: event to notify
+ */
+void ipa_rm_resource_producer_notify_clients(
+ struct ipa_rm_resource_prod *producer,
+ enum ipa_rm_event event)
+{
+ struct ipa_rm_notification_info *reg_info, *reg_info_cloned;
+ struct list_head *pos, *q;
+ LIST_HEAD(cloned_list);
+ read_lock(&producer->event_listeners_lock);
+ list_for_each(pos, &(producer->event_listeners)) {
+ reg_info = list_entry(pos,
+ struct ipa_rm_notification_info,
+ link);
+ reg_info_cloned = kzalloc(sizeof(*reg_info_cloned), GFP_ATOMIC);
+ if (!reg_info_cloned)
+ goto clone_list_failed;
+ reg_info_cloned->reg_params.notify_cb =
+ reg_info->reg_params.notify_cb;
+ reg_info_cloned->reg_params.user_data =
+ reg_info->reg_params.user_data;
+ list_add(®_info_cloned->link, &cloned_list);
+ }
+ read_unlock(&producer->event_listeners_lock);
+ list_for_each_safe(pos, q, &cloned_list) {
+ reg_info = list_entry(pos,
+ struct ipa_rm_notification_info,
+ link);
+ reg_info->reg_params.notify_cb(
+ reg_info->reg_params.user_data,
+ event,
+ 0);
+ list_del(pos);
+ kfree(reg_info);
+ }
+ return;
+clone_list_failed:
+ read_unlock(&producer->event_listeners_lock);
+}
+
+static int ipa_rm_resource_producer_create(struct ipa_rm_resource **resource,
+ struct ipa_rm_resource_prod **producer,
+ struct ipa_rm_create_params *create_params,
+ int *max_peers)
+{
+ int result = 0;
+ *producer = kzalloc(sizeof(**producer), GFP_KERNEL);
+ if (*producer == NULL) {
+ result = -ENOMEM;
+ goto bail;
+ }
+ rwlock_init(&(*producer)->event_listeners_lock);
+ INIT_LIST_HEAD(&((*producer)->event_listeners));
+ result = ipa_rm_resource_producer_register(*producer,
+ &(create_params->reg_params));
+ if (result)
+ goto register_fail;
+ (*resource) = (struct ipa_rm_resource *) (*producer);
+ (*resource)->type = IPA_RM_PRODUCER;
+ *max_peers = IPA_RM_RESOURCE_CONS_MAX;
+ goto bail;
+register_fail:
+ kfree(*producer);
+bail:
+ return result;
+}
+
+static void ipa_rm_resource_producer_delete(
+ struct ipa_rm_resource_prod *producer)
+{
+ struct ipa_rm_notification_info *reg_info;
+ struct list_head *pos, *q;
+ write_lock(&producer->event_listeners_lock);
+ list_for_each_safe(pos, q, &(producer->event_listeners)) {
+ reg_info = list_entry(pos,
+ struct ipa_rm_notification_info,
+ link);
+ list_del(pos);
+ kfree(reg_info);
+ }
+ write_unlock(&producer->event_listeners_lock);
+}
+
+static int ipa_rm_resource_consumer_create(struct ipa_rm_resource **resource,
+ struct ipa_rm_resource_cons **consumer,
+ struct ipa_rm_create_params *create_params,
+ int *max_peers)
+{
+ int result = 0;
+ *consumer = kzalloc(sizeof(**consumer), GFP_KERNEL);
+ if (*consumer == NULL) {
+ result = -ENOMEM;
+ goto bail;
+ }
+ (*consumer)->request_resource = create_params->request_resource;
+ (*consumer)->release_resource = create_params->release_resource;
+ (*resource) = (struct ipa_rm_resource *) (*consumer);
+ (*resource)->type = IPA_RM_CONSUMER;
+ *max_peers = IPA_RM_RESOURCE_PROD_MAX;
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_resource_create() - creates resource
+ * @create_params: [in] parameters needed
+ * for resource initialization with IPA RM
+ * @resource: [out] created resource
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_create(
+ struct ipa_rm_create_params *create_params,
+ struct ipa_rm_resource **resource)
+{
+ struct ipa_rm_resource_cons *consumer;
+ struct ipa_rm_resource_prod *producer;
+ int max_peers;
+ int result = 0;
+
+ if (!create_params) {
+ result = -EINVAL;
+ goto bail;
+ }
+ if (IPA_RM_RESORCE_IS_PROD(create_params->name)) {
+ result = ipa_rm_resource_producer_create(resource,
+ &producer,
+ create_params,
+ &max_peers);
+ if (result)
+ goto bail;
+ } else if (IPA_RM_RESORCE_IS_CONS(create_params->name)) {
+ result = ipa_rm_resource_consumer_create(resource,
+ &consumer,
+ create_params,
+ &max_peers);
+ if (result)
+ goto bail;
+ } else {
+ result = -EPERM;
+ goto bail;
+ }
+ result = ipa_rm_peers_list_create(max_peers,
+ &((*resource)->peers_list));
+ if (result)
+ goto peers_alloc_fail;
+ (*resource)->name = create_params->name;
+ (*resource)->state = IPA_RM_RELEASED;
+ spin_lock_init(&((*resource)->state_lock));
+ goto bail;
+peers_alloc_fail:
+ ipa_rm_resource_delete(*resource);
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_resource_delete() - deletes resource
+ * @resource: [in] resource to delete
+ */
+void ipa_rm_resource_delete(struct ipa_rm_resource *resource)
+{
+ if (!resource)
+ return;
+ if (resource->peers_list)
+ ipa_rm_peers_list_delete(resource->peers_list);
+ if (resource->type == IPA_RM_PRODUCER) {
+ ipa_rm_resource_producer_delete(
+ (struct ipa_rm_resource_prod *) resource);
+ kfree((struct ipa_rm_resource_prod *) resource);
+ } else
+ kfree((struct ipa_rm_resource_cons *) resource);
+}
+
+/**
+ * ipa_rm_resource_producer_register() - register a producer resource
+ * @producer: [in] producer resource
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Producer resource is expected for this call.
+ *
+ */
+int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer,
+ struct ipa_rm_register_params *reg_params)
+{
+ int result = 0;
+ struct ipa_rm_notification_info *reg_info;
+ struct list_head *pos;
+ if (!producer || !reg_params) {
+ result = -EPERM;
+ goto bail;
+ }
+ read_lock(&producer->event_listeners_lock);
+ list_for_each(pos, &(producer->event_listeners)) {
+ reg_info = list_entry(pos,
+ struct ipa_rm_notification_info,
+ link);
+ if (reg_info->reg_params.notify_cb ==
+ reg_params->notify_cb) {
+ result = -EPERM;
+ read_unlock(&producer->event_listeners_lock);
+ goto bail;
+ }
+
+ }
+ read_unlock(&producer->event_listeners_lock);
+ reg_info = kzalloc(sizeof(*reg_info), GFP_KERNEL);
+ if (reg_info == NULL) {
+ result = -ENOMEM;
+ goto bail;
+ }
+ reg_info->reg_params.user_data = reg_params->user_data;
+ reg_info->reg_params.notify_cb = reg_params->notify_cb;
+ INIT_LIST_HEAD(®_info->link);
+ write_lock(&producer->event_listeners_lock);
+ list_add(®_info->link, &producer->event_listeners);
+ write_unlock(&producer->event_listeners_lock);
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_resource_producer_deregister() - deregister a producer resource
+ * @producer: [in] producer resource
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Producer resource is expected for this call.
+ * This function deletes only a single instance of
+ * the registration info.
+ *
+ */
+int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer,
+ struct ipa_rm_register_params *reg_params)
+{
+ int result = -EINVAL;
+ struct ipa_rm_notification_info *reg_info;
+ struct list_head *pos, *q;
+ if (!producer || !reg_params)
+ return -EINVAL;
+ write_lock(&producer->event_listeners_lock);
+ list_for_each_safe(pos, q, &(producer->event_listeners)) {
+ reg_info = list_entry(pos,
+ struct ipa_rm_notification_info,
+ link);
+ if (reg_info->reg_params.notify_cb ==
+ reg_params->notify_cb) {
+ list_del(pos);
+ kfree(reg_info);
+ result = 0;
+ goto bail;
+ }
+
+ }
+bail:
+ write_unlock(&producer->event_listeners_lock);
+ return result;
+}
+
+/**
+ * ipa_rm_resource_add_dependency() - add dependency between two
+ * given resources
+ * @resource: [in] dependent resource
+ * @depends_on: [in] resource to depend on
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource,
+ struct ipa_rm_resource *depends_on)
+{
+ int result = 0;
+ unsigned long flags;
+ int consumer_result;
+ if (!resource || !depends_on)
+ return -EINVAL;
+ if (ipa_rm_peers_list_check_dependency(resource->peers_list,
+ resource->name,
+ depends_on->peers_list,
+ depends_on->name))
+ return -EINVAL;
+ ipa_rm_peers_list_add_peer(resource->peers_list, depends_on);
+ ipa_rm_peers_list_add_peer(depends_on->peers_list, resource);
+ spin_lock_irqsave(&resource->state_lock, flags);
+ switch (resource->state) {
+ case IPA_RM_RELEASED:
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ break;
+ case IPA_RM_GRANTED:
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ {
+ enum ipa_rm_resource_state prev_state = resource->state;
+ resource->state = IPA_RM_REQUEST_IN_PROGRESS;
+ ((struct ipa_rm_resource_prod *)
+ resource)->pending_request++;
+ spin_unlock_irqrestore(&resource->state_lock, flags);
+ consumer_result = ipa_rm_resource_consumer_request(
+ (struct ipa_rm_resource_cons *)depends_on);
+ spin_lock_irqsave(&resource->state_lock, flags);
+ if (consumer_result != -EINPROGRESS)
+ resource->state = prev_state;
+ ((struct ipa_rm_resource_prod *)
+ resource)->pending_request--;
+ result = consumer_result;
+ break;
+ }
+ default:
+ result = -EPERM;
+ goto bail;
+ }
+bail:
+ spin_unlock_irqrestore(&resource->state_lock, flags);
+ IPADBG("IPA RM ipa_rm_resource_add_dependency name[%d]count[%d]EXIT\n",
+ resource->name, resource->peers_list->peers_count);
+ IPADBG("IPA RM ipa_rm_resource_add_dependency name[%d]count[%d]EXIT\n",
+ depends_on->name, depends_on->peers_list->peers_count);
+ return result;
+}
+
+/**
+ * ipa_rm_resource_delete_dependency() - deletes a dependency between two
+ * given resources
+ * @resource: [in] dependent resource
+ * @depends_on: [in] depended-on resource
+ *
+ * Returns: 0 on success, negative on failure;
+ * -EINPROGRESS is returned in case this is the last dependency
+ * of the given resource and the IPA RM client should receive the RELEASED cb
+ */
+int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource,
+ struct ipa_rm_resource *depends_on)
+{
+ int result = 0;
+ unsigned long flags;
+ if (!resource || !depends_on)
+ return -EINVAL;
+ if (ipa_rm_peers_list_check_dependency(resource->peers_list,
+ resource->name,
+ depends_on->peers_list,
+ depends_on->name))
+ return -EINVAL;
+ spin_lock_irqsave(&resource->state_lock, flags);
+ switch (resource->state) {
+ case IPA_RM_RELEASED:
+ case IPA_RM_GRANTED:
+ break;
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ if (((struct ipa_rm_resource_prod *)
+ resource)->pending_release > 0)
+ ((struct ipa_rm_resource_prod *)
+ resource)->pending_release--;
+ break;
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ if (((struct ipa_rm_resource_prod *)
+ resource)->pending_request > 0)
+ ((struct ipa_rm_resource_prod *)
+ resource)->pending_request--;
+ break;
+ default:
+ result = -EINVAL;
+ spin_unlock_irqrestore(&resource->state_lock, flags);
+ goto bail;
+ }
+ spin_unlock_irqrestore(&resource->state_lock, flags);
+ (void) ipa_rm_resource_consumer_release(
+ (struct ipa_rm_resource_cons *)depends_on);
+ if (ipa_rm_peers_list_has_last_peer(resource->peers_list)) {
+ (void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD,
+ resource->name,
+ IPA_RM_RESOURCE_RELEASED);
+ result = -EINPROGRESS;
+ }
+ ipa_rm_peers_list_remove_peer(resource->peers_list,
+ depends_on->name);
+ ipa_rm_peers_list_remove_peer(depends_on->peers_list,
+ resource->name);
+bail:
+ return result;
+}
+
+/**
+ * ipa_rm_resource_producer_request() - producer resource request
+ * @producer: [in] producer
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer)
+{
+ int peers_index;
+ int result = 0;
+ unsigned long flags;
+ struct ipa_rm_resource *consumer;
+ int consumer_result;
+ IPADBG("IPA RM ::ipa_rm_resource_producer_request [%d] ENTER\n",
+ producer->resource.name);
+ if (ipa_rm_peers_list_is_empty(producer->resource.peers_list)) {
+ spin_lock_irqsave(&producer->resource.state_lock, flags);
+ producer->resource.state = IPA_RM_GRANTED;
+ spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+ return 0;
+ }
+ spin_lock_irqsave(&producer->resource.state_lock, flags);
+ IPADBG("IPA RM ::ipa_rm_resource_producer_request state [%d]\n",
+ producer->resource.state);
+ switch (producer->resource.state) {
+ case IPA_RM_RELEASED:
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ producer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
+ break;
+ case IPA_RM_GRANTED:
+ goto unlock_and_bail;
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ result = -EINPROGRESS;
+ goto unlock_and_bail;
+ default:
+ result = -EINVAL;
+ goto unlock_and_bail;
+ }
+ producer->pending_request = 0;
+ spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+ for (peers_index = 0;
+ peers_index < ipa_rm_peers_list_get_size(
+ producer->resource.peers_list);
+ peers_index++) {
+ consumer = ipa_rm_peers_list_get_resource(peers_index,
+ producer->resource.peers_list);
+ if (consumer) {
+ spin_lock_irqsave(
+ &producer->resource.state_lock, flags);
+ producer->pending_request++;
+ spin_unlock_irqrestore(
+ &producer->resource.state_lock, flags);
+ consumer_result = ipa_rm_resource_consumer_request(
+ (struct ipa_rm_resource_cons *)consumer);
+ if (consumer_result == -EINPROGRESS) {
+ result = -EINPROGRESS;
+ } else {
+ spin_lock_irqsave(
+ &producer->resource.state_lock, flags);
+ producer->pending_request--;
+ spin_unlock_irqrestore(
+ &producer->resource.state_lock, flags);
+ if (consumer_result != 0) {
+ result = consumer_result;
+ goto bail;
+ }
+ }
+ }
+ }
+ spin_lock_irqsave(&producer->resource.state_lock, flags);
+ if (producer->pending_request == 0)
+ producer->resource.state = IPA_RM_GRANTED;
+ spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+ return result;
+unlock_and_bail:
+ spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+bail:
+ IPADBG("IPA RM ::ipa_rm_resource_producer_request EXIT[%d]\n", result);
+ return result;
+}
+
+/**
+ * ipa_rm_resource_producer_release() - producer resource release
+ * @producer: [in] producer resource
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer)
+{
+ int peers_index;
+ int result = 0;
+ unsigned long flags;
+ struct ipa_rm_resource *consumer;
+ int consumer_result;
+ IPADBG("IPA RM ::ipa_rm_resource_producer_release ENTER\n");
+ if (ipa_rm_peers_list_is_empty(producer->resource.peers_list)) {
+ spin_lock_irqsave(&producer->resource.state_lock, flags);
+ producer->resource.state = IPA_RM_RELEASED;
+ spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+ return 0;
+ }
+ spin_lock_irqsave(&producer->resource.state_lock, flags);
+ switch (producer->resource.state) {
+ case IPA_RM_RELEASED:
+ goto bail;
+ case IPA_RM_GRANTED:
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ producer->resource.state = IPA_RM_RELEASE_IN_PROGRESS;
+ break;
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ result = -EINPROGRESS;
+ goto bail;
+ default:
+ result = -EPERM;
+ goto bail;
+ }
+ producer->pending_release = 0;
+ spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+ for (peers_index = 0;
+ peers_index < ipa_rm_peers_list_get_size(
+ producer->resource.peers_list);
+ peers_index++) {
+ consumer = ipa_rm_peers_list_get_resource(peers_index,
+ producer->resource.peers_list);
+ if (consumer) {
+ spin_lock_irqsave(
+ &producer->resource.state_lock, flags);
+ producer->pending_release++;
+ spin_unlock_irqrestore(
+ &producer->resource.state_lock, flags);
+ consumer_result = ipa_rm_resource_consumer_release(
+ (struct ipa_rm_resource_cons *)consumer);
+ if (consumer_result == -EINPROGRESS) {
+ result = -EINPROGRESS;
+ } else {
+ spin_lock_irqsave(
+ &producer->resource.state_lock, flags);
+ producer->pending_release--;
+ spin_unlock_irqrestore(
+ &producer->resource.state_lock, flags);
+ }
+ }
+ }
+ spin_lock_irqsave(&producer->resource.state_lock, flags);
+ if (producer->pending_release == 0)
+ producer->resource.state = IPA_RM_RELEASED;
+ spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+ return result;
+bail:
+ spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+ IPADBG("IPA RM ::ipa_rm_resource_producer_release EXIT[%d]\n", result);
+ return result;
+}
+
+static void ipa_rm_resource_producer_handle_cb(
+ struct ipa_rm_resource_prod *producer,
+ enum ipa_rm_event event)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&producer->resource.state_lock, flags);
+ switch (producer->resource.state) {
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ if (event != IPA_RM_RESOURCE_GRANTED)
+ goto unlock_and_bail;
+ if (producer->pending_request > 0) {
+ producer->pending_request--;
+ if (producer->pending_request == 0) {
+ producer->resource.state =
+ IPA_RM_GRANTED;
+ spin_unlock_irqrestore(
+ &producer->resource.state_lock, flags);
+ ipa_rm_resource_producer_notify_clients(
+ producer,
+ IPA_RM_RESOURCE_GRANTED);
+ goto bail;
+ }
+ }
+ break;
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ if (event != IPA_RM_RESOURCE_RELEASED)
+ goto unlock_and_bail;
+ if (producer->pending_release > 0) {
+ producer->pending_release--;
+ if (producer->pending_release == 0) {
+ producer->resource.state =
+ IPA_RM_RELEASED;
+ spin_unlock_irqrestore(
+ &producer->resource.state_lock, flags);
+ ipa_rm_resource_producer_notify_clients(
+ producer,
+ IPA_RM_RESOURCE_RELEASED);
+ goto bail;
+ }
+ }
+ break;
+ case IPA_RM_GRANTED:
+ case IPA_RM_RELEASED:
+ default:
+ goto unlock_and_bail;
+ }
+unlock_and_bail:
+ spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+bail:
+ return;
+}
+
+/**
+ * ipa_rm_resource_consumer_handle_cb() - propagates resource
+ * notification to all dependent producers
+ * @consumer: [in] notifying resource
+ * @event: [in] notified event
+ *
+ */
+void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer,
+ enum ipa_rm_event event)
+{
+ int peers_index;
+ struct ipa_rm_resource *producer;
+ unsigned long flags;
+ if (!consumer)
+ return;
+ spin_lock_irqsave(&consumer->resource.state_lock, flags);
+ switch (consumer->resource.state) {
+ case IPA_RM_REQUEST_IN_PROGRESS:
+ if (event == IPA_RM_RESOURCE_RELEASED)
+ goto bail;
+ consumer->resource.state = IPA_RM_GRANTED;
+ break;
+ case IPA_RM_RELEASE_IN_PROGRESS:
+ if (event == IPA_RM_RESOURCE_GRANTED)
+ goto bail;
+ consumer->resource.state = IPA_RM_RELEASED;
+ break;
+ case IPA_RM_GRANTED:
+ case IPA_RM_RELEASED:
+ default:
+ goto bail;
+ }
+ spin_unlock_irqrestore(&consumer->resource.state_lock, flags);
+ for (peers_index = 0;
+ peers_index < ipa_rm_peers_list_get_size(
+ consumer->resource.peers_list);
+ peers_index++) {
+ producer = ipa_rm_peers_list_get_resource(peers_index,
+ consumer->resource.peers_list);
+ if (producer)
+ ipa_rm_resource_producer_handle_cb(
+ (struct ipa_rm_resource_prod *)
+ producer,
+ event);
+ }
+ return;
+bail:
+ spin_unlock_irqrestore(&consumer->resource.state_lock, flags);
+ return;
+}
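As a usage sketch for ipa_rm_resource_create() (illustration only): a consumer is described by ipa_rm_create_params, whose request/release hooks stand in here for a real client driver and may also return -EINPROGRESS for asynchronous grants.

	#include <mach/ipa.h>
	#include "ipa_rm_resource.h"

	static int example_usb_request(void)
	{
		/* power the client up; 0 = done, -EINPROGRESS = async */
		return 0;
	}

	static int example_usb_release(void)
	{
		return 0;
	}

	static int example_create_usb_cons(struct ipa_rm_resource **resource)
	{
		struct ipa_rm_create_params params = {
			.name = IPA_RM_RESOURCE_USB_CONS,
			.request_resource = example_usb_request,
			.release_resource = example_usb_release,
		};

		return ipa_rm_resource_create(&params, resource);
	}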
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.h b/drivers/platform/msm/ipa/ipa_rm_resource.h
new file mode 100644
index 0000000..b9c2e91
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.h
@@ -0,0 +1,127 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_RESOURCE_H_
+#define _IPA_RM_RESOURCE_H_
+
+#include <linux/list.h>
+#include <mach/ipa.h>
+#include "ipa_rm_peers_list.h"
+
+/**
+ * enum ipa_rm_resource_state - resource state
+ */
+enum ipa_rm_resource_state {
+ IPA_RM_RELEASED,
+ IPA_RM_REQUEST_IN_PROGRESS,
+ IPA_RM_GRANTED,
+ IPA_RM_RELEASE_IN_PROGRESS
+};
+
+/**
+ * enum ipa_rm_resource_type - IPA resource manager resource type
+ */
+enum ipa_rm_resource_type {
+ IPA_RM_PRODUCER,
+ IPA_RM_CONSUMER
+};
+
+/**
+ * struct ipa_rm_notification_info - notification information
+ * of IPA RM client
+ * @reg_params: registration parameters
+ * @link: link to the list of all registered clients information
+ */
+struct ipa_rm_notification_info {
+ struct ipa_rm_register_params reg_params;
+ struct list_head link;
+};
+
+/**
+ * struct ipa_rm_resource - IPA RM resource
+ * @name: name identifying resource
+ * @state: state of the resource
+ * @state_lock: lock for all resource state related variables
+ * @peers_list: list of the peers of the resource
+ */
+struct ipa_rm_resource {
+ enum ipa_rm_resource_name name;
+ enum ipa_rm_resource_type type;
+ enum ipa_rm_resource_state state;
+ spinlock_t state_lock;
+ struct ipa_rm_peers_list *peers_list;
+};
+
+/**
+ * struct ipa_rm_resource_cons - IPA RM consumer
+ * @resource: resource
+ * @usage_count: number of producers in GRANTED / REQUESTED state
+ * using this consumer
+ * @request_resource: function which should be called to request resource
+ * from resource manager
+ * @release_resource: function which should be called to release resource
+ * from resource manager
+ * Add new fields after @resource only.
+ */
+struct ipa_rm_resource_cons {
+ struct ipa_rm_resource resource;
+ int usage_count;
+ int (*request_resource)(void);
+ int (*release_resource)(void);
+};
+
+/**
+ * struct ipa_rm_resource_prod - IPA RM producer
+ * @resource: resource
+ * @event_listeners: clients registered with this producer
+ * for notifications in resource state
+ * @event_listeners_lock: RW lock protecting the event listeners list
+ * @pending_request: number of pending consumer requests
+ * @pending_release: number of pending consumer releases
+ * Add new fields after @resource only.
+ */
+struct ipa_rm_resource_prod {
+ struct ipa_rm_resource resource;
+ struct list_head event_listeners;
+ rwlock_t event_listeners_lock;
+ int pending_request;
+ int pending_release;
+};
+
+int ipa_rm_resource_create(
+ struct ipa_rm_create_params *create_params,
+ struct ipa_rm_resource **resource);
+
+void ipa_rm_resource_delete(struct ipa_rm_resource *resource);
+
+int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer,
+ struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer,
+ struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource,
+ struct ipa_rm_resource *depends_on);
+
+int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource,
+ struct ipa_rm_resource *depends_on);
+
+int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer);
+
+int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer);
+
+void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer,
+ enum ipa_rm_event event);
+
+void ipa_rm_resource_producer_notify_clients(
+ struct ipa_rm_resource_prod *producer,
+ enum ipa_rm_event event);
+
+#endif /* _IPA_RM_RESOURCE_H_ */
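A companion sketch for the producer side. The notify_cb prototype below is inferred from how ipa_rm_resource_producer_notify_clients() invokes it (user_data, event and a spare data word) and should be treated as an assumption; the example_* names are illustrative.

	#include <linux/kernel.h>
	#include <mach/ipa.h>
	#include "ipa_rm_resource.h"

	static void example_rm_notify(void *user_data, enum ipa_rm_event event,
			unsigned long data)
	{
		if (event == IPA_RM_RESOURCE_GRANTED)
			pr_debug("producer granted\n");
		else if (event == IPA_RM_RESOURCE_RELEASED)
			pr_debug("producer released\n");
	}

	static int example_register_listener(struct ipa_rm_resource_prod *producer)
	{
		struct ipa_rm_register_params params = {
			.notify_cb = example_rm_notify,
			.user_data = NULL,
		};

		return ipa_rm_resource_producer_register(producer, &params);
	}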
diff --git a/drivers/platform/msm/ipa/rmnet_bridge.c b/drivers/platform/msm/ipa/rmnet_bridge.c
index e5c7ec2..696b363 100644
--- a/drivers/platform/msm/ipa/rmnet_bridge.c
+++ b/drivers/platform/msm/ipa/rmnet_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,6 @@
#include <mach/bam_dmux.h>
#include <mach/ipa.h>
#include <mach/sps.h>
-#include "a2_service.h"
static struct rmnet_bridge_cb_type {
u32 producer_handle;
diff --git a/drivers/platform/msm/sps/bam.c b/drivers/platform/msm/sps/bam.c
index 1064086..0f81285 100644
--- a/drivers/platform/msm/sps/bam.c
+++ b/drivers/platform/msm/sps/bam.c
@@ -992,7 +992,11 @@
{
SPS_DBG2("sps:%s:bam=0x%x(va).pipe=%d.", __func__, (u32) base, pipe);
- bam_write_reg_field(base, P_CTRL(pipe), P_EN, 1);
+ if (bam_read_reg_field(base, P_CTRL(pipe), P_EN))
+ SPS_DBG2("sps:bam=0x%x(va).pipe=%d is already enabled.\n",
+ (u32) base, pipe);
+ else
+ bam_write_reg_field(base, P_CTRL(pipe), P_EN, 1);
}
/**
diff --git a/drivers/platform/msm/ssm.c b/drivers/platform/msm/ssm.c
new file mode 100644
index 0000000..c57bb91
--- /dev/null
+++ b/drivers/platform/msm/ssm.c
@@ -0,0 +1,931 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm Secure Service Module(SSM) driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/of.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/ion.h>
+#include <linux/types.h>
+#include <linux/firmware.h>
+#include <linux/elf.h>
+#include <linux/platform_device.h>
+#include <linux/msm_ion.h>
+#include <linux/platform_data/qcom_ssm.h>
+#include <mach/scm.h>
+#include <mach/msm_smd.h>
+
+#include "ssm.h"
+
+/* Macros */
+#define SSM_DEV_NAME "ssm"
+#define MPSS_SUBSYS 0
+#define SSM_INFO_CMD_ID 1
+#define QSEOS_CHECK_VERSION_CMD 0x00001803
+
+#define MAX_APP_NAME_SIZE 32
+#define SSM_MSG_LEN (104 + 4) /* bytes + pad */
+#define SSM_MSG_FIELD_LEN 11
+#define SSM_HEADER_LEN (SSM_MSG_FIELD_LEN * 4)
+#define ATOM_MSG_LEN (SSM_HEADER_LEN + SSM_MSG_LEN)
+#define FIRMWARE_NAME "ssmapp"
+#define TZAPP_NAME "SsmApp"
+#define CHANNEL_NAME "SSM_RTR"
+
+#define ALIGN_BUFFER(size) ((size + 4095) & ~4095)
+
+/* SSM driver structure.*/
+struct ssm_driver {
+ int32_t app_id;
+ int32_t app_status;
+ int32_t update_status;
+ int32_t atom_replay;
+ int32_t mtoa_replay;
+ uint32_t buff_len;
+ unsigned char *channel_name;
+ unsigned char *smd_buffer;
+ struct ion_client *ssm_ion_client;
+ struct ion_handle *ssm_ion_handle;
+ struct tzapp_get_mode_info_rsp *resp;
+ struct device *dev;
+ smd_channel_t *ch;
+ ion_phys_addr_t buff_phys;
+ ion_virt_addr_t buff_virt;
+ dev_t ssm_device_no;
+ struct work_struct ipc_work;
+ struct mutex mutex;
+ bool key_status;
+ bool ready;
+};
+
+static struct ssm_driver *ssm_drv;
+
+static int getint(char *buff, unsigned long *res)
+{
+ char value[SSM_MSG_FIELD_LEN];
+
+ memcpy(value, buff, SSM_MSG_FIELD_LEN);
+ value[SSM_MSG_FIELD_LEN - 1] = '\0';
+
+ return kstrtoul(skip_spaces(value), 10, res);
+}
+
+/*
+ * Send packet to modem over SMD channel.
+ */
+static int update_modem(enum ssm_ipc_req ipc_req, struct ssm_driver *ssm,
+ int length, char *data)
+{
+ unsigned int packet_len = SSM_HEADER_LEN + length + 1;
+ int rc = 0;
+
+ ssm->atom_replay += 1;
+ snprintf(ssm->smd_buffer, SSM_HEADER_LEN + 1, "%10u|%10u|%10u|%10u|"
+ , packet_len, ssm->atom_replay, ipc_req, length);
+ memcpy(ssm->smd_buffer + SSM_HEADER_LEN, data, length);
+
+ ssm->smd_buffer[packet_len - 1] = '|';
+
+ if (smd_write_avail(ssm->ch) < packet_len) {
+ dev_err(ssm->dev, "Not enough space, dropping request\n");
+ return -ENOSPC;
+ }
+
+ rc = smd_write(ssm->ch, ssm->smd_buffer, packet_len);
+ if (rc < packet_len) {
+ dev_err(ssm->dev, "smd_write failed for %d\n", ipc_req);
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+/*
+ * Header Format
+ * Each header field is 10 bytes of ASCII.
+ * Fields are separated by a '|' delimiter.
+ * |<-10 bytes->|<-10 bytes->|<-10 bytes->|<-10 bytes->|<-10 bytes->|
+ * |-----------------------------------------------------------------
+ * | length | replay no. | request | msg_len | message |
+ * |-----------------------------------------------------------------
+ *
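+ * Example (illustrative, values hypothetical): a packet carrying the
+ * one-byte message "0" for request SSM_ATOM_KEY_STATUS (43691) with
+ * replay number 1 is laid out as
+ *   "        46|         1|     43691|         1|0|"
+ * where every numeric field is right-justified to 10 characters.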
+ */
+static int decode_header(char *buffer, int length,
+ struct ssm_common_msg *pkt)
+{
+ int rc;
+
+ rc = getint(buffer, &pkt->pktlen);
+ if (rc < 0)
+ return -EINVAL;
+
+ buffer += SSM_MSG_FIELD_LEN;
+ rc = getint(buffer, &pkt->replaynum);
+ if (rc < 0)
+ return -EINVAL;
+
+ buffer += SSM_MSG_FIELD_LEN;
+ rc = getint(buffer, (unsigned long *)&pkt->ipc_req);
+ if (rc < 0)
+ return -EINVAL;
+
+ buffer += SSM_MSG_FIELD_LEN;
+ rc = getint(buffer, &pkt->msg_len);
+ if ((rc < 0) || (pkt->msg_len > SSM_MSG_LEN))
+ return -EINVAL;
+
+ pkt->msg = buffer + SSM_MSG_FIELD_LEN;
+
+ dev_dbg(ssm_drv->dev, "len %lu rep %lu req %d msg_len %lu\n",
+ pkt->pktlen, pkt->replaynum, pkt->ipc_req,
+ pkt->msg_len);
+ return 0;
+}
+
+/*
+ * Decode address for storing the decryption key.
+ * Only for Key Exchange
+ * Message Format
+ * |Length@Address|
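+ * e.g. (illustrative) "        16@ 123456789" decodes to
+ * length = 16 and address = 123456789 (both parsed as decimal).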
+ */
+static int decode_message(char *msg, unsigned int len, unsigned long *length,
+ unsigned long *address)
+{
+ int i = 0, rc = 0;
+ char *buff;
+
+ buff = kzalloc(len, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+ while (i < len) {
+ if (msg[i] == '@')
+ break;
+ i++;
+ }
+ if ((i < len) && (msg[i] == '@')) {
+ memcpy(buff, msg, i);
+ buff[i] = '\0';
+ rc = kstrtoul(skip_spaces(buff), 10, length);
+ if (rc || (*length == 0)) {
+ rc = -EINVAL;
+ goto exit;
+ }
+ memcpy(buff, &msg[i + 1], len - (i + 1));
+ buff[len - (i + 1)] = '\0';
+ rc = kstrtoul(skip_spaces(buff), 10, address);
+ } else
+ rc = -EINVAL;
+
+exit:
+ kfree(buff);
+ return rc;
+}
+
+static void process_message(int cmd, char *msg, int len,
+ struct ssm_driver *ssm)
+{
+ int rc;
+ unsigned long key_len = 0, key_add = 0, val;
+ struct ssm_keyexchg_req req;
+
+ switch (cmd) {
+ case SSM_MTOA_KEY_EXCHANGE:
+ if (len < 3) {
+ dev_err(ssm->dev, "Invalid message\n");
+ break;
+ }
+
+ if (ssm->key_status) {
+ dev_err(ssm->dev, "Key exchange already done\n");
+ break;
+ }
+
+ rc = decode_message(msg, len, &key_len, &key_add);
+ if (rc) {
+ rc = update_modem(SSM_ATOM_KEY_STATUS, ssm,
+ 1, "1");
+ break;
+ }
+
+ /*
+ * The key exchange is handled inline here because it is
+ * specific to this case; all other TZ communication goes
+ * through the generic tz_scm_call() helper.
+ */
+ req.ssid = MPSS_SUBSYS;
+ req.address = (void *)key_add;
+ req.length = key_len;
+ req.status = (uint32_t *)ssm->buff_phys;
+
+ *(unsigned int *)ssm->buff_virt = -1;
+ rc = scm_call(KEY_EXCHANGE, 0x1, &req,
+ sizeof(struct ssm_keyexchg_req), NULL, 0);
+ if (rc) {
+ dev_err(ssm->dev, "Call for key exchg failed %d", rc);
+ rc = update_modem(SSM_ATOM_KEY_STATUS, ssm,
+ 1, "1");
+ } else {
+ /* Success: encode packet and update modem */
+ rc = update_modem(SSM_ATOM_KEY_STATUS, ssm,
+ 1, "0");
+ ssm->key_status = true;
+ }
+ break;
+
+ case SSM_MTOA_MODE_UPDATE_STATUS:
+ msg[len] = '\0';
+ rc = kstrtoul(skip_spaces(msg), 10, &val);
+ if (rc || val) {
+ dev_err(ssm->dev, "Modem mode update failed\n");
+ ssm->update_status = FAILED;
+ } else
+ ssm->update_status = SUCCESS;
+
+ dev_dbg(ssm->dev, "Modem mode update status %lu\n", val);
+ break;
+
+ default:
+ dev_dbg(ssm->dev, "Invalid message\n");
+ break;
+ };
+}
+
+/*
+ * Work function to handle and process packets coming from modem.
+ */
+static void ssm_app_modem_work_fn(struct work_struct *work)
+{
+ int sz, rc;
+ struct ssm_common_msg pkt;
+ struct ssm_driver *ssm;
+
+ ssm = container_of(work, struct ssm_driver, ipc_work);
+
+ mutex_lock(&ssm->mutex);
+ sz = smd_cur_packet_size(ssm->ch);
+ if ((sz <= 0) || (sz > ATOM_MSG_LEN)) {
+ dev_dbg(ssm_drv->dev, "Garbled message size\n");
+ goto unlock;
+ }
+
+ if (smd_read_avail(ssm->ch) < sz) {
+ dev_err(ssm_drv->dev, "SMD error data in channel\n");
+ goto unlock;
+ }
+
+ if (sz < SSM_HEADER_LEN) {
+ dev_err(ssm_drv->dev, "Invalid packet\n");
+ goto unlock;
+ }
+
+ if (smd_read(ssm->ch, ssm->smd_buffer, sz) != sz) {
+ dev_err(ssm_drv->dev, "Incomplete data\n");
+ goto unlock;
+ }
+
+ rc = decode_header(ssm->smd_buffer, sz, &pkt);
+ if (rc < 0) {
+ dev_err(ssm_drv->dev, "Corrupted header\n");
+ goto unlock;
+ }
+
+ /* Check validity of message */
+ if (ssm->mtoa_replay >= (int)pkt.replaynum) {
+ dev_err(ssm_drv->dev, "Replay attack...\n");
+ goto unlock;
+ }
+
+ if (pkt.msg[pkt.msg_len] != '|') {
+ dev_err(ssm_drv->dev, "Garbled message\n");
+ goto unlock;
+ }
+
+ ssm->mtoa_replay = pkt.replaynum;
+ process_message(pkt.ipc_req, pkt.msg, pkt.msg_len, ssm);
+
+unlock:
+ mutex_unlock(&ssm->mutex);
+}
+
+/*
+ * MODEM-APPS smd channel callback function.
+ */
+static void modem_request(void *ctxt, unsigned event)
+{
+ struct ssm_driver *ssm;
+
+ ssm = (struct ssm_driver *)ctxt;
+
+ switch (event) {
+ case SMD_EVENT_OPEN:
+ case SMD_EVENT_CLOSE:
+ dev_info(ssm->dev, "Port %s\n",
+ (event == SMD_EVENT_OPEN) ? "opened" : "closed");
+ break;
+ case SMD_EVENT_DATA:
+ if (smd_read_avail(ssm->ch) > 0)
+ schedule_work(&ssm->ipc_work);
+ break;
+ };
+}
+
+/*
+ * Communication interface between ssm driver and TZ.
+ */
+static int tz_scm_call(struct ssm_driver *ssm, void *tz_req, int tz_req_len,
+ void **tz_resp, int tz_resp_len)
+{
+ int rc;
+ struct common_req req;
+ struct common_resp resp;
+
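+ /*
+ * The request is staged at the start of the shared ION buffer and the
+ * response is expected immediately after it (offset tz_req_len); both
+ * are handed to TZ by physical address.
+ */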
+ memcpy((void *)ssm->buff_virt, tz_req, tz_req_len);
+
+ req.cmd_id = CLIENT_SEND_DATA_COMMAND;
+ req.app_id = ssm->app_id;
+ req.req_ptr = (void *)ssm->buff_phys;
+ req.req_len = tz_req_len;
+ req.resp_ptr = (void *)(ssm->buff_phys + tz_req_len);
+ req.resp_len = tz_resp_len;
+
+ rc = scm_call(SCM_SVC_TZSCHEDULER, 1, (const void *) &req,
+ sizeof(req), (void *)&resp, sizeof(resp));
+ if (rc) {
+ dev_err(ssm->dev, "SCM call failed for data command\n");
+ return rc;
+ }
+
+ if (resp.result != RESULT_SUCCESS) {
+ dev_err(ssm->dev, "Data command response failure %d\n",
+ resp.result);
+ return -EINVAL;
+ }
+
+ *tz_resp = (void *)(ssm->buff_virt + tz_req_len);
+
+ return rc;
+}
+
+/*
+ * Load SSM application in TZ and start application:
+ * 1. Check if SSM application is already loaded.
+ * 2. Load SSM application firmware.
+ * 3. Start SSM application in TZ.
+ */
+static int ssm_load_app(struct ssm_driver *ssm)
+{
+ unsigned char name[MAX_APP_NAME_SIZE], *pos;
+ int rc, i, fw_count;
+ uint32_t buff_len, size = 0, ion_len;
+ struct check_app_req app_req;
+ struct scm_resp app_resp;
+ struct load_app app_img_info;
+ const struct firmware **fw, *fw_mdt;
+ const struct elf32_hdr *ehdr;
+ const struct elf32_phdr *phdr;
+ struct ion_handle *ion_handle;
+ ion_phys_addr_t buff_phys;
+ ion_virt_addr_t buff_virt;
+
+ /* Check if TZ app already loaded */
+ app_req.cmd_id = APP_LOOKUP_COMMAND;
+ memcpy(app_req.app_name, TZAPP_NAME, MAX_APP_NAME_SIZE);
+
+ rc = scm_call(SCM_SVC_TZSCHEDULER, 1, &app_req,
+ sizeof(struct check_app_req),
+ &app_resp, sizeof(app_resp));
+ if (rc) {
+ dev_err(ssm->dev, "SCM call failed for LOOKUP COMMAND\n");
+ return -EINVAL;
+ }
+
+ if (app_resp.result == RESULT_FAILURE)
+ ssm->app_id = 0;
+ else
+ ssm->app_id = app_resp.data;
+
+ if (ssm->app_id) {
+ rc = 0;
+ dev_info(ssm->dev, "TZAPP already loaded...\n");
+ goto out;
+ }
+
+ /* App not loaded; get the firmware */
+ /* Get .mdt first */
+ rc = request_firmware(&fw_mdt, FIRMWARE_NAME".mdt", ssm->dev);
+ if (rc) {
+ dev_err(ssm->dev, "Unable to get mdt file %s\n",
+ FIRMWARE_NAME".mdt");
+ rc = -EIO;
+ goto out;
+ }
+
+ if (fw_mdt->size < sizeof(*ehdr)) {
+ dev_err(ssm->dev, "Not big enough to be an elf header\n");
+ rc = -EIO;
+ goto release_mdt;
+ }
+
+ ehdr = (struct elf32_hdr *)fw_mdt->data;
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+ dev_err(ssm->dev, "Not an elf header\n");
+ rc = -EIO;
+ goto release_mdt;
+ }
+
+ if (ehdr->e_phnum == 0) {
+ dev_err(ssm->dev, "No loadable segments\n");
+ rc = -EIO;
+ goto release_mdt;
+ }
+
+ phdr = (const struct elf32_phdr *)(fw_mdt->data +
+ sizeof(struct elf32_hdr));
+
+ fw = kzalloc((sizeof(struct firmware *) * ehdr->e_phnum), GFP_KERNEL);
+ if (!fw) {
+ rc = -ENOMEM;
+ goto release_mdt;
+ }
+
+ /* Valid .mdt; now load the remaining blobs (.b00, .b01, ...) */
+ for (fw_count = 0; fw_count < ehdr->e_phnum ; fw_count++) {
+ snprintf(name, MAX_APP_NAME_SIZE, FIRMWARE_NAME".b%02d",
+ fw_count);
+ rc = request_firmware(&fw[fw_count], name, ssm->dev);
+ if (rc < 0) {
+ rc = -EIO;
+ dev_err(ssm->dev, "Unable to get blob file\n");
+ goto release_blob;
+ }
+
+ if (fw[fw_count]->size != phdr->p_filesz) {
+ dev_err(ssm->dev, "Blob size %u doesn't match %u\n",
+ fw[fw_count]->size, phdr->p_filesz);
+ rc = -EIO;
+ goto release_blob;
+ }
+
+ phdr++;
+ size += fw[fw_count]->size;
+ }
+
+ /* Ion allocation for loading tzapp */
+ /* ION buffer size 4k aligned */
+ ion_len = ALIGN_BUFFER(size);
+ ion_handle = ion_alloc(ssm_drv->ssm_ion_client,
+ ion_len, SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+ if (IS_ERR_OR_NULL(ion_handle)) {
+ rc = PTR_ERR(ion_handle);
+ dev_err(ssm->dev, "Unable to get ion handle\n");
+ goto release_blob;
+ }
+
+ rc = ion_phys(ssm_drv->ssm_ion_client, ion_handle,
+ &buff_phys, &buff_len);
+ if (rc < 0) {
+ dev_err(ssm->dev, "Unable to get ion physical address\n");
+ goto ion_free;
+ }
+
+ if (buff_len < size) {
+ rc = -ENOMEM;
+ goto ion_free;
+ }
+
+ buff_virt =
+ (ion_virt_addr_t)ion_map_kernel(ssm_drv->ssm_ion_client,
+ ion_handle);
+ if (IS_ERR_OR_NULL((void *)buff_virt)) {
+ rc = PTR_ERR((void *)buff_virt);
+ dev_err(ssm->dev, "Unable to get ion virtual address\n");
+ goto ion_free;
+ }
+
+ /* Copy firmware to ION memory */
+ memcpy((unsigned char *)buff_virt, fw_mdt->data, fw_mdt->size);
+ pos = (unsigned char *)buff_virt + fw_mdt->size;
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ memcpy(pos, fw[i]->data, fw[i]->size);
+ pos += fw[i]->size;
+ }
+
+ /* Loading app */
+ app_img_info.cmd_id = APP_START_COMMAND;
+ app_img_info.mdt_len = fw_mdt->size;
+ app_img_info.img_len = size;
+ app_img_info.phy_addr = buff_phys;
+
+ /* SCM call to load the TZ APP */
+ rc = scm_call(SCM_SVC_TZSCHEDULER, 1, &app_img_info,
+ sizeof(struct load_app), &app_resp, sizeof(app_resp));
+ if (rc) {
+ rc = -EIO;
+ dev_err(ssm->dev, "SCM call to load APP failed\n");
+ goto ion_unmap;
+ }
+
+ if (app_resp.result == RESULT_FAILURE) {
+ rc = -EIO;
+ dev_err(ssm->dev, "SCM command to load TzAPP failed\n");
+ goto ion_unmap;
+ }
+
+ ssm->app_id = app_resp.data;
+ ssm->app_status = SUCCESS;
+
+ion_unmap:
+ ion_unmap_kernel(ssm_drv->ssm_ion_client, ion_handle);
+ion_free:
+ ion_free(ssm_drv->ssm_ion_client, ion_handle);
+release_blob:
+ while (--fw_count >= 0)
+ release_firmware(fw[fw_count]);
+ kfree(fw);
+release_mdt:
+ release_firmware(fw_mdt);
+out:
+ return rc;
+}
+
+/*
+ * Allocate buffer for transactions.
+ */
+static int ssm_setup_ion(struct ssm_driver *ssm)
+{
+ int rc = 0;
+ unsigned int size;
+
+ size = ALIGN_BUFFER(ATOM_MSG_LEN);
+
+ /* ION client for communicating with TZ */
+ ssm->ssm_ion_client = msm_ion_client_create(UINT_MAX,
+ "ssm-kernel");
+ if (IS_ERR_OR_NULL(ssm->ssm_ion_client)) {
+ rc = PTR_ERR(ssm->ssm_ion_client);
+ dev_err(ssm->dev, "Ion client not created\n");
+ return rc;
+ }
+
+ /* Setup a small ION buffer for tz communication */
+ ssm->ssm_ion_handle = ion_alloc(ssm->ssm_ion_client,
+ size, SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+ if (IS_ERR_OR_NULL(ssm->ssm_ion_handle)) {
+ rc = PTR_ERR(ssm->ssm_ion_handle);
+ dev_err(ssm->dev, "Unable to get ion handle\n");
+ goto out;
+ }
+
+ rc = ion_phys(ssm->ssm_ion_client, ssm->ssm_ion_handle,
+ &ssm->buff_phys, &ssm->buff_len);
+ if (rc < 0) {
+ dev_err(ssm->dev,
+ "Unable to get ion buffer physical address\n");
+ goto ion_free;
+ }
+
+ if (ssm->buff_len < size) {
+ rc = -ENOMEM;
+ goto ion_free;
+ }
+
+ ssm->buff_virt =
+ (ion_virt_addr_t)ion_map_kernel(ssm->ssm_ion_client,
+ ssm->ssm_ion_handle);
+ if (IS_ERR_OR_NULL((void *)ssm->buff_virt)) {
+ rc = PTR_ERR((void *)ssm->buff_virt);
+ dev_err(ssm->dev,
+ "Unable to get ion buffer virtual address\n");
+ goto ion_free;
+ }
+
+ return rc;
+
+ion_free:
+ ion_free(ssm->ssm_ion_client, ssm->ssm_ion_handle);
+out:
+ ion_client_destroy(ssm_drv->ssm_ion_client);
+ return rc;
+}
+
+static struct ssm_platform_data *populate_ssm_pdata(struct device *dev)
+{
+ struct ssm_platform_data *pdata;
+ int rc;
+
+ pdata = devm_kzalloc(dev, sizeof(struct ssm_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ pdata->need_key_exchg =
+ of_property_read_bool(dev->of_node, "qcom,need-keyexhg");
+
+ rc = of_property_read_string(dev->of_node, "qcom,channel-name",
+ &pdata->channel_name);
+ if (rc && rc != -EINVAL) {
+ dev_err(dev, "Error reading channel_name property %d\n", rc);
+ return NULL;
+ } else if (rc == -EINVAL)
+ pdata->channel_name = CHANNEL_NAME;
+
+ return pdata;
+}
+
+static int __devinit ssm_probe(struct platform_device *pdev)
+{
+ int rc;
+ uint32_t system_call_id;
+ char legacy = '\0';
+ struct ssm_platform_data *pdata;
+ struct ssm_driver *drv;
+
+ if (pdev->dev.of_node)
+ pdata = populate_ssm_pdata(&pdev->dev);
+ else
+ pdata = pdev->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "Empty platform data\n");
+ return -ENOMEM;
+ }
+
+ drv = devm_kzalloc(&pdev->dev, sizeof(struct ssm_driver),
+ GFP_KERNEL);
+ if (!drv) {
+ dev_err(&pdev->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize the driver structure */
+ drv->atom_replay = -1;
+ drv->mtoa_replay = -1;
+ drv->app_id = -1;
+ drv->app_status = RETRY;
+ drv->ready = false;
+ drv->update_status = FAILED;
+ mutex_init(&drv->mutex);
+ drv->key_status = !pdata->need_key_exchg;
+ drv->channel_name = (char *)pdata->channel_name;
+ INIT_WORK(&drv->ipc_work, ssm_app_modem_work_fn);
+
+ /* Allocate memory for smd buffer */
+ drv->smd_buffer = devm_kzalloc(&pdev->dev,
+ (sizeof(char) * ATOM_MSG_LEN), GFP_KERNEL);
+ if (!drv->smd_buffer) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ /* Allocate response buffer */
+ drv->resp = devm_kzalloc(&pdev->dev,
+ sizeof(struct tzapp_get_mode_info_rsp),
+ GFP_KERNEL);
+ if (!drv->resp) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ /* Check for TZ version */
+ system_call_id = QSEOS_CHECK_VERSION_CMD;
+ rc = scm_call(SCM_SVC_INFO, SSM_INFO_CMD_ID, &system_call_id,
+ sizeof(system_call_id), &legacy, sizeof(legacy));
+ if (rc) {
+ dev_err(&pdev->dev, "Get version failed %d\n", rc);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ /* This driver only supports TZ 1.4 and QSEOS */
+ if (!legacy) {
+ dev_err(&pdev->dev,
+ "Driver doesn't support legacy version\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ /* Setup the ion buffer for transaction */
+ rc = ssm_setup_ion(drv);
+ if (rc < 0)
+ goto exit;
+
+ drv->dev = &pdev->dev;
+ ssm_drv = drv;
+ platform_set_drvdata(pdev, ssm_drv);
+
+ dev_dbg(&pdev->dev, "probe success\n");
+ return 0;
+
+exit:
+ mutex_destroy(&drv->mutex);
+ platform_set_drvdata(pdev, NULL);
+ return rc;
+}
+
+static int __devexit ssm_remove(struct platform_device *pdev)
+{
+ int rc;
+
+ struct scm_shutdown_req req;
+ struct scm_resp resp;
+
+ if (!ssm_drv)
+ return 0;
+ /*
+ * Steps to exit:
+ * 1. Set ready to false (OEM access closed).
+ * 2. Close the SMD connection to the modem.
+ * 3. Clean up ION resources.
+ */
+ ssm_drv->ready = false;
+ smd_close(ssm_drv->ch);
+ flush_work_sync(&ssm_drv->ipc_work);
+
+ /* ION clean up */
+ ion_unmap_kernel(ssm_drv->ssm_ion_client, ssm_drv->ssm_ion_handle);
+ ion_free(ssm_drv->ssm_ion_client, ssm_drv->ssm_ion_handle);
+ ion_client_destroy(ssm_drv->ssm_ion_client);
+
+ /* Shutdown tzapp */
+ req.app_id = ssm_drv->app_id;
+ req.cmd_id = APP_SHUTDOWN_COMMAND;
+ rc = scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (rc)
+ dev_err(&pdev->dev, "TZ_app Unload failed\n");
+
+ return rc;
+}
+
+static struct of_device_id ssm_match_table[] = {
+ {
+ .compatible = "qcom,ssm",
+ },
+ {}
+};
+
+static struct platform_driver ssm_pdriver = {
+ .probe = ssm_probe,
+ .remove = __devexit_p(ssm_remove),
+ .driver = {
+ .name = SSM_DEV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = ssm_match_table,
+ },
+};
+module_platform_driver(ssm_pdriver);
+
+/*
+ * Interface for external OEM driver.
+ * This interface supports the following functionalities:
+ * 1. Get TZAPP ID.
+ * 2. Set default mode.
+ * 3. Set mode (the encrypted mode and its length are passed as parameters).
+ * 4. Set mode from TZ.
+ * 5. Get status of mode update.
+ *
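+ * Illustrative usage (hypothetical caller, not part of this patch):
+ *   int app_id = ssm_oem_driver_intf(SSM_GET_APP_ID, NULL, 0);
+ *   char mode[4] = {0};
+ *   int rc = ssm_oem_driver_intf(SSM_SET_MODE, mode, sizeof(mode));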
+ */
+int ssm_oem_driver_intf(int cmd, char *mode, int len)
+{
+ int rc, req_len, resp_len;
+ struct tzapp_get_mode_info_req get_mode_req;
+ struct tzapp_get_mode_info_rsp *get_mode_resp;
+
+ /* If ssm_drv is NULL, probe failed */
+ if (!ssm_drv)
+ return -ENODEV;
+
+ mutex_lock(&ssm_drv->mutex);
+
+ if (ssm_drv->app_status == RETRY) {
+ /* Load TZAPP */
+ rc = ssm_load_app(ssm_drv);
+ if (rc) {
+ rc = -ENODEV;
+ ssm_drv->app_status = FAILED;
+ goto unlock;
+ }
+ } else if (ssm_drv->app_status == FAILED) {
+ rc = -ENODEV;
+ goto unlock;
+ }
+
+ /* Open modem SMD interface */
+ if (!ssm_drv->ready) {
+ rc = smd_open(ssm_drv->channel_name, &ssm_drv->ch, ssm_drv,
+ modem_request);
+ if (rc) {
+ rc = -EAGAIN;
+ goto unlock;
+ } else
+ ssm_drv->ready = true;
+ }
+
+ /* Try again; modem key exchange is not done yet. */
+ if (!ssm_drv->key_status) {
+ rc = -EAGAIN;
+ goto unlock;
+ }
+
+ /* Set return status to success */
+ rc = 0;
+
+ switch (cmd) {
+ case SSM_READY:
+ break;
+
+ case SSM_GET_APP_ID:
+ rc = ssm_drv->app_id;
+ break;
+
+ case SSM_MODE_INFO_READY:
+ ssm_drv->update_status = RETRY;
+ /* Fill command structure */
+ req_len = sizeof(struct tzapp_get_mode_info_req);
+ resp_len = sizeof(struct tzapp_get_mode_info_rsp);
+ get_mode_req.tzapp_ssm_cmd = GET_ENC_MODE;
+ rc = tz_scm_call(ssm_drv, (void *)&get_mode_req,
+ req_len, (void **)&get_mode_resp, resp_len);
+ if (rc) {
+ ssm_drv->update_status = FAILED;
+ break;
+ }
+
+ /* Send mode_info to modem */
+ rc = update_modem(SSM_ATOM_MODE_UPDATE, ssm_drv,
+ get_mode_resp->enc_mode_len,
+ get_mode_resp->enc_mode_info);
+ if (rc)
+ ssm_drv->update_status = FAILED;
+ break;
+
+ case SSM_SET_MODE:
+ ssm_drv->update_status = RETRY;
+
+ if (len > ENC_MODE_MAX_SIZE) {
+ ssm_drv->update_status = FAILED;
+ rc = -EINVAL;
+ break;
+ }
+ memcpy(ssm_drv->resp->enc_mode_info, mode, len);
+ ssm_drv->resp->enc_mode_len = len;
+
+ /* Send mode_info to modem */
+ rc = update_modem(SSM_ATOM_MODE_UPDATE, ssm_drv,
+ ssm_drv->resp->enc_mode_len,
+ ssm_drv->resp->enc_mode_info);
+ if (rc)
+ ssm_drv->update_status = FAILED;
+ break;
+
+ case SSM_GET_MODE_STATUS:
+ rc = ssm_drv->update_status;
+ break;
+
+ case SSM_SET_DEFAULT_MODE:
+ /* Modem does not send response for this */
+ ssm_drv->update_status = RETRY;
+ rc = update_modem(SSM_ATOM_SET_DEFAULT_MODE, ssm_drv,
+ 1, "0");
+ if (rc)
+ ssm_drv->update_status = FAILED;
+ else
+ /* For default mode we don't get any response
+ * from the modem.
+ */
+ ssm_drv->update_status = SUCCESS;
+ break;
+ default:
+ rc = -EINVAL;
+ dev_err(ssm_drv->dev, "Invalid command\n");
+ break;
+ };
+
+unlock:
+ mutex_unlock(&ssm_drv->mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ssm_oem_driver_intf);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Secure Service Module");
diff --git a/drivers/platform/msm/ssm.h b/drivers/platform/msm/ssm.h
new file mode 100644
index 0000000..97add11
--- /dev/null
+++ b/drivers/platform/msm/ssm.h
@@ -0,0 +1,160 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SSM_H_
+#define __SSM_H_
+
+#define MAX_APP_NAME_SIZE 32
+#define MODE_INFO_MAX_SIZE 4
+#define ENC_MODE_MAX_SIZE (100 + MODE_INFO_MAX_SIZE)
+
+/* tzapp response.*/
+enum tz_response {
+ RESULT_SUCCESS = 0,
+ RESULT_FAILURE = 0xFFFFFFFF,
+};
+
+/* tzapp command list.*/
+enum tz_commands {
+ ENC_MODE,
+ GET_ENC_MODE,
+ KEY_EXCHANGE = 11,
+};
+
+/* Command list for QSEOS.*/
+enum qceos_cmd_id {
+ APP_START_COMMAND = 0x01,
+ APP_SHUTDOWN_COMMAND,
+ APP_LOOKUP_COMMAND,
+ CLIENT_SEND_DATA_COMMAND = 0x6,
+ QSEOS_CMD_MAX = 0xEFFFFFFF,
+};
+
+/* MODEM/SSM command list.*/
+enum ssm_ipc_req {
+ SSM_MTOA_KEY_EXCHANGE = 0x0000AAAA,
+ SSM_ATOM_KEY_STATUS,
+ SSM_ATOM_MODE_UPDATE,
+ SSM_MTOA_MODE_UPDATE_STATUS,
+ SSM_MTOA_PREV_INVALID,
+ SSM_ATOM_PREV_INVALID,
+ SSM_ATOM_SET_DEFAULT_MODE,
+ SSM_INVALID_REQ,
+};
+
+/* OEM request command list. */
+enum oem_req {
+ SSM_READY,
+ SSM_GET_APP_ID,
+ SSM_MODE_INFO_READY,
+ SSM_SET_MODE,
+ SSM_GET_MODE_STATUS,
+ SSM_SET_DEFAULT_MODE,
+ SSM_INVALID,
+};
+
+/* Modem mode update status.*/
+enum modem_mode_status {
+ SUCCESS,
+ RETRY,
+ FAILED = -1,
+};
+
+__packed struct load_app {
+ uint32_t cmd_id;
+ uint32_t mdt_len;
+ uint32_t img_len;
+ uint32_t phy_addr;
+ char app_name[MAX_APP_NAME_SIZE];
+};
+
+/* Stop tzapp request. */
+__packed struct scm_shutdown_req {
+ uint32_t cmd_id;
+ uint32_t app_id;
+};
+
+/* Common tzos response.*/
+__packed struct scm_resp {
+ uint32_t result;
+ enum tz_response resp_type;
+ unsigned int data;
+};
+
+/* tzos request.*/
+__packed struct check_app_req {
+ uint32_t cmd_id;
+ char app_name[MAX_APP_NAME_SIZE];
+};
+
+/* tzapp encode mode request. */
+__packed struct tzapp_mode_enc_req {
+ uint32_t tzapp_ssm_cmd;
+ uint8_t mode_info[4];
+};
+
+/* tzapp encode mode response.*/
+__packed struct tzapp_mode_enc_rsp {
+ uint32_t tzapp_ssm_cmd;
+ uint8_t enc_mode_info[ENC_MODE_MAX_SIZE];
+ uint32_t enc_mode_len;
+ long status;
+};
+
+/* tzapp get mode request.*/
+__packed struct tzapp_get_mode_info_req {
+ uint32_t tzapp_ssm_cmd;
+};
+
+/* tzapp get mode response.*/
+__packed struct tzapp_get_mode_info_rsp {
+ uint32_t tzapp_ssm_cmd;
+ uint8_t enc_mode_info[ENC_MODE_MAX_SIZE];
+ uint32_t enc_mode_len;
+ long status;
+};
+
+/* tzos key exchange request.*/
+__packed struct ssm_keyexchg_req {
+ uint32_t ssid;
+ void *address;
+ uint32_t length;
+ uint32_t *status;
+};
+
+/* tzos common request.*/
+__packed struct common_req {
+ uint32_t cmd_id;
+ uint32_t app_id;
+ void *req_ptr;
+ uint32_t req_len;
+ void *resp_ptr;
+ uint32_t resp_len;
+};
+
+/* tzos common response.*/
+__packed struct common_resp {
+ uint32_t result;
+ uint32_t type;
+ uint32_t data;
+};
+
+/* Modem/SSM packet format.*/
+struct ssm_common_msg {
+ unsigned long pktlen;
+ unsigned long replaynum;
+ enum ssm_ipc_req ipc_req;
+ unsigned long msg_len;
+ char *msg;
+};
+
+#endif
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index 13e23e8..c5b1db4 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -128,7 +128,6 @@
int catch_up_time_us;
enum battery_type batt_type;
uint16_t ocv_reading_at_100;
- int cc_reading_at_100;
int max_voltage_uv;
int chg_term_ua;
@@ -1042,10 +1041,8 @@
}
/* stop faking 100% after an OCV event */
- if (chip->ocv_reading_at_100 != raw->last_good_ocv_raw) {
+ if (chip->ocv_reading_at_100 != raw->last_good_ocv_raw)
chip->ocv_reading_at_100 = OCV_RAW_UNINITIALIZED;
- chip->cc_reading_at_100 = 0;
- }
pr_debug("0p625 = %duV\n", chip->xoadc_v0625);
pr_debug("1p25 = %duV\n", chip->xoadc_v125);
pr_debug("last_good_ocv_raw= 0x%x, last_good_ocv_uv= %duV\n",
@@ -1185,10 +1182,7 @@
int64_t cc_voltage_uv, cc_pvh, cc_uah;
cc_voltage_uv = cc;
- cc_voltage_uv -= chip->cc_reading_at_100;
- pr_debug("cc = %d. after subtracting 0x%x cc = %lld\n",
- cc, chip->cc_reading_at_100,
- cc_voltage_uv);
+ pr_debug("cc = %d\n", cc);
cc_voltage_uv = cc_to_microvolt(chip, cc_voltage_uv);
cc_voltage_uv = pm8xxx_cc_adjust_for_gain(cc_voltage_uv);
pr_debug("cc_voltage_uv = %lld microvolts\n", cc_voltage_uv);
@@ -1513,10 +1507,7 @@
/* calculate cc micro_volt_hour */
calculate_cc_uah(chip, raw->cc, cc_uah);
- pr_debug("cc_uah = %duAh raw->cc = %x cc = %lld after subtracting %x\n",
- *cc_uah, raw->cc,
- (int64_t)raw->cc - chip->cc_reading_at_100,
- chip->cc_reading_at_100);
+ pr_debug("cc_uah = %duAh raw->cc = %x\n", *cc_uah, raw->cc);
soc_rbatt = ((*remaining_charge_uah - *cc_uah) * 100) / *fcc_uah;
if (soc_rbatt < 0)
@@ -2653,19 +2644,20 @@
if (is_battery_full) {
the_chip->ocv_reading_at_100 = raw.last_good_ocv_raw;
- the_chip->cc_reading_at_100 = raw.cc;
the_chip->last_ocv_uv = the_chip->max_voltage_uv;
raw.last_good_ocv_uv = the_chip->max_voltage_uv;
+ raw.cc = 0;
+ /* reset the cc in h/w */
+ reset_cc(the_chip);
the_chip->last_ocv_temp_decidegc = batt_temp;
/*
* since we are treating this as an ocv event
* forget the old cc value
*/
the_chip->last_cc_uah = 0;
- pr_debug("EOC BATT_FULL ocv_reading = 0x%x cc = 0x%x\n",
- the_chip->ocv_reading_at_100,
- the_chip->cc_reading_at_100);
+ pr_debug("EOC BATT_FULL ocv_reading = 0x%x\n",
+ the_chip->ocv_reading_at_100);
}
the_chip->end_percent = calculate_state_of_charge(the_chip, &raw,
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index 85a310a..eb75475 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -670,10 +670,18 @@
#define SLEEP_CLK_HZ 32764
#define SECONDS_PER_HOUR 3600
-static s64 cc_uv_to_uvh(s64 cc_uv)
+static s64 cc_uv_to_pvh(s64 cc_uv)
{
- return div_s64(cc_uv * CC_READING_TICKS,
- SLEEP_CLK_HZ * SECONDS_PER_HOUR);
+ /* Note that it is necessary to multiply by 1000000 to convert
+ * from uvh to pvh here.
+ * However, the maximum Coulomb Counter value is 2^35, which can cause
+ * an overflow.
+ * Multiply by 100000 first to preserve as much precision as possible,
+ * then multiply by 10 after doing the division in order to avoid
+ * overflow on the maximum Coulomb Counter value.
+ */
+ return div_s64(cc_uv * CC_READING_TICKS * 100000,
+ SLEEP_CLK_HZ * SECONDS_PER_HOUR) * 10;
}
/**
@@ -688,7 +696,7 @@
*/
static int calculate_cc(struct qpnp_bms_chip *chip, int64_t cc)
{
- int64_t cc_voltage_uv, cc_uvh, cc_uah;
+ int64_t cc_voltage_uv, cc_pvh, cc_uah;
struct qpnp_iadc_calib calibration;
qpnp_iadc_get_gain_and_offset(&calibration);
@@ -702,9 +710,9 @@
calibration.gain_raw
- calibration.offset_raw);
pr_debug("cc_voltage_uv = %lld uv\n", cc_voltage_uv);
- cc_uvh = cc_uv_to_uvh(cc_voltage_uv);
- pr_debug("cc_uvh = %lld micro_volt_hour\n", cc_uvh);
- cc_uah = div_s64(cc_uvh * 1000000LL, chip->r_sense_uohm);
+ cc_pvh = cc_uv_to_pvh(cc_voltage_uv);
+ pr_debug("cc_pvh = %lld pvh\n", cc_pvh);
+ cc_uah = div_s64(cc_pvh, chip->r_sense_uohm);
/* cc_raw had 4 bits of extra precision.
By now it should be within 32 bit range */
return (int)cc_uah;
@@ -1474,16 +1482,12 @@
static int clamp_soc_based_on_voltage(struct qpnp_bms_chip *chip, int soc)
{
int rc, vbat_uv;
- struct qpnp_vadc_result result;
- rc = qpnp_vadc_read(VBAT_SNS, &result);
- if (rc) {
- pr_err("error reading vbat_sns adc channel = %d, rc = %d\n",
- VBAT_SNS, rc);
- return rc;
+ rc = get_battery_voltage(&vbat_uv);
+ if (rc < 0) {
+ pr_err("adc vbat failed err = %d\n", rc);
+ return soc;
}
-
- vbat_uv = (int)result.physical;
if (soc == 0 && vbat_uv > chip->v_cutoff_uv) {
pr_debug("clamping soc to 1, vbat (%d) > cutoff (%d)\n",
vbat_uv, chip->v_cutoff_uv);
@@ -1612,28 +1616,16 @@
return chip->calculated_soc;
}
-static int read_vbat(struct qpnp_bms_chip *chip)
-{
- int rc;
- struct qpnp_vadc_result result;
-
- rc = qpnp_vadc_read(VBAT_SNS, &result);
- if (rc) {
- pr_err("error reading vadc VBAT_SNS = %d, rc = %d\n",
- VBAT_SNS, rc);
- return rc;
- }
- pr_debug("read %duv from vadc\n", (int)result.physical);
- return (int)result.physical;
-}
-
static int calculate_soc_from_voltage(struct qpnp_bms_chip *chip)
{
int voltage_range_uv, voltage_remaining_uv, voltage_based_soc;
- int vbat_uv;
+ int rc, vbat_uv;
- vbat_uv = read_vbat(chip);
-
+ rc = get_battery_voltage(&vbat_uv);
+ if (rc < 0) {
+ pr_err("adc vbat failed err = %d\n", rc);
+ return rc;
+ }
voltage_range_uv = chip->max_voltage_uv - chip->v_cutoff_uv;
voltage_remaining_uv = vbat_uv - chip->v_cutoff_uv;
voltage_based_soc = voltage_remaining_uv * 100 / voltage_range_uv;
@@ -2467,7 +2459,12 @@
}
vbatt = 0;
- get_battery_voltage(&vbatt);
+ rc = get_battery_voltage(&vbatt);
+ if (rc) {
+ pr_err("error reading vbat_sns adc channel = %d, rc = %d\n",
+ VBAT_SNS, rc);
+ goto unregister_dc;
+ }
pr_info("probe success: soc =%d vbatt = %d ocv = %d r_sense_uohm = %u\n",
get_prop_bms_capacity(chip),
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index e2ba042..7833afa 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -86,8 +86,8 @@
#define USB_OVP_CTL 0x42
#define SEC_ACCESS 0xD0
-/* SMBB peripheral subtype values */
#define REG_OFFSET_PERP_SUBTYPE 0x05
+/* SMBB peripheral subtype values */
#define SMBB_CHGR_SUBTYPE 0x01
#define SMBB_BUCK_SUBTYPE 0x02
#define SMBB_BAT_IF_SUBTYPE 0x03
@@ -96,6 +96,14 @@
#define SMBB_BOOST_SUBTYPE 0x06
#define SMBB_MISC_SUBTYPE 0x07
+/* SMBBP peripheral subtype values */
+#define SMBBP_CHGR_SUBTYPE 0x31
+#define SMBBP_BUCK_SUBTYPE 0x32
+#define SMBBP_BAT_IF_SUBTYPE 0x33
+#define SMBBP_USB_CHGPTH_SUBTYPE 0x34
+#define SMBBP_BOOST_SUBTYPE 0x36
+#define SMBBP_MISC_SUBTYPE 0x37
+
#define QPNP_CHARGER_DEV_NAME "qcom,qpnp-charger"
/* Status bits and masks */
@@ -341,6 +349,9 @@
u8 dcin_valid_rt_sts;
int rc;
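+ /* If the DC charge path peripheral is absent, report DC as not plugged in */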
+ if (!chip->dc_chgpth_base)
+ return 0;
+
rc = qpnp_chg_read(chip, &dcin_valid_rt_sts,
INT_RT_STS(chip->dc_chgpth_base), 1);
if (rc) {
@@ -1212,6 +1223,7 @@
switch (subtype) {
case SMBB_CHGR_SUBTYPE:
+ case SMBBP_CHGR_SUBTYPE:
chip->chg_done_irq = spmi_get_irq_byname(chip->spmi,
spmi_resource, "chg-done");
if (chip->chg_done_irq < 0) {
@@ -1289,6 +1301,7 @@
enable_irq_wake(chip->chg_done_irq);
break;
case SMBB_BUCK_SUBTYPE:
+ case SMBBP_BUCK_SUBTYPE:
rc = qpnp_chg_masked_write(chip,
chip->chgr_base + CHGR_BUCK_BCK_VBAT_REG_MODE,
BUCK_VBAT_REG_NODE_SEL_BIT,
@@ -1299,8 +1312,10 @@
}
break;
case SMBB_BAT_IF_SUBTYPE:
+ case SMBBP_BAT_IF_SUBTYPE:
break;
case SMBB_USB_CHGPTH_SUBTYPE:
+ case SMBBP_USB_CHGPTH_SUBTYPE:
chip->usbin_valid_irq = spmi_get_irq_byname(chip->spmi,
spmi_resource, "usbin-valid");
if (chip->usbin_valid_irq < 0) {
@@ -1361,8 +1376,10 @@
enable_irq_wake(chip->dcin_valid_irq);
break;
case SMBB_BOOST_SUBTYPE:
+ case SMBBP_BOOST_SUBTYPE:
break;
case SMBB_MISC_SUBTYPE:
+ case SMBBP_MISC_SUBTYPE:
pr_debug("Setting BOOT_DONE\n");
rc = qpnp_chg_masked_write(chip,
chip->misc_base + CHGR_MISC_BOOT_DONE,
@@ -1397,10 +1414,6 @@
return -ENOMEM;
}
- rc = qpnp_vadc_is_ready();
- if (rc)
- goto fail_chg_enable;
-
chip->dev = &(spmi->dev);
chip->spmi = spmi;
@@ -1557,6 +1570,7 @@
switch (subtype) {
case SMBB_CHGR_SUBTYPE:
+ case SMBBP_CHGR_SUBTYPE:
chip->chgr_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1566,6 +1580,7 @@
}
break;
case SMBB_BUCK_SUBTYPE:
+ case SMBBP_BUCK_SUBTYPE:
chip->buck_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1575,6 +1590,7 @@
}
break;
case SMBB_BAT_IF_SUBTYPE:
+ case SMBBP_BAT_IF_SUBTYPE:
chip->bat_if_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1584,6 +1600,7 @@
}
break;
case SMBB_USB_CHGPTH_SUBTYPE:
+ case SMBBP_USB_CHGPTH_SUBTYPE:
chip->usb_chgpth_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1602,6 +1619,7 @@
}
break;
case SMBB_BOOST_SUBTYPE:
+ case SMBBP_BOOST_SUBTYPE:
chip->boost_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1611,6 +1629,7 @@
}
break;
case SMBB_MISC_SUBTYPE:
+ case SMBBP_MISC_SUBTYPE:
chip->misc_base = resource->start;
rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
if (rc) {
@@ -1628,34 +1647,44 @@
dev_set_drvdata(&spmi->dev, chip);
device_init_wakeup(&spmi->dev, 1);
- chip->dc_psy.name = "qpnp-dc";
- chip->dc_psy.type = POWER_SUPPLY_TYPE_MAINS;
- chip->dc_psy.supplied_to = pm_power_supplied_to;
- chip->dc_psy.num_supplicants = ARRAY_SIZE(pm_power_supplied_to);
- chip->dc_psy.properties = pm_power_props_mains;
- chip->dc_psy.num_properties = ARRAY_SIZE(pm_power_props_mains);
- chip->dc_psy.get_property = qpnp_power_get_property_mains;
+ if (chip->bat_if_base) {
+ rc = qpnp_vadc_is_ready();
+ if (rc)
+ goto fail_chg_enable;
- chip->batt_psy.name = "battery";
- chip->batt_psy.type = POWER_SUPPLY_TYPE_BATTERY;
- chip->batt_psy.properties = msm_batt_power_props;
- chip->batt_psy.num_properties = ARRAY_SIZE(msm_batt_power_props);
- chip->batt_psy.get_property = qpnp_batt_power_get_property;
- chip->batt_psy.set_property = qpnp_batt_power_set_property;
- chip->batt_psy.property_is_writeable = qpnp_batt_property_is_writeable;
- chip->batt_psy.external_power_changed =
+ chip->batt_psy.name = "battery";
+ chip->batt_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->batt_psy.properties = msm_batt_power_props;
+ chip->batt_psy.num_properties =
+ ARRAY_SIZE(msm_batt_power_props);
+ chip->batt_psy.get_property = qpnp_batt_power_get_property;
+ chip->batt_psy.set_property = qpnp_batt_power_set_property;
+ chip->batt_psy.property_is_writeable =
+ qpnp_batt_property_is_writeable;
+ chip->batt_psy.external_power_changed =
qpnp_batt_external_power_changed;
- rc = power_supply_register(chip->dev, &chip->batt_psy);
- if (rc < 0) {
- pr_err("power_supply_register batt failed rc = %d\n", rc);
- goto fail_chg_enable;
+ rc = power_supply_register(chip->dev, &chip->batt_psy);
+ if (rc < 0) {
+ pr_err("batt failed to register rc = %d\n", rc);
+ goto fail_chg_enable;
+ }
}
- rc = power_supply_register(chip->dev, &chip->dc_psy);
- if (rc < 0) {
- pr_err("power_supply_register usb failed rc = %d\n", rc);
- goto unregister_batt;
+ if (chip->dc_chgpth_base) {
+ chip->dc_psy.name = "qpnp-dc";
+ chip->dc_psy.type = POWER_SUPPLY_TYPE_MAINS;
+ chip->dc_psy.supplied_to = pm_power_supplied_to;
+ chip->dc_psy.num_supplicants = ARRAY_SIZE(pm_power_supplied_to);
+ chip->dc_psy.properties = pm_power_props_mains;
+ chip->dc_psy.num_properties = ARRAY_SIZE(pm_power_props_mains);
+ chip->dc_psy.get_property = qpnp_power_get_property_mains;
+
+ rc = power_supply_register(chip->dev, &chip->dc_psy);
+ if (rc < 0) {
+ pr_err("power_supply_register dc failed rc=%d\n", rc);
+ goto unregister_batt;
+ }
}
/* Turn on appropriate workaround flags */
@@ -1664,11 +1693,11 @@
power_supply_set_present(chip->usb_psy,
qpnp_chg_is_usb_chg_plugged_in(chip));
- if (chip->maxinput_dc_ma) {
+ if (chip->maxinput_dc_ma && chip->dc_chgpth_base) {
rc = qpnp_chg_idcmax_set(chip, chip->maxinput_dc_ma);
if (rc) {
pr_err("Error setting idcmax property %d\n", rc);
- goto fail_chg_enable;
+ goto unregister_batt;
}
}
@@ -1684,7 +1713,8 @@
return 0;
unregister_batt:
- power_supply_unregister(&chip->batt_psy);
+ if (chip->bat_if_base)
+ power_supply_unregister(&chip->batt_psy);
fail_chg_enable:
kfree(chip->thermal_mitigation);
kfree(chip);
diff --git a/drivers/spmi/qpnp-int.c b/drivers/spmi/qpnp-int.c
index d1d49ef..082c9ff 100644
--- a/drivers/spmi/qpnp-int.c
+++ b/drivers/spmi/qpnp-int.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -26,13 +26,16 @@
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/printk.h>
+#include <linux/ratelimit.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <mach/qpnp-int.h>
/* 16 slave_ids, 256 per_ids per slave, and 8 ints per per_id */
-#define QPNPINT_NR_IRQS (16 * 256 * 8)
+#define QPNPINT_NR_IRQS (16 * 256 * 8)
+/* This value is guaranteed not to be valid for private data */
+#define QPNPINT_INVALID_DATA 0x80000000
enum qpnpint_regs {
QPNPINT_REG_RT_STS = 0x10,
@@ -65,7 +68,7 @@
struct q_chip_data {
int bus_nr;
struct irq_domain *domain;
- struct qpnp_local_int cb;
+ struct qpnp_local_int *cb;
struct spmi_controller *spmi_ctrl;
struct radix_tree_root per_tree;
struct list_head list;
@@ -114,6 +117,18 @@
return 0;
}
+static int qpnpint_spmi_read(struct q_irq_data *irq_d, uint8_t reg,
+ void *buf, uint32_t len)
+{
+ struct q_chip_data *chip_d = irq_d->chip_d;
+
+ if (!chip_d->spmi_ctrl)
+ return -ENODEV;
+
+ return spmi_ext_register_readl(chip_d->spmi_ctrl, irq_d->spmi_slave,
+ irq_d->spmi_offset + reg, buf, len);
+}
+
static int qpnpint_spmi_write(struct q_irq_data *irq_d, uint8_t reg,
void *buf, uint32_t len)
{
@@ -128,31 +143,76 @@
return rc;
}
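+/*
+ * Helper for the mask/unmask paths: lazily registers the arbiter private
+ * data for this interrupt the first time it is needed, then invokes the
+ * requested arbiter operation with it.
+ */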
+static int qpnpint_arbiter_op(struct irq_data *d,
+ struct q_irq_data *irq_d,
+ int (*arb_op)(struct spmi_controller *,
+ struct qpnp_irq_spec *,
+ uint32_t))
+
+{
+ struct q_chip_data *chip_d = irq_d->chip_d;
+ struct qpnp_irq_spec q_spec;
+ int rc;
+
+ if (!arb_op)
+ return 0;
+
+ if (!chip_d->cb->register_priv_data) {
+ pr_warn_ratelimited("No ability to register arbiter registration data\n");
+ return -ENODEV;
+ }
+
+ rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
+ if (rc) {
+ pr_err_ratelimited("%s: decode failed on hwirq %lu\n",
+ __func__, d->hwirq);
+ return rc;
+ } else {
+ if (irq_d->priv_d == QPNPINT_INVALID_DATA) {
+ rc = chip_d->cb->register_priv_data(chip_d->spmi_ctrl,
+ &q_spec, &irq_d->priv_d);
+ if (rc) {
+ pr_err_ratelimited(
+ "%s: decode failed on hwirq %lu\n",
+ __func__, d->hwirq);
+ return rc;
+ }
+
+ }
+ arb_op(chip_d->spmi_ctrl, &q_spec, irq_d->priv_d);
+ }
+
+ return 0;
+}
+
static void qpnpint_irq_mask(struct irq_data *d)
{
struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
struct q_chip_data *chip_d = irq_d->chip_d;
struct q_perip_data *per_d = irq_d->per_d;
- struct qpnp_irq_spec q_spec;
int rc;
pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
- if (chip_d->cb.mask) {
- rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
- if (rc)
- pr_err("decode failed on hwirq %lu\n", d->hwirq);
- else
- chip_d->cb.mask(chip_d->spmi_ctrl, &q_spec,
- irq_d->priv_d);
+ if (!chip_d->cb) {
+ pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
+ chip_d->bus_nr, irq_d->spmi_slave,
+ irq_d->spmi_offset);
+ return;
}
+ qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
+
per_d->int_en &= ~irq_d->mask_shift;
rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
(u8 *)&irq_d->mask_shift, 1);
- if (rc)
- pr_err("spmi failure on irq %d\n", d->irq);
+ if (rc) {
+ pr_err_ratelimited("spmi failure on irq %d\n", d->irq);
+ return;
+ }
+
+ pr_debug("done hwirq %lu irq: %d\n", d->hwirq, d->irq);
}
static void qpnpint_irq_mask_ack(struct irq_data *d)
@@ -160,32 +220,34 @@
struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
struct q_chip_data *chip_d = irq_d->chip_d;
struct q_perip_data *per_d = irq_d->per_d;
- struct qpnp_irq_spec q_spec;
int rc;
- pr_debug("hwirq %lu irq: %d mask: 0x%x\n", d->hwirq, d->irq,
- irq_d->mask_shift);
+ pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
- if (chip_d->cb.mask) {
- rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
- if (rc)
- pr_err("decode failed on hwirq %lu\n", d->hwirq);
- else
- chip_d->cb.mask(chip_d->spmi_ctrl, &q_spec,
- irq_d->priv_d);
+ if (!chip_d->cb) {
+ pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
+ chip_d->bus_nr, irq_d->spmi_slave,
+ irq_d->spmi_offset);
+ return;
}
+ qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
+
per_d->int_en &= ~irq_d->mask_shift;
rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
&irq_d->mask_shift, 1);
- if (rc)
+ if (rc) {
pr_err("spmi failure on irq %d\n", d->irq);
+ return;
+ }
rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR,
&irq_d->mask_shift, 1);
- if (rc)
+ if (rc) {
pr_err("spmi failure on irq %d\n", d->irq);
+ return;
+ }
}
static void qpnpint_irq_unmask(struct irq_data *d)
@@ -193,25 +255,26 @@
struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
struct q_chip_data *chip_d = irq_d->chip_d;
struct q_perip_data *per_d = irq_d->per_d;
- struct qpnp_irq_spec q_spec;
int rc;
pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
- if (chip_d->cb.unmask) {
- rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
- if (rc)
- pr_err("decode failed on hwirq %lu\n", d->hwirq);
- else
- chip_d->cb.unmask(chip_d->spmi_ctrl, &q_spec,
- irq_d->priv_d);
+ if (!chip_d->cb) {
+ pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
+ chip_d->bus_nr, irq_d->spmi_slave,
+ irq_d->spmi_offset);
+ return;
}
+ qpnpint_arbiter_op(d, irq_d, chip_d->cb->unmask);
+
per_d->int_en |= irq_d->mask_shift;
rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_SET,
&irq_d->mask_shift, 1);
- if (rc)
+ if (rc) {
pr_err("spmi failure on irq %d\n", d->irq);
+ return;
+ }
}
static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
@@ -248,9 +311,29 @@
buf[2] = per_d->pol_low;
rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_SET_TYPE, &buf, 3);
- if (rc)
+ if (rc) {
pr_err("spmi failure on irq %d\n", d->irq);
- return rc;
+ return rc;
+ }
+
+ return 0;
+}
+
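+/* Return the current (real-time) state of the interrupt line from RT_STS */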
+static int qpnpint_irq_read_line(struct irq_data *d)
+{
+ struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
+ int rc;
+ u8 buf;
+
+ pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
+
+ rc = qpnpint_spmi_read(irq_d, QPNPINT_REG_RT_STS, &buf, 1);
+ if (rc) {
+ pr_err("spmi failure on irq %d\n", d->irq);
+ return rc;
+ }
+
+ return (buf & irq_d->mask_shift) ? 1 : 0;
}
static int qpnpint_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -264,6 +347,7 @@
.irq_mask_ack = qpnpint_irq_mask_ack,
.irq_unmask = qpnpint_irq_unmask,
.irq_set_type = qpnpint_irq_set_type,
+ .irq_read_line = qpnpint_irq_read_line,
.irq_set_wake = qpnpint_irq_set_wake,
.flags = IRQCHIP_MASK_ON_SUSPEND,
};
@@ -283,11 +367,14 @@
irq_d->spmi_offset = q_spec.per << 8;
irq_d->chip_d = chip_d;
- if (chip_d->cb.register_priv_data)
- rc = chip_d->cb.register_priv_data(chip_d->spmi_ctrl, &q_spec,
+ irq_d->priv_d = QPNPINT_INVALID_DATA;
+
+ if (chip_d->cb && chip_d->cb->register_priv_data) {
+ rc = chip_d->cb->register_priv_data(chip_d->spmi_ctrl, &q_spec,
&irq_d->priv_d);
if (rc)
return rc;
+ }
irq_d->per_d->use_count++;
return 0;
@@ -365,6 +452,8 @@
*out_hwirq = ret;
*out_type = IRQ_TYPE_NONE;
+ pr_debug("out_hwirq = %lu\n", *out_hwirq);
+
return 0;
}
@@ -386,7 +475,7 @@
pr_debug("hwirq = %lu\n", hwirq);
- if (hwirq < 0 || hwirq >= 32768) {
+ if (hwirq < 0 || hwirq >= QPNPINT_NR_IRQS) {
pr_err("hwirq %lu out of bounds\n", hwirq);
return -EINVAL;
}
@@ -448,7 +537,10 @@
list_for_each_entry(chip_d, &qpnpint_chips, list)
if (node == chip_d->domain->of_node) {
- chip_d->cb = *li_cb;
+ chip_d->cb = kmemdup(li_cb,
+ sizeof(*li_cb), GFP_ATOMIC);
+ if (!chip_d->cb)
+ return -ENOMEM;
chip_d->spmi_ctrl = ctrl;
chip_lookup[ctrl->nr] = chip_d;
return 0;
@@ -458,6 +550,27 @@
}
EXPORT_SYMBOL(qpnpint_register_controller);
+int qpnpint_unregister_controller(struct device_node *node)
+{
+ struct q_chip_data *chip_d;
+
+ if (!node)
+ return -EINVAL;
+
+ list_for_each_entry(chip_d, &qpnpint_chips, list)
+ if (node == chip_d->domain->of_node) {
+ kfree(chip_d->cb);
+ chip_d->cb = NULL;
+ if (chip_d->spmi_ctrl)
+ chip_lookup[chip_d->spmi_ctrl->nr] = NULL;
+ chip_d->spmi_ctrl = NULL;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(qpnpint_unregister_controller);
+
int qpnpint_handle_irq(struct spmi_controller *spmi_ctrl,
struct qpnp_irq_spec *spec)
{
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 450db0b..05a4806 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -763,12 +763,18 @@
static int __devexit spmi_pmic_arb_remove(struct platform_device *pdev)
{
struct spmi_pmic_arb_dev *pmic_arb = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = qpnpint_unregister_controller(pmic_arb->controller.dev.of_node);
+ if (ret)
+ dev_err(&pdev->dev, "Unable to unregister controller %d\n",
+ pmic_arb->controller.nr);
if (pmic_arb->allow_wakeup)
irq_set_irq_wake(pmic_arb->pic_irq, 0);
platform_set_drvdata(pdev, NULL);
spmi_del_controller(&pmic_arb->controller);
- return 0;
+ return ret;
}
static struct of_device_id spmi_pmic_arb_match_table[] = {
diff --git a/drivers/thermal/msm8974-tsens.c b/drivers/thermal/msm8974-tsens.c
index e37b3c4..482d383 100644
--- a/drivers/thermal/msm8974-tsens.c
+++ b/drivers/thermal/msm8974-tsens.c
@@ -63,7 +63,7 @@
#define TSENS_SN_REMOTE_CONFIG(n) ((n) + 0x3c)
#define TSENS_EEPROM(n) ((n) + 0xd0)
-#define TSENS_EEPROM_REDUNDANCY_SEL(n) ((n) + 0x1cc)
+#define TSENS_EEPROM_REDUNDANCY_SEL(n) ((n) + 0x444)
#define TSENS_EEPROM_BACKUP_REGION(n) ((n) + 0x440)
#define TSENS_MAIN_CALIB_ADDR_RANGE 6
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index beba33f..f38de0c 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1935,6 +1935,9 @@
case POWER_SUPPLY_PROP_ONLINE:
val->intval = mdwc->online;
break;
+ case POWER_SUPPLY_PROP_TYPE:
+ val->intval = psy->type;
+ break;
default:
return -EINVAL;
}
@@ -1973,6 +1976,9 @@
case POWER_SUPPLY_PROP_CURRENT_MAX:
mdwc->current_max = val->intval;
break;
+ case POWER_SUPPLY_PROP_TYPE:
+ psy->type = val->intval;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0664376..5694999 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -191,7 +191,8 @@
* FIXME For now we will only allocate 1 wMaxPacketSize space
* for each enabled endpoint, later patches will come to
* improve this algorithm so that we better use the internal
- * FIFO space
+ * FIFO space. Also consider the case where TxFIFO RAM space
+ * may change dynamically based on the USB configuration.
*/
for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
struct dwc3_ep *dep = dwc->eps[num];
@@ -205,7 +206,8 @@
if (!(dep->flags & DWC3_EP_ENABLED))
continue;
- if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
+ if (((dep->endpoint.maxburst > 1) &&
+ usb_endpoint_xfer_bulk(dep->endpoint.desc))
|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
mult = 3;
@@ -215,8 +217,8 @@
* Make sure that's true somehow and change FIFO allocation
* accordingly.
*
- * If we have Bulk or Isochronous endpoints, we want
- * them to be able to be very, very fast. So we're giving
+ * If we have Bulk (burst only) or Isochronous endpoints, we
+ * want them to be able to be very, very fast. So we're giving
* those endpoints a fifo_size which is enough for 3 full
* packets
*/
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 116b5b0..9dd9c40 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -76,10 +76,10 @@
#define USB_ETH_RNDIS y
#include "f_rndis.c"
#include "rndis.c"
+#include "f_qc_ecm.c"
#include "u_bam_data.c"
#include "f_mbim.c"
#include "f_ecm.c"
-#include "f_qc_ecm.c"
#include "f_qc_rndis.c"
#include "u_ether.c"
#include "u_qc_ether.c"
@@ -655,6 +655,9 @@
.attributes = rmnet_function_attributes,
};
+/* ecm transport string */
+static char ecm_transports[MAX_XPORT_STR_LEN];
+
struct ecm_function_config {
u8 ethaddr[ETH_ALEN];
};
@@ -678,6 +681,7 @@
struct usb_configuration *c)
{
int ret;
+ char *trans;
struct ecm_function_config *ecm = f->config;
if (!ecm) {
@@ -689,19 +693,28 @@
ecm->ethaddr[0], ecm->ethaddr[1], ecm->ethaddr[2],
ecm->ethaddr[3], ecm->ethaddr[4], ecm->ethaddr[5]);
- ret = gether_qc_setup_name(c->cdev->gadget, ecm->ethaddr, "ecm");
- if (ret) {
- pr_err("%s: gether_setup failed\n", __func__);
- return ret;
+ pr_debug("%s: ecm_transport is %s", __func__, ecm_transports);
+
+ trans = strim(ecm_transports);
+ if (strcmp("BAM2BAM_IPA", trans)) {
+ ret = gether_qc_setup_name(c->cdev->gadget,
+ ecm->ethaddr, "ecm");
+ if (ret) {
+ pr_err("%s: gether_setup failed\n", __func__);
+ return ret;
+ }
}
- return ecm_qc_bind_config(c, ecm->ethaddr);
+ return ecm_qc_bind_config(c, ecm->ethaddr, trans);
}
static void ecm_qc_function_unbind_config(struct android_usb_function *f,
struct usb_configuration *c)
{
- gether_qc_cleanup_name("ecm0");
+ char *trans = strim(ecm_transports);
+
+ if (strcmp("BAM2BAM_IPA", trans))
+ gether_qc_cleanup_name("ecm0");
}
static ssize_t ecm_ethaddr_show(struct device *dev,
@@ -731,7 +744,24 @@
static DEVICE_ATTR(ecm_ethaddr, S_IRUGO | S_IWUSR, ecm_ethaddr_show,
ecm_ethaddr_store);
+static ssize_t ecm_transports_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", ecm_transports);
+}
+
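+/*
+ * Writing "BAM2BAM_IPA" here selects the IPA data path for ECM; any other
+ * value keeps the default BAM2BAM transport (see ecm_qc_function_bind_config).
+ */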
+static ssize_t ecm_transports_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ strlcpy(ecm_transports, buf, sizeof(ecm_transports));
+ return size;
+}
+
+static DEVICE_ATTR(ecm_transports, S_IRUGO | S_IWUSR, ecm_transports_show,
+ ecm_transports_store);
+
static struct device_attribute *ecm_function_attributes[] = {
+ &dev_attr_ecm_transports,
&dev_attr_ecm_ethaddr,
NULL
};
@@ -2051,6 +2081,7 @@
struct android_configuration *conf;
int enabled = 0;
bool audio_enabled = false;
+ static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
if (!cdev)
return -ENODEV;
@@ -2096,7 +2127,7 @@
f_holder->f->disable(f_holder->f);
}
dev->enabled = false;
- } else {
+ } else if (__ratelimit(&rl)) {
pr_err("android_usb: already %s\n",
dev->enabled ? "enabled" : "disabled");
}
diff --git a/drivers/usb/gadget/f_adb.c b/drivers/usb/gadget/f_adb.c
index a55f0e5..ff2287e 100644
--- a/drivers/usb/gadget/f_adb.c
+++ b/drivers/usb/gadget/f_adb.c
@@ -463,7 +463,10 @@
static int adb_open(struct inode *ip, struct file *fp)
{
- pr_info("adb_open\n");
+ static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
+
+ if (__ratelimit(&rl))
+ pr_info("adb_open\n");
if (!_adb_dev)
return -ENODEV;
@@ -486,7 +489,10 @@
static int adb_release(struct inode *ip, struct file *fp)
{
- pr_info("adb_release\n");
+ static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
+
+ if (__ratelimit(&rl))
+ pr_info("adb_release\n");
/*
* ADB daemon closes the device file after I/O error. The
diff --git a/drivers/usb/gadget/f_mbim.c b/drivers/usb/gadget/f_mbim.c
index ff0bdaf..a32dd15 100644
--- a/drivers/usb/gadget/f_mbim.c
+++ b/drivers/usb/gadget/f_mbim.c
@@ -665,7 +665,8 @@
pr_info("dev:%p portno:%d\n", dev, dev->port_num);
- ret = bam_data_connect(&dev->bam_port, dev->port_num, dev->port_num);
+ ret = bam_data_connect(&dev->bam_port, dev->port_num,
+ USB_GADGET_XPORT_BAM2BAM, dev->port_num, USB_FUNC_MBIM);
if (ret) {
pr_err("bam_data_setup failed: err:%d\n",
ret);
diff --git a/drivers/usb/gadget/f_qc_ecm.c b/drivers/usb/gadget/f_qc_ecm.c
index 88d19f5..559fd04 100644
--- a/drivers/usb/gadget/f_qc_ecm.c
+++ b/drivers/usb/gadget/f_qc_ecm.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,11 @@
/* #define VERBOSE_DEBUG */
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -29,6 +34,9 @@
#include "u_ether.h"
#include "u_qc_ether.h"
+#include "u_bam_data.h"
+#include <mach/ecm_ipa.h>
+
/*
* This function is a "CDC Ethernet Networking Control Model" (CDC ECM)
@@ -58,9 +66,9 @@
};
struct f_ecm_qc {
- struct qc_gether port;
+ struct qc_gether port;
u8 ctrl_id, data_id;
-
+ enum transport_type xport;
char ethaddr[14];
struct usb_ep *notify;
@@ -69,6 +77,16 @@
bool is_open;
};
+struct f_ecm_qc_ipa_params {
+ u8 dev_mac[ETH_ALEN];
+ u8 host_mac[ETH_ALEN];
+ ecm_ipa_callback ipa_rx_cb;
+ ecm_ipa_callback ipa_tx_cb;
+ void *ipa_priv;
+};
+
+static struct f_ecm_qc_ipa_params ipa_params;
+
static inline struct f_ecm_qc *func_to_ecm_qc(struct usb_function *f)
{
return container_of(f, struct f_ecm_qc, port.func);
@@ -288,51 +306,6 @@
static struct data_port ecm_qc_bam_port;
-static int ecm_qc_bam_setup(void)
-{
- int ret;
-
- ret = bam_data_setup(ECM_QC_NO_PORTS);
- if (ret) {
- pr_err("bam_data_setup failed err: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static int ecm_qc_bam_connect(struct f_ecm_qc *dev)
-{
- int ret;
-
- ecm_qc_bam_port.cdev = dev->port.func.config->cdev;
- ecm_qc_bam_port.in = dev->port.in_ep;
- ecm_qc_bam_port.out = dev->port.out_ep;
-
- /* currently we use the first connection */
- ret = bam_data_connect(&ecm_qc_bam_port, 0, 0);
- if (ret) {
- pr_err("bam_data_connect failed: err:%d\n",
- ret);
- return ret;
- } else {
- pr_info("ecm bam connected\n");
- }
-
- return 0;
-}
-
-static int ecm_qc_bam_disconnect(struct f_ecm_qc *dev)
-{
- pr_debug("dev:%p. %s Disconnect BAM.\n", dev, __func__);
-
- bam_data_disconnect(&ecm_qc_bam_port, 0);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-
static void ecm_qc_do_notify(struct f_ecm_qc *ecm)
{
struct usb_request *req = ecm->notify_req;
@@ -401,6 +374,73 @@
ecm_qc_do_notify(ecm);
}
+static int ecm_qc_bam_setup(void)
+{
+ int ret;
+
+ ret = bam_data_setup(ECM_QC_NO_PORTS);
+ if (ret) {
+ pr_err("bam_data_setup failed err: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ecm_qc_bam_connect(struct f_ecm_qc *dev)
+{
+ int ret;
+
+ ecm_qc_bam_port.cdev = dev->port.func.config->cdev;
+ ecm_qc_bam_port.in = dev->port.in_ep;
+ ecm_qc_bam_port.out = dev->port.out_ep;
+
+ /* currently we use the first connection */
+ ret = bam_data_connect(&ecm_qc_bam_port, 0, dev->xport,
+ 0, USB_FUNC_ECM);
+ if (ret) {
+ pr_err("bam_data_connect failed: err:%d\n", ret);
+ return ret;
+ } else {
+ pr_debug("ecm bam connected\n");
+ }
+
+ dev->is_open = true;
+ ecm_qc_notify(dev);
+
+ return 0;
+}
+
+static int ecm_qc_bam_disconnect(struct f_ecm_qc *dev)
+{
+ pr_debug("dev:%p. Disconnect BAM.\n", dev);
+
+ bam_data_disconnect(&ecm_qc_bam_port, 0);
+
+ ecm_ipa_cleanup(ipa_params.ipa_priv);
+
+ return 0;
+}
+
+void *ecm_qc_get_ipa_rx_cb(void)
+{
+ return ipa_params.ipa_rx_cb;
+}
+
+void *ecm_qc_get_ipa_tx_cb(void)
+{
+ return ipa_params.ipa_tx_cb;
+}
+
+void *ecm_qc_get_ipa_priv(void)
+{
+ return ipa_params.ipa_priv;
+}
+
+/*-------------------------------------------------------------------------*/
+
static void ecm_qc_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_ecm_qc *ecm = req->context;
@@ -524,7 +564,8 @@
* we can disconnect the port from the network layer.
*/
ecm_qc_bam_disconnect(ecm);
- gether_qc_disconnect_name(&ecm->port, "ecm0");
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+ gether_qc_disconnect_name(&ecm->port, "ecm0");
}
if (!ecm->port.in_ep->desc ||
@@ -553,9 +594,12 @@
);
ecm->port.cdc_filter = DEFAULT_FILTER;
DBG(cdev, "activate ecm\n");
- net = gether_qc_connect_name(&ecm->port, "ecm0");
- if (IS_ERR(net))
- return PTR_ERR(net);
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA) {
+ net = gether_qc_connect_name(&ecm->port,
+ "ecm0");
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+ }
if (ecm_qc_bam_connect(ecm))
goto fail;
@@ -597,7 +641,8 @@
if (ecm->port.in_ep->driver_data) {
ecm_qc_bam_disconnect(ecm);
- gether_qc_disconnect_name(&ecm->port, "ecm0");
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+ gether_qc_disconnect_name(&ecm->port, "ecm0");
}
if (ecm->notify->driver_data) {
@@ -662,6 +707,7 @@
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
+
ecm->ctrl_id = status;
ecm_qc_control_intf.bInterfaceNumber = status;
@@ -670,6 +716,7 @@
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
+
ecm->data_id = status;
ecm_qc_data_nop_intf.bInterfaceNumber = status;
@@ -797,6 +844,7 @@
* @c: the configuration to support the network link
* @ethaddr: a buffer in which the ethernet address of the host side
* side of the link was recorded
+ * @xport_name: data path transport type name ("BAM2BAM" or "BAM2BAM_IPA")
* Context: single threaded during gadget setup
*
* Returns zero on success, else negative errno.
@@ -805,7 +853,8 @@
* for calling @gether_cleanup() before module unload.
*/
int
-ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ char *xport_name)
{
struct f_ecm_qc *ecm;
int status;
@@ -819,6 +868,8 @@
return status;
}
+ pr_debug("data transport type is %s", xport_name);
+
/* maybe allocate device-global string IDs */
if (ecm_qc_string_defs[0].id == 0) {
@@ -849,11 +900,23 @@
if (!ecm)
return -ENOMEM;
+ ecm->xport = str_to_xport(xport_name);
+ pr_debug("set xport = %d", ecm->xport);
+
/* export host's Ethernet address in CDC format */
- snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
+ if (ecm->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ gether_qc_get_macs(ipa_params.dev_mac, ipa_params.host_mac);
+ snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
+ "%02X%02X%02X%02X%02X%02X",
+ ipa_params.host_mac[0], ipa_params.host_mac[1],
+ ipa_params.host_mac[2], ipa_params.host_mac[3],
+ ipa_params.host_mac[4], ipa_params.host_mac[5]);
+ } else
+ snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
"%02X%02X%02X%02X%02X%02X",
ethaddr[0], ethaddr[1], ethaddr[2],
ethaddr[3], ethaddr[4], ethaddr[5]);
+
ecm_qc_string_defs[1].s = ecm->ethaddr;
ecm->port.cdc_filter = DEFAULT_FILTER;
@@ -870,8 +933,31 @@
status = usb_add_function(c, &ecm->port.func);
if (status) {
+		pr_err("failed to add function\n");
+ ecm_qc_string_defs[1].s = NULL;
+ kfree(ecm);
+ return status;
+ }
+
+ if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+ return status;
+
+ status = ecm_ipa_init(&ipa_params.ipa_rx_cb, &ipa_params.ipa_tx_cb,
+ &ipa_params.ipa_priv);
+ if (status) {
+		pr_err("failed to initialize ECM IPA Driver\n");
+ ecm_qc_string_defs[1].s = NULL;
+ kfree(ecm);
+ return status;
+ }
+
+ status = ecm_ipa_configure(ipa_params.host_mac, ipa_params.dev_mac,
+ ipa_params.ipa_priv);
+ if (status) {
+		pr_err("failed to configure ECM IPA Driver\n");
ecm_qc_string_defs[1].s = NULL;
kfree(ecm);
}
+
return status;
}
diff --git a/drivers/usb/gadget/f_qc_rndis.c b/drivers/usb/gadget/f_qc_rndis.c
index 128b6d1..51d7bc1 100644
--- a/drivers/usb/gadget/f_qc_rndis.c
+++ b/drivers/usb/gadget/f_qc_rndis.c
@@ -6,7 +6,7 @@
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz (mina86@mina86.com)
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -427,7 +427,8 @@
dev->bam_port.out = dev->port.out_ep;
/* currently we use the first connection */
- ret = bam_data_connect(&dev->bam_port, 0, 0);
+ ret = bam_data_connect(&dev->bam_port, 0, USB_GADGET_XPORT_BAM2BAM,
+ 0, USB_FUNC_RNDIS);
if (ret) {
pr_err("bam_data_connect failed: err:%d\n",
ret);
diff --git a/drivers/usb/gadget/u_bam_data.c b/drivers/usb/gadget/u_bam_data.c
index 70c71d4..8df06a4 100644
--- a/drivers/usb/gadget/u_bam_data.c
+++ b/drivers/usb/gadget/u_bam_data.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,9 +22,10 @@
#include <linux/usb/gadget.h>
#include <mach/bam_dmux.h>
-#include <mach/usb_gadget_xport.h>
#include <mach/usb_bam.h>
+#include "u_bam_data.h"
+
#define BAM2BAM_DATA_N_PORTS 1
static struct workqueue_struct *bam_data_wq;
@@ -34,12 +35,6 @@
#define SPS_PARAMS_TBE BIT(6)
#define MSM_VENDOR_ID BIT(16)
-struct data_port {
- struct usb_composite_dev *cdev;
- struct usb_ep *in;
- struct usb_ep *out;
-};
-
struct bam_data_ch_info {
unsigned long flags;
unsigned id;
@@ -53,6 +48,10 @@
u32 src_pipe_idx;
u32 dst_pipe_idx;
u8 connection_idx;
+
+ enum function_type func_type;
+ enum transport_type trans;
+ struct usb_bam_connect_ipa_params ipa_params;
};
struct bam_data_port {
@@ -175,6 +174,22 @@
return 0;
}
+static void bam2bam_data_disconnect_work(struct work_struct *w)
+{
+ struct bam_data_port *port =
+ container_of(w, struct bam_data_port, disconnect_w);
+ struct bam_data_ch_info *d = &port->data_ch;
+ int ret;
+
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ if (d->func_type == USB_FUNC_ECM)
+ ecm_ipa_disconnect(d->ipa_params.priv);
+ ret = usb_bam_disconnect_ipa(d->connection_idx, &d->ipa_params);
+ if (ret)
+ pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret);
+ }
+}
+
static void bam2bam_data_connect_work(struct work_struct *w)
{
struct bam_data_port *port = container_of(w, struct bam_data_port,
@@ -185,14 +200,49 @@
pr_debug("%s: Connect workqueue started", __func__);
- ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
- &d->dst_pipe_idx);
- d->src_pipe_idx = 11;
- d->dst_pipe_idx = 10;
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ d->ipa_params.client = IPA_CLIENT_USB_CONS;
+ d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
+ if (d->func_type == USB_FUNC_ECM) {
+ d->ipa_params.notify = ecm_qc_get_ipa_tx_cb();
+ d->ipa_params.priv = ecm_qc_get_ipa_priv();
+ }
+ ret = usb_bam_connect_ipa(&d->ipa_params);
+ if (ret) {
+ pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+ __func__, ret);
+ return;
+ }
- if (ret) {
- pr_err("usb_bam_connect failed: err:%d\n", ret);
- return;
+ d->ipa_params.client = IPA_CLIENT_USB_PROD;
+ d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
+ if (d->func_type == USB_FUNC_ECM) {
+ d->ipa_params.notify = ecm_qc_get_ipa_rx_cb();
+ d->ipa_params.priv = ecm_qc_get_ipa_priv();
+ }
+ ret = usb_bam_connect_ipa(&d->ipa_params);
+ if (ret) {
+ pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+ __func__, ret);
+ return;
+ }
+ if (d->func_type == USB_FUNC_ECM) {
+ ret = ecm_ipa_connect(d->ipa_params.cons_clnt_hdl,
+ d->ipa_params.prod_clnt_hdl,
+ d->ipa_params.priv);
+ if (ret) {
+ pr_err("%s: failed to connect IPA: err:%d\n",
+ __func__, ret);
+ return;
+ }
+ }
+ } else { /* transport type is USB_GADGET_XPORT_BAM2BAM */
+ ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
+ &d->dst_pipe_idx);
+ if (ret) {
+ pr_err("usb_bam_connect failed: err:%d\n", ret);
+ return;
+ }
}
if (!port->port_usb) {
@@ -230,15 +280,17 @@
bam_data_start_endless_rx(port);
bam_data_start_endless_tx(port);
- /* Register for peer reset callback */
- usb_bam_register_peer_reset_cb(d->connection_idx,
+ /* Register for peer reset callback if USB_GADGET_XPORT_BAM2BAM */
+ if (d->trans != USB_GADGET_XPORT_BAM2BAM_IPA) {
+ usb_bam_register_peer_reset_cb(d->connection_idx,
bam_data_peer_reset_cb, port);
- ret = usb_bam_client_ready(true);
- if (ret) {
- pr_err("%s: usb_bam_client_ready failed: err:%d\n",
+ ret = usb_bam_client_ready(true);
+ if (ret) {
+ pr_err("%s: usb_bam_client_ready failed: err:%d\n",
__func__, ret);
- return;
+ return;
+ }
}
pr_debug("%s: Connect workqueue done", __func__);
@@ -262,6 +314,7 @@
port->port_num = portno;
INIT_WORK(&port->connect_w, bam2bam_data_connect_work);
+ INIT_WORK(&port->disconnect_w, bam2bam_data_disconnect_work);
/* data ch */
d = &port->data_ch;
@@ -276,6 +329,7 @@
void bam_data_disconnect(struct data_port *gr, u8 port_num)
{
struct bam_data_port *port;
+ struct bam_data_ch_info *d;
pr_debug("dev:%p port#%d\n", gr, port_num);
@@ -285,7 +339,7 @@
}
if (!gr) {
- pr_err("mbim data port is null\n");
+ pr_err("data port is null\n");
return;
}
@@ -303,12 +357,19 @@
port->port_usb = 0;
}
- if (usb_bam_client_ready(false))
- pr_err("%s: usb_bam_client_ready failed\n", __func__);
+ d = &port->data_ch;
+ if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA)
+		queue_work(bam_data_wq, &port->disconnect_w);
+ else {
+ if (usb_bam_client_ready(false)) {
+ pr_err("%s: usb_bam_client_ready failed\n",
+ __func__);
+ }
+ }
}
int bam_data_connect(struct data_port *gr, u8 port_num,
- u8 connection_idx)
+ enum transport_type trans, u8 connection_idx, enum function_type func)
{
struct bam_data_port *port;
struct bam_data_ch_info *d;
@@ -322,7 +383,7 @@
}
if (!gr) {
- pr_err("mbim data port is null\n");
+ pr_err("data port is null\n");
return -ENODEV;
}
@@ -349,6 +410,16 @@
d->connection_idx = connection_idx;
+ d->trans = trans;
+
+ if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+ d->ipa_params.src_pipe = &(d->src_pipe_idx);
+ d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
+ d->ipa_params.idx = connection_idx;
+ }
+
+ d->func_type = func;
+
queue_work(bam_data_wq, &port->connect_w);
return 0;
diff --git a/drivers/usb/gadget/u_bam_data.h b/drivers/usb/gadget/u_bam_data.h
new file mode 100644
index 0000000..71a01b9
--- /dev/null
+++ b/drivers/usb/gadget/u_bam_data.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_BAM_DATA_H
+#define __U_BAM_DATA_H
+
+#include <mach/usb_gadget_xport.h>
+
+enum function_type {
+ USB_FUNC_ECM,
+ USB_FUNC_MBIM,
+ USB_FUNC_RNDIS,
+};
+
+struct data_port {
+ struct usb_composite_dev *cdev;
+ struct usb_ep *in;
+ struct usb_ep *out;
+};
+
+void bam_data_disconnect(struct data_port *gr, u8 port_num);
+
+int bam_data_connect(struct data_port *gr, u8 port_num,
+ enum transport_type trans, u8 connection_idx, enum function_type func);
+
+int bam_data_setup(unsigned int no_bam2bam_port);
+
+void bam_data_suspend(u8 port_num);
+
+void bam_data_resume(u8 port_num);
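+
+/*
+ * Illustrative sketch (not part of the interface): an ECM-style function
+ * driver would typically fill a struct data_port with its endpoints and
+ * then request a BAM2BAM_IPA data path on connection index 0. The names
+ * below are placeholders only:
+ *
+ *	example_port.cdev = f->config->cdev;
+ *	example_port.in = in_ep;
+ *	example_port.out = out_ep;
+ *	ret = bam_data_connect(&example_port, 0,
+ *			USB_GADGET_XPORT_BAM2BAM_IPA, 0, USB_FUNC_ECM);
+ *	...
+ *	bam_data_disconnect(&example_port, 0);
+ */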
+
+#endif /* __U_BAM_DATA_H */
diff --git a/drivers/usb/gadget/u_qc_ether.c b/drivers/usb/gadget/u_qc_ether.c
index ce0a12e..e10ec25 100644
--- a/drivers/usb/gadget/u_qc_ether.c
+++ b/drivers/usb/gadget/u_qc_ether.c
@@ -4,7 +4,7 @@
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -62,7 +62,7 @@
* or updating its backlink port_usb->ioport
*/
spinlock_t lock;
- struct qc_gether *port_usb;
+ struct qc_gether *port_usb;
struct net_device *net;
struct usb_gadget *gadget;
@@ -235,6 +235,14 @@
.name = "gadget",
};
+void gether_qc_get_macs(u8 dev_mac[ETH_ALEN], u8 host_mac[ETH_ALEN])
+{
+ if (get_qc_ether_addr(qc_dev_addr, dev_mac))
+ pr_debug("using random dev_mac ethernet address\n");
+ if (get_qc_ether_addr(qc_host_addr, host_mac))
+ pr_debug("using random host_mac ethernet address\n");
+}
+
/**
* gether_qc_setup - initialize one ethernet-over-usb link
* @g: gadget to associated with these links
@@ -320,6 +328,7 @@
/**
* gether_qc_cleanup_name - remove Ethernet-over-USB device
+ * @netname: name for network device (for example, "usb")
* Context: may sleep
*
* This is called to free all resources allocated by @gether_qc_setup().
@@ -343,6 +352,7 @@
* is active
* @link: the USB link, set up with endpoints, descriptors matching
* current device speed, and any framing wrapper(s) set up.
+ * @netname: name for network device (for example, "usb")
* Context: irqs blocked
*
* This is called to let the network layer know the connection
@@ -391,6 +401,7 @@
* gether_qc_disconnect_name - notify network layer that USB
* link is inactive
* @link: the USB link, on which gether_connect() was called
+ * @netname: name for network device (for example, "usb")
* Context: irqs blocked
*
* This is called to let the network layer know the connection
diff --git a/drivers/usb/gadget/u_qc_ether.h b/drivers/usb/gadget/u_qc_ether.h
index 29193e0..25562da 100644
--- a/drivers/usb/gadget/u_qc_ether.h
+++ b/drivers/usb/gadget/u_qc_ether.h
@@ -4,7 +4,7 @@
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -49,7 +49,7 @@
struct usb_function func;
/* updated by gether_{connect,disconnect} */
- struct eth_qc_dev *ioport;
+ struct eth_qc_dev *ioport;
/* endpoints handle full and/or high speeds */
struct usb_ep *in_ep;
@@ -61,10 +61,7 @@
/* hooks for added framing, as needed for RNDIS and EEM. */
u32 header_len;
- /* NCM requires fixed size bundles */
- bool is_fixed;
- u32 fixed_out_len;
- u32 fixed_in_len;
+
struct sk_buff *(*wrap)(struct qc_gether *port,
struct sk_buff *skb);
int (*unwrap)(struct qc_gether *port,
@@ -89,10 +86,14 @@
void gether_qc_disconnect_name(struct qc_gether *link, const char *netname);
/* each configuration may bind one instance of an ethernet link */
-int ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+int ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+ char *xport_name);
int
rndis_qc_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
u32 vendorID, const char *manufacturer,
u8 maxPktPerXfer);
+
+void gether_qc_get_macs(u8 dev_mac[ETH_ALEN], u8 host_mac[ETH_ALEN]);
+
#endif /* __U_QC_ETHER_H */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 38a3c15..323b481 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -145,29 +145,37 @@
*/
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
- union xhci_trb *next;
unsigned long long addr;
ring->deq_updates++;
- /* If this is not event ring, there is one more usable TRB */
+ /*
+ * If this is not event ring, and the dequeue pointer
+ * is not on a link TRB, there is one more usable TRB
+ */
if (ring->type != TYPE_EVENT &&
!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
ring->num_trbs_free++;
- next = ++(ring->dequeue);
- /* Update the dequeue pointer further if that was a link TRB or we're at
- * the end of an event ring segment (which doesn't have link TRBS)
- */
- while (last_trb(xhci, ring, ring->deq_seg, next)) {
- if (ring->type == TYPE_EVENT && last_trb_on_last_seg(xhci,
- ring, ring->deq_seg, next)) {
- ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ do {
+ /*
+ * Update the dequeue pointer further if that was a link TRB or
+ * we're at the end of an event ring segment (which doesn't have
+ * link TRBS)
+ */
+ if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
+ if (ring->type == TYPE_EVENT &&
+ last_trb_on_last_seg(xhci, ring,
+ ring->deq_seg, ring->dequeue)) {
+ ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ }
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ } else {
+ ring->dequeue++;
}
- ring->deq_seg = ring->deq_seg->next;
- ring->dequeue = ring->deq_seg->trbs;
- next = ring->dequeue;
- }
+ } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
+
addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
}
@@ -885,6 +893,17 @@
num_trbs_free_temp = ep_ring->num_trbs_free;
dequeue_temp = ep_ring->dequeue;
+ /* If we get two back-to-back stalls, and the first stalled transfer
+ * ends just before a link TRB, the dequeue pointer will be left on
+ * the link TRB by the code in the while loop. So we have to update
+ * the dequeue pointer one segment further, or we'll jump off
+ * the segment into la-la-land.
+ */
+ if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+ ep_ring->deq_seg = ep_ring->deq_seg->next;
+ ep_ring->dequeue = ep_ring->deq_seg->trbs;
+ }
+
while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
/* We have more usable TRBs */
ep_ring->num_trbs_free++;
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index c69071d..bd1423d 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -984,12 +984,18 @@
/* usb phy no more require TCXO clock, hence vote for TCXO disable */
if (!host_bus_suspend) {
- ret = msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_OFF);
- if (ret)
- dev_err(phy->dev, "%s failed to devote for "
- "TCXO D0 buffer%d\n", __func__, ret);
- else
+ if (!IS_ERR(motg->xo_clk)) {
+ clk_disable_unprepare(motg->xo_clk);
motg->lpm_flags |= XO_SHUTDOWN;
+ } else {
+ ret = msm_xo_mode_vote(motg->xo_handle,
+ MSM_XO_MODE_OFF);
+ if (ret)
+ dev_err(phy->dev, "%s fail to devote XO %d\n",
+ __func__, ret);
+ else
+ motg->lpm_flags |= XO_SHUTDOWN;
+ }
}
if (motg->caps & ALLOW_PHY_POWER_COLLAPSE &&
@@ -1052,10 +1058,14 @@
/* Vote for TCXO when waking up the phy */
if (motg->lpm_flags & XO_SHUTDOWN) {
- ret = msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_ON);
- if (ret)
- dev_err(phy->dev, "%s failed to vote for "
- "TCXO D0 buffer%d\n", __func__, ret);
+ if (!IS_ERR(motg->xo_clk)) {
+ clk_prepare_enable(motg->xo_clk);
+ } else {
+ ret = msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_ON);
+ if (ret)
+ dev_err(phy->dev, "%s fail to vote for XO %d\n",
+ __func__, ret);
+ }
motg->lpm_flags &= ~XO_SHUTDOWN;
}
@@ -3919,20 +3929,31 @@
motg->async_irq = 0;
}
- motg->xo_handle = msm_xo_get(MSM_XO_TCXO_D0, "usb");
- if (IS_ERR(motg->xo_handle)) {
- dev_err(&pdev->dev, "%s not able to get the handle "
- "to vote for TCXO D0 buffer\n", __func__);
- ret = PTR_ERR(motg->xo_handle);
- goto free_regs;
+ motg->xo_clk = clk_get(&pdev->dev, "xo");
+ if (IS_ERR(motg->xo_clk)) {
+ motg->xo_handle = msm_xo_get(MSM_XO_TCXO_D0, "usb");
+ if (IS_ERR(motg->xo_handle)) {
+ dev_err(&pdev->dev, "%s fail to get handle for TCXO\n",
+ __func__);
+ ret = PTR_ERR(motg->xo_handle);
+ goto free_regs;
+ } else {
+ ret = msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_ON);
+ if (ret) {
+ dev_err(&pdev->dev, "%s XO voting failed %d\n",
+ __func__, ret);
+ goto free_xo_handle;
+ }
+ }
+ } else {
+ ret = clk_prepare_enable(motg->xo_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "%s failed to vote for TCXO %d\n",
+ __func__, ret);
+ goto free_xo_handle;
+ }
}
- ret = msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_ON);
- if (ret) {
- dev_err(&pdev->dev, "%s failed to vote for TCXO "
- "D0 buffer%d\n", __func__, ret);
- goto free_xo_handle;
- }
clk_prepare_enable(motg->pclk);
@@ -4154,9 +4175,15 @@
vdd_val[motg->vdd_type][VDD_MAX]);
devote_xo_handle:
clk_disable_unprepare(motg->pclk);
- msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_OFF);
+ if (!IS_ERR(motg->xo_clk))
+ clk_disable_unprepare(motg->xo_clk);
+ else
+ msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_OFF);
free_xo_handle:
- msm_xo_put(motg->xo_handle);
+ if (!IS_ERR(motg->xo_clk))
+ clk_put(motg->xo_clk);
+ else
+ msm_xo_put(motg->xo_handle);
free_regs:
iounmap(motg->regs);
put_pclk:
@@ -4229,7 +4256,12 @@
clk_disable_unprepare(motg->pclk);
clk_disable_unprepare(motg->core_clk);
- msm_xo_put(motg->xo_handle);
+ if (!IS_ERR(motg->xo_clk)) {
+ clk_disable_unprepare(motg->xo_clk);
+ clk_put(motg->xo_clk);
+ } else {
+ msm_xo_put(motg->xo_handle);
+ }
msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
msm_hsusb_ldo_init(motg, 0);
regulator_disable(hsusb_vddcx);
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index b6882b8..8ceb62e 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -76,9 +76,7 @@
u32 mdp_irq_mask;
u32 mdp_hist_irq_mask;
- u32 suspend;
- u32 timeout;
-
+ int suspend_fs_ena;
atomic_t clk_ref;
u8 clk_ena;
u8 fs_ena;
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index bb29842..ea0eb7b 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -386,6 +386,7 @@
return ret;
}
mfd->op_enable = false;
+ fb_set_suspend(mfd->fbi, FBINFO_STATE_SUSPENDED);
}
return 0;
@@ -417,6 +418,8 @@
mfd->op_enable);
if (ret)
pr_warn("can't turn on display!\n");
+ else
+ fb_set_suspend(mfd->fbi, FBINFO_STATE_RUNNING);
}
mfd->is_power_setting = false;
complete_all(&mfd->power_set_comp);
@@ -424,39 +427,61 @@
return ret;
}
-int mdss_fb_suspend_all(void)
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+static int mdss_fb_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct fb_info *fbi;
- int ret, i;
- int result = 0;
- for (i = 0; i < fbi_list_index; i++) {
- fbi = fbi_list[i];
- fb_set_suspend(fbi, FBINFO_STATE_SUSPENDED);
+ struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+ if (!mfd)
+ return -ENODEV;
- ret = mdss_fb_suspend_sub(fbi->par);
- if (ret != 0) {
- fb_set_suspend(fbi, FBINFO_STATE_RUNNING);
- result = ret;
- }
- }
- return result;
+ dev_dbg(&pdev->dev, "display suspend\n");
+
+ return mdss_fb_suspend_sub(mfd);
}
-int mdss_fb_resume_all(void)
+static int mdss_fb_resume(struct platform_device *pdev)
{
- struct fb_info *fbi;
- int ret, i;
- int result = 0;
+ struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+ if (!mfd)
+ return -ENODEV;
- for (i = 0; i < fbi_list_index; i++) {
- fbi = fbi_list[i];
+ dev_dbg(&pdev->dev, "display resume\n");
- ret = mdss_fb_resume_sub(fbi->par);
- if (ret == 0)
- fb_set_suspend(fbi, FBINFO_STATE_RUNNING);
- }
- return result;
+ return mdss_fb_resume_sub(mfd);
}
+#else
+#define mdss_fb_suspend NULL
+#define mdss_fb_resume NULL
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int mdss_fb_pm_suspend(struct device *dev)
+{
+ struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
+
+ if (!mfd)
+ return -ENODEV;
+
+ dev_dbg(dev, "display pm suspend\n");
+
+ return mdss_fb_suspend_sub(mfd);
+}
+
+static int mdss_fb_pm_resume(struct device *dev)
+{
+ struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
+ if (!mfd)
+ return -ENODEV;
+
+ dev_dbg(dev, "display pm resume\n");
+
+ return mdss_fb_resume_sub(mfd);
+}
+#endif
+
+static const struct dev_pm_ops mdss_fb_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mdss_fb_pm_suspend, mdss_fb_pm_resume)
+};
static const struct of_device_id mdss_fb_dt_match[] = {
{ .compatible = "qcom,mdss-fb",},
@@ -467,9 +492,12 @@
static struct platform_driver mdss_fb_driver = {
.probe = mdss_fb_probe,
.remove = mdss_fb_remove,
+ .suspend = mdss_fb_suspend,
+ .resume = mdss_fb_resume,
.driver = {
.name = "mdss_fb",
.of_match_table = mdss_fb_dt_match,
+ .pm = &mdss_fb_pm_ops,
},
};
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 193b6b7..c4e837e 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -147,8 +147,6 @@
int mdss_fb_get_phys_info(unsigned long *start, unsigned long *len, int fb_num);
void mdss_fb_set_backlight(struct msm_fb_data_type *mfd, u32 bkl_lvl);
void mdss_fb_update_backlight(struct msm_fb_data_type *mfd);
-int mdss_fb_suspend_all(void);
-int mdss_fb_resume_all(void);
void mdss_fb_wait_for_fence(struct msm_fb_data_type *mfd);
void mdss_fb_signal_timeline(struct msm_fb_data_type *mfd);
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 49a1daa..e4099ad 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -797,11 +797,9 @@
}
mdata->res_init = true;
- mdata->timeout = HZ/20;
mdata->clk_ena = false;
mdata->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK;
mdata->irq_ena = false;
- mdata->suspend = false;
rc = mdss_mdp_irq_clk_setup(mdata);
if (rc)
@@ -1365,33 +1363,26 @@
static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
{
- int ret;
+ flush_workqueue(mdata->clk_ctrl_wq);
- ret = mdss_fb_suspend_all();
- if (IS_ERR_VALUE(ret)) {
- pr_err("Unable to suspend all fb panels (%d)\n", ret);
- return ret;
- }
+ mdata->suspend_fs_ena = mdata->fs_ena;
+ mdss_mdp_footswitch_ctrl(mdata, false);
- pr_debug("suspend done\n");
+ pr_debug("suspend done fs=%d\n", mdata->suspend_fs_ena);
return 0;
}
static inline int mdss_mdp_resume_sub(struct mdss_data_type *mdata)
{
- int ret = 0;
+ if (mdata->suspend_fs_ena)
+ mdss_mdp_footswitch_ctrl(mdata, true);
- ret = mdss_fb_resume_all();
- if (IS_ERR_VALUE(ret))
- pr_err("Unable to resume all fb panels (%d)\n", ret);
+ pr_debug("resume done fs=%d\n", mdata->suspend_fs_ena);
- pr_debug("resume done\n");
-
- return ret;
+ return 0;
}
-#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int mdss_mdp_pm_suspend(struct device *dev)
{
@@ -1418,10 +1409,9 @@
return mdss_mdp_resume_sub(mdata);
}
+#endif
-#define mdss_mdp_suspend NULL
-#define mdss_mdp_resume NULL
-#else
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
static int mdss_mdp_suspend(struct platform_device *pdev, pm_message_t state)
{
struct mdss_data_type *mdata = platform_get_drvdata(pdev);
@@ -1445,6 +1435,9 @@
return mdss_mdp_resume_sub(mdata);
}
+#else
+#define mdss_mdp_suspend NULL
+#define mdss_mdp_resume NULL
#endif
#ifdef CONFIG_PM_RUNTIME
@@ -1490,7 +1483,6 @@
return 0;
}
#endif
-#endif
static const struct dev_pm_ops mdss_mdp_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mdss_mdp_pm_suspend, mdss_mdp_pm_resume)
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index 6a41fd4..c640c73 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -200,8 +200,8 @@
max_clk_rate = clk_rate;
}
- /* request minimum bandwidth for dsi commands */
- if ((total_ib_quota == 0) && (ctl->intf_type == MDSS_INTF_DSI))
+ /* request minimum bandwidth to have bus clock on when display is on */
+ if (total_ib_quota == 0)
total_ib_quota = SZ_16M >> MDSS_MDP_BUS_FACTOR_SHIFT;
if (max_clk_rate != ctl->clk_rate) {
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 6862c0e..c1dcc18 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -589,11 +589,11 @@
if (mfd->ctl->power_on)
return 0;
- pr_debug("starting overlay\n");
+ pr_debug("starting fb%d overlay\n", mfd->index);
rc = pm_runtime_get_sync(&mfd->pdev->dev);
- if (rc) {
- pr_err("unable to resume with pm_runtime_get_sync (%d)\n", rc);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("unable to resume with pm_runtime_get_sync rc=%d\n", rc);
return rc;
}
@@ -608,11 +608,6 @@
rc = mdss_mdp_ctl_start(mfd->ctl);
if (rc == 0) {
atomic_inc(&ov_active_panels);
-
- if (mfd->vsync_pending) {
- mfd->vsync_pending = 0;
- mdss_mdp_overlay_vsync_ctrl(mfd, mfd->vsync_pending);
- }
} else {
pr_err("overlay start failed.\n");
mdss_mdp_ctl_destroy(mfd->ctl);
@@ -633,13 +628,23 @@
mutex_lock(&mfd->ov_lock);
mutex_lock(&mfd->lock);
list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
- if (pipe->params_changed || pipe->back_buf.num_planes) {
- ret = mdss_mdp_pipe_queue_data(pipe, &pipe->back_buf);
- if (IS_ERR_VALUE(ret)) {
- pr_warn("Unable to queue data for pnum=%d\n",
- pipe->num);
- mdss_mdp_overlay_free_buf(&pipe->back_buf);
- }
+ struct mdss_mdp_data *buf;
+ if (pipe->back_buf.num_planes) {
+ buf = &pipe->back_buf;
+ } else if (!pipe->params_changed) {
+ continue;
+ } else if (pipe->front_buf.num_planes) {
+ buf = &pipe->front_buf;
+ } else {
+ pr_warn("pipe queue without buffer\n");
+ buf = NULL;
+ }
+
+ ret = mdss_mdp_pipe_queue_data(pipe, buf);
+ if (IS_ERR_VALUE(ret)) {
+ pr_warn("Unable to queue data for pnum=%d\n",
+ pipe->num);
+ mdss_mdp_overlay_free_buf(buf);
}
}
@@ -1107,10 +1112,6 @@
spin_lock_irqsave(&mfd->vsync_lock, flags);
INIT_COMPLETION(mfd->vsync_comp);
- if (en && ctl->play_cnt == 0) {
- mfd->vsync_time = ktime_get();
- complete(&mfd->vsync_comp);
- }
spin_unlock_irqrestore(&mfd->vsync_lock, flags);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
@@ -1139,11 +1140,6 @@
return 0;
timeout = msecs_to_jiffies(VSYNC_PERIOD * 5);
- if (mfd->ctl->play_cnt == 0) {
- pr_debug("timegen enable still pending on fb%d\n", mfd->index);
- timeout <<= 5;
- }
-
ret = wait_for_completion_interruptible_timeout(&mfd->vsync_comp,
timeout);
if (ret <= 0) {
@@ -1454,6 +1450,17 @@
mfd->ctl = ctl;
}
+ if (!mfd->panel_info->cont_splash_enabled) {
+ rc = mdss_mdp_overlay_start(mfd);
+ if (!IS_ERR_VALUE(rc))
+ rc = mdss_mdp_overlay_kickoff(mfd->ctl);
+ }
+
+ if (!IS_ERR_VALUE(rc) && mfd->vsync_pending) {
+ mfd->vsync_pending = 0;
+ mdss_mdp_overlay_vsync_ctrl(mfd, mfd->vsync_pending);
+ }
+
return rc;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index f84fd82..8c88646 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -98,6 +98,9 @@
num_blks = DIV_ROUND_UP(2 * ps.ystride[i],
mdss_res->smp_mb_size);
+ if (mdss_res->mdp_rev == MDSS_MDP_HW_REV_100)
+ num_blks = roundup_pow_of_two(num_blks);
+
pr_debug("reserving %d mmb for pnum=%d plane=%d\n",
num_blks, pipe->num, i);
reserved = mdss_mdp_smp_mmb_reserve(&pipe->smp[i], num_blks);
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index 1d55fa9..d24a7c9 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -516,7 +516,8 @@
if (wb_args.data == NULL) {
pr_err("unable to get writeback buf ctl=%d\n", ctl->num);
- ret = -ENOMEM;
+ /* drop buffer but don't return error */
+ ret = 0;
goto kickoff_fail;
}
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 0715b0b..5f994a0 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -1181,6 +1181,26 @@
bpp = 4;
break;
+ case MDP_BGRA_8888:
+ fix->type = FB_TYPE_PACKED_PIXELS;
+ fix->xpanstep = 1;
+ fix->ypanstep = 1;
+ var->vmode = FB_VMODE_NONINTERLACED;
+ var->blue.offset = 0;
+ var->green.offset = 8;
+ var->red.offset = 16;
+ var->blue.length = 8;
+ var->green.length = 8;
+ var->red.length = 8;
+ var->blue.msb_right = 0;
+ var->green.msb_right = 0;
+ var->red.msb_right = 0;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ bpp = 4;
+ break;
+
+
case MDP_YCRYCB_H2V1:
/* ToDo: need to check TV-Out YUV422i framebuffer format */
/* we might need to create new type define */
@@ -1900,7 +1920,9 @@
break;
case 32:
- if (var->transp.offset == 24)
+ if ((var->transp.offset == 24) && (var->blue.offset == 0))
+ mfd->fb_imgType = MDP_BGRA_8888;
+ else if (var->transp.offset == 24)
mfd->fb_imgType = MDP_ARGB_8888;
else
mfd->fb_imgType = MDP_RGBA_8888;
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
index c544356..9071ef1 100644
--- a/include/asm-generic/dma-contiguous.h
+++ b/include/asm-generic/dma-contiguous.h
@@ -11,15 +11,13 @@
{
if (dev && dev->cma_area)
return dev->cma_area;
- return dma_contiguous_default_area;
+ return dma_contiguous_def_area;
}
static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
{
if (dev)
dev->cma_area = cma;
- if (!dev || !dma_contiguous_default_area)
- dma_contiguous_default_area = cma;
}
#endif
diff --git a/include/linux/bif/consumer.h b/include/linux/bif/consumer.h
new file mode 100644
index 0000000..e4c190e
--- /dev/null
+++ b/include/linux/bif/consumer.h
@@ -0,0 +1,613 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BIF_CONSUMER_H_
+#define _LINUX_BIF_CONSUMER_H_
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+#define BIF_DEVICE_ID_BYTE_LENGTH 8
+#define BIF_UNIQUE_ID_BYTE_LENGTH 10
+#define BIF_UNIQUE_ID_BIT_LENGTH 80
+
+#define BIF_PRIMARY_SLAVE_DEV_ADR 0x01
+
+/**
+ * enum bif_transaction - BIF master bus transaction types
+ * %BIF_TRANS_WD: Write data
+ * %BIF_TRANS_ERA: Extended register address
+ * %BIF_TRANS_WRA: Write register address
+ * %BIF_TRANS_RRA: Read register address
+ * %BIF_TRANS_BC: Bus command
+ * %BIF_TRANS_EDA: Extended device address
+ * %BIF_TRANS_SDA: Slave device address
+ *
+ * These values correspond to BIF word bits: BCF, bit 9, bit 8.
+ * BCF_n bit is inserted automatically.
+ */
+enum bif_transaction {
+ BIF_TRANS_WD = 0x00,
+ BIF_TRANS_ERA = 0x01,
+ BIF_TRANS_WRA = 0x02,
+ BIF_TRANS_RRA = 0x03,
+ BIF_TRANS_BC = 0x04,
+ BIF_TRANS_EDA = 0x05,
+ BIF_TRANS_SDA = 0x06,
+};
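+
+/*
+ * Illustrative sketch only: a consumer holding a bus handle could select
+ * the primary slave by issuing an SDA transaction carrying the primary
+ * slave device address ("ctrl" is a placeholder bus handle):
+ *
+ *	rc = bif_ctrl_raw_transaction(ctrl, BIF_TRANS_SDA,
+ *			BIF_PRIMARY_SLAVE_DEV_ADR);
+ */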
+
+/* BIF slave response components */
+#define BIF_SLAVE_RD_ACK 0x200
+#define BIF_SLAVE_RD_EOT 0x100
+#define BIF_SLAVE_RD_DATA 0x0FF
+#define BIF_SLAVE_RD_ERR 0x0FF
+#define BIF_SLAVE_TACK_ACK 0x200
+#define BIF_SLAVE_TACK_WCNT 0x0FF
+#define BIF_SLAVE_TACK_ERR 0x0FF
+
+/**
+ * enum bif_bus_command - MIPI defined bus commands to use in BC transaction
+ * %BIF_CMD_BRES: Bus reset of all slaves
+ * %BIF_CMD_PDWN: Put all slaves into power down mode
+ * %BIF_CMD_STBY: Put all slaves into standby mode
+ * %BIF_CMD_EINT: Enable interrupts for all slaves
+ * %BIF_CMD_ISTS: Poll interrupt status for all slaves. Expects BQ
+ * response if any slave has a pending interrupt.
+ * %BIF_CMD_RBL: Specify the burst read length for the next read
+ * transaction. Bits 3 to 0 should also be ORed on in
+ * order to specify the number of bytes to read.
+ * %BIF_CMD_RBE: Specify the extended burst read length for the next read
+ * transaction. Bits 3 to 0 should also be ORed on in
+ * order to specify the number of bytes to read. The burst
+ * read length for RBEy and RBLx = 16 * y + x.
+ * %BIF_CMD_DASM: Device activation stick mode. This keeps a slave
+ * selected if it would otherwise become unselected by the
+ * next transaction.
+ * %BIF_CMD_DISS: UID search start
+ * %BIF_CMD_DILC: UID length check. Expects BQ response if all 80 UID
+ * bits for a given slave have been entered.
+ * %BIF_CMD_DIE0: UID search enter 0
+ * %BIF_CMD_DIE1: UID search enter 1
+ * %BIF_CMD_DIP0: UID search probe 0
+ * %BIF_CMD_DIP1: UID search probe 1
+ * %BIF_CMD_DRES: Device reset of selected slaves
+ * %BIF_CMD_TQ: Transaction query; expects TACK response
+ * %BIF_CMD_AIO: Address increment off for the next transaction
+ *
+ * These values correspond to BIF word bits 7 to 0.
+ */
+enum bif_bus_command {
+ BIF_CMD_BRES = 0x00,
+ BIF_CMD_PDWN = 0x02,
+ BIF_CMD_STBY = 0x03,
+ BIF_CMD_EINT = 0x10,
+ BIF_CMD_ISTS = 0x11,
+ BIF_CMD_RBL = 0x20,
+ BIF_CMD_RBE = 0x30,
+ BIF_CMD_DASM = 0x40,
+ BIF_CMD_DISS = 0x80,
+ BIF_CMD_DILC = 0x81,
+ BIF_CMD_DIE0 = 0x84,
+ BIF_CMD_DIE1 = 0x85,
+ BIF_CMD_DIP0 = 0x86,
+ BIF_CMD_DIP1 = 0x87,
+ BIF_CMD_DRES = 0xC0,
+ BIF_CMD_TQ = 0xC2,
+ BIF_CMD_AIO = 0xC4,
+};
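+
+/*
+ * Example of the RBE/RBL burst length encoding described above (sketch
+ * only, "ctrl" is a placeholder): a 20 byte burst read (16 * 1 + 4)
+ * could be requested before the read transaction itself with:
+ *
+ *	bif_ctrl_raw_transaction(ctrl, BIF_TRANS_BC, BIF_CMD_RBE | 1);
+ *	bif_ctrl_raw_transaction(ctrl, BIF_TRANS_BC, BIF_CMD_RBL | 4);
+ */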
+
+/**
+ * struct bif_ddb_l1_data - MIPI defined L1 DDB data structure
+ * @revision: DDB version; should be 0x10 for DDB v1.0
+ * @level: DDB level support; should be 0x03 for DDB L1 and L2
+ * @device_class: MIPI device class; should be 0x0800
+ * @manufacturer_id: Manufacturer ID number allocated by MIPI
+ * @product_id: Manufacturer specified product ID number
+ * @length: Size of L2 function directory in bytes
+ */
+struct bif_ddb_l1_data {
+ u8 revision;
+ u8 level;
+ u16 device_class;
+ u16 manufacturer_id;
+ u16 product_id;
+ u16 length;
+};
+
+/**
+ * struct bif_ddb_l2_data - MIPI defined L2 DDB function data structure
+ * @function_type: Defines the type of the function. The type may be
+ * either MIPI or manufacturer defined.
+ * @function_version: Defines the version of the function. The version may
+ * be either MIPI or manufacturer defined.
+ * @function_pointer: Address in BIF slave memory where the register map for
+ * the function begins.
+ */
+struct bif_ddb_l2_data {
+ u8 function_type;
+ u8 function_version;
+ u16 function_pointer;
+};
+
+/**
+ * enum bif_mipi_function_type - MIPI defined DDB L2 function types
+ * %BIF_FUNC_PROTOCOL: Protocol function which provides access to core
+ * BIF communication features.
+ * %BIF_FUNC_SLAVE_CONTROL: Slave control function which provides control
+ * for BIF slave interrupts and tasks.
+ * %BIF_FUNC_TEMPERATURE: Temperature sensor function which provides a
+ * means to accurately read the battery temperature
+ * in a single-shot or periodic fashion.
+ * %BIF_FUNC_NVM: Non-volatile memory function which provides a
+ * means to store data onto a BIF slave that is
+ * non-volatile. Secondary slave objects are also
+ * found through the NVM function.
+ * %BIF_FUNC_AUTHENTICATION: Authentication function which provides a means
+ * to authenticate batteries. This function does
+ * not have a MIPI defined implimentation. Instead
+ * all aspects of the authentication function are
+ * left to the discretion of the manufacturer.
+ */
+enum bif_mipi_function_type {
+ BIF_FUNC_PROTOCOL = 0x01,
+ BIF_FUNC_SLAVE_CONTROL = 0x02,
+ BIF_FUNC_TEMPERATURE = 0x03,
+ BIF_FUNC_NVM = 0x04,
+ BIF_FUNC_AUTHENTICATION = 0x05,
+};
+
+#define BIF_DDB_L1_BASE_ADDR 0x0000
+#define BIF_DDB_L2_BASE_ADDR 0x000A
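+
+/*
+ * Illustrative sketch: the 10 byte L1 DDB of a previously selected slave
+ * could be read into a raw buffer starting at BIF_DDB_L1_BASE_ADDR and
+ * then parsed into a struct bif_ddb_l1_data by the caller ("slave" is a
+ * placeholder handle):
+ *
+ *	u8 raw[10];
+ *
+ *	rc = bif_slave_read(slave, BIF_DDB_L1_BASE_ADDR, raw, sizeof(raw));
+ */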
+
+/**
+ * enum bif_slave_error_code - MIPI defined BIF slave error codes
+ * %BIF_ERR_NONE: No error occurred
+ * %BIF_ERR_GENERAL: An unenumerated error occurred
+ * %BIF_ERR_PARITY: A Hamming-15 parity check failed for a word
+ * sent on the bus
+ * %BIF_ERR_INVERSION: More than 8 bits in a word were 1
+ * %BIF_ERR_BAD_LENGTH: Word had more or less than 17 bits
+ * %BIF_ERR_TIMING: Bit timing was violated in a word
+ * %BIF_ERR_UNKNOWN_CMD: Bus command was unknown to the slave
+ * %BIF_ERR_CMD_SEQ: Commands with ordering dependency were not
+ * sent in the right order
+ * %BIF_ERR_BUS_COLLISION: BCL was already low at the beginning of a new
+ * transaction
+ * %BIF_ERR_SLAVE_BUSY: Slave is busy and cannot respond
+ * %BIF_ERR_FATAL: Slave is in an unrecoverable error state and
+ * must be reset
+ *
+ * These values are present in the ERR portion of an RD or TACK slave response
+ * word. These values can also be found in the ERR_CODE register of the
+ * protocol function.
+ */
+enum bif_slave_error_code {
+ BIF_ERR_NONE = 0x00,
+ BIF_ERR_GENERAL = 0x10,
+ BIF_ERR_PARITY = 0x11,
+ BIF_ERR_INVERSION = 0x12,
+ BIF_ERR_BAD_LENGTH = 0x13,
+ BIF_ERR_TIMING = 0x14,
+ BIF_ERR_UNKNOWN_CMD = 0x15,
+ BIF_ERR_CMD_SEQ = 0x16,
+ BIF_ERR_BUS_COLLISION = 0x1F,
+ BIF_ERR_SLAVE_BUSY = 0x20,
+ BIF_ERR_FATAL = 0x7F,
+};
+
+/**
+ * struct bif_protocol_function - constant data present in protocol function
+ * @l2_entry: Pointer to protocol function L2 DDB data struct
+ * @protocol_pointer: BIF slave address where protocol registers begin
+ * @device_id_pointer: BIF slave address where device ID begins
+ * @device_id: The 8-byte unique device ID in MSB to LSB order
+ */
+struct bif_protocol_function {
+ struct bif_ddb_l2_data *l2_entry;
+ u16 protocol_pointer;
+ u16 device_id_pointer;
+ u8 device_id[BIF_DEVICE_ID_BYTE_LENGTH]; /* Unique ID */
+};
+
+#define PROTOCOL_FUNC_DEV_ADR_ADDR(protocol_pointer) ((protocol_pointer) + 0)
+#define PROTOCOL_FUNC_ERR_CODE_ADDR(protocol_pointer) ((protocol_pointer) + 2)
+#define PROTOCOL_FUNC_ERR_CNT_ADDR(protocol_pointer) ((protocol_pointer) + 3)
+#define PROTOCOL_FUNC_WORD_CNT_ADDR(protocol_pointer) ((protocol_pointer) + 4)
+
+/**
+ * struct bif_slave_control_function - constant data present in slave control
+ *				function as well as internal software state parameters
+ * @l2_entry: Pointer to slave control function L2 DDB data struct
+ * @slave_ctrl_pointer: BIF slave address where slave control registers begin
+ * @task_count: Number of tasks supported by the slave
+ * @irq_notifier_list: List of notifiers for consumers drivers that wish to be
+ * notified when any given interrupt triggers. This list
+ * is dynamically allocated with length task_count.
+ */
+struct bif_slave_control_function {
+ struct bif_ddb_l2_data *l2_entry;
+ u16 slave_ctrl_pointer;
+ unsigned int task_count;
+ struct blocking_notifier_head *irq_notifier_list;
+};
+
+#define SLAVE_CTRL_TASKS_PER_SET 8
+
+/**
+ * bif_slave_control_task_is_valid() - returns true if the specified task
+ * is supported by the slave or false if it isn't
+ * @func: Pointer to slave's slave control function structure
+ * @task: Slave task number to check
+ */
+static inline bool
+bif_slave_control_task_is_valid(struct bif_slave_control_function *func,
+ unsigned int task)
+{
+ return func ? task < func->task_count : false;
+}
+
+#define SLAVE_CTRL_FUNC_IRQ_EN_ADDR(slave_ctrl_pointer, task) \
+ ((slave_ctrl_pointer) + 4 * ((task) / SLAVE_CTRL_TASKS_PER_SET) + 0)
+
+#define SLAVE_CTRL_FUNC_IRQ_STATUS_ADDR(slave_ctrl_pointer, task) \
+ ((slave_ctrl_pointer) + 4 * ((task) / SLAVE_CTRL_TASKS_PER_SET) + 1)
+#define SLAVE_CTRL_FUNC_IRQ_CLEAR_ADDR(slave_ctrl_pointer, task) \
+ SLAVE_CTRL_FUNC_IRQ_STATUS_ADDR(slave_ctrl_pointer, task)
+
+#define SLAVE_CTRL_FUNC_TASK_TRIGGER_ADDR(slave_ctrl_pointer, task) \
+ ((slave_ctrl_pointer) + 4 * ((task) / SLAVE_CTRL_TASKS_PER_SET) + 2)
+#define SLAVE_CTRL_FUNC_TASK_BUSY_ADDR(slave_ctrl_pointer, task) \
+ SLAVE_CTRL_FUNC_TASK_TRIGGER_ADDR(slave_ctrl_pointer, task)
+
+#define SLAVE_CTRL_FUNC_TASK_AUTO_TRIGGER_ADDR(slave_ctrl_pointer, task) \
+ ((slave_ctrl_pointer) + 4 * ((task) / SLAVE_CTRL_TASKS_PER_SET) + 3)
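+
+/*
+ * Worked example of the addressing scheme above (register values are
+ * arbitrary): with slave_ctrl_pointer = 0x0400, task 10 falls in the
+ * second set of SLAVE_CTRL_TASKS_PER_SET tasks, so its interrupt enable
+ * register is at 0x0400 + 4 * (10 / 8) + 0 = 0x0404 and its interrupt
+ * status/clear register is at 0x0405.
+ */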
+
+/**
+ * struct bif_temperature_function - constant data present in temperature
+ * sensor function
+ * @temperature_pointer:	BIF slave address where temperature sensor
+ * control registers begin
+ * @slave_control_channel: Slave control channel associated with the
+ * temperature sensor function. This channel is
+ * also the task number.
+ * @accuracy_pointer: BIF slave address where temperature accuracy
+ * registers begin
+ */
+struct bif_temperature_function {
+ u16 temperature_pointer;
+ u8 slave_control_channel;
+ u16 accuracy_pointer;
+};
+
+/**
+ * enum bif_mipi_object_type - MIPI defined BIF object types
+ * %BIF_OBJ_END_OF_LIST: Indicates that the end of the object list in
+ * NVM has been reached
+ * %BIF_OBJ_SEC_SLAVE: Specifies the UIDs of secondary slaves found
+ * inside of the battery pack
+ * %BIF_OBJ_BATT_PARAM: Specifies some variety of battery parameter.
+ * There is no MIPI defined format for this object
+ * type so parsing is manufacturer specific.
+ */
+enum bif_mipi_object_type {
+ BIF_OBJ_END_OF_LIST = 0x00,
+ BIF_OBJ_SEC_SLAVE = 0x01,
+ BIF_OBJ_BATT_PARAM = 0x02,
+};
+
+/**
+ * struct bif_object - contains all header and data information for a slave
+ * data object
+ * @type: Object type
+ * @version: Object version
+ * @manufacturer_id: Manufacturer ID number allocated by MIPI
+ * @length: Length of the entire object including header and CRC
+ * @data: Raw byte data found in the object
+ * @crc: CRC of the object calculated using CRC-CCITT
+ * @list: Linked-list connection parameter
+ * @addr:		BIF slave address corresponding to the start of the object
+ *
+ * manufacturer_id is 0x0000 if the object type and version are MIPI defined.
+ */
+struct bif_object {
+ u8 type;
+ u8 version;
+ u16 manufacturer_id;
+ u16 length;
+ u8 *data;
+ u16 crc;
+ struct list_head list;
+ u16 addr;
+};
+
+/**
+ * struct bif_nvm_function - constant data present in non-volatile memory
+ *				function as well as internal software state
+ * parameters
+ * @nvm_pointer: BIF slave address where NVM registers begin
+ * @slave_control_channel: Slave control channel associated with the
+ * NVM function. This channel is also the task
+ * number.
+ * @write_buffer_size: Size in bytes of the NVM write buffer. 0x00
+ * is used to denote a 256 byte buffer.
+ * @nvm_base_address: BIF slave address where NVM begins
+ * @nvm_size: NVM size in bytes
+ * @object_count: Number of BIF objects read from NVM
+ * @object_list: List of BIF objects read from NVM
+ */
+struct bif_nvm_function {
+ u16 nvm_pointer;
+ u8 slave_control_channel;
+ u8 write_buffer_size;
+ u16 nvm_base_address;
+ u16 nvm_size;
+ int object_count;
+ struct list_head object_list;
+};
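+
+/*
+ * Illustrative sketch: once NVM parsing has populated object_list, a
+ * consumer could walk the parsed objects with the standard list API
+ * ("nvm" is a placeholder pointer to this structure):
+ *
+ *	struct bif_object *obj;
+ *
+ *	list_for_each_entry(obj, &nvm->object_list, list)
+ *		pr_debug("object type=0x%02X, len=%d\n", obj->type,
+ *			obj->length);
+ */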
+
+/**
+ * struct bif_ctrl - Opaque handle for a BIF controller to be used in bus
+ * oriented BIF function calls.
+ */
+struct bif_ctrl;
+
+/**
+ * struct bif_slave - Opaque handle for a BIF slave to be used in slave oriented
+ * BIF function calls.
+ */
+struct bif_slave;
+
+/**
+ * enum bif_bus_state - indicates the current or desired state of the BIF bus
+ * %BIF_BUS_STATE_MASTER_DISABLED: BIF host hardware is disabled
+ * %BIF_BUS_STATE_POWER_DOWN: BIF bus is in power down state and
+ * BCL is not being pulled high
+ * %BIF_BUS_STATE_STANDBY: BIF slaves are in standby state in which
+ * less power is drawn
+ * %BIF_BUS_STATE_ACTIVE: BIF slaves are ready for immediate
+ * communications
+ * %BIF_BUS_STATE_INTERRUPT: BIF bus is active, but no communication
+ * is possible. Instead, either one of the
+ * slaves or the master must transition to
+ * active state by pulling BCL low for 1
+ *				tau_bif period.
+ */
+enum bif_bus_state {
+ BIF_BUS_STATE_MASTER_DISABLED,
+ BIF_BUS_STATE_POWER_DOWN,
+ BIF_BUS_STATE_STANDBY,
+ BIF_BUS_STATE_ACTIVE,
+ BIF_BUS_STATE_INTERRUPT,
+};
+
+/**
+ * enum bif_bus_event - events that the BIF framework may send to BIF consumers
+ * %BIF_BUS_EVENT_BATTERY_INSERTED: Indicates that a battery was just
+ * inserted physically or that the BIF
+ * host controller for the battery just
+ * probed and a battery was already
+ * present.
+ * %BIF_BUS_EVENT_BATTERY_REMOVED: Indicates that a battery was just
+ * removed and thus its slaves are no
+ * longer accessible.
+ */
+enum bif_bus_event {
+ BIF_BUS_EVENT_BATTERY_INSERTED,
+ BIF_BUS_EVENT_BATTERY_REMOVED,
+};
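+
+/*
+ * Illustrative sketch of how a consumer might watch for the above events
+ * (the callback and notifier names are placeholders):
+ *
+ *	static int example_bif_cb(struct notifier_block *nb,
+ *			unsigned long event, void *data)
+ *	{
+ *		if (event == BIF_BUS_EVENT_BATTERY_INSERTED)
+ *			pr_debug("battery inserted\n");
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block example_nb = {
+ *		.notifier_call = example_bif_cb,
+ *	};
+ *
+ *	rc = bif_ctrl_notifier_register(ctrl, &example_nb);
+ */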
+
+/* Mask values to be ORed together for use in bif_match_criteria.match_mask. */
+#define BIF_MATCH_MANUFACTURER_ID BIT(0)
+#define BIF_MATCH_PRODUCT_ID BIT(1)
+#define BIF_MATCH_FUNCTION_TYPE BIT(2)
+#define BIF_MATCH_FUNCTION_VERSION BIT(3)
+#define BIF_MATCH_IGNORE_PRESENCE BIT(4)
+
+/**
+ * struct bif_match_criteria - specifies the matching criteria that a BIF
+ * consumer uses to find an appropriate BIF slave
+ * @match_mask: Mask value specifying which parameters to match upon.
+ * This value should be some ORed combination of
+ * BIF_MATCH_* specified above.
+ * @manufacturer_id: Manufacturer ID number allocated by MIPI
+ * @product_id: Manufacturer specified product ID number
+ * @function_type: Defines the type of the function. The type may be
+ * either MIPI or manufacturer defined.
+ * @function_version: Defines the version of the function. The version may
+ * be either MIPI or manufacturer defined.
+ * @ignore_presence: If true, then slaves that are currently not present
+ * will be successfully matched against. By default, only
+ * present slaves can be matched.
+ */
+struct bif_match_criteria {
+ u32 match_mask;
+ u16 manufacturer_id;
+ u16 product_id;
+ u8 function_type;
+ u8 function_version;
+ bool ignore_presence;
+};
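+
+/*
+ * Illustrative sketch: a consumer looking for the first present slave
+ * exposing the NVM function might fill the criteria as follows and then
+ * request a matching slave handle (error handling omitted):
+ *
+ *	struct bif_match_criteria criteria = {
+ *		.match_mask = BIF_MATCH_FUNCTION_TYPE,
+ *		.function_type = BIF_FUNC_NVM,
+ *	};
+ *	struct bif_slave *slave;
+ *
+ *	slave = bif_slave_match_get(ctrl, 0, &criteria);
+ */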
+
+/**
+ * bif_battery_rid_ranges - MIPI-BIF defined Rid battery pack resistance ranges
+ * %BIF_BATT_RID_SPECIAL1_MIN: Minimum Rid for special case 1
+ * %BIF_BATT_RID_SPECIAL1_MAX: Maximum Rid for special case 1
+ * %BIF_BATT_RID_SPECIAL2_MIN: Minimum Rid for special case 2
+ * %BIF_BATT_RID_SPECIAL2_MAX: Maximum Rid for special case 2
+ * %BIF_BATT_RID_SPECIAL3_MIN: Minimum Rid for special case 3
+ * %BIF_BATT_RID_SPECIAL3_MAX: Maximum Rid for special case 3
+ * %BIF_BATT_RID_LOW_COST_MIN: Minimum Rid for a low cost battery pack
+ * %BIF_BATT_RID_LOW_COST_MAX: Maximum Rid for a low cost battery pack
+ * %BIF_BATT_RID_SMART_MIN: Minimum Rid for a smart battery pack
+ * %BIF_BATT_RID_SMART_MAX: Maximum Rid for a smart battery pack
+ */
+enum bif_battery_rid_ranges {
+ BIF_BATT_RID_SPECIAL1_MIN = 0,
+ BIF_BATT_RID_SPECIAL1_MAX = 1,
+ BIF_BATT_RID_SPECIAL2_MIN = 7350,
+ BIF_BATT_RID_SPECIAL2_MAX = 7650,
+ BIF_BATT_RID_SPECIAL3_MIN = 12740,
+ BIF_BATT_RID_SPECIAL3_MAX = 13260,
+ BIF_BATT_RID_LOW_COST_MIN = 19600,
+ BIF_BATT_RID_LOW_COST_MAX = 140000,
+ BIF_BATT_RID_SMART_MIN = 240000,
+ BIF_BATT_RID_SMART_MAX = 450000,
+};
+
+#ifdef CONFIG_BIF
+
+int bif_request_irq(struct bif_slave *slave, unsigned int task,
+ struct notifier_block *nb);
+int bif_free_irq(struct bif_slave *slave, unsigned int task,
+ struct notifier_block *nb);
+
+int bif_trigger_task(struct bif_slave *slave, unsigned int task);
+int bif_task_is_busy(struct bif_slave *slave, unsigned int task);
+
+int bif_ctrl_count(void);
+struct bif_ctrl *bif_ctrl_get_by_id(unsigned int id);
+struct bif_ctrl *bif_ctrl_get(struct device *consumer_dev);
+void bif_ctrl_put(struct bif_ctrl *ctrl);
+
+int bif_ctrl_signal_battery_changed(struct bif_ctrl *ctrl);
+
+int bif_slave_match_count(const struct bif_ctrl *ctrl,
+ const struct bif_match_criteria *match_criteria);
+
+struct bif_slave *bif_slave_match_get(const struct bif_ctrl *ctrl,
+ unsigned int id, const struct bif_match_criteria *match_criteria);
+
+void bif_slave_put(struct bif_slave *slave);
+
+int bif_ctrl_notifier_register(struct bif_ctrl *ctrl,
+ struct notifier_block *nb);
+
+int bif_ctrl_notifier_unregister(struct bif_ctrl *ctrl,
+ struct notifier_block *nb);
+
+struct bif_ctrl *bif_get_ctrl_handle(struct bif_slave *slave);
+
+int bif_slave_find_function(struct bif_slave *slave, u8 function, u8 *version,
+ u16 *function_pointer);
+
+int bif_slave_read(struct bif_slave *slave, u16 addr, u8 *buf, int len);
+int bif_slave_write(struct bif_slave *slave, u16 addr, u8 *buf, int len);
+
+int bif_slave_is_present(struct bif_slave *slave);
+
+int bif_slave_is_selected(struct bif_slave *slave);
+int bif_slave_select(struct bif_slave *slave);
+
+int bif_ctrl_raw_transaction(struct bif_ctrl *ctrl, int transaction, u8 data);
+int bif_ctrl_raw_transaction_read(struct bif_ctrl *ctrl, int transaction,
+ u8 data, int *response);
+int bif_ctrl_raw_transaction_query(struct bif_ctrl *ctrl, int transaction,
+ u8 data, bool *query_response);
+
+void bif_ctrl_bus_lock(struct bif_ctrl *ctrl);
+void bif_ctrl_bus_unlock(struct bif_ctrl *ctrl);
+
+u16 bif_crc_ccitt(const u8 *buffer, unsigned int len);
+
+int bif_ctrl_measure_rid(struct bif_ctrl *ctrl);
+int bif_ctrl_get_bus_period(struct bif_ctrl *ctrl);
+int bif_ctrl_set_bus_period(struct bif_ctrl *ctrl, int period_ns);
+int bif_ctrl_get_bus_state(struct bif_ctrl *ctrl);
+int bif_ctrl_set_bus_state(struct bif_ctrl *ctrl, enum bif_bus_state state);
+
+#else
+
+static inline int bif_request_irq(struct bif_slave *slave, unsigned int task,
+ struct notifier_block *nb) { return -EPERM; }
+static inline int bif_free_irq(struct bif_slave *slave, unsigned int task,
+ struct notifier_block *nb) { return -EPERM; }
+
+static inline int bif_trigger_task(struct bif_slave *slave, unsigned int task)
+{ return -EPERM; }
+static inline int bif_task_is_busy(struct bif_slave *slave, unsigned int task)
+{ return -EPERM; }
+
+static inline int bif_ctrl_count(void) { return -EPERM; }
+static inline struct bif_ctrl *bif_ctrl_get_by_id(unsigned int id)
+{ return ERR_PTR(-EPERM); }
+static inline struct bif_ctrl *bif_ctrl_get(struct device *consumer_dev)
+{ return ERR_PTR(-EPERM); }
+static inline void bif_ctrl_put(struct bif_ctrl *ctrl) { return; }
+
+static inline int bif_ctrl_signal_battery_changed(struct bif_ctrl *ctrl)
+{ return -EPERM; }
+
+static inline int bif_slave_match_count(const struct bif_ctrl *ctrl,
+ const struct bif_match_criteria *match_criteria)
+{ return -EPERM; }
+
+static inline struct bif_slave *bif_slave_match_get(const struct bif_ctrl *ctrl,
+ unsigned int id, const struct bif_match_criteria *match_criteria)
+{ return ERR_PTR(-EPERM); }
+
+static inline void bif_slave_put(struct bif_slave *slave) { return; }
+
+static inline int bif_ctrl_notifier_register(struct bif_ctrl *ctrl,
+ struct notifier_block *nb)
+{ return -EPERM; }
+
+static inline int bif_ctrl_notifier_unregister(struct bif_ctrl *ctrl,
+ struct notifier_block *nb)
+{ return -EPERM; }
+
+static inline struct bif_ctrl *bif_get_ctrl_handle(struct bif_slave *slave)
+{ return ERR_PTR(-EPERM); }
+
+static inline int bif_slave_find_function(struct bif_slave *slave, u8 function,
+ u8 *version, u16 *function_pointer)
+{ return -EPERM; }
+
+static inline int bif_slave_read(struct bif_slave *slave, u16 addr, u8 *buf,
+ int len)
+{ return -EPERM; }
+static inline int bif_slave_write(struct bif_slave *slave, u16 addr, u8 *buf,
+ int len)
+{ return -EPERM; }
+
+static inline int bif_slave_is_present(struct bif_slave *slave)
+{ return -EPERM; }
+
+static inline int bif_slave_is_selected(struct bif_slave *slave)
+{ return -EPERM; }
+static inline int bif_slave_select(struct bif_slave *slave)
+{ return -EPERM; }
+
+static inline int bif_ctrl_raw_transaction(struct bif_ctrl *ctrl,
+			int transaction, u8 data)
+{ return -EPERM; }
+static inline int bif_ctrl_raw_transaction_read(struct bif_ctrl *ctrl,
+			int transaction, u8 data, int *response)
+{ return -EPERM; }
+static inline int bif_ctrl_raw_transaction_query(struct bif_ctrl *ctrl,
+			int transaction, u8 data, bool *query_response)
+{ return -EPERM; }
+
+static inline void bif_ctrl_bus_lock(struct bif_ctrl *ctrl)
+{ }
+static inline void bif_ctrl_bus_unlock(struct bif_ctrl *ctrl)
+{ }
+
+static inline u16 bif_crc_ccitt(const u8 *buffer, unsigned int len)
+{ return 0; }
+
+static inline int bif_ctrl_measure_rid(struct bif_ctrl *ctrl) { return -EPERM; }
+static inline int bif_ctrl_get_bus_period(struct bif_ctrl *ctrl)
+{ return -EPERM; }
+static inline int bif_ctrl_set_bus_period(struct bif_ctrl *ctrl, int period_ns)
+{ return -EPERM; }
+static inline int bif_ctrl_get_bus_state(struct bif_ctrl *ctrl)
+{ return -EPERM; }
+static inline int bif_ctrl_set_bus_state(struct bif_ctrl *ctrl,
+			enum bif_bus_state state)
+{ return -EPERM; }
+
+#endif
+
+#endif
diff --git a/include/linux/bif/driver.h b/include/linux/bif/driver.h
new file mode 100644
index 0000000..184d46f
--- /dev/null
+++ b/include/linux/bif/driver.h
@@ -0,0 +1,161 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BIF_DRIVER_H_
+#define _LINUX_BIF_DRIVER_H_
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/bif/consumer.h>
+
+/**
+ * struct bif_ctrl_dev - opaque handle used to identify a given BIF controller
+ * device
+ */
+struct bif_ctrl_dev;
+
+/**
+ * struct bif_ctrl_ops - BIF operations which may be implemented by BIF
+ * controller drivers
+ * @bus_transaction: Perform the specified BIF transaction which does
+ * not result in any slave response.
+ * @bus_transaction_query: Perform the specified BIF transaction which
+ * expects a BQ response in the case of slave
+ * positive acknowledgement.
+ * @bus_transaction_read: Perform the specified BIF transaction which
+ * expects an RD or TACK response from the selected
+ * slave.
+ * @read_slave_registers: Perform all BIF transactions necessary to read
+ * the specified set of contiguous registers from
+ * the previously selected slave. This operation
+ * is used to optimize the common case of slave
+ *				register reads since a BIF controller driver
+ *				can take advantage of BIF burst reads while the
+ *				BIF core driver cannot due to the inherent
+ * tight timing requirements.
+ * @write_slave_registers: Perform all BIF transactions necessary to write
+ * the specified set of contiguous registers to
+ * the previously selected slave. This operation
+ * is used to optimize the common case of slave
+ *				register writes since a BIF controller
+ * driver can remove redundant steps when
+ * performing several WD commands in a row.
+ * @get_bus_period: Return the tau_bif BIF bus clock period in
+ * nanoseconds.
+ * @set_bus_period: Set the tau_bif BIF bus clock period in
+ * nanoseconds. If the exact period is not
+ * supported by the BIF controller hardware, then
+ * the next larger supported period should be used.
+ * @get_battery_presence: Return the current state of the battery pack.
+ * If a battery pack is present, then return >= 1.
+ * If a battery pack is not present, then return 0.
+ * If an error occurs during presence detection,
+ * then return errno.
+ * @get_battery_rid: Return the measured value of the Rid battery
+ * pack pull-down resistor in ohms.
+ * @get_bus_state: Return the current bus state as defined by one
+ * of the enum bif_bus_state values.
+ * @set_bus_state: Set the BIF bus state to the specified enum
+ * bif_bus_state value.
+ *
+ * The following operations must be defined by every BIF controller driver in
+ * order to ensure baseline functionality:
+ * bus_transaction, bus_transaction_query, get_bus_state, and set_bus_state.
+ *
+ * The BIF core driver is unaware of BIF transaction timing constraints. A
+ * given BIF controller driver must ensure that all timing constraints in the
+ * MIPI-BIF specification are met as transactions are carried out.
+ *
+ * Conversion between 11-bit and 17-bit BIF words (i.e. the insertion of BCF_n,
+ * parity bits, and the inversion bit) must be handled inside of the BIF
+ * controller driver (either in software or hardware). This guarantees maximum
+ * performance if hardware support is available.
+ *
+ * The bus_transaction_read operation must return -ETIMEDOUT in the case of no
+ * RD or TACK word received. This allows the transaction query, TQ, command
+ * to be used for slave selection verification.
+ *
+ * It is acceptable for the BIF bus state to be changed autonomously by a BIF
+ * controller driver in response to low level bus actions without a call to
+ * set_bus_state. One example is the case of receiving a slave interrupt
+ * while in interrupt state as this intrinsically causes the bus to enter the
+ * active communication state.
+ */
+struct bif_ctrl_ops {
+ int (*bus_transaction) (struct bif_ctrl_dev *bdev, int transaction,
+ u8 data);
+ int (*bus_transaction_query) (struct bif_ctrl_dev *bdev,
+ int transaction, u8 data,
+ bool *query_response);
+ int (*bus_transaction_read) (struct bif_ctrl_dev *bdev,
+ int transaction, u8 data,
+ int *response);
+ int (*read_slave_registers) (struct bif_ctrl_dev *bdev, u16 addr,
+ u8 *data, int len);
+ int (*write_slave_registers) (struct bif_ctrl_dev *bdev, u16 addr,
+ const u8 *data, int len);
+ int (*get_bus_period) (struct bif_ctrl_dev *bdev);
+ int (*set_bus_period) (struct bif_ctrl_dev *bdev, int period_ns);
+ int (*get_battery_presence) (struct bif_ctrl_dev *bdev);
+ int (*get_battery_rid) (struct bif_ctrl_dev *bdev);
+ int (*get_bus_state) (struct bif_ctrl_dev *bdev);
+ int (*set_bus_state) (struct bif_ctrl_dev *bdev, int state);
+};
+
+/**
+ * struct bif_ctrl_desc - BIF bus controller descriptor
+ * @name: Name used to identify the BIF controller
+ * @ops: BIF operations supported by the BIF controller
+ * @bus_clock_min_ns: Minimum tau_bif BIF bus clock period supported by the
+ * BIF controller
+ * @bus_clock_max_ns: Maximum tau_bif BIF bus clock period supported by the
+ * BIF controller
+ *
+ * Each BIF controller registered with the BIF core is described with a
+ * structure of this type.
+ */
+struct bif_ctrl_desc {
+ const char *name;
+ struct bif_ctrl_ops *ops;
+ int bus_clock_min_ns;
+ int bus_clock_max_ns;
+};
+
+#ifdef CONFIG_BIF
+
+struct bif_ctrl_dev *bif_ctrl_register(struct bif_ctrl_desc *bif_desc,
+ struct device *dev, void *driver_data, struct device_node *of_node);
+
+void bif_ctrl_unregister(struct bif_ctrl_dev *bdev);
+
+void *bdev_get_drvdata(struct bif_ctrl_dev *bdev);
+
+int bif_ctrl_notify_battery_changed(struct bif_ctrl_dev *bdev);
+int bif_ctrl_notify_slave_irq(struct bif_ctrl_dev *bdev);
+
+#else
+
+static inline struct bif_ctrl_dev *bif_ctrl_register(
+ struct bif_ctrl_desc *bif_desc, struct device *dev, void *driver_data,
+ struct device_node *of_node)
+{ return ERR_PTR(-EINVAL); }
+
+static inline void bif_ctrl_unregister(struct bif_ctrl_dev *bdev) { }
+
+static inline void *bdev_get_drvdata(struct bif_ctrl_dev *bdev) { return NULL; }
+
+static inline int bif_ctrl_notify_battery_changed(struct bif_ctrl_dev *bdev)
+{ return -EINVAL; }
+
+static inline int bif_ctrl_notify_slave_irq(struct bif_ctrl_dev *bdev)
+{ return -EINVAL; }
+
+#endif
+
+#endif
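
For illustration, here is a minimal sketch of how a hypothetical controller driver might fill in bif_ctrl_desc and register with the BIF core through the interface above. The driver name, clock limits, header path, and the empty op bodies are assumptions rather than anything taken from this patch; a real driver must also meet the MIPI-BIF timing rules called out in the kernel-doc.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/bif/driver.h>	/* assumed location of the header above */

/* hypothetical per-controller state */
struct foo_bif_chip {
	struct bif_ctrl_dev *bdev;
	int bus_state;
};

/* placeholder: would drive one BIF word on the wire, no response expected */
static int foo_bus_transaction(struct bif_ctrl_dev *bdev, int transaction,
				u8 data)
{
	return 0;
}

/* placeholder: would report whether a BQ pulse was observed */
static int foo_bus_transaction_query(struct bif_ctrl_dev *bdev,
				int transaction, u8 data, bool *query_response)
{
	*query_response = false;
	return 0;
}

static int foo_get_bus_state(struct bif_ctrl_dev *bdev)
{
	struct foo_bif_chip *chip = bdev_get_drvdata(bdev);

	return chip->bus_state;
}

static int foo_set_bus_state(struct bif_ctrl_dev *bdev, int state)
{
	struct foo_bif_chip *chip = bdev_get_drvdata(bdev);

	chip->bus_state = state;	/* a real driver programs the HW here */
	return 0;
}

/* the four baseline operations required by the kernel-doc above */
static struct bif_ctrl_ops foo_bif_ops = {
	.bus_transaction	= foo_bus_transaction,
	.bus_transaction_query	= foo_bus_transaction_query,
	.get_bus_state		= foo_get_bus_state,
	.set_bus_state		= foo_set_bus_state,
};

static struct bif_ctrl_desc foo_bif_desc = {
	.name			= "foo-bif",	/* illustrative values */
	.ops			= &foo_bif_ops,
	.bus_clock_min_ns	= 2000,
	.bus_clock_max_ns	= 150000,
};

static int foo_bif_probe(struct platform_device *pdev)
{
	struct foo_bif_chip *chip;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->bdev = bif_ctrl_register(&foo_bif_desc, &pdev->dev, chip,
					pdev->dev.of_node);
	return IS_ERR(chip->bdev) ? PTR_ERR(chip->bdev) : 0;
}

On removal such a driver would call bif_ctrl_unregister() on the handle returned by bif_ctrl_register().
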
diff --git a/include/linux/coresight-cti.h b/include/linux/coresight-cti.h
new file mode 100644
index 0000000..7f2da3f
--- /dev/null
+++ b/include/linux/coresight-cti.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_CORESIGHT_CTI_H
+#define _LINUX_CORESIGHT_CTI_H
+
+struct coresight_cti_data {
+ int nr_ctis;
+ const char **names;
+};
+
+struct coresight_cti {
+ const char *name;
+ struct list_head link;
+};
+
+#ifdef CONFIG_CORESIGHT_CTI
+extern struct coresight_cti *coresight_cti_get(const char *name);
+extern void coresight_cti_put(struct coresight_cti *cti);
+extern int coresight_cti_map_trigin(
+ struct coresight_cti *cti, int trig, int ch);
+extern int coresight_cti_map_trigout(
+ struct coresight_cti *cti, int trig, int ch);
+extern void coresight_cti_unmap_trigin(
+ struct coresight_cti *cti, int trig, int ch);
+extern void coresight_cti_unmap_trigout(
+ struct coresight_cti *cti, int trig, int ch);
+#else
+static inline struct coresight_cti *coresight_cti_get(const char *name)
+{
+ return NULL;
+}
+static inline void coresight_cti_put(struct coresight_cti *cti) {}
+static inline int coresight_cti_map_trigin(
+ struct coresight_cti *cti, int trig, int ch)
+{
+ return -ENOSYS;
+}
+static inline int coresight_cti_map_trigout(
+ struct coresight_cti *cti, int trig, int ch)
+{
+ return -ENOSYS;
+}
+static inline void coresight_cti_unmap_trigin(
+ struct coresight_cti *cti, int trig, int ch) {}
+static inline void coresight_cti_unmap_trigout(
+ struct coresight_cti *cti, int trig, int ch) {}
+#endif
+
+#endif
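
A minimal sketch of how a CoreSight component driver might consume this API. The CTI name, trigger and channel numbers, and the NULL/ERR_PTR error convention are all assumptions; none of them are fixed by this header.

#include <linux/err.h>
#include <linux/coresight-cti.h>

static struct coresight_cti *example_cti;

static int example_cti_setup(void)
{
	/* "cti-cpu0" is a made-up name; real names come from the DT property */
	example_cti = coresight_cti_get("cti-cpu0");
	if (IS_ERR_OR_NULL(example_cti))
		return example_cti ? PTR_ERR(example_cti) : -ENODEV;

	/* route trigger input 0 onto cross-trigger channel 2 */
	return coresight_cti_map_trigin(example_cti, 0, 2);
}

static void example_cti_teardown(void)
{
	coresight_cti_unmap_trigin(example_cti, 0, 2);
	coresight_cti_put(example_cti);
}
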
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 6c26a3d..5ab7183 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -57,6 +57,7 @@
/* Idle State Flags */
#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
+#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
@@ -100,6 +101,12 @@
struct list_head device_list;
struct kobject kobj;
struct completion kobj_unregister;
+
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+ int safe_state_index;
+ cpumask_t coupled_cpus;
+ struct cpuidle_coupled *coupled;
+#endif
};
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -176,6 +183,10 @@
#endif
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
+#endif
+
/******************************
* CPUIDLE GOVERNOR INTERFACE *
******************************/
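
A rough sketch of how platform cpuidle code might use the coupled fields, assuming CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is selected and that state index 1 stands in for a power state all CPUs must enter together; both of these are assumptions, not something taken from this patch.

#include <linux/cpuidle.h>
#include <linux/cpumask.h>

static int example_register_coupled(struct cpuidle_driver *drv,
				    struct cpuidle_device *dev, int cpu)
{
	/* mark the deep state as one that applies to multiple cpus */
	drv->states[1].flags |= CPUIDLE_FLAG_COUPLED;

	dev->cpu = cpu;
	dev->safe_state_index = 0;	/* shallow state used while waiting */
	cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);

	return cpuidle_register_device(dev);
}
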
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 2f303e4..285b593 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -65,11 +65,37 @@
*/
#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
-extern struct cma *dma_contiguous_default_area;
+extern struct cma *dma_contiguous_def_area;
void dma_contiguous_reserve(phys_addr_t addr_limit);
-int dma_declare_contiguous(struct device *dev, unsigned long size,
- phys_addr_t base, phys_addr_t limit);
+
+int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
+ phys_addr_t limit);
+
+int dma_contiguous_add_device(struct device *dev, phys_addr_t base);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ * for particular device
+ * @dev: Pointer to device structure.
+ * @size: Size of the reserved memory.
+ * @base: Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for the specified device. It should be
+ * called by board specific code while the early allocator (memblock or
+ * bootmem) is still active.
+ */
+
+static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
+ phys_addr_t base, phys_addr_t limit)
+{
+ int ret;
+ ret = dma_contiguous_reserve_area(size, &base, limit);
+ if (ret == 0)
+ ret = dma_contiguous_add_device(dev, base);
+ return ret;
+}
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
unsigned int order);
@@ -83,7 +109,7 @@
static inline void dma_contiguous_reserve(phys_addr_t limit) { }
static inline
-int dma_declare_contiguous(struct device *dev, unsigned long size,
+int dma_declare_contiguous(struct device *dev, phys_addr_t size,
phys_addr_t base, phys_addr_t limit)
{
return -ENOSYS;
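
A small usage sketch for the reworked helper: hypothetical board code reserving a region for a made-up device while memblock is still active. The device name, region size, and the "no constraint" base/limit values are illustrative only.

#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/printk.h>

extern struct device example_video_dev;		/* hypothetical device */

static void __init example_reserve_cma(void)
{
	/* 16 MiB anywhere in memory; 0 for base and limit means no constraint */
	if (dma_declare_contiguous(&example_video_dev, 16 * 1024 * 1024, 0, 0))
		pr_warn("example: CMA reservation failed\n");
}
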
diff --git a/include/linux/i2c/atmel_mxt_ts.h b/include/linux/i2c/atmel_mxt_ts.h
index b96ba84..b903dfb 100644
--- a/include/linux/i2c/atmel_mxt_ts.h
+++ b/include/linux/i2c/atmel_mxt_ts.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2010 Samsung Electronics Co.Ltd
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -32,8 +32,10 @@
/* Bootoader IDs */
#define MXT_BOOTLOADER_ID_224 0x0A
#define MXT_BOOTLOADER_ID_224E 0x06
+#define MXT_BOOTLOADER_ID_336S 0x1A
#define MXT_BOOTLOADER_ID_1386 0x01
#define MXT_BOOTLOADER_ID_1386E 0x10
+#define MXT_BOOTLOADER_ID_1664S 0x14
/* Config data for a given maXTouch controller with a specific firmware */
struct mxt_config_info {
@@ -75,6 +77,7 @@
int *key_codes;
bool need_calibration;
bool no_force_update;
+ u8 bl_addr;
u8(*read_chg) (void);
int (*init_hw) (bool);
diff --git a/include/linux/input/synaptics_dsx.h b/include/linux/input/synaptics_dsx.h
index b779e42..9d03787 100644
--- a/include/linux/input/synaptics_dsx.h
+++ b/include/linux/input/synaptics_dsx.h
@@ -5,6 +5,7 @@
*
* Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
* Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -47,6 +48,7 @@
bool x_flip;
bool y_flip;
bool regulator_en;
+ bool i2c_pull_up;
unsigned irq_gpio;
unsigned long irq_flags;
unsigned reset_gpio;
diff --git a/include/linux/leds-pm8xxx.h b/include/linux/leds-pm8xxx.h
index 1e672e3..e912585 100644
--- a/include/linux/leds-pm8xxx.h
+++ b/include/linux/leds-pm8xxx.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,10 @@
#define PM8XXX_LEDS_DEV_NAME "pm8xxx-led"
+#define WLED_FIRST_STRING (1 << 2)
+#define WLED_SECOND_STRING (1 << 1)
+#define WLED_THIRD_STRING (1 << 0)
+
/**
* enum pm8xxx_leds - PMIC8XXX supported led ids
* @PM8XXX_ID_LED_KB_LIGHT - keyboard backlight led
@@ -77,7 +81,7 @@
/**
* wled_config_data - wled configuration data
- * @num_strings - number of wled strings supported
+ * @strings - bitmask of supported wled strings
* @ovp_val - over voltage protection threshold
* @boost_curr_lim - boot current limit
* @cp_select - high pole capacitance
@@ -86,9 +90,10 @@
* @cs_out_en - current sink output enable
* @op_fdbck - selection of output as feedback for the boost
* @cabc_en - enable cabc for backlight pwm control
+ *
*/
struct wled_config_data {
- u8 num_strings;
+ u8 strings;
u8 ovp_val;
u8 boost_curr_lim;
u8 cp_select;
@@ -97,6 +102,11 @@
bool cs_out_en;
bool op_fdbck;
bool cabc_en;
+ bool sstart_en;
+ bool max_current_ind;
+ u8 max_three;
+ u8 max_two;
+ u8 max_one;
};
/**
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index c5b492b..e9051e1 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -91,7 +91,6 @@
unsigned int quirks2; /* More deviations from spec. */
#define SDHCI_QUIRK2_HOST_OFF_CARD_ON (1<<0)
-#define SDHCI_QUIRK2_OWN_CARD_DETECTION (1<<1)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
diff --git a/include/linux/of_coresight.h b/include/linux/of_coresight.h
index 6a5e4d4..0943dda 100644
--- a/include/linux/of_coresight.h
+++ b/include/linux/of_coresight.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,12 +16,19 @@
#ifdef CONFIG_OF
extern struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node);
+extern struct coresight_cti_data *of_get_coresight_cti_data(
+ struct device *dev, struct device_node *node);
#else
static inline struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node)
{
return NULL;
}
+static inline struct coresight_cti_data *of_get_coresight_cti_data(
+ struct device *dev, struct device_node *node)
+{
+ return NULL;
+}
#endif
#endif
diff --git a/drivers/platform/msm/ipa/a2_service.h b/include/linux/platform_data/qcom_ssm.h
similarity index 61%
rename from drivers/platform/msm/ipa/a2_service.h
rename to include/linux/platform_data/qcom_ssm.h
index 80885da..03ac67a 100644
--- a/drivers/platform/msm/ipa/a2_service.h
+++ b/include/linux/platform_data/qcom_ssm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,15 +10,12 @@
* GNU General Public License for more details.
*/
-#ifndef _A2_SERVICE_H_
-#define _A2_SERVICE_H_
+#ifndef __QCOM_SSM_H_
+#define __QCOM_SSM_H_
-int a2_mux_initialize(void);
+struct ssm_platform_data {
+ bool need_key_exchg;
+ const char *channel_name;
+};
-int a2_mux_close(void);
-
-int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
- void *tx_complete_cb);
-
-#endif /* _A2_SERVICE_H_ */
-
+#endif /* __QCOM_SSM_H_ */
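
For non-DT boards, a hedged sketch of how this platform data might be handed to the SSM driver; the platform device name and the channel string below are guesses for illustration only.

#include <linux/platform_device.h>
#include <linux/platform_data/qcom_ssm.h>

static struct ssm_platform_data example_ssm_pdata = {
	.need_key_exchg	= true,
	.channel_name	= "SSM_RTR",		/* illustrative channel name */
};

static struct platform_device example_ssm_device = {
	.name	= "qcom,ssm",			/* assumed driver name */
	.id	= -1,
	.dev	= {
		.platform_data = &example_ssm_pdata,
	},
};
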
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 1849cee..05d75ce 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -1203,20 +1203,15 @@
static inline int32_t qpnp_adc_scale_therm_pu1(int32_t adc_code,
const struct qpnp_adc_properties *adc_prop,
const struct qpnp_vadc_chan_properties *chan_prop,
- struct qpnp_vadc_result *chan_rslt);
+ struct qpnp_vadc_result *chan_rslt)
{ return -ENXIO; }
static inline int32_t qpnp_adc_scale_therm_pu2(int32_t adc_code,
const struct qpnp_adc_properties *adc_prop,
const struct qpnp_vadc_chan_properties *chan_prop,
- struct qpnp_vadc_result *chan_rslt);
+ struct qpnp_vadc_result *chan_rslt)
{ return -ENXIO; }
static inline int32_t qpnp_vadc_is_ready(void)
{ return -ENXIO; }
-static inline int32_t qpnp_adc_scale_default(int32_t adc_code,
- const struct qpnp_adc_properties *adc_prop,
- const struct qpnp_adc_chan_properties *chan_prop,
- struct qpnp_adc_chan_result *chan_rslt)
-{ return -ENXIO; }
static inline int32_t qpnp_get_vadc_gain_and_offset(
struct qpnp_vadc_linear_graph *param,
enum qpnp_adc_calib_type calib_type)
diff --git a/include/linux/regulator/krait-regulator.h b/include/linux/regulator/krait-regulator.h
index 836f9d6..b784531 100644
--- a/include/linux/regulator/krait-regulator.h
+++ b/include/linux/regulator/krait-regulator.h
@@ -13,7 +13,8 @@
#ifndef __KRAIT_REGULATOR_H__
#define __KRAIT_REGULATOR_H__
-#define KRAIT_REGULATOR_DRIVER_NAME "krait-power-regulator"
+#define KRAIT_REGULATOR_DRIVER_NAME "krait-power-regulator"
+#define KRAIT_PDN_DRIVER_NAME "krait-pdn"
/**
* krait_power_init - driver initialization function
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index b0b718f..e249953 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -304,6 +304,7 @@
struct msm_otg_platform_data *pdata;
int irq;
int async_irq;
+ struct clk *xo_clk;
struct clk *clk;
struct clk *pclk;
struct clk *phy_reset_clk;
diff --git a/include/media/msmb_pproc.h b/include/media/msmb_pproc.h
index b003f99..6bac1d6 100644
--- a/include/media/msmb_pproc.h
+++ b/include/media/msmb_pproc.h
@@ -85,6 +85,8 @@
uint32_t *cpp_cmd_msg;
int src_fd;
int dst_fd;
+ struct ion_handle *src_ion_handle;
+ struct ion_handle *dest_ion_handle;
};
struct msm_ver_num_info {
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index f5dfe0c..08b5ae7 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -857,6 +857,7 @@
* correctness of the format string and va_list arguments.
* - 'K' For a kernel pointer that should be hidden from unprivileged users
* - 'NF' For a netdev_features_t
+ * - 'a' For a phys_addr_t type and its derivative types (passed by reference)
*
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
@@ -941,6 +942,12 @@
return netdev_feature_string(buf, end, ptr, spec);
}
break;
+ case 'a':
+ spec.flags |= SPECIAL | SMALL | ZEROPAD;
+ spec.field_width = sizeof(phys_addr_t) * 2 + 2;
+ spec.base = 16;
+ return number(buf, end,
+ (unsigned long long) *((phys_addr_t *)ptr), spec);
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
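
A quick illustration of the new 'a' extension (the helper name is made up); note that the phys_addr_t is passed by reference, matching the pointer dereference in the code above.

#include <linux/kernel.h>
#include <linux/types.h>

static void example_print_region(phys_addr_t base)
{
	/* prints the address zero-padded in hex with a 0x prefix */
	pr_info("region starts at %pa\n", &base);
}
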
diff --git a/scripts/build-all.py b/scripts/build-all.py
index f5048e0..4789af7 100755
--- a/scripts/build-all.py
+++ b/scripts/build-all.py
@@ -1,6 +1,6 @@
#! /usr/bin/env python
-# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
+# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@@ -88,7 +88,6 @@
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
- r'omap2*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
diff --git a/sound/soc/codecs/msm8x10-wcd.c b/sound/soc/codecs/msm8x10-wcd.c
index 4bcea07..c8647fb1 100644
--- a/sound/soc/codecs/msm8x10-wcd.c
+++ b/sound/soc/codecs/msm8x10-wcd.c
@@ -50,9 +50,9 @@
#define MSM8X10_WCD_I2S_MASTER_MODE_MASK 0x08
#define MSM8X10_DINO_CODEC_BASE_ADDR 0xFE043000
-#define MAX_MSM8X10_WCD_DEVICE 2
+#define MAX_MSM8X10_WCD_DEVICE 4
#define CODEC_DT_MAX_PROP_SIZE 40
-#define MSM8X10_WCD_I2C_GSBI_SLAVE_ID "2-000d"
+#define MSM8X10_WCD_I2C_GSBI_SLAVE_ID "1-000d"
enum {
MSM8X10_WCD_I2C_TOP_LEVEL = 0,
@@ -88,6 +88,7 @@
IIR2,
IIR_MAX,
};
+
/* Codec supports 5 bands */
enum {
BAND1 = 0,
@@ -119,7 +120,6 @@
struct wcd9xxx_mbhc mbhc;
};
-
static unsigned short rx_digital_gain_reg[] = {
MSM8X10_WCD_A_CDC_RX1_VOL_CTL_B2_CTL,
MSM8X10_WCD_A_CDC_RX2_VOL_CTL_B2_CTL,
@@ -171,7 +171,7 @@
return rtn;
}
-static int msm8x10_wcd_abh_write_device(u16 reg, u8 *value, u32 bytes)
+static int msm8x10_wcd_abh_write_device(u16 reg, unsigned int *value, u32 bytes)
{
u32 temp = ((u32)(*value)) & 0x000000FF;
u32 offset = (((u32)(reg)) ^ 0x00000400) & 0x00000FFF;
@@ -179,10 +179,10 @@
return 0;
}
-static int msm8x10_wcd_abh_read_device(u16 reg, u32 bytes, u8 *value)
+static int msm8x10_wcd_abh_read_device(u16 reg, u32 bytes, unsigned int *value)
{
u32 offset = (((u32)(reg)) ^ 0x00000400) & 0x00000FFF;
- *value = (u8)ioread32(ioremap(MSM8X10_DINO_CODEC_BASE_ADDR +
+ *value = ioread32(ioremap(MSM8X10_DINO_CODEC_BASE_ADDR +
offset, 4));
return 0;
}
@@ -194,10 +194,10 @@
int ret;
u8 reg_addr = 0;
u8 data[bytes + 1];
- struct msm8x10_wcd_i2c *msm8x10_wcd;
+ struct msm8x10_wcd_i2c *msm8x10_wcd = NULL;
ret = get_i2c_msm8x10_wcd_device_info(reg, &msm8x10_wcd);
- if (!ret) {
+ if (ret) {
pr_err("%s: Invalid register address\n", __func__);
return ret;
}
@@ -219,7 +219,7 @@
/* Try again if the write fails */
if (ret != 1) {
ret = i2c_transfer(msm8x10_wcd->client->adapter,
- msm8x10_wcd->xfer_msg, 1);
+ msm8x10_wcd->xfer_msg, 1);
if (ret != 1) {
pr_err("failed to write the device\n");
return ret;
@@ -235,11 +235,11 @@
struct i2c_msg *msg;
int ret = 0;
u8 reg_addr = 0;
- struct msm8x10_wcd_i2c *msm8x10_wcd;
+ struct msm8x10_wcd_i2c *msm8x10_wcd = NULL;
u8 i = 0;
ret = get_i2c_msm8x10_wcd_device_info(reg, &msm8x10_wcd);
- if (!ret) {
+ if (ret) {
pr_err("%s: Invalid register address\n", __func__);
return ret;
}
@@ -256,7 +256,6 @@
msg->len = 1;
msg->flags = 0;
msg->buf = &reg_addr;
-
msg = &msm8x10_wcd->xfer_msg[1];
msg->addr = msm8x10_wcd->client->addr;
msg->len = 1;
@@ -275,38 +274,45 @@
}
}
}
+ pr_debug("%s: Reg 0x%x = 0x%x\n", __func__, reg, *dest);
return 0;
}
-static int msm8x10_wcd_reg_read(struct msm8x10_wcd *msm8x10_wcd, u16 reg)
+int msm8x10_wcd_i2c_read(unsigned short reg, int bytes, void *dest)
{
- u8 val;
+ return msm8x10_wcd_i2c_read_device(reg, bytes, dest);
+}
+
+int msm8x10_wcd_i2c_write(unsigned short reg, int bytes, void *src)
+{
+ return msm8x10_wcd_i2c_write_device(reg, src, bytes);
+}
+
+static int msm8x10_wcd_reg_read(struct msm8x10_wcd *msm8x10_wcd,
+ u16 reg, unsigned int *val)
+{
int ret = -EINVAL;
/* check if use I2C interface for Helicon or AHB for Dino */
mutex_lock(&msm8x10_wcd->io_lock);
if (MSM8X10_WCD_IS_HELICON_REG(reg))
- ret = msm8x10_wcd_i2c_read_device(reg, 1, &val);
+ ret = msm8x10_wcd_i2c_read(reg, 1, val);
else if (MSM8X10_WCD_IS_DINO_REG(reg))
- ret = msm8x10_wcd_abh_read_device(reg, 1, &val);
+ ret = msm8x10_wcd_abh_read_device(reg, 1, val);
mutex_unlock(&msm8x10_wcd->io_lock);
-
- if (ret < 0)
- return ret;
- else
- return val;
+ return ret;
}
static int msm8x10_wcd_reg_write(struct msm8x10_wcd *msm8x10_wcd, u16 reg,
- u8 val)
+ unsigned int val)
{
int ret = -EINVAL;
/* check if use I2C interface for Helicon or AHB for Dino */
mutex_lock(&msm8x10_wcd->io_lock);
if (MSM8X10_WCD_IS_HELICON_REG(reg))
- ret = msm8x10_wcd_i2c_write_device(reg, &val, 1);
+ ret = msm8x10_wcd_i2c_write(reg, 1, &val);
else if (MSM8X10_WCD_IS_DINO_REG(reg))
ret = msm8x10_wcd_abh_write_device(reg, &val, 1);
mutex_unlock(&msm8x10_wcd->io_lock);
@@ -331,12 +337,13 @@
return rtn;
}
-static int msm8x10_wcd_volatile(struct snd_soc_codec *ssc, unsigned int reg)
+static int msm8x10_wcd_volatile(struct snd_soc_codec *codec, unsigned int reg)
{
/*
* Registers lower than 0x100 are top level registers which can be
* written by the Taiko core driver.
*/
+ dev_dbg(codec->dev, "%s: reg 0x%x\n", __func__, reg);
if ((reg >= MSM8X10_WCD_A_CDC_MBHC_EN_CTL) || (reg < 0x100))
return 1;
@@ -373,7 +380,7 @@
unsigned int value)
{
int ret;
-
+ dev_dbg(codec->dev, "%s: Write from reg 0x%x\n", __func__, reg);
if (reg == SND_SOC_NOPM)
return 0;
@@ -395,6 +402,7 @@
unsigned int val;
int ret;
+ dev_dbg(codec->dev, "%s: Read from reg 0x%x\n", __func__, reg);
if (reg == SND_SOC_NOPM)
return 0;
@@ -411,7 +419,7 @@
reg, ret);
}
- val = msm8x10_wcd_reg_read(codec->control_data, reg);
+ ret = msm8x10_wcd_reg_read(codec->control_data, reg, &val);
return val;
}
@@ -431,7 +439,7 @@
if (!regnode) {
dev_err(dev, "Looking up %s property in node %s failed",
- prop_name, dev->of_node->full_name);
+ prop_name, dev->of_node->full_name);
return -ENODEV;
}
vreg->name = vreg_name;
@@ -442,7 +450,7 @@
if (!prop || (len != (2 * sizeof(__be32)))) {
dev_err(dev, "%s %s property\n",
- prop ? "invalid format" : "no", prop_name);
+ prop ? "invalid format" : "no", prop_name);
return -ENODEV;
} else {
vreg->min_uV = be32_to_cpup(&prop[0]);
@@ -450,18 +458,18 @@
}
snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
- "qcom,%s-current", vreg_name);
+ "qcom,%s-current", vreg_name);
ret = of_property_read_u32(dev->of_node, prop_name, &prop_val);
if (ret) {
dev_err(dev, "Looking up %s property in node %s failed",
- prop_name, dev->of_node->full_name);
+ prop_name, dev->of_node->full_name);
return -ENODEV;
}
vreg->optimum_uA = prop_val;
dev_info(dev, "%s: vol=[%d %d]uV, curr=[%d]uA\n", vreg->name,
- vreg->min_uV, vreg->max_uV, vreg->optimum_uA);
+ vreg->min_uV, vreg->max_uV, vreg->optimum_uA);
return 0;
}
@@ -473,7 +481,7 @@
u32 prop_val;
snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
- "qcom,cdc-micbias-ldoh-v");
+ "qcom,cdc-micbias-ldoh-v");
ret = of_property_read_u32(dev->of_node, prop_name, &prop_val);
if (ret) {
dev_err(dev, "Looking up %s property in node %s failed",
@@ -483,7 +491,7 @@
micbias->ldoh_v = (u8)prop_val;
snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
- "qcom,cdc-micbias-cfilt1-mv");
+ "qcom,cdc-micbias-cfilt1-mv");
ret = of_property_read_u32(dev->of_node, prop_name,
&micbias->cfilt1_mv);
if (ret) {
@@ -493,7 +501,7 @@
}
snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
- "qcom,cdc-micbias1-cfilt-sel");
+ "qcom,cdc-micbias1-cfilt-sel");
ret = of_property_read_u32(dev->of_node, prop_name, &prop_val);
if (ret) {
dev_err(dev, "Looking up %s property in node %s failed",
@@ -508,7 +516,7 @@
MICBIAS_EXT_BYP_CAP : MICBIAS_NO_EXT_BYP_CAP);
dev_dbg(dev, "ldoh_v %u cfilt1_mv %u\n",
- (u32)micbias->ldoh_v, (u32)micbias->cfilt1_mv);
+ (u32)micbias->ldoh_v, (u32)micbias->cfilt1_mv);
dev_dbg(dev, "bias1_cfilt_sel %u\n", (u32)micbias->bias1_cfilt_sel);
dev_dbg(dev, "bias1_ext_cap %d\n", micbias->bias1_cap_mode);
@@ -533,13 +541,14 @@
num_of_supplies = ARRAY_SIZE(msm8x10_wcd_supplies);
} else {
dev_err(dev, "%s unsupported device %s\n",
- __func__, dev_name(dev));
+ __func__, dev_name(dev));
goto err;
}
if (num_of_supplies > ARRAY_SIZE(pdata->regulator)) {
dev_err(dev, "%s: Num of supplies %u > max supported %u\n",
- __func__, num_of_supplies, ARRAY_SIZE(pdata->regulator));
+ __func__, num_of_supplies,
+ ARRAY_SIZE(pdata->regulator));
goto err;
}
@@ -574,8 +583,8 @@
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
+ dev_dbg(codec->dev, "%s: event = %d\n", __func__, event);
- pr_debug("%s %d\n", __func__, event);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
/* Enable charge pump clock*/
@@ -623,13 +632,11 @@
} else if (ear_pa_gain == 0x04) {
ucontrol->value.integer.value[0] = 1;
} else {
- pr_err("%s: ERROR: Unsupported Ear Gain = 0x%x\n",
- __func__, ear_pa_gain);
+ dev_err(codec->dev, "%s: ERROR: Unsupported Ear Gain = 0x%x\n",
+ __func__, ear_pa_gain);
return -EINVAL;
}
-
- pr_debug("%s: ear_pa_gain = 0x%x\n", __func__, ear_pa_gain);
-
+ dev_dbg(codec->dev, "%s: ear_pa_gain = 0x%x\n", __func__, ear_pa_gain);
return 0;
}
@@ -639,8 +646,8 @@
u8 ear_pa_gain;
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- pr_debug("%s: ucontrol->value.integer.value[0] = %ld\n",
- __func__, ucontrol->value.integer.value[0]);
+ dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
switch (ucontrol->value.integer.value[0]) {
case 0:
@@ -673,7 +680,7 @@
(MSM8X10_WCD_A_CDC_IIR1_CTL + 64 * iir_idx)) &
(1 << band_idx);
- pr_debug("%s: IIR #%d band #%d enable %d\n", __func__,
+ dev_dbg(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
iir_idx, band_idx,
(uint32_t)ucontrol->value.integer.value[0]);
return 0;
@@ -692,15 +699,15 @@
/* Mask first 5 bits, 6-8 are reserved */
snd_soc_update_bits(codec, (MSM8X10_WCD_A_CDC_IIR1_CTL + 64 * iir_idx),
- (1 << band_idx), (value << band_idx));
+ (1 << band_idx), (value << band_idx));
- pr_debug("%s: IIR #%d band #%d enable %d\n", __func__,
+ dev_dbg(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
iir_idx, band_idx, value);
return 0;
}
static uint32_t get_iir_band_coeff(struct snd_soc_codec *codec,
- int iir_idx, int band_idx,
- int coeff_idx)
+ int iir_idx, int band_idx,
+ int coeff_idx)
{
/* Address does not automatically update if reading */
snd_soc_write(codec,
@@ -734,7 +741,7 @@
ucontrol->value.integer.value[4] =
get_iir_band_coeff(codec, iir_idx, band_idx, 4);
- pr_debug("%s: IIR #%d band #%d b0 = 0x%x\n"
+ dev_dbg(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
"%s: IIR #%d band #%d b1 = 0x%x\n"
"%s: IIR #%d band #%d b2 = 0x%x\n"
"%s: IIR #%d band #%d a1 = 0x%x\n"
@@ -780,17 +787,17 @@
kcontrol->private_value)->shift;
set_iir_band_coeff(codec, iir_idx, band_idx, 0,
- ucontrol->value.integer.value[0]);
+ ucontrol->value.integer.value[0]);
set_iir_band_coeff(codec, iir_idx, band_idx, 1,
- ucontrol->value.integer.value[1]);
+ ucontrol->value.integer.value[1]);
set_iir_band_coeff(codec, iir_idx, band_idx, 2,
- ucontrol->value.integer.value[2]);
+ ucontrol->value.integer.value[2]);
set_iir_band_coeff(codec, iir_idx, band_idx, 3,
- ucontrol->value.integer.value[3]);
+ ucontrol->value.integer.value[3]);
set_iir_band_coeff(codec, iir_idx, band_idx, 4,
- ucontrol->value.integer.value[4]);
+ ucontrol->value.integer.value[4]);
- pr_debug("%s: IIR #%d band #%d b0 = 0x%x\n"
+ dev_dbg(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
"%s: IIR #%d band #%d b1 = 0x%x\n"
"%s: IIR #%d band #%d b2 = 0x%x\n"
"%s: IIR #%d band #%d a1 = 0x%x\n"
@@ -970,7 +977,6 @@
"ZERO", "ADC1", "ADC2", "DMIC1", "DMIC2"
};
-
static const char * const anc_mux_text[] = {
"ZERO", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5", "ADC6", "ADC_MB",
"RSVD_1", "DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5", "DMIC6"
@@ -1076,14 +1082,16 @@
dec_name = strsep(&widget_name, " ");
widget_name = temp;
if (!dec_name) {
- pr_err("%s: Invalid decimator = %s\n", __func__, w->name);
+ dev_err(codec->dev, "%s: Invalid decimator = %s\n",
+ __func__, w->name);
ret = -EINVAL;
goto out;
}
ret = kstrtouint(strpbrk(dec_name, "12"), 10, &decimator);
if (ret < 0) {
- pr_err("%s: Invalid decimator = %s\n", __func__, dec_name);
+ dev_err(codec->dev, "%s: Invalid decimator = %s\n",
+ __func__, dec_name);
ret = -EINVAL;
goto out;
}
@@ -1100,7 +1108,8 @@
adc_dmic_sel = 0x0;
break;
default:
- pr_err("%s: Invalid Decimator = %u\n", __func__, decimator);
+ dev_err(codec->dev, "%s: Invalid Decimator = %u\n",
+ __func__, decimator);
ret = -EINVAL;
goto out;
}
@@ -1204,18 +1213,18 @@
static void msm8x10_wcd_codec_enable_adc_block(struct snd_soc_codec *codec,
int enable)
{
- struct msm8x10_wcd_priv *taiko = snd_soc_codec_get_drvdata(codec);
+ struct msm8x10_wcd_priv *wcd8x10 = snd_soc_codec_get_drvdata(codec);
- pr_debug("%s %d\n", __func__, enable);
+ dev_dbg(codec->dev, "%s %d\n", __func__, enable);
if (enable) {
- taiko->adc_count++;
+ wcd8x10->adc_count++;
snd_soc_update_bits(codec,
MSM8X10_WCD_A_CDC_ANA_CLK_CTL,
0x20, 0x20);
} else {
- taiko->adc_count--;
- if (!taiko->adc_count)
+ wcd8x10->adc_count--;
+ if (!wcd8x10->adc_count)
snd_soc_update_bits(codec,
MSM8X10_WCD_A_CDC_ANA_CLK_CTL,
0x20, 0x0);
@@ -1229,7 +1238,7 @@
u16 adc_reg;
u8 init_bit_shift;
- pr_debug("%s %d\n", __func__, event);
+ dev_dbg(codec->dev, "%s %d\n", __func__, event);
adc_reg = MSM8X10_WCD_A_TX_1_2_TEST_CTL;
if (w->reg == MSM8X10_WCD_A_TX_1_EN)
@@ -1237,7 +1246,8 @@
else if (adc_reg == MSM8X10_WCD_A_TX_2_EN)
init_bit_shift = 6;
else {
- pr_err("%s: Error, invalid adc register\n", __func__);
+ dev_err(codec->dev, "%s: Error, invalid adc register\n",
+ __func__);
return -EINVAL;
}
@@ -1263,14 +1273,15 @@
struct snd_soc_codec *codec = w->codec;
u16 lineout_gain_reg;
- pr_debug("%s %d %s\n", __func__, event, w->name);
+ dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
switch (w->shift) {
case 0:
lineout_gain_reg = MSM8X10_WCD_A_RX_LINE_1_GAIN;
break;
default:
- pr_err("%s: Error, incorrect lineout register value\n",
+ dev_err(codec->dev,
+ "%s: Error, incorrect lineout register value\n",
__func__);
return -EINVAL;
}
@@ -1280,8 +1291,8 @@
snd_soc_update_bits(codec, lineout_gain_reg, 0x40, 0x40);
break;
case SND_SOC_DAPM_POST_PMU:
- pr_debug("%s: sleeping 16 ms after %s PA turn on\n",
- __func__, w->name);
+ dev_dbg(codec->dev, "%s: sleeping 16 ms after %s PA turn on\n",
+ __func__, w->name);
usleep_range(16000, 16100);
break;
case SND_SOC_DAPM_POST_PMD:
@@ -1294,7 +1305,7 @@
static int msm8x10_wcd_codec_enable_spk_pa(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- pr_debug("%s %d %s\n", __func__, event, w->name);
+ dev_dbg(w->codec->dev, "%s %d %s\n", __func__, event, w->name);
return 0;
}
@@ -1311,7 +1322,8 @@
ret = kstrtouint(strpbrk(w->name, "12"), 10, &dmic);
if (ret < 0) {
- pr_err("%s: Invalid DMIC line on the codec\n", __func__);
+ dev_err(codec->dev,
+ "%s: Invalid DMIC line on the codec\n", __func__);
return -EINVAL;
}
@@ -1321,11 +1333,12 @@
dmic_clk_en = 0x01;
dmic_clk_cnt = &(msm8x10_wcd->dmic_1_2_clk_cnt);
dmic_clk_reg = MSM8X10_WCD_A_CDC_CLK_DMIC_B1_CTL;
- pr_debug("%s() event %d DMIC%d dmic_1_2_clk_cnt %d\n",
+ dev_dbg(codec->dev,
+ "%s() event %d DMIC%d dmic_1_2_clk_cnt %d\n",
__func__, event, dmic, *dmic_clk_cnt);
break;
default:
- pr_err("%s: Invalid DMIC Selection\n", __func__);
+ dev_err(codec->dev, "%s: Invalid DMIC Selection\n", __func__);
return -EINVAL;
}
@@ -1360,7 +1373,7 @@
char *internal3_text = "Internal3";
enum wcd9xxx_notify_event e_post_off, e_pre_on, e_post_on;
- pr_debug("%s %d\n", __func__, event);
+ dev_dbg(codec->dev, "%s %d\n", __func__, event);
switch (w->reg) {
case MSM8X10_WCD_A_MICB_1_CTL:
micb_int_reg = MSM8X10_WCD_A_MICB_1_INT_RBIAS;
@@ -1371,7 +1384,8 @@
e_post_off = WCD9XXX_EVENT_POST_MICBIAS_1_OFF;
break;
default:
- pr_err("%s: Error, invalid micbias register\n", __func__);
+ dev_err(codec->dev,
+ "%s: Error, invalid micbias register\n", __func__);
return -EINVAL;
}
@@ -1432,7 +1446,7 @@
u8 dec_hpf_cut_of_freq;
int offset;
- pr_debug("%s %d\n", __func__, event);
+ dev_dbg(codec->dev, "%s %d\n", __func__, event);
widget_name = kstrndup(w->name, 15, GFP_KERNEL);
if (!widget_name)
@@ -1442,26 +1456,29 @@
dec_name = strsep(&widget_name, " ");
widget_name = temp;
if (!dec_name) {
- pr_err("%s: Invalid decimator = %s\n", __func__, w->name);
+ dev_err(codec->dev,
+ "%s: Invalid decimator = %s\n", __func__, w->name);
ret = -EINVAL;
goto out;
}
ret = kstrtouint(strpbrk(dec_name, "12"), 10, &decimator);
if (ret < 0) {
- pr_err("%s: Invalid decimator = %s\n", __func__, dec_name);
+ dev_err(codec->dev,
+ "%s: Invalid decimator = %s\n", __func__, dec_name);
ret = -EINVAL;
goto out;
}
- pr_debug("%s(): widget = %s dec_name = %s decimator = %u\n", __func__,
- w->name, dec_name, decimator);
+ dev_dbg(codec->dev,
+ "%s(): widget = %s dec_name = %s decimator = %u\n", __func__,
+ w->name, dec_name, decimator);
if (w->reg == MSM8X10_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL) {
dec_reset_reg = MSM8X10_WCD_A_CDC_CLK_TX_RESET_B1_CTL;
offset = 0;
} else {
- pr_err("%s: Error, incorrect dec\n", __func__);
+ dev_err(codec->dev, "%s: Error, incorrect dec\n", __func__);
ret = -EINVAL;
goto out;
}
@@ -1531,11 +1548,12 @@
}
static int msm8x10_wcd_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
+ struct snd_kcontrol *kcontrol,
+ int event)
{
struct snd_soc_codec *codec = w->codec;
- pr_debug("%s %d %s\n", __func__, event, w->name);
+ dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -1565,7 +1583,7 @@
struct snd_soc_codec *codec = w->codec;
struct msm8x10_wcd_priv *msm8x10_wcd = snd_soc_codec_get_drvdata(codec);
- pr_debug("%s %d\n", __func__, event);
+ dev_dbg(codec->dev, "%s %d\n", __func__, event);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -1583,7 +1601,7 @@
{
struct snd_soc_codec *codec = w->codec;
- pr_debug("%s %s %d\n", __func__, w->name, event);
+ dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -1603,7 +1621,7 @@
struct msm8x10_wcd_priv *msm8x10_wcd = snd_soc_codec_get_drvdata(codec);
enum wcd9xxx_notify_event e_pre_on, e_post_off;
- pr_debug("%s: %s event = %d\n", __func__, w->name, event);
+ dev_dbg(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
if (w->shift == 5) {
e_pre_on = WCD9XXX_EVENT_PRE_HPHR_PA_ON;
e_post_off = WCD9XXX_EVENT_POST_HPHR_PA_OFF;
@@ -1611,7 +1629,8 @@
e_pre_on = WCD9XXX_EVENT_PRE_HPHL_PA_ON;
e_post_off = WCD9XXX_EVENT_POST_HPHL_PA_OFF;
} else {
- pr_err("%s: Invalid w->shift %d\n", __func__, w->shift);
+ dev_err(codec->dev,
+ "%s: Invalid w->shift %d\n", __func__, w->shift);
return -EINVAL;
}
@@ -1635,8 +1654,9 @@
* would have been locked while snd_soc_jack_report also
* attempts to acquire same lock.
*/
- pr_debug("%s: sleep 10 ms after %s PA disable.\n", __func__,
- w->name);
+ dev_dbg(codec->dev,
+ "%s: sleep 10 ms after %s PA disable.\n", __func__,
+ w->name);
usleep_range(10000, 10100);
break;
}
@@ -1648,7 +1668,7 @@
{
struct snd_soc_codec *codec = w->codec;
- pr_debug("%s %s %d\n", __func__, w->name, event);
+ dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -1665,7 +1685,7 @@
static int msm8x10_wcd_spk_dac_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- pr_debug("%s %s %d\n", __func__, w->name, event);
+ dev_dbg(w->codec->dev, "%s %s %d\n", __func__, w->name, event);
return 0;
}
@@ -1812,14 +1832,14 @@
{"MIC BIAS1 External", NULL, "LDO_H"},
};
-
static int msm8x10_wcd_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct msm8x10_wcd *msm8x10_wcd_core =
dev_get_drvdata(dai->codec->dev);
- pr_debug("%s(): substream = %s stream = %d\n" , __func__,
- substream->name, substream->stream);
+ dev_dbg(dai->codec->dev, "%s(): substream = %s stream = %d\n",
+ __func__,
+ substream->name, substream->stream);
if ((msm8x10_wcd_core != NULL) &&
(msm8x10_wcd_core->dev != NULL))
pm_runtime_get_sync(msm8x10_wcd_core->dev);
@@ -1832,8 +1852,9 @@
{
struct msm8x10_wcd *msm8x10_wcd_core =
dev_get_drvdata(dai->codec->dev);
- pr_debug("%s(): substream = %s stream = %d\n" , __func__,
- substream->name, substream->stream);
+ dev_dbg(dai->codec->dev,
+ "%s(): substream = %s stream = %d\n", __func__,
+ substream->name, substream->stream);
if ((msm8x10_wcd_core != NULL) &&
(msm8x10_wcd_core->dev != NULL)) {
pm_runtime_mark_last_busy(msm8x10_wcd_core->dev);
@@ -1846,9 +1867,9 @@
{
struct msm8x10_wcd_priv *msm8x10_wcd = snd_soc_codec_get_drvdata(codec);
- pr_debug("%s: mclk_enable = %u, dapm = %d\n", __func__, mclk_enable,
- dapm);
-
+ dev_dbg(codec->dev,
+ "%s: mclk_enable = %u, dapm = %d\n", __func__,
+ mclk_enable, dapm);
WCD9XXX_BCL_LOCK(&msm8x10_wcd->resmgr);
if (mclk_enable) {
wcd9xxx_resmgr_get_bandgap(&msm8x10_wcd->resmgr,
@@ -1870,13 +1891,13 @@
static int msm8x10_wcd_set_dai_sysclk(struct snd_soc_dai *dai,
int clk_id, unsigned int freq, int dir)
{
- pr_debug("%s\n", __func__);
+ dev_dbg(dai->codec->dev, "%s\n", __func__);
return 0;
}
static int msm8x10_wcd_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
- pr_debug("%s\n", __func__);
+ dev_dbg(dai->codec->dev, "%s\n", __func__);
return 0;
}
@@ -1885,7 +1906,7 @@
unsigned int rx_num, unsigned int *rx_slot)
{
- pr_debug("%s\n", __func__);
+ dev_dbg(dai->codec->dev, "%s\n", __func__);
return 0;
}
@@ -1894,7 +1915,7 @@
unsigned int *rx_num, unsigned int *rx_slot)
{
- pr_debug("%s\n", __func__);
+ dev_dbg(dai->codec->dev, "%s\n", __func__);
return 0;
}
@@ -1917,7 +1938,8 @@
u8 tx_fs_rate, rx_fs_rate;
int ret;
- pr_debug("%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
+ dev_dbg(dai->codec->dev,
+ "%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
dai->name, dai->id, params_rate(params),
params_channels(params));
@@ -1947,7 +1969,8 @@
rx_fs_rate = 0xA0;
break;
default:
- pr_err("%s: Invalid sampling rate %d\n", __func__,
+ dev_err(dai->codec->dev,
+ "%s: Invalid sampling rate %d\n", __func__,
params_rate(params));
return -EINVAL;
}
@@ -1957,7 +1980,8 @@
ret = msm8x10_wcd_set_decimator_rate(dai, tx_fs_rate,
params_rate(params));
if (ret < 0) {
- pr_err("%s: set decimator rate failed %d\n", __func__,
+ dev_err(dai->codec->dev,
+ "%s: set decimator rate failed %d\n", __func__,
ret);
return ret;
}
@@ -1966,13 +1990,15 @@
ret = msm8x10_wcd_set_interpolator_rate(dai, rx_fs_rate,
params_rate(params));
if (ret < 0) {
- pr_err("%s: set decimator rate failed %d\n", __func__,
+ dev_err(dai->codec->dev,
+ "%s: set decimator rate failed %d\n", __func__,
ret);
return ret;
}
break;
default:
- pr_err("%s: Invalid stream type %d\n", __func__,
+ dev_err(dai->codec->dev,
+ "%s: Invalid stream type %d\n", __func__,
substream->stream);
return -EINVAL;
}
@@ -2026,13 +2052,15 @@
{
switch (event) {
case SND_SOC_DAPM_POST_PMU:
- pr_debug("%s: Sleeping 20ms after enabling EAR PA\n",
- __func__);
+ dev_dbg(w->codec->dev,
+ "%s: Sleeping 20ms after enabling EAR PA\n",
+ __func__);
msleep(20);
break;
case SND_SOC_DAPM_POST_PMD:
- pr_debug("%s: Sleeping 20ms after disabling EAR PA\n",
- __func__);
+ dev_dbg(w->codec->dev,
+ "%s: Sleeping 20ms after disabling EAR PA\n",
+ __func__);
msleep(20);
break;
}
@@ -2310,11 +2338,12 @@
static int msm8x10_wcd_codec_probe(struct snd_soc_codec *codec)
{
- msm8x10_wcd_codec_init_reg(codec);
+ dev_dbg(codec->dev, "%s()\n", __func__);
+ codec->control_data = dev_get_drvdata(codec->dev);
+ msm8x10_wcd_codec_init_reg(codec);
msm8x10_wcd_update_reg_defaults(codec);
- dev_dbg(codec->dev, "%s()\n", __func__);
return 0;
}
@@ -2324,6 +2353,18 @@
return 0;
}
+static int msm8x10_wcd_device_init(struct msm8x10_wcd *msm8x10)
+{
+ mutex_init(&msm8x10->io_lock);
+ mutex_init(&msm8x10->xfer_lock);
+ mutex_init(&msm8x10->pm_lock);
+ msm8x10->wlock_holders = 0;
+
+ return 0;
+}
+
static struct snd_soc_codec_driver soc_codec_dev_msm8x10_wcd = {
.probe = msm8x10_wcd_codec_probe,
.remove = msm8x10_wcd_codec_remove,
@@ -2349,8 +2390,21 @@
static int __devinit msm8x10_wcd_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int ret;
+ int ret = 0;
+ struct msm8x10_wcd *msm8x10 = NULL;
struct msm8x10_wcd_pdata *pdata;
+ static int device_id;
+ struct device *dev;
+
+ dev_dbg(&client->dev, "%s:slave addr = 0x%x device_id = %d\n",
+ __func__, client->addr, device_id);
+
+ if (device_id > 0) {
+ msm8x10_wcd_modules[device_id++].client = client;
+ return ret;
+ }
+
+ dev = &client->dev;
if (client->dev.of_node) {
dev_dbg(&client->dev, "%s:Platform data from device tree\n",
__func__);
@@ -2362,16 +2416,50 @@
pdata = client->dev.platform_data;
}
- ret = snd_soc_register_codec(&client->dev,
- &soc_codec_dev_msm8x10_wcd,
- msm8x10_wcd_i2s_dai, ARRAY_SIZE(msm8x10_wcd_i2s_dai));
- dev_dbg(&client->dev, "%s:ret = 0x%x\n", __func__, ret);
+ msm8x10 = kzalloc(sizeof(struct msm8x10_wcd), GFP_KERNEL);
+ if (msm8x10 == NULL) {
+ dev_err(&client->dev,
+ "%s: error, allocation failed\n", __func__);
+ ret = -ENOMEM;
+ goto fail;
+ }
+ msm8x10->dev = &client->dev;
+ msm8x10_wcd_modules[device_id++].client = client;
+ msm8x10->read_dev = msm8x10_wcd_reg_read;
+ msm8x10->write_dev = msm8x10_wcd_reg_write;
+ ret = msm8x10_wcd_device_init(msm8x10);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s:msm8x10_wcd_device_init failed with error %d\n",
+ __func__, ret);
+ goto fail;
+ }
+ dev_set_drvdata(&client->dev, msm8x10);
+ ret = snd_soc_register_codec(&client->dev, &soc_codec_dev_msm8x10_wcd,
+ msm8x10_wcd_i2s_dai,
+ ARRAY_SIZE(msm8x10_wcd_i2s_dai));
+ if (ret)
+ dev_err(&client->dev,
+ "%s:snd_soc_register_codec failed with error %d\n",
+ __func__, ret);
+fail:
return ret;
}
+static void msm8x10_wcd_device_exit(struct msm8x10_wcd *msm8x10)
+{
+ mutex_destroy(&msm8x10->pm_lock);
+ mutex_destroy(&msm8x10->io_lock);
+ mutex_destroy(&msm8x10->xfer_lock);
+ kfree(msm8x10);
+}
+
static int __devexit msm8x10_wcd_i2c_remove(struct i2c_client *client)
{
+ struct msm8x10_wcd *msm8x10 = dev_get_drvdata(&client->dev);
+
+ msm8x10_wcd_device_exit(msm8x10);
return 0;
}
@@ -2407,8 +2495,8 @@
pr_debug("%s:\n", __func__);
ret = i2c_add_driver(&msm8x10_wcd_i2c_driver);
if (ret != 0)
- pr_err("%s: Failed to add msm8x10 wcd I2C driver - error code %d\n",
- __func__, ret);
+ pr_err("%s: Failed to add msm8x10 wcd I2C driver - error %d\n",
+ __func__, ret);
return ret;
}
diff --git a/sound/soc/codecs/msm8x10-wcd.h b/sound/soc/codecs/msm8x10-wcd.h
index 365d526..44e8a6d 100644
--- a/sound/soc/codecs/msm8x10-wcd.h
+++ b/sound/soc/codecs/msm8x10-wcd.h
@@ -196,6 +196,10 @@
u8 version;
int reset_gpio;
+ int (*read_dev)(struct msm8x10_wcd *msm8x10,
+ unsigned short reg, unsigned int *val);
+ int (*write_dev)(struct msm8x10_wcd *msm8x10,
+ unsigned short reg, unsigned int val);
u32 num_of_supplies;
struct regulator_bulk_data *supplies;
diff --git a/sound/soc/msm/msm-pcm-host-voice.c b/sound/soc/msm/msm-pcm-host-voice.c
index 7cb309e3..36826cc 100644
--- a/sound/soc/msm/msm-pcm-host-voice.c
+++ b/sound/soc/msm/msm-pcm-host-voice.c
@@ -28,7 +28,7 @@
#include "qdsp6/q6voice.h"
-#define HPCM_MAX_Q_LEN 2
+#define HPCM_MAX_Q_LEN 10
#define HPCM_MIN_VOC_PKT_SIZE 320
#define HPCM_MAX_VOC_PKT_SIZE 640
diff --git a/sound/soc/msm/msm8x10.c b/sound/soc/msm/msm8x10.c
index 981a9a7..4dd85fc 100644
--- a/sound/soc/msm/msm8x10.c
+++ b/sound/soc/msm/msm8x10.c
@@ -114,8 +114,8 @@
.stream_name = "Primary MI2S Playback",
.cpu_dai_name = "msm-dai-q6-mi2s.0",
.platform_name = "msm-pcm-routing",
- .codec_name = "msm-stub-codec.1",
- .codec_dai_name = "msm-stub-tx",
+ .codec_name = "msm8x10-wcd-i2c-core.1-000d",
+ .codec_dai_name = "msm8x10_wcd_i2s_rx1",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_MI2S_RX,
.init = &msm_audrx_init,
@@ -127,8 +127,8 @@
.stream_name = "Secondary MI2S Capture",
.cpu_dai_name = "msm-dai-q6-mi2s.1",
.platform_name = "msm-pcm-routing",
- .codec_name = "msm-stub-codec.1",
- .codec_dai_name = "msm-stub-tx",
+ .codec_name = "msm8x10-wcd-i2c-core.1-000d",
+ .codec_dai_name = "msm8x10_wcd_i2s_tx1",
.no_pcm = 1,
.be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
.be_hw_params_fixup = msm_be_hw_params_fixup,
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
index b3107a4..2d2fe31 100644
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
@@ -50,7 +50,7 @@
unsigned volume;
atomic_t audio_ocmem_req;
};
-static struct snd_msm compressed_audio = {NULL, 0x2000} ;
+static struct snd_msm compressed_audio = {NULL, 0x20002000} ;
static struct audio_locks the_locks;
@@ -587,8 +587,9 @@
{
int rc = 0;
if (compressed_audio.prtd && compressed_audio.prtd->audio_client) {
- rc = q6asm_set_volume(compressed_audio.prtd->audio_client,
- volume);
+ rc = q6asm_set_lrgain(compressed_audio.prtd->audio_client,
+ (volume >> 16) & 0xFFFF,
+ volume & 0xFFFF);
if (rc < 0) {
pr_err("%s: Send Volume command failed rc=%d\n",
__func__, rc);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
index 2fca464..ae7e76c 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
@@ -366,7 +366,9 @@
{
int rc = 0;
if (lpa_audio.prtd && lpa_audio.prtd->audio_client) {
- rc = q6asm_set_volume(lpa_audio.prtd->audio_client, volume);
+ rc = q6asm_set_lrgain(lpa_audio.prtd->audio_client,
+ (volume >> 16) & 0xFFFF,
+ volume & 0xFFFF);
if (rc < 0) {
pr_err("%s: Send Volume command failed rc=%d\n",
__func__, rc);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index c48132e..02c3457 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -58,14 +58,14 @@
#define INT_RX_VOL_MAX_STEPS 0x2000
#define INT_RX_VOL_GAIN 0x2000
-
+#define INT_RX_LR_VOL_MAX_STEPS 0x20002000
static int msm_route_fm_vol_control;
static const DECLARE_TLV_DB_LINEAR(fm_rx_vol_gain, 0,
INT_RX_VOL_MAX_STEPS);
static int msm_route_lpa_vol_control;
static const DECLARE_TLV_DB_LINEAR(lpa_rx_vol_gain, 0,
- INT_RX_VOL_MAX_STEPS);
+ INT_RX_LR_VOL_MAX_STEPS);
static int msm_route_multimedia2_vol_control;
static const DECLARE_TLV_DB_LINEAR(multimedia2_rx_vol_gain, 0,
@@ -73,7 +73,7 @@
static int msm_route_compressed_vol_control;
static const DECLARE_TLV_DB_LINEAR(compressed_rx_vol_gain, 0,
- INT_RX_VOL_MAX_STEPS);
+ INT_RX_LR_VOL_MAX_STEPS);
static int msm_route_multimedia5_vol_control;
static const DECLARE_TLV_DB_LINEAR(multimedia5_rx_vol_gain, 0,
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 6a65880..12e83b0 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -2765,13 +2765,14 @@
uint64_t *enc_buf;
void *apr_cvs;
u16 cvs_handle;
- dec_buf = (uint64_t *)v->shmem_info.sh_buf.buf[0].phys;
- enc_buf = (uint64_t *)v->shmem_info.sh_buf.buf[1].phys;
if (v == NULL) {
pr_err("%s: v is NULL\n", __func__);
return -EINVAL;
}
+ dec_buf = (uint64_t *)v->shmem_info.sh_buf.buf[0].phys;
+ enc_buf = (uint64_t *)v->shmem_info.sh_buf.buf[1].phys;
+
apr_cvs = common.apr_q6_cvs;
if (!apr_cvs) {
@@ -4618,6 +4619,10 @@
struct voice_data *v = voice_get_session(
common.voice[VOC_PATH_FULL].session_id);
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
v->shmem_info.sh_buf.client = msm_ion_client_create(UINT_MAX,
"voip_client");
if (IS_ERR_OR_NULL((void *)v->shmem_info.sh_buf.client)) {
@@ -4686,6 +4691,10 @@
struct voice_data *v = voice_get_session(
common.voice[VOC_PATH_FULL].session_id);
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ return -EINVAL;
+ }
v->shmem_info.memtbl.client = msm_ion_client_create(UINT_MAX,
"voip_client");
if (IS_ERR_OR_NULL((void *)v->shmem_info.memtbl.client)) {