Merge "PM / devfreq: Make target flags unique"
diff --git a/Documentation/devicetree/bindings/arm/msm/cpr-regulator.txt b/Documentation/devicetree/bindings/arm/msm/cpr-regulator.txt
index 1b881f0..aa24dc6 100644
--- a/Documentation/devicetree/bindings/arm/msm/cpr-regulator.txt
+++ b/Documentation/devicetree/bindings/arm/msm/cpr-regulator.txt
@@ -203,21 +203,6 @@
- qcom,cpr-uplift-speed-bin: The speed bin value corresponding to one type of processor which needs to apply the
pvs voltage uplift workaround.
This is required if cpr-fuse-uplift-disable-sel is present.
-- qcom,cpr-quot-adjust-table: Array of triples in which each triple indicates the speed bin of the CPU, the virtual
- corner to use and the quotient adjustment.
- The 3 elements in one triple are:
- [0]: => the speed bin of the CPU.
- [1]: => the virtual voltage corner to use.
- [2]: => the quotient adjustment for the corresponding virtual corner.
- If the speed bin in a triple is equal to the speed bin of the CPU, the adjustment would
- be subtracted from the quotient value of the voltage corner when the CPU is running at
- that virtual corner. Each virtual corner value must be in the range 1 to the number of
- elements in qcom,cpr-corner-map.
-- qcom,cpr-corner-map: Array of elements of fuse corner value for each virtual corner.
- The location or 1-based index of an element in the list corresponds to
- the virtual corner value. For example, the first element in the list is the fuse corner
- value that virtual corner 1 maps to.
- This is required if qcom,cpr-quot-adjust-table is present.
- qcom,cpr-quotient-adjustment: Array of three elements of CPR quotient adjustments for each corner.
The 3 quotient adjustments with index[0..2] are:
[0] => amount to add to the SVS quotient
@@ -230,6 +215,42 @@
Not Present: No such regulator.
- vdd-apc-optional-sec-supply: Present: Regulator of second highest priority to supply VDD APC power.
Not Present: No such regulator.
+- qcom,cpr-speed-bin-max-corners: Array of quintuples in which each quintuple maps a CPU speed bin and PVS version to
+ the maximum virtual voltage corner corresponding to the SVS, NORMAL and TURBO corners.
+ The 5 elements in one quintuple are:
+ [0]: => the speed bin of the CPU.
+ [1]: => the PVS version of the CPU.
+ [2]: => the max virtual voltage corner value corresponding to SVS corner for this speed bin.
+ [3]: => the max virtual voltage corner value corresponding to NORMAL corner for this speed bin.
+ [4]: => the max virtual voltage corner value corresponding to TURBO corner for this speed bin.
+ No CPR target quotient scaling is applied on chips which have a speed bin + PVS version
+ pair that does not appear in one of the quintuples in this property. If the property is
+ specified, then quotient scaling is enabled for the TURBO corner. If this property is
+ not specified, then no quotient scaling can take place.
+- qcom,cpr-corner-map: Array of elements of fuse corner value for each virtual corner.
+ The location or 1-based index of an element in the list corresponds to
+ the virtual corner value. For example, the first element in the list is the fuse corner
+ value that virtual corner 1 maps to.
+ This property is required if qcom,cpr-speed-bin-max-corners is present.
+- qcom,cpr-corner-frequency-map: Array of tuples in which each tuple describes a corner to application processor frequency
+ mapping.
+ The 2 elements in one tuple are:
+ [0]: => a virtual voltage corner.
+ [1]: => the application processor frequency in Hz corresponding to the virtual corner.
+ This property is required if qcom,cpr-speed-bin-max-corners is present.
+- qcom,pvs-version-fuse-sel: Array of 4 elements to indicate where to read the pvs version of the processor,
+ and the fuse reading method.
+ The 4 elements with index[0..3] are:
+ [0]: => the fuse row number of the selector;
+ [1]: => LSB bit position of the bits;
+ [2]: => the number of bits;
+ [3]: => fuse reading method, 0 for direct reading or 1 for SCM reading.
+ This property is required if qcom,cpr-speed-bin-max-corners is present.
+- qcom,cpr-quot-adjust-scaling-factor-max: The maximum allowed CPR target quotient scaling factor to use when
+ calculating the quotient adjustment for a given virtual voltage corner. It
+ corresponds to 'scaling' in this equation:
+ quot_adjust = (freq_turbo - freq_corner) * scaling / 1000.
+ This property is required if qcom,cpr-speed-bin-max-corners is present.
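+				    As an illustration only (assuming the frequency
+				    difference is expressed in MHz, consistent with the
+				    example below): with freq_turbo = 1593.6 MHz,
+				    freq_corner = 1497.6 MHz and scaling = 650,
+				    quot_adjust = (1593.6 - 1497.6) * 650 / 1000 ~= 62.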
Example:
apc_vreg_corner: regulator@f9018000 {
@@ -319,9 +340,25 @@
qcom,cpr-uplift-speed-bin = <1>;
qcom,speed-bin-fuse-sel = <22 0 3 0>;
qcom,cpr-corner-map = <1 1 2 2 3 3 3 3 3 3 3 3>;
- qcom,cpr-quot-adjust-table = <1 1 0>, <1 2 0>, <1 3 0>,
- <1 4 0>, <1 5 450>, <1 6 375>,
- <1 7 300>, <1 8 225>, <1 9 187>,
- <1 10 150>, <1 11 75>, <1 12 0>;
+ qcom,cpr-corner-frequency-map =
+ <1 300000000>,
+ <2 384000000>,
+ <3 600000000>,
+ <4 787200000>,
+ <5 998400000>,
+ <6 1094400000>,
+ <7 1190400000>,
+ <8 1305600000>,
+ <9 1344000000>,
+ <10 1401600000>,
+ <11 1497600000>,
+ <12 1593600000>;
+ qcom,pvs-version-fuse-sel = <22 4 2 0>;
+ qcom,cpr-speed-bin-max-corners =
+ <0 1 2 4 7>,
+ <1 1 2 4 12>,
+ <2 1 2 4 10>,
+ <5 1 2 4 14>;
+ qcom,cpr-quot-adjust-scaling-factor-max = <650>;
};
diff --git a/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt b/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt
index 795af3b..2fbe4ca 100644
--- a/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt
+++ b/Documentation/devicetree/bindings/arm/msm/pm-8x60.txt
@@ -12,6 +12,7 @@
The required properties for PM-8x60 are:
- compatible: "qcom,pm-8x60"
+- qcom,lpm-levels: phandle for associated lpm_levels device.
The optional properties are:
@@ -39,4 +40,5 @@
reg = <0xfe800664 0x40>;
qcom,pc-mode = "tz_l2_int";
qcom,use-sync-timer;
+ qcom,lpm-levels = <&lpm_levels>;
};
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt
index cda437a..e8b02cf 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-ctrl.txt
@@ -6,7 +6,8 @@
Required properties:
- compatible: Must be "qcom,mdss-dsi-ctrl"
- cell-index: Specifies the controller used among the two controllers.
-- reg: offset and length of the register set for the device.
+- reg: Offset and length of the register region(s) for the device.
+- reg-names: A list of strings that map in order to the list of regs.
- vdd-supply: Phandle for vdd regulator device node.
- vddio-supply: Phandle for vdd-io regulator device node.
- vdda-supply: Phandle for vreg regulator device node.
@@ -52,7 +53,9 @@
compatible = "qcom,mdss-dsi-ctrl";
label = "MDSS DSI CTRL->0";
cell-index = <0>;
- reg = <0xfd922800 0x600>;
+ reg = <0xfd922800 0x600>,
+ <0xfd828000 0x108>;
+ reg-names = "dsi_phys", "mmss_misc_phys";
vdd-supply = <&pm8226_l15>;
vddio-supply = <&pm8226_l8>;
vdda-supply = <&pm8226_l4>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index ae6f8ef..02d6df9 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -254,6 +254,9 @@
mode. This master delay (t_init_delay as per DSI spec) should be sum
of DSI internal delay to reach fuctional after power up and minimum
delay required by panel to reach functional.
+- qcom,mdss-dsi-rx-eot-ignore: Boolean used to enable ignoring end of transmission packets.
+- qcom,mdss-dsi-tx-eot-append: Boolean used to enable appending end of transmission packets.
+- qcom,ulps-enabled: Boolean to enable support for Ultra Low Power State (ULPS) mode.
Note, if a given optional qcom,* binding is not present, then the driver will configure
the default values specified.
@@ -348,5 +351,8 @@
qcom,mdss-dsi-reset-sequence = <1 2>, <0 10>, <1 10>;
qcom,mdss-dsi-lp11-init;
qcom,mdss-dsi-init-delay-us = <100>;
+		qcom,mdss-dsi-rx-eot-ignore;
+		qcom,mdss-dsi-tx-eot-append;
+ qcom,ulps-enabled;
};
};
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index 656f3a4..ec5cfa5 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -71,6 +71,13 @@
This is used to override faulty hardware readings.
- qcom,strtstp-sleepwake: Boolean. Enables use of GPU SLUMBER instead of SLEEP for power savings
+- qcom,pm-qos-latency: Every time the GPU wakes up from sleep, the driver votes
+		      for an acceptable maximum latency with the pm-qos framework.
+		      This vote prevents the CPU from entering any power save
+		      state whose exit latency (the time to bring the CPU back
+		      to the normal state) is larger than this value.
+		      The value is in microseconds.
+
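+A rough sketch of how a driver could use this value with the pm-qos framework
+(illustrative only, not the actual KGSL code; "latency_us" stands for the value
+read from this property):
+
+	#include <linux/pm_qos.h>
+	#include <linux/types.h>
+
+	static struct pm_qos_request gpu_pm_qos_req;
+	static u32 latency_us;	/* value read from qcom,pm-qos-latency */
+
+	static void gpu_pm_qos_init_sketch(void)
+	{
+		/* at probe time: register with no constraint */
+		pm_qos_add_request(&gpu_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+				   PM_QOS_DEFAULT_VALUE);
+	}
+
+	static void gpu_wakeup_sketch(void)
+	{
+		/* keep the CPU out of power save states whose exit latency
+		 * exceeds the value (in microseconds) from the device tree */
+		pm_qos_update_request(&gpu_pm_qos_req, latency_us);
+	}
+
+	static void gpu_sleep_sketch(void)
+	{
+		/* drop the constraint again */
+		pm_qos_update_request(&gpu_pm_qos_req, PM_QOS_DEFAULT_VALUE);
+	}
+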
The following properties are optional as collecting data via coresight might
not be supported for every chipset. The documentation for coresight
properties can be found in:
diff --git a/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
index 8cef7f0..447c8c1 100644
--- a/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
+++ b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
@@ -13,11 +13,12 @@
Each partition is represented as a sub-node of the qcom,mtd-partitions device.
Each node's name represents the name of the corresponding partition.
-Required properties:
-- reg : The partition offset and size
-- label : The label / name for this partition.
+These properties are now completely optional as the partition information is
+available from the bootloader.
Optional properties:
+- reg : The partition offset and size
+- label : The label / name for this partition.
- read-only: This parameter, if present, indicates that this partition
should only be mounted read-only.
diff --git a/Documentation/devicetree/bindings/nfc/nfc-nci.txt b/Documentation/devicetree/bindings/nfc/nfc-nci.txt
index 2c06599..7af847c 100644
--- a/Documentation/devicetree/bindings/nfc/nfc-nci.txt
+++ b/Documentation/devicetree/bindings/nfc/nfc-nci.txt
@@ -12,13 +12,12 @@
- qcom,clk-src-gpio: msm gpio clock,used ony if clock source is msm gpio
- qcom,clk-req-gpio: clk-req input gpio for MSM based clocks.
not used for pmic implementation
-- vlogic-supply: LDO for power supply
- interrupt-parent: Should be phandle for the interrupt controller
that services interrupts for this device.
- interrupts: Nfc read interrupt,gpio-clk-req interrupt
- qcom,clk-gpio: pmic or msm gpio on which bbclk2 signal is coming.
-LDO example:
+Example:
i2c@f9925000 { /* BLSP-1 QUP-3 */
nfc-nci@e {
@@ -31,7 +30,6 @@
interrupt-parent = <&msmgpio>;
interrupts = <77 0>;
qcom,clk-gpio = <&msmgpio 75 0x00>;
- vlogic-supply = <&pm8110_l14>;
};
};
diff --git a/Documentation/devicetree/bindings/regulator/krait-regulator.txt b/Documentation/devicetree/bindings/regulator/krait-regulator.txt
index 7c661fe..004c4df 100644
--- a/Documentation/devicetree/bindings/regulator/krait-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/krait-regulator.txt
@@ -22,7 +22,12 @@
the phase scaling factor eFuse address.
- qcom,pfm-threshold The power coeff threshold in abstract power units below which
pmic will be made to operate in PFM mode.
-
+- qcom,phase-scaling-factor-bits-pos indicates bit position of scaling factor data within the efuse
+ register.
+- qcom,valid-scaling-factor-versions This is an array holding four boolean values and indicates whether
+ the version read from efuses is valid.
+ The version is a two bit field and the value read from hardware is
+ used as an index in this array to check for validity.
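+					For example, with qcom,valid-scaling-factor-versions =
+					<0 1 1 0>, versions 1 and 2 read from the efuse are
+					treated as valid while versions 0 and 3 are not.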
Optional properties:
- qcom,use-phase-switching indicates whether the driver should add/shed phases on the PMIC
ganged regulator as cpus are hotplugged.
@@ -70,6 +75,9 @@
qcom,use-phase-switching;
qcom,use-phase-scaling-factor;
qcom,pfm-threshold = <376975>;
+ qcom,phase-scaling-factor-bits-pos = <18>;
+ qcom,valid-scaling-factor-versions = <0 1 1 0>;
+
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/Documentation/devicetree/bindings/sound/voice-svc.txt b/Documentation/devicetree/bindings/sound/voice-svc.txt
new file mode 100644
index 0000000..deca7f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/voice-svc.txt
@@ -0,0 +1,11 @@
+* Voice Service binding
+
+Required properties:
+- compatible : "qcom,msm-voice-svc"
+
+Example:
+
+ qcom,msm-voice-svc {
+ compatible = "qcom,msm-voice-svc";
+ };
+
diff --git a/Documentation/devicetree/bindings/usb/ice40-hcd.txt b/Documentation/devicetree/bindings/usb/ice40-hcd.txt
new file mode 100644
index 0000000..43d24dc
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ice40-hcd.txt
@@ -0,0 +1,45 @@
+ICE40 FPGA based SPI-USB bridge
+
+Documentation/devicetree/bindings/spi/spi-bus.txt provides the details
+of the required and optional properties of a SPI slave device node.
+
+The purpose of this document is to provide the additional properties
+that are required to use the ICE40 FPGA based SPI slave device as a
+USB host controller.
+
+Required properties:
+- compatible : should be "lattice,ice40-spi-usb"
+- <supply-name>-supply: handle to the regulator device tree node
+	Required "supply-name"s are "core-vcc" and "spi-vcc"
+- reset-gpio: gpio used to assert the bridge chip reset
+- slave-select-gpio: gpio used to select the slave during configuration
+ loading
+- config-done-gpio: gpio used to indicate the configuration status
+- vcc-en-gpio: gpio used to enable the chip power supply
+
+Optional properties:
+- interrupts: IRQ lines used by this controller
+- clk-en-gpio: gpio used to enable the 19.2 MHz clock to the bridge
+ chip. If it is not present, assume that the clock is available on
+ the bridge chip board.
+- <supply-name>-supply: handle to the regulator device tree node
+ Optional "supply-name" is "gpio" used to power up the gpio bank
+ used by this device
+
+Example:
+
+ spi@f9923000 {
+ lattice,spi-usb@3 {
+ compatible = "lattice,ice40-spi-usb";
+ reg = <3>;
+ spi-max-frequency = <50000000>;
+ spi-cpol = <1>;
+ spi-cpha = <1>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <121 0x8>;
+ core-vcc-supply = <&pm8226_l2>;
+ spi-vcc-supply = <&pm8226_l5>;
+ lattice,reset-gpio = <&msmgpio 114 0>;
+ lattice,slave-select-gpio = <&msmgpio 118 0>;
+ lattice,config-done-gpio = <&msmgpio 115 0>;
+ lattice,vcc-en-gpio = <&msmgpio 117 0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index fd826d9..440dac1 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -8,6 +8,7 @@
apm Applied Micro Circuits Corporation (APM)
arm ARM Ltd.
atmel Atmel Corporation
+avago Avago Technologies
bosch Bosch Sensortec GmbH
capella Capella Microsystems, Inc.
cavium Cavium, Inc.
@@ -28,6 +29,7 @@
idt Integrated Device Technologies, Inc.
intercontrol Inter Control Group
invn InvenSense Inc.
+lattice Lattice Semiconductor.
linux Linux-specific binding
kionix Kionix Inc.
marvell Marvell Technology Group Ltd.
diff --git a/Documentation/usb/ice40-hcd.txt b/Documentation/usb/ice40-hcd.txt
new file mode 100644
index 0000000..54f845e
--- /dev/null
+++ b/Documentation/usb/ice40-hcd.txt
@@ -0,0 +1,247 @@
+Introduction
+============
+
+USB UICC connectivity is required for MSM8x12. This SoC has only 1 USB
+controller which is used for peripheral mode and charging. Hence an external
+USB host controller over SPI is used to connect a USB UICC card. An ICE40 FPGA
+based SPI to IC-USB (Inter-Chip USB) bridge chip is used.
+
+The ICE40 Host controller driver (ice40-hcd) is registered as a SPI protocol
+driver and interacts with the SPI subsystem on one side and interacts with the
+USB core on the other side.
+
+Hardware description
+====================
+
+The ICE40 devices are SRAM-based FPGAs. The SRAM memory cells are volatile,
+meaning that once power is removed from the device, its configuration is lost
+and must be reloaded on the next power-up. An on-chip non-volatile configuration
+memory or an external SPI flash is not used to store the configuration data due
+to increased power consumption. Instead, the software loads the configuration
+data through SPI interface after powering up the bridge chip. Once the
+configuration data is programmed successfully, the bridge chip will be ready for
+the USB host controller operations.
+
+The ICE40 device has an interrupt signal apart from the standard SPI signals
+CSn, SCLK, MOSI and MISO. It has support for 25 to 50 MHz frequencies. The
+maximum operating frequency during configuration loading is 25 MHz.
+
+The bridge chip requires two power supplies, SPI_VCC (1.8v - 3.3v) and VCC_CORE
+(1.2v). The SPI_VCC manages the SPI slave portion and VCC_CORE manages the USB
+serial engine (SIE) portion. It requires a 19.2 MHz reference clock and a
+32 MHz clock is required for remote wakeup detection during suspend.
+
+The configuration loading sequence:
+
+- Assert the RSTn pin. This keeps the bridge chip in reset state after downloading
+the configuration data.
+- The bridge chip samples the SPI interface chip select pin during power-up and
+enters SPI slave mode if it is low. Drive the chip select pin low before
+powering up the bridge chip.
+- Power-up the bridge chip by enabling SPI_VCC and VCC_CORE.
+- De-assert the chip select pin after 50 usec.
+- Transfer the configuration data over SPI. Note that the bridge chip requires
+49 dummy clock cycles after sending the data.
+- The bridge chip indicates the status of the configuration loading via config
+done pin. It may take 50 usec to assert this pin.
+
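+A minimal sketch of this sequence (illustrative only; the gpio/regulator handles
+and the 7 dummy bytes used to supply the trailing 49+ clock cycles are
+assumptions, not the actual driver code):
+
+	#include <linux/delay.h>
+	#include <linux/errno.h>
+	#include <linux/gpio.h>
+	#include <linux/regulator/consumer.h>
+	#include <linux/spi/spi.h>
+
+	static int ice40_load_config_sketch(struct spi_device *spi,
+					    int reset_gpio, int cs_gpio,
+					    int cfg_done_gpio,
+					    struct regulator *spi_vcc,
+					    struct regulator *core_vcc,
+					    const u8 *cfg, size_t len)
+	{
+		static const u8 dummy[7];		/* >= 49 extra clock cycles */
+		int ret;
+
+		gpio_direction_output(reset_gpio, 0);	/* assert RSTn */
+		gpio_direction_output(cs_gpio, 0);	/* CS low before power-up */
+
+		ret = regulator_enable(spi_vcc);	/* SPI_VCC */
+		if (!ret)
+			ret = regulator_enable(core_vcc);	/* VCC_CORE */
+		if (ret)
+			return ret;
+
+		udelay(50);
+		gpio_set_value(cs_gpio, 1);		/* de-assert CS after 50 usec */
+
+		ret = spi_write(spi, cfg, len);		/* configuration data */
+		if (!ret)
+			ret = spi_write(spi, dummy, sizeof(dummy));
+		if (ret)
+			return ret;
+
+		udelay(50);				/* config done may take 50 usec */
+		return gpio_get_value(cfg_done_gpio) ? 0 : -ENODEV;
+	}
+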
+The 19.2 MHz clock should be supplied before de-asserting the RSTn pin. A PLL
+is used to generate a 48 MHz clock signal, from which a 12 MHz clock is derived
+by a divider. When the PLLOK bit is set in the USB Transfer Result register, it
+indicates that the PLL output is locked to the input reference clock. When it
+is 0, it indicates that the PLL is out of lock. It is recommended to assert the
+RSTn pin to re-synchronize the PLL to the reference clock when the PLL loses
+lock. The chip will be ready for the USB host controller operations after it is
+brought out of reset and PLL is synchronized to the reference clock.
+
+The software is responsible for initiating all the USB host transfers by writing
+the associated registers. The SIE in the bridge chip performs the USB host
+operations via the IC-USB bus based on the registers set by the software. The
+USB transfer results as well as the bus status like the peripheral connection,
+disconnection, resume, etc. are notified to software through the interrupt and
+the internal registers.
+
+The bridge chip provides the DP & DM pull-down resistor control to the software.
+The pull-down resistors are enabled automatically after the power up to force
+the SE0 condition on the bus. The software is required to disable these
+resistors before driving the reset on the bus. Control, Bulk and Interrupt
+transfers are supported. The data toggling states are not maintained in the
+hardware and should be serviced by the software. The bridge chip returns
+one of the following values for a USB transaction (SETUP/IN/OUT) via Transfer
+result register.
+
+xSUCCESS: Successful transfer.
+xBUSY: The SIE is busy with a USB transfer.
+xPKTERR: Packet Error (stuff, EOP).
+xPIDERR: PID check bits are incorrect.
+xNAK: Device returned NAK. This is not an error condition for IN/OUT. But it
+is an error condition for SETUP.
+xSTALL: Device returned STALL.
+xWRONGPID: Wrong PID is received. For example, an IN transaction is attempted on
+an OUT endpoint.
+xCRCERR: CRC error.
+xTOGERR: Toggle bit error. The SIE returns ACK when the toggle mismatch happens
+for IN transaction and returns this error code. Software should discard the
+data as it was received already in the previous transaction.
+xBADLEN: The received packet size is too big.
+xTIMEOUT: Device failed to respond in time.
+
+Software description
+====================
+
+This driver is compiled as a module and is loaded by the userspace after
+getting the UICC card insertion event from the modem processor. The module is
+unloaded upon the UICC card removal.
+
+This driver registers as a SPI protocol driver. The SPI controller driver
+manages the chip select pin. This pin needs to be driven low before powering
+up the bridge chip. Hence the pin settings are overridden temporarily during
+the bridge chip power-up sequence. The original settings are restored before
+sending the configuration data to the bridge chip which acts as a SPI slave.
+Both the pinctrl and gpiomux frameworks allow this type of use case.
+
+The configuration data file is stored on the eMMC card. Firmware class API
+request_firmware() is used to read the configuration data file. The
+configuration data is then sent to the bridge chip via SPI interface. The
+bridge chip asserts the config done pin once the configuration is completed.
+
+The driver registers as a Full Speed (USB 1.1) HCD. The following methods
+are implemented that are part of the hc_driver struct:
+
+reset: It is called one time by the core during HCD registration. The
+default address 0 is programmed and the line state is sampled to check if any
+device is connected. If any device is connected, the port flags are updated
+accordingly. As the module is loaded after the UICC card is inserted, the
+device would be present at this time.
+
+start: This method is called one time by the core during HCD registration.
+The bridge chip is programmed to transmit the SOFs.
+
+stop: This method is called one time by the core during HCD deregistration.
+The bridge chip is programmed to stop transmitting the SOFs.
+
+hub_control: This method is called by the core to manage the Root HUB. The
+hardware does not maintain port state. The software maintains the port
+state and provides the information to the core when required. The following
+HUB class requests are supported.
+
+- GetHubDescriptor: The HUB descriptor is sent to the core. Only 1 port
+is present. Over current protection and port power control are not supported.
+- SetPortFeature: The device reset and suspend are supported. The DP & DM
+pull-down resistors are disabled before driving the reset as per the IC-USB
+spec. The reset signaling is stopped when the core queries the port status.
+- GetPortStatus: The device connection status is sent to the core. If a reset
+is in progress, it is stopped before returning the port status.
+- ClearPortFeature: The device resume (clear suspend) is supported.
+
+urb_enqueue: This method is called by the core to initiate a USB Control/Bulk
+transfer. If the endpoint private context is not present, it will be created to
+hold the endpoint number, host endpoint structure, transaction error count, halt
+state and unlink state. The URB is attached to the endpoint URB list. If the
+endpoint is not active, it is attached to the asynchronous schedule list and the
+work is scheduled to traverse this list. The traversal algorithm is explained
+later in this document.
+
+urb_dequeue: This method is called by the core when an URB is unlinked. If the
+endpoint is not active, the URB is unlinked immediately. Otherwise the endpoint
+is marked for unlink and the URB is unlinked from the asynchronous schedule work.
+
+bus_suspend: This method is called by the core during root hub suspend. The SOFs
+are already stopped during the port suspend which happens before root hub
+suspend. Assert the RSTn pin to put the bridge chip in reset state and stop XO
+(19.2 MHz) clock.
+
+bus_resume: This method is called by the core during root hub resume. Turn on
+the XO clock and de-assert the RSTn signal to bring the chip out of reset.
+
+endpoint_disable: This method is called by the core during the device
+disconnect. All the URBs are unlinked by this time, so the endpoint private
+structure is freed.
+
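+For reference, the mapping of these methods onto the kernel's hc_driver
+structure can be sketched as below (the ice40_* names are placeholders, not the
+actual driver symbols; other callbacks such as hub_status_data are omitted):
+
+	#include <linux/usb.h>
+	#include <linux/usb/hcd.h>
+
+	int  ice40_reset(struct usb_hcd *hcd);
+	int  ice40_start(struct usb_hcd *hcd);
+	void ice40_stop(struct usb_hcd *hcd);
+	int  ice40_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+			       gfp_t mem_flags);
+	int  ice40_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
+	void ice40_endpoint_disable(struct usb_hcd *hcd,
+				    struct usb_host_endpoint *ep);
+	int  ice40_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+			       u16 wIndex, char *buf, u16 wLength);
+	int  ice40_bus_suspend(struct usb_hcd *hcd);
+	int  ice40_bus_resume(struct usb_hcd *hcd);
+
+	static const struct hc_driver ice40_hc_driver_sketch = {
+		.description		= "ice40-hcd",
+		.product_desc		= "ICE40 SPI-USB bridge",
+		.flags			= HCD_USB11,
+		.reset			= ice40_reset,
+		.start			= ice40_start,
+		.stop			= ice40_stop,
+		.urb_enqueue		= ice40_urb_enqueue,
+		.urb_dequeue		= ice40_urb_dequeue,
+		.endpoint_disable	= ice40_endpoint_disable,
+		.hub_control		= ice40_hub_control,
+		.bus_suspend		= ice40_bus_suspend,
+		.bus_resume		= ice40_bus_resume,
+	};
+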
+Asynchronous scheduling:
+
+All the active endpoints are queued to the asynchronous schedule list. A worker
+thread iterates over this circular list and process the URBs. Processing an URB
+involves initiating multiple SETUP/IN/OUT transactions and checking the result.
+After receiving the DATA/ACK, the toggle bit is inverted.
+
+A URB is finished when any of the following events occur:
+
+- The entire data is transferred for an OUT endpoint or a short packet is received
+for an IN endpoint.
+- The endpoint is stalled by the device. -EPIPE is returned.
+- A transaction error occurs 3 consecutive times. -EPROTO is returned.
+- A NAK received for a SETUP transaction.
+- The URB is unlinked.
+
+The next transaction is issued on the next endpoint (if available) irrespective
+of the result of the current transaction. But the IN/OUT transaction of data
+or status phase is attempted immediately after the SETUP transaction for a
+control endpoint. If a NAK is received for this transaction, the control
+transfer is resumed next time when the control endpoint is encountered in the
+asynchronous schedule list. This is to give the control transfers priority
+over the bulk transfers.
+
+The endpoint is marked as halted when a URB is finished due to transaction
+errors or stall condition. The halted endpoint is removed from the asynchronous
+schedule list. It will be added again next time when a URB is enqueued on this
+endpoint.
+
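+The schedule traversal described above can be outlined roughly as follows
+(illustrative only; ice40_issue_one_transaction() and the ice40_ep_sketch
+structure are placeholders, not the actual driver code):
+
+	#include <linux/errno.h>
+	#include <linux/list.h>
+	#include <linux/types.h>
+
+	struct ice40_ep_sketch {
+		struct list_head schedule_link;	/* node on the async schedule */
+		struct list_head urb_list;	/* pending URBs on this endpoint */
+		unsigned int	 error_count;	/* consecutive transaction errors */
+		bool		 halted;
+	};
+
+	int ice40_issue_one_transaction(struct ice40_ep_sketch *ep);
+
+	static void ice40_async_work_sketch(struct list_head *async_schedule)
+	{
+		struct ice40_ep_sketch *ep, *tmp;
+
+		list_for_each_entry_safe(ep, tmp, async_schedule, schedule_link) {
+			int ret = ice40_issue_one_transaction(ep);
+
+			/* A halted endpoint leaves the schedule until a new
+			 * URB is enqueued on it.
+			 */
+			if (ret == -EPIPE || ep->error_count >= 3) {
+				ep->halted = true;
+				list_del_init(&ep->schedule_link);
+			}
+		}
+	}
+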
+This driver provides a debugfs interface and exports a file called "command"
+under the <debugfs root>/ice40 directory. The following strings can be echoed
+to this file.
+
+"poll": If the device is connected after the module is loaded, it will not be
+detected automatically. The bus is sampled when this string is echoed. If a
+device is connected, port flags are updated and core is notified about the
+device connect event.
+
+"rwtest": Function Address register is written and read back to validate the
+contents. This should NOT be used while the USB device is connected. This is
+strictly for debugging purposes.
+
+"dump": Dumps all the register values to the kernel log buffer.
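+
+For example, assuming debugfs is mounted at /sys/kernel/debug:
+
+	echo poll > /sys/kernel/debug/ice40/command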
+
+Design Goals:
+=============
+
+- Handle errors gracefully. Implement a retry mechanism for transaction errors
+and memory failures. Mark the HCD as dead for serious errors like SPI transaction
+errors to avoid further interactions with the attached USB device.
+- Keep the asynchronous schedule algorithm simple and efficient. Take advantage
+of the static configuration of the USB device. UICC cards have only CCID and
+Mass Storage interfaces. These interface protocols allow only 1 active transfer
+on either the IN or OUT endpoint.
+- Add trace points to capture USB transactions.
+
+Driver parameters
+=================
+
+The driver is compiled as a module and it accepts the configuration data file
+name as a module param called "firmware". The default configuration file name
+is "ice40.bin".
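+
+For example (the module object and firmware file names here are placeholders):
+
+	insmod ice40-hcd.ko firmware=my_ice40_config.bin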
+
+Config options
+==============
+
+Set CONFIG_USB_ICE40_HCD to m to compile this driver as a module. The driver
+should not be compiled statically, because the configuration data is not
+available during kernel boot.
+
+To do
+=====
+
+- The bridge chip has 2 IN FIFOs and 2 OUT FIFOs. Implement double buffering.
+- The bridge chip has an interrupt to indicate the transaction (IN/OUT)
+completion. The current implementation uses polling for simplicity and to avoid
+interrupt latencies. Evaluate interrupt approach.
+- The bridge chip can be completely power collapsed during suspend to avoid
+leakage currents. As the bridge chip does not have any non-volatile memory,
+the configuration data needs to be loaded during resume. This method has higher
+power savings with higher resume latencies. Evaluate this approach.
+- Implement Interrupt transfers if required.
+- The request_firmware() API copies the configuration data file to the kernel
+virtual memory. This memory can't be used for DMA. The current implementation
+copies this data into contiguous physical memory which is allocated via
+kmalloc. If this memory allocation fails, try to allocate multiple pages
+and submit the SPI message with multiple transfers.
diff --git a/arch/arm/boot/dts/msm8226-gpu.dtsi b/arch/arm/boot/dts/msm8226-gpu.dtsi
index fd20d8c..d1c3264 100644
--- a/arch/arm/boot/dts/msm8226-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8226-gpu.dtsi
@@ -47,6 +47,9 @@
/* IOMMU Data */
iommu = <&kgsl_iommu>;
+ /* CPU latency parameter */
+ qcom,pm-qos-latency = <701>;
+
/* Power levels */
qcom,gpu-pwrlevels {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/msm8226-mdss.dtsi b/arch/arm/boot/dts/msm8226-mdss.dtsi
index 375c5df..2176d39 100644
--- a/arch/arm/boot/dts/msm8226-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8226-mdss.dtsi
@@ -95,7 +95,9 @@
compatible = "qcom,mdss-dsi-ctrl";
label = "MDSS DSI CTRL->0";
cell-index = <0>;
- reg = <0xfd922800 0x600>;
+ reg = <0xfd922800 0x600>,
+ <0xfd828000 0x108>;
+ reg-names = "dsi_phys", "mmss_misc_phys";
qcom,mdss-fb-map = <&mdss_fb0>;
qcom,mdss-mdp = <&mdss_mdp>;
vdd-supply = <&pm8226_l15>;
diff --git a/arch/arm/boot/dts/msm8226-regulator.dtsi b/arch/arm/boot/dts/msm8226-regulator.dtsi
index 5e890d3..78e1a63 100644
--- a/arch/arm/boot/dts/msm8226-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8226-regulator.dtsi
@@ -78,7 +78,7 @@
vdd-apc-supply = <&pm8226_s2>;
vdd-mx-supply = <&pm8226_l3_ao>;
- qcom,vdd-mx-vmax = <1350000>;
+ qcom,vdd-mx-vmax = <1337500>;
qcom,vdd-mx-vmin-method = <1>;
qcom,cpr-ref-clk = <19200>;
@@ -109,7 +109,7 @@
qcom,cpr-fuse-uplift-sel = <22 53 1 0 0>;
qcom,cpr-uplift-voltage = <50000>;
qcom,cpr-uplift-quotient = <0 0 120>;
- qcom,cpr-uplift-max-volt = <1350000>;
+ qcom,cpr-uplift-max-volt = <1330000>;
qcom,cpr-uplift-speed-bin = <1>;
qcom,speed-bin-fuse-sel = <22 0 3 0>;
};
diff --git a/arch/arm/boot/dts/msm8226-v1-pm.dtsi b/arch/arm/boot/dts/msm8226-v1-pm.dtsi
index d59fab3..10aff70 100644
--- a/arch/arm/boot/dts/msm8226-v1-pm.dtsi
+++ b/arch/arm/boot/dts/msm8226-v1-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -105,7 +105,7 @@
qcom,L2-spm-is-apcs-master;
};
- qcom,lpm-levels {
+ lpm_levels: qcom,lpm-levels {
compatible = "qcom,lpm-levels";
qcom,default-l2-state = "l2_cache_active";
#address-cells = <1>;
@@ -300,6 +300,7 @@
qcom,pc-resets-timer;
qcom,cpus-as-clocks;
qcom,synced-clocks;
+ qcom,lpm-levels = <&lpm_levels>;
};
qcom,cpu-sleep-status@f9088008{
diff --git a/arch/arm/boot/dts/msm8226-v2-1080p-cdp.dts b/arch/arm/boot/dts/msm8226-v2-1080p-cdp.dts
index 77cc08c..d48f8b6 100644
--- a/arch/arm/boot/dts/msm8226-v2-1080p-cdp.dts
+++ b/arch/arm/boot/dts/msm8226-v2-1080p-cdp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,6 @@
qcom,board-id = <1 2>;
};
-&hsic_host {
+&smsc_hub {
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8226-v2-720p-cdp.dts b/arch/arm/boot/dts/msm8226-v2-720p-cdp.dts
index 966ae2b..f73bac0 100644
--- a/arch/arm/boot/dts/msm8226-v2-720p-cdp.dts
+++ b/arch/arm/boot/dts/msm8226-v2-720p-cdp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,6 @@
qcom,board-id = <1 0>;
};
-&hsic_host {
+&smsc_hub {
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8226-v2-pm.dtsi b/arch/arm/boot/dts/msm8226-v2-pm.dtsi
index bc8fe5d..7af2c7f 100644
--- a/arch/arm/boot/dts/msm8226-v2-pm.dtsi
+++ b/arch/arm/boot/dts/msm8226-v2-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -107,7 +107,7 @@
qcom,L2-spm-is-apcs-master;
};
- qcom,lpm-levels {
+ lpm_levels: qcom,lpm-levels {
compatible = "qcom,lpm-levels";
qcom,default-l2-state = "l2_cache_active";
#address-cells = <1>;
@@ -312,6 +312,7 @@
qcom,pc-resets-timer;
qcom,cpus-as-clocks;
qcom,synced-clocks;
+ qcom,lpm-levels = <&lpm_levels>;
};
qcom,cpu-sleep-status@f9088008{
diff --git a/arch/arm/boot/dts/msm8226-v2.dtsi b/arch/arm/boot/dts/msm8226-v2.dtsi
index 6215740..14fe237 100644
--- a/arch/arm/boot/dts/msm8226-v2.dtsi
+++ b/arch/arm/boot/dts/msm8226-v2.dtsi
@@ -37,14 +37,28 @@
qcom,cpr-up-threshold = <0>;
qcom,cpr-down-threshold = <5>;
qcom,cpr-corner-map = <1 1 2 2 3 3 3 3 3 3 3 3 3 3>;
- qcom,cpr-quot-adjust-table =
- <1 5 450>,
- <1 6 375>,
- <1 7 300>,
- <1 8 225>,
- <1 9 187>,
- <1 10 150>,
- <1 11 75>;
+ qcom,pvs-version-fuse-sel = <22 4 2 0>;
+ qcom,cpr-corner-frequency-map =
+ <1 300000000>,
+ <2 384000000>,
+ <3 600000000>,
+ <4 787200000>,
+ <5 998400000>,
+ <6 1094400000>,
+ <7 1190400000>,
+ <8 1305600000>,
+ <9 1344000000>,
+ <10 1401600000>,
+ <11 1497600000>,
+ <12 1593600000>,
+ <13 1689600000>,
+ <14 1785600000>;
+ qcom,cpr-speed-bin-max-corners =
+ <0 2 2 4 7>,
+ <1 2 2 4 12>,
+ <2 2 2 4 10>,
+ <5 2 2 4 14>;
+ qcom,cpr-quot-adjust-scaling-factor-max = <650>;
};
&msm_gpu {
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index 9113259..4117d9d 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -302,35 +302,43 @@
qcom,streaming-func = "rndis";
};
- hsic_host: hsic@f9a00000 {
+ smsc_hub: hsic_hub {
status = "disabled";
- compatible = "qcom,hsic-host";
- reg = <0xf9a00000 0x400>;
- #address-cells = <0>;
- interrupt-parent = <&hsic_host>;
- interrupts = <0 1 2>;
- #interrupt-cells = <1>;
- interrupt-map-mask = <0xffffffff>;
- interrupt-map = <0 &intc 0 136 0
- 1 &intc 0 148 0
- 2 &msmgpio 115 0x8>;
- interrupt-names = "core_irq", "async_irq", "wakeup";
- hsic_vdd_dig-supply = <&pm8226_s1_corner>;
- HSIC_GDSC-supply = <&gdsc_usb_hsic>;
- hsic,strobe-gpio = <&msmgpio 115 0x00>;
- hsic,data-gpio = <&msmgpio 116 0x00>;
- hsic,ignore-cal-pad-config;
- hsic,strobe-pad-offset = <0x2050>;
- hsic,data-pad-offset = <0x2054>;
- qcom,phy-susp-sof-workaround;
- hsic,vdd-voltage-level = <1 5 7>;
+ compatible = "qcom,hsic-smsc-hub";
+ smsc,model-id = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
- qcom,msm-bus,name = "hsic";
- qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,num-paths = <1>;
- qcom,msm-bus,vectors-KBps =
+ hsic_host: hsic@f9a00000 {
+ compatible = "qcom,hsic-host";
+ reg = <0xf9a00000 0x400>;
+ #address-cells = <0>;
+ interrupt-parent = <&hsic_host>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 136 0
+ 1 &intc 0 148 0
+ 2 &msmgpio 115 0x8>;
+ interrupt-names = "core_irq", "async_irq", "wakeup";
+ hsic_vdd_dig-supply = <&pm8226_s1_corner>;
+ HSIC_GDSC-supply = <&gdsc_usb_hsic>;
+ hsic,strobe-gpio = <&msmgpio 115 0x00>;
+ hsic,data-gpio = <&msmgpio 116 0x00>;
+ hsic,ignore-cal-pad-config;
+ hsic,strobe-pad-offset = <0x2050>;
+ hsic,data-pad-offset = <0x2054>;
+ qcom,phy-susp-sof-workaround;
+ hsic,vdd-voltage-level = <1 5 7>;
+
+ qcom,msm-bus,name = "hsic";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
<85 512 0 0>,
<85 512 40000 160000>;
+ };
};
wcd9xxx_intc: wcd9xxx-irq {
@@ -491,6 +499,10 @@
compatible = "qti,msm-pcm-loopback";
};
+ qcom,msm-voice-svc {
+ compatible = "qcom,msm-voice-svc";
+ };
+
qcom,msm-dai-q6 {
compatible = "qcom,msm-dai-q6";
qcom,msm-dai-q6-sb-0-rx {
@@ -1256,8 +1268,8 @@
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
- <1 618 0 0>,
- <1 618 0 800>;
+ <54 618 0 0>,
+ <54 618 0 800>;
};
qcom,tz-log@fe805720 {
diff --git a/arch/arm/boot/dts/msm8610-gpu.dtsi b/arch/arm/boot/dts/msm8610-gpu.dtsi
index de480df..480ec11 100644
--- a/arch/arm/boot/dts/msm8610-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8610-gpu.dtsi
@@ -46,6 +46,9 @@
/* IOMMU Data */
iommu = <&gfx_iommu>;
+ /* CPU latency parameter */
+ qcom,pm-qos-latency = <701>;
+
/* Power levels */
qcom,gpu-pwrlevels {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/msm8610-v1-pm.dtsi b/arch/arm/boot/dts/msm8610-v1-pm.dtsi
index dc1dc8b..adc66d7 100644
--- a/arch/arm/boot/dts/msm8610-v1-pm.dtsi
+++ b/arch/arm/boot/dts/msm8610-v1-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -105,7 +105,7 @@
qcom,L2-spm-is-apcs-master;
};
- qcom,lpm-levels {
+ lpm_levels: qcom,lpm-levels {
compatible = "qcom,lpm-levels";
qcom,default-l2-state = "l2_cache_active";
#address-cells = <1>;
@@ -296,6 +296,7 @@
qcom,pc-resets-timer;
qcom,cpus-as-clocks;
qcom,synced-clocks;
+ qcom,lpm-levels = <&lpm_levels>;
};
qcom,cpu-sleep-status@f9088008{
diff --git a/arch/arm/boot/dts/msm8610-v2-pm.dtsi b/arch/arm/boot/dts/msm8610-v2-pm.dtsi
index 2859744..b69b061 100644
--- a/arch/arm/boot/dts/msm8610-v2-pm.dtsi
+++ b/arch/arm/boot/dts/msm8610-v2-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -107,7 +107,7 @@
qcom,L2-spm-is-apcs-master;
};
- qcom,lpm-levels {
+ lpm_levels: qcom,lpm-levels {
compatible = "qcom,lpm-levels";
qcom,default-l2-state = "l2_cache_active";
#address-cells = <1>;
@@ -308,6 +308,7 @@
qcom,pc-resets-timer;
qcom,cpus-as-clocks;
qcom,synced-clocks;
+ qcom,lpm-levels = <&lpm_levels>;
};
qcom,cpu-sleep-status@f9088008{
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index f152ceb..43cd7c6 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -606,6 +606,7 @@
qcom,gpio-miso = <&msmgpio 87 0>;
qcom,gpio-clk = <&msmgpio 89 0>;
qcom,gpio-cs0 = <&msmgpio 88 0>;
+ qcom,gpio-cs2 = <&msmgpio 85 0>;
qcom,infinite-mode = <0>;
qcom,use-bam;
@@ -613,6 +614,21 @@
qcom,bam-consumer-pipe-index = <18>;
qcom,bam-producer-pipe-index = <19>;
qcom,master-id = <86>;
+
+ lattice,spi-usb@2 {
+ compatible = "lattice,ice40-spi-usb";
+ reg = <2>;
+ spi-max-frequency = <50000000>;
+ spi-cpol = <1>;
+ spi-cpha = <1>;
+ core-vcc-supply = <&pm8110_l2>;
+ spi-vcc-supply = <&pm8110_l6>;
+ gpio-supply = <&pm8110_l22>;
+ lattice,reset-gpio = <&msmgpio 95 0>;
+ lattice,slave-select-gpio = <&msmgpio 85 0>;
+ lattice,config-done-gpio = <&msmgpio 94 0>;
+ lattice,vcc-en-gpio = <&msmgpio 96 0>;
+ };
};
qcom,pronto@fb21b000 {
@@ -765,6 +781,11 @@
compatible = "qcom,msm-dai-q6-dev";
qcom,msm-dai-q6-dev-id = <32773>;
};
+
+ qcom,msm-dai-q6-incall-music-2-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <32770>;
+ };
};
qcom,msm-pcm-hostless {
diff --git a/arch/arm/boot/dts/msm8926-v1-1080p-cdp.dts b/arch/arm/boot/dts/msm8926-v1-1080p-cdp.dts
index 1b6f971..1829a8e 100644
--- a/arch/arm/boot/dts/msm8926-v1-1080p-cdp.dts
+++ b/arch/arm/boot/dts/msm8926-v1-1080p-cdp.dts
@@ -33,6 +33,6 @@
};
-&hsic_host {
+&smsc_hub {
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8926-v1-720p-cdp.dts b/arch/arm/boot/dts/msm8926-v1-720p-cdp.dts
index 37da01c..2217f15 100644
--- a/arch/arm/boot/dts/msm8926-v1-720p-cdp.dts
+++ b/arch/arm/boot/dts/msm8926-v1-720p-cdp.dts
@@ -33,6 +33,6 @@
};
-&hsic_host {
+&smsc_hub {
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8926-v2-1080p-cdp.dts b/arch/arm/boot/dts/msm8926-v2-1080p-cdp.dts
index a4ebbe1..7ab37cd 100644
--- a/arch/arm/boot/dts/msm8926-v2-1080p-cdp.dts
+++ b/arch/arm/boot/dts/msm8926-v2-1080p-cdp.dts
@@ -33,6 +33,6 @@
};
-&hsic_host {
+&smsc_hub {
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8926-v2-1080p-ext-buck-cdp.dts b/arch/arm/boot/dts/msm8926-v2-1080p-ext-buck-cdp.dts
index 64e872b..d38b53f 100644
--- a/arch/arm/boot/dts/msm8926-v2-1080p-ext-buck-cdp.dts
+++ b/arch/arm/boot/dts/msm8926-v2-1080p-ext-buck-cdp.dts
@@ -33,6 +33,6 @@
};
-&hsic_host {
+&smsc_hub {
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8926-v2-1080p-ext-buck-mtp.dts b/arch/arm/boot/dts/msm8926-v2-1080p-ext-buck-mtp.dts
index f9a3cd8..0a33976 100644
--- a/arch/arm/boot/dts/msm8926-v2-1080p-ext-buck-mtp.dts
+++ b/arch/arm/boot/dts/msm8926-v2-1080p-ext-buck-mtp.dts
@@ -21,3 +21,12 @@
compatible = "qcom,msm8926-mtp", "qcom,msm8926", "qcom,mtp";
qcom,board-id = <8 3>;
};
+
+&smsc_hub {
+ status = "ok";
+ smsc,model-id = <3502>;
+ smsc,reset-gpio = <&msmgpio 114 0x00>;
+ smsc,int-gpio = <&msmgpio 9 0x00>;
+ smsc,xo-clk-gpio = <&msmgpio 8 0x00>;
+ hub-int-supply = <&pm8226_l6>;
+};
diff --git a/arch/arm/boot/dts/msm8926-v2-1080p-mtp.dts b/arch/arm/boot/dts/msm8926-v2-1080p-mtp.dts
index 1f0bab1..2466f8b 100644
--- a/arch/arm/boot/dts/msm8926-v2-1080p-mtp.dts
+++ b/arch/arm/boot/dts/msm8926-v2-1080p-mtp.dts
@@ -21,3 +21,12 @@
compatible = "qcom,msm8926-mtp", "qcom,msm8926", "qcom,mtp";
qcom,board-id = <8 2>;
};
+
+&smsc_hub {
+ status = "ok";
+ smsc,model-id = <3502>;
+ smsc,reset-gpio = <&msmgpio 114 0x00>;
+ smsc,int-gpio = <&msmgpio 9 0x00>;
+ smsc,xo-clk-gpio = <&msmgpio 8 0x00>;
+ hub-int-supply = <&pm8226_l6>;
+};
diff --git a/arch/arm/boot/dts/msm8926-v2-720p-cdp.dts b/arch/arm/boot/dts/msm8926-v2-720p-cdp.dts
index 1e6e197..5c5ad89 100644
--- a/arch/arm/boot/dts/msm8926-v2-720p-cdp.dts
+++ b/arch/arm/boot/dts/msm8926-v2-720p-cdp.dts
@@ -33,6 +33,6 @@
};
-&hsic_host {
+&smsc_hub {
status = "ok";
};
diff --git a/arch/arm/boot/dts/msm8926-v2-720p-mtp.dts b/arch/arm/boot/dts/msm8926-v2-720p-mtp.dts
index 59ad506..2c577cd 100644
--- a/arch/arm/boot/dts/msm8926-v2-720p-mtp.dts
+++ b/arch/arm/boot/dts/msm8926-v2-720p-mtp.dts
@@ -21,3 +21,12 @@
compatible = "qcom,msm8926-mtp", "qcom,msm8926", "qcom,mtp";
qcom,board-id = <8 0>;
};
+
+&smsc_hub {
+ status = "ok";
+ smsc,model-id = <3502>;
+ smsc,reset-gpio = <&msmgpio 114 0x00>;
+ smsc,int-gpio = <&msmgpio 9 0x00>;
+ smsc,xo-clk-gpio = <&msmgpio 8 0x00>;
+ hub-int-supply = <&pm8226_l6>;
+};
diff --git a/arch/arm/boot/dts/msm8926.dtsi b/arch/arm/boot/dts/msm8926.dtsi
index 8a0e5c4..e866286 100644
--- a/arch/arm/boot/dts/msm8926.dtsi
+++ b/arch/arm/boot/dts/msm8926.dtsi
@@ -138,15 +138,29 @@
regulator-min-microvolt = <1>;
regulator-max-microvolt = <14>;
qcom,cpr-corner-map = <1 1 2 2 3 3 3 3 3 3 3 3 3 3>;
- qcom,cpr-quot-adjust-table =
- <1 5 450>,
- <1 6 375>,
- <1 7 300>,
- <1 8 225>,
- <1 9 187>,
- <1 10 150>,
- <1 11 75>;
qcom,cpr-quotient-adjustment = <0 72 72>;
+ qcom,pvs-version-fuse-sel = <22 4 2 0>;
+ qcom,cpr-corner-frequency-map =
+ <1 300000000>,
+ <2 384000000>,
+ <3 600000000>,
+ <4 787200000>,
+ <5 998400000>,
+ <6 1094400000>,
+ <7 1190400000>,
+ <8 1305600000>,
+ <9 1344000000>,
+ <10 1401600000>,
+ <11 1497600000>,
+ <12 1593600000>,
+ <13 1689600000>,
+ <14 1785600000>;
+ qcom,cpr-speed-bin-max-corners =
+ <0 1 2 4 7>,
+ <1 1 2 4 12>,
+ <2 1 2 4 10>,
+ <5 1 2 4 14>;
+ qcom,cpr-quot-adjust-scaling-factor-max = <650>;
};
&tsens {
diff --git a/arch/arm/boot/dts/msm8974-mdss.dtsi b/arch/arm/boot/dts/msm8974-mdss.dtsi
index 7f63234..0e72446 100644
--- a/arch/arm/boot/dts/msm8974-mdss.dtsi
+++ b/arch/arm/boot/dts/msm8974-mdss.dtsi
@@ -113,7 +113,9 @@
compatible = "qcom,mdss-dsi-ctrl";
label = "MDSS DSI CTRL->0";
cell-index = <0>;
- reg = <0xfd922800 0x600>;
+ reg = <0xfd922800 0x600>,
+ <0xfdf30000 0x108>;
+ reg-names = "dsi_phys", "mmss_misc_phys";
vdd-supply = <&pm8941_l22>;
vddio-supply = <&pm8941_l12>;
vdda-supply = <&pm8941_l2>;
@@ -169,7 +171,9 @@
compatible = "qcom,mdss-dsi-ctrl";
label = "MDSS DSI CTRL->1";
cell-index = <1>;
- reg = <0xfd922e00 0x600>;
+ reg = <0xfd922e00 0x600>,
+ <0xfdf30000 0x108>;
+ reg-names = "dsi_phys", "mmss_misc_phys";
vdd-supply = <&pm8941_l22>;
vddio-supply = <&pm8941_l12>;
vdda-supply = <&pm8941_l2>;
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index 5cf98d6..45b716a 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -465,6 +465,8 @@
ranges;
qcom,pfm-threshold = <76>;
qcom,use-phase-scaling-factor;
+ qcom,phase-scaling-factor-bits-pos = <16>;
+ qcom,valid-scaling-factor-versions = <0 1 0 0>;
krait0_vreg: regulator@f9088000 {
compatible = "qcom,krait-regulator";
diff --git a/arch/arm/boot/dts/msm8974-v1-pm.dtsi b/arch/arm/boot/dts/msm8974-v1-pm.dtsi
index 886177d..516d068 100644
--- a/arch/arm/boot/dts/msm8974-v1-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974-v1-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -130,7 +130,7 @@
qcom,L2-spm-is-apcs-master;
};
- qcom,lpm-levels {
+ lpm_levels: qcom,lpm-levels {
compatible = "qcom,lpm-levels";
qcom,default-l2-state = "l2_cache_retention";
#address-cells = <1>;
@@ -311,6 +311,7 @@
qcom,pc-mode = "tz_l2_int";
qcom,use-sync-timer;
qcom,cpus-as-clocks;
+ qcom,lpm-levels = <&lpm_levels>;
};
qcom,cpu-sleep-status@f9088008 {
diff --git a/arch/arm/boot/dts/msm8974-v2-pm.dtsi b/arch/arm/boot/dts/msm8974-v2-pm.dtsi
index 84a8c2d..cde5e5a9 100644
--- a/arch/arm/boot/dts/msm8974-v2-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -126,7 +126,7 @@
qcom,L2-spm-is-apcs-master;
};
- qcom,lpm-levels {
+ lpm_levels: qcom,lpm-levels {
compatible = "qcom,lpm-levels";
qcom,default-l2-state = "l2_cache_retention";
#address-cells = <1>;
@@ -324,6 +324,7 @@
qcom,pc-mode = "tz_l2_int";
qcom,use-sync-timer;
qcom,cpus-as-clocks;
+ qcom,lpm-levels = <&lpm_levels>;
qcom,pm-snoc-client {
compatible = "qcom,pm-snoc-client";
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 23ddc8c..da5474d 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -1520,7 +1520,9 @@
compatible = "qcom,cpubw";
qcom,cpu-mem-ports = <1 512>, <2 512>;
qcom,bw-tbl =
+ < 381 /* 50 MHz */ >,
< 572 /* 75 MHz */ >,
+ < 762 /* 100 MHz */ >,
< 1144 /* 150 MHz */ >,
< 1525 /* 200 MHz */ >,
< 2342 /* 307 MHz */ >,
diff --git a/arch/arm/boot/dts/msm8974pro-pm.dtsi b/arch/arm/boot/dts/msm8974pro-pm.dtsi
index aca8f20..0307e2a 100644
--- a/arch/arm/boot/dts/msm8974pro-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -126,7 +126,7 @@
qcom,L2-spm-is-apcs-master;
};
- qcom,lpm-levels {
+ lpm_levels: qcom,lpm-levels {
compatible = "qcom,lpm-levels";
qcom,allow-synced-levels;
qcom,default-l2-state = "l2_cache_retention";
@@ -242,6 +242,7 @@
<0xff 109>, /* ocmem_dm_nonsec_irq */
<0xff 126>, /* bam_irq[0] */
<0xff 140>, /* uart_dm_intr */
+ <0xff 146>, /* uart_dm_intr: blsp2_uart_2_irq */
<0xff 155>, /* sdcc_irq[0] */
<0xff 157>, /* sdcc_irq[0] */
<0xff 159>, /* sdcc_irq[0] */
@@ -333,6 +334,7 @@
reg = <0xfe805664 0x40>;
qcom,pc-mode = "tz_l2_int";
qcom,cpus-as-clocks;
+ qcom,lpm-levels = <&lpm_levels>;
qcom,pm-snoc-client {
compatible = "qcom,pm-snoc-client";
diff --git a/arch/arm/boot/dts/msm8974pro-pm8941.dtsi b/arch/arm/boot/dts/msm8974pro-pm8941.dtsi
index decd444..9c2be1a 100644
--- a/arch/arm/boot/dts/msm8974pro-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-pm8941.dtsi
@@ -34,6 +34,8 @@
&krait_pdn {
qcom,use-phase-switching;
+ qcom,valid-scaling-factor-versions = <0 1 1 0>;
+
};
&krait0_vreg {
diff --git a/arch/arm/boot/dts/msm8974pro-pma8084-regulator.dtsi b/arch/arm/boot/dts/msm8974pro-pma8084-regulator.dtsi
index 78e2167..49bf4ea 100644
--- a/arch/arm/boot/dts/msm8974pro-pma8084-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-pma8084-regulator.dtsi
@@ -481,6 +481,8 @@
ranges;
qcom,pfm-threshold = <76>;
qcom,use-phase-scaling-factor;
+ qcom,phase-scaling-factor-bits-pos = <16>;
+ qcom,valid-scaling-factor-versions = <0 1 1 0>;
krait0_vreg: regulator@f9088000 {
compatible = "qcom,krait-regulator";
diff --git a/arch/arm/boot/dts/msm8974pro.dtsi b/arch/arm/boot/dts/msm8974pro.dtsi
index a72ebb2..ae0547f 100644
--- a/arch/arm/boot/dts/msm8974pro.dtsi
+++ b/arch/arm/boot/dts/msm8974pro.dtsi
@@ -1578,13 +1578,13 @@
/* Off */
<26 512 0 0>, <89 604 0 0>,
/* Sub-SVS / SVS */
- <26 512 0 1600000>, <89 604 0 3200000>,
+ <26 512 1200000 2456000>, <89 604 0 3200000>,
/* SVS */
- <26 512 0 2456000>, <89 604 0 3200000>,
+ <26 512 1200000 2456000>, <89 604 0 3200000>,
/* low Nominal / SVS */
<26 512 0 3680000>, <89 604 0 3200000>,
/* SVS / low Nominal */
- <26 512 0 2456000>, <89 604 0 5280000>,
+ <26 512 1200000 2456000>, <89 604 0 5280000>,
/* low Nominal / low Nominal */
<26 512 0 3680000>, <89 604 0 5280000>,
/* Nominal / low Nominal */
@@ -1657,6 +1657,7 @@
};
&mdss_mdp {
+ qcom,max-bandwidth-low-kbps = <2750000>;
qcom,vbif-settings = <0x0004 0x00000001>;
qcom,mdss-wb-off = <0x00011100 0x00011500
diff --git a/arch/arm/boot/dts/msm9625-pm.dtsi b/arch/arm/boot/dts/msm9625-pm.dtsi
index 1e6cdf2..ec62cd4 100644
--- a/arch/arm/boot/dts/msm9625-pm.dtsi
+++ b/arch/arm/boot/dts/msm9625-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,7 +28,7 @@
3e 0f];
};
- qcom,lpm-levels {
+ lpm_levels: qcom,lpm-levels {
compatible = "qcom,lpm-levels";
qcom,no-l2-saw;
#address-cells = <1>;
@@ -167,6 +167,7 @@
reg = <0xfe805664 0x40>;
qcom,pc-mode = "tz_l2_ext";
qcom,use-sync-timer;
+ qcom,lpm-levels = <&lpm_levels>;
};
qcom,rpm-log@fc19dc00 {
diff --git a/arch/arm/configs/msm8610-perf_defconfig b/arch/arm/configs/msm8610-perf_defconfig
index c7abf42..ede654d 100644
--- a/arch/arm/configs/msm8610-perf_defconfig
+++ b/arch/arm/configs/msm8610-perf_defconfig
@@ -212,6 +212,13 @@
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_QSEECOM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
@@ -335,6 +342,12 @@
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
CONFIG_HID_ELECOM=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_ICE40_HCD=m
+CONFIG_USB_CCID_BRIDGE=y
+CONFIG_USB_STORAGE=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
CONFIG_USB_GADGET_DEBUG_FS=y
@@ -412,6 +425,8 @@
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_TWOFISH=y
+CONFIG_MOBICORE_SUPPORT=m
+CONFIG_MOBICORE_API=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=y
CONFIG_PPP=y
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index fe84f96..4f60013 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -213,6 +213,13 @@
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_QSEECOM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
@@ -357,6 +364,12 @@
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
CONFIG_HID_ELECOM=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_ICE40_HCD=m
+CONFIG_USB_CCID_BRIDGE=y
+CONFIG_USB_STORAGE=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
CONFIG_USB_GADGET_DEBUG_FS=y
@@ -485,3 +498,5 @@
CONFIG_SENSORS_MMA8X5X=y
CONFIG_SENSORS_CAPELLA_CM36283=y
CONFIG_MSM_RDBG=m
+CONFIG_MOBICORE_SUPPORT=m
+CONFIG_MOBICORE_API=m
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 71f4827..0827df7 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -24,6 +24,7 @@
extern void disable_hlt(void);
extern void enable_hlt(void);
extern int get_hlt(void);
+extern char* (*arch_read_hardware_id)(void);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 7814288..1a09188 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -893,6 +893,8 @@
hcpu, 1);
break;
case CPU_STARTING:
+ if (cpu_pmu && cpu_pmu->reset)
+ cpu_pmu->reset(NULL);
if (cpu_pmu && cpu_pmu->restore_pm_registers)
smp_call_function_single(cpu,
cpu_pmu->restore_pm_registers,
@@ -927,9 +929,8 @@
enable_irq_callback(&irq);
}
- if (cpu_pmu && cpu_pmu->reset) {
+ if (cpu_pmu) {
__get_cpu_var(from_idle) = 1;
- cpu_pmu->reset(NULL);
pmu = &cpu_pmu->pmu;
pmu->pmu_enable(pmu);
return NOTIFY_OK;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 7298f9a..c110f0f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -110,6 +110,9 @@
unsigned int cold_boot;
EXPORT_SYMBOL(cold_boot);
+char* (*arch_read_hardware_id)(void);
+EXPORT_SYMBOL(arch_read_hardware_id);
+
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
@@ -1108,7 +1111,10 @@
seq_puts(m, "\n");
- seq_printf(m, "Hardware\t: %s\n", machine_name);
+ if (!arch_read_hardware_id)
+ seq_printf(m, "Hardware\t: %s\n", machine_name);
+ else
+ seq_printf(m, "Hardware\t: %s\n", arch_read_hardware_id());
seq_printf(m, "Revision\t: %04x\n", system_rev);
seq_printf(m, "Serial\t\t: %08x%08x\n",
system_serial_high, system_serial_low);
diff --git a/arch/arm/mach-msm/board-8226-gpiomux.c b/arch/arm/mach-msm/board-8226-gpiomux.c
index 5882ebc..08566bb 100644
--- a/arch/arm/mach-msm/board-8226-gpiomux.c
+++ b/arch/arm/mach-msm/board-8226-gpiomux.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <mach/board.h>
@@ -18,6 +19,12 @@
#include <mach/gpiomux.h>
#include <mach/socinfo.h>
+#define WLAN_CLK 44
+#define WLAN_SET 43
+#define WLAN_DATA0 42
+#define WLAN_DATA1 41
+#define WLAN_DATA2 40
+
#ifdef CONFIG_USB_EHCI_MSM_HSIC
static struct gpiomux_setting hsic_sus_cfg = {
.func = GPIOMUX_FUNC_GPIO,
@@ -50,6 +57,42 @@
};
#endif
+static struct gpiomux_setting smsc_hub_act_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting smsc_hub_susp_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct msm_gpiomux_config smsc_hub_configs[] = {
+ {
+ .gpio = 114, /* reset_n */
+ .settings = {
+ [GPIOMUX_ACTIVE] = &smsc_hub_act_cfg,
+ [GPIOMUX_SUSPENDED] = &smsc_hub_susp_cfg,
+ },
+ },
+ {
+ .gpio = 8, /* clk_en */
+ .settings = {
+ [GPIOMUX_ACTIVE] = &smsc_hub_act_cfg,
+ [GPIOMUX_SUSPENDED] = &smsc_hub_susp_cfg,
+ },
+ },
+ {
+ .gpio = 9, /* int_n */
+ .settings = {
+ [GPIOMUX_ACTIVE] = &smsc_hub_act_cfg,
+ [GPIOMUX_SUSPENDED] = &smsc_hub_susp_cfg,
+ },
+ },
+};
+
#define KS8851_IRQ_GPIO 115
#if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE)
@@ -140,6 +183,18 @@
.pull = GPIOMUX_PULL_DOWN,
};
+static struct gpiomux_setting wcnss_5gpio_suspend_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting wcnss_5gpio_active_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
static struct gpiomux_setting gpio_i2c_config = {
.func = GPIOMUX_FUNC_3,
.drv = GPIOMUX_DRV_2MA,
@@ -542,6 +597,44 @@
},
};
+static struct msm_gpiomux_config wcnss_5gpio_interface[] = {
+ {
+ .gpio = 40,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 41,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 42,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 43,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 44,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+};
+
static struct gpiomux_setting gpio_suspend_config[] = {
{
.func = GPIOMUX_FUNC_GPIO, /* IN-NP */
@@ -883,4 +976,113 @@
}
msm_gpiomux_install(msm_hsic_configs, ARRAY_SIZE(msm_hsic_configs));
#endif
+ if (machine_is_msm8926() && of_board_is_mtp())
+ msm_gpiomux_install(smsc_hub_configs,
+ ARRAY_SIZE(smsc_hub_configs));
+}
+
+static void wcnss_switch_to_gpio(void)
+{
+ /* Switch MUX to GPIO */
+ msm_gpiomux_install(wcnss_5gpio_interface,
+ ARRAY_SIZE(wcnss_5gpio_interface));
+
+ /* Ensure GPIO config */
+ gpio_direction_input(WLAN_DATA2);
+ gpio_direction_input(WLAN_DATA1);
+ gpio_direction_input(WLAN_DATA0);
+ gpio_direction_output(WLAN_SET, 0);
+ gpio_direction_output(WLAN_CLK, 0);
+}
+
+static void wcnss_switch_to_5wire(void)
+{
+ msm_gpiomux_install(wcnss_5wire_interface,
+ ARRAY_SIZE(wcnss_5wire_interface));
+}
+
+u32 wcnss_rf_read_reg(u32 rf_reg_addr)
+{
+ int count = 0;
+ u32 rf_cmd_and_addr = 0;
+ u32 rf_data_received = 0;
+ u32 rf_bit = 0;
+
+ wcnss_switch_to_gpio();
+
+ /* Reset the signal if it is already being used. */
+ gpio_set_value(WLAN_SET, 0);
+ gpio_set_value(WLAN_CLK, 0);
+
+ /* We start with cmd_set high WLAN_SET = 1. */
+ gpio_set_value(WLAN_SET, 1);
+
+ gpio_direction_output(WLAN_DATA0, 1);
+ gpio_direction_output(WLAN_DATA1, 1);
+ gpio_direction_output(WLAN_DATA2, 1);
+
+ gpio_set_value(WLAN_DATA0, 0);
+ gpio_set_value(WLAN_DATA1, 0);
+ gpio_set_value(WLAN_DATA2, 0);
+
+ /* Prepare the command and RF register address that need to be sent out.
+ * Make sure that we send only the low 14 bits.
+ */
+ rf_cmd_and_addr = (((WLAN_RF_READ_REG_CMD) |
+ (rf_reg_addr << WLAN_RF_REG_ADDR_START_OFFSET)) &
+ WLAN_RF_READ_CMD_MASK);
+
+ for (count = 0; count < 5; count++) {
+ gpio_set_value(WLAN_CLK, 0);
+
+ rf_bit = (rf_cmd_and_addr & 0x1);
+ gpio_set_value(WLAN_DATA0, rf_bit ? 1 : 0);
+ rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+ rf_bit = (rf_cmd_and_addr & 0x1);
+ gpio_set_value(WLAN_DATA1, rf_bit ? 1 : 0);
+ rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+ rf_bit = (rf_cmd_and_addr & 0x1);
+ gpio_set_value(WLAN_DATA2, rf_bit ? 1 : 0);
+ rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+ /* Send the data out WLAN_CLK = 1 */
+ gpio_set_value(WLAN_CLK, 1);
+ }
+
+ /* Pull down the clock signal */
+ gpio_set_value(WLAN_CLK, 0);
+
+ /* Configure data pins to input IO pins */
+ gpio_direction_input(WLAN_DATA0);
+ gpio_direction_input(WLAN_DATA1);
+ gpio_direction_input(WLAN_DATA2);
+
+ for (count = 0; count < 2; count++) {
+ gpio_set_value(WLAN_CLK, 1);
+ gpio_set_value(WLAN_CLK, 0);
+ }
+
+ rf_bit = 0;
+ for (count = 0; count < 6; count++) {
+ gpio_set_value(WLAN_CLK, 1);
+ gpio_set_value(WLAN_CLK, 0);
+
+ rf_bit = gpio_get_value(WLAN_DATA0);
+ rf_data_received |= (rf_bit << (count * 3 + 0));
+
+ if (count != 5) {
+ rf_bit = gpio_get_value(WLAN_DATA1);
+ rf_data_received |= (rf_bit << (count * 3 + 1));
+
+ rf_bit = gpio_get_value(WLAN_DATA2);
+ rf_data_received |= (rf_bit << (count * 3 + 2));
+ }
+ }
+
+ gpio_set_value(WLAN_SET, 0);
+ wcnss_switch_to_5wire();
+
+ return rf_data_received;
}
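
The wcnss_rf_read_reg() helper added above bit-bangs an RF register read over the five WLAN pins: the read opcode and register address are packed into a 14-bit word (opcode in the low bits, address from bit WLAN_RF_REG_ADDR_START_OFFSET upward) and shifted out three bits per WLAN_CLK pulse over five pulses, after which a 16-bit response is sampled over six pulses. A minimal usage sketch of the exported interface (declared in mach/board.h later in this patch); the register address 0x08 is purely illustrative:

	u32 val;

	/* Bit-banged read of a (hypothetical) WCNSS RF register. */
	val = wcnss_rf_read_reg(0x08);
	pr_info("WCNSS RF reg 0x08 = 0x%04x\n", val);
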
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index 1c1fbe3..43646cd 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -167,7 +167,7 @@
NULL
};
-DT_MACHINE_START(MSM8226_DT, "Qualcomm MSM 8226 (Flattened Device Tree)")
+DT_MACHINE_START(MSM8226_DT, "Qualcomm MSM 8x26 / MSM 8x28 (Flattened Device Tree)")
.map_io = msm_map_msm8226_io,
.init_irq = msm_dt_init_irq,
.init_machine = msm8226_init,
diff --git a/arch/arm/mach-msm/board-8610-gpiomux.c b/arch/arm/mach-msm/board-8610-gpiomux.c
index 2e12fc2..c91deb2 100644
--- a/arch/arm/mach-msm/board-8610-gpiomux.c
+++ b/arch/arm/mach-msm/board-8610-gpiomux.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <mach/board.h>
@@ -18,6 +19,12 @@
#include <mach/gpiomux.h>
#include <mach/socinfo.h>
+#define WLAN_CLK 27
+#define WLAN_SET 26
+#define WLAN_DATA0 25
+#define WLAN_DATA1 24
+#define WLAN_DATA2 23
+
static struct gpiomux_setting gpio_spi_config = {
.func = GPIOMUX_FUNC_1,
.drv = GPIOMUX_DRV_6MA,
@@ -112,6 +119,18 @@
.pull = GPIOMUX_PULL_DOWN,
};
+static struct gpiomux_setting wcnss_5gpio_suspend_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting wcnss_5gpio_active_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
static struct gpiomux_setting lcd_en_act_cfg = {
.func = GPIOMUX_FUNC_GPIO,
.drv = GPIOMUX_DRV_8MA,
@@ -405,6 +424,44 @@
},
};
+static struct msm_gpiomux_config wcnss_5gpio_interface[] = {
+ {
+ .gpio = 23,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 24,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 25,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 26,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 27,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+};
+
static struct gpiomux_setting gpio_suspend_config[] = {
{
.func = GPIOMUX_FUNC_GPIO, /* IN-NP */
@@ -673,6 +730,61 @@
},
};
+static struct gpiomux_setting ice40_spi_cs_act_config = {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting ice40_spi_cs_susp_config = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting ice40_act_config = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting ice40_susp_config = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
+static struct msm_gpiomux_config ice40_spi_usb_configs[] __initdata = {
+ {
+ .gpio = 85,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &ice40_spi_cs_act_config,
+ [GPIOMUX_SUSPENDED] = &ice40_spi_cs_susp_config,
+ },
+ },
+ {
+ .gpio = 94,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &ice40_act_config,
+ [GPIOMUX_SUSPENDED] = &ice40_susp_config,
+ },
+ },
+ {
+ .gpio = 95,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &ice40_act_config,
+ [GPIOMUX_SUSPENDED] = &ice40_susp_config,
+ },
+ },
+ {
+ .gpio = 96,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &ice40_act_config,
+ [GPIOMUX_SUSPENDED] = &ice40_susp_config,
+ },
+ },
+};
+
void __init msm8610_init_gpiomux(void)
{
int rc;
@@ -713,4 +825,114 @@
if (of_board_is_cdp())
msm_gpiomux_install(msm_cdc_dmic_configs,
ARRAY_SIZE(msm_cdc_dmic_configs));
+
+ if (of_board_is_cdp())
+ msm_gpiomux_install(ice40_spi_usb_configs,
+ ARRAY_SIZE(ice40_spi_usb_configs));
+}
+
+static void wcnss_switch_to_gpio(void)
+{
+ /* Switch MUX to GPIO */
+ msm_gpiomux_install(wcnss_5gpio_interface,
+ ARRAY_SIZE(wcnss_5gpio_interface));
+
+ /* Ensure GPIO config */
+ gpio_direction_input(WLAN_DATA2);
+ gpio_direction_input(WLAN_DATA1);
+ gpio_direction_input(WLAN_DATA0);
+ gpio_direction_output(WLAN_SET, 0);
+ gpio_direction_output(WLAN_CLK, 0);
+}
+
+static void wcnss_switch_to_5wire(void)
+{
+ msm_gpiomux_install(wcnss_5wire_interface,
+ ARRAY_SIZE(wcnss_5wire_interface));
+}
+
+u32 wcnss_rf_read_reg(u32 rf_reg_addr)
+{
+ int count = 0;
+ u32 rf_cmd_and_addr = 0;
+ u32 rf_data_received = 0;
+ u32 rf_bit = 0;
+
+ wcnss_switch_to_gpio();
+
+ /* Reset the signal if it is already being used. */
+ gpio_set_value(WLAN_SET, 0);
+ gpio_set_value(WLAN_CLK, 0);
+
+ /* We start with cmd_set high WLAN_SET = 1. */
+ gpio_set_value(WLAN_SET, 1);
+
+ gpio_direction_output(WLAN_DATA0, 1);
+ gpio_direction_output(WLAN_DATA1, 1);
+ gpio_direction_output(WLAN_DATA2, 1);
+
+ gpio_set_value(WLAN_DATA0, 0);
+ gpio_set_value(WLAN_DATA1, 0);
+ gpio_set_value(WLAN_DATA2, 0);
+
+ /* Prepare the command and RF register address that need to be sent out.
+ * Make sure that we send only the low 14 bits.
+ */
+ rf_cmd_and_addr = (((WLAN_RF_READ_REG_CMD) |
+ (rf_reg_addr << WLAN_RF_REG_ADDR_START_OFFSET)) &
+ WLAN_RF_READ_CMD_MASK);
+
+ for (count = 0; count < 5; count++) {
+ gpio_set_value(WLAN_CLK, 0);
+
+ rf_bit = (rf_cmd_and_addr & 0x1);
+ gpio_set_value(WLAN_DATA0, rf_bit ? 1 : 0);
+ rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+ rf_bit = (rf_cmd_and_addr & 0x1);
+ gpio_set_value(WLAN_DATA1, rf_bit ? 1 : 0);
+ rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+ rf_bit = (rf_cmd_and_addr & 0x1);
+ gpio_set_value(WLAN_DATA2, rf_bit ? 1 : 0);
+ rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+ /* Send the data out WLAN_CLK = 1 */
+ gpio_set_value(WLAN_CLK, 1);
+ }
+
+ /* Pull down the clock signal */
+ gpio_set_value(WLAN_CLK, 0);
+
+ /* Configure data pins to input IO pins */
+ gpio_direction_input(WLAN_DATA0);
+ gpio_direction_input(WLAN_DATA1);
+ gpio_direction_input(WLAN_DATA2);
+
+ for (count = 0; count < 2; count++) {
+ gpio_set_value(WLAN_CLK, 1);
+ gpio_set_value(WLAN_CLK, 0);
+ }
+
+ rf_bit = 0;
+ for (count = 0; count < 6; count++) {
+ gpio_set_value(WLAN_CLK, 1);
+ gpio_set_value(WLAN_CLK, 0);
+
+ rf_bit = gpio_get_value(WLAN_DATA0);
+ rf_data_received |= (rf_bit << (count * 3 + 0));
+
+ if (count != 5) {
+ rf_bit = gpio_get_value(WLAN_DATA1);
+ rf_data_received |= (rf_bit << (count * 3 + 1));
+
+ rf_bit = gpio_get_value(WLAN_DATA2);
+ rf_data_received |= (rf_bit << (count * 3 + 2));
+ }
+ }
+
+ gpio_set_value(WLAN_SET, 0);
+ wcnss_switch_to_5wire();
+
+ return rf_data_received;
}
diff --git a/arch/arm/mach-msm/board-8610.c b/arch/arm/mach-msm/board-8610.c
index d175bb4..cd9b82e 100644
--- a/arch/arm/mach-msm/board-8610.c
+++ b/arch/arm/mach-msm/board-8610.c
@@ -136,7 +136,7 @@
NULL
};
-DT_MACHINE_START(MSM8610_DT, "Qualcomm MSM 8610 (Flattened Device Tree)")
+DT_MACHINE_START(MSM8610_DT, "Qualcomm MSM 8x10 / MSM 8x12 (Flattened Device Tree)")
.map_io = msm_map_msm8610_io,
.init_irq = msm_dt_init_irq,
.init_machine = msm8610_init,
diff --git a/arch/arm/mach-msm/board-8974-gpiomux.c b/arch/arm/mach-msm/board-8974-gpiomux.c
index cec1a8f..5d4d379 100755
--- a/arch/arm/mach-msm/board-8974-gpiomux.c
+++ b/arch/arm/mach-msm/board-8974-gpiomux.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <mach/board.h>
@@ -20,6 +21,12 @@
#define KS8851_IRQ_GPIO 94
+#define WLAN_CLK 40
+#define WLAN_SET 39
+#define WLAN_DATA0 38
+#define WLAN_DATA1 37
+#define WLAN_DATA2 36
+
static struct gpiomux_setting ap2mdm_cfg = {
.func = GPIOMUX_FUNC_GPIO,
.drv = GPIOMUX_DRV_2MA,
@@ -208,6 +215,18 @@
.pull = GPIOMUX_PULL_DOWN,
};
+static struct gpiomux_setting wcnss_5gpio_suspend_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_UP,
+};
+
+static struct gpiomux_setting wcnss_5gpio_active_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_6MA,
+ .pull = GPIOMUX_PULL_DOWN,
+};
+
static struct gpiomux_setting ath_gpio_active_cfg = {
.func = GPIOMUX_FUNC_GPIO,
.drv = GPIOMUX_DRV_2MA,
@@ -1158,6 +1177,43 @@
},
};
+static struct msm_gpiomux_config wcnss_5gpio_interface[] = {
+ {
+ .gpio = 36,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 37,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 38,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 39,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+ {
+ .gpio = 40,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &wcnss_5gpio_active_cfg,
+ [GPIOMUX_SUSPENDED] = &wcnss_5gpio_suspend_cfg,
+ },
+ },
+};
static struct msm_gpiomux_config ath_gpio_configs[] = {
{
@@ -1467,3 +1523,109 @@
msm_gpiomux_install(apq8074_dragonboard_ts_config,
ARRAY_SIZE(apq8074_dragonboard_ts_config));
}
+
+static void wcnss_switch_to_gpio(void)
+{
+ /* Switch MUX to GPIO */
+ msm_gpiomux_install(wcnss_5gpio_interface,
+ ARRAY_SIZE(wcnss_5gpio_interface));
+
+ /* Ensure GPIO config */
+ gpio_direction_input(WLAN_DATA2);
+ gpio_direction_input(WLAN_DATA1);
+ gpio_direction_input(WLAN_DATA0);
+ gpio_direction_output(WLAN_SET, 0);
+ gpio_direction_output(WLAN_CLK, 0);
+}
+
+static void wcnss_switch_to_5wire(void)
+{
+ msm_gpiomux_install(wcnss_5wire_interface,
+ ARRAY_SIZE(wcnss_5wire_interface));
+}
+
+u32 wcnss_rf_read_reg(u32 rf_reg_addr)
+{
+ int count = 0;
+ u32 rf_cmd_and_addr = 0;
+ u32 rf_data_received = 0;
+ u32 rf_bit = 0;
+
+ wcnss_switch_to_gpio();
+
+ /* Reset the signal if it is already being used. */
+ gpio_set_value(WLAN_SET, 0);
+ gpio_set_value(WLAN_CLK, 0);
+
+ /* We start with cmd_set high WLAN_SET = 1. */
+ gpio_set_value(WLAN_SET, 1);
+
+ gpio_direction_output(WLAN_DATA0, 1);
+ gpio_direction_output(WLAN_DATA1, 1);
+ gpio_direction_output(WLAN_DATA2, 1);
+
+ gpio_set_value(WLAN_DATA0, 0);
+ gpio_set_value(WLAN_DATA1, 0);
+ gpio_set_value(WLAN_DATA2, 0);
+
+ /* Prepare the command and RF register address that need to be sent out.
+ * Make sure that we send only the low 14 bits.
+ */
+ rf_cmd_and_addr = (((WLAN_RF_READ_REG_CMD) |
+ (rf_reg_addr << WLAN_RF_REG_ADDR_START_OFFSET)) &
+ WLAN_RF_READ_CMD_MASK);
+
+ for (count = 0; count < 5; count++) {
+ gpio_set_value(WLAN_CLK, 0);
+
+ rf_bit = (rf_cmd_and_addr & 0x1);
+ gpio_set_value(WLAN_DATA0, rf_bit ? 1 : 0);
+ rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+ rf_bit = (rf_cmd_and_addr & 0x1);
+ gpio_set_value(WLAN_DATA1, rf_bit ? 1 : 0);
+ rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+ rf_bit = (rf_cmd_and_addr & 0x1);
+ gpio_set_value(WLAN_DATA2, rf_bit ? 1 : 0);
+ rf_cmd_and_addr = (rf_cmd_and_addr >> 1);
+
+ /* Send the data out WLAN_CLK = 1 */
+ gpio_set_value(WLAN_CLK, 1);
+ }
+
+ /* Pull down the clock signal */
+ gpio_set_value(WLAN_CLK, 0);
+
+ /* Configure data pins to input IO pins */
+ gpio_direction_input(WLAN_DATA0);
+ gpio_direction_input(WLAN_DATA1);
+ gpio_direction_input(WLAN_DATA2);
+
+ for (count = 0; count < 2; count++) {
+ gpio_set_value(WLAN_CLK, 1);
+ gpio_set_value(WLAN_CLK, 0);
+ }
+
+ rf_bit = 0;
+ for (count = 0; count < 6; count++) {
+ gpio_set_value(WLAN_CLK, 1);
+ gpio_set_value(WLAN_CLK, 0);
+
+ rf_bit = gpio_get_value(WLAN_DATA0);
+ rf_data_received |= (rf_bit << (count * 3 + 0));
+
+ if (count != 5) {
+ rf_bit = gpio_get_value(WLAN_DATA1);
+ rf_data_received |= (rf_bit << (count * 3 + 1));
+
+ rf_bit = gpio_get_value(WLAN_DATA2);
+ rf_data_received |= (rf_bit << (count * 3 + 2));
+ }
+ }
+
+ gpio_set_value(WLAN_SET, 0);
+ wcnss_switch_to_5wire();
+
+ return rf_data_received;
+}
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index 798a33d..d1f8666 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -2583,17 +2583,6 @@
},
};
-static struct branch_clk mmss_mmssnoc_bto_ahb_clk = {
- .cbcr_reg = MMSS_MMSSNOC_BTO_AHB_CBCR,
- .has_sibling = 1,
- .base = &virt_bases[MMSS_BASE],
- .c = {
- .dbg_name = "mmss_mmssnoc_bto_ahb_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(mmss_mmssnoc_bto_ahb_clk.c),
- },
-};
-
static struct branch_clk mmss_mmssnoc_axi_clk = {
.cbcr_reg = MMSS_MMSSNOC_AXI_CBCR,
.has_sibling = 1,
@@ -2695,7 +2684,6 @@
#ifdef CONFIG_DEBUG_FS
static struct measure_mux_entry measure_mux_MMSS[] = {
- { &mmss_mmssnoc_bto_ahb_clk.c, MMSS_BASE, 0x0002 },
{ &mmss_misc_ahb_clk.c, MMSS_BASE, 0x0003 },
{ &mmss_mmssnoc_axi_clk.c, MMSS_BASE, 0x0004 },
{ &mmss_s0_axi_clk.c, MMSS_BASE, 0x0005 },
@@ -3395,6 +3383,8 @@
CLK_LOOKUP("iface_clk", mdss_ahb_clk.c, "fd922800.qcom,mdss_dsi"),
CLK_LOOKUP("bus_clk", mdss_axi_clk.c, "fd922800.qcom,mdss_dsi"),
CLK_LOOKUP("mdp_core_clk", mdss_mdp_clk.c, "fd922800.qcom,mdss_dsi"),
+ CLK_LOOKUP("core_mmss_clk", mmss_misc_ahb_clk.c,
+ "fd922800.qcom,mdss_dsi"),
CLK_LOOKUP("core_clk", mdss_mdp_clk.c, "fd900000.qcom,mdss_mdp"),
CLK_LOOKUP("lut_clk", mdss_mdp_lut_clk.c, "fd900000.qcom,mdss_mdp"),
@@ -3581,7 +3571,6 @@
CLK_LOOKUP("cam_gp1_clk", camss_gp1_clk.c, ""),
CLK_LOOKUP("iface_clk", camss_micro_ahb_clk.c, ""),
- CLK_LOOKUP("", mmss_mmssnoc_bto_ahb_clk.c, ""),
CLK_LOOKUP("", mmss_mmssnoc_axi_clk.c, ""),
CLK_LOOKUP("", mmss_s0_axi_clk.c, ""),
diff --git a/arch/arm/mach-msm/clock-8610.c b/arch/arm/mach-msm/clock-8610.c
index 8bd3bb5..e9c749a 100644
--- a/arch/arm/mach-msm/clock-8610.c
+++ b/arch/arm/mach-msm/clock-8610.c
@@ -2347,17 +2347,6 @@
},
};
-static struct branch_clk mmss_mmssnoc_bto_ahb_clk = {
- .cbcr_reg = MMSS_MMSSNOC_BTO_AHB_CBCR,
- .has_sibling = 1,
- .base = &virt_bases[MMSS_BASE],
- .c = {
- .dbg_name = "mmss_mmssnoc_bto_ahb_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(mmss_mmssnoc_bto_ahb_clk.c),
- },
-};
-
static struct branch_clk oxili_ahb_clk = {
.cbcr_reg = OXILI_AHB_CBCR,
.bcr_reg = OXILI_AHB_BCR,
@@ -2991,7 +2980,6 @@
CLK_LOOKUP("core_clk", mdp_vsync_clk.c, ""),
CLK_LOOKUP("core_clk", mmss_misc_ahb_clk.c, ""),
CLK_LOOKUP("core_clk", mmss_s0_axi_clk.c, ""),
- CLK_LOOKUP("core_clk", mmss_mmssnoc_bto_ahb_clk.c, ""),
CLK_LOOKUP("core_clk", mmss_mmssnoc_axi_clk.c, ""),
CLK_LOOKUP("core_clk", vfe_clk.c, ""),
CLK_LOOKUP("core_clk", vfe_ahb_clk.c, ""),
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index acfbfc7..1771090 100755
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -2197,6 +2197,7 @@
.en_mask = BIT(5),
.base = &virt_bases[GCC_BASE],
.c = {
+ .parent = &ce1_clk_src.c,
.dbg_name = "gcc_ce1_clk",
.ops = &clk_ops_vote,
CLK_INIT(gcc_ce1_clk.c),
@@ -2233,6 +2234,7 @@
.en_mask = BIT(2),
.base = &virt_bases[GCC_BASE],
.c = {
+ .parent = &ce2_clk_src.c,
.dbg_name = "gcc_ce2_clk",
.ops = &clk_ops_vote,
CLK_INIT(gcc_ce2_clk.c),
@@ -4237,17 +4239,6 @@
},
};
-static struct branch_clk mmss_mmssnoc_bto_ahb_clk = {
- .cbcr_reg = MMSS_MMSSNOC_BTO_AHB_CBCR,
- .has_sibling = 1,
- .base = &virt_bases[MMSS_BASE],
- .c = {
- .dbg_name = "mmss_mmssnoc_bto_ahb_clk",
- .ops = &clk_ops_branch,
- CLK_INIT(mmss_mmssnoc_bto_ahb_clk.c),
- },
-};
-
static struct branch_clk mmss_mmssnoc_axi_clk = {
.cbcr_reg = MMSS_MMSSNOC_AXI_CBCR,
.has_sibling = 1,
@@ -5087,6 +5078,10 @@
CLK_LOOKUP("pixel_clk", mdss_pclk1_clk.c, "fd922e00.qcom,mdss_dsi"),
CLK_LOOKUP("mdp_core_clk", mdss_mdp_clk.c, "fd922800.qcom,mdss_dsi"),
CLK_LOOKUP("mdp_core_clk", mdss_mdp_clk.c, "fd922e00.qcom,mdss_dsi"),
+ CLK_LOOKUP("core_mmss_clk", mmss_misc_ahb_clk.c,
+ "fd922800.qcom,mdss_dsi"),
+ CLK_LOOKUP("core_mmss_clk", mmss_misc_ahb_clk.c,
+ "fd922e00.qcom,mdss_dsi"),
CLK_LOOKUP("iface_clk", mdss_ahb_clk.c, "fd922100.qcom,hdmi_tx"),
CLK_LOOKUP("alt_iface_clk", mdss_hdmi_ahb_clk.c,
"fd922100.qcom,hdmi_tx"),
diff --git a/arch/arm/mach-msm/cpr-regulator.c b/arch/arm/mach-msm/cpr-regulator.c
index b940cb4..d952f82 100644
--- a/arch/arm/mach-msm/cpr-regulator.c
+++ b/arch/arm/mach-msm/cpr-regulator.c
@@ -169,6 +169,8 @@
/* Process voltage variables */
u32 pvs_bin;
u32 speed_bin;
+ u32 pvs_version;
+
/* APC voltage regulator */
struct regulator *vdd_apc;
@@ -1291,14 +1293,48 @@
return rc;
}
-static int cpr_get_of_corner_mappings(struct cpr_regulator *cpr_vreg,
+static void cpr_parse_pvs_version_fuse(struct cpr_regulator *cpr_vreg,
+ struct device_node *of_node)
+{
+ int rc;
+ u64 fuse_bits;
+ u32 fuse_sel[4];
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,pvs-version-fuse-sel", fuse_sel, 4);
+ if (!rc) {
+ fuse_bits = cpr_read_efuse_row(cpr_vreg,
+ fuse_sel[0], fuse_sel[3]);
+ cpr_vreg->pvs_version = (fuse_bits >> fuse_sel[1]) &
+ ((1 << fuse_sel[2]) - 1);
+ pr_info("[row: %d]: 0x%llx, pvs_version = %d\n",
+ fuse_sel[0], fuse_bits, cpr_vreg->pvs_version);
+ } else {
+ cpr_vreg->pvs_version = UINT_MAX;
+ }
+}
+
+/*
+ * cpr_get_corner_quot_adjustment() -- get the quot_adjust for each corner.
+ *
+ * Get the corner to fuse corner (SVS/NORMAL/TURBO) mappings and corner to
+ * APC clock frequency mappings from device tree.
+ * Calculate the quotient adjustment scaling factor for the corners that map
+ * to the TURBO fuse corner.
+ * Calculate the quotient adjustment for each corner that maps to the TURBO
+ * fuse corner.
+ */
+static int cpr_get_corner_quot_adjustment(struct cpr_regulator *cpr_vreg,
struct device *dev)
{
int rc = 0;
- int i, size, stripe_size;
+ int i, size;
struct property *prop;
- u32 *tmp;
bool corners_mapped;
+ u32 *tmp, *freq_mappings = NULL;
+ u32 scaling, max_factor;
+ u32 corner, turbo_corner = 0, normal_corner = 0, svs_corner = 0;
+ u32 freq_turbo, freq_normal, freq_corner;
prop = of_find_property(dev->of_node, "qcom,cpr-corner-map", NULL);
@@ -1313,81 +1349,182 @@
cpr_vreg->corner_map = devm_kzalloc(dev, sizeof(int) * (size + 1),
GFP_KERNEL);
if (!cpr_vreg->corner_map) {
- pr_err("Can't allocate cpr_vreg->corner_map memory\n");
+ pr_err("Can't allocate memory for cpr_vreg->corner_map\n");
return -ENOMEM;
}
cpr_vreg->num_corners = size;
+ cpr_vreg->quot_adjust = devm_kzalloc(dev,
+ sizeof(u32) * (cpr_vreg->num_corners + 1),
+ GFP_KERNEL);
+ if (!cpr_vreg->quot_adjust) {
+ pr_err("Can't allocate memory for cpr_vreg->quot_adjust\n");
+ return -ENOMEM;
+ }
+
if (!corners_mapped) {
for (i = CPR_FUSE_CORNER_SVS; i < CPR_FUSE_CORNER_MAX; i++)
cpr_vreg->corner_map[i] = i;
+ return 0;
} else {
rc = of_property_read_u32_array(dev->of_node,
"qcom,cpr-corner-map", &cpr_vreg->corner_map[1], size);
if (rc) {
- pr_err("qcom,cpr-corner-map missing, rc = %d", rc);
+ pr_err("qcom,cpr-corner-map missing, rc = %d\n", rc);
return rc;
}
}
- cpr_vreg->quot_adjust = devm_kzalloc(dev,
- sizeof(int) * (cpr_vreg->num_corners + 1),
- GFP_KERNEL);
- if (!cpr_vreg->quot_adjust) {
- pr_err("Can't allocate cpr_vreg->quot_adjust memory\n");
+ prop = of_find_property(dev->of_node,
+ "qcom,cpr-speed-bin-max-corners", NULL);
+ if (!prop) {
+ cpr_debug("qcom,cpr-speed-bin-max-corner missing\n");
+ return 0;
+ }
+
+ size = prop->length / sizeof(u32);
+ tmp = kzalloc(size * sizeof(u32), GFP_KERNEL);
+ if (!tmp) {
+ pr_err("memory alloc failed\n");
return -ENOMEM;
}
-
- prop = of_find_property(dev->of_node, "qcom,cpr-quot-adjust-table",
- NULL);
-
- if (prop) {
- if (!corners_mapped) {
- pr_err("qcom,cpr-corner-map missing\n");
- return -EINVAL;
- }
-
- size = prop->length / sizeof(u32);
- tmp = kzalloc(sizeof(u32) * size, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- rc = of_property_read_u32_array(dev->of_node,
- "qcom,cpr-quot-adjust-table", tmp, size);
- if (rc) {
- pr_err("qcom,cpr-quot-adjust-table missing, rc = %d",
- rc);
- kfree(tmp);
- return rc;
- }
-
- stripe_size = sizeof(struct quot_adjust_info) / sizeof(int);
-
- if ((size % stripe_size) != 0) {
- pr_err("qcom,cpr-quot-adjust-table data is not correct");
- kfree(tmp);
- return -EINVAL;
- }
-
- for (i = 0; i < size; i += stripe_size) {
- if (tmp[i] == cpr_vreg->speed_bin) {
- if (tmp[i + 1] >= 1 &&
- tmp[i + 1] <=
- cpr_vreg->num_corners) {
- cpr_vreg->quot_adjust[tmp[i + 1]] =
- tmp[i + 2];
- } else {
- pr_err("qcom,cpr-quot-adjust-table data is not correct");
- kfree(tmp);
- return -EINVAL;
- }
- }
- }
-
+ rc = of_property_read_u32_array(dev->of_node,
+ "qcom,cpr-speed-bin-max-corners", tmp, size);
+ if (rc < 0) {
kfree(tmp);
+ pr_err("get cpr-speed-bin-max-corners failed, rc = %d\n", rc);
+ return rc;
}
+ cpr_parse_pvs_version_fuse(cpr_vreg, dev->of_node);
+
+ /*
+ * Based on the speed_bin and pvs_version, get the maximum
+ * virtual corner corresponding to each SVS/NORMAL/TURBO fuse corner.
+ */
+ for (i = 0; i < size; i += 5) {
+ if (tmp[i] == cpr_vreg->speed_bin &&
+ tmp[i + 1] == cpr_vreg->pvs_version) {
+ svs_corner = tmp[i + 2];
+ normal_corner = tmp[i + 3];
+ turbo_corner = tmp[i + 4];
+ break;
+ }
+ }
+ kfree(tmp);
+ /*
+ * Return success even if the virtual corner values read from the
+ * qcom,cpr-speed-bin-max-corners property are incorrect, so that
+ * the driver can continue to run without error.
+ */
+ if (turbo_corner <= normal_corner ||
+ turbo_corner > cpr_vreg->num_corners) {
+ cpr_debug("turbo:%d should be larger than normal:%d\n",
+ turbo_corner, normal_corner);
+ return 0;
+ }
+
+ prop = of_find_property(dev->of_node,
+ "qcom,cpr-corner-frequency-map", NULL);
+ if (!prop) {
+ cpr_debug("qcom,cpr-corner-frequency-map missing\n");
+ return 0;
+ }
+
+ size = prop->length / sizeof(u32);
+ tmp = kzalloc(sizeof(u32) * size, GFP_KERNEL);
+ if (!tmp) {
+ pr_err("memory alloc failed\n");
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(dev->of_node,
+ "qcom,cpr-corner-frequency-map", tmp, size);
+ if (rc < 0) {
+ pr_err("get cpr-corner-frequency-map failed, rc = %d\n", rc);
+ kfree(tmp);
+ return rc;
+ }
+ freq_mappings = kzalloc(sizeof(u32) * (cpr_vreg->num_corners + 1),
+ GFP_KERNEL);
+ if (!freq_mappings) {
+ pr_err("memory alloc for freq_mappings failed!\n");
+ kfree(tmp);
+ return -ENOMEM;
+ }
+ for (i = 0; i < size; i += 2) {
+ corner = tmp[i];
+ if ((corner < 1) || (corner > cpr_vreg->num_corners)) {
+ pr_err("corner should be in 1~%d range: %d\n",
+ cpr_vreg->num_corners, corner);
+ continue;
+ }
+ freq_mappings[corner] = tmp[i + 1];
+ cpr_debug("Frequency at virtual corner %d is %d Hz.\n",
+ corner, freq_mappings[corner]);
+ }
+ kfree(tmp);
+
+ rc = of_property_read_u32(dev->of_node,
+ "qcom,cpr-quot-adjust-scaling-factor-max",
+ &max_factor);
+ if (rc < 0) {
+ cpr_debug("get cpr-quot-adjust-scaling-factor-max failed\n");
+ kfree(freq_mappings);
+ return 0;
+ }
+
+ /*
+ * Get the quot adjust scaling factor, according to:
+ * scaling =
+ * min(1000 * (QUOT(fused @turbo) - QUOT(fused @normal)) /
+ * (freq_turbo - freq_normal), max_factor)
+ *
+ * @QUOT(fused @turbo): quotient read from fuse for TURBO fuse corner;
+ * @QUOT(fused @normal): quotient read from fuse for NORMAL fuse corner;
+ * @freq_turbo: MHz, max frequency running at TURBO fuse corner;
+ * @freq_normal: MHz, max frequency running at NORMAL fuse corner.
+ */
+
+ freq_turbo = freq_mappings[turbo_corner];
+ freq_normal = freq_mappings[normal_corner];
+ if (freq_normal == 0 || freq_turbo <= freq_normal) {
+ pr_err("freq_turbo: %d should larger than freq_normal: %d\n",
+ freq_turbo, freq_normal);
+ kfree(freq_mappings);
+ return -EINVAL;
+ }
+ freq_turbo /= 1000000; /* MHz */
+ freq_normal /= 1000000;
+ scaling = 1000 *
+ (cpr_vreg->cpr_fuse_target_quot[CPR_FUSE_CORNER_TURBO] -
+ cpr_vreg->cpr_fuse_target_quot[CPR_FUSE_CORNER_NORMAL]) /
+ (freq_turbo - freq_normal);
+ scaling = min(scaling, max_factor);
+ pr_info("quotient adjustment scaling factor: %d.%03d\n",
+ scaling / 1000, scaling % 1000);
+
+ /*
+ * Walk through the corners mapped to the TURBO fuse corner and
+ * calculate the quotient adjustment for each one using the following
+ * formula:
+ * quot_adjust = (freq_turbo - freq_corner) * scaling / 1000
+ *
+ * @freq_turbo: MHz, max frequency running at TURBO fuse corner;
+ * @freq_corner: MHz, frequency running at a corner.
+ */
+ for (i = turbo_corner; i > normal_corner; i--) {
+ freq_corner = freq_mappings[i] / 1000000; /* MHz */
+ if (freq_corner > 0) {
+ cpr_vreg->quot_adjust[i] =
+ scaling * (freq_turbo - freq_corner) / 1000;
+ }
+ pr_info("adjusted quotient[%d] = %d\n", i,
+ (cpr_vreg->cpr_fuse_target_quot[cpr_vreg->corner_map[i]]
+ - cpr_vreg->quot_adjust[i]));
+ }
+ kfree(freq_mappings);
return 0;
}
@@ -1531,7 +1668,7 @@
}
}
- rc = cpr_get_of_corner_mappings(cpr_vreg, &pdev->dev);
+ rc = cpr_get_corner_quot_adjustment(cpr_vreg, &pdev->dev);
if (rc)
return rc;
@@ -1724,12 +1861,12 @@
cpr_vreg->efuse_addr = res->start;
len = res->end - res->start + 1;
- pr_info("efuse_addr = 0x%x (len=0x%x)\n", res->start, len);
+ pr_info("efuse_addr = %pa (len=0x%x)\n", &res->start, len);
cpr_vreg->efuse_base = ioremap(cpr_vreg->efuse_addr, len);
if (!cpr_vreg->efuse_base) {
- pr_err("Unable to map efuse_addr 0x%08x\n",
- cpr_vreg->efuse_addr);
+ pr_err("Unable to map efuse_addr %pa\n",
+ &cpr_vreg->efuse_addr);
return -EINVAL;
}
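
To make the new quotient scaling concrete, here is the arithmetic from cpr_get_corner_quot_adjustment() with made-up numbers (none of these values come from a real fuse map or device tree):

	/* Hypothetical example values, for illustration only. */
	u32 quot_turbo = 1400, quot_normal = 1000;	/* fused target quotients */
	u32 freq_turbo = 1190, freq_normal = 787;	/* MHz */
	u32 max_factor = 650, scaling, quot_adjust;

	scaling = 1000 * (quot_turbo - quot_normal) / (freq_turbo - freq_normal);
	if (scaling > max_factor)
		scaling = max_factor;	/* 1000 * 400 / 403 = 992, capped to 650 */

	/* Adjustment for a virtual corner at 998 MHz that maps to TURBO: */
	quot_adjust = scaling * (freq_turbo - 998) / 1000;	/* 650 * 192 / 1000 = 124 */

The adjusted quotient for that corner is then the fused TURBO quotient minus quot_adjust, i.e. 1400 - 124 = 1276 in this example.
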
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 89e3b51..c7f8b74 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -27,6 +27,12 @@
#include <linux/msm_ssbi.h>
#include <mach/msm_bus.h>
+#define WLAN_RF_REG_ADDR_START_OFFSET 0x3
+#define WLAN_RF_REG_DATA_START_OFFSET 0xf
+#define WLAN_RF_READ_REG_CMD 0x3
+#define WLAN_RF_WRITE_REG_CMD 0x2
+#define WLAN_RF_READ_CMD_MASK 0x3fff
+
struct msm_camera_io_ext {
uint32_t mdcphy;
uint32_t mdcsz;
@@ -677,4 +683,5 @@
extern phys_addr_t msm_shared_ram_phys; /* defined in arch/arm/mach-msm/io.c */
+u32 wcnss_rf_read_reg(u32 rf_reg_addr);
#endif
diff --git a/arch/arm/mach-msm/include/mach/kgsl.h b/arch/arm/mach-msm/include/mach/kgsl.h
index f398652..edfe6b4 100644
--- a/arch/arm/mach-msm/include/mach/kgsl.h
+++ b/arch/arm/mach-msm/include/mach/kgsl.h
@@ -91,6 +91,7 @@
struct coresight_device *csdev;
struct coresight_platform_data *coresight_pdata;
unsigned int chipid;
+ unsigned int pm_qos_latency;
};
#endif
diff --git a/arch/arm/mach-msm/include/mach/ocmem_priv.h b/arch/arm/mach-msm/include/mach/ocmem_priv.h
index 32d58d4..00aedb6 100644
--- a/arch/arm/mach-msm/include/mach/ocmem_priv.h
+++ b/arch/arm/mach-msm/include/mach/ocmem_priv.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -226,7 +226,6 @@
int process_dump(int, struct ocmem_handle *, unsigned long);
int ocmem_rdm_transfer(int, struct ocmem_map_list *,
unsigned long, int);
-int ocmem_clear(unsigned long, unsigned long);
unsigned long process_quota(int);
int ocmem_memory_off(int, unsigned long, unsigned long);
int ocmem_memory_on(int, unsigned long, unsigned long);
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index aeb32f8..24b5181 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -138,6 +138,11 @@
MSM_CPU_SAMARIUM,
};
+struct msm_soc_info {
+ enum msm_cpu generic_soc_type;
+ char *soc_id_string;
+};
+
enum pmic_model {
PMIC_MODEL_PM8058 = 13,
PMIC_MODEL_PM8028 = 14,
diff --git a/arch/arm/mach-msm/include/mach/subsystem_notif.h b/arch/arm/mach-msm/include/mach/subsystem_notif.h
index 5865eff..59e212f 100644
--- a/arch/arm/mach-msm/include/mach/subsystem_notif.h
+++ b/arch/arm/mach-msm/include/mach/subsystem_notif.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011, 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,7 @@
SUBSYS_BEFORE_POWERUP,
SUBSYS_AFTER_POWERUP,
SUBSYS_RAMDUMP_NOTIFICATION,
+ SUBSYS_POWERUP_FAILURE,
SUBSYS_NOTIF_TYPE_COUNT
};
diff --git a/arch/arm/mach-msm/krait-regulator.c b/arch/arm/mach-msm/krait-regulator.c
index f4456c0..a291b90 100644
--- a/arch/arm/mach-msm/krait-regulator.c
+++ b/arch/arm/mach-msm/krait-regulator.c
@@ -1443,11 +1443,17 @@
{
struct resource *res;
void __iomem *efuse;
- u32 efuse_data, efuse_version;
- bool scaling_factor_valid, use_efuse;
+ u32 efuse_data, efuse_version, efuse_version_data;
+ bool sf_valid, use_efuse;
+ int sf_pos, sf_mask;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int valid_sfs[4] = {0, 0, 0, 0};
+ int sf_versions_len;
+ int rc;
- use_efuse = of_property_read_bool(pdev->dev.of_node,
- "qcom,use-phase-scaling-factor");
+ use_efuse = of_property_read_bool(node,
+ "qcom,use-phase-scaling-factor");
/*
* Allow usage of the eFuse phase scaling factor if it is enabled in
* either device tree or by module parameter.
@@ -1462,6 +1468,7 @@
return -EINVAL;
}
+ /* Read efuse registers */
efuse = ioremap(res->start, 8);
if (!efuse) {
pr_err("could not map phase scaling eFuse address\n");
@@ -1469,25 +1476,47 @@
}
efuse_data = readl_relaxed(efuse);
- efuse_version = readl_relaxed(efuse + 4);
-
+ efuse_version_data = readl_relaxed(efuse + 4);
iounmap(efuse);
- scaling_factor_valid
- = ((efuse_version & PHASE_SCALING_EFUSE_VERSION_MASK) >>
- PHASE_SCALING_EFUSE_VERSION_POS)
- == PHASE_SCALING_EFUSE_VERSION_SET;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,phase-scaling-factor-bits-pos",
+ &sf_pos);
+ if (rc < 0) {
+ dev_err(dev, "qcom,phase-scaling-factor-bits-pos missing rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
- if (scaling_factor_valid)
+ sf_mask = KRAIT_MASK(sf_pos + 2, sf_pos);
+
+ efuse_version
+ = ((efuse_version_data & PHASE_SCALING_EFUSE_VERSION_MASK) >>
+ PHASE_SCALING_EFUSE_VERSION_POS);
+
+ if (of_find_property(node, "qcom,valid-scaling-factor-versions",
+ &sf_versions_len)
+ && (sf_versions_len == 4 * sizeof(u32))) {
+ rc = of_property_read_u32_array(node,
+ "qcom,valid-scaling-factor-versions",
+ valid_sfs, 4);
+ sf_valid = (valid_sfs[efuse_version] == 1);
+ } else {
+ dev_err(dev, "qcom,valid-scaling-factor-versions missing or its size is incorrect rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ if (sf_valid)
pvreg->efuse_phase_scaling_factor
- = ((efuse_data & PHASE_SCALING_EFUSE_VALUE_MASK)
- >> PHASE_SCALING_EFUSE_VALUE_POS) + 1;
+ = ((efuse_data & sf_mask)
+ >> sf_pos) + 1;
else
pvreg->efuse_phase_scaling_factor = PHASE_SCALING_REF;
pr_info("eFuse phase scaling factor = %d/%d%s\n",
pvreg->efuse_phase_scaling_factor, PHASE_SCALING_REF,
- scaling_factor_valid ? "" : " (eFuse not blown)");
+ sf_valid ? "" : " (eFuse not blown)");
pr_info("initial phase scaling factor = %d/%d%s\n",
use_efuse_phase_scaling_factor
? pvreg->efuse_phase_scaling_factor : PHASE_SCALING_REF,
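
The krait-regulator change above reads the scaling-factor bit position and the table of valid eFuse versions from device tree instead of using fixed masks. A sketch of the resulting field extraction, assuming KRAIT_MASK(msb, lsb) builds an inclusive bit mask; all numeric values are hypothetical:

	int sf_pos = 10;		/* from qcom,phase-scaling-factor-bits-pos */
	u32 sf_mask = 0x7 << sf_pos;	/* 3-bit field, same as KRAIT_MASK(sf_pos + 2, sf_pos) */
	u32 efuse_data = 0x00000c00;	/* made-up raw fuse word */
	u32 factor;

	factor = ((efuse_data & sf_mask) >> sf_pos) + 1;	/* field = 3, factor = 4 */

Whether that factor is actually used still depends on the eFuse version indexing into qcom,valid-scaling-factor-versions; otherwise PHASE_SCALING_REF is kept.
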
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index 7128017..bd28131 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -82,6 +82,7 @@
static struct lpm_system_state sys_state;
static bool suspend_in_progress;
+static int64_t suspend_time;
struct lpm_lookup_table {
uint32_t modes;
@@ -526,7 +527,7 @@
if (latency_us < pwr->latency_us)
continue;
- if (next_event_us)
+ if (next_event_us) {
if (next_event_us < pwr->latency_us)
continue;
@@ -535,6 +536,7 @@
next_wakeup_us = next_event_us
- pwr->latency_us;
}
+ }
if (next_wakeup_us <= pwr->time_overhead_us)
continue;
@@ -544,11 +546,11 @@
if (!dev->cpu && msm_rpm_waiting_for_ack())
break;
- if ((next_wakeup_us >> 10) > pwr->latency_us) {
+ if ((next_wakeup_us >> 10) > pwr->time_overhead_us) {
power = pwr->ss_power;
} else {
power = pwr->ss_power;
- power -= (pwr->latency_us * pwr->ss_power)
+ power -= (pwr->time_overhead_us * pwr->ss_power)
/ next_wakeup_us;
power += pwr->energy_overhead / next_wakeup_us;
}
@@ -778,6 +780,11 @@
static int lpm_suspend_prepare(void)
{
+ struct timespec ts;
+
+ getnstimeofday(&ts);
+ suspend_time = timespec_to_ns(&ts);
+
suspend_in_progress = true;
msm_mpm_suspend_prepare();
return 0;
@@ -785,6 +792,12 @@
static void lpm_suspend_wake(void)
{
+ struct timespec ts;
+
+ getnstimeofday(&ts);
+ suspend_time = timespec_to_ns(&ts) - suspend_time;
+ msm_pm_add_stat(MSM_PM_STAT_SUSPEND, suspend_time);
+
msm_mpm_suspend_wake();
suspend_in_progress = false;
}
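
The lpm_levels change above amortizes a low-power mode's time overhead (rather than its wakeup latency) when deciding whether the expected sleep is long enough to ignore that overhead. A rough sketch of the comparison with made-up numbers:

	/* Hypothetical numbers, for illustration only. */
	u32 next_wakeup_us = 1000, time_overhead_us = 200;
	u32 ss_power = 10, energy_overhead = 5000, power;

	if ((next_wakeup_us >> 10) > time_overhead_us) {
		power = ss_power;	/* long sleep: overhead is negligible */
	} else {
		power = ss_power
			- (time_overhead_us * ss_power) / next_wakeup_us	/* 10 - 2 */
			+ energy_overhead / next_wakeup_us;			/* + 5 = 13 */
	}
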
diff --git a/arch/arm/mach-msm/msm-pm.c b/arch/arm/mach-msm/msm-pm.c
index f9a9343..cb65a70 100644
--- a/arch/arm/mach-msm/msm-pm.c
+++ b/arch/arm/mach-msm/msm-pm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,10 +25,12 @@
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/cpu_pm.h>
+#include <linux/remote_spinlock.h>
#include <asm/uaccess.h>
#include <asm/suspend.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
+#include <mach/remote_spinlock.h>
#include <mach/scm.h>
#include <mach/msm_bus.h>
#include <mach/jtag.h>
@@ -117,6 +119,12 @@
DEFINE_PER_CPU(struct clk *, cpu_clks);
static struct clk *l2_clk;
+static int cpu_count;
+static DEFINE_SPINLOCK(cpu_cnt_lock);
+#define SCM_HANDOFF_LOCK_ID "S:7"
+static bool need_scm_handoff_lock;
+static remote_spinlock_t scm_handoff_lock;
+
static void (*msm_pm_disable_l2_fn)(void);
static void (*msm_pm_enable_l2_fn)(void);
static void (*msm_pm_flush_l2_fn)(void);
@@ -478,8 +486,30 @@
static int msm_pm_collapse(unsigned long unused)
{
uint32_t cpu = smp_processor_id();
+ enum msm_pm_l2_scm_flag flag = MSM_SCM_L2_ON;
- if (msm_pm_get_l2_flush_flag() == MSM_SCM_L2_OFF) {
+ spin_lock(&cpu_cnt_lock);
+ cpu_count++;
+ if (cpu_count == num_online_cpus())
+ flag = msm_pm_get_l2_flush_flag();
+
+ pr_debug("cpu:%d cores_in_pc:%d L2 flag: %d\n",
+ cpu, cpu_count, flag);
+
+ /*
+ * The scm_handoff_lock will be released by the secure monitor.
+ * It is used to serialize power-collapses from this point on,
+ * so that both Linux and the secure context have a consistent
+ * view regarding the number of running cpus (cpu_count).
+ *
+ * It must be acquired before releasing cpu_cnt_lock.
+ */
+ if (need_scm_handoff_lock)
+ remote_spin_lock_rlock_id(&scm_handoff_lock,
+ REMOTE_SPINLOCK_TID_START + cpu);
+ spin_unlock(&cpu_cnt_lock);
+
+ if (flag == MSM_SCM_L2_OFF) {
flush_cache_all();
if (msm_pm_flush_l2_fn)
msm_pm_flush_l2_fn();
@@ -491,8 +521,7 @@
msm_pc_inc_debug_count(cpu, MSM_PC_ENTRY_COUNTER);
- scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC,
- msm_pm_get_l2_flush_flag());
+ scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC, flag);
msm_pc_inc_debug_count(cpu, MSM_PC_FALLTHRU_COUNTER);
@@ -534,6 +563,12 @@
collapsed = save_cpu_regs ?
!cpu_suspend(0, msm_pm_collapse) : msm_pm_pc_hotplug();
+ if (save_cpu_regs) {
+ spin_lock(&cpu_cnt_lock);
+ cpu_count--;
+ BUG_ON(cpu_count > num_online_cpus());
+ spin_unlock(&cpu_cnt_lock);
+ }
msm_jtag_restore_state();
if (collapsed) {
@@ -764,17 +799,19 @@
pr_info("CPU%u: %s mode:%d\n",
smp_processor_id(), __func__, mode);
- time = sched_clock();
+ if (from_idle)
+ time = sched_clock();
+
if (execute[mode])
exit_stat = execute[mode](from_idle);
- time = sched_clock() - time;
- if (from_idle)
+
+ if (from_idle) {
+ time = sched_clock() - time;
msm_pm_ftrace_lpm_exit(smp_processor_id(), mode, collapsed);
- else
- exit_stat = MSM_PM_STAT_SUSPEND;
- if (exit_stat >= 0)
- msm_pm_add_stat(exit_stat, time);
- do_div(time, 1000);
+ if (exit_stat >= 0)
+ msm_pm_add_stat(exit_stat, time);
+ }
+
return collapsed;
}
@@ -1166,6 +1203,7 @@
struct resource *res = NULL;
int i;
struct msm_pm_init_data_type pdata_local;
+ struct device_node *lpm_node;
int ret = 0;
memset(&pdata_local, 0, sizeof(struct msm_pm_init_data_type));
@@ -1192,6 +1230,23 @@
msm_pc_debug_counters_phys = 0;
}
+ lpm_node = of_parse_phandle(pdev->dev.of_node, "qcom,lpm-levels", 0);
+ if (!lpm_node) {
+ pr_warn("Could not get qcom,lpm-levels handle\n");
+ return -EINVAL;
+ }
+ need_scm_handoff_lock = of_property_read_bool(lpm_node,
+ "qcom,allow-synced-levels");
+ if (need_scm_handoff_lock) {
+ ret = remote_spin_lock_init(&scm_handoff_lock,
+ SCM_HANDOFF_LOCK_ID);
+ if (ret) {
+ pr_err("%s: Failed initializing scm_handoff_lock (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
if (pdev->dev.of_node) {
enum msm_pm_pc_mode_type pc_mode;
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
index 626c5e8..0ffc194 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
@@ -364,7 +364,7 @@
{
struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
void *sel_cdata;
- long rounded_rate;
+ long rounded_rate, cur_rate;
sel_cdata = fabric->cdata[ctx];
@@ -379,16 +379,20 @@
}
/* Enable clocks before accessing QoS registers */
- if (fabric->info.nodeclk[DUAL_CTX].clk)
+ if (fabric->info.nodeclk[DUAL_CTX].clk) {
if (fabric->info.nodeclk[DUAL_CTX].rate == 0) {
- rounded_rate = clk_round_rate(fabric->
- info.nodeclk[DUAL_CTX].clk, 1);
+ cur_rate = clk_get_rate(
+ fabric->info.nodeclk[DUAL_CTX].clk);
+ rounded_rate = clk_round_rate(
+ fabric->info.nodeclk[DUAL_CTX].clk,
+ cur_rate ? cur_rate : 1);
if (clk_set_rate(fabric->info.nodeclk[DUAL_CTX].clk,
rounded_rate))
MSM_BUS_ERR("Error: clk: en: Node: %d rate: %ld",
fabric->fabdev.id, rounded_rate);
clk_prepare_enable(fabric->info.nodeclk[DUAL_CTX].clk);
+ }
}
if (info->iface_clk.clk)
@@ -514,22 +518,26 @@
struct msm_bus_inode_info *info, uint64_t req_clk, uint64_t req_bw)
{
struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
- long rounded_rate;
+ long rounded_rate, cur_rate;
if (fabdev->hw_algo.config_master == NULL)
return;
/* Enable clocks before accessing QoS registers */
- if (fabric->info.nodeclk[DUAL_CTX].clk)
+ if (fabric->info.nodeclk[DUAL_CTX].clk) {
if (fabric->info.nodeclk[DUAL_CTX].rate == 0) {
- rounded_rate = clk_round_rate(fabric->
- info.nodeclk[DUAL_CTX].clk, 1);
+ cur_rate = clk_get_rate(
+ fabric->info.nodeclk[DUAL_CTX].clk);
+ rounded_rate = clk_round_rate(
+ fabric->info.nodeclk[DUAL_CTX].clk,
+ cur_rate ? cur_rate : 1);
if (clk_set_rate(fabric->info.nodeclk[DUAL_CTX].clk,
rounded_rate))
MSM_BUS_ERR("Error: clk: en: Node: %d rate: %ld",
fabric->fabdev.id, rounded_rate);
clk_prepare_enable(fabric->info.nodeclk[DUAL_CTX].clk);
+ }
}
if (info->iface_clk.clk)
diff --git a/arch/arm/mach-msm/ocmem_core.c b/arch/arm/mach-msm/ocmem_core.c
index c186a5e..f753391 100644
--- a/arch/arm/mach-msm/ocmem_core.c
+++ b/arch/arm/mach-msm/ocmem_core.c
@@ -51,6 +51,7 @@
static struct ocmem_hw_region *region_ctrl;
static struct mutex region_ctrl_lock;
static void *ocmem_base;
+static void *ocmem_vbase;
#define OCMEM_V1_MACROS 8
#define OCMEM_V1_MACRO_SZ (SZ_64K)
@@ -562,6 +563,13 @@
ocmem_write(0x0, ocmem_base + OC_GFX_MPU_END);
}
+int ocmem_clear(unsigned long start, unsigned long size)
+{
+ memset((ocmem_vbase + start), 0x4D4D434F, size);
+ mb();
+ return 0;
+}
+
static int do_lock(enum ocmem_client id, unsigned long offset,
unsigned long len, enum region_mode mode)
{
@@ -1144,6 +1152,7 @@
pdata = platform_get_drvdata(pdev);
ocmem_base = pdata->reg_base;
+ ocmem_vbase = pdata->vbase;
rc = ocmem_enable_core_clock();
diff --git a/arch/arm/mach-msm/ocmem_rdm.c b/arch/arm/mach-msm/ocmem_rdm.c
index 4ff7212..9eac050 100644
--- a/arch/arm/mach-msm/ocmem_rdm.c
+++ b/arch/arm/mach-msm/ocmem_rdm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -149,38 +149,6 @@
return IRQ_HANDLED;
}
-#ifdef CONFIG_MSM_OCMEM_NONSECURE
-int ocmem_clear(unsigned long start, unsigned long size)
-{
- INIT_COMPLETION(dm_clear_event);
- /* Clear DM Mask */
- ocmem_write(DM_MASK_RESET, dm_base + DM_INTR_MASK);
- /* Clear DM Interrupts */
- ocmem_write(DM_INTR_RESET, dm_base + DM_INTR_CLR);
- /* DM CLR offset */
- ocmem_write(start, dm_base + DM_CLR_OFFSET);
- /* DM CLR size */
- ocmem_write(size, dm_base + DM_CLR_SIZE);
- /* Wipe out memory as "OCMM" */
- ocmem_write(0x4D4D434F, dm_base + DM_CLR_PATTERN);
- /* The offset, size and pattern for clearing must be set
- * before triggering the clearing engine
- */
- mb();
- /* Trigger Data Clear */
- ocmem_write(DM_CLR_ENABLE, dm_base + DM_CLR_TRIGGER);
-
- wait_for_completion(&dm_clear_event);
-
- return 0;
-}
-#else
-int ocmem_clear(unsigned long start, unsigned long size)
-{
- return 0;
-}
-#endif
-
/* Lock during transfers */
int ocmem_rdm_transfer(int id, struct ocmem_map_list *clist,
unsigned long start, int direction)
diff --git a/arch/arm/mach-msm/perf_debug.c b/arch/arm/mach-msm/perf_debug.c
index 05d3cef..3a87c78 100644
--- a/arch/arm/mach-msm/perf_debug.c
+++ b/arch/arm/mach-msm/perf_debug.c
@@ -46,6 +46,8 @@
"21 Perf: preserve registers across hotplug\n"
"22 msm: perf: fix formatting of trace entry\n"
"23 msm: perf: Fix cpu id logic in tracectr notifier\n"
+ "24 msm: perf: tracectr: Initialize cnts after hotplug\n"
+ "25 Perf: Reset pmu after hotplug\n"
;
static ssize_t desc_read(struct file *fp, char __user *buf,
diff --git a/arch/arm/mach-msm/perf_trace_counters.c b/arch/arm/mach-msm/perf_trace_counters.c
index 0a679b1..8eb1244 100644
--- a/arch/arm/mach-msm/perf_trace_counters.c
+++ b/arch/arm/mach-msm/perf_trace_counters.c
@@ -21,21 +21,17 @@
DEFINE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
DEFINE_PER_CPU(u32[NUM_L2_PERCPU], previous_l2_cnts);
DEFINE_PER_CPU(u32, old_pid);
+DEFINE_PER_CPU(u32, hotplug_flag);
/* Reset per_cpu variables that store counter values upon CPU hotplug */
static int tracectr_cpu_hotplug_notifier(struct notifier_block *self,
unsigned long action, void *hcpu)
{
int ret = NOTIFY_OK;
int cpu = (int)hcpu;
- int i;
- if ((action & (~CPU_TASKS_FROZEN)) == CPU_UP_PREPARE) {
- per_cpu(previous_ccnt, cpu) = 0;
- for (i = 0; i < NUM_L1_CTRS; i++)
- per_cpu(previous_l1_cnts[i], cpu) = 0;
- for (i = 0; i < NUM_L2_PERCPU; i++)
- per_cpu(previous_l2_cnts[i], cpu) = 0;
- }
+ if ((action & (~CPU_TASKS_FROZEN)) == CPU_STARTING)
+ per_cpu(hotplug_flag, cpu) = 1;
+
return ret;
}
@@ -43,6 +39,35 @@
.notifier_call = tracectr_cpu_hotplug_notifier,
};
+static void setup_prev_cnts(u32 cpu)
+{
+ int i;
+ u32 cnten_val;
+
+ /* Read PMCNTENSET */
+ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(cnten_val));
+ /* Disable all the counters that were enabled */
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r"(cnten_val));
+ if (cnten_val & CC) {
+ /* Read value */
+ asm volatile("mrc p15, 0, %0, c9, c13, 0"
+ : "=r"(per_cpu(previous_ccnt, cpu)));
+ }
+
+ for (i = 0; i < NUM_L1_CTRS; i++) {
+ if (cnten_val & (1 << i)) {
+ /* Select */
+ asm volatile("mcr p15, 0, %0, c9, c12, 5"
+ : : "r"(i));
+ /* Read value */
+ asm volatile("mrc p15, 0, %0, c9, c13, 2"
+ : "=r"(per_cpu(previous_l1_cnts[i], cpu)));
+ }
+ }
+ /* Enable all the counters that were disabled */
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r"(cnten_val));
+}
+
static int tracectr_notifier(struct notifier_block *self, unsigned long cmd,
void *v)
{
@@ -54,9 +79,14 @@
return -EFAULT;
current_pid = thread->task->pid;
- if (per_cpu(old_pid, cpu) != -1)
- trace_sched_switch_with_ctrs(per_cpu(old_pid, cpu),
- current_pid);
+ if (per_cpu(old_pid, cpu) != -1) {
+ if (per_cpu(hotplug_flag, cpu) == 1) {
+ per_cpu(hotplug_flag, cpu) = 0;
+ setup_prev_cnts(cpu);
+ } else
+ trace_sched_switch_with_ctrs(per_cpu(old_pid, cpu),
+ current_pid);
+ }
per_cpu(old_pid, cpu) = current_pid;
return NOTIFY_OK;
}
diff --git a/arch/arm/mach-msm/qdsp6v2/Makefile b/arch/arm/mach-msm/qdsp6v2/Makefile
index 6bd3efb..3d7638d 100644
--- a/arch/arm/mach-msm/qdsp6v2/Makefile
+++ b/arch/arm/mach-msm/qdsp6v2/Makefile
@@ -12,7 +12,7 @@
obj-$(CONFIG_FB_MSM_HDMI_MSM_PANEL) += lpa_if_hdmi.o
endif
obj-$(CONFIG_MSM_QDSP6_APR) += apr.o apr_v1.o apr_tal.o q6core.o dsp_debug.o
-obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o dsp_debug.o
+obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o dsp_debug.o voice_svc.o
ifdef CONFIG_ARCH_MSM9615
obj-y += audio_acdb.o
obj-y += rtac.o
diff --git a/arch/arm/mach-msm/qdsp6v2/apr.c b/arch/arm/mach-msm/qdsp6v2/apr.c
index 8d9ad29..937eeda 100644
--- a/arch/arm/mach-msm/qdsp6v2/apr.c
+++ b/arch/arm/mach-msm/qdsp6v2/apr.c
@@ -436,7 +436,7 @@
if (data.payload_size > 0)
data.payload = (char *)hdr + hdr_size;
- temp_port = ((data.src_port >> 8) * 8) + (data.src_port & 0xFF);
+ temp_port = ((data.dest_port >> 8) * 8) + (data.dest_port & 0xFF);
pr_debug("port = %d t_port = %d\n", data.src_port, temp_port);
if (c_svc->port_cnt && c_svc->port_fn[temp_port])
c_svc->port_fn[temp_port](&data, c_svc->port_priv[temp_port]);
diff --git a/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c b/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c
index 399e073..df7760a 100644
--- a/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c
+++ b/arch/arm/mach-msm/qdsp6v2/msm_audio_ion.c
@@ -344,7 +344,7 @@
pr_err("%s: ion import dma buffer failed\n",
__func__);
rc = -EINVAL;
- goto err_destroy_client;
+ goto err;
}
if (ionflag != NULL) {
@@ -380,10 +380,6 @@
err_ion_handle:
ion_free(client, *handle);
-err_destroy_client:
- msm_audio_ion_client_destroy(client);
- client = NULL;
- *handle = NULL;
err:
return rc;
}
diff --git a/arch/arm/mach-msm/qdsp6v2/voice_svc.c b/arch/arm/mach-msm/qdsp6v2/voice_svc.c
new file mode 100644
index 0000000..92b3003
--- /dev/null
+++ b/arch/arm/mach-msm/qdsp6v2/voice_svc.c
@@ -0,0 +1,593 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <sound/voice_svc.h>
+#include <mach/qdsp6v2/apr_tal.h>
+#include <mach/qdsp6v2/apr.h>
+
+#define DRIVER_NAME "voice_svc"
+#define MINOR_NUMBER 1
+#define APR_MAX_RESPONSE 10
+
+#define MAX(a, b) ((a) >= (b) ? (a) : (b))
+
+struct voice_svc_device {
+ struct cdev *cdev;
+ struct device *dev;
+ int major;
+};
+
+struct voice_svc_prvt {
+ void* apr_q6_mvm;
+ void* apr_q6_cvs;
+ uint16_t response_count;
+ struct list_head response_queue;
+ wait_queue_head_t response_wait;
+ spinlock_t response_lock;
+};
+
+struct apr_data {
+ struct apr_hdr hdr;
+ __u8 payload[0];
+} __packed;
+
+struct apr_response_list {
+ struct list_head list;
+ struct voice_svc_cmd_response resp;
+};
+
+static struct voice_svc_device *voice_svc_dev;
+static struct class *voice_svc_class;
+dev_t device_num;
+
+static int32_t qdsp_apr_callback(struct apr_client_data *data, void *priv)
+{
+ struct voice_svc_prvt *prtd;
+ struct apr_response_list *response_list;
+ unsigned long spin_flags;
+
+ if ((data == NULL) || (priv == NULL)) {
+ pr_err("%s: data or priv is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ prtd = (struct voice_svc_prvt*)priv;
+
+ pr_debug("%s: data->opcode %x\n", __func__,
+ data->opcode);
+
+ if (data->opcode == RESET_EVENTS) {
+ if (data->reset_proc == APR_DEST_QDSP6) {
+ pr_debug("%s: Received reset event\n", __func__);
+
+ if (prtd->apr_q6_mvm != NULL) {
+ apr_reset(prtd->apr_q6_mvm);
+ prtd->apr_q6_mvm = NULL;
+ }
+
+ if (prtd->apr_q6_cvs != NULL) {
+ apr_reset(prtd->apr_q6_cvs);
+ prtd->apr_q6_cvs = NULL;
+ }
+ } else if (data->reset_proc == APR_DEST_MODEM) {
+ pr_debug("%s: Received Modem reset event\n", __func__);
+ }
+ }
+
+ spin_lock_irqsave(&prtd->response_lock, spin_flags);
+
+ if (prtd->response_count < APR_MAX_RESPONSE) {
+ response_list = (struct apr_response_list *)kmalloc(
+ sizeof(struct apr_response_list) + data->payload_size,
+ GFP_ATOMIC);
+ if (response_list == NULL) {
+ pr_err("%s: kmalloc failed\n", __func__);
+
+ return -ENOMEM;
+ }
+
+ response_list->resp.src_port = data->src_port;
+ response_list->resp.dest_port = ((data->dest_port) >> 8);
+ response_list->resp.token = data->token;
+ response_list->resp.opcode = data->opcode;
+ response_list->resp.payload_size = data->payload_size;
+ if (data->payload != NULL && data->payload_size > 0) {
+ memcpy(response_list->resp.payload, data->payload,
+ data->payload_size);
+ }
+
+ list_add_tail(&response_list->list, &prtd->response_queue);
+ prtd->response_count++;
+
+ wake_up(&prtd->response_wait);
+ } else {
+ pr_err("%s: Response dropped since the queue is full\n", __func__);
+ }
+
+ spin_unlock_irqrestore(&prtd->response_lock, spin_flags);
+
+ return 0;
+}
+
+static void voice_svc_update_hdr(struct voice_svc_cmd_request* apr_req_data,
+ struct apr_data *aprdata,
+ struct voice_svc_prvt *prtd)
+{
+
+ aprdata->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+ APR_HDR_LEN(sizeof(struct apr_hdr)),\
+ APR_PKT_VER);
+ aprdata->hdr.src_port = ((apr_req_data->src_port) << 8 | 0x0001);
+ aprdata->hdr.dest_port = apr_req_data->dest_port;
+ aprdata->hdr.token = apr_req_data->token;
+ aprdata->hdr.opcode = apr_req_data->opcode;
+ aprdata->hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ apr_req_data->payload_size);
+ memcpy(aprdata->payload, apr_req_data->payload,
+ apr_req_data->payload_size);
+}
+
+static int voice_svc_send_req(struct voice_svc_cmd_request *apr_request,
+ struct voice_svc_prvt *prtd)
+{
+ int ret = 0;
+ void *apr_handle = NULL;
+ struct apr_data *aprdata = NULL;
+ uint32_t user_payload_size = 0;
+
+ if (apr_request == NULL) {
+ pr_err("%s: apr_request is NULL\n", __func__);
+
+ ret = -EINVAL;
+ goto done;
+ }
+
+ user_payload_size = apr_request->payload_size;
+
+ aprdata = kmalloc(sizeof(struct apr_data) + user_payload_size,
+ GFP_KERNEL);
+
+ if (aprdata == NULL) {
+ pr_err("%s: aprdata kmalloc failed.", __func__);
+
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ voice_svc_update_hdr(apr_request, aprdata, prtd);
+
+ if (!strncmp(apr_request->svc_name, VOICE_SVC_CVS_STR,
+ MAX(sizeof(apr_request->svc_name), sizeof(VOICE_SVC_CVS_STR)))) {
+ apr_handle = prtd->apr_q6_cvs;
+ } else if (!strncmp(apr_request->svc_name, VOICE_SVC_MVM_STR,
+ MAX(sizeof(apr_request->svc_name), sizeof(VOICE_SVC_MVM_STR)))) {
+ apr_handle = prtd->apr_q6_mvm;
+ } else {
+ pr_err("%s: Invalid service %s\n", __func__,
+ apr_request->svc_name);
+
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = apr_send_pkt(apr_handle, (uint32_t *)aprdata);
+
+ if (ret < 0) {
+ pr_err("%s: Fail in sending SNDRV_VOICE_SVC_REQUEST\n",
+ __func__);
+ ret = -EINVAL;
+ } else {
+ pr_debug("%s: apr packet sent successfully %d\n",
+ __func__, ret);
+ ret = 0;
+ }
+
+done:
+ if (aprdata != NULL)
+ kfree(aprdata);
+
+ return ret;
+}
+static int voice_svc_reg(char *svc, uint32_t src_port,
+ struct voice_svc_prvt *prtd, void **handle)
+{
+ int ret = 0;
+
+ if (handle == NULL) {
+ pr_err("%s: handle is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (*handle != NULL) {
+ pr_err("%s: svc handle not NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ *handle = apr_register("ADSP",
+ svc, qdsp_apr_callback,
+ ((src_port) << 8 | 0x0001),
+ prtd);
+
+ if (*handle == NULL) {
+ pr_err("%s: Unable to register %s\n",
+ __func__, svc);
+
+ ret = -EFAULT;
+ goto done;
+ }
+ pr_debug("%s: register %s successful\n",
+ __func__, svc);
+done:
+ return ret;
+}
+
+static int voice_svc_dereg(char *svc, void **handle)
+{
+ int ret = 0;
+ if (handle == NULL) {
+ pr_err("%s: handle is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ apr_deregister(*handle);
+ *handle = NULL;
+ pr_debug("%s: deregister %s successful\n",
+ __func__, svc);
+
+done:
+ return 0;
+}
+
+static int process_reg_cmd(struct voice_svc_register apr_reg_svc,
+ struct voice_svc_prvt *prtd)
+{
+ int ret = 0;
+ char *svc = NULL;
+ void **handle = NULL;
+
+ if (!strncmp(apr_reg_svc.svc_name, VOICE_SVC_MVM_STR,
+ MAX(sizeof(apr_reg_svc.svc_name), sizeof(VOICE_SVC_MVM_STR)))) {
+ svc = VOICE_SVC_MVM_STR;
+ handle = &prtd->apr_q6_mvm;
+ } else if (!strncmp(apr_reg_svc.svc_name, VOICE_SVC_CVS_STR,
+ MAX(sizeof(apr_reg_svc.svc_name), sizeof(VOICE_SVC_CVS_STR)))) {
+ svc = VOICE_SVC_CVS_STR;
+ handle = &prtd->apr_q6_cvs;
+ } else {
+ pr_err("%s: Invalid Service: %s\n", __func__,
+ apr_reg_svc.svc_name);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (*handle == NULL &&
+ apr_reg_svc.reg_flag) {
+ ret = voice_svc_reg(svc, apr_reg_svc.src_port, prtd,
+ handle);
+ } else if (handle != NULL &&
+ !apr_reg_svc.reg_flag) {
+ ret = voice_svc_dereg(svc, handle);
+ }
+
+done:
+ return ret;
+}
+
+static long voice_svc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long u_arg)
+{
+ int ret = 0;
+ struct voice_svc_prvt *prtd;
+ struct voice_svc_register apr_reg_svc;
+ struct voice_svc_cmd_request *apr_request = NULL;
+ struct voice_svc_cmd_response *apr_response = NULL;
+ struct apr_response_list *resp;
+ void __user *arg = (void __user *)u_arg;
+ uint32_t user_payload_size = 0;
+ unsigned long spin_flags;
+
+ pr_debug("%s: cmd: %u\n", __func__, cmd);
+
+ prtd = (struct voice_svc_prvt*)file->private_data;
+
+ switch (cmd) {
+ case SNDRV_VOICE_SVC_REGISTER_SVC:
+ pr_debug("%s: size of struct: %d\n", __func__,
+ sizeof(apr_reg_svc));
+ if (copy_from_user(&apr_reg_svc, arg, sizeof(apr_reg_svc))) {
+ pr_err("%s: copy_from_user failed\n", __func__);
+
+ ret = -EFAULT;
+ goto done;
+ }
+
+ ret = process_reg_cmd(apr_reg_svc, prtd);
+
+ break;
+ case SNDRV_VOICE_SVC_CMD_REQUEST:
+ if (!access_ok(VERIFY_READ, arg,
+ sizeof(struct voice_svc_cmd_request))) {
+ pr_err("%s: Unable to read user data", __func__);
+
+ ret = -EFAULT;
+ goto done;
+ }
+
+ user_payload_size =
+ ((struct voice_svc_cmd_request*)arg)->payload_size;
+
+ apr_request = kmalloc(sizeof(struct voice_svc_cmd_request) +
+ user_payload_size, GFP_KERNEL);
+
+ if (apr_request == NULL) {
+ pr_err("%s: apr_request kmalloc failed.", __func__);
+
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ if (copy_from_user(apr_request, arg,
+ sizeof(struct voice_svc_cmd_request) +
+ user_payload_size)) {
+ pr_err("%s: copy from user failed, size %d\n", __func__,
+ sizeof(struct voice_svc_cmd_request) +
+ user_payload_size);
+
+ ret = -EFAULT;
+ goto done;
+ }
+
+ ret = voice_svc_send_req(apr_request, prtd);
+
+ break;
+
+ case SNDRV_VOICE_SVC_CMD_RESPONSE:
+ do {
+ if (!access_ok(VERIFY_READ, arg,
+ sizeof(struct voice_svc_cmd_response))) {
+ pr_err("%s: Unable to read user data",
+ __func__);
+
+ ret = -EFAULT;
+ goto done;
+ }
+
+ user_payload_size =
+ ((struct voice_svc_cmd_response*)arg)->payload_size;
+ pr_debug("%s: RESPONSE: user payload size %d",
+ __func__, user_payload_size);
+
+ spin_lock_irqsave(&prtd->response_lock, spin_flags);
+ if (!list_empty(&prtd->response_queue)) {
+ resp = list_first_entry(&prtd->response_queue,
+ struct apr_response_list, list);
+
+ if (user_payload_size <
+ resp->resp.payload_size) {
+ pr_err("%s: Invalid payload size %d,%d",
+ __func__, user_payload_size,
+ resp->resp.payload_size);
+ ret = -ENOMEM;
+ spin_unlock_irqrestore(
+ &prtd->response_lock,
+ spin_flags);
+ goto done;
+ }
+
+ if (!access_ok(VERIFY_WRITE, arg,
+ sizeof(struct voice_svc_cmd_response) +
+ resp->resp.payload_size)) {
+ ret = -EFAULT;
+ spin_unlock_irqrestore(
+ &prtd->response_lock,
+ spin_flags);
+ goto done;
+ }
+
+ if (copy_to_user(arg, &resp->resp,
+ sizeof(struct voice_svc_cmd_response) +
+ resp->resp.payload_size)) {
+ pr_err("%s: copy to user failed, size \
+ %d\n", __func__,
+ sizeof(struct voice_svc_cmd_response) +
+ resp->resp.payload_size);
+
+ ret = -EFAULT;
+ spin_unlock_irqrestore(
+ &prtd->response_lock,
+ spin_flags);
+ goto done;
+ }
+
+ prtd->response_count--;
+
+ list_del(&resp->list);
+ kfree(resp);
+ spin_unlock_irqrestore(&prtd->response_lock,
+ spin_flags);
+ goto done;
+ } else {
+ spin_unlock_irqrestore(&prtd->response_lock,
+ spin_flags);
+ wait_event_interruptible(prtd->response_wait,
+ !list_empty(&prtd->response_queue));
+ pr_debug("%s: Interupt recieved for response",
+ __func__);
+ }
+ } while (!apr_response);
+ break;
+ default:
+ pr_debug("%s: cmd: %u\n", __func__, cmd);
+ ret = -EINVAL;
+ }
+
+done:
+ kfree(apr_request);
+
+ return ret;
+}
+
+static int voice_svc_open(struct inode *inode, struct file *file)
+{
+ struct voice_svc_prvt *prtd = NULL;
+
+ prtd = kmalloc(sizeof(struct voice_svc_prvt), GFP_KERNEL);
+
+ if (prtd == NULL) {
+ pr_err("%s: kmalloc failed", __func__);
+
+ return -ENOMEM;
+ }
+
+ memset(prtd, 0, sizeof(struct voice_svc_prvt));
+ prtd->apr_q6_cvs = NULL;
+ prtd->apr_q6_mvm = NULL;
+ prtd->response_count = 0;
+
+ INIT_LIST_HEAD(&prtd->response_queue);
+ init_waitqueue_head(&prtd->response_wait);
+ spin_lock_init(&prtd->response_lock);
+
+ file->private_data = (void*)prtd;
+
+ return 0;
+}
+
+static int voice_svc_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations voice_svc_fops = {
+ .owner = THIS_MODULE,
+ .open = voice_svc_open,
+ .unlocked_ioctl = voice_svc_ioctl,
+ .release = voice_svc_release,
+};
+
+static int voice_svc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ voice_svc_dev = devm_kzalloc(&pdev->dev, sizeof(struct voice_svc_device),
+ GFP_KERNEL);
+ if (!voice_svc_dev) {
+ pr_err("%s: kzalloc failed\n", __func__);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ret = alloc_chrdev_region(&device_num, 0, MINOR_NUMBER, DRIVER_NAME);
+ if (ret) {
+ pr_err("%s: Failed to alloc chrdev\n", __func__);
+ ret = -ENODEV;
+ goto done;
+ }
+
+ voice_svc_dev->major = MAJOR(device_num);
+ voice_svc_class = class_create(THIS_MODULE, DRIVER_NAME);
+ if (IS_ERR(voice_svc_class)) {
+ ret = PTR_ERR(voice_svc_class);
+ pr_err("%s: Failed to create class; err = %d\n", __func__,
+ ret);
+ goto class_err;
+ }
+
+ voice_svc_dev->dev = device_create(voice_svc_class, NULL, device_num,
+ NULL, DRIVER_NAME);
+ if (IS_ERR(voice_svc_dev->dev)) {
+ ret = PTR_ERR(voice_svc_dev->dev);
+ pr_err("%s: Failed to create device; err = %d\n", __func__,
+ ret);
+ goto dev_err;
+ }
+
+ voice_svc_dev->cdev = cdev_alloc();
+ cdev_init(voice_svc_dev->cdev, &voice_svc_fops);
+ ret = cdev_add(voice_svc_dev->cdev, device_num, MINOR_NUMBER);
+ if (ret) {
+ pr_err("%s: Failed to register chrdev; err = %d\n", __func__,
+ ret);
+ goto add_err;
+ }
+ pr_debug("%s: Device created\n", __func__);
+ goto done;
+
+add_err:
+ cdev_del(voice_svc_dev->cdev);
+ device_destroy(voice_svc_class, device_num);
+dev_err:
+ class_destroy(voice_svc_class);
+class_err:
+ unregister_chrdev_region(device_num, MINOR_NUMBER);
+done:
+ return ret;
+}
+
+static int voice_svc_remove(struct platform_device *pdev)
+{
+ cdev_del(voice_svc_dev->cdev);
+ kfree(voice_svc_dev->cdev);
+ device_destroy(voice_svc_class, device_num);
+ class_destroy(voice_svc_class);
+ unregister_chrdev_region(device_num, MINOR_NUMBER);
+ kfree(voice_svc_dev);
+
+ return 0;
+}
+
+static struct of_device_id voice_svc_of_match[] = {
+ {.compatible = "qcom,msm-voice-svc"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, voice_svc_of_match);
+
+static struct platform_driver voice_svc_driver = {
+ .probe = voice_svc_probe,
+ .remove = voice_svc_remove,
+ .driver = {
+ .name = "msm-voice-svc",
+ .owner = THIS_MODULE,
+ .of_match_table = voice_svc_of_match,
+ },
+};
+
+static int __init voice_svc_init(void)
+{
+ return platform_driver_register(&voice_svc_driver);
+}
+
+static void __exit voice_svc_exit(void)
+{
+ platform_driver_unregister(&voice_svc_driver);
+}
+
+module_init(voice_svc_init);
+module_exit(voice_svc_exit);
+
+MODULE_DESCRIPTION("Soc QDSP6v2 Audio APR driver");
+MODULE_LICENSE("GPL v2");
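For reference, a minimal user-space sketch of how a client could exercise the ioctl interface added above. This is illustrative only, not part of the patch: the "voice_svc.h" header, the /dev node name and the "MVM" service string are assumptions; the struct fields and ioctl command mirror the driver code.

/* Illustrative sketch -- not part of this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "voice_svc.h"	/* hypothetical UAPI header exporting the ioctls */

int main(void)
{
	struct voice_svc_register reg;
	int fd = open("/dev/voice_svc", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&reg, 0, sizeof(reg));
	/* "MVM" is the assumed value of VOICE_SVC_MVM_STR */
	strncpy(reg.svc_name, "MVM", sizeof(reg.svc_name) - 1);
	reg.src_port = 1;
	reg.reg_flag = 1;	/* 1 = register with APR, 0 = deregister */

	if (ioctl(fd, SNDRV_VOICE_SVC_REGISTER_SVC, &reg) < 0)
		perror("SNDRV_VOICE_SVC_REGISTER_SVC");

	close(fd);
	return 0;
}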
diff --git a/arch/arm/mach-msm/smd_tty.c b/arch/arm/mach-msm/smd_tty.c
index 428d5b0..9cb26e1 100644
--- a/arch/arm/mach-msm/smd_tty.c
+++ b/arch/arm/mach-msm/smd_tty.c
@@ -551,7 +551,7 @@
static void smd_tty_close(struct tty_struct *tty, struct file *f)
{
- struct smd_tty_info *info = tty->driver_data;
+ struct smd_tty_info *info = smd_tty + tty->index;
tty_port_close(&info->port, tty, f);
}
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 9f50547..8e7adba 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -21,10 +21,12 @@
#include <linux/sys_soc.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/string.h>
#include <linux/sysdev.h>
#include <linux/types.h>
#include <asm/mach-types.h>
+#include <asm/system_misc.h>
#include <mach/socinfo.h>
#include <mach/msm_smem.h>
@@ -185,246 +187,247 @@
struct socinfo_v8 v8;
} *socinfo;
-static enum msm_cpu cpu_of_id[] = {
+static struct msm_soc_info cpu_of_id[] = {
/* 7x01 IDs */
- [1] = MSM_CPU_7X01,
- [16] = MSM_CPU_7X01,
- [17] = MSM_CPU_7X01,
- [18] = MSM_CPU_7X01,
- [19] = MSM_CPU_7X01,
- [23] = MSM_CPU_7X01,
- [25] = MSM_CPU_7X01,
- [26] = MSM_CPU_7X01,
- [32] = MSM_CPU_7X01,
- [33] = MSM_CPU_7X01,
- [34] = MSM_CPU_7X01,
- [35] = MSM_CPU_7X01,
+ [0] = {MSM_CPU_UNKNOWN, "Unknown CPU"},
+ [1] = {MSM_CPU_7X01, "MSM7X01"},
+ [16] = {MSM_CPU_7X01, "MSM7X01"},
+ [17] = {MSM_CPU_7X01, "MSM7X01"},
+ [18] = {MSM_CPU_7X01, "MSM7X01"},
+ [19] = {MSM_CPU_7X01, "MSM7X01"},
+ [23] = {MSM_CPU_7X01, "MSM7X01"},
+ [25] = {MSM_CPU_7X01, "MSM7X01"},
+ [26] = {MSM_CPU_7X01, "MSM7X01"},
+ [32] = {MSM_CPU_7X01, "MSM7X01"},
+ [33] = {MSM_CPU_7X01, "MSM7X01"},
+ [34] = {MSM_CPU_7X01, "MSM7X01"},
+ [35] = {MSM_CPU_7X01, "MSM7X01"},
/* 7x25 IDs */
- [20] = MSM_CPU_7X25,
- [21] = MSM_CPU_7X25, /* 7225 */
- [24] = MSM_CPU_7X25, /* 7525 */
- [27] = MSM_CPU_7X25, /* 7625 */
- [39] = MSM_CPU_7X25,
- [40] = MSM_CPU_7X25,
- [41] = MSM_CPU_7X25,
- [42] = MSM_CPU_7X25,
- [62] = MSM_CPU_7X25, /* 7625-1 */
- [63] = MSM_CPU_7X25, /* 7225-1 */
- [66] = MSM_CPU_7X25, /* 7225-2 */
+ [20] = {MSM_CPU_7X25, "MSM7X25"},
+ [21] = {MSM_CPU_7X25, "MSM7X25"},
+ [24] = {MSM_CPU_7X25, "MSM7X25"},
+ [27] = {MSM_CPU_7X25, "MSM7X25"},
+ [39] = {MSM_CPU_7X25, "MSM7X25"},
+ [40] = {MSM_CPU_7X25, "MSM7X25"},
+ [41] = {MSM_CPU_7X25, "MSM7X25"},
+ [42] = {MSM_CPU_7X25, "MSM7X25"},
+ [62] = {MSM_CPU_7X25, "MSM7X25"},
+ [63] = {MSM_CPU_7X25, "MSM7X25"},
+ [66] = {MSM_CPU_7X25, "MSM7X25"},
/* 7x27 IDs */
- [43] = MSM_CPU_7X27,
- [44] = MSM_CPU_7X27,
- [61] = MSM_CPU_7X27,
- [67] = MSM_CPU_7X27, /* 7227-1 */
- [68] = MSM_CPU_7X27, /* 7627-1 */
- [69] = MSM_CPU_7X27, /* 7627-2 */
+ [43] = {MSM_CPU_7X27, "MSM7X27"},
+ [44] = {MSM_CPU_7X27, "MSM7X27"},
+ [61] = {MSM_CPU_7X27, "MSM7X27"},
+ [67] = {MSM_CPU_7X27, "MSM7X27"},
+ [68] = {MSM_CPU_7X27, "MSM7X27"},
+ [69] = {MSM_CPU_7X27, "MSM7X27"},
/* 8x50 IDs */
- [30] = MSM_CPU_8X50,
- [36] = MSM_CPU_8X50,
- [37] = MSM_CPU_8X50,
- [38] = MSM_CPU_8X50,
+ [30] = {MSM_CPU_8X50, "MSM8X50"},
+ [36] = {MSM_CPU_8X50, "MSM8X50"},
+ [37] = {MSM_CPU_8X50, "MSM8X50"},
+ [38] = {MSM_CPU_8X50, "MSM8X50"},
/* 7x30 IDs */
- [59] = MSM_CPU_7X30,
- [60] = MSM_CPU_7X30,
+ [59] = {MSM_CPU_7X30, "MSM7X30"},
+ [60] = {MSM_CPU_7X30, "MSM7X30"},
/* 8x55 IDs */
- [74] = MSM_CPU_8X55,
- [75] = MSM_CPU_8X55,
- [85] = MSM_CPU_8X55,
+ [74] = {MSM_CPU_8X55, "MSM8X55"},
+ [75] = {MSM_CPU_8X55, "MSM8X55"},
+ [85] = {MSM_CPU_8X55, "MSM8X55"},
/* 8x60 IDs */
- [70] = MSM_CPU_8X60,
- [71] = MSM_CPU_8X60,
- [86] = MSM_CPU_8X60,
+ [70] = {MSM_CPU_8X60, "MSM8X60"},
+ [71] = {MSM_CPU_8X60, "MSM8X60"},
+ [86] = {MSM_CPU_8X60, "MSM8X60"},
/* 8960 IDs */
- [87] = MSM_CPU_8960,
+ [87] = {MSM_CPU_8960, "MSM8960"},
/* 7x25A IDs */
- [88] = MSM_CPU_7X25A,
- [89] = MSM_CPU_7X25A,
- [96] = MSM_CPU_7X25A,
+ [88] = {MSM_CPU_7X25A, "MSM7X25A"},
+ [89] = {MSM_CPU_7X25A, "MSM7X25A"},
+ [96] = {MSM_CPU_7X25A, "MSM7X25A"},
/* 7x27A IDs */
- [90] = MSM_CPU_7X27A,
- [91] = MSM_CPU_7X27A,
- [92] = MSM_CPU_7X27A,
- [97] = MSM_CPU_7X27A,
+ [90] = {MSM_CPU_7X27A, "MSM7X27A"},
+ [91] = {MSM_CPU_7X27A, "MSM7X27A"},
+ [92] = {MSM_CPU_7X27A, "MSM7X27A"},
+ [97] = {MSM_CPU_7X27A, "MSM7X27A"},
/* FSM9xxx ID */
- [94] = FSM_CPU_9XXX,
- [95] = FSM_CPU_9XXX,
+ [94] = {FSM_CPU_9XXX, "FSM9XXX"},
+ [95] = {FSM_CPU_9XXX, "FSM9XXX"},
/* 7x25AA ID */
- [98] = MSM_CPU_7X25AA,
- [99] = MSM_CPU_7X25AA,
- [100] = MSM_CPU_7X25AA,
+ [98] = {MSM_CPU_7X25AA, "MSM7X25AA"},
+ [99] = {MSM_CPU_7X25AA, "MSM7X25AA"},
+ [100] = {MSM_CPU_7X25AA, "MSM7X25AA"},
/* 7x27AA ID */
- [101] = MSM_CPU_7X27AA,
- [102] = MSM_CPU_7X27AA,
- [103] = MSM_CPU_7X27AA,
- [136] = MSM_CPU_7X27AA,
+ [101] = {MSM_CPU_7X27AA, "MSM7X27AA"},
+ [102] = {MSM_CPU_7X27AA, "MSM7X27AA"},
+ [103] = {MSM_CPU_7X27AA, "MSM7X27AA"},
+ [136] = {MSM_CPU_7X27AA, "MSM7X27AA"},
/* 9x15 ID */
- [104] = MSM_CPU_9615,
- [105] = MSM_CPU_9615,
- [106] = MSM_CPU_9615,
- [107] = MSM_CPU_9615,
- [171] = MSM_CPU_9615,
+ [104] = {MSM_CPU_9615, "MSM9615"},
+ [105] = {MSM_CPU_9615, "MSM9615"},
+ [106] = {MSM_CPU_9615, "MSM9615"},
+ [107] = {MSM_CPU_9615, "MSM9615"},
+ [171] = {MSM_CPU_9615, "MSM9615"},
/* 8064 IDs */
- [109] = MSM_CPU_8064,
+ [109] = {MSM_CPU_8064, "APQ8064"},
/* 8930 IDs */
- [116] = MSM_CPU_8930,
- [117] = MSM_CPU_8930,
- [118] = MSM_CPU_8930,
- [119] = MSM_CPU_8930,
- [179] = MSM_CPU_8930,
+ [116] = {MSM_CPU_8930, "MSM8930"},
+ [117] = {MSM_CPU_8930, "MSM8930"},
+ [118] = {MSM_CPU_8930, "MSM8930"},
+ [119] = {MSM_CPU_8930, "MSM8930"},
+ [179] = {MSM_CPU_8930, "MSM8930"},
/* 8627 IDs */
- [120] = MSM_CPU_8627,
- [121] = MSM_CPU_8627,
+ [120] = {MSM_CPU_8627, "MSM8627"},
+ [121] = {MSM_CPU_8627, "MSM8627"},
/* 8660A ID */
- [122] = MSM_CPU_8960,
+ [122] = {MSM_CPU_8960, "MSM8960"},
/* 8260A ID */
- [123] = MSM_CPU_8960,
+ [123] = {MSM_CPU_8960, "MSM8960"},
/* 8060A ID */
- [124] = MSM_CPU_8960,
+ [124] = {MSM_CPU_8960, "MSM8960"},
/* 8974 IDs */
- [126] = MSM_CPU_8974,
- [184] = MSM_CPU_8974,
- [185] = MSM_CPU_8974,
- [186] = MSM_CPU_8974,
+ [126] = {MSM_CPU_8974, "MSM8974"},
+ [184] = {MSM_CPU_8974, "MSM8974"},
+ [185] = {MSM_CPU_8974, "MSM8974"},
+ [186] = {MSM_CPU_8974, "MSM8974"},
/* 8974AA IDs */
- [208] = MSM_CPU_8974PRO_AA,
- [211] = MSM_CPU_8974PRO_AA,
- [214] = MSM_CPU_8974PRO_AA,
- [217] = MSM_CPU_8974PRO_AA,
+ [208] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
+ [211] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
+ [214] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
+ [217] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
/* 8974AB IDs */
- [209] = MSM_CPU_8974PRO_AB,
- [212] = MSM_CPU_8974PRO_AB,
- [215] = MSM_CPU_8974PRO_AB,
- [218] = MSM_CPU_8974PRO_AB,
+ [209] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
+ [212] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
+ [215] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
+ [218] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
/* 8974AC IDs */
- [194] = MSM_CPU_8974PRO_AC,
- [210] = MSM_CPU_8974PRO_AC,
- [213] = MSM_CPU_8974PRO_AC,
- [216] = MSM_CPU_8974PRO_AC,
+ [194] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
+ [210] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
+ [213] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
+ [216] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
/* 8625 IDs */
- [127] = MSM_CPU_8625,
- [128] = MSM_CPU_8625,
- [129] = MSM_CPU_8625,
- [137] = MSM_CPU_8625,
- [167] = MSM_CPU_8625,
+ [127] = {MSM_CPU_8625, "MSM8625"},
+ [128] = {MSM_CPU_8625, "MSM8625"},
+ [129] = {MSM_CPU_8625, "MSM8625"},
+ [137] = {MSM_CPU_8625, "MSM8625"},
+ [167] = {MSM_CPU_8625, "MSM8625"},
/* 8064 MPQ ID */
- [130] = MSM_CPU_8064,
+ [130] = {MSM_CPU_8064, "APQ8064"},
/* 7x25AB IDs */
- [131] = MSM_CPU_7X25AB,
- [132] = MSM_CPU_7X25AB,
- [133] = MSM_CPU_7X25AB,
- [135] = MSM_CPU_7X25AB,
+ [131] = {MSM_CPU_7X25AB, "MSM7X25AB"},
+ [132] = {MSM_CPU_7X25AB, "MSM7X25AB"},
+ [133] = {MSM_CPU_7X25AB, "MSM7X25AB"},
+ [135] = {MSM_CPU_7X25AB, "MSM7X25AB"},
/* 9625 IDs */
- [134] = MSM_CPU_9625,
- [148] = MSM_CPU_9625,
- [149] = MSM_CPU_9625,
- [150] = MSM_CPU_9625,
- [151] = MSM_CPU_9625,
- [152] = MSM_CPU_9625,
- [173] = MSM_CPU_9625,
- [174] = MSM_CPU_9625,
- [175] = MSM_CPU_9625,
+ [134] = {MSM_CPU_9625, "MSM9625"},
+ [148] = {MSM_CPU_9625, "MSM9625"},
+ [149] = {MSM_CPU_9625, "MSM9625"},
+ [150] = {MSM_CPU_9625, "MSM9625"},
+ [151] = {MSM_CPU_9625, "MSM9625"},
+ [152] = {MSM_CPU_9625, "MSM9625"},
+ [173] = {MSM_CPU_9625, "MSM9625"},
+ [174] = {MSM_CPU_9625, "MSM9625"},
+ [175] = {MSM_CPU_9625, "MSM9625"},
/* 8960AB IDs */
- [138] = MSM_CPU_8960AB,
- [139] = MSM_CPU_8960AB,
- [140] = MSM_CPU_8960AB,
- [141] = MSM_CPU_8960AB,
+ [138] = {MSM_CPU_8960AB, "MSM8960AB"},
+ [139] = {MSM_CPU_8960AB, "MSM8960AB"},
+ [140] = {MSM_CPU_8960AB, "MSM8960AB"},
+ [141] = {MSM_CPU_8960AB, "MSM8960AB"},
/* 8930AA IDs */
- [142] = MSM_CPU_8930AA,
- [143] = MSM_CPU_8930AA,
- [144] = MSM_CPU_8930AA,
- [160] = MSM_CPU_8930AA,
- [180] = MSM_CPU_8930AA,
+ [142] = {MSM_CPU_8930AA, "MSM8930AA"},
+ [143] = {MSM_CPU_8930AA, "MSM8930AA"},
+ [144] = {MSM_CPU_8930AA, "MSM8930AA"},
+ [160] = {MSM_CPU_8930AA, "MSM8930AA"},
+ [180] = {MSM_CPU_8930AA, "MSM8930AA"},
/* 8226 IDs */
- [145] = MSM_CPU_8226,
- [158] = MSM_CPU_8226,
- [159] = MSM_CPU_8226,
- [198] = MSM_CPU_8226,
- [199] = MSM_CPU_8226,
- [200] = MSM_CPU_8226,
- [205] = MSM_CPU_8226,
- [219] = MSM_CPU_8226,
- [220] = MSM_CPU_8226,
- [221] = MSM_CPU_8226,
- [222] = MSM_CPU_8226,
- [223] = MSM_CPU_8226,
- [224] = MSM_CPU_8226,
+ [145] = {MSM_CPU_8226, "MSM8626"},
+ [158] = {MSM_CPU_8226, "MSM8226"},
+ [159] = {MSM_CPU_8226, "MSM8526"},
+ [198] = {MSM_CPU_8226, "MSM8126"},
+ [199] = {MSM_CPU_8226, "APQ8026"},
+ [200] = {MSM_CPU_8226, "MSM8926"},
+ [205] = {MSM_CPU_8226, "MSM8326"},
+ [219] = {MSM_CPU_8226, "APQ8028"},
+ [220] = {MSM_CPU_8226, "MSM8128"},
+ [221] = {MSM_CPU_8226, "MSM8228"},
+ [222] = {MSM_CPU_8226, "MSM8528"},
+ [223] = {MSM_CPU_8226, "MSM8628"},
+ [224] = {MSM_CPU_8226, "MSM8928"},
/* 8092 IDs */
- [146] = MSM_CPU_8092,
+ [146] = {MSM_CPU_8092, "MSM8092"},
/* 8610 IDs */
- [147] = MSM_CPU_8610,
- [161] = MSM_CPU_8610,
- [162] = MSM_CPU_8610,
- [163] = MSM_CPU_8610,
- [164] = MSM_CPU_8610,
- [165] = MSM_CPU_8610,
- [166] = MSM_CPU_8610,
- [225] = MSM_CPU_8610,
- [226] = MSM_CPU_8610,
+ [147] = {MSM_CPU_8610, "MSM8610"},
+ [161] = {MSM_CPU_8610, "MSM8110"},
+ [162] = {MSM_CPU_8610, "MSM8210"},
+ [163] = {MSM_CPU_8610, "MSM8810"},
+ [164] = {MSM_CPU_8610, "MSM8212"},
+ [165] = {MSM_CPU_8610, "MSM8612"},
+ [166] = {MSM_CPU_8610, "MSM8112"},
+ [225] = {MSM_CPU_8610, "MSM8510"},
+ [226] = {MSM_CPU_8610, "MSM8512"},
/* 8064AB IDs */
- [153] = MSM_CPU_8064AB,
+ [153] = {MSM_CPU_8064AB, "APQ8064AB"},
/* 8930AB IDs */
- [154] = MSM_CPU_8930AB,
- [155] = MSM_CPU_8930AB,
- [156] = MSM_CPU_8930AB,
- [157] = MSM_CPU_8930AB,
- [181] = MSM_CPU_8930AB,
+ [154] = {MSM_CPU_8930AB, "MSM8930AB"},
+ [155] = {MSM_CPU_8930AB, "MSM8930AB"},
+ [156] = {MSM_CPU_8930AB, "MSM8930AB"},
+ [157] = {MSM_CPU_8930AB, "MSM8930AB"},
+ [181] = {MSM_CPU_8930AB, "MSM8930AB"},
/* 8625Q IDs */
- [168] = MSM_CPU_8625Q,
- [169] = MSM_CPU_8625Q,
- [170] = MSM_CPU_8625Q,
+ [168] = {MSM_CPU_8625Q, "MSM8225Q"},
+ [169] = {MSM_CPU_8625Q, "MSM8625Q"},
+ [170] = {MSM_CPU_8625Q, "MSM8125Q"},
/* 8064AA IDs */
- [172] = MSM_CPU_8064AA,
+ [172] = {MSM_CPU_8064AA, "APQ8064AA"},
/* 8084 IDs */
- [178] = MSM_CPU_8084,
+ [178] = {MSM_CPU_8084, "APQ8084"},
/* krypton IDs */
- [187] = MSM_CPU_KRYPTON,
+ [187] = {MSM_CPU_KRYPTON, "MSMKRYPTON"},
/* FSM9900 ID */
- [188] = FSM_CPU_9900,
+ [188] = {FSM_CPU_9900, "FSM9900"},
/* Samarium IDs */
- [195] = MSM_CPU_SAMARIUM,
+ [195] = {MSM_CPU_SAMARIUM, "MSMSAMARIUM"},
/* Uninitialized IDs are not known to run Linux.
MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
@@ -455,6 +458,25 @@
return (socinfo) ? socinfo->v1.build_id : NULL;
}
+static char *msm_read_hardware_id(void)
+{
+ static char msm_soc_str[128] = "Qualcomm ";
+ static bool string_generated = false;
+
+ if (string_generated)
+ return msm_soc_str;
+ if (!socinfo)
+ goto err_path;
+ if (!cpu_of_id[socinfo->v1.id].soc_id_string)
+ goto err_path;
+
+ string_generated = true;
+ return strncat(msm_soc_str, cpu_of_id[socinfo->v1.id].soc_id_string,
+ sizeof(msm_soc_str) - strlen(msm_soc_str) - 1);
+err_path:
+ return "UNKNOWN SOC TYPE";
+}
+
uint32_t socinfo_get_raw_id(void)
{
return socinfo ?
@@ -678,6 +700,8 @@
char *buf)
{
uint32_t hw_subtype;
+ WARN_ONCE(1, "Deprecated, use platform_subtype_id instead\n");
+
if (!socinfo) {
pr_err("%s: No socinfo found!\n", __func__);
return 0;
@@ -707,6 +731,18 @@
}
static ssize_t
+socinfo_show_platform_subtype_id(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ char *buf)
+{
+ uint32_t hw_subtype;
+
+ hw_subtype = socinfo_get_platform_subtype();
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ hw_subtype);
+}
+
+static ssize_t
socinfo_show_pmic_model(struct sys_device *dev,
struct sysdev_attribute *attr,
char *buf)
@@ -829,6 +865,17 @@
}
static ssize_t
+msm_get_platform_subtype_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ uint32_t hw_subtype;
+ hw_subtype = socinfo_get_platform_subtype();
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ hw_subtype);
+}
+
+static ssize_t
msm_get_pmic_model(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -974,6 +1021,8 @@
static struct sysdev_attribute socinfo_v6_files[] = {
_SYSDEV_ATTR(platform_subtype, 0444,
socinfo_show_platform_subtype, NULL),
+ _SYSDEV_ATTR(platform_subtype_id, 0444,
+ socinfo_show_platform_subtype_id, NULL),
};
static struct sysdev_attribute socinfo_v7_files[] = {
@@ -1059,6 +1108,13 @@
__ATTR(platform_subtype, S_IRUGO,
msm_get_platform_subtype, NULL);
+/* Platform Subtype String is being deprecated. Use Platform
+ * Subtype ID instead.
+ */
+static struct device_attribute msm_soc_attr_platform_subtype_id =
+ __ATTR(platform_subtype_id, S_IRUGO,
+ msm_get_platform_subtype_id, NULL);
+
static struct device_attribute msm_soc_attr_pmic_model =
__ATTR(pmic_model, S_IRUGO,
msm_get_pmic_model, NULL);
@@ -1152,6 +1208,8 @@
case 6:
device_create_file(msm_soc_device,
&msm_soc_attr_platform_subtype);
+ device_create_file(msm_soc_device,
+ &msm_soc_attr_platform_subtype_id);
case 5:
device_create_file(msm_soc_device,
&msm_soc_attr_accessory_chip);
@@ -1401,14 +1459,16 @@
}
WARN(!socinfo_get_id(), "Unknown SOC ID!\n");
- WARN(socinfo_get_id() >= ARRAY_SIZE(cpu_of_id),
- "New IDs added! ID => CPU mapping might need an update.\n");
- if (socinfo->v1.id < ARRAY_SIZE(cpu_of_id))
- cur_cpu = cpu_of_id[socinfo->v1.id];
+ if (socinfo_get_id() >= ARRAY_SIZE(cpu_of_id))
+ BUG_ON("New IDs added! ID => CPU mapping might need an update.\n");
+
+ else
+ cur_cpu = cpu_of_id[socinfo->v1.id].generic_soc_type;
boot_stats_init();
socinfo_print();
+ arch_read_hardware_id = msm_read_hardware_id;
return 0;
}
diff --git a/arch/arm/mach-msm/subsystem_restart.c b/arch/arm/mach-msm/subsystem_restart.c
index 01e0985..bc4eb76 100644
--- a/arch/arm/mach-msm/subsystem_restart.c
+++ b/arch/arm/mach-msm/subsystem_restart.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -466,13 +466,20 @@
pr_info("[%p]: Powering up %s\n", current, name);
init_completion(&dev->err_ready);
- if (dev->desc->powerup(dev->desc) < 0)
+
+ if (dev->desc->powerup(dev->desc) < 0) {
+ notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
+ NULL);
panic("[%p]: Powerup error: %s!", current, name);
+ }
ret = wait_for_err_ready(dev);
- if (ret)
+ if (ret) {
+ notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
+ NULL);
panic("[%p]: Timed out waiting for error ready: %s!",
current, name);
+ }
subsys_set_state(dev, SUBSYS_ONLINE);
}
@@ -500,8 +507,11 @@
init_completion(&subsys->err_ready);
ret = subsys->desc->start(subsys->desc);
- if (ret)
+ if (ret) {
+ notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
+ NULL);
return ret;
+ }
if (subsys->desc->is_not_loadable) {
subsys_set_state(subsys, SUBSYS_ONLINE);
@@ -509,12 +519,14 @@
}
ret = wait_for_err_ready(subsys);
- if (ret)
+ if (ret) {
/* pil-boot succeeded but we need to shutdown
* the device because error ready timed out.
*/
+ notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
+ NULL);
subsys->desc->stop(subsys->desc);
- else
+ } else
subsys_set_state(subsys, SUBSYS_ONLINE);
return ret;
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 885721f..606383a 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -547,9 +547,9 @@
VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+ free_contig_range(pfn, count);
mutex_lock(&cma_mutex);
bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
- free_contig_range(pfn, count);
mutex_unlock(&cma_mutex);
return true;
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 606953d..99647a7 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -107,6 +107,10 @@
#define DIAG_STM_WCNSS 0x04
#define DIAG_STM_APPS 0x08
+#define DIAG_DIAG_STM 0x214
+
+#define BAD_PARAM_RESPONSE_MESSAGE 20
+
/*
* The status bit masks when received in a signal handler are to be
* used in conjunction with the peripheral list bit mask to determine the
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 8cc7515..0bbb012 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -56,6 +56,8 @@
#define STM_RSP_SMD_COMPLY_INDEX 9
#define STM_RSP_NUM_BYTES 10
+#define STM_COMMAND_VALID 1
+
#define SMD_DRAIN_BUF_SIZE 4096
int diag_debug_buf_idx;
@@ -1136,20 +1138,44 @@
}
}
-int diag_process_stm_cmd(unsigned char *buf)
+int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf)
{
- uint8_t version = *(buf+STM_CMD_VERSION_OFFSET);
- uint8_t mask = *(buf+STM_CMD_MASK_OFFSET);
- uint8_t cmd = *(buf+STM_CMD_DATA_OFFSET);
+ uint8_t version, mask, cmd;
uint8_t rsp_supported = 0;
uint8_t rsp_smd_comply = 0;
- int valid_command = 1;
int i;
- /* Check if command is valid */
- if ((version != 1) || (mask == 0) || (0 != (mask >> 4)) ||
- (cmd != ENABLE_STM && cmd != DISABLE_STM)) {
- valid_command = 0;
+ if (!buf || !dest_buf) {
+ pr_err("diag: Invalid pointers buf: %p, dest_buf %p in %s\n",
+ buf, dest_buf, __func__);
+ return -EIO;
+ }
+
+ version = *(buf + STM_CMD_VERSION_OFFSET);
+ mask = *(buf + STM_CMD_MASK_OFFSET);
+ cmd = *(buf + STM_CMD_DATA_OFFSET);
+
+ /*
+ * Check if command is valid. If the command is asking for
+ * status, then the processor mask field is to be ignored.
+ */
+ if ((version != 1) || (cmd > STATUS_STM) ||
+ ((cmd != STATUS_STM) && ((mask == 0) || (0 != (mask >> 4))))) {
+ /* Command is invalid. Send bad param message response */
+ dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
+ for (i = 0; i < STM_CMD_NUM_BYTES; i++)
+ dest_buf[i+1] = *(buf + i);
+ return STM_CMD_NUM_BYTES+1;
+ } else if (cmd == STATUS_STM) {
+ /*
+ * Only the status is being queried, so fill in whether diag
+ * over stm is supported or not
+ */
+ for (i = 0; i < NUM_SMD_CONTROL_CHANNELS; i++)
+ if (driver->peripheral_supports_stm[i])
+ rsp_supported |= 1 << i;
+
+ rsp_supported |= DIAG_STM_APPS;
} else {
if (mask & DIAG_STM_MODEM)
diag_process_stm_mask(cmd, DIAG_STM_MODEM, MODEM_DATA,
@@ -1169,15 +1195,13 @@
}
for (i = 0; i < STM_CMD_NUM_BYTES; i++)
- driver->apps_rsp_buf[i] = *(buf+i);
+ dest_buf[i] = *(buf + i);
- driver->apps_rsp_buf[STM_RSP_VALID_INDEX] = valid_command;
- driver->apps_rsp_buf[STM_RSP_SUPPORTED_INDEX] = rsp_supported;
- driver->apps_rsp_buf[STM_RSP_SMD_COMPLY_INDEX] = rsp_smd_comply;
+ dest_buf[STM_RSP_VALID_INDEX] = STM_COMMAND_VALID;
+ dest_buf[STM_RSP_SUPPORTED_INDEX] = rsp_supported;
+ dest_buf[STM_RSP_SMD_COMPLY_INDEX] = rsp_smd_comply;
- encode_rsp_and_send(STM_RSP_NUM_BYTES-1);
-
- return 0;
+ return STM_RSP_NUM_BYTES;
}
int diag_apps_responds()
@@ -1273,8 +1297,13 @@
encode_rsp_and_send(7);
return 0;
} else if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
- (*(uint16_t *)(buf+2) == 0x020E)) {
- return diag_process_stm_cmd(buf);
+ (*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) {
+ len = diag_process_stm_cmd(buf, driver->apps_rsp_buf);
+ if (len > 0) {
+ encode_rsp_and_send(len - 1);
+ return 0;
+ }
+ return len;
}
/* Check for Apps Only & get event mask request */
else if (diag_apps_responds() && *buf == 0x81) {
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index d79195c..7f5ea03 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -54,8 +54,9 @@
#define ENABLE_SEPARATE_CMDRSP 1
#define DISABLE_SEPARATE_CMDRSP 0
-#define ENABLE_STM 1
#define DISABLE_STM 0
+#define ENABLE_STM 1
+#define STATUS_STM 2
#define UPDATE_PERIPHERAL_STM_STATE 1
#define CLEAR_PERIPHERAL_STM_STATE 2
diff --git a/drivers/coresight/coresight-tmc.c b/drivers/coresight/coresight-tmc.c
index f35ba53..0c7c9e0 100644
--- a/drivers/coresight/coresight-tmc.c
+++ b/drivers/coresight/coresight-tmc.c
@@ -177,6 +177,7 @@
bool byte_cntr_read_active;
wait_queue_head_t wq;
char *byte_cntr_node;
+ uint32_t mem_size;
};
static void tmc_wait_for_flush(struct tmc_drvdata *drvdata)
@@ -1310,6 +1311,32 @@
static DEVICE_ATTR(byte_cntr_value, S_IRUGO | S_IWUSR,
tmc_etr_show_byte_cntr_value, tmc_etr_store_byte_cntr_value);
+static ssize_t tmc_etr_show_mem_size(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val = drvdata->mem_size;
+
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t tmc_etr_store_mem_size(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ drvdata->mem_size = val;
+ return size;
+}
+static DEVICE_ATTR(mem_size, S_IRUGO | S_IWUSR,
+ tmc_etr_show_mem_size, tmc_etr_store_mem_size);
+
static struct attribute *tmc_attrs[] = {
&dev_attr_trigger_cntr.attr,
NULL,
@@ -1322,6 +1349,7 @@
static struct attribute *tmc_etr_attrs[] = {
&dev_attr_out_mode.attr,
&dev_attr_byte_cntr_value.attr,
+ &dev_attr_mem_size.attr,
NULL,
};
diff --git a/drivers/cpufreq/cpu-boost.c b/drivers/cpufreq/cpu-boost.c
index f20510d..b4aec53 100644
--- a/drivers/cpufreq/cpu-boost.c
+++ b/drivers/cpufreq/cpu-boost.c
@@ -335,8 +335,6 @@
int cpu, ret;
struct cpu_sync *s;
- cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
-
cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
if (!cpu_boost_wq)
return -EFAULT;
@@ -354,10 +352,11 @@
"boost_sync/%d", cpu);
set_cpus_allowed(s->thread, *cpumask_of(cpu));
}
+ cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
atomic_notifier_chain_register(&migration_notifier_head,
&boost_migration_nb);
-
ret = input_register_handler(&cpuboost_input_handler);
+
return 0;
}
late_initcall(cpu_boost_init);
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index 3aebaf0..48dc6ec 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -39,6 +39,7 @@
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
#include <mach/scm.h>
#include <linux/platform_data/qcom_crypto_device.h>
@@ -66,6 +67,8 @@
u32 aead_sha1_3des_dec;
u32 aead_ccm_aes_enc;
u32 aead_ccm_aes_dec;
+ u32 aead_rfc4309_ccm_aes_enc;
+ u32 aead_rfc4309_ccm_aes_dec;
u32 aead_op_success;
u32 aead_op_fail;
u32 aead_bad_msg;
@@ -231,6 +234,8 @@
/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define QCRYPTO_MAX_IV_LENGTH 16
+#define QCRYPTO_CCM4309_NONCE_LEN 3
+
struct qcrypto_cipher_ctx {
u8 auth_key[QCRYPTO_MAX_KEY_SIZE];
u8 iv[QCRYPTO_MAX_IV_LENGTH];
@@ -244,10 +249,12 @@
struct crypto_priv *cp;
unsigned int flags;
struct crypto_engine *pengine; /* fixed engine assigned */
+ u8 ccm4309_nonce[QCRYPTO_CCM4309_NONCE_LEN];
};
struct qcrypto_cipher_req_ctx {
u8 *iv;
+ u8 rfc4309_iv[QCRYPTO_MAX_IV_LENGTH];
unsigned int ivsize;
int aead;
struct scatterlist asg; /* Formatted associated data sg */
@@ -736,7 +743,12 @@
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" AEAD CCM-AES decryption : %d\n",
pstat->aead_ccm_aes_dec);
-
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD RFC4309-CCM-AES encryption : %d\n",
+ pstat->aead_rfc4309_ccm_aes_enc);
+ len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+ " AEAD RFC4309-CCM-AES decryption : %d\n",
+ pstat->aead_rfc4309_ccm_aes_dec);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" AEAD operation success : %d\n",
pstat->aead_op_success);
@@ -1272,6 +1284,12 @@
uint32_t bytes = 0;
uint32_t num_sg = 0;
+ if (alen == 0) {
+ qreq->assoc = NULL;
+ qreq->assoclen = 0;
+ return 0;
+ }
+
qreq->assoc = kzalloc((alen + 0x64), GFP_ATOMIC);
if (!qreq->assoc) {
pr_err("qcrypto Memory allocation of adata FAIL, error %ld\n",
@@ -1455,7 +1473,10 @@
qreq.authkey = cipher_ctx->auth_key;
qreq.authklen = cipher_ctx->auth_key_len;
qreq.authsize = crypto_aead_authsize(aead);
- qreq.ivsize = crypto_aead_ivsize(aead);
+ if (qreq.mode == QCE_MODE_CCM)
+ qreq.ivsize = AES_BLOCK_SIZE;
+ else
+ qreq.ivsize = crypto_aead_ivsize(aead);
qreq.flags = cipher_ctx->flags;
if (qreq.mode == QCE_MODE_CCM) {
@@ -1502,8 +1523,9 @@
kzfree(qreq.assoc);
return -ENOMEM;
}
-
- memcpy((char *)rctx->data, qreq.assoc, qreq.assoclen);
+ if (qreq.assoclen)
+ memcpy((char *)rctx->data, qreq.assoc,
+ qreq.assoclen);
num_sg = qcrypto_count_sg(req->src, req->cryptlen);
bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg,
@@ -1845,6 +1867,29 @@
return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
+static int _qcrypto_aead_rfc4309_enc_aes_ccm(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_ENCRYPT;
+ rctx->mode = QCE_MODE_CCM;
+ memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+ rctx->rfc4309_iv[0] = 3; /* L -1 */
+ memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+ memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+ rctx->iv = rctx->rfc4309_iv;
+ pstat->aead_rfc4309_ccm_aes_enc++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
{
struct qcrypto_cipher_req_ctx *rctx;
@@ -2136,6 +2181,27 @@
return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
}
+static int _qcrypto_aead_rfc4309_dec_aes_ccm(struct aead_request *req)
+{
+ struct qcrypto_cipher_req_ctx *rctx;
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_priv *cp = ctx->cp;
+ struct crypto_stat *pstat;
+
+ pstat = &_qcrypto_stat;
+ rctx = aead_request_ctx(req);
+ rctx->aead = 1;
+ rctx->alg = CIPHER_ALG_AES;
+ rctx->dir = QCE_DECRYPT;
+ rctx->mode = QCE_MODE_CCM;
+ memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+ rctx->rfc4309_iv[0] = 3; /* L -1 */
+ memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+ memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+ rctx->iv = rctx->rfc4309_iv;
+ pstat->aead_rfc4309_ccm_aes_dec++;
+ return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
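As a reading aid for the two rfc4309 request handlers above: the 16-byte rfc4309_iv is built per RFC 4309 (CCM for ESP), with a flags byte encoding L - 1, the 3-byte salt that setkey strips from the tail of the key, and the 8-byte per-request IV. A minimal standalone sketch of that construction, mirroring the memcpy() calls in the patch:

#include <stdint.h>
#include <string.h>

/* Offsets mirror the driver code above. */
static void build_rfc4309_iv(uint8_t iv[16], const uint8_t salt[3],
			     const uint8_t req_iv[8])
{
	memset(iv, 0, 16);
	iv[0] = 3;			/* L - 1: a 4-byte length/counter field */
	memcpy(&iv[1], salt, 3);	/* ccm4309_nonce saved in setkey */
	memcpy(&iv[4], req_iv, 8);	/* 8-byte IV from the aead_request */
	/* iv[12..15] carry the running block counter, left at zero here */
}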
@@ -2166,6 +2232,24 @@
return 0;
}
+static int _qcrypto_aead_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+ ctx->authsize = authsize;
+ return 0;
+}
+
+
static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -2231,6 +2315,21 @@
return 0;
}
+static int _qcrypto_aead_rfc4309_ccm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int key_len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ if (key_len < QCRYPTO_CCM4309_NONCE_LEN)
+ return -EINVAL;
+ key_len -= QCRYPTO_CCM4309_NONCE_LEN;
+ memcpy(ctx->ccm4309_nonce, key + key_len, QCRYPTO_CCM4309_NONCE_LEN);
+ ret = _qcrypto_aead_ccm_setkey(aead, key, key_len);
+ return ret;
+}
+
static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
{
struct qcrypto_cipher_req_ctx *rctx;
@@ -3762,7 +3861,7 @@
.cra_u = {
.aead = {
.ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
.setkey = _qcrypto_aead_ccm_setkey,
.setauthsize = _qcrypto_aead_ccm_setauthsize,
.encrypt = _qcrypto_aead_encrypt_aes_ccm,
@@ -3772,6 +3871,31 @@
}
};
+static struct crypto_alg _qcrypto_aead_rfc4309_ccm_algo = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "qcrypto-rfc4309-aes-ccm",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_nivaead_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = _qcrypto_cra_aead_init,
+ .cra_exit = _qcrypto_cra_aead_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8,
+ .maxauthsize = 16,
+ .setkey = _qcrypto_aead_rfc4309_ccm_setkey,
+ .setauthsize = _qcrypto_aead_rfc4309_ccm_setauthsize,
+ .encrypt = _qcrypto_aead_rfc4309_enc_aes_ccm,
+ .decrypt = _qcrypto_aead_rfc4309_dec_aes_ccm,
+ .geniv = "seqiv",
+ }
+ }
+};
+
static int _qcrypto_probe(struct platform_device *pdev)
{
@@ -4078,6 +4202,36 @@
dev_info(&pdev->dev, "%s\n",
q_alg->cipher_alg.cra_driver_name);
}
+
+ q_alg = _qcrypto_cipher_alg_alloc(cp,
+ &_qcrypto_aead_rfc4309_ccm_algo);
+ if (IS_ERR(q_alg)) {
+ rc = PTR_ERR(q_alg);
+ goto err;
+ }
+
+ if (cp->ce_support.use_sw_aes_ccm_algo) {
+ rc = _qcrypto_prefix_alg_cra_name(
+ q_alg->cipher_alg.cra_name,
+ strlen(q_alg->cipher_alg.cra_name));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "The algorithm name %s is too long.\n",
+ q_alg->cipher_alg.cra_name);
+ kfree(q_alg);
+ goto err;
+ }
+ }
+ rc = crypto_register_alg(&q_alg->cipher_alg);
+ if (rc) {
+ dev_err(&pdev->dev, "%s alg registration failed\n",
+ q_alg->cipher_alg.cra_driver_name);
+ kfree(q_alg);
+ } else {
+ list_add_tail(&q_alg->entry, &cp->alg_list);
+ dev_info(&pdev->dev, "%s\n",
+ q_alg->cipher_alg.cra_driver_name);
+ }
}
mutex_unlock(&cp->engine_lock);
diff --git a/drivers/gpio/qpnp-pin.c b/drivers/gpio/qpnp-pin.c
index 8d934df..6176df9 100644
--- a/drivers/gpio/qpnp-pin.c
+++ b/drivers/gpio/qpnp-pin.c
@@ -182,6 +182,7 @@
struct device_node *int_ctrl;
struct list_head chip_list;
struct dentry *dfs_dir;
+ bool chip_registered;
};
static LIST_HEAD(qpnp_pin_chips);
@@ -912,7 +913,7 @@
static int qpnp_pin_free_chip(struct qpnp_pin_chip *q_chip)
{
struct spmi_device *spmi = q_chip->spmi;
- int rc, i;
+ int i, rc = 0;
if (q_chip->chip_gpios)
for (i = 0; i < spmi->num_dev_node; i++)
@@ -921,10 +922,12 @@
mutex_lock(&qpnp_pin_chips_lock);
list_del(&q_chip->chip_list);
mutex_unlock(&qpnp_pin_chips_lock);
- rc = gpiochip_remove(&q_chip->gpio_chip);
- if (rc)
- dev_err(&q_chip->spmi->dev, "%s: unable to remove gpio\n",
- __func__);
+ if (q_chip->chip_registered) {
+ rc = gpiochip_remove(&q_chip->gpio_chip);
+ if (rc)
+ dev_err(&q_chip->spmi->dev, "%s: unable to remove gpio\n",
+ __func__);
+ }
kfree(q_chip->chip_gpios);
kfree(q_chip->pmic_pins);
kfree(q_chip);
@@ -1342,6 +1345,7 @@
goto err_probe;
}
+ q_chip->chip_registered = true;
/* now configure gpio config defaults if they exist */
for (i = 0; i < spmi->num_dev_node; i++) {
q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
diff --git a/drivers/gpu/ion/ion_cma_secure_heap.c b/drivers/gpu/ion/ion_cma_secure_heap.c
index d375c00..0aef596 100644
--- a/drivers/gpu/ion/ion_cma_secure_heap.c
+++ b/drivers/gpu/ion/ion_cma_secure_heap.c
@@ -446,6 +446,7 @@
if (ret) {
ret = ion_secure_cma_add_to_pool(sheap, len);
if (ret) {
+ mutex_unlock(&sheap->alloc_lock);
dev_err(sheap->dev, "Fail to allocate buffer\n");
goto err;
}
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 7717829..3196911 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1625,17 +1625,19 @@
goto err;
}
- ret = of_property_read_u32_array(child, "reg", reg_val, 2);
- if (ret) {
- KGSL_CORE_ERR("Unable to read KGSL IOMMU 'reg'\n");
+ if (!strcmp("gfx3d_user", ctxs[ctx_index].iommu_ctx_name)) {
+ ctxs[ctx_index].ctx_id = 0;
+ } else if (!strcmp("gfx3d_priv",
+ ctxs[ctx_index].iommu_ctx_name)) {
+ ctxs[ctx_index].ctx_id = 1;
+ } else if (!strcmp("gfx3d_spare",
+ ctxs[ctx_index].iommu_ctx_name)) {
+ ctxs[ctx_index].ctx_id = 2;
+ } else {
+ KGSL_CORE_ERR("dt: IOMMU context %s is invalid\n",
+ ctxs[ctx_index].iommu_ctx_name);
goto err;
}
- if (msm_soc_version_supports_iommu_v0())
- ctxs[ctx_index].ctx_id = (reg_val[0] -
- data->physstart) >> KGSL_IOMMU_CTX_SHIFT;
- else
- ctxs[ctx_index].ctx_id = ((reg_val[0] -
- data->physstart) >> KGSL_IOMMU_CTX_SHIFT) - 8;
ctx_index++;
}
@@ -1690,6 +1692,12 @@
if (ret)
goto err;
+ /* get pm-qos-latency from target, set it to default if not found */
+ if (adreno_of_read_property(pdev->dev.of_node, "qcom,pm-qos-latency",
+ &pdata->pm_qos_latency))
+ pdata->pm_qos_latency = 501;
+
+
if (adreno_of_read_property(pdev->dev.of_node, "qcom,idle-timeout",
&pdata->idle_timeout))
pdata->idle_timeout = HZ/12;
@@ -2415,9 +2423,12 @@
if (tmp != adreno_dev->fast_hang_detect) {
if (adreno_dev->fast_hang_detect) {
- if (adreno_dev->gpudev->fault_detect_start)
+ if (adreno_dev->gpudev->fault_detect_start &&
+ !kgsl_active_count_get(&adreno_dev->dev)) {
adreno_dev->gpudev->fault_detect_start(
adreno_dev);
+ kgsl_active_count_put(&adreno_dev->dev);
+ }
} else {
if (adreno_dev->gpudev->fault_detect_stop)
adreno_dev->gpudev->fault_detect_stop(
@@ -2778,7 +2789,7 @@
* Return true if the RBBM status register for the GPU type indicates that the
* hardware is idle
*/
-static bool adreno_hw_isidle(struct kgsl_device *device)
+bool adreno_hw_isidle(struct kgsl_device *device)
{
unsigned int reg_rbbm_status;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -2891,6 +2902,12 @@
rptr = adreno_get_rptr(&adreno_dev->ringbuffer);
+ /*
+ * wptr is updated when we add commands to ringbuffer, add a barrier
+ * to make sure updated wptr is compared to rptr
+ */
+ smp_mb();
+
if (rptr == adreno_dev->ringbuffer.wptr)
return adreno_hw_isidle(device);
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 0f1e01d..9092a03 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -191,8 +191,6 @@
unsigned int fast_hang_detect;
unsigned int ft_policy;
unsigned int long_ib_detect;
- unsigned int long_ib;
- unsigned int long_ib_ts;
unsigned int ft_pf_policy;
unsigned int gpulist_index;
struct ocmem_buf *ocmem_hdl;
@@ -343,6 +341,7 @@
ADRENO_REG_TC_CNTL_STATUS,
ADRENO_REG_TP0_CHICKEN,
ADRENO_REG_RBBM_RBBM_CTL,
+ ADRENO_REG_UCHE_INVALIDATE0,
ADRENO_REG_REGISTER_MAX,
};
@@ -415,7 +414,9 @@
#define KGSL_FT_SKIPFRAME 3
#define KGSL_FT_DISABLE 4
#define KGSL_FT_TEMP_DISABLE 5
-#define KGSL_FT_DEFAULT_POLICY (BIT(KGSL_FT_REPLAY) + BIT(KGSL_FT_SKIPIB))
+#define KGSL_FT_THROTTLE 6
+#define KGSL_FT_DEFAULT_POLICY (BIT(KGSL_FT_REPLAY) + BIT(KGSL_FT_SKIPIB) \
+ + BIT(KGSL_FT_THROTTLE))
/* This internal bit is used to skip the PM dump on replayed command batches */
#define KGSL_FT_SKIP_PMDUMP 31
@@ -433,7 +434,8 @@
{ BIT(KGSL_FT_SKIPIB), "skipib" }, \
{ BIT(KGSL_FT_SKIPFRAME), "skipframe" }, \
{ BIT(KGSL_FT_DISABLE), "disable" }, \
- { BIT(KGSL_FT_TEMP_DISABLE), "temp" }
+ { BIT(KGSL_FT_TEMP_DISABLE), "temp" }, \
+ { BIT(KGSL_FT_THROTTLE), "throttle"}
extern struct adreno_gpudev adreno_a2xx_gpudev;
extern struct adreno_gpudev adreno_a3xx_gpudev;
@@ -463,6 +465,7 @@
void adreno_coresight_remove(struct platform_device *pdev);
int adreno_coresight_init(struct platform_device *pdev);
+bool adreno_hw_isidle(struct kgsl_device *device);
int adreno_idle(struct kgsl_device *device);
bool adreno_isidle(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index eed11c3..47ba854 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -4577,6 +4577,8 @@
ADRENO_REG_DEFINE(ADRENO_REG_TC_CNTL_STATUS, REG_TC_CNTL_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_TP0_CHICKEN, REG_TP0_CHICKEN),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_RBBM_CTL, A3XX_RBBM_RBBM_CTL),
+ ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0,
+ A3XX_UCHE_CACHE_INVALIDATE0_REG),
};
struct adreno_reg_offsets a3xx_reg_offsets = {
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 95e4017..ab9d220 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -33,6 +33,15 @@
/* Number of command batches sent at a time from a single context */
static unsigned int _context_cmdbatch_burst = 5;
+/*
+ * GFT throttle parameters. If GFT recovered more than
+ * X times in Y ms invalidate the context and do not attempt recovery.
+ * X -> _fault_throttle_burst
+ * Y -> _fault_throttle_time
+ */
+static unsigned int _fault_throttle_time = 3000;
+static unsigned int _fault_throttle_burst = 3;
+
/* Number of command batches inflight in the ringbuffer at any time */
static unsigned int _dispatcher_inflight = 15;
@@ -78,15 +87,24 @@
static inline bool _isidle(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int ts;
+ unsigned int ts, i;
+
+ if (!kgsl_pwrctrl_isenabled(device))
+ goto ret;
ts = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
- if (adreno_isidle(device) == true &&
- (ts >= adreno_dev->ringbuffer.global_ts))
- return true;
+ /* If GPU HW status is idle return true */
+ if (adreno_hw_isidle(device) ||
+ (ts == adreno_dev->ringbuffer.global_ts))
+ goto ret;
return false;
+
+ret:
+ for (i = 0; i < FT_DETECT_REGS_COUNT; i++)
+ fault_detect_regs[i] = 0;
+ return true;
}
/**
@@ -951,9 +969,6 @@
adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_CNTL, ®);
reg |= (1 << 27) | (1 << 28);
adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
-
- /* Skip the PM dump for a timeout because it confuses people */
- set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
}
adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &base);
@@ -1018,6 +1033,35 @@
cmdbatch = replay[0];
/*
+ * If GFT recovered more than X times in Y ms invalidate the context
+ * and do not attempt recovery.
+ * Example: X==3 and Y==3000 ms, GPU hung at 500ms, 1700ms, 2500ms and
+ * 3000ms for the same context, we will not try FT and invalidate the
+ * context @3000ms because context triggered GFT more than 3 times in
+ * last 3 seconds. If a context caused recoverable GPU hangs
+ * where 1st and 4th gpu hang are more than 3 seconds apart we
+ * won't disable GFT and invalidate the context.
+ */
+ if (test_bit(KGSL_FT_THROTTLE, &cmdbatch->fault_policy)) {
+ if (time_after(jiffies, (cmdbatch->context->fault_time
+ + msecs_to_jiffies(_fault_throttle_time)))) {
+ cmdbatch->context->fault_time = jiffies;
+ cmdbatch->context->fault_count = 1;
+ } else {
+ cmdbatch->context->fault_count++;
+ if (cmdbatch->context->fault_count >
+ _fault_throttle_burst) {
+ set_bit(KGSL_FT_DISABLE,
+ &cmdbatch->fault_policy);
+ pr_fault(device, cmdbatch,
+ "gpu fault threshold exceeded %d faults in %d msecs\n",
+ _fault_throttle_burst,
+ _fault_throttle_time);
+ }
+ }
+ }
+
+ /*
* If FT is disabled for this cmdbatch invalidate immediately
*/
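The throttle added above is a sliding-window fault counter per context. A stripped-down sketch of the same decision, outside the dispatcher plumbing (an illustration under the patch's semantics, not the patch itself):

#include <stdbool.h>

struct ctx_fault_state {
	unsigned long fault_time;	/* time of first fault in the window */
	unsigned int fault_count;	/* faults seen in the current window */
};

/* Disable fault tolerance once a context faults more than `burst` times
 * within `window_ms` of the first fault recorded in the current window. */
static bool should_disable_ft(struct ctx_fault_state *c, unsigned long now_ms,
			      unsigned int burst, unsigned int window_ms)
{
	if (now_ms > c->fault_time + window_ms) {
		/* Window expired: restart it at this fault. */
		c->fault_time = now_ms;
		c->fault_count = 1;
		return false;
	}
	c->fault_count++;
	return c->fault_count > burst;	/* e.g. the 4th fault within 3000 ms */
}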
@@ -1631,6 +1675,10 @@
static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait);
static DISPATCHER_UINT_ATTR(fault_detect_interval, 0644, 0,
_fault_timer_interval);
+static DISPATCHER_UINT_ATTR(fault_throttle_time, 0644, 0,
+ _fault_throttle_time);
+static DISPATCHER_UINT_ATTR(fault_throttle_burst, 0644, 0,
+ _fault_throttle_burst);
static struct attribute *dispatcher_attrs[] = {
&dispatcher_attr_inflight.attr,
@@ -1639,6 +1687,8 @@
&dispatcher_attr_cmdbatch_timeout.attr,
&dispatcher_attr_context_queue_wait.attr,
&dispatcher_attr_fault_detect_interval.attr,
+ &dispatcher_attr_fault_throttle_time.attr,
+ &dispatcher_attr_fault_throttle_burst.attr,
NULL,
};
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 4db045a..136456a 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -607,7 +607,7 @@
struct adreno_context *context)
{
struct kgsl_device *device;
- unsigned int cmds[5];
+ unsigned int cmds[8];
if (adreno_dev == NULL || context == NULL)
return -EINVAL;
@@ -621,8 +621,14 @@
cmds[3] = device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
cmds[4] = context->base.id;
+ /* Flush the UCHE for new context */
+ cmds[5] = cp_type0_packet(
+ adreno_getreg(adreno_dev, ADRENO_REG_UCHE_INVALIDATE0), 2);
+ cmds[6] = 0;
+ if (adreno_is_a3xx(adreno_dev))
+ cmds[7] = 0x90000000;
return adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_NONE, cmds, 5);
+ KGSL_CMD_FLAGS_NONE, cmds, 8);
}
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 7656cd5..258cf94 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -133,7 +133,6 @@
*/
struct adreno_context {
struct kgsl_context base;
- unsigned int ib_gpu_time_used;
unsigned int timestamp;
unsigned int internal_timestamp;
int state;
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 5f63cb6..fc4b77e 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -28,7 +28,6 @@
#define KGSL_TIMEOUT_NONE 0
#define KGSL_TIMEOUT_DEFAULT 0xFFFFFFFF
#define KGSL_TIMEOUT_PART 50 /* 50 msec */
-#define KGSL_TIMEOUT_LONG_IB_DETECTION 2000 /* 2 sec*/
#define FIRST_TIMEOUT (HZ / 2)
@@ -350,6 +349,8 @@
* is set.
* @flags: flags from userspace controlling the behavior of this context
* @pwr_constraint: power constraint from userspace for this context
+ * @fault_count: number of times gpu hanged in last _context_throttle_time ms
+ * @fault_time: time of the first gpu hang in last _context_throttle_time ms
*/
struct kgsl_context {
struct kref refcount;
@@ -368,6 +369,8 @@
unsigned int pagefault_ts;
unsigned int flags;
struct kgsl_pwr_constraint pwr_constraint;
+ unsigned int fault_count;
+ unsigned long fault_time;
};
/**
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index e21fd88..ccd13d5 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -210,7 +210,7 @@
kgsl_event_func func, void *priv, void *owner)
{
struct kgsl_event *event;
- unsigned int queued, cur_ts;
+ unsigned int queued = 0, cur_ts;
struct kgsl_context *context = NULL;
BUG_ON(!mutex_is_locked(&device->mutex));
@@ -224,11 +224,21 @@
return -EINVAL;
}
- queued = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED);
+ /*
+ * If the caller is creating their own timestamps, let them schedule
+ * events in the future. Otherwise only allow timestamps that have been
+ * queued.
+ */
+ if (context == NULL ||
+ ((context->flags & KGSL_CONTEXT_USER_GENERATED_TS) == 0)) {
- if (timestamp_cmp(ts, queued) > 0) {
- kgsl_context_put(context);
- return -EINVAL;
+ queued = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_QUEUED);
+
+ if (timestamp_cmp(ts, queued) > 0) {
+ kgsl_context_put(context);
+ return -EINVAL;
+ }
}
cur_ts = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 2af8d27..69b953f 100755
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -514,6 +514,9 @@
else
/* something went wrong with the event handling mechanism */
BUG_ON(1);
+
+ /* Free param we are done using it */
+ kfree(param);
}
/*
@@ -638,16 +641,18 @@
phys_addr_t pt_base)
{
struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
- phys_addr_t domain_ptbase = iommu_pt ?
- iommu_get_pt_base_addr(iommu_pt->domain) : 0;
+ phys_addr_t domain_ptbase;
- /* Only compare the valid address bits of the pt_base */
- domain_ptbase &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
+ if (iommu_pt == NULL)
+ return 0;
+
+ domain_ptbase = iommu_get_pt_base_addr(iommu_pt->domain)
+ & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
pt_base &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
- return domain_ptbase && pt_base &&
- (domain_ptbase == pt_base);
+ return (domain_ptbase == pt_base);
+
}
/*
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 6b04aad..d64d0d3 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -85,16 +85,8 @@
return status;
}
-static void kgsl_destroy_pagetable(struct kref *kref)
+static void _kgsl_destroy_pagetable(struct kgsl_pagetable *pagetable)
{
- struct kgsl_pagetable *pagetable = container_of(kref,
- struct kgsl_pagetable, refcount);
- unsigned long flags;
-
- spin_lock_irqsave(&kgsl_driver.ptlock, flags);
- list_del(&pagetable->list);
- spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
-
pagetable_remove_sysfs_objects(pagetable);
kgsl_cleanup_pt(pagetable);
@@ -109,6 +101,29 @@
kfree(pagetable);
}
+static void kgsl_destroy_pagetable(struct kref *kref)
+{
+ struct kgsl_pagetable *pagetable = container_of(kref,
+ struct kgsl_pagetable, refcount);
+ unsigned long flags;
+
+ spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+ list_del(&pagetable->list);
+ spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+
+ _kgsl_destroy_pagetable(pagetable);
+}
+
+static void kgsl_destroy_pagetable_locked(struct kref *kref)
+{
+ struct kgsl_pagetable *pagetable = container_of(kref,
+ struct kgsl_pagetable, refcount);
+
+ list_del(&pagetable->list);
+
+ _kgsl_destroy_pagetable(pagetable);
+}
+
static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
{
if (pagetable)
@@ -128,7 +143,7 @@
ret = pt;
break;
}
- kref_put(&pt->refcount, kgsl_destroy_pagetable);
+ kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
}
}
@@ -139,12 +154,12 @@
static struct kgsl_pagetable *
_get_pt_from_kobj(struct kobject *kobj)
{
- unsigned long ptname;
+ unsigned int ptname;
if (!kobj)
return NULL;
- if (sscanf(kobj->name, "%ld", &ptname) != 1)
+ if (kstrtou32(kobj->name, 0, &ptname))
return NULL;
return kgsl_get_pagetable(ptname);
@@ -328,10 +343,11 @@
if (kref_get_unless_zero(&pt->refcount)) {
if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
ptid = (int) pt->name;
- kref_put(&pt->refcount, kgsl_destroy_pagetable);
+ kref_put(&pt->refcount,
+ kgsl_destroy_pagetable_locked);
break;
}
- kref_put(&pt->refcount, kgsl_destroy_pagetable);
+ kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
}
}
spin_unlock(&kgsl_driver.ptlock);
@@ -356,18 +372,18 @@
if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
ret = 1;
kref_put(&pt->refcount,
- kgsl_destroy_pagetable);
+ kgsl_destroy_pagetable_locked);
break;
} else {
pt->fault_addr =
(addr & ~(PAGE_SIZE-1));
ret = 0;
kref_put(&pt->refcount,
- kgsl_destroy_pagetable);
+ kgsl_destroy_pagetable_locked);
break;
}
}
- kref_put(&pt->refcount, kgsl_destroy_pagetable);
+ kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
}
}
spin_unlock(&kgsl_driver.ptlock);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 9353b2e..656d7e2 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1124,8 +1124,7 @@
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
- /* Set the CPU latency to 501usec to allow low latency PC modes */
- pwr->pm_qos_latency = 501;
+ pwr->pm_qos_latency = pdata->pm_qos_latency;
pm_runtime_enable(device->parentdev);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index d3adf84..c8ea471 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -73,12 +73,12 @@
_get_priv_from_kobj(struct kobject *kobj)
{
struct kgsl_process_private *private;
- unsigned long name;
+ unsigned int name;
if (!kobj)
return NULL;
- if (sscanf(kobj->name, "%ld", &name) != 1)
+ if (kstrtou32(kobj->name, 0, &name))
return NULL;
list_for_each_entry(private, &kgsl_driver.process_list, list) {
@@ -255,13 +255,13 @@
const char *buf, size_t count)
{
int ret;
- unsigned int thresh;
- ret = sscanf(buf, "%d", &thresh);
- if (ret != 1)
- return count;
+ unsigned int thresh = 0;
+
+ ret = kgsl_sysfs_store(buf, &thresh);
+ if (ret)
+ return ret;
kgsl_driver.full_cache_threshold = thresh;
-
return count;
}
diff --git a/drivers/gud/Makefile b/drivers/gud/Makefile
index ef0e083..c415ad8 100644
--- a/drivers/gud/Makefile
+++ b/drivers/gud/Makefile
@@ -3,34 +3,35 @@
#
GUD_ROOT_FOLDER := drivers/gud
# add our modules to kernel.
-obj-$(CONFIG_MOBICORE_API) += mckernelapi.o
-obj-$(CONFIG_MOBICORE_SUPPORT) += mcdrvmodule.o
+obj-$(CONFIG_MOBICORE_API) += mcKernelApi.o
+obj-$(CONFIG_MOBICORE_SUPPORT) += mcDrvModule.o
-mcdrvmodule-objs := mobicore_driver/logging.o \
- mobicore_driver/ops.o \
- mobicore_driver/mem.o \
- mobicore_driver/api.o \
- mobicore_driver/main.o \
- mobicore_driver/pm.o
+mcDrvModule-objs := MobiCoreDriver/logging.o \
+ MobiCoreDriver/ops.o \
+ MobiCoreDriver/mem.o \
+ MobiCoreDriver/api.o \
+ MobiCoreDriver/pm.o \
+ MobiCoreDriver/main.o
-mckernelapi-objs := mobicore_kernelapi/main.o \
- mobicore_kernelapi/clientlib.o \
- mobicore_kernelapi/device.o \
- mobicore_kernelapi/session.o \
- mobicore_kernelapi/connection.o
+mcKernelApi-objs := MobiCoreKernelApi/main.o \
+ MobiCoreKernelApi/clientlib.o \
+ MobiCoreKernelApi/device.o \
+ MobiCoreKernelApi/session.o \
+ MobiCoreKernelApi/connection.o
# Release mode by default
-ccflags-y := -DNDEBUG -include $(PWD)/$(GUD_ROOT_FOLDER)/mobicore_driver/build_tag.h
+ccflags-y := -DNDEBUG -I$(GUD_ROOT_FOLDER)
ccflags-y += -Wno-declaration-after-statement
ccflags-$(CONFIG_MOBICORE_DEBUG) += -DDEBUG
ccflags-$(CONFIG_MOBICORE_VERBOSE) += -DDEBUG_VERBOSE
# Choose one platform from the folder
-MOBICORE_PLATFORM := $(shell (ls -1 $(PWD)/$(GUD_ROOT_FOLDER)/mobicore_driver/platforms | tail -1) )
+MOBICORE_PLATFORM := $(shell (ls -1 $(PWD)/$(GUD_ROOT_FOLDER)/MobiCoreDriver/platforms | tail -1) )
# Use the available platform folder
-ccflags-y += -I$(GUD_ROOT_FOLDER)/mobicore_driver/platforms/$(MOBICORE_PLATFORM)
-
-
-ccflags-y += -I$(GUD_ROOT_FOLDER)/mobicore_driver/public
-ccflags-y += -I$(GUD_ROOT_FOLDER)/mobicore_kernelapi/include
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/platforms/$(MOBICORE_PLATFORM)
+# MobiCore Driver includes
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/public
+# MobiCore KernelApi required includes
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreKernelApi/include
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreKernelApi/public
diff --git a/drivers/gud/MobiCoreDriver/Makefile b/drivers/gud/MobiCoreDriver/Makefile
new file mode 100644
index 0000000..c17f35e
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/Makefile
@@ -0,0 +1,26 @@
+#
+# this makefile is called from the kernel make system. Thus we basically
+# add things to "obj-m" here.
+
+ifeq ($(MODE),release)
+ ccflags-y = -O2 -DNDEBUG
+else
+ ccflags-y = -DDEBUG
+endif # DEBUG/RELEASE
+
+# CFLAGS from the build script
+ifdef MOBICORE_CFLAGS
+ ccflags-y += $(MOBICORE_CFLAGS)
+endif
+#EXTRA_CFLAGS+=-DDEBUG_VERBOSE
+
+ccflags-y += -I$(M) -Wall -D__$(PLATFORM)__
+# add our module to kernel.
+obj-m += mcDrvModule.o
+
+mcDrvModule-objs :=logging.o ops.o mem.o api.o pm.o main.o
+
+clean:
+ rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions \
+ Module.markers Module.symvers modules.order
+
diff --git a/drivers/gud/mobicore_driver/api.c b/drivers/gud/MobiCoreDriver/api.c
similarity index 80%
rename from drivers/gud/mobicore_driver/api.c
rename to drivers/gud/MobiCoreDriver/api.c
index b47383a0..e7fa8e2 100644
--- a/drivers/gud/mobicore_driver/api.c
+++ b/drivers/gud/MobiCoreDriver/api.c
@@ -14,23 +14,12 @@
#include "mem.h"
#include "debug.h"
-
-/*
- * Map a virtual memory buffer structure to Mobicore
- * @param instance
- * @param addr address of the buffer(NB it must be kernel virtual!)
- * @param len buffer length
- * @param handle pointer to handle
- * @param phys_wsm_l2_table pointer to physical L2 table(?)
- *
- * @return 0 if no error
- *
- */
int mobicore_map_vmem(struct mc_instance *instance, void *addr,
- uint32_t len, uint32_t *handle, uint32_t *phys)
+ uint32_t len, uint32_t *handle)
{
- return mc_register_wsm_l2(instance, (uint32_t)addr, len,
- handle, phys);
+ phys_addr_t phys;
+ return mc_register_wsm_mmu(instance, addr, len,
+ handle, &phys);
}
EXPORT_SYMBOL(mobicore_map_vmem);
@@ -44,7 +33,7 @@
*/
int mobicore_unmap_vmem(struct mc_instance *instance, uint32_t handle)
{
- return mc_unregister_wsm_l2(instance, handle);
+ return mc_unregister_wsm_mmu(instance, handle);
}
EXPORT_SYMBOL(mobicore_unmap_vmem);
@@ -70,13 +59,11 @@
* @param requested_size size of the WSM
* @param handle pointer where the handle will be saved
* @param virt_kernel_addr pointer for the kernel virtual address
- * @param phys_addr pointer for the physical address
*
* @return error code or 0 for success
*/
int mobicore_allocate_wsm(struct mc_instance *instance,
- unsigned long requested_size, uint32_t *handle, void **virt_kernel_addr,
- void **phys_addr)
+ unsigned long requested_size, uint32_t *handle, void **virt_kernel_addr)
{
struct mc_buffer *buffer = NULL;
@@ -85,7 +72,6 @@
return -EFAULT;
*handle = buffer->handle;
- *phys_addr = buffer->phys;
*virt_kernel_addr = buffer->addr;
return 0;
}
@@ -117,3 +103,14 @@
}
EXPORT_SYMBOL(mobicore_release);
+/*
+ * Test if mobicore can sleep
+ *
+ * @return true if mobicore can sleep, false if it can't sleep
+ */
+bool mobicore_sleep_ready(void)
+{
+ return mc_sleep_ready();
+}
+EXPORT_SYMBOL(mobicore_sleep_ready);
+
diff --git a/drivers/gud/mobicore_driver/arm.h b/drivers/gud/MobiCoreDriver/arm.h
similarity index 100%
rename from drivers/gud/mobicore_driver/arm.h
rename to drivers/gud/MobiCoreDriver/arm.h
diff --git a/drivers/gud/MobiCoreDriver/build.sh b/drivers/gud/MobiCoreDriver/build.sh
new file mode 100644
index 0000000..db8410c
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+# source the setup script
+if [ -z "$COMP_PATH_ROOT" ]; then
+ echo "The build environment is not set!"
+ echo "Trying to source setupDrivers.sh automatically!"
+ source ../setupDrivers.sh || exit 1
+fi
+
+ROOT_PATH=$(dirname $(readlink -f $0))
+# These folders need to be relative to the kernel dir or absolute!
+PLATFORM=EXYNOS_5410_STD
+CODE_INCLUDE=$(readlink -f $ROOT_PATH/Locals/Code)
+PLATFORM_INCLUDE="$CODE_INCLUDE/platforms/$PLATFORM"
+MOBICORE_DAEMON=$COMP_PATH_MobiCoreDriverLib/Public
+
+MOBICORE_CFLAGS="-I$MOBICORE_DRIVER/Public -I$MOBICORE_DAEMON -I$COMP_PATH_MobiCore/inc/Mci -I$COMP_PATH_MobiCore/inc -I${PLATFORM_INCLUDE}"
+
+# Clean first
+make -C $CODE_INCLUDE clean
+
+make -C $LINUX_PATH \
+ MODE=$MODE \
+ ARCH=arm \
+ CROSS_COMPILE=$CROSS_COMPILE \
+ M=$CODE_INCLUDE \
+ "MOBICORE_CFLAGS=$MOBICORE_CFLAGS" \
+ modules
diff --git a/drivers/gud/mobicore_driver/debug.h b/drivers/gud/MobiCoreDriver/debug.h
similarity index 89%
rename from drivers/gud/mobicore_driver/debug.h
rename to drivers/gud/MobiCoreDriver/debug.h
index 1f9a632..d29efef 100644
--- a/drivers/gud/mobicore_driver/debug.h
+++ b/drivers/gud/MobiCoreDriver/debug.h
@@ -15,7 +15,7 @@
extern struct device *mcd;
#define MCDRV_DBG_ERROR(dev, txt, ...) \
- dev_err(dev, "MobiCore %s() ### ERROR: " txt, \
+ dev_err(dev, "MobiCore %s() ### ERROR: " txt "\n", \
__func__, \
##__VA_ARGS__)
@@ -32,12 +32,12 @@
#endif
#define MCDRV_DBG(dev, txt, ...) \
- dev_info(dev, "MobiCore %s(): " txt, \
+ dev_info(dev, "MobiCore %s(): " txt "\n", \
__func__, \
##__VA_ARGS__)
#define MCDRV_DBG_WARN(dev, txt, ...) \
- dev_warn(dev, "MobiCore %s() WARNING: " txt, \
+ dev_warn(dev, "MobiCore %s() WARNING: " txt "\n", \
__func__, \
##__VA_ARGS__)
diff --git a/drivers/gud/mobicore_driver/fastcall.h b/drivers/gud/MobiCoreDriver/fastcall.h
similarity index 70%
rename from drivers/gud/mobicore_driver/fastcall.h
rename to drivers/gud/MobiCoreDriver/fastcall.h
index 1c90520..33538df 100644
--- a/drivers/gud/mobicore_driver/fastcall.h
+++ b/drivers/gud/MobiCoreDriver/fastcall.h
@@ -36,9 +36,10 @@
*/
#define MC_FC_INIT -1
#define MC_FC_INFO -2
-#define MC_FC_POWER -3
-#define MC_FC_DUMP -4
#define MC_FC_NWD_TRACE -31 /* Mem trace setup fastcall */
+#ifdef TBASE_CORE_SWITCHER
+#define MC_FC_SWITCH_CORE 0x84000005
+#endif
/*
@@ -96,6 +97,23 @@
} as_out;
};
+#ifdef TBASE_CORE_SWITCHER
+/* fast call switch Core parameters */
+union mc_fc_swich_core {
+ union fc_generic as_generic;
+ struct {
+ uint32_t cmd;
+ uint32_t core_id;
+ uint32_t rfu[2];
+ } as_in;
+ struct {
+ uint32_t resp;
+ uint32_t ret;
+ uint32_t state;
+ uint32_t ext_info;
+ } as_out;
+};
+#endif
/*
* _smc() - fast call to MobiCore
*
@@ -104,23 +122,22 @@
static inline long _smc(void *data)
{
int ret = 0;
- union fc_generic fc_generic;
if (data == NULL)
return -EPERM;
#ifdef MC_SMC_FASTCALL
{
- ret = smc_fastcall(data, sizeof(fc_generic));
+ ret = smc_fastcall(data, sizeof(union fc_generic));
}
#else
- memcpy(&fc_generic, data, sizeof(union fc_generic));
{
- /* SVC expect values in r0-r3 */
- register u32 reg0 __asm__("r0") = fc_generic.as_in.cmd;
- register u32 reg1 __asm__("r1") = fc_generic.as_in.param[0];
- register u32 reg2 __asm__("r2") = fc_generic.as_in.param[1];
- register u32 reg3 __asm__("r3") = fc_generic.as_in.param[2];
+ union fc_generic *fc_generic = data;
+ /* SMC expects values in r0-r3 */
+ register u32 reg0 __asm__("r0") = fc_generic->as_in.cmd;
+ register u32 reg1 __asm__("r1") = fc_generic->as_in.param[0];
+ register u32 reg2 __asm__("r2") = fc_generic->as_in.param[1];
+ register u32 reg3 __asm__("r3") = fc_generic->as_in.param[2];
__asm__ volatile (
#ifdef MC_ARCH_EXTENSION_SEC
@@ -131,13 +148,23 @@
"smc 0\n"
: "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
);
+#ifdef __ARM_VE_A9X4_QEMU__
+ /* Qemu does not return to the address following the SMC
+ instruction so we have to insert several nop instructions to
+ work around this Qemu bug. */
+ __asm__ volatile (
+ "nop\n"
+ "nop\n"
+ "nop\n"
+ "nop"
+ );
+#endif
/* set response */
- fc_generic.as_out.resp = reg0;
- fc_generic.as_out.ret = reg1;
- fc_generic.as_out.param[0] = reg2;
- fc_generic.as_out.param[1] = reg3;
- memcpy(data, &fc_generic, sizeof(union fc_generic));
+ fc_generic->as_out.resp = reg0;
+ fc_generic->as_out.ret = reg1;
+ fc_generic->as_out.param[0] = reg2;
+ fc_generic->as_out.param[1] = reg3;
}
#endif
return ret;
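The rewritten _smc() above binds the caller's union fc_generic fields directly to r0-r3 and writes the returned registers back into the same structure, removing the intermediate copy. A hedged usage sketch; the wrapper name and the interpretation of the return value are illustrative, while the field names match union fc_generic as used in this header (memset() is assumed to be available where the header is included):

/* Hypothetical helper, not part of the driver. */
static long example_issue_fastcall(uint32_t cmd, uint32_t param0)
{
        union fc_generic fc;

        memset(&fc, 0, sizeof(fc));
        fc.as_in.cmd = cmd;             /* placed in r0 before the SMC */
        fc.as_in.param[0] = param0;     /* r1 */

        /* traps to the secure monitor; results come back in r0-r3 */
        if (_smc(&fc))
                return -EPERM;

        /* as_out.resp, .ret and .param[] mirror r0-r3 after the call */
        return (long)fc.as_out.ret;
}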
diff --git a/drivers/gud/mobicore_driver/logging.c b/drivers/gud/MobiCoreDriver/logging.c
similarity index 95%
rename from drivers/gud/mobicore_driver/logging.c
rename to drivers/gud/MobiCoreDriver/logging.c
index 1f599f9..507c4ed 100644
--- a/drivers/gud/mobicore_driver/logging.c
+++ b/drivers/gud/MobiCoreDriver/logging.c
@@ -251,7 +251,7 @@
*/
long mobicore_log_setup(void)
{
- unsigned long phys_log_buf;
+ phys_addr_t phys_log_buf;
union fc_generic fc_log;
struct sched_param param = { .sched_priority = 1 };
@@ -300,11 +300,12 @@
memset(&fc_log, 0, sizeof(fc_log));
fc_log.as_in.cmd = MC_FC_NWD_TRACE;
- fc_log.as_in.param[0] = phys_log_buf;
- fc_log.as_in.param[1] = log_size;
+ fc_log.as_in.param[0] = (uint32_t)phys_log_buf;
+ fc_log.as_in.param[1] = (uint32_t)(((uint64_t)phys_log_buf) >> 32);
+ fc_log.as_in.param[2] = log_size;
- MCDRV_DBG(mcd, "fc_log virt=%p phys=%p ",
- log_buf, (void *)phys_log_buf);
+ MCDRV_DBG(mcd, "fc_log virt=%p phys=0x%llX",
+ log_buf, (u64)phys_log_buf);
mc_fastcall(&fc_log);
MCDRV_DBG(mcd, "fc_log out ret=0x%08x", fc_log.as_out.ret);
@@ -319,7 +320,7 @@
set_task_state(log_thread, TASK_INTERRUPTIBLE);
- MCDRV_DBG(mcd, "fc_log Logger version %u\n", log_buf->version);
+ MCDRV_DBG(mcd, "fc_log Logger version %u", log_buf->version);
return 0;
err_stop_kthread:
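The MC_FC_NWD_TRACE setup above now passes a possibly 64-bit physical log-buffer address as two 32-bit fastcall parameters, low word in param[0] and high word in param[1], with the size moved to param[2]. A one-function sketch of that split; the helper name is illustrative:

#include <linux/types.h>

/* Hypothetical helper, not part of the driver. */
static void example_split_phys(phys_addr_t pa, uint32_t *lo, uint32_t *hi)
{
        *lo = (uint32_t)pa;                      /* bits [31:0] */
        *hi = (uint32_t)(((uint64_t)pa) >> 32);  /* bits [63:32], 0 without LPAE */
}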
diff --git a/drivers/gud/mobicore_driver/logging.h b/drivers/gud/MobiCoreDriver/logging.h
similarity index 100%
rename from drivers/gud/mobicore_driver/logging.h
rename to drivers/gud/MobiCoreDriver/logging.h
diff --git a/drivers/gud/mobicore_driver/main.c b/drivers/gud/MobiCoreDriver/main.c
similarity index 81%
rename from drivers/gud/mobicore_driver/main.c
rename to drivers/gud/MobiCoreDriver/main.c
index 0451452..ed2928a 100644
--- a/drivers/gud/mobicore_driver/main.c
+++ b/drivers/gud/MobiCoreDriver/main.c
@@ -112,12 +112,12 @@
int i;
struct page *page = virt_to_page(addr);
for (i = 0; i < (1<<order); i++) {
- MCDRV_DBG_VERBOSE(mcd, "free page at 0x%p\n", page);
- ClearPageReserved(page);
+ MCDRV_DBG_VERBOSE(mcd, "free page at 0x%p", page);
+ clear_bit(PG_reserved, &page->flags);
page++;
}
- MCDRV_DBG_VERBOSE(mcd, "freeing addr:%p, order:%x\n", addr, order);
+ MCDRV_DBG_VERBOSE(mcd, "freeing addr:%p, order:%x", addr, order);
free_pages((unsigned long)addr, order);
}
@@ -131,8 +131,9 @@
return -EINVAL;
MCDRV_DBG_VERBOSE(mcd,
- "handle=%u phys_addr=0x%p, virt_addr=0x%p len=%u\n",
- buffer->handle, buffer->phys, buffer->addr, buffer->len);
+ "handle=%u phys_addr=0x%llx, virt_addr=0x%p len=%u",
+ buffer->handle, (u64)buffer->phys,
+ buffer->addr, buffer->len);
if (!atomic_dec_and_test(&buffer->usage)) {
MCDRV_DBG_VERBOSE(mcd, "Could not free %u", buffer->handle);
@@ -147,7 +148,7 @@
}
static uint32_t mc_find_cont_wsm_addr(struct mc_instance *instance, void *uaddr,
- uint32_t *addr, uint32_t len)
+ void **addr, uint32_t len)
{
int ret = 0;
struct mc_buffer *buffer;
@@ -162,7 +163,7 @@
/* search for the given handle in the buffers list */
list_for_each_entry(buffer, &ctx.cont_bufs, list) {
if (buffer->uaddr == uaddr && buffer->len == len) {
- *addr = (uint32_t)buffer->addr;
+ *addr = buffer->addr;
goto found;
}
}
@@ -186,7 +187,7 @@
struct task_struct *peer = NULL;
bool ret = false;
- MCDRV_DBG(mcd, "Finding wsm for fd = %d\n", fd);
+ MCDRV_DBG_VERBOSE(mcd, "Finding wsm for fd = %d", fd);
if (!instance)
return false;
@@ -197,7 +198,7 @@
s = __get_socket(fp);
if (s) {
peer = get_pid_task(s->sk_peer_pid, PIDTYPE_PID);
- MCDRV_DBG(mcd, "Found pid for fd %d\n", peer->pid);
+ MCDRV_DBG_VERBOSE(mcd, "Found pid for fd %d", peer->pid);
}
if (peer) {
task_lock(peer);
@@ -209,11 +210,10 @@
if (!fp)
continue;
if (fp->private_data == instance) {
- MCDRV_DBG(mcd, "Found owner!");
+ MCDRV_DBG_VERBOSE(mcd, "Found owner!");
ret = true;
goto out;
}
-
}
} else {
MCDRV_DBG(mcd, "Owner not found!");
@@ -230,7 +230,7 @@
#endif
}
static uint32_t mc_find_cont_wsm(struct mc_instance *instance, uint32_t handle,
- int32_t fd, uint32_t *phys, uint32_t *len)
+ int32_t fd, phys_addr_t *phys, uint32_t *len)
{
int ret = 0;
struct mc_buffer *buffer;
@@ -239,7 +239,7 @@
return -EFAULT;
if (WARN_ON(!is_daemon(instance))) {
- MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+ MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
return -EPERM;
}
@@ -251,7 +251,7 @@
list_for_each_entry(buffer, &ctx.cont_bufs, list) {
if (buffer->handle == handle) {
if (mc_check_owner_fd(buffer->instance, fd)) {
- *phys = (uint32_t)buffer->phys;
+ *phys = buffer->phys;
*len = buffer->len;
goto found;
} else {
@@ -326,7 +326,7 @@
/* Something is not right if we end up here, better not
* clean the buffer so we just leak memory instead of
* creating security issues */
- MCDRV_DBG_ERROR(mcd, "Memory can't be unmapped\n");
+ MCDRV_DBG_ERROR(mcd, "Memory can't be unmapped");
return -EINVAL;
}
}
@@ -370,7 +370,7 @@
{
struct mc_buffer *cbuffer = NULL;
void *addr = 0;
- void *phys = 0;
+ phys_addr_t phys = 0;
unsigned int order;
unsigned long allocated_size;
int ret = 0;
@@ -379,13 +379,13 @@
return -EFAULT;
if (len == 0) {
- MCDRV_DBG_WARN(mcd, "cannot allocate size 0\n");
+ MCDRV_DBG_WARN(mcd, "cannot allocate size 0");
return -ENOMEM;
}
order = get_order(len);
if (order > MAX_ORDER) {
- MCDRV_DBG_WARN(mcd, "Buffer size too large\n");
+ MCDRV_DBG_WARN(mcd, "Buffer size too large");
return -ENOMEM;
}
allocated_size = (1 << order) * PAGE_SIZE;
@@ -398,23 +398,23 @@
if (cbuffer == NULL) {
MCDRV_DBG_WARN(mcd,
- "MMAP_WSM request: could not allocate buffer\n");
+ "MMAP_WSM request: could not allocate buffer");
ret = -ENOMEM;
goto unlock_instance;
}
mutex_lock(&ctx.bufs_lock);
- MCDRV_DBG_VERBOSE(mcd, "size %ld -> order %d --> %ld (2^n pages)\n",
+ MCDRV_DBG_VERBOSE(mcd, "size %ld -> order %d --> %ld (2^n pages)",
len, order, allocated_size);
addr = (void *)__get_free_pages(GFP_USER | __GFP_ZERO, order);
if (addr == NULL) {
- MCDRV_DBG_WARN(mcd, "get_free_pages failed\n");
+ MCDRV_DBG_WARN(mcd, "get_free_pages failed");
ret = -ENOMEM;
goto err;
}
- phys = (void *)virt_to_phys(addr);
+ phys = virt_to_phys(addr);
cbuffer->handle = get_unique_id();
cbuffer->phys = phys;
cbuffer->addr = addr;
@@ -429,9 +429,11 @@
list_add(&cbuffer->list, &ctx.cont_bufs);
MCDRV_DBG_VERBOSE(mcd,
- "allocated phys=0x%p - 0x%p, size=%ld, kvirt=0x%p, h=%d\n",
- phys, (void *)((unsigned int)phys+allocated_size),
- allocated_size, addr, cbuffer->handle);
+ "allocated phys=0x%llx - 0x%llx, size=%ld, kvirt=0x%p"
+ ", h=%d",
+ (u64)phys,
+ (u64)(phys+allocated_size),
+ allocated_size, addr, cbuffer->handle);
*buffer = cbuffer;
goto unlock;
@@ -457,7 +459,7 @@
return -EFAULT;
if (WARN_ON(!is_daemon(instance))) {
- MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+ MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
return -EPERM;
}
@@ -476,7 +478,7 @@
return ret;
}
-void *get_mci_base_phys(unsigned int len)
+static phys_addr_t get_mci_base_phys(unsigned int len)
{
if (ctx.mci_base.phys) {
return ctx.mci_base.phys;
@@ -487,45 +489,45 @@
ctx.mci_base.addr =
(void *)__get_free_pages(GFP_USER | __GFP_ZERO, order);
if (ctx.mci_base.addr == NULL) {
- MCDRV_DBG_WARN(mcd, "get_free_pages failed\n");
+ MCDRV_DBG_WARN(mcd, "get_free_pages failed");
memset(&ctx.mci_base, 0, sizeof(ctx.mci_base));
- return NULL;
+ return 0;
}
- ctx.mci_base.phys = (void *)virt_to_phys(ctx.mci_base.addr);
+ ctx.mci_base.phys = virt_to_phys(ctx.mci_base.addr);
return ctx.mci_base.phys;
}
}
/*
- * Create a l2 table from a virtual memory buffer which can be vmalloc
+ * Create a MMU table from a virtual memory buffer which can be vmalloc
* or user space virtual memory
*/
-int mc_register_wsm_l2(struct mc_instance *instance,
- uint32_t buffer, uint32_t len,
- uint32_t *handle, uint32_t *phys)
+int mc_register_wsm_mmu(struct mc_instance *instance,
+ void *buffer, uint32_t len,
+ uint32_t *handle, phys_addr_t *phys)
{
int ret = 0;
- struct mc_l2_table *table = NULL;
+ struct mc_mmu_table *table = NULL;
struct task_struct *task = current;
- uint32_t kbuff = 0x0;
+ void *kbuff = NULL;
if (WARN(!instance, "No instance data available"))
return -EFAULT;
if (len == 0) {
- MCDRV_DBG_ERROR(mcd, "len=0 is not supported!\n");
+ MCDRV_DBG_ERROR(mcd, "len=0 is not supported!");
return -EINVAL;
}
- MCDRV_DBG_VERBOSE(mcd, "buffer: %p, len=%08x\n", (void *)buffer, len);
+ MCDRV_DBG_VERBOSE(mcd, "buffer: %p, len=%08x", buffer, len);
- if (!mc_find_cont_wsm_addr(instance, (void *)buffer, &kbuff, len))
- table = mc_alloc_l2_table(instance, NULL, (void *)kbuff, len);
+ if (!mc_find_cont_wsm_addr(instance, buffer, &kbuff, len))
+ table = mc_alloc_mmu_table(instance, NULL, kbuff, len);
else
- table = mc_alloc_l2_table(instance, task, (void *)buffer, len);
+ table = mc_alloc_mmu_table(instance, task, buffer, len);
if (IS_ERR(table)) {
- MCDRV_DBG_ERROR(mcd, "new_used_l2_table() failed\n");
+ MCDRV_DBG_ERROR(mcd, "mc_alloc_mmu_table() failed");
return -EINVAL;
}
@@ -533,19 +535,19 @@
*handle = table->handle;
/* WARNING: daemon shouldn't know this either, but live with it */
if (is_daemon(instance))
- *phys = (uint32_t)table->phys;
+ *phys = table->phys;
else
*phys = 0;
- MCDRV_DBG_VERBOSE(mcd, "handle: %d, phys=%p\n",
- *handle, (void *)*phys);
+ MCDRV_DBG_VERBOSE(mcd, "handle: %d, phys=0x%llX",
+ *handle, (u64)(*phys));
- MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
+ MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
return ret;
}
-int mc_unregister_wsm_l2(struct mc_instance *instance, uint32_t handle)
+int mc_unregister_wsm_mmu(struct mc_instance *instance, uint32_t handle)
{
int ret = 0;
@@ -553,11 +555,11 @@
return -EFAULT;
/* free table (if no further locks exist) */
- mc_free_l2_table(instance, handle);
+ mc_free_mmu_table(instance, handle);
return ret;
}
-/* Lock the object from handle, it could be a WSM l2 table or a cont buffer! */
+/* Lock the object from handle, it could be a WSM MMU table or a cont buffer! */
static int mc_lock_handle(struct mc_instance *instance, uint32_t handle)
{
int ret = 0;
@@ -566,14 +568,14 @@
return -EFAULT;
if (WARN_ON(!is_daemon(instance))) {
- MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+ MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
return -EPERM;
}
mutex_lock(&instance->lock);
- ret = mc_lock_l2_table(instance, handle);
+ ret = mc_lock_mmu_table(instance, handle);
- /* Handle was not a l2 table but a cont buffer */
+ /* Handle was not a MMU table but a cont buffer */
if (ret == -EINVAL) {
/* Call the non locking variant! */
ret = __lock_buffer(instance, handle);
@@ -592,14 +594,14 @@
return -EFAULT;
if (WARN_ON(!is_daemon(instance))) {
- MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+ MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
return -EPERM;
}
mutex_lock(&instance->lock);
- ret = mc_free_l2_table(instance, handle);
+ ret = mc_free_mmu_table(instance, handle);
- /* Not a l2 table, then it must be a buffer */
+ /* Not a MMU table, then it must be a buffer */
if (ret == -EINVAL) {
/* Call the non locking variant! */
ret = __free_buffer(instance, handle, true);
@@ -609,35 +611,31 @@
return ret;
}
-static uint32_t mc_find_wsm_l2(struct mc_instance *instance,
+static phys_addr_t mc_find_wsm_mmu(struct mc_instance *instance,
uint32_t handle, int32_t fd)
{
- uint32_t ret = 0;
-
if (WARN(!instance, "No instance data available"))
return 0;
if (WARN_ON(!is_daemon(instance))) {
- MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+ MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
return 0;
}
- ret = mc_find_l2_table(handle, fd);
-
- return ret;
+ return mc_find_mmu_table(handle, fd);
}
-static int mc_clean_wsm_l2(struct mc_instance *instance)
+static int mc_clean_wsm_mmu(struct mc_instance *instance)
{
if (WARN(!instance, "No instance data available"))
return -EFAULT;
if (WARN_ON(!is_daemon(instance))) {
- MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+ MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
return -EPERM;
}
- mc_clean_l2_tables();
+ mc_clean_mmu_tables();
return 0;
}
@@ -646,19 +644,20 @@
{
struct mc_instance *instance = get_instance(file);
unsigned long len = vmarea->vm_end - vmarea->vm_start;
- void *paddr = (void *)(vmarea->vm_pgoff << PAGE_SHIFT);
+ phys_addr_t paddr = (vmarea->vm_pgoff << PAGE_SHIFT);
unsigned int pfn;
struct mc_buffer *buffer = 0;
int ret = 0;
- MCDRV_DBG_VERBOSE(mcd, "enter (vma start=0x%p, size=%ld, mci=%p)\n",
- (void *)vmarea->vm_start, len, ctx.mci_base.phys);
+ MCDRV_DBG_VERBOSE(mcd, "enter (vma start=0x%p, size=%ld, mci=0x%llX)",
+ (void *)vmarea->vm_start, len,
+ (u64)ctx.mci_base.phys);
if (WARN(!instance, "No instance data available"))
return -EFAULT;
if (len == 0) {
- MCDRV_DBG_ERROR(mcd, "cannot allocate size 0\n");
+ MCDRV_DBG_ERROR(mcd, "cannot allocate size 0");
return -ENOMEM;
}
if (paddr) {
@@ -722,7 +721,7 @@
vmarea->vm_page_prot);
}
- MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
+ MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
return ret;
}
@@ -768,21 +767,24 @@
case MC_IO_REG_WSM:{
struct mc_ioctl_reg_wsm reg;
+ phys_addr_t phys;
if (copy_from_user(®, uarg, sizeof(reg)))
return -EFAULT;
- ret = mc_register_wsm_l2(instance, reg.buffer,
- reg.len, ®.handle, ®.table_phys);
+ ret = mc_register_wsm_mmu(instance, (void *)reg.buffer,
+ reg.len, ®.handle, &phys);
+ reg.table_phys = phys;
+
if (!ret) {
if (copy_to_user(uarg, ®, sizeof(reg))) {
ret = -EFAULT;
- mc_unregister_wsm_l2(instance, reg.handle);
+ mc_unregister_wsm_mmu(instance, reg.handle);
}
}
break;
}
case MC_IO_UNREG_WSM:
- ret = mc_unregister_wsm_l2(instance, (uint32_t)arg);
+ ret = mc_unregister_wsm_mmu(instance, (uint32_t)arg);
break;
case MC_IO_VERSION:
@@ -803,7 +805,7 @@
return -EFAULT;
map.handle = buffer->handle;
- map.phys_addr = (unsigned long)buffer->phys;
+ map.phys_addr = buffer->phys;
map.reused = 0;
if (copy_to_user(uarg, &map, sizeof(map)))
ret = -EFAULT;
@@ -812,7 +814,7 @@
break;
}
default:
- MCDRV_DBG_ERROR(mcd, "unsupported cmd=%d\n", cmd);
+ MCDRV_DBG_ERROR(mcd, "unsupported cmd=0x%x", cmd);
ret = -ENOIOCTLCMD;
break;
@@ -836,7 +838,7 @@
return -EFAULT;
if (WARN_ON(!is_daemon(instance))) {
- MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+ MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
return -EPERM;
}
@@ -856,8 +858,8 @@
return -EFAULT;
ctx.mcp = ctx.mci_base.addr + init.mcp_offset;
- ret = mc_init((uint32_t)ctx.mci_base.phys, init.nq_offset,
- init.nq_length, init.mcp_offset, init.mcp_length);
+ ret = mc_init(ctx.mci_base.phys, init.nq_length,
+ init.mcp_offset, init.mcp_length);
break;
}
case MC_IO_INFO: {
@@ -890,14 +892,14 @@
ret = mc_unlock_handle(instance, (uint32_t)arg);
break;
case MC_IO_CLEAN_WSM:
- ret = mc_clean_wsm_l2(instance);
+ ret = mc_clean_wsm_mmu(instance);
break;
case MC_IO_RESOLVE_WSM: {
- uint32_t phys;
+ phys_addr_t phys;
struct mc_ioctl_resolv_wsm wsm;
if (copy_from_user(&wsm, uarg, sizeof(wsm)))
return -EFAULT;
- phys = mc_find_wsm_l2(instance, wsm.handle, wsm.fd);
+ phys = mc_find_wsm_mmu(instance, wsm.handle, wsm.fd);
if (!phys)
return -EINVAL;
@@ -909,7 +911,8 @@
}
case MC_IO_RESOLVE_CONT_WSM: {
struct mc_ioctl_resolv_cont_wsm cont_wsm;
- uint32_t phys = 0, len = 0;
+ phys_addr_t phys = 0;
+ uint32_t len = 0;
if (copy_from_user(&cont_wsm, uarg, sizeof(cont_wsm)))
return -EFAULT;
ret = mc_find_cont_wsm(instance, cont_wsm.handle, cont_wsm.fd,
@@ -928,7 +931,7 @@
return -EFAULT;
map.reused = (ctx.mci_base.phys != 0);
- map.phys_addr = (unsigned long)get_mci_base_phys(map.len);
+ map.phys_addr = get_mci_base_phys(map.len);
if (!map.phys_addr) {
MCDRV_DBG_ERROR(mcd, "Failed to setup MCI buffer!");
return -EFAULT;
@@ -939,10 +942,6 @@
ret = 0;
break;
}
- case MC_IO_MAP_PWSM:{
- break;
- }
-
case MC_IO_LOG_SETUP: {
#ifdef MC_MEM_TRACES
ret = mobicore_log_setup();
@@ -985,27 +984,27 @@
return -EFAULT;
/* avoid debug output on non-error, because this is call quite often */
- MCDRV_DBG_VERBOSE(mcd, "enter\n");
+ MCDRV_DBG_VERBOSE(mcd, "enter");
/* only the MobiCore Daemon is allowed to call this function */
if (WARN_ON(!is_daemon(instance))) {
- MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+ MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
return -EPERM;
}
if (buffer_len < sizeof(unsigned int)) {
- MCDRV_DBG_ERROR(mcd, "invalid length\n");
+ MCDRV_DBG_ERROR(mcd, "invalid length");
return -EINVAL;
}
for (;;) {
if (wait_for_completion_interruptible(&ctx.isr_comp)) {
- MCDRV_DBG_VERBOSE(mcd, "read interrupted\n");
+ MCDRV_DBG_VERBOSE(mcd, "read interrupted");
return -ERESTARTSYS;
}
ssiq_counter = atomic_read(&ctx.isr_counter);
- MCDRV_DBG_VERBOSE(mcd, "ssiq_counter=%i, ctx.counter=%i\n",
+ MCDRV_DBG_VERBOSE(mcd, "ssiq_counter=%i, ctx.counter=%i",
ssiq_counter, ctx.evt_counter);
if (ssiq_counter != ctx.evt_counter) {
@@ -1017,12 +1016,12 @@
/* end loop if non-blocking */
if (file->f_flags & O_NONBLOCK) {
- MCDRV_DBG_ERROR(mcd, "non-blocking read\n");
+ MCDRV_DBG_ERROR(mcd, "non-blocking read");
return -EAGAIN;
}
if (signal_pending(current)) {
- MCDRV_DBG_VERBOSE(mcd, "received signal.\n");
+ MCDRV_DBG_VERBOSE(mcd, "received signal.");
return -ERESTARTSYS;
}
}
@@ -1031,7 +1030,7 @@
ret = copy_to_user(buffer, &ctx.evt_counter, sizeof(unsigned int));
if (ret != 0) {
- MCDRV_DBG_ERROR(mcd, "copy_to_user failed\n");
+ MCDRV_DBG_ERROR(mcd, "copy_to_user failed");
return -EFAULT;
}
@@ -1061,6 +1060,43 @@
return instance;
}
+#if defined(TBASE_CORE_SWITCHER) && defined(DEBUG)
+static ssize_t mc_fd_write(struct file *file, const char __user *buffer,
+ size_t buffer_len, loff_t *x)
+{
+ uint32_t cpu_new;
+ /* we only consider one digit */
+ char buf[2];
+ struct mc_instance *instance = get_instance(file);
+
+ if (WARN(!instance, "No instance data available"))
+ return -EFAULT;
+
+ /* Invalid data, nothing to do */
+ if (buffer_len < 1)
+ return -EINVAL;
+
+ /* Invalid data, nothing to do */
+ if (copy_from_user(buf, buffer, min(sizeof(buf), buffer_len)))
+ return -EFAULT;
+
+ if (buf[0] == 'n') {
+ mc_nsiq();
+ /* If it's a digit then switch cores */
+ } else if ((buf[0] >= '0') && (buf[0] <= '9')) {
+ cpu_new = buf[0] - '0';
+ if (cpu_new <= 8) {
+ MCDRV_DBG_VERBOSE(mcd, "Set Active Cpu: %d\n", cpu_new);
+ mc_switch_core(cpu_new);
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return buffer_len;
+}
+#endif
+
/*
* Release a mobicore instance object and all objects related to it
* @instance: instance
@@ -1074,7 +1110,7 @@
return -EFAULT;
mutex_lock(&instance->lock);
- mc_clear_l2_tables(instance);
+ mc_clear_mmu_tables(instance);
mutex_lock(&ctx.bufs_lock);
/* release all mapped data */
@@ -1112,7 +1148,7 @@
{
struct mc_instance *instance;
- MCDRV_DBG_VERBOSE(mcd, "enter\n");
+ MCDRV_DBG_VERBOSE(mcd, "enter");
instance = mc_alloc_instance();
if (instance == NULL)
@@ -1141,7 +1177,7 @@
return -ENOMEM;
instance = get_instance(file);
- MCDRV_DBG(mcd, "accept this as MobiCore Daemon\n");
+ MCDRV_DBG(mcd, "accept this as MobiCore Daemon");
ctx.daemon_inst = instance;
ctx.daemon = current;
@@ -1172,7 +1208,7 @@
/* check if daemon closes us. */
if (is_daemon(instance)) {
- MCDRV_DBG_WARN(mcd, "WARNING: MobiCore Daemon died\n");
+ MCDRV_DBG_WARN(mcd, "MobiCore Daemon died");
ctx.daemon_inst = NULL;
ctx.daemon = NULL;
}
@@ -1183,7 +1219,7 @@
* ret is quite irrelevant here as most apps don't care about the
* return value from close() and it's quite difficult to recover
*/
- MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
+ MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
return (int)ret;
}
@@ -1200,7 +1236,9 @@
/* signal the daemon */
complete(&ctx.isr_comp);
-
+#ifdef MC_MEM_TRACES
+ mobicore_log_read();
+#endif
return IRQ_HANDLED;
}
@@ -1221,6 +1259,9 @@
.release = mc_fd_release,
.unlocked_ioctl = mc_fd_user_ioctl,
.mmap = mc_fd_mmap,
+#if defined(TBASE_CORE_SWITCHER) && defined(DEBUG)
+ .write = mc_fd_write,
+#endif
};
static int create_devices(void)
@@ -1239,17 +1280,17 @@
ret = alloc_chrdev_region(&mc_dev_admin, 0, MC_DEV_MAX, "mobicore");
if (ret < 0) {
- MCDRV_DBG_ERROR(mcd, "failed to allocate char dev region\n");
+ MCDRV_DBG_ERROR(mcd, "failed to allocate char dev region");
goto error;
}
mc_dev_user = MKDEV(MAJOR(mc_dev_admin), 1);
- MCDRV_DBG_VERBOSE(mcd, "%s: dev %d", "mobicore", MAJOR(mc_dev_region));
+ MCDRV_DBG_VERBOSE(mcd, "%s: dev %d", "mobicore", MAJOR(mc_dev_admin));
/* First the ADMIN node */
ret = cdev_add(&mc_admin_cdev, mc_dev_admin, 1);
if (ret != 0) {
- MCDRV_DBG_ERROR(mcd, "admin device register failed\n");
+ MCDRV_DBG_ERROR(mcd, "admin device register failed");
goto error;
}
mc_admin_cdev.owner = THIS_MODULE;
@@ -1260,7 +1301,7 @@
ret = cdev_add(&mc_user_cdev, mc_dev_user, 1);
if (ret != 0) {
- MCDRV_DBG_ERROR(mcd, "user device register failed\n");
+ MCDRV_DBG_ERROR(mcd, "user device register failed");
goto error_unregister;
}
mc_user_cdev.owner = THIS_MODULE;
@@ -1301,13 +1342,13 @@
/* Hardware does not support ARM TrustZone -> Cannot continue! */
if (!has_security_extensions()) {
MCDRV_DBG_ERROR(mcd,
- "Hardware doesn't support ARM TrustZone!\n");
+ "Hardware doesn't support ARM TrustZone!");
return -ENODEV;
}
/* Running in secure mode -> Cannot load the driver! */
if (is_secure_mode()) {
- MCDRV_DBG_ERROR(mcd, "Running in secure MODE!\n");
+ MCDRV_DBG_ERROR(mcd, "Running in secure MODE!");
return -ENODEV;
}
@@ -1320,18 +1361,18 @@
/* initialize event counter for signaling of an IRQ to zero */
atomic_set(&ctx.isr_counter, 0);
- /* set up S-SIQ interrupt handler */
+ /* set up S-SIQ interrupt handler ************************/
ret = request_irq(MC_INTR_SSIQ, mc_ssiq_isr, IRQF_TRIGGER_RISING,
MC_ADMIN_DEVNODE, &ctx);
if (ret != 0) {
- MCDRV_DBG_ERROR(mcd, "interrupt request failed\n");
+ MCDRV_DBG_ERROR(mcd, "interrupt request failed");
goto err_req_irq;
}
#ifdef MC_PM_RUNTIME
ret = mc_pm_initialize(&ctx);
if (ret != 0) {
- MCDRV_DBG_ERROR(mcd, "Power Management init failed!\n");
+ MCDRV_DBG_ERROR(mcd, "Power Management init failed!");
goto free_isr;
}
#endif
@@ -1340,7 +1381,7 @@
if (ret != 0)
goto free_pm;
- ret = mc_init_l2_tables();
+ ret = mc_init_mmu_tables();
#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
ret = mc_pm_clock_initialize();
@@ -1361,7 +1402,7 @@
mutex_init(&ctx.bufs_lock);
memset(&ctx.mci_base, 0, sizeof(ctx.mci_base));
- MCDRV_DBG(mcd, "initialized\n");
+ MCDRV_DBG(mcd, "initialized");
return 0;
free_pm:
@@ -1381,12 +1422,12 @@
*/
static void __exit mobicore_exit(void)
{
- MCDRV_DBG_VERBOSE(mcd, "enter\n");
+ MCDRV_DBG_VERBOSE(mcd, "enter");
#ifdef MC_MEM_TRACES
mobicore_log_free();
#endif
- mc_release_l2_tables();
+ mc_release_mmu_tables();
#ifdef MC_PM_RUNTIME
mc_pm_free();
@@ -1408,6 +1449,15 @@
MCDRV_DBG_VERBOSE(mcd, "exit");
}
+bool mc_sleep_ready(void)
+{
+#ifdef MC_PM_RUNTIME
+ return mc_pm_sleep_ready();
+#else
+ return true;
+#endif
+}
+
/* Linux Driver Module Macros */
module_init(mobicore_init);
module_exit(mobicore_exit);
diff --git a/drivers/gud/mobicore_driver/main.h b/drivers/gud/MobiCoreDriver/main.h
similarity index 89%
rename from drivers/gud/mobicore_driver/main.h
rename to drivers/gud/MobiCoreDriver/main.h
index 871191e..11e304c 100644
--- a/drivers/gud/mobicore_driver/main.h
+++ b/drivers/gud/MobiCoreDriver/main.h
@@ -52,7 +52,7 @@
/* virtual Userspace start address */
void *uaddr;
/* physical start address */
- void *phys;
+ phys_addr_t phys;
/* order of number of pages */
unsigned int order;
uint32_t len;
@@ -83,8 +83,8 @@
};
struct mc_sleep_mode {
- uint16_t SleepReq;
- uint16_t ReadyToSleep;
+ uint16_t sleep_req;
+ uint16_t ready_to_sleep;
};
/* MobiCore is idle. No scheduling required. */
@@ -129,14 +129,14 @@
int mc_release_instance(struct mc_instance *instance);
/*
- * mc_register_wsm_l2() - Create a L2 table from a virtual memory buffer which
+ * mc_register_wsm_mmu() - Create a MMU table from a virtual memory buffer which
* can be vmalloc or user space virtual memory
*/
-int mc_register_wsm_l2(struct mc_instance *instance,
- uint32_t buffer, uint32_t len,
- uint32_t *handle, uint32_t *phys);
+int mc_register_wsm_mmu(struct mc_instance *instance,
+ void *buffer, uint32_t len,
+ uint32_t *handle, phys_addr_t *phys);
/* Unregister the buffer mapped above */
-int mc_unregister_wsm_l2(struct mc_instance *instance, uint32_t handle);
+int mc_unregister_wsm_mmu(struct mc_instance *instance, uint32_t handle);
/* Allocate one mc_buffer of contiguous space */
int mc_get_buffer(struct mc_instance *instance,
@@ -147,4 +147,7 @@
/* Check if the other end of the fd owns instance */
bool mc_check_owner_fd(struct mc_instance *instance, int32_t fd);
+/* Test if sleep is possible */
+bool mc_sleep_ready(void);
+
#endif /* _MC_MAIN_H_ */
diff --git a/drivers/gud/MobiCoreDriver/mem.c b/drivers/gud/MobiCoreDriver/mem.c
new file mode 100644
index 0000000..2d92f74
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/mem.c
@@ -0,0 +1,743 @@
+/*
+ * MobiCore Driver Kernel Module.
+ *
+ * This module is written as a Linux device driver.
+ * This driver represents the command proxy on the lowest layer, from the
+ * secure world to the non secure world, and vice versa.
+ * This driver is located in the non secure world (Linux).
+ * This driver offers IOCTL commands, for access to the secure world, and has
+ * the interface from the secure world to the normal world.
+ * The access to the driver is possible with a file descriptor,
+ * which has to be created by the fd = open(/dev/mobicore) command.
+ *
+ * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ * <-- Copyright Trustonic Limited 2013 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "main.h"
+#include "debug.h"
+#include "mem.h"
+
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/device.h>
+
+#ifdef LPAE_SUPPORT
+#define MMU_TYPE_PAGE (3 << 0)
+#define MMU_BUFFERABLE (1 << 2) /* AttrIndx[0] */
+#define MMU_CACHEABLE (1 << 3) /* AttrIndx[1] */
+#define MMU_NS (1 << 5)
+#define MMU_AP_RW_ALL (1 << 6) /* AP[2:1], RW, at any privilege level */
+#define MMU_EXT_SHARED (3 << 8) /* SH[1:0], inner shareable */
+#define MMU_EXT_AF (1 << 10) /* Access Flag */
+#define MMU_EXT_NG (1 << 11)
+#define MMU_EXT_XN (((uint64_t)1) << 54) /* XN */
+#else
+#define MMU_TYPE_EXT (3 << 0) /* v5 */
+#define MMU_TYPE_SMALL (2 << 0)
+#define MMU_BUFFERABLE (1 << 2)
+#define MMU_CACHEABLE (1 << 3)
+#define MMU_EXT_AP0 (1 << 4)
+#define MMU_EXT_AP1 (2 << 4)
+#define MMU_EXT_TEX(x) ((x) << 6) /* v5 */
+#define MMU_EXT_SHARED (1 << 10) /* v6 */
+#define MMU_EXT_NG (1 << 11) /* v6 */
+#endif
+
+/* MobiCore memory context data */
+struct mc_mem_context mem_ctx;
+
+static inline void release_page(struct page *page)
+{
+ set_bit(PG_dirty, &page->flags);
+
+ page_cache_release(page);
+}
+
+static int lock_pages(struct task_struct *task, void *virt_start_page_addr,
+ int pages_no, struct page **pages)
+{
+ int locked_pages;
+
+ /* lock user pages, must hold the mmap_sem to do this. */
+ down_read(&(task->mm->mmap_sem));
+ locked_pages = get_user_pages(
+ task,
+ task->mm,
+ (unsigned long)virt_start_page_addr,
+ pages_no,
+ 1, /* write access */
+ 0,
+ pages,
+ NULL);
+ up_read(&(task->mm->mmap_sem));
+
+ /* check if we could lock all pages. */
+ if (locked_pages != pages_no) {
+ MCDRV_DBG_ERROR(mcd, "get_user_pages() failed, locked_pages=%d",
+ locked_pages);
+ if (locked_pages > 0) {
+ /* release all locked pages. */
+ release_pages(pages, locked_pages, 0);
+ }
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Get kernel pointer to shared MMU table given a per-process reference */
+static void *get_mmu_table_kernel_virt(struct mc_mmu_table *table)
+{
+ if (WARN(!table, "Invalid MMU table"))
+ return NULL;
+
+ if (WARN(!table->set, "Invalid MMU table set"))
+ return NULL;
+
+ if (WARN(!table->set->kernel_virt, "Invalid MMU pointer"))
+ return NULL;
+
+ return &(table->set->kernel_virt->table[table->idx]);
+}
+
+static inline int in_use(struct mc_mmu_table *table)
+{
+ return atomic_read(&table->usage) > 0;
+}
+
+/*
+ * Search the list of used MMU tables and return the one with the handle.
+ * Assumes the table_lock is taken.
+ */
+struct mc_mmu_table *find_mmu_table(unsigned int handle)
+{
+ struct mc_mmu_table *table;
+
+ list_for_each_entry(table, &mem_ctx.mmu_tables, list) {
+ if (table->handle == handle)
+ return table;
+ }
+ return NULL;
+}
+
+/*
+ * Allocate a new MMU table store and add its MMU_TABLES_PER_PAGE tables to
+ * the free MMU tables list. Assumes the table_lock is already taken by the
+ * caller.
+ */
+static int alloc_mmu_table_store(void)
+{
+ unsigned long store;
+ struct mc_mmu_tables_set *mmutable_set;
+ struct mc_mmu_table *mmutable, *mmutable2;
+ struct page *page;
+ int ret = 0, i;
+ /* temp list for holding the MMU tables */
+ LIST_HEAD(temp);
+
+ store = get_zeroed_page(GFP_KERNEL);
+ if (!store)
+ return -ENOMEM;
+
+ /*
+ * Actually, locking is not necessary, because kernel
+ * memory is not supposed to get swapped out. But we
+ * play safe....
+ */
+ page = virt_to_page(store);
+ set_bit(PG_reserved, &page->flags);
+
+ /* add all the descriptors to the free descriptors list */
+ mmutable_set = kmalloc(sizeof(*mmutable_set), GFP_KERNEL | __GFP_ZERO);
+ if (mmutable_set == NULL) {
+ ret = -ENOMEM;
+ goto free_store;
+ }
+ /* initialize */
+ mmutable_set->kernel_virt = (void *)store;
+ mmutable_set->page = page;
+ mmutable_set->phys = virt_to_phys((void *)store);
+ /* the set is not yet used */
+ atomic_set(&mmutable_set->used_tables, 0);
+
+ /* init add to list. */
+ INIT_LIST_HEAD(&(mmutable_set->list));
+ list_add(&mmutable_set->list, &mem_ctx.mmu_tables_sets);
+
+ for (i = 0; i < MMU_TABLES_PER_PAGE; i++) {
+ /* allocate a WSM MMU descriptor */
+ mmutable = kmalloc(sizeof(*mmutable), GFP_KERNEL | __GFP_ZERO);
+ if (mmutable == NULL) {
+ ret = -ENOMEM;
+ MCDRV_DBG_ERROR(mcd, "out of memory");
+ /* Free the full temp list and the store in this case */
+ goto free_temp_list;
+ }
+
+ /* set the back-reference to the owning table set */
+ mmutable->set = mmutable_set;
+ mmutable->idx = i;
+ mmutable->virt = get_mmu_table_kernel_virt(mmutable);
+ mmutable->phys = mmutable_set->phys+i*sizeof(struct mmutable);
+ atomic_set(&mmutable->usage, 0);
+
+ /* add to temp list. */
+ INIT_LIST_HEAD(&mmutable->list);
+ list_add_tail(&mmutable->list, &temp);
+ }
+
+ /*
+ * If everything went ok then merge the temp list with the global
+ * free list
+ */
+ list_splice_tail(&temp, &mem_ctx.free_mmu_tables);
+ return 0;
+free_temp_list:
+ list_for_each_entry_safe(mmutable, mmutable2, &temp, list) {
+ kfree(mmutable);
+ }
+
+ list_del(&mmutable_set->list);
+
+free_store:
+ free_page(store);
+ return ret;
+}
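alloc_mmu_table_store() above carves one zeroed page into MMU_TABLES_PER_PAGE descriptors, each getting its index, kernel virtual pointer and physical address at a sizeof(struct mmutable) stride. A hedged worked example of the address computation; the helper name and the store address are made up, the formula is the one used above:

/* Hypothetical helper, not part of the driver. Non-LPAE example:
 * set->phys = 0x9f400000 and sizeof(struct mmutable) = 1024, so
 * idx 0..3 yield 0x9f400000, 0x9f400400, 0x9f400800, 0x9f400c00. */
static phys_addr_t example_table_phys(struct mc_mmu_tables_set *set,
                                      unsigned int idx)
{
        return set->phys + idx * sizeof(struct mmutable);
}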
+/*
+ * Get a MMU table from the free tables list or allocate a new one and
+ * initialize it. Assumes the table_lock is already taken.
+ */
+static struct mc_mmu_table *alloc_mmu_table(struct mc_instance *instance)
+{
+ int ret = 0;
+ struct mc_mmu_table *table = NULL;
+
+ if (list_empty(&mem_ctx.free_mmu_tables)) {
+ ret = alloc_mmu_table_store();
+ if (ret) {
+ MCDRV_DBG_ERROR(mcd, "Failed to allocate new store!");
+ return ERR_PTR(-ENOMEM);
+ }
+ /* if it's still empty something wrong has happened */
+ if (list_empty(&mem_ctx.free_mmu_tables)) {
+ MCDRV_DBG_ERROR(mcd,
+ "Free list not updated correctly!");
+ return ERR_PTR(-EFAULT);
+ }
+ }
+
+ /* get a WSM MMU descriptor */
+ table = list_first_entry(&mem_ctx.free_mmu_tables,
+ struct mc_mmu_table, list);
+ if (table == NULL) {
+ MCDRV_DBG_ERROR(mcd, "out of memory");
+ return ERR_PTR(-ENOMEM);
+ }
+ /* Move it to the used MMU tables list */
+ list_move_tail(&table->list, &mem_ctx.mmu_tables);
+
+ table->handle = get_unique_id();
+ table->owner = instance;
+
+ atomic_inc(&table->set->used_tables);
+ atomic_inc(&table->usage);
+
+ MCDRV_DBG_VERBOSE(mcd,
+ "chunkPhys=0x%llX, idx=%d",
+ (u64)table->set->phys, table->idx);
+
+ return table;
+}
+
+/*
+ * Frees the object associated with a MMU table. Initially the object is moved
+ * to the free tables list, but if all the tables of the store are free
+ * then the store is also released.
+ * Assumes the table_lock is already taken.
+ */
+static void free_mmu_table(struct mc_mmu_table *table)
+{
+ struct mc_mmu_tables_set *mmutable_set;
+
+ if (WARN(!table, "Invalid table"))
+ return;
+
+ mmutable_set = table->set;
+ if (WARN(!mmutable_set, "Invalid table set"))
+ return;
+
+ list_move_tail(&table->list, &mem_ctx.free_mmu_tables);
+
+ /* if nobody uses this set, we can release it. */
+ if (atomic_dec_and_test(&mmutable_set->used_tables)) {
+ struct mc_mmu_table *tmp;
+
+ /* remove from list */
+ list_del(&mmutable_set->list);
+ /*
+ * All the MMU tables are in the free list for this set
+ * so we can just remove them from there
+ */
+ list_for_each_entry_safe(table, tmp, &mem_ctx.free_mmu_tables,
+ list) {
+ if (table->set == mmutable_set) {
+ list_del(&table->list);
+ kfree(table);
+ }
+ } /* end while */
+
+ /*
+ * We shouldn't recover from this since it was some data
+ * corruption before
+ */
+ BUG_ON(!mmutable_set->page);
+ clear_bit(PG_reserved, &(mmutable_set->page)->flags);
+
+
+ BUG_ON(!mmutable_set->kernel_virt);
+ free_page((unsigned long)mmutable_set->kernel_virt);
+
+ kfree(mmutable_set);
+ }
+}
+
+/*
+ * Create a MMU table in a WSM container that has been allocated previously.
+ * Assumes the table lock is already taken, or that it is not needed (when
+ * the MMU table is first created, the whole list is locked).
+ *
+ * @task pointer to task owning WSM
+ * @wsm_buffer user space WSM start
+ * @wsm_len WSM length
+ * @table Pointer to MMU table details
+ */
+static int map_buffer(struct task_struct *task, void *wsm_buffer,
+ unsigned int wsm_len, struct mc_mmu_table *table)
+{
+ int ret = 0;
+ unsigned int i, nr_of_pages;
+ /* start address of the 4 KiB page of wsm_buffer */
+ void *virt_addr_page;
+ struct page *page;
+ struct mmutable *mmutable;
+ struct page **mmutable_as_array_of_pointers_to_page;
+ /* page offset in wsm buffer */
+ unsigned int offset;
+
+ if (WARN(!wsm_buffer, "Invalid WSM buffer pointer"))
+ return -EINVAL;
+
+ if (WARN(wsm_len == 0, "Invalid WSM buffer length"))
+ return -EINVAL;
+
+ if (WARN(!table, "Invalid mapping table for WSM"))
+ return -EINVAL;
+
+ /* no size > 1 MiB supported */
+ if (wsm_len > SZ_1M) {
+ MCDRV_DBG_ERROR(mcd, "size > 1 MiB");
+ return -EINVAL;
+ }
+
+ MCDRV_DBG_VERBOSE(mcd, "WSM addr=0x%p, len=0x%08x", wsm_buffer,
+ wsm_len);
+
+ /* calculate page usage */
+ virt_addr_page = (void *)(((unsigned long)(wsm_buffer)) & PAGE_MASK);
+ offset = (unsigned int) (((unsigned long)(wsm_buffer)) & (~PAGE_MASK));
+ nr_of_pages = PAGE_ALIGN(offset + wsm_len) / PAGE_SIZE;
+
+ MCDRV_DBG_VERBOSE(mcd, "virt addr page start=0x%p, pages=%d",
+ virt_addr_page, nr_of_pages);
+
+ /* MMU table can hold max 1MiB in 256 pages. */
+ if ((nr_of_pages * PAGE_SIZE) > SZ_1M) {
+ MCDRV_DBG_ERROR(mcd, "WSM paged exceed 1 MiB");
+ return -EINVAL;
+ }
+
+ mmutable = table->virt;
+ /*
+ * We use the memory for the MMU table to hold the page pointers
+ * and convert them later. This works, as everything comes
+ * down to a 32 bit value.
+ */
+ mmutable_as_array_of_pointers_to_page = (struct page **)mmutable;
+
+ /* Request comes from user space */
+ if (task != NULL && !is_vmalloc_addr(wsm_buffer)) {
+ /*
+ * lock user page in memory, so they do not get swapped
+ * out.
+ * REV axh: Kernel 2.6.27 added a new get_user_pages_fast()
+ * function, maybe it is called fast_gup() in some versions.
+ * handle user process doing a fork().
+ * Child should not get things.
+ * http://osdir.com/ml/linux-media/2009-07/msg00813.html
+ * http://lwn.net/Articles/275808/
+ */
+ ret = lock_pages(task, virt_addr_page, nr_of_pages,
+ mmutable_as_array_of_pointers_to_page);
+ if (ret != 0) {
+ MCDRV_DBG_ERROR(mcd, "lock_user_pages() failed");
+ return ret;
+ }
+ }
+ /* Request comes from kernel space(cont buffer) */
+ else if (task == NULL && !is_vmalloc_addr(wsm_buffer)) {
+ void *uaddr = wsm_buffer;
+ for (i = 0; i < nr_of_pages; i++) {
+ page = virt_to_page(uaddr);
+ if (!page) {
+ MCDRV_DBG_ERROR(mcd, "failed to map address");
+ return -EINVAL;
+ }
+ get_page(page);
+ mmutable_as_array_of_pointers_to_page[i] = page;
+ uaddr += PAGE_SIZE;
+ }
+ }
+ /* Request comes from kernel space(vmalloc buffer) */
+ else {
+ void *uaddr = wsm_buffer;
+ for (i = 0; i < nr_of_pages; i++) {
+ page = vmalloc_to_page(uaddr);
+ if (!page) {
+ MCDRV_DBG_ERROR(mcd, "failed to map address");
+ return -EINVAL;
+ }
+ get_page(page);
+ mmutable_as_array_of_pointers_to_page[i] = page;
+ uaddr += PAGE_SIZE;
+ }
+ }
+
+ table->pages = nr_of_pages;
+
+ /*
+ * create MMU Table entries.
+ * used_mmutable->table contains a list of page pointers here.
+ * For a proper cleanup we have to ensure that the following
+ * code either works and used_mmutable contains a valid MMU table
+ * - or fails and used_mmutable->table contains the list of page
+ * pointers.
+ * Any mixed contents will make cleanup difficult.
+ * Fill the table in reverse order as the table is used as input and
+ * output.
+ */
+ i = MC_ARM_MMU_TABLE_ENTRIES-1;
+ do {
+ if (i < nr_of_pages) {
+#ifdef LPAE_SUPPORT
+ uint64_t pte;
+#elif defined(CONFIG_ARM_LPAE) && !defined(LPAE_SUPPORT)
+ /* Nwd supports 64bit addresses, SWD only 32bit */
+ uint64_t pte64;
+ uint32_t pte;
+#else
+ uint32_t pte;
+#endif
+ page = mmutable_as_array_of_pointers_to_page[i];
+
+ /*
+ * create MMU table entry, see ARM MMU docu for details
+ * about flags stored in the lowest 12 bits.
+ * As a side reference, the Article
+ * "ARM's multiply-mapped memory mess"
+ * found in the collection at
+ * http://lwn.net/Articles/409032/
+ * is also worth reading.
+ */
+#ifdef LPAE_SUPPORT
+ pte = page_to_phys(page);
+ pte |= MMU_EXT_XN
+ | MMU_EXT_NG
+ | MMU_EXT_AF
+ | MMU_AP_RW_ALL
+ | MMU_NS
+ | MMU_CACHEABLE | MMU_BUFFERABLE
+ | MMU_TYPE_PAGE;
+#elif defined(CONFIG_ARM_LPAE) && !defined(LPAE_SUPPORT)
+ /*
+ * NWD uses 64bit addresses but SWD can handle only
+ * short descriptors
+ * and physical addresses not bigger than 4GB
+ */
+ pte64 = page_to_phys(page);
+ if ((pte64 >> 32) != 0) {
+ MCDRV_DBG_ERROR(mcd,
+ "physical addresses bigger than 4GB not supported");
+ return -EINVAL;
+ }
+ pte = (uint32_t)pte64;
+ pte |= MMU_EXT_AP1 | MMU_EXT_AP0
+ | MMU_CACHEABLE | MMU_BUFFERABLE
+ | MMU_TYPE_SMALL | MMU_TYPE_EXT | MMU_EXT_NG;
+#else
+ pte = page_to_phys(page);
+ pte |= MMU_EXT_AP1 | MMU_EXT_AP0
+ | MMU_CACHEABLE | MMU_BUFFERABLE
+ | MMU_TYPE_SMALL | MMU_TYPE_EXT | MMU_EXT_NG;
+#endif /* LPAE_SUPPORT */
+ /*
+ * Linux uses different mappings for SMP systems (the
+ * sharing flag is set for the pte). In order not to
+ * confuse things too much in MobiCore, make sure the
+ * shared buffers have the same flags.
+ * This should also be done on the SWd side.
+ */
+#ifdef CONFIG_SMP
+#ifdef LPAE_SUPPORT
+ pte |= MMU_EXT_SHARED;
+#else
+ pte |= MMU_EXT_SHARED | MMU_EXT_TEX(1);
+#endif /* LPAE_SUPPORT */
+#endif /* CONFIG_SMP */
+
+ mmutable->table_entries[i] = pte;
+ MCDRV_DBG_VERBOSE(mcd, "MMU entry %d: 0x%llx", i,
+ (u64)(pte));
+ } else {
+ /* ensure rest of table is empty */
+ mmutable->table_entries[i] = 0;
+ }
+ } while (i-- != 0);
+
+ return ret;
+}
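map_buffer() above first fills the table with struct page pointers from the pinning step and then converts each entry, in place and in reverse order, into an ARM page descriptor built from the flag bits defined at the top of this file. A minimal sketch of the non-LPAE small-page descriptor for one page; the helper name is illustrative, the flag macros are the ones defined above:

/* Hypothetical helper, not part of the driver. */
static uint32_t example_make_small_page_pte(struct page *page)
{
        uint32_t pte = page_to_phys(page);      /* bits [31:12]: page frame */

        pte |= MMU_EXT_AP1 | MMU_EXT_AP0        /* read/write at any level */
             | MMU_CACHEABLE | MMU_BUFFERABLE   /* normal cached memory */
             | MMU_TYPE_SMALL | MMU_TYPE_EXT    /* extended small-page entry */
             | MMU_EXT_NG;                      /* non-global mapping */
#ifdef CONFIG_SMP
        /* match the shareability attributes Linux uses for its own mappings */
        pte |= MMU_EXT_SHARED | MMU_EXT_TEX(1);
#endif
        return pte;
}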
+
+/*
+ * Remove a MMU table in a WSM container. Afterwards the container may be
+ * released. Assumes the table_lock and the lock are taken.
+ */
+static void unmap_buffers(struct mc_mmu_table *table)
+{
+ struct mmutable *mmutable;
+ int i;
+
+ if (WARN_ON(!table))
+ return;
+
+ /* found the table, now release the resources. */
+ MCDRV_DBG_VERBOSE(mcd,
+ "clear MMU table, phys_base=0x%llX,nr_of_pages=%d",
+ (u64)table->phys, table->pages);
+
+ mmutable = table->virt;
+
+ /* release all locked user space pages */
+ for (i = 0; i < table->pages; i++) {
+ /* convert physical entries from MMU table to page pointers */
+ struct page *page;
+ page = phys_to_page(mmutable->table_entries[i]);
+ release_page(page);
+ }
+
+ /* remember that all pages have been freed */
+ table->pages = 0;
+}
+
+/* Delete a used MMU table. Assumes the table_lock and the lock are taken */
+static void unmap_mmu_table(struct mc_mmu_table *table)
+{
+ /* Check if it's not locked by other processes too! */
+ if (!atomic_dec_and_test(&table->usage))
+ return;
+
+ /* release if NWd and SWd/MC no longer use it. */
+ unmap_buffers(table);
+ free_mmu_table(table);
+}
+
+int mc_free_mmu_table(struct mc_instance *instance, uint32_t handle)
+{
+ struct mc_mmu_table *table;
+ int ret = 0;
+
+ if (WARN(!instance, "No instance data available"))
+ return -EFAULT;
+
+ mutex_lock(&mem_ctx.table_lock);
+ table = find_mmu_table(handle);
+
+ if (table == NULL) {
+ MCDRV_DBG_VERBOSE(mcd, "entry not found");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ if (instance != table->owner && !is_daemon(instance)) {
+ MCDRV_DBG_ERROR(mcd, "instance does no own it");
+ ret = -EPERM;
+ goto err_unlock;
+ }
+ /* free table (if no further locks exist) */
+ unmap_mmu_table(table);
+err_unlock:
+ mutex_unlock(&mem_ctx.table_lock);
+
+ return ret;
+}
+
+int mc_lock_mmu_table(struct mc_instance *instance, uint32_t handle)
+{
+ int ret = 0;
+ struct mc_mmu_table *table = NULL;
+
+ if (WARN(!instance, "No instance data available"))
+ return -EFAULT;
+
+ mutex_lock(&mem_ctx.table_lock);
+ table = find_mmu_table(handle);
+
+ if (table == NULL) {
+ MCDRV_DBG_VERBOSE(mcd, "entry not found %u", handle);
+ ret = -EINVAL;
+ goto table_err;
+ }
+ if (instance != table->owner && !is_daemon(instance)) {
+ MCDRV_DBG_ERROR(mcd, "instance does no own it");
+ ret = -EPERM;
+ goto table_err;
+ }
+
+ /* lock entry */
+ atomic_inc(&table->usage);
+table_err:
+ mutex_unlock(&mem_ctx.table_lock);
+ return ret;
+}
+/*
+ * Allocate MMU table and map buffer into it.
+ * That is, create respective table entries.
+ */
+struct mc_mmu_table *mc_alloc_mmu_table(struct mc_instance *instance,
+ struct task_struct *task, void *wsm_buffer, unsigned int wsm_len)
+{
+ int ret = 0;
+ struct mc_mmu_table *table;
+
+ if (WARN(!instance, "No instance data available"))
+ return ERR_PTR(-EFAULT);
+
+ mutex_lock(&mem_ctx.table_lock);
+ table = alloc_mmu_table(instance);
+ if (IS_ERR(table)) {
+ MCDRV_DBG_ERROR(mcd, "alloc_mmu_table() failed");
+ ret = -ENOMEM;
+ goto err_no_mem;
+ }
+
+ /* create the MMU page for the WSM */
+ ret = map_buffer(task, wsm_buffer, wsm_len, table);
+
+ if (ret != 0) {
+ MCDRV_DBG_ERROR(mcd, "map_buffer() failed");
+ unmap_mmu_table(table);
+ goto err_no_mem;
+ }
+ MCDRV_DBG_VERBOSE(mcd,
+ "mapped buffer %p to table with handle %d @ 0x%llX",
+ wsm_buffer, table->handle, (u64)table->phys);
+
+ mutex_unlock(&mem_ctx.table_lock);
+ return table;
+err_no_mem:
+ mutex_unlock(&mem_ctx.table_lock);
+ return ERR_PTR(ret);
+}
+
+phys_addr_t mc_find_mmu_table(uint32_t handle, int32_t fd)
+{
+ phys_addr_t ret = 0;
+ struct mc_mmu_table *table = NULL;
+
+ mutex_lock(&mem_ctx.table_lock);
+ table = find_mmu_table(handle);
+
+ if (table == NULL) {
+ MCDRV_DBG_ERROR(mcd, "entry not found %u", handle);
+ ret = 0;
+ goto table_err;
+ }
+
+ /* It's safe here not to lock the instance since the owner of
+ * the table will be cleared only with the table lock taken */
+ if (!mc_check_owner_fd(table->owner, fd)) {
+ MCDRV_DBG_ERROR(mcd, "not valid owner %u", handle);
+ ret = 0;
+ goto table_err;
+ }
+
+ ret = table->phys;
+table_err:
+ mutex_unlock(&mem_ctx.table_lock);
+ return ret;
+}
+
+void mc_clean_mmu_tables(void)
+{
+ struct mc_mmu_table *table, *tmp;
+
+ mutex_lock(&mem_ctx.table_lock);
+ /* Check if some WSM is orphaned. */
+ list_for_each_entry_safe(table, tmp, &mem_ctx.mmu_tables, list) {
+ if (table->owner == NULL) {
+ MCDRV_DBG(mcd,
+ "cleariM MMU: p=0x%llX pages=%d",
+ (u64)table->phys,
+ table->pages);
+ unmap_mmu_table(table);
+ }
+ }
+ mutex_unlock(&mem_ctx.table_lock);
+}
+
+void mc_clear_mmu_tables(struct mc_instance *instance)
+{
+ struct mc_mmu_table *table, *tmp;
+
+ mutex_lock(&mem_ctx.table_lock);
+ /* Check if some WSM is still in use. */
+ list_for_each_entry_safe(table, tmp, &mem_ctx.mmu_tables, list) {
+ if (table->owner == instance) {
+ MCDRV_DBG(mcd, "release WSM MMU: p=0x%llX pages=%d",
+ (u64)table->phys,
+ table->pages);
+ /* unlock app usage and free or mark it as orphan */
+ table->owner = NULL;
+ unmap_mmu_table(table);
+ }
+ }
+ mutex_unlock(&mem_ctx.table_lock);
+}
+
+int mc_init_mmu_tables(void)
+{
+ /* init list for WSM MMU chunks. */
+ INIT_LIST_HEAD(&mem_ctx.mmu_tables_sets);
+
+ /* MMU table descriptor list. */
+ INIT_LIST_HEAD(&mem_ctx.mmu_tables);
+
+ /* MMU free table descriptor list. */
+ INIT_LIST_HEAD(&mem_ctx.free_mmu_tables);
+
+ mutex_init(&mem_ctx.table_lock);
+
+ return 0;
+}
+
+void mc_release_mmu_tables(void)
+{
+ struct mc_mmu_table *table;
+ /* Check if some WSM is still in use. */
+ list_for_each_entry(table, &mem_ctx.mmu_tables, list) {
+ WARN(1, "WSM MMU still in use: phys=0x%llX ,nr_of_pages=%d",
+ (u64)table->phys, table->pages);
+ }
+}
diff --git a/drivers/gud/MobiCoreDriver/mem.h b/drivers/gud/MobiCoreDriver/mem.h
new file mode 100644
index 0000000..5c9006a
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/mem.h
@@ -0,0 +1,139 @@
+/*
+ * MobiCore driver module (interface to the secure world, SWd).
+ *
+ * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ * <-- Copyright Trustonic Limited 2013 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MC_MEM_H_
+#define _MC_MEM_H_
+
+#ifdef LPAE_SUPPORT
+/*
+ * Number of page table entries in one MMU table. This is ARM specific, an
+ * MMU table covers 2 MiB by using 512 entries referring to 4KiB pages each.
+ */
+#define MC_ARM_MMU_TABLE_ENTRIES 512
+
+/* ARM level 3 (MMU) table with 512 entries. Size: 4k */
+struct mmutable {
+ uint64_t table_entries[MC_ARM_MMU_TABLE_ENTRIES];
+};
+
+/* There is 1 table in each page. */
+#define MMU_TABLES_PER_PAGE 1
+#else
+/*
+ * MobiCore specific page tables for world shared memory.
+ * Linux uses shadow page tables, see arch/arm/include/asm/pgtable-2level.
+ * MobiCore uses the default ARM format.
+ *
+ * Number of page table entries in one MMU table. This is ARM specific, an
+ * MMU table covers 1 MiB by using 256 entries referring to 4KiB pages each.
+ */
+#define MC_ARM_MMU_TABLE_ENTRIES 256
+
+/* ARM level 2 (MMU) table with 256 entries. Size: 1k */
+struct mmutable {
+ uint32_t table_entries[MC_ARM_MMU_TABLE_ENTRIES];
+};
+
+/* There are 4 tables in each page. */
+#define MMU_TABLES_PER_PAGE 4
+#endif
+
+/* Store for MMU_TABLES_PER_PAGE MMU tables in one 4 KiB page */
+struct mc_mmu_table_store {
+ struct mmutable table[MMU_TABLES_PER_PAGE];
+};
+
+/* Usage and maintenance information about mc_mmu_table_store */
+struct mc_mmu_tables_set {
+ struct list_head list;
+ /* kernel virtual address */
+ struct mc_mmu_table_store *kernel_virt;
+ /* physical address */
+ phys_addr_t phys;
+ /* pointer to page struct */
+ struct page *page;
+ /* How many tables from this set are used */
+ atomic_t used_tables;
+};
+
+/*
+ * MMU table allocated to the Daemon or a TLC describing a world shared
+ * buffer.
+ * When users map a malloc()ed area into SWd, a MMU table is allocated.
+ * In addition, the area of maximum 1MB virtual address space is mapped into
+ * the MMU table and a handle for this table is returned to the user.
+ */
+struct mc_mmu_table {
+ struct list_head list;
+ /* Table lock */
+ struct mutex lock;
+ /* handle as communicated to user mode */
+ unsigned int handle;
+ /* Number of references kept to this MMU table */
+ atomic_t usage;
+ /* owner of this MMU table */
+ struct mc_instance *owner;
+ /* set describing where our MMU table is stored */
+ struct mc_mmu_tables_set *set;
+ /* index into MMU table set */
+ unsigned int idx;
+ /* number of pages in the buffer */
+ unsigned int pages;
+ /* virtual address*/
+ void *virt;
+ /* physical address */
+ phys_addr_t phys;
+};
+
+/* MobiCore Driver Memory context data. */
+struct mc_mem_context {
+ struct mc_instance *daemon_inst;
+ /* Backing store for MMU tables */
+ struct list_head mmu_tables_sets;
+ /* Bookkeeping for used MMU tables */
+ struct list_head mmu_tables;
+ /* Bookkeeping for free MMU tables */
+ struct list_head free_mmu_tables;
+ /* mutex to synchronize access to the above lists */
+ struct mutex table_lock;
+};
+
+/*
+ * Allocate MMU table and map buffer into it.
+ * That is, create respective table entries.
+ */
+struct mc_mmu_table *mc_alloc_mmu_table(struct mc_instance *instance,
+ struct task_struct *task, void *wsm_buffer, unsigned int wsm_len);
+
+/* Delete all the MMU tables associated with an instance */
+void mc_clear_mmu_tables(struct mc_instance *instance);
+
+/* Release all orphaned MMU tables */
+void mc_clean_mmu_tables(void);
+
+/* Delete a used MMU table. */
+int mc_free_mmu_table(struct mc_instance *instance, uint32_t handle);
+
+/*
+ * Lock an MMU table - the daemon increments the refcount of the MMU table,
+ * marking it as in use by the SWd so it doesn't get released when the TLC dies.
+ */
+int mc_lock_mmu_table(struct mc_instance *instance, uint32_t handle);
+
+/* Return the physical address of an MMU table. */
+phys_addr_t mc_find_mmu_table(uint32_t handle, int32_t fd);
+/* Release all used MMU tables to Linux memory space */
+void mc_release_mmu_tables(void);
+
+/* Initialize the MMU table bookkeeping structures */
+int mc_init_mmu_tables(void);
+
+#endif /* _MC_MEM_H_ */
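The two build variants above only hold together if a store of MMU_TABLES_PER_PAGE tables fills exactly one page. The following compile-time checks are a minimal sketch, not part of mem.h, that makes this assumption explicit; it assumes 4 KiB pages and the entry counts defined above.

#include <linux/bug.h>
#include <linux/mm.h>

/* Hypothetical sanity checks: with LPAE_SUPPORT one 512-entry table of
 * 64-bit descriptors fills a page; without it, four 256-entry tables of
 * 32-bit descriptors do. */
static inline void mc_mem_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct mmutable) !=
		     PAGE_SIZE / MMU_TABLES_PER_PAGE);
	BUILD_BUG_ON(sizeof(struct mc_mmu_table_store) != PAGE_SIZE);
}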
diff --git a/drivers/gud/MobiCoreDriver/ops.c b/drivers/gud/MobiCoreDriver/ops.c
new file mode 100644
index 0000000..96b4f4f
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/ops.c
@@ -0,0 +1,398 @@
+/*
+ * MobiCore Driver Kernel Module.
+ *
+ * This module is written as a Linux device driver.
+ * This driver represents the command proxy on the lowest layer, from the
+ * secure world to the non secure world, and vice versa.
+ * This driver is located in the non secure world (Linux).
+ * This driver offers IOCTL commands, for access to the secure world, and has
+ * the interface from the secure world to the normal world.
+ * The access to the driver is possible with a file descriptor,
+ * which has to be created by the fd = open(/dev/mobicore) command.
+ *
+ * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ * <-- Copyright Trustonic Limited 2013 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/cpu.h>
+
+#include "main.h"
+#include "fastcall.h"
+#include "ops.h"
+#include "mem.h"
+#include "pm.h"
+#include "debug.h"
+
+/* MobiCore context data */
+static struct mc_context *ctx;
+#ifdef TBASE_CORE_SWITCHER
+static uint32_t active_cpu;
+
+static int mobicore_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu);
+static struct notifier_block mobicore_cpu_notifer = {
+ .notifier_call = mobicore_cpu_callback,
+};
+#endif
+
+static inline long smc(union fc_generic *fc)
+{
+ /* If a sleep request is pending, yields must be filtered out
+ * as they make no sense */
+ if (ctx->mcp)
+ if (ctx->mcp->flags.sleep_mode.sleep_req) {
+ if (fc->as_in.cmd == MC_SMC_N_YIELD)
+ return MC_FC_RET_ERR_INVALID;
+ }
+ return _smc(fc);
+}
+
+struct fastcall_work {
+#ifdef MC_FASTCALL_WORKER_THREAD
+ struct kthread_work work;
+#else
+ struct work_struct work;
+#endif
+ void *data;
+};
+
+#ifdef MC_FASTCALL_WORKER_THREAD
+static void fastcall_work_func(struct kthread_work *work);
+#else
+static void fastcall_work_func(struct work_struct *work);
+#endif
+
+
+#ifdef MC_FASTCALL_WORKER_THREAD
+
+static struct task_struct *fastcall_thread;
+static DEFINE_KTHREAD_WORKER(fastcall_worker);
+
+bool mc_fastcall(void *data)
+{
+ struct fastcall_work fc_work = {
+ KTHREAD_WORK_INIT(fc_work.work, fastcall_work_func),
+ .data = data,
+ };
+
+ if (!queue_kthread_work(&fastcall_worker, &fc_work.work))
+ return false;
+ flush_kthread_work(&fc_work.work);
+ return true;
+}
+
+int mc_fastcall_init(struct mc_context *context)
+{
+ int ret = 0;
+ ctx = context;
+
+ fastcall_thread = kthread_create(kthread_worker_fn, &fastcall_worker,
+ "mc_fastcall");
+ if (IS_ERR(fastcall_thread)) {
+ ret = PTR_ERR(fastcall_thread);
+ fastcall_thread = NULL;
+ MCDRV_DBG_ERROR(mcd, "cannot create fastcall wq (%d)", ret);
+ return ret;
+ }
+
+ wake_up_process(fastcall_thread);
+
+ /* this thread MUST run on CPU 0 at startup */
+ set_cpus_allowed(fastcall_thread, CPU_MASK_CPU0);
+#ifdef TBASE_CORE_SWITCHER
+ register_cpu_notifier(&mobicore_cpu_notifer);
+#endif
+ return 0;
+}
+
+void mc_fastcall_destroy(void)
+{
+ if (!IS_ERR_OR_NULL(fastcall_thread)) {
+ kthread_stop(fastcall_thread);
+ fastcall_thread = NULL;
+ }
+}
+#else
+
+bool mc_fastcall(void *data)
+{
+ struct fastcall_work work = {
+ .data = data,
+ };
+ INIT_WORK(&work.work, fastcall_work_func);
+ if (!schedule_work_on(0, &work.work))
+ return false;
+ flush_work(&work.work);
+ return true;
+}
+
+int mc_fastcall_init(struct mc_context *context)
+{
+ ctx = context;
+ return 0;
+};
+
+void mc_fastcall_destroy(void) {};
+#endif
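Both variants of mc_fastcall() above follow the same pattern: hand the fastcall data to a context pinned to CPU 0 (or, with TBASE_CORE_SWITCHER, to the currently active core) and block until the SMC has been issued there. A standalone sketch of the workqueue-based variant, assuming the caller may sleep (names are illustrative, not driver API):

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

struct pinned_work {
	struct work_struct work;
	int cpu;			/* filled in by the worker */
};

static void pinned_work_func(struct work_struct *work)
{
	struct pinned_work *pw = container_of(work, struct pinned_work, work);

	pw->cpu = raw_smp_processor_id();	/* executes on CPU 0 */
}

static bool run_on_cpu0(struct pinned_work *pw)
{
	INIT_WORK_ONSTACK(&pw->work, pinned_work_func);
	if (!schedule_work_on(0, &pw->work))	/* queue on CPU 0 */
		return false;
	flush_work(&pw->work);			/* wait until it has run */
	destroy_work_on_stack(&pw->work);
	return true;
}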
+
+#ifdef MC_FASTCALL_WORKER_THREAD
+static void fastcall_work_func(struct kthread_work *work)
+#else
+static void fastcall_work_func(struct work_struct *work)
+#endif
+{
+ struct fastcall_work *fc_work =
+ container_of(work, struct fastcall_work, work);
+ union fc_generic *fc_generic = fc_work->data;
+#ifdef TBASE_CORE_SWITCHER
+ uint32_t cpu_swap = 0, new_cpu;
+ uint32_t cpu_id[] = CPU_IDS;
+#endif
+
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+ mc_pm_clock_enable();
+#endif
+
+
+ if (fc_generic == NULL)
+ return;
+#ifdef TBASE_CORE_SWITCHER
+ if (fc_generic->as_in.cmd == MC_FC_SWITCH_CORE) {
+ cpu_swap = 1;
+ new_cpu = fc_generic->as_in.param[0];
+ fc_generic->as_in.param[0] = cpu_id[fc_generic->as_in.param[0]];
+ }
+#endif
+ smc(fc_work->data);
+#ifdef TBASE_CORE_SWITCHER
+ if (cpu_swap) {
+ if (fc_generic->as_out.ret == 0) {
+ cpumask_t cpu;
+ active_cpu = new_cpu;
+ MCDRV_DBG(mcd, "CoreSwap ok %d -> %d\n",
+ raw_smp_processor_id(), active_cpu);
+ cpumask_clear(&cpu);
+ cpumask_set_cpu(active_cpu, &cpu);
+#ifdef MC_FASTCALL_WORKER_THREAD
+ set_cpus_allowed(fastcall_thread, cpu);
+#endif
+ } else {
+ MCDRV_DBG(mcd, "CoreSwap failed %d -> %d\n",
+ raw_smp_processor_id(),
+ fc_generic->as_in.param[0]);
+ }
+ }
+#endif
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+ mc_pm_clock_disable();
+#endif
+}
+
+int mc_info(uint32_t ext_info_id, uint32_t *state, uint32_t *ext_info)
+{
+ int ret = 0;
+ union mc_fc_info fc_info;
+
+ MCDRV_DBG_VERBOSE(mcd, "enter");
+
+ memset(&fc_info, 0, sizeof(fc_info));
+ fc_info.as_in.cmd = MC_FC_INFO;
+ fc_info.as_in.ext_info_id = ext_info_id;
+
+ MCDRV_DBG(mcd, "<- cmd=0x%08x, ext_info_id=0x%08x",
+ fc_info.as_in.cmd, fc_info.as_in.ext_info_id);
+
+ mc_fastcall(&(fc_info.as_generic));
+
+ MCDRV_DBG(mcd,
+ "-> r=0x%08x ret=0x%08x state=0x%08x "
+ "ext_info=0x%08x",
+ fc_info.as_out.resp,
+ fc_info.as_out.ret,
+ fc_info.as_out.state,
+ fc_info.as_out.ext_info);
+
+ ret = convert_fc_ret(fc_info.as_out.ret);
+
+ *state = fc_info.as_out.state;
+ *ext_info = fc_info.as_out.ext_info;
+
+ MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
+
+ return ret;
+}
+
+#ifdef TBASE_CORE_SWITCHER
+int mc_switch_core(uint32_t core_num)
+{
+ int32_t ret = 0;
+ union mc_fc_swich_core fc_switch_core;
+
+ if (!cpu_online(core_num))
+ return 1;
+
+ MCDRV_DBG_VERBOSE(mcd, "enter\n");
+
+ memset(&fc_switch_core, 0, sizeof(fc_switch_core));
+ fc_switch_core.as_in.cmd = MC_FC_SWITCH_CORE;
+
+ if (core_num < COUNT_OF_CPUS)
+ fc_switch_core.as_in.core_id = core_num;
+ else
+ fc_switch_core.as_in.core_id = 0;
+
+ MCDRV_DBG(
+ mcd, "<- cmd=0x%08x, core_num=0x%08x, "
+ "active_cpu=0x%08x, active_cpu=0x%08x\n",
+ fc_switch_core.as_in.cmd,
+ fc_switch_core.as_in.core_id,
+ core_num, active_cpu);
+ mc_fastcall(&(fc_switch_core.as_generic));
+
+ ret = convert_fc_ret(fc_switch_core.as_out.ret);
+
+ MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
+
+ return ret;
+}
+
+void mc_cpu_offfline(int cpu)
+{
+ if (active_cpu == cpu) {
+ int i;
+ /* Choose the first online CPU and switch! */
+ for_each_online_cpu(i) {
+ if (i == cpu) {
+ MCDRV_DBG(mcd, "Skipping CPU %d\n", cpu);
+ continue;
+ }
+ MCDRV_DBG(mcd, "CPU %d is dying, switching to %d\n",
+ cpu, i);
+ mc_switch_core(i);
+ break;
+ }
+ } else {
+ MCDRV_DBG(mcd, "not active CPU, no action taken\n");
+ }
+}
+
+static int mobicore_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ dev_info(mcd, "Cpu %u is going to die\n", cpu);
+ mc_cpu_offfline(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ dev_info(mcd, "Cpu %u is dead\n", cpu);
+ break;
+ }
+ return NOTIFY_OK;
+}
+#endif
+
+/* Yield to MobiCore */
+int mc_yield(void)
+{
+ int ret = 0;
+ union fc_generic yield;
+
+ MCDRV_DBG_VERBOSE(mcd, "enter");
+ memset(&yield, 0, sizeof(yield));
+ yield.as_in.cmd = MC_SMC_N_YIELD;
+ mc_fastcall(&yield);
+ ret = convert_fc_ret(yield.as_out.ret);
+
+ return ret;
+}
+
+/* call common notify */
+int mc_nsiq(void)
+{
+ int ret = 0;
+ union fc_generic nsiq;
+ MCDRV_DBG_VERBOSE(mcd, "enter");
+ memset(&nsiq, 0, sizeof(nsiq));
+ nsiq.as_in.cmd = MC_SMC_N_SIQ;
+ mc_fastcall(&nsiq);
+ ret = convert_fc_ret(nsiq.as_out.ret);
+ return ret;
+}
+
+/* call common notify */
+int _nsiq(void)
+{
+ int ret = 0;
+ union fc_generic nsiq;
+ MCDRV_DBG_VERBOSE(mcd, "enter");
+ memset(&nsiq, 0, sizeof(nsiq));
+ nsiq.as_in.cmd = MC_SMC_N_SIQ;
+ _smc(&nsiq);
+ ret = convert_fc_ret(nsiq.as_out.ret);
+ return ret;
+}
+
+/* Call the INIT fastcall to setup MobiCore initialization */
+int mc_init(phys_addr_t base, uint32_t nq_length,
+ uint32_t mcp_offset, uint32_t mcp_length)
+{
+ int ret = 0;
+ union mc_fc_init fc_init;
+ uint64_t base_addr = (uint64_t)base;
+ uint32_t base_high = (uint32_t)(base_addr >> 32);
+
+ MCDRV_DBG_VERBOSE(mcd, "enter");
+
+ memset(&fc_init, 0, sizeof(fc_init));
+
+ fc_init.as_in.cmd = MC_FC_INIT;
+ /* base address of mci buffer 4KB aligned */
+ fc_init.as_in.base = (uint32_t)base_addr;
+ /* notification buffer start/length [16:16] [start, length] */
+ fc_init.as_in.nq_info = ((base_high & 0xFFFF) << 16) |
+ (nq_length & 0xFFFF);
+ /* mcp buffer start/length [16:16] [start, length] */
+ fc_init.as_in.mcp_info = (mcp_offset << 16) | (mcp_length & 0xFFFF);
+
+ /*
+ * Set KMOD notification queue to start of MCI
+ * mciInfo was already set up in mmap
+ */
+ MCDRV_DBG(mcd,
+ "cmd=0x%08x, base=0x%08x,nq_info=0x%08x, mcp_info=0x%08x",
+ fc_init.as_in.cmd, fc_init.as_in.base, fc_init.as_in.nq_info,
+ fc_init.as_in.mcp_info);
+ mc_fastcall(&fc_init.as_generic);
+ MCDRV_DBG(mcd, "out cmd=0x%08x, ret=0x%08x", fc_init.as_out.resp,
+ fc_init.as_out.ret);
+
+ ret = convert_fc_ret(fc_init.as_out.ret);
+
+ MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
+
+ return ret;
+}
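nq_info and mcp_info each pack two 16-bit fields into a single 32-bit word. The helpers below are a hedged illustration, not driver API, of that [hi:16][lo:16] packing and of why the masking has to be a bitwise AND rather than a logical one:

#include <linux/types.h>

/* Pack two 16-bit values into one [hi:16][lo:16] word, the layout mc_init()
 * uses for nq_info and mcp_info. Writing 'hi && 0xFFFF' instead would
 * collapse 'hi' to 0 or 1 before the shift. */
static inline u32 mc_pack16(u32 hi, u32 lo)
{
	return ((hi & 0xFFFF) << 16) | (lo & 0xFFFF);
}

static inline void mc_unpack16(u32 packed, u32 *hi, u32 *lo)
{
	*hi = packed >> 16;
	*lo = packed & 0xFFFF;
}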
+
+/* Return MobiCore driver version */
+uint32_t mc_get_version(void)
+{
+ MCDRV_DBG(mcd, "MobiCore driver version is %i.%i",
+ MCDRVMODULEAPI_VERSION_MAJOR,
+ MCDRVMODULEAPI_VERSION_MINOR);
+
+ return MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
+ MCDRVMODULEAPI_VERSION_MINOR);
+}
diff --git a/drivers/gud/mobicore_driver/ops.h b/drivers/gud/MobiCoreDriver/ops.h
similarity index 78%
rename from drivers/gud/mobicore_driver/ops.h
rename to drivers/gud/MobiCoreDriver/ops.h
index 910c1f4..f04eb3e 100644
--- a/drivers/gud/mobicore_driver/ops.h
+++ b/drivers/gud/MobiCoreDriver/ops.h
@@ -21,10 +21,13 @@
uint32_t mc_get_version(void);
int mc_info(uint32_t ext_info_id, uint32_t *state, uint32_t *ext_info);
-int mc_init(uint32_t base, uint32_t nq_offset, uint32_t nq_length,
- uint32_t mcp_offset, uint32_t mcp_length);
+int mc_init(phys_addr_t base, uint32_t nq_length, uint32_t mcp_offset,
+ uint32_t mcp_length);
+#ifdef TBASE_CORE_SWITCHER
+int mc_switch_core(uint32_t core_num);
+#endif
-void mc_fastcall(void *data);
+bool mc_fastcall(void *data);
int mc_fastcall_init(struct mc_context *context);
void mc_fastcall_destroy(void);
diff --git a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h b/drivers/gud/MobiCoreDriver/platforms/MSM8960_SURF_STD/platform.h
similarity index 85%
rename from drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
rename to drivers/gud/MobiCoreDriver/platforms/MSM8960_SURF_STD/platform.h
index 7854fc5..72ea3ed 100644
--- a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
+++ b/drivers/gud/MobiCoreDriver/platforms/MSM8960_SURF_STD/platform.h
@@ -36,20 +36,12 @@
}
/* Enable mobicore mem traces */
-/* #define MC_MEM_TRACES */
+#define MC_MEM_TRACES
/* Enable the use of vm_munmap instead of the deprecated do_munmap
* and other 3.7 features
*/
-#ifndef CONFIG_ARCH_MSM8960
#define MC_VM_UNMAP
-#endif
-
-
-#if defined(CONFIG_ARCH_MSM8974) || defined(CONFIG_ARCH_MSM8226)
-/* Perform clock enable/disable */
-#define MC_CRYPTO_CLOCK_MANAGEMENT
-#endif
/* Enable Power Management for Crypto Engine */
#define MC_CRYPTO_CLOCK_MANAGEMENT
diff --git a/drivers/gud/mobicore_driver/pm.c b/drivers/gud/MobiCoreDriver/pm.c
similarity index 66%
rename from drivers/gud/mobicore_driver/pm.c
rename to drivers/gud/MobiCoreDriver/pm.c
index 55a1ef7..40365ef 100644
--- a/drivers/gud/mobicore_driver/pm.c
+++ b/drivers/gud/MobiCoreDriver/pm.c
@@ -46,7 +46,7 @@
if (!ctx->mcp)
return false;
- if (!ctx->mcp->flags.sleep_mode.ReadyToSleep & READY_TO_SLEEP)
+ if (!(ctx->mcp->flags.sleep_mode.ready_to_sleep & READY_TO_SLEEP))
return false;
return true;
@@ -57,7 +57,7 @@
if (!ctx->mcp)
return;
- ctx->mcp->flags.sleep_mode.SleepReq = REQ_TO_SLEEP;
+ ctx->mcp->flags.sleep_mode.sleep_req = REQ_TO_SLEEP;
_nsiq();
}
DECLARE_WORK(suspend_work, mc_suspend_handler);
@@ -66,9 +66,9 @@
{
MCDRV_DBG(mcd, "MobiCore IDLE=%d!", flags->schedule);
MCDRV_DBG(mcd,
- "MobiCore Request Sleep=%d!", flags->sleep_mode.SleepReq);
+ "MobiCore Request Sleep=%d!", flags->sleep_mode.sleep_req);
MCDRV_DBG(mcd,
- "MobiCore Sleep Ready=%d!", flags->sleep_mode.ReadyToSleep);
+ "MobiCore Sleep Ready=%d!", flags->sleep_mode.ready_to_sleep);
}
static int mc_suspend_notifier(struct notifier_block *nb,
@@ -96,12 +96,12 @@
*/
dump_sleep_params(&mcp->flags);
if (!sleep_ready()) {
- ctx->mcp->flags.sleep_mode.SleepReq = REQ_TO_SLEEP;
+ ctx->mcp->flags.sleep_mode.sleep_req = REQ_TO_SLEEP;
schedule_work_on(0, &suspend_work);
flush_work(&suspend_work);
if (!sleep_ready()) {
dump_sleep_params(&mcp->flags);
- ctx->mcp->flags.sleep_mode.SleepReq = 0;
+ ctx->mcp->flags.sleep_mode.sleep_req = 0;
MCDRV_DBG_ERROR(mcd, "MobiCore can't SLEEP!");
return NOTIFY_BAD;
}
@@ -109,7 +109,7 @@
break;
case PM_POST_SUSPEND:
MCDRV_DBG(mcd, "Resume MobiCore system!");
- ctx->mcp->flags.sleep_mode.SleepReq = 0;
+ ctx->mcp->flags.sleep_mode.sleep_req = 0;
break;
default:
break;
@@ -121,57 +121,6 @@
.notifier_call = mc_suspend_notifier,
};
-#ifdef MC_BL_NOTIFIER
-
-static int bL_switcher_notifier_handler(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- unsigned int mpidr, cpu, cluster;
- struct mc_mcp_buffer *mcp = ctx->mcp;
-
- if (!mcp)
- return 0;
-
- asm volatile ("mrc\tp15, 0, %0, c0, c0, 5" : "=r" (mpidr));
- cpu = mpidr & 0x3;
- cluster = (mpidr >> 8) & 0xf;
- MCDRV_DBG(mcd, "%s switching!!, cpu: %u, Out=%u\n",
- (event == SWITCH_ENTER ? "Before" : "After"), cpu, cluster);
-
- if (cpu != 0)
- return 0;
-
- switch (event) {
- case SWITCH_ENTER:
- if (!sleep_ready()) {
- ctx->mcp->flags.sleep_mode.SleepReq = REQ_TO_SLEEP;
- _nsiq();
- /* By this time we should be ready for sleep or we are
- * in the middle of something important */
- if (!sleep_ready()) {
- dump_sleep_params(&mcp->flags);
- MCDRV_DBG(mcd,
- "MobiCore: Don't allow switch!\n");
- ctx->mcp->flags.sleep_mode.SleepReq = 0;
- return -EPERM;
- }
- }
- break;
- case SWITCH_EXIT:
- ctx->mcp->flags.sleep_mode.SleepReq = 0;
- break;
- default:
- MCDRV_DBG(mcd, "MobiCore: Unknown switch event!\n");
- }
-
- return 0;
-}
-
-static struct notifier_block switcher_nb = {
- .notifier_call = bL_switcher_notifier_handler,
-};
-#endif
-
int mc_pm_initialize(struct mc_context *context)
{
int ret = 0;
@@ -180,12 +129,7 @@
ret = register_pm_notifier(&mc_notif_block);
if (ret)
- MCDRV_DBG_ERROR(mcd, "device pm register failed\n");
-#ifdef MC_BL_NOTIFIER
- if (register_bL_swicher_notifier(&switcher_nb))
- MCDRV_DBG_ERROR(mcd,
- "Failed to register to bL_switcher_notifier\n");
-#endif
+ MCDRV_DBG_ERROR(mcd, "device pm register failed");
return ret;
}
@@ -194,15 +138,16 @@
{
int ret = unregister_pm_notifier(&mc_notif_block);
if (ret)
- MCDRV_DBG_ERROR(mcd, "device pm unregister failed\n");
-#ifdef MC_BL_NOTIFIER
- ret = unregister_bL_swicher_notifier(&switcher_nb);
- if (ret)
- MCDRV_DBG_ERROR(mcd, "device bl unregister failed\n");
-#endif
+ MCDRV_DBG_ERROR(mcd, "device pm unregister failed");
return ret;
}
+bool mc_pm_sleep_ready(void)
+{
+ if (ctx == 0)
+ return true;
+ return sleep_ready();
+}
#endif /* MC_PM_RUNTIME */
#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
@@ -215,7 +160,7 @@
mc_ce_core_clk = clk_get(mcd, "core_clk");
if (IS_ERR(mc_ce_core_clk)) {
ret = PTR_ERR(mc_ce_core_clk);
- MCDRV_DBG_ERROR(mcd, "cannot get core clock\n");
+ MCDRV_DBG_ERROR(mcd, "cannot get core clock");
goto error;
}
/* Get Interface clk */
@@ -223,7 +168,7 @@
if (IS_ERR(mc_ce_iface_clk)) {
clk_put(mc_ce_core_clk);
ret = PTR_ERR(mc_ce_iface_clk);
- MCDRV_DBG_ERROR(mcd, "cannot get iface clock\n");
+ MCDRV_DBG_ERROR(mcd, "cannot get iface clock");
goto error;
}
/* Get AXI clk */
@@ -232,7 +177,7 @@
clk_put(mc_ce_iface_clk);
clk_put(mc_ce_core_clk);
ret = PTR_ERR(mc_ce_bus_clk);
- MCDRV_DBG_ERROR(mcd, "cannot get AXI bus clock\n");
+ MCDRV_DBG_ERROR(mcd, "cannot get AXI bus clock");
goto error;
}
return ret;
@@ -263,17 +208,17 @@
rc = clk_prepare_enable(mc_ce_core_clk);
if (rc) {
- MCDRV_DBG_ERROR(mcd, "cannot enable clock\n");
+ MCDRV_DBG_ERROR(mcd, "cannot enable clock");
} else {
rc = clk_prepare_enable(mc_ce_iface_clk);
if (rc) {
clk_disable_unprepare(mc_ce_core_clk);
- MCDRV_DBG_ERROR(mcd, "cannot enable clock\n");
+ MCDRV_DBG_ERROR(mcd, "cannot enable clock");
} else {
rc = clk_prepare_enable(mc_ce_bus_clk);
if (rc) {
clk_disable_unprepare(mc_ce_iface_clk);
- MCDRV_DBG_ERROR(mcd, "cannot enable clock\n");
+ MCDRV_DBG_ERROR(mcd, "cannot enable clock");
}
}
}
diff --git a/drivers/gud/mobicore_driver/pm.h b/drivers/gud/MobiCoreDriver/pm.h
similarity index 94%
rename from drivers/gud/mobicore_driver/pm.h
rename to drivers/gud/MobiCoreDriver/pm.h
index 332da34..b71c603 100644
--- a/drivers/gud/mobicore_driver/pm.h
+++ b/drivers/gud/MobiCoreDriver/pm.h
@@ -13,10 +13,6 @@
#define _MC_PM_H_
#include "main.h"
-#ifdef MC_BL_NOTIFIER
-#include <asm/bL_switcher.h>
-#endif
-
#define NO_SLEEP_REQ 0
#define REQ_TO_SLEEP 1
@@ -39,5 +35,7 @@
int mc_pm_clock_enable(void);
/* Disable secure crypto clocks */
void mc_pm_clock_disable(void);
+/* Test if sleep is possible */
+bool mc_pm_sleep_ready(void);
#endif /* _MC_PM_H_ */
diff --git a/drivers/gud/mobicore_driver/public/mc_kernel_api.h b/drivers/gud/MobiCoreDriver/public/mc_kernel_api.h
similarity index 89%
rename from drivers/gud/mobicore_driver/public/mc_kernel_api.h
rename to drivers/gud/MobiCoreDriver/public/mc_kernel_api.h
index cca0636..15fd4a2 100644
--- a/drivers/gud/mobicore_driver/public/mc_kernel_api.h
+++ b/drivers/gud/MobiCoreDriver/public/mc_kernel_api.h
@@ -35,13 +35,12 @@
* @requested_size: memory size requested in bytes
* @handle: pointer to handle
* @kernel_virt_addr: virtual user start address
- * @phys_addr: physical start address
*
* Returns 0 if OK
*/
int mobicore_allocate_wsm(struct mc_instance *instance,
unsigned long requested_size, uint32_t *handle,
- void **virt_kernel_addr, void **phys_addr);
+ void **virt_kernel_addr);
/*
* mobicore_free() - Free a WSM buffer allocated with mobicore_allocate_wsm
@@ -58,12 +57,11 @@
* @addr: address of the buffer (NB it must be kernel virtual!)
* @len: buffer length (in bytes)
* @handle: unique handle
- * @phys: pointer for physical address of L2 table
*
* Returns 0 if no error
*/
int mobicore_map_vmem(struct mc_instance *instance, void *addr,
- uint32_t len, uint32_t *handle, uint32_t *phys);
+ uint32_t len, uint32_t *handle);
/*
* mobicore_unmap_vmem() - Unmap a virtual memory buffer from MobiCore
@@ -74,4 +72,12 @@
*/
int mobicore_unmap_vmem(struct mc_instance *instance, uint32_t handle);
+/*
+ * mobicore_sleep_ready() - Test if mobicore can sleep
+ *
+ * Returns true if mobicore can sleep, false if it can't sleep
+ */
+bool mobicore_sleep_ready(void);
+
+
#endif /* _MC_KERNEL_API_H_ */
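A kernel client of this API can use the new mobicore_sleep_ready() helper to veto its own suspend path while the secure world is still busy. The sketch below is illustrative only; my_client_suspend() is a hypothetical caller, not part of this header:

#include <linux/errno.h>
#include "mc_kernel_api.h"

static int my_client_suspend(void)
{
	/* Refuse to suspend while MobiCore is not ready to sleep. */
	if (!mobicore_sleep_ready())
		return -EBUSY;

	/* ...device-specific suspend work would go here... */
	return 0;
}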
diff --git a/drivers/gud/mobicore_driver/public/mc_linux.h b/drivers/gud/MobiCoreDriver/public/mc_linux.h
similarity index 81%
rename from drivers/gud/mobicore_driver/public/mc_linux.h
rename to drivers/gud/MobiCoreDriver/public/mc_linux.h
index af027dc..98e7af1 100644
--- a/drivers/gud/mobicore_driver/public/mc_linux.h
+++ b/drivers/gud/MobiCoreDriver/public/mc_linux.h
@@ -1,7 +1,7 @@
/*
* The MobiCore Driver Kernel Module is a Linux device driver, which represents
* the command proxy on the lowest layer to the secure world (Swd). Additional
- * services like memory allocation via mmap and generation of a L2 tables for
+ * services like memory allocation via mmap and generation of MMU tables for
* given virtual memory are also supported. IRQ functionality receives
* information from the SWd in the non secure world (NWd).
* As customary the driver is handled as linux device driver with "open",
@@ -55,8 +55,6 @@
* INIT request data to SWD
*/
struct mc_ioctl_init {
- /* notification buffer start/length [16:16] [start, length] */
- uint32_t nq_offset;
/* length of notification queue */
uint32_t nq_length;
/* mcp buffer start/length [16:16] [start, length] */
@@ -76,8 +74,7 @@
};
/*
- * Data exchange structure of the MC_IO_MAP_WSM, MC_IO_MAP_MCI, and
- * MC_IO_MAP_PWSM commands.
+ * Data exchange structure of the MC_IO_MAP_WSM and MC_IO_MAP_MCI commands.
*
* Allocate a contiguous memory buffer for a process.
* The physical address can be used as for later calls to mmap.
@@ -86,19 +83,19 @@
* already. I.e. Daemon was restarted.
*/
struct mc_ioctl_map {
- size_t len; /* Buffer length */
- uint32_t handle; /* WSM handle */
- unsigned long addr; /* Virtual address */
- unsigned long phys_addr;/* physical address of WSM (or NULL) */
- bool reused; /* if WSM memory was reused, or new allocated */
+ size_t len; /* Buffer length */
+ uint32_t handle; /* WSM handle */
+ uint64_t phys_addr; /* physical address of WSM (or 0) */
+ unsigned long addr; /* Virtual address */
+ bool reused; /* if WSM memory was reused, or new allocated */
};
/*
* Data exchange structure of the MC_IO_REG_WSM command.
*
- * Allocates a physical L2 table and maps the buffer into this page.
- * Returns the physical address of the L2 table.
- * The page alignment will be created and the appropriated pSize and pOffsetL2
+ * Allocates a physical MMU table and maps the buffer into this page.
+ * Returns the physical address of the MMU table.
+ * The page alignment will be created and the appropriate pSize and pOffsetMMU
* will be modified to the used values.
*/
struct mc_ioctl_reg_wsm {
@@ -106,19 +103,7 @@
uint32_t len; /* size of the virtual address space */
uint32_t pid; /* process id */
uint32_t handle; /* driver handle for locked memory */
- uint32_t table_phys; /* physical address of the L2 table */
-};
-
-
-/*
- * Data exchange structure of the MC_DRV_MODULE_FC_EXECUTE ioctl command.
- * internal, unsupported
- */
-struct mc_ioctl_execute {
- /* base address of mobicore binary */
- uint32_t phys_start_addr;
- /* length of DDR area */
- uint32_t length;
+ uint64_t table_phys; /* physical address of the MMU table */
};
/*
@@ -127,10 +112,10 @@
struct mc_ioctl_resolv_cont_wsm {
/* driver handle for buffer */
uint32_t handle;
- /* base address of memory */
- uint32_t phys;
/* length memory */
uint32_t length;
+ /* base address of memory */
+ uint64_t phys;
/* fd to owner of the buffer */
int32_t fd;
};
@@ -144,7 +129,7 @@
/* fd to owner of the buffer */
int32_t fd;
/* base address of memory */
- uint32_t phys;
+ uint64_t phys;
};
@@ -180,28 +165,24 @@
*/
#define MC_IO_FREE _IO(MC_IOC_MAGIC, 5)
/*
- * Creates a L2 Table of the given base address and the size of the
+ * Creates an MMU table for the given base address and the size of the
* data.
- * Parameter: mc_ioctl_app_reg_wsm_l2_params
+ * Parameter: mc_ioctl_reg_wsm
*/
#define MC_IO_REG_WSM _IOWR(MC_IOC_MAGIC, 6, struct mc_ioctl_reg_wsm)
#define MC_IO_UNREG_WSM _IO(MC_IOC_MAGIC, 7)
#define MC_IO_LOCK_WSM _IO(MC_IOC_MAGIC, 8)
#define MC_IO_UNLOCK_WSM _IO(MC_IOC_MAGIC, 9)
-#define MC_IO_EXECUTE _IOWR(MC_IOC_MAGIC, 10, struct mc_ioctl_execute)
/*
* Allocate contiguous memory for a process for later mapping with mmap.
- * MC_DRV_KMOD_MMAP_WSM usual operation, pages are registered in
+ * MC_IO_MAP_WSM usual operation, pages are registered in
* device structure and freed later.
- * MC_DRV_KMOD_MMAP_MCI get Instance of MCI, allocates or mmaps
+ * MC_IO_MAP_MCI gets an instance of the MCI; allocates or mmaps
+ * the MCI to the daemon
- * MC_DRV_KMOD_MMAP_PERSISTENTWSM special operation, without
- * registration of pages
*/
#define MC_IO_MAP_WSM _IOWR(MC_IOC_MAGIC, 11, struct mc_ioctl_map)
#define MC_IO_MAP_MCI _IOWR(MC_IOC_MAGIC, 12, struct mc_ioctl_map)
-#define MC_IO_MAP_PWSM _IOWR(MC_IOC_MAGIC, 13, struct mc_ioctl_map)
/*
* Clean orphaned WSM buffers. Only available to the daemon and should
@@ -215,7 +196,7 @@
#define MC_IO_CLEAN_WSM _IO(MC_IOC_MAGIC, 14)
/*
- * Get L2 phys address of a buffer handle allocated to the user.
+ * Get the MMU table physical address of a buffer handle allocated to the user.
* Only available to the daemon.
*/
#define MC_IO_RESOLVE_WSM _IOWR(MC_IOC_MAGIC, 15, \
diff --git a/drivers/gud/mobicore_driver/public/version.h b/drivers/gud/MobiCoreDriver/public/version.h
similarity index 100%
rename from drivers/gud/mobicore_driver/public/version.h
rename to drivers/gud/MobiCoreDriver/public/version.h
diff --git a/drivers/gud/MobiCoreKernelApi/Makefile b/drivers/gud/MobiCoreKernelApi/Makefile
new file mode 100644
index 0000000..9b37eea
--- /dev/null
+++ b/drivers/gud/MobiCoreKernelApi/Makefile
@@ -0,0 +1,52 @@
+#
+# This Makefile is called from the kernel make system
+ifeq ($(MODE),release)
+ ccflags-y += -O2 -DNDEBUG
+else # DEBUG
+ # "-O" is needed to expand inlines
+ ccflags-y += -O -g3 -DDEBUG
+endif # DEBUG/RELEASE
+
+ifdef MOBICORE_CFLAGS
+ ccflags-y +=$(MOBICORE_CFLAGS)
+endif
+
+# Set the extra symbols
+ifdef MCDRV_SYMBOLS_FILE
+ KBUILD_EXTRA_SYMBOLS=$(MCDRV_SYMBOLS_FILE)
+endif
+
+ifeq ($(PLATFORM), ARM_VE_A9X4_QEMU)
+ ccflags-y += -DMC_NETLINK_COMPAT_V37
+endif
+
+ifeq ($(PLATFORM), MSM8974_SURF_STD)
+ ccflags-y += -DMC_NETLINK_COMPAT_V37
+endif
+
+ifeq ($(PLATFORM), EXYNOS_5422_STD)
+ ccflags-y += -DMC_NETLINK_COMPAT_V37
+endif
+
+ifeq ($(PLATFORM), EXYNOS_5430_STD)
+ ccflags-y += -DMC_NETLINK_COMPAT_V37
+endif
+
+#EXTRA_CFLAGS += -DDEBUG -DDEBUG_VERBOSE
+#EXTRA_CFLAGS += -Wno-declaration-after-statement
+ccflags-y += -Wno-declaration-after-statement
+# add our module to kernel.
+obj-m += mcKernelApi.o
+
+mcKernelApi-objs := main.o clientlib.o device.o session.o connection.o
+
+clean:
+ rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions \
+ Module.markers Module.symvers modules.order
+
+depend .depend dep:
+ $(CC) $(CFLAGS) -M *.c > .depend
+
+ifeq (.depend,$(wildcard .depend))
+ include .depend
+endif
diff --git a/drivers/gud/MobiCoreKernelApi/build.sh b/drivers/gud/MobiCoreKernelApi/build.sh
new file mode 100644
index 0000000..86fe1b8
--- /dev/null
+++ b/drivers/gud/MobiCoreKernelApi/build.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+if [ -z $COMP_PATH_ROOT ]; then
+ echo "The build environment is not set!"
+ echo "Trying to source setupDrivers.sh automatically!"
+ source ../setupDrivers.sh || exit 1
+fi
+
+ROOT_PATH=$(dirname $(readlink -f $BASH_SOURCE))
+# These folders need to be relative to the kernel dir or absolute!
+PLATFORM=EXYNOS_4X12_STD
+CODE_INCLUDE=$(readlink -f $ROOT_PATH/Locals/Code)
+
+MOBICORE_DRIVER=$COMP_PATH_MobiCoreDriverMod
+MOBICORE_DAEMON=$COMP_PATH_MobiCoreDriverLib/Public
+MOBICORE_CFLAGS="-I$MOBICORE_DRIVER/Public -I$MOBICORE_DAEMON -I$COMP_PATH_MobiCore/inc/Mci -I$COMP_PATH_MobiCore/inc -I$CODE_INCLUDE/include -I$CODE_INCLUDE/public"
+MCDRV_SYMBOLS_FILE="$COMP_PATH_ROOT/MobiCoreDriverMod/Locals/Code/Module.symvers"
+
+if [ ! -f $MCDRV_SYMBOLS_FILE ]; then
+ echo "Please build the Mobicore Driver Module first!"
+ echo "Otherwise you will see warnings of missing symbols"
+fi
+
+# Clean first
+make -C $CODE_INCLUDE clean
+
+make -C $LINUX_PATH \
+ MODE=$MODE \
+ ARCH=arm \
+ CROSS_COMPILE=$CROSS_COMPILE \
+ M=$CODE_INCLUDE \
+ "MOBICORE_CFLAGS=$MOBICORE_CFLAGS" \
+ MCDRV_SYMBOLS_FILE=$MCDRV_SYMBOLS_FILE \
+ modules
diff --git a/drivers/gud/mobicore_kernelapi/clientlib.c b/drivers/gud/MobiCoreKernelApi/clientlib.c
similarity index 95%
rename from drivers/gud/mobicore_kernelapi/clientlib.c
rename to drivers/gud/MobiCoreKernelApi/clientlib.c
index 16b52e5..65b4a1c 100644
--- a/drivers/gud/mobicore_kernelapi/clientlib.c
+++ b/drivers/gud/MobiCoreKernelApi/clientlib.c
@@ -25,6 +25,7 @@
/* device list */
LIST_HEAD(devices);
+atomic_t device_usage = ATOMIC_INIT(0);
static struct mcore_device_t *resolve_device_id(uint32_t device_id)
{
@@ -71,14 +72,20 @@
do {
struct mcore_device_t *device = resolve_device_id(device_id);
if (device != NULL) {
- MCDRV_DBG_ERROR(mc_kapi,
- "Device %d already opened", device_id);
- mc_result = MC_DRV_ERR_INVALID_OPERATION;
+ MCDRV_DBG(mc_kapi,
+ "Device %d already opened\n", device_id);
+ atomic_inc(&device_usage);
+ mc_result = MC_DRV_OK;
break;
}
/* Open new connection to device */
dev_con = connection_new();
+ if (dev_con == NULL) {
+ mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+ break;
+ }
+
if (!connection_connect(dev_con, MC_DAEMON_PID)) {
MCDRV_DBG_ERROR(
mc_kapi,
@@ -144,6 +151,10 @@
/* there is no payload to read */
device = mcore_device_create(device_id, dev_con);
+ if (device == NULL) {
+ mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+ break;
+ }
if (!mcore_device_open(device, MC_DRV_MOD_DEVNODE_FULLPATH)) {
mcore_device_cleanup(device);
MCDRV_DBG_ERROR(mc_kapi,
@@ -154,6 +165,7 @@
}
add_device(device);
+ atomic_inc(&device_usage);
} while (false);
@@ -177,6 +189,12 @@
mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
break;
}
+ /* Check if it's not used by other modules */
+ if (!atomic_dec_and_test(&device_usage)) {
+ mc_result = MC_DRV_OK;
+ break;
+ }
+
struct connection *dev_con = device->connection;
/* Return if not all sessions have been closed */
@@ -274,12 +292,12 @@
}
struct connection *dev_con = device->connection;
- /* Get the physical address of the given TCI */
+ /* Get the wsm of the given TCI */
struct wsm *wsm =
mcore_device_find_contiguous_wsm(device, tci);
if (wsm == NULL) {
MCDRV_DBG_ERROR(mc_kapi,
- "Could not resolve TCI phy address ");
+ "Could not resolve TCI address ");
mc_result = MC_DRV_ERR_INVALID_PARAMETER;
break;
}
@@ -292,14 +310,14 @@
}
/* Prepare open session command */
- struct mc_drv_cmd_open_session_t cmdOpenSession = {
+ struct mc_drv_cmd_open_session_t cmd_open_session = {
{
MC_DRV_CMD_OPEN_SESSION
},
{
session->device_id,
*uuid,
- (uint32_t)(wsm->phys_addr) & 0xFFF,
+ (uint32_t)(wsm->virt_addr) & 0xFFF,
wsm->handle,
len
}
@@ -307,9 +325,9 @@
/* Transmit command data */
int len = connection_write_data(dev_con,
- &cmdOpenSession,
- sizeof(cmdOpenSession));
- if (len != sizeof(cmdOpenSession)) {
+ &cmd_open_session,
+ sizeof(cmd_open_session));
+ if (len != sizeof(cmd_open_session)) {
MCDRV_DBG_ERROR(mc_kapi,
"CMD_OPEN_SESSION writeData failed %d",
len);
@@ -370,6 +388,10 @@
/* Set up second channel for notifications */
struct connection *session_connection = connection_new();
+ if (session_connection == NULL) {
+ mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+ break;
+ }
if (!connection_connect(session_connection, MC_DAEMON_PID)) {
MCDRV_DBG_ERROR(
@@ -422,9 +444,13 @@
/* there is no payload. */
/* Session established, new session object must be created */
- mcore_device_create_new_session(device,
- session->session_id,
- session_connection);
+ if (!mcore_device_create_new_session(device,
+ session->session_id,
+ session_connection)) {
+ connection_cleanup(session_connection);
+ mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+ break;
+ }
} while (false);
@@ -706,7 +732,6 @@
MCDRV_DBG_VERBOSE(mc_kapi, "===%s()===", __func__);
do {
-
/* Get the device associated with the given session */
device = resolve_device_id(device_id);
if (device == NULL) {
@@ -805,7 +830,7 @@
{
session->session_id,
bulk_buf->handle,
- (uint32_t)bulk_buf->phys_addr_wsm_l2,
+ 0,
(uint32_t)(bulk_buf->virt_addr) & 0xFFF,
bulk_buf->len
}
@@ -819,8 +844,8 @@
/* Read command response */
struct mc_drv_response_header_t rsp_header;
int len = connection_read_datablock(dev_con,
- &rsp_header,
- sizeof(rsp_header));
+ &rsp_header,
+ sizeof(rsp_header));
if (len != sizeof(rsp_header)) {
MCDRV_DBG_ERROR(mc_kapi,
"CMD_MAP_BULK_BUF readRsp failed %d",
diff --git a/drivers/gud/mobicore_kernelapi/common.h b/drivers/gud/MobiCoreKernelApi/common.h
similarity index 100%
rename from drivers/gud/mobicore_kernelapi/common.h
rename to drivers/gud/MobiCoreKernelApi/common.h
diff --git a/drivers/gud/mobicore_kernelapi/connection.c b/drivers/gud/MobiCoreKernelApi/connection.c
similarity index 96%
rename from drivers/gud/mobicore_kernelapi/connection.c
rename to drivers/gud/MobiCoreKernelApi/connection.c
index 03288a0..0372b82 100644
--- a/drivers/gud/mobicore_kernelapi/connection.c
+++ b/drivers/gud/MobiCoreKernelApi/connection.c
@@ -28,6 +28,10 @@
struct connection *conn;
conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+ if (conn == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+ return NULL;
+ }
conn->sequence_magic = mcapi_unique_id();
mutex_init(&conn->data_lock);
sema_init(&conn->data_available_sem, SEM_NO_DATA_AVAILABLE);
@@ -36,14 +40,6 @@
return conn;
}
-struct connection *connection_create(int socket_descriptor, pid_t dest)
-{
- struct connection *conn = connection_new();
-
- conn->peer_pid = dest;
- return conn;
-}
-
void connection_cleanup(struct connection *conn)
{
if (!conn)
diff --git a/drivers/gud/mobicore_kernelapi/connection.h b/drivers/gud/MobiCoreKernelApi/connection.h
similarity index 95%
rename from drivers/gud/mobicore_kernelapi/connection.h
rename to drivers/gud/MobiCoreKernelApi/connection.h
index 6c3ff00..57e783b 100644
--- a/drivers/gud/mobicore_kernelapi/connection.h
+++ b/drivers/gud/MobiCoreKernelApi/connection.h
@@ -44,7 +44,6 @@
};
struct connection *connection_new(void);
-struct connection *connection_create(int socket_descriptor, pid_t dest);
void connection_cleanup(struct connection *conn);
bool connection_connect(struct connection *conn, pid_t dest);
size_t connection_read_datablock(struct connection *conn, void *buffer,
diff --git a/drivers/gud/mobicore_kernelapi/device.c b/drivers/gud/MobiCoreKernelApi/device.c
similarity index 85%
rename from drivers/gud/mobicore_kernelapi/device.c
rename to drivers/gud/MobiCoreKernelApi/device.c
index a176322..04db4c3 100644
--- a/drivers/gud/mobicore_kernelapi/device.c
+++ b/drivers/gud/MobiCoreKernelApi/device.c
@@ -18,16 +18,18 @@
#include "device.h"
#include "common.h"
-struct wsm *wsm_create(void *virt_addr, uint32_t len, uint32_t handle,
- void *phys_addr)
+static struct wsm *wsm_create(void *virt_addr, uint32_t len, uint32_t handle)
{
struct wsm *wsm;
wsm = kzalloc(sizeof(*wsm), GFP_KERNEL);
+ if (wsm == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+ return NULL;
+ }
wsm->virt_addr = virt_addr;
wsm->len = len;
wsm->handle = handle;
- wsm->phys_addr = phys_addr;
return wsm;
}
@@ -37,11 +39,15 @@
struct mcore_device_t *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+ return NULL;
+ }
dev->device_id = device_id;
dev->connection = connection;
INIT_LIST_HEAD(&dev->session_vector);
- INIT_LIST_HEAD(&dev->wsm_l2_vector);
+ INIT_LIST_HEAD(&dev->wsm_mmu_vector);
return dev;
}
@@ -63,7 +69,7 @@
}
/* Free all allocated WSM descriptors */
- list_for_each_safe(pos, q, &dev->wsm_l2_vector) {
+ list_for_each_safe(pos, q, &dev->wsm_mmu_vector) {
wsm = list_entry(pos, struct wsm, list);
list_del(pos);
kfree(wsm);
@@ -74,7 +80,7 @@
kfree(dev);
}
-bool mcore_device_open(struct mcore_device_t *dev, const char *deviceName)
+bool mcore_device_open(struct mcore_device_t *dev, const char *device_name)
{
dev->instance = mobicore_open();
return (dev->instance != NULL);
@@ -102,6 +108,8 @@
}
struct session *session =
session_create(session_id, dev->instance, connection);
+ if (session == NULL)
+ return false;
list_add_tail(&(session->list), &(dev->session_vector));
return true;
}
@@ -154,16 +162,19 @@
/* Allocate shared memory */
void *virt_addr;
uint32_t handle;
- void *phys_addr;
int ret = mobicore_allocate_wsm(dev->instance, len, &handle,
- &virt_addr, &phys_addr);
+ &virt_addr);
if (ret != 0)
break;
- /* Register (vaddr,paddr) with device */
- wsm = wsm_create(virt_addr, len, handle, phys_addr);
+ /* Register (vaddr) with device */
+ wsm = wsm_create(virt_addr, len, handle);
+ if (wsm == NULL) {
+ mobicore_free_wsm(dev->instance, handle);
+ break;
+ }
- list_add_tail(&(wsm->list), &(dev->wsm_l2_vector));
+ list_add_tail(&(wsm->list), &(dev->wsm_mmu_vector));
} while (0);
@@ -177,7 +188,7 @@
struct wsm *tmp;
struct list_head *pos;
- list_for_each(pos, &dev->wsm_l2_vector) {
+ list_for_each(pos, &dev->wsm_mmu_vector) {
tmp = list_entry(pos, struct wsm, list);
if (tmp == wsm) {
ret = true;
@@ -205,7 +216,7 @@
struct wsm *wsm;
struct list_head *pos;
- list_for_each(pos, &dev->wsm_l2_vector) {
+ list_for_each(pos, &dev->wsm_mmu_vector) {
wsm = list_entry(pos, struct wsm, list);
if (virt_addr == wsm->virt_addr)
return wsm;
diff --git a/drivers/gud/mobicore_kernelapi/device.h b/drivers/gud/MobiCoreKernelApi/device.h
similarity index 95%
rename from drivers/gud/mobicore_kernelapi/device.h
rename to drivers/gud/MobiCoreKernelApi/device.h
index 16626bd..c795ee8 100644
--- a/drivers/gud/mobicore_kernelapi/device.h
+++ b/drivers/gud/MobiCoreKernelApi/device.h
@@ -21,7 +21,7 @@
struct mcore_device_t {
/* MobiCore Trustlet session associated with the device */
struct list_head session_vector;
- struct list_head wsm_l2_vector; /* WSM L2 Table */
+ struct list_head wsm_mmu_vector; /* WSM L2 or L3 Table */
uint32_t device_id; /* Device identifier */
struct connection *connection; /* The device connection */
@@ -36,7 +36,7 @@
void mcore_device_cleanup(struct mcore_device_t *dev);
-bool mcore_device_open(struct mcore_device_t *dev, const char *deviceName);
+bool mcore_device_open(struct mcore_device_t *dev, const char *device_name);
void mcore_device_close(struct mcore_device_t *dev);
bool mcore_device_has_sessions(struct mcore_device_t *dev);
bool mcore_device_create_new_session(
diff --git a/drivers/gud/mobicore_kernelapi/include/mcinq.h b/drivers/gud/MobiCoreKernelApi/include/mcinq.h
similarity index 100%
rename from drivers/gud/mobicore_kernelapi/include/mcinq.h
rename to drivers/gud/MobiCoreKernelApi/include/mcinq.h
diff --git a/drivers/gud/mobicore_kernelapi/include/mcuuid.h b/drivers/gud/MobiCoreKernelApi/include/mcuuid.h
similarity index 100%
rename from drivers/gud/mobicore_kernelapi/include/mcuuid.h
rename to drivers/gud/MobiCoreKernelApi/include/mcuuid.h
diff --git a/drivers/gud/mobicore_kernelapi/main.c b/drivers/gud/MobiCoreKernelApi/main.c
similarity index 97%
rename from drivers/gud/mobicore_kernelapi/main.c
rename to drivers/gud/MobiCoreKernelApi/main.c
index 8943c26..5da3ef7 100644
--- a/drivers/gud/mobicore_kernelapi/main.c
+++ b/drivers/gud/MobiCoreKernelApi/main.c
@@ -150,6 +150,10 @@
dev_info(mc_kapi, "Mobicore API module initialized!\n");
mod_ctx = kzalloc(sizeof(struct mc_kernelapi_ctx), GFP_KERNEL);
+ if (mod_ctx == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+ return -ENOMEM;
+ }
#ifdef MC_NETLINK_COMPAT_V37
mod_ctx->sk = netlink_kernel_create(&init_net, MC_DAEMON_NETLINK,
&cfg);
diff --git a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_api.h b/drivers/gud/MobiCoreKernelApi/public/mobicore_driver_api.h
similarity index 100%
rename from drivers/gud/mobicore_kernelapi/public/mobicore_driver_api.h
rename to drivers/gud/MobiCoreKernelApi/public/mobicore_driver_api.h
diff --git a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h b/drivers/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h
similarity index 99%
rename from drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
rename to drivers/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h
index eaf7e6c..993d581 100644
--- a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
+++ b/drivers/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h
@@ -171,7 +171,7 @@
struct mc_drv_cmd_map_bulk_mem_payload_t {
uint32_t session_id;
uint32_t handle;
- uint32_t phys_addr_l2;
+ uint32_t rfu;
uint32_t offset_payload;
uint32_t len_bulk_mem;
};
diff --git a/drivers/gud/mobicore_kernelapi/session.c b/drivers/gud/MobiCoreKernelApi/session.c
similarity index 86%
rename from drivers/gud/mobicore_kernelapi/session.c
rename to drivers/gud/MobiCoreKernelApi/session.c
index dae2c00..2ea50e8 100644
--- a/drivers/gud/mobicore_kernelapi/session.c
+++ b/drivers/gud/MobiCoreKernelApi/session.c
@@ -14,15 +14,18 @@
#include "session.h"
struct bulk_buffer_descriptor *bulk_buffer_descriptor_create(
- void *virt_addr, uint32_t len, uint32_t handle, void *phys_addr_wsm_l2)
+ void *virt_addr, uint32_t len, uint32_t handle)
{
struct bulk_buffer_descriptor *desc;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (desc == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+ return NULL;
+ }
desc->virt_addr = virt_addr;
desc->len = len;
desc->handle = handle;
- desc->phys_addr_wsm_l2 = phys_addr_wsm_l2;
return desc;
}
@@ -33,6 +36,10 @@
struct session *session;
session = kzalloc(sizeof(*session), GFP_KERNEL);
+ if (session == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+ return NULL;
+ }
session->session_id = session_id;
session->instance = instance;
session->notification_connection = connection;
@@ -47,19 +54,14 @@
{
struct bulk_buffer_descriptor *bulk_buf_descr;
struct list_head *pos, *q;
- unsigned int phys_addr_wsm_l2;
/* Unmap still mapped buffers */
list_for_each_safe(pos, q, &session->bulk_buffer_descriptors) {
bulk_buf_descr =
list_entry(pos, struct bulk_buffer_descriptor, list);
- phys_addr_wsm_l2 =
- (unsigned int)bulk_buf_descr->phys_addr_wsm_l2;
-
MCDRV_DBG_VERBOSE(mc_kapi,
- "Phys Addr of L2 Table = 0x%X, handle= %d",
- phys_addr_wsm_l2,
+ "handle= %d",
bulk_buf_descr->handle);
/* ignore any error, as we cannot do anything in this case. */
@@ -110,11 +112,10 @@
* Prepare the interface structure for memory registration in
* Kernel Module
*/
- uint32_t l2_table_phys;
uint32_t handle;
int ret = mobicore_map_vmem(session->instance, buf, len,
- &handle, &l2_table_phys);
+ &handle);
if (ret != 0) {
MCDRV_DBG_ERROR(mc_kapi,
@@ -123,15 +124,15 @@
break;
}
- MCDRV_DBG_VERBOSE(mc_kapi,
- "Phys Addr of L2 Table = 0x%X, handle=%d",
- (unsigned int)l2_table_phys, handle);
+ MCDRV_DBG_VERBOSE(mc_kapi, "handle=%d", handle);
/* Create new descriptor */
bulk_buf_descr =
- bulk_buffer_descriptor_create(buf, len,
- handle,
- (void *)l2_table_phys);
+ bulk_buffer_descriptor_create(buf, len, handle);
+ if (bulk_buf_descr == NULL) {
+ mobicore_unmap_vmem(session->instance, handle);
+ break;
+ }
/* Add to vector of descriptors */
list_add_tail(&(bulk_buf_descr->list),
@@ -165,8 +166,7 @@
MCDRV_DBG_ERROR(mc_kapi, "Virtual Address not found");
ret = false;
} else {
- MCDRV_DBG_VERBOSE(mc_kapi, "WsmL2 phys=0x%X, handle=%d",
- (unsigned int)bulk_buf->phys_addr_wsm_l2,
+ MCDRV_DBG_VERBOSE(mc_kapi, "Wsm handle=%d",
bulk_buf->handle);
/* ignore any error, as we cannot do anything */
diff --git a/drivers/gud/mobicore_kernelapi/session.h b/drivers/gud/MobiCoreKernelApi/session.h
similarity index 96%
rename from drivers/gud/mobicore_kernelapi/session.h
rename to drivers/gud/MobiCoreKernelApi/session.h
index 4a834e5..edcadcd 100644
--- a/drivers/gud/mobicore_kernelapi/session.h
+++ b/drivers/gud/MobiCoreKernelApi/session.h
@@ -19,9 +19,6 @@
uint32_t len; /* Length of the Bulk buffer */
uint32_t handle;
- /* The physical address of the L2 table of the Bulk buffer*/
- void *phys_addr_wsm_l2;
-
/* The list param for using the kernel lists*/
struct list_head list;
};
@@ -29,8 +26,7 @@
struct bulk_buffer_descriptor *bulk_buffer_descriptor_create(
void *virt_addr,
uint32_t len,
- uint32_t handle,
- void *phys_addr_wsm_l2
+ uint32_t handle
);
/*
diff --git a/drivers/gud/mobicore_kernelapi/wsm.h b/drivers/gud/MobiCoreKernelApi/wsm.h
similarity index 74%
rename from drivers/gud/mobicore_kernelapi/wsm.h
rename to drivers/gud/MobiCoreKernelApi/wsm.h
index f8a107c..3a1767d 100644
--- a/drivers/gud/mobicore_kernelapi/wsm.h
+++ b/drivers/gud/MobiCoreKernelApi/wsm.h
@@ -17,17 +17,7 @@
void *virt_addr;
uint32_t len;
uint32_t handle;
- void *phys_addr;
struct list_head list;
};
-struct wsm *wsm_create(
- void *virt_addr,
- uint32_t len,
- uint32_t handle,
-
- /* NULL this may be unknown, so is can be omitted */
- void *phys_addr
-);
-
#endif /* _MC_KAPI_WSM_H_ */
diff --git a/drivers/gud/mobicore_driver/build_tag.h b/drivers/gud/build_tag.h
similarity index 96%
rename from drivers/gud/mobicore_driver/build_tag.h
rename to drivers/gud/build_tag.h
index 4a24275..18faf5a 100644
--- a/drivers/gud/mobicore_driver/build_tag.h
+++ b/drivers/gud/build_tag.h
@@ -26,4 +26,4 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define MOBICORE_COMPONENT_BUILD_TAG \
- "*** t-base-202_V001 ###"
+ "*** t-base-300-QC-8974-Android-V001 ###"
diff --git a/drivers/gud/mobicore_driver/mem.c b/drivers/gud/mobicore_driver/mem.c
deleted file mode 100644
index 33c51b6..0000000
--- a/drivers/gud/mobicore_driver/mem.c
+++ /dev/null
@@ -1,708 +0,0 @@
-/*
- * MobiCore Driver Kernel Module.
- *
- * This module is written as a Linux device driver.
- * This driver represents the command proxy on the lowest layer, from the
- * secure world to the non secure world, and vice versa.
- * This driver is located in the non secure world (Linux).
- * This driver offers IOCTL commands, for access to the secure world, and has
- * the interface from the secure world to the normal world.
- * The access to the driver is possible with a file descriptor,
- * which has to be created by the fd = open(/dev/mobicore) command.
- *
- * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
- * <-- Copyright Trustonic Limited 2013 -->
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include "main.h"
-#include "debug.h"
-#include "mem.h"
-
-#include <linux/highmem.h>
-#include <linux/slab.h>
-#include <linux/kthread.h>
-#include <linux/pagemap.h>
-#include <linux/device.h>
-
-
-/* MobiCore memory context data */
-struct mc_mem_context mem_ctx;
-
-/* convert L2 PTE to page pointer */
-static inline struct page *l2_pte_to_page(pte_t pte)
-{
- unsigned long phys_page_addr = ((unsigned long)pte & PAGE_MASK);
- unsigned int pfn = phys_page_addr >> PAGE_SHIFT;
- struct page *page = pfn_to_page(pfn);
- return page;
-}
-
-/* convert page pointer to L2 PTE */
-static inline pte_t page_to_l2_pte(struct page *page)
-{
- unsigned long pfn = page_to_pfn(page);
- unsigned long phys_addr = (pfn << PAGE_SHIFT);
- pte_t pte = (pte_t)(phys_addr & PAGE_MASK);
- return pte;
-}
-
-static inline void release_page(struct page *page)
-{
- SetPageDirty(page);
-
- page_cache_release(page);
-}
-
-static int lock_pages(struct task_struct *task, void *virt_start_page_addr,
- int pages_no, struct page **pages)
-{
- int locked_pages;
-
- /* lock user pages, must hold the mmap_sem to do this. */
- down_read(&(task->mm->mmap_sem));
- locked_pages = get_user_pages(
- task,
- task->mm,
- (unsigned long)virt_start_page_addr,
- pages_no,
- 1, /* write access */
- 0,
- pages,
- NULL);
- up_read(&(task->mm->mmap_sem));
-
- /* check if we could lock all pages. */
- if (locked_pages != pages_no) {
- MCDRV_DBG_ERROR(mcd, "get_user_pages() failed, locked_pages=%d",
- locked_pages);
- if (locked_pages > 0) {
- /* release all locked pages. */
- release_pages(pages, locked_pages, 0);
- }
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/* Get kernel pointer to shared L2 table given a per-process reference */
-struct l2table *get_l2_table_kernel_virt(struct mc_l2_table *table)
-{
- if (WARN(!table, "Invalid L2 table"))
- return NULL;
-
- if (WARN(!table->set, "Invalid L2 table set"))
- return NULL;
-
- if (WARN(!table->set->kernel_virt, "Invalid L2 pointer"))
- return NULL;
-
- return &(table->set->kernel_virt->table[table->idx]);
-}
-
-/* Get physical address of a shared L2 table given a per-process reference */
-struct l2table *get_l2_table_phys(struct mc_l2_table *table)
-{
- if (WARN(!table, "Invalid L2 table"))
- return NULL;
- if (WARN(!table->set, "Invalid L2 table set"))
- return NULL;
- if (WARN(!table->set->kernel_virt, "Invalid L2 phys pointer"))
- return NULL;
-
- return &(table->set->phys->table[table->idx]);
-}
-
-static inline int in_use(struct mc_l2_table *table)
-{
- return atomic_read(&table->usage) > 0;
-}
-
-/*
- * Search the list of used l2 tables and return the one with the handle.
- * Assumes the table_lock is taken.
- */
-struct mc_l2_table *find_l2_table(unsigned int handle)
-{
- struct mc_l2_table *table;
-
- list_for_each_entry(table, &mem_ctx.l2_tables, list) {
- if (table->handle == handle)
- return table;
- }
- return NULL;
-}
-
-/*
- * Allocate a new l2 table store plus L2_TABLES_PER_PAGE in the l2 free tables
- * list. Assumes the table_lock is already taken by the caller above.
- */
-static int alloc_table_store(void)
-{
- unsigned long store;
- struct mc_l2_tables_set *l2table_set;
- struct mc_l2_table *l2table, *l2table2;
- struct page *page;
- int ret = 0, i;
- /* temp list for holding the l2 tables */
- LIST_HEAD(temp);
-
- store = get_zeroed_page(GFP_KERNEL);
- if (!store)
- return -ENOMEM;
-
- /*
- * Actually, locking is not necessary, because kernel
- * memory is not supposed to get swapped out. But we
- * play safe....
- */
- page = virt_to_page(store);
- SetPageReserved(page);
-
- /* add all the descriptors to the free descriptors list */
- l2table_set = kmalloc(sizeof(*l2table_set), GFP_KERNEL | __GFP_ZERO);
- if (l2table_set == NULL) {
- ret = -ENOMEM;
- goto free_store;
- }
- /* initialize */
- l2table_set->kernel_virt = (void *)store;
- l2table_set->page = page;
- l2table_set->phys = (void *)virt_to_phys((void *)store);
- /* the set is not yet used */
- atomic_set(&l2table_set->used_tables, 0);
-
- /* init add to list. */
- INIT_LIST_HEAD(&(l2table_set->list));
- list_add(&l2table_set->list, &mem_ctx.l2_tables_sets);
-
- for (i = 0; i < L2_TABLES_PER_PAGE; i++) {
- /* allocate a WSM L2 descriptor */
- l2table = kmalloc(sizeof(*l2table), GFP_KERNEL | __GFP_ZERO);
- if (l2table == NULL) {
- ret = -ENOMEM;
- MCDRV_DBG_ERROR(mcd, "out of memory\n");
- /* Free the full temp list and the store in this case */
- goto free_temp_list;
- }
-
- /* set set reference */
- l2table->set = l2table_set;
- l2table->idx = i;
- l2table->virt = get_l2_table_kernel_virt(l2table);
- l2table->phys = (unsigned long)get_l2_table_phys(l2table);
- atomic_set(&l2table->usage, 0);
-
- /* add to temp list. */
- INIT_LIST_HEAD(&l2table->list);
- list_add_tail(&l2table->list, &temp);
- }
-
- /*
- * If everything went ok then merge the temp list with the global
- * free list
- */
- list_splice_tail(&temp, &mem_ctx.free_l2_tables);
- return 0;
-free_temp_list:
- list_for_each_entry_safe(l2table, l2table2, &temp, list) {
- kfree(l2table);
- }
-
- list_del(&l2table_set->list);
-
-free_store:
- free_page(store);
- return ret;
-
-}
-/*
- * Get a l2 table from the free tables list or allocate a new one and
- * initialize it. Assumes the table_lock is already taken.
- */
-static struct mc_l2_table *alloc_l2_table(struct mc_instance *instance)
-{
- int ret = 0;
- struct mc_l2_table *table = NULL;
-
- if (list_empty(&mem_ctx.free_l2_tables)) {
- ret = alloc_table_store();
- if (ret) {
- MCDRV_DBG_ERROR(mcd, "Failed to allocate new store!");
- return ERR_PTR(-ENOMEM);
- }
- /* if it's still empty something wrong has happened */
- if (list_empty(&mem_ctx.free_l2_tables)) {
- MCDRV_DBG_ERROR(mcd,
- "Free list not updated correctly!");
- return ERR_PTR(-EFAULT);
- }
- }
-
- /* get a WSM L2 descriptor */
- table = list_first_entry(&mem_ctx.free_l2_tables,
- struct mc_l2_table, list);
- if (table == NULL) {
- MCDRV_DBG_ERROR(mcd, "out of memory\n");
- return ERR_PTR(-ENOMEM);
- }
- /* Move it to the used l2 tables list */
- list_move_tail(&table->list, &mem_ctx.l2_tables);
-
- table->handle = get_unique_id();
- table->owner = instance;
-
- atomic_inc(&table->set->used_tables);
- atomic_inc(&table->usage);
-
- MCDRV_DBG_VERBOSE(mcd,
- "chunkPhys=%p,idx=%d", table->set->phys, table->idx);
-
- return table;
-}
-
-/*
- * Frees the object associated with a l2 table. Initially the object is moved
- * to the free tables list, but if all the 4 lists of the store are free
- * then the store is also released.
- * Assumes the table_lock is already taken.
- */
-static void free_l2_table(struct mc_l2_table *table)
-{
- struct mc_l2_tables_set *l2table_set;
-
- if (WARN(!table, "Invalid table"))
- return;
-
- l2table_set = table->set;
- if (WARN(!l2table_set, "Invalid table set"))
- return;
-
- list_move_tail(&table->list, &mem_ctx.free_l2_tables);
-
- /* if nobody uses this set, we can release it. */
- if (atomic_dec_and_test(&l2table_set->used_tables)) {
- struct mc_l2_table *tmp;
-
- /* remove from list */
- list_del(&l2table_set->list);
- /*
- * All the l2 tables are in the free list for this set
- * so we can just remove them from there
- */
- list_for_each_entry_safe(table, tmp, &mem_ctx.free_l2_tables,
- list) {
- if (table->set == l2table_set) {
- list_del(&table->list);
- kfree(table);
- }
- } /* end while */
-
- /*
- * We shouldn't recover from this since it was some data
- * corruption before
- */
- BUG_ON(!l2table_set->page);
- ClearPageReserved(l2table_set->page);
-
- BUG_ON(!l2table_set->kernel_virt);
- free_page((unsigned long)l2table_set->kernel_virt);
-
- kfree(l2table_set);
- }
-}
-
-/*
- * Create a L2 table in a WSM container that has been allocates previously.
- * Assumes the table lock is already taken or there is no need to take like
- * when first creating the l2 table the full list is locked.
- *
- * @task pointer to task owning WSM
- * @wsm_buffer user space WSM start
- * @wsm_len WSM length
- * @table Pointer to L2 table details
- */
-static int map_buffer(struct task_struct *task, void *wsm_buffer,
- unsigned int wsm_len, struct mc_l2_table *table)
-{
- int ret = 0;
- unsigned int i, nr_of_pages;
- /* start address of the 4 KiB page of wsm_buffer */
- void *virt_addr_page;
- struct page *page;
- struct l2table *l2table;
- struct page **l2table_as_array_of_pointers_to_page;
- /* page offset in wsm buffer */
- unsigned int offset;
-
- if (WARN(!wsm_buffer, "Invalid WSM buffer pointer"))
- return -EINVAL;
-
- if (WARN(wsm_len == 0, "Invalid WSM buffer length"))
- return -EINVAL;
-
- if (WARN(!table, "Invalid mapping table for WSM"))
- return -EINVAL;
-
- /* no size > 1Mib supported */
- if (wsm_len > SZ_1M) {
- MCDRV_DBG_ERROR(mcd, "size > 1 MiB\n");
- return -EINVAL;
- }
-
- MCDRV_DBG_VERBOSE(mcd, "WSM addr=0x%p, len=0x%08x\n", wsm_buffer,
- wsm_len);
-
-
- /* calculate page usage */
- virt_addr_page = (void *)(((unsigned long)(wsm_buffer)) & PAGE_MASK);
- offset = (unsigned int) (((unsigned long)(wsm_buffer)) & (~PAGE_MASK));
- nr_of_pages = PAGE_ALIGN(offset + wsm_len) / PAGE_SIZE;
-
- MCDRV_DBG_VERBOSE(mcd, "virt addr page start=0x%p, pages=%d\n",
- virt_addr_page, nr_of_pages);
-
- /* L2 table can hold max 1MiB in 256 pages. */
- if ((nr_of_pages * PAGE_SIZE) > SZ_1M) {
- MCDRV_DBG_ERROR(mcd, "WSM paged exceed 1 MiB\n");
- return -EINVAL;
- }
-
- l2table = table->virt;
- /*
- * We use the memory for the L2 table to hold the pointer
- * and convert them later. This works, as everything comes
- * down to a 32 bit value.
- */
- l2table_as_array_of_pointers_to_page = (struct page **)l2table;
-
- /* Request comes from user space */
- if (task != NULL && !is_vmalloc_addr(wsm_buffer)) {
- /*
- * lock user page in memory, so they do not get swapped
- * out.
- * REV axh: Kernel 2.6.27 added a new get_user_pages_fast()
- * function, maybe it is called fast_gup() in some versions.
- * handle user process doing a fork().
- * Child should not get things.
- * http://osdir.com/ml/linux-media/2009-07/msg00813.html
- * http://lwn.net/Articles/275808/
- */
- ret = lock_pages(task, virt_addr_page, nr_of_pages,
- l2table_as_array_of_pointers_to_page);
- if (ret != 0) {
- MCDRV_DBG_ERROR(mcd, "lock_user_pages() failed\n");
- return ret;
- }
- }
- /* Request comes from kernel space(cont buffer) */
- else if (task == NULL && !is_vmalloc_addr(wsm_buffer)) {
- void *uaddr = wsm_buffer;
- for (i = 0; i < nr_of_pages; i++) {
- page = virt_to_page(uaddr);
- if (!page) {
- MCDRV_DBG_ERROR(mcd, "failed to map address");
- return -EINVAL;
- }
- get_page(page);
- l2table_as_array_of_pointers_to_page[i] = page;
- uaddr += PAGE_SIZE;
- }
- }
- /* Request comes from kernel space(vmalloc buffer) */
- else {
- void *uaddr = wsm_buffer;
- for (i = 0; i < nr_of_pages; i++) {
- page = vmalloc_to_page(uaddr);
- if (!page) {
- MCDRV_DBG_ERROR(mcd, "failed to map address");
- return -EINVAL;
- }
- get_page(page);
- l2table_as_array_of_pointers_to_page[i] = page;
- uaddr += PAGE_SIZE;
- }
- }
-
- table->pages = nr_of_pages;
-
- /*
- * create L2 Table entries.
- * used_l2table->table contains a list of page pointers here.
- * For a proper cleanup we have to ensure that the following
- * code either works and used_l2table contains a valid L2 table
- * - or fails and used_l2table->table contains the list of page
- * pointers.
- * Any mixed contents will make cleanup difficult.
- */
- for (i = 0; i < nr_of_pages; i++) {
- pte_t pte;
- page = l2table_as_array_of_pointers_to_page[i];
-
- /*
-		 * create L2 table entry; see the ARM MMU documentation for details
- * about flags stored in the lowest 12 bits.
- * As a side reference, the Article
- * "ARM's multiply-mapped memory mess"
- * found in the collection at
- * http://lwn.net/Articles/409032/
- * is also worth reading.
- */
- pte = page_to_l2_pte(page)
- | PTE_EXT_AP1 | PTE_EXT_AP0
- | PTE_CACHEABLE | PTE_BUFFERABLE
- | PTE_TYPE_SMALL | PTE_TYPE_EXT | PTE_EXT_NG;
- /*
-		 * Linux uses different mappings for SMP systems (the
-		 * sharing flag is set for the pte). In order not to
-		 * confuse things too much in MobiCore, make sure the
-		 * shared buffers have the same flags.
-		 * This should also be done on the SWd side.
- */
-#ifdef CONFIG_SMP
- pte |= PTE_EXT_SHARED | PTE_EXT_TEX(1);
-#endif
-
- l2table->table_entries[i] = pte;
- MCDRV_DBG_VERBOSE(mcd, "L2 entry %d: 0x%08x\n", i,
- (unsigned int)(pte));
- }
-
- /* ensure rest of table is empty */
- while (i < 255)
- l2table->table_entries[i++] = (pte_t)0;
-
-
- return ret;
-}
-
-/*
- * Remove an L2 table from a WSM container. Afterwards the container may be
- * released. Assumes the table_lock and the table's lock are taken.
- */
-static void unmap_buffers(struct mc_l2_table *table)
-{
- struct l2table *l2table;
- int i;
-
- if (WARN_ON(!table))
- return;
-
- /* found the table, now release the resources. */
- MCDRV_DBG_VERBOSE(mcd, "clear L2 table, phys_base=%p, nr_of_pages=%d\n",
- (void *)table->phys, table->pages);
-
- l2table = table->virt;
-
- /* release all locked user space pages */
- for (i = 0; i < table->pages; i++) {
- /* convert physical entries from L2 table to page pointers */
- pte_t pte = l2table->table_entries[i];
- struct page *page = l2_pte_to_page(pte);
- release_page(page);
- }
-
- /* remember that all pages have been freed */
- table->pages = 0;
-}
-
-/* Delete a used l2 table. Assumes the table_lock and the table's lock are taken */
-static void unmap_l2_table(struct mc_l2_table *table)
-{
- /* Check if it's not locked by other processes too! */
- if (!atomic_dec_and_test(&table->usage))
- return;
-
- /* release if Nwd and Swd/MC do no longer use it. */
- unmap_buffers(table);
- free_l2_table(table);
-}
-
-int mc_free_l2_table(struct mc_instance *instance, uint32_t handle)
-{
- struct mc_l2_table *table;
- int ret = 0;
-
- if (WARN(!instance, "No instance data available"))
- return -EFAULT;
-
- mutex_lock(&mem_ctx.table_lock);
- table = find_l2_table(handle);
-
- if (table == NULL) {
- MCDRV_DBG_VERBOSE(mcd, "entry not found");
- ret = -EINVAL;
- goto err_unlock;
- }
- if (instance != table->owner && !is_daemon(instance)) {
-		MCDRV_DBG_ERROR(mcd, "instance does not own it");
- ret = -EPERM;
- goto err_unlock;
- }
- /* free table (if no further locks exist) */
- unmap_l2_table(table);
-err_unlock:
- mutex_unlock(&mem_ctx.table_lock);
-
- return ret;
-}
-
-int mc_lock_l2_table(struct mc_instance *instance, uint32_t handle)
-{
- int ret = 0;
- struct mc_l2_table *table = NULL;
-
- if (WARN(!instance, "No instance data available"))
- return -EFAULT;
-
- mutex_lock(&mem_ctx.table_lock);
- table = find_l2_table(handle);
-
- if (table == NULL) {
- MCDRV_DBG_VERBOSE(mcd, "entry not found %u\n", handle);
- ret = -EINVAL;
- goto table_err;
- }
- if (instance != table->owner && !is_daemon(instance)) {
-		MCDRV_DBG_ERROR(mcd, "instance does not own it\n");
- ret = -EPERM;
- goto table_err;
- }
-
- /* lock entry */
- atomic_inc(&table->usage);
-table_err:
- mutex_unlock(&mem_ctx.table_lock);
- return ret;
-}
-/*
- * Allocate L2 table and map buffer into it.
- * That is, create respective table entries.
- * Must hold Semaphore mem_ctx.wsm_l2_sem
- */
-struct mc_l2_table *mc_alloc_l2_table(struct mc_instance *instance,
- struct task_struct *task, void *wsm_buffer, unsigned int wsm_len)
-{
- int ret = 0;
- struct mc_l2_table *table;
-
- if (WARN(!instance, "No instance data available"))
- return ERR_PTR(-EFAULT);
-
- mutex_lock(&mem_ctx.table_lock);
- table = alloc_l2_table(instance);
- if (IS_ERR(table)) {
- MCDRV_DBG_ERROR(mcd, "allocate_used_l2_table() failed\n");
- ret = -ENOMEM;
- goto err_no_mem;
- }
-
- /* create the L2 page for the WSM */
- ret = map_buffer(task, wsm_buffer, wsm_len, table);
-
- if (ret != 0) {
- MCDRV_DBG_ERROR(mcd, "map_buffer() failed\n");
- unmap_l2_table(table);
- goto err_no_mem;
- }
- MCDRV_DBG(mcd, "mapped buffer %p to table with handle %d @ %lx",
- wsm_buffer, table->handle, table->phys);
-
- mutex_unlock(&mem_ctx.table_lock);
- return table;
-err_no_mem:
- mutex_unlock(&mem_ctx.table_lock);
- return ERR_PTR(ret);
-}
-
-uint32_t mc_find_l2_table(uint32_t handle, int32_t fd)
-{
- uint32_t ret = 0;
- struct mc_l2_table *table = NULL;
-
- mutex_lock(&mem_ctx.table_lock);
- table = find_l2_table(handle);
-
- if (table == NULL) {
- MCDRV_DBG_ERROR(mcd, "entry not found %u\n", handle);
- ret = 0;
- goto table_err;
- }
-
- /* It's safe here not to lock the instance since the owner of
- * the table will be cleared only with the table lock taken */
- if (!mc_check_owner_fd(table->owner, fd)) {
- MCDRV_DBG_ERROR(mcd, "not valid owner%u\n", handle);
- ret = 0;
- goto table_err;
- }
-
- ret = table->phys;
-table_err:
- mutex_unlock(&mem_ctx.table_lock);
- return ret;
-}
-
-void mc_clean_l2_tables(void)
-{
- struct mc_l2_table *table, *tmp;
-
- mutex_lock(&mem_ctx.table_lock);
- /* Check if some WSM is orphaned. */
- list_for_each_entry_safe(table, tmp, &mem_ctx.l2_tables, list) {
- if (table->owner == NULL) {
- MCDRV_DBG(mcd,
- "clearing orphaned WSM L2: p=%lx pages=%d\n",
- table->phys, table->pages);
- unmap_l2_table(table);
- }
- }
- mutex_unlock(&mem_ctx.table_lock);
-}
-
-void mc_clear_l2_tables(struct mc_instance *instance)
-{
- struct mc_l2_table *table, *tmp;
-
- mutex_lock(&mem_ctx.table_lock);
- /* Check if some WSM is still in use. */
- list_for_each_entry_safe(table, tmp, &mem_ctx.l2_tables, list) {
- if (table->owner == instance) {
- MCDRV_DBG(mcd, "release WSM L2: p=%lx pages=%d\n",
- table->phys, table->pages);
- /* unlock app usage and free or mark it as orphan */
- table->owner = NULL;
- unmap_l2_table(table);
- }
- }
- mutex_unlock(&mem_ctx.table_lock);
-}
-
-int mc_init_l2_tables(void)
-{
- /* init list for WSM L2 chunks. */
- INIT_LIST_HEAD(&mem_ctx.l2_tables_sets);
-
- /* L2 table descriptor list. */
- INIT_LIST_HEAD(&mem_ctx.l2_tables);
-
- /* L2 table descriptor list. */
- INIT_LIST_HEAD(&mem_ctx.free_l2_tables);
-
- mutex_init(&mem_ctx.table_lock);
-
- return 0;
-}
-
-void mc_release_l2_tables()
-{
- struct mc_l2_table *table;
- /* Check if some WSM is still in use. */
- list_for_each_entry(table, &mem_ctx.l2_tables, list) {
- WARN(1, "WSM L2 still in use: phys=%lx ,nr_of_pages=%d\n",
- table->phys, table->pages);
- }
-}
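
Side note (not part of the patch): the page accounting in the deleted map_buffer() above, which splits the buffer into a page-aligned start address, an offset into the first page, and a page-aligned total size, reduces to the small standalone C sketch below. It assumes the usual 4 KiB page definitions and is only an illustration, not driver code.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Number of 4 KiB pages an unaligned buffer [addr, addr + len) touches. */
static unsigned long wsm_page_count(unsigned long addr, unsigned long len)
{
	unsigned long offset = addr & ~PAGE_MASK;	/* offset into the first page */

	return PAGE_ALIGN(offset + len) / PAGE_SIZE;
}

int main(void)
{
	/* A 5000-byte buffer starting 100 bytes into a page spans 2 pages. */
	printf("%lu\n", wsm_page_count(0x1000 + 100, 5000));
	return 0;
}
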
diff --git a/drivers/gud/mobicore_driver/mem.h b/drivers/gud/mobicore_driver/mem.h
deleted file mode 100644
index 397a6cc..0000000
--- a/drivers/gud/mobicore_driver/mem.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * MobiCore driver module (interface to the secure world SWD).
- *
- * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
- * <-- Copyright Trustonic Limited 2013 -->
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _MC_MEM_H_
-#define _MC_MEM_H_
-
-#define FREE_FROM_SWD 1
-#define FREE_FROM_NWD 0
-
-#define LOCKED_BY_APP (1U << 0)
-#define LOCKED_BY_MC (1U << 1)
-
-/*
- * MobiCore specific page tables for world shared memory.
- * Linux uses shadow page tables, see arch/arm/include/asm/pgtable-2level.
- * MobiCore uses the default ARM format.
- *
- * Number of page table entries in one L2 table. This is ARM specific, an
- * L2 table covers 1 MiB by using 256 entries, each referring to a 4 KiB page.
- */
-#define MC_ARM_L2_TABLE_ENTRIES 256
-
-/* ARM level 2 (L2) table with 256 entries. Size: 1k */
-struct l2table {
- pte_t table_entries[MC_ARM_L2_TABLE_ENTRIES];
-};
-
-/* Number of pages for L2 tables. There are 4 tables in each page. */
-#define L2_TABLES_PER_PAGE 4
-
-/* Store for four L2 tables in one 4 KiB page */
-struct mc_l2_table_store {
- struct l2table table[L2_TABLES_PER_PAGE];
-};
-
-/* Usage and maintenance information about mc_l2_table_store */
-struct mc_l2_tables_set {
- struct list_head list;
- /* kernel virtual address */
- struct mc_l2_table_store *kernel_virt;
- /* physical address */
- struct mc_l2_table_store *phys;
- /* pointer to page struct */
- struct page *page;
-	/* How many tables from this set are used */
- atomic_t used_tables;
-};
-
-/*
- * L2 table allocated to the Daemon or a TLC describing a world shared buffer.
- * When users map a malloc()ed area into SWd, an L2 table is allocated.
- * In addition, the area of at most 1 MiB of virtual address space is mapped into
- * the L2 table and a handle for this table is returned to the user.
- */
-struct mc_l2_table {
- struct list_head list;
- /* Table lock */
- struct mutex lock;
- /* handle as communicated to user mode */
- unsigned int handle;
- /* Number of references kept to this l2 table */
- atomic_t usage;
- /* owner of this L2 table */
- struct mc_instance *owner;
- /* set describing where our L2 table is stored */
- struct mc_l2_tables_set *set;
- /* index into L2 table set */
- unsigned int idx;
- /* size of buffer */
- unsigned int pages;
- /* virtual address*/
- void *virt;
- unsigned long phys;
-};
-
-/* MobiCore Driver Memory context data. */
-struct mc_mem_context {
- struct mc_instance *daemon_inst;
- /* Backing store for L2 tables */
- struct list_head l2_tables_sets;
- /* Bookkeeping for used L2 tables */
- struct list_head l2_tables;
- /* Bookkeeping for free L2 tables */
- struct list_head free_l2_tables;
-	/* mutex to synchronize access to the above lists */
- struct mutex table_lock;
-};
-
-/*
- * Allocate L2 table and map buffer into it.
- * That is, create respective table entries.
- */
-struct mc_l2_table *mc_alloc_l2_table(struct mc_instance *instance,
- struct task_struct *task, void *wsm_buffer, unsigned int wsm_len);
-
-/* Delete all the l2 tables associated with an instance */
-void mc_clear_l2_tables(struct mc_instance *instance);
-
-/* Release all orphaned L2 tables */
-void mc_clean_l2_tables(void);
-
-/* Delete a used l2 table. */
-int mc_free_l2_table(struct mc_instance *instance, uint32_t handle);
-
-/*
- * Lock an l2 table - the daemon adds +1 to refcount of the L2 table
- * marking it in use by SWD so it doesn't get released when the TLC dies.
- */
-int mc_lock_l2_table(struct mc_instance *instance, uint32_t handle);
-/* Unlock l2 table. */
-int mc_unlock_l2_table(struct mc_instance *instance, uint32_t handle);
-/* Return the phys address of l2 table. */
-uint32_t mc_find_l2_table(uint32_t handle, int32_t fd);
-/* Release all used l2 tables to Linux memory space */
-void mc_release_l2_tables(void);
-
-/* Initialize all l2 tables structure */
-int mc_init_l2_tables(void);
-
-#endif /* _MC_MEM_H_ */
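
Side note (not part of the patch): the deleted header above sizes each struct l2table at 256 entries of 4 bytes, i.e. 1 KiB, so four tables fill exactly one 4 KiB page. A minimal sketch of that arithmetic, with pte_t stood in by a 32-bit type purely for illustration:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t pte_t;			/* stand-in for the ARM short-descriptor PTE */

#define MC_ARM_L2_TABLE_ENTRIES	256
#define L2_TABLES_PER_PAGE	4

struct l2table {
	pte_t table_entries[MC_ARM_L2_TABLE_ENTRIES];
};

struct mc_l2_table_store {
	struct l2table table[L2_TABLES_PER_PAGE];
};

int main(void)
{
	/* 256 * 4 bytes = 1 KiB per table; 4 tables = one 4 KiB page. */
	printf("l2table: %zu bytes, store: %zu bytes\n",
	       sizeof(struct l2table), sizeof(struct mc_l2_table_store));
	return 0;
}
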
diff --git a/drivers/gud/mobicore_driver/ops.c b/drivers/gud/mobicore_driver/ops.c
deleted file mode 100644
index 9d4af72..0000000
--- a/drivers/gud/mobicore_driver/ops.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * MobiCore Driver Kernel Module.
- *
- * This module is written as a Linux device driver.
- * This driver represents the command proxy on the lowest layer, from the
- * secure world to the non secure world, and vice versa.
- * This driver is located in the non secure world (Linux).
- * This driver offers IOCTL commands, for access to the secure world, and has
- * the interface from the secure world to the normal world.
- * The access to the driver is possible with a file descriptor,
- * which has to be created by the fd = open(/dev/mobicore) command.
- *
- * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
- * <-- Copyright Trustonic Limited 2013 -->
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kthread.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/workqueue.h>
-#include <linux/cpu.h>
-
-#include "main.h"
-#include "fastcall.h"
-#include "ops.h"
-#include "mem.h"
-#include "pm.h"
-#include "debug.h"
-
-/* MobiCore context data */
-static struct mc_context *ctx;
-
-static inline long smc(union fc_generic *fc)
-{
-	/* If sleep has been requested, yields must be filtered out as
-	 * they make no sense */
- if (ctx->mcp)
- if (ctx->mcp->flags.sleep_mode.SleepReq) {
- if (fc->as_in.cmd == MC_SMC_N_YIELD)
- return MC_FC_RET_ERR_INVALID;
- }
- return _smc(fc);
-}
-
-#ifdef MC_FASTCALL_WORKER_THREAD
-
-static struct task_struct *fastcall_thread;
-static DEFINE_KTHREAD_WORKER(fastcall_worker);
-
-struct fastcall_work {
- struct kthread_work work;
- void *data;
-};
-
-static void fastcall_work_func(struct kthread_work *work)
-{
- struct fastcall_work *fc_work =
- container_of(work, struct fastcall_work, work);
-
-#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
- mc_pm_clock_enable();
-#endif
-
- smc(fc_work->data);
-
-#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
- mc_pm_clock_disable();
-#endif
-}
-
-void mc_fastcall(void *data)
-{
- struct fastcall_work fc_work = {
- KTHREAD_WORK_INIT(fc_work.work, fastcall_work_func),
- .data = data,
- };
-
- queue_kthread_work(&fastcall_worker, &fc_work.work);
- flush_kthread_work(&fc_work.work);
-}
-
-int mc_fastcall_init(struct mc_context *context)
-{
- int ret = 0;
-
- ctx = context;
-
- fastcall_thread = kthread_create(kthread_worker_fn, &fastcall_worker,
- "mc_fastcall");
- if (IS_ERR(fastcall_thread)) {
- ret = PTR_ERR(fastcall_thread);
- fastcall_thread = NULL;
- MCDRV_DBG_ERROR(mcd, "cannot create fastcall wq (%d)\n", ret);
- return ret;
- }
-
- /* this thread MUST run on CPU 0 */
- kthread_bind(fastcall_thread, 0);
- wake_up_process(fastcall_thread);
-
- return 0;
-}
-
-void mc_fastcall_destroy(void)
-{
- if (!IS_ERR_OR_NULL(fastcall_thread)) {
- kthread_stop(fastcall_thread);
- fastcall_thread = NULL;
- }
-}
-#else
-
-struct fastcall_work_struct {
- struct work_struct work;
- void *data;
-};
-
-static void fastcall_work_func(struct work_struct *work)
-{
- struct fastcall_work_struct *fc_work =
- container_of(work, struct fastcall_work_struct, work);
-
-#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
- mc_pm_clock_enable();
-#endif
-
- smc(fc_work->data);
-
-#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
- mc_pm_clock_disable();
-#endif
-}
-
-void mc_fastcall(void *data)
-{
- struct fastcall_work_struct work = {
- .data = data,
- };
- INIT_WORK(&work.work, fastcall_work_func);
- schedule_work_on(0, &work.work);
-
- flush_work(&work.work);
-}
-
-int mc_fastcall_init(struct mc_context *context)
-{
- ctx = context;
- return 0;
-};
-
-void mc_fastcall_destroy(void) {};
-#endif
-
-int mc_info(uint32_t ext_info_id, uint32_t *state, uint32_t *ext_info)
-{
- int ret = 0;
- union mc_fc_info fc_info;
-
- MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
- memset(&fc_info, 0, sizeof(fc_info));
- fc_info.as_in.cmd = MC_FC_INFO;
- fc_info.as_in.ext_info_id = ext_info_id;
-
- MCDRV_DBG(mcd, "fc_info <- cmd=0x%08x, ext_info_id=0x%08x\n",
- fc_info.as_in.cmd, fc_info.as_in.ext_info_id);
-
- mc_fastcall(&(fc_info.as_generic));
-
- MCDRV_DBG(mcd,
- "fc_info -> r=0x%08x ret=0x%08x state=0x%08x ext_info=0x%08x",
- fc_info.as_out.resp,
- fc_info.as_out.ret,
- fc_info.as_out.state,
- fc_info.as_out.ext_info);
-
- ret = convert_fc_ret(fc_info.as_out.ret);
-
- *state = fc_info.as_out.state;
- *ext_info = fc_info.as_out.ext_info;
-
- MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
-
- return ret;
-}
-
-/* Yield to MobiCore */
-int mc_yield(void)
-{
- int ret = 0;
- union fc_generic yield;
-
- MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
- memset(&yield, 0, sizeof(yield));
- yield.as_in.cmd = MC_SMC_N_YIELD;
- mc_fastcall(&yield);
- ret = convert_fc_ret(yield.as_out.ret);
-
- return ret;
-}
-
-/* call common notify */
-int mc_nsiq(void)
-{
- int ret = 0;
- union fc_generic nsiq;
- MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
- memset(&nsiq, 0, sizeof(nsiq));
- nsiq.as_in.cmd = MC_SMC_N_SIQ;
- mc_fastcall(&nsiq);
- ret = convert_fc_ret(nsiq.as_out.ret);
-
- return ret;
-}
-
-/* call common notify */
-int _nsiq(void)
-{
- int ret = 0;
- union fc_generic nsiq;
- MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
- memset(&nsiq, 0, sizeof(nsiq));
- nsiq.as_in.cmd = MC_SMC_N_SIQ;
- _smc(&nsiq);
- ret = convert_fc_ret(nsiq.as_out.ret);
-
- return ret;
-}
-
-/* Call the INIT fastcall to setup MobiCore initialization */
-int mc_init(uint32_t base, uint32_t nq_offset, uint32_t nq_length,
- uint32_t mcp_offset, uint32_t mcp_length)
-{
- int ret = 0;
- union mc_fc_init fc_init;
-
- MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
- memset(&fc_init, 0, sizeof(fc_init));
-
- fc_init.as_in.cmd = MC_FC_INIT;
- /* base address of mci buffer 4KB aligned */
- fc_init.as_in.base = base;
- /* notification buffer start/length [16:16] [start, length] */
- fc_init.as_in.nq_info = (nq_offset << 16) | (nq_length & 0xFFFF);
- /* mcp buffer start/length [16:16] [start, length] */
- fc_init.as_in.mcp_info = (mcp_offset << 16) | (mcp_length & 0xFFFF);
-
- /*
-	 * Set KMOD notification queue to start of MCI;
-	 * mciInfo was already set up in mmap
- */
- MCDRV_DBG(mcd,
- "cmd=0x%08x, base=0x%08x,nq_info=0x%08x, mcp_info=0x%08x\n",
- fc_init.as_in.cmd, fc_init.as_in.base, fc_init.as_in.nq_info,
- fc_init.as_in.mcp_info);
-
- mc_fastcall(&fc_init.as_generic);
-
- MCDRV_DBG(mcd, "out cmd=0x%08x, ret=0x%08x\n", fc_init.as_out.resp,
- fc_init.as_out.ret);
-
- ret = convert_fc_ret(fc_init.as_out.ret);
-
- MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
-
- return ret;
-}
-
-/* Return MobiCore driver version */
-uint32_t mc_get_version(void)
-{
- MCDRV_DBG(mcd, "MobiCore driver version is %i.%i\n",
- MCDRVMODULEAPI_VERSION_MAJOR,
- MCDRVMODULEAPI_VERSION_MINOR);
-
- return MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
- MCDRVMODULEAPI_VERSION_MINOR);
-}
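
Side note (not part of the patch): the deleted mc_init() above packs each start/length pair into one 32-bit word, with the start offset in bits [31:16] and the length in bits [15:0]. A small illustrative helper in C (pack16_16 is a hypothetical name, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Pack an offset and a length into one word: offset in bits [31:16], length in [15:0]. */
static uint32_t pack16_16(uint32_t offset, uint32_t length)
{
	return (offset << 16) | (length & 0xFFFF);
}

int main(void)
{
	/* e.g. a queue at offset 0x100 with length 0x400 packs to 0x01000400 */
	printf("0x%08x\n", (unsigned int)pack16_16(0x100, 0x400));
	return 0;
}
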
diff --git a/drivers/gud/setupDrivers.sh b/drivers/gud/setupDrivers.sh
new file mode 100644
index 0000000..8f877b7
--- /dev/null
+++ b/drivers/gud/setupDrivers.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+export COMP_PATH_ROOT=$(dirname $(readlink -f $BASH_SOURCE)) # absolute path of the folder containing this file
+
+# This part has to be set by the customer
+# To be set, absolute path of kernel folder
+export LINUX_PATH=
+# To be set, absolute path of the CROSS_COMPILE prefix needed by the kernel, e.g. /home/user/arm-2009q3/bin/arm-none-linux-gnueabi-
+export CROSS_COMPILE=
+# To be set, build mode debug or release
+export MODE=debug
+# To be set, the absolute path to the Linux Android NDK
+export NDK_PATH=
+
+# Global variables needed by build scripts
+export COMP_PATH_Logwrapper=$COMP_PATH_ROOT/Logwrapper/Out
+export COMP_PATH_MobiCore=$COMP_PATH_ROOT/MobiCore/Out
+export COMP_PATH_MobiCoreDriverMod=$COMP_PATH_ROOT/mobicore_driver/Out
+export COMP_PATH_MobiCoreDriverLib=$COMP_PATH_ROOT/daemon/Out
+export COMP_PATH_AndroidNdkLinux=$NDK_PATH
\ No newline at end of file
diff --git a/drivers/hwmon/qpnp-adc-current.c b/drivers/hwmon/qpnp-adc-current.c
index 067a887..44da261 100644
--- a/drivers/hwmon/qpnp-adc-current.c
+++ b/drivers/hwmon/qpnp-adc-current.c
@@ -1014,7 +1014,7 @@
int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *iadc, int32_t *rsense)
{
- uint8_t rslt_rsense;
+ uint8_t rslt_rsense = 0;
int32_t rc = 0, sign_bit = 0;
if (qpnp_iadc_is_valid(iadc) < 0)
@@ -1022,36 +1022,37 @@
if (iadc->external_rsense) {
*rsense = iadc->rsense;
- return rc;
- }
-
- if (iadc->default_internal_rsense) {
+ } else if (iadc->default_internal_rsense) {
*rsense = iadc->rsense_workaround_value;
- return rc;
- }
+ } else {
- rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_NOMINAL_RSENSE, &rslt_rsense);
- if (rc < 0) {
- pr_err("qpnp adc rsense read failed with %d\n", rc);
- return rc;
- }
+ rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_NOMINAL_RSENSE,
+ &rslt_rsense);
+ if (rc < 0) {
+ pr_err("qpnp adc rsense read failed with %d\n", rc);
+ return rc;
+ }
- pr_debug("rsense:0%x\n", rslt_rsense);
+		pr_debug("rsense:0x%x\n", rslt_rsense);
- if (rslt_rsense & QPNP_RSENSE_MSB_SIGN_CHECK)
- sign_bit = 1;
+ if (rslt_rsense & QPNP_RSENSE_MSB_SIGN_CHECK)
+ sign_bit = 1;
- rslt_rsense &= ~QPNP_RSENSE_MSB_SIGN_CHECK;
+ rslt_rsense &= ~QPNP_RSENSE_MSB_SIGN_CHECK;
- if (sign_bit)
- *rsense = QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR -
+ if (sign_bit)
+ *rsense = QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR -
(rslt_rsense * QPNP_IADC_RSENSE_LSB_N_OHMS_PER_BIT);
- else
- *rsense = QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR +
+ else
+ *rsense = QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR +
(rslt_rsense * QPNP_IADC_RSENSE_LSB_N_OHMS_PER_BIT);
-
+ }
pr_debug("rsense value is %d\n", *rsense);
+ if (*rsense == 0)
+		pr_err("incorrect rsense value:%d rslt_rsense:%d\n",
+ *rsense, rslt_rsense);
+
return rc;
}
EXPORT_SYMBOL(qpnp_iadc_get_rsense);
@@ -1215,6 +1216,11 @@
if (qpnp_iadc_is_valid(iadc) < 0)
return -EPROBE_DEFER;
+ if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
+ pr_err("raw offset errors! run iadc calibration again\n");
+ return -EINVAL;
+ }
+
mutex_lock(&iadc->adc->adc_lock);
if (iadc->iadc_poll_eoc) {
@@ -1251,6 +1257,11 @@
result_current = i_result->result_uv;
result_current *= QPNP_IADC_NANO_VOLTS_FACTOR;
/* Intentional fall through. Process the result w/o comp */
+ if (!rsense_u_ohms) {
+ pr_err("rsense error=%d\n", rsense_u_ohms);
+ goto fail_release_vadc;
+ }
+
do_div(result_current, rsense_u_ohms);
if (sign) {
diff --git a/drivers/input/misc/mma8x5x.c b/drivers/input/misc/mma8x5x.c
index d708d94..a605720 100644
--- a/drivers/input/misc/mma8x5x.c
+++ b/drivers/input/misc/mma8x5x.c
@@ -26,8 +26,8 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/input-polldev.h>
#include <linux/sensors.h>
+#include <linux/input.h>
#include <linux/regulator/consumer.h>
#include <linux/of_gpio.h>
#include <linux/irq.h>
@@ -44,8 +44,6 @@
#define POLL_INTERVAL_MAX 10000
#define POLL_INTERVAL 100 /* msecs */
-/* if sensor is standby ,set POLL_STOP_TIME to slow down the poll */
-#define POLL_STOP_TIME 10000
#define INPUT_FUZZ 32
#define INPUT_FLAT 32
#define INPUT_DATA_DIVIDER 16
@@ -81,6 +79,7 @@
#define MMA_INT_ROUTING_CFG 0x01
#define MMA_POWER_CFG_MASK 0xFE
+#define MMA_ODR_MASK 0x38
struct sensor_regulator {
struct regulator *vreg;
@@ -189,7 +188,7 @@
};
struct mma8x5x_data {
struct i2c_client *client;
- struct input_polled_dev *poll_dev;
+ struct delayed_work dwork;
struct input_dev *idev;
struct mutex data_lock;
struct sensors_classdev cdev;
@@ -229,7 +228,6 @@
{{ 0, 1, 0}, { 1, 0, 0}, {0, 0, -1} },
{{ 1, 0, 0}, { 0, -1, 0}, {0, 0, -1} },
};
-static struct mma8x5x_data *drv_data;
static int mma8x5x_config_regulator(struct i2c_client *client, bool on)
{
int rc = 0, i;
@@ -408,7 +406,7 @@
if (result < 0)
goto out;
- val = (u8)result | val;
+ val = ((u8)result & ~MMA_ODR_MASK) | val;
result = i2c_smbus_write_byte_data(client, MMA8X5X_CTRL_REG1,
(val & MMA_POWER_CFG_MASK));
if (result < 0)
@@ -487,32 +485,27 @@
static void mma8x5x_report_data(struct mma8x5x_data *pdata)
{
- struct input_polled_dev *poll_dev = pdata->poll_dev;
struct mma8x5x_data_axis data;
+
mutex_lock(&pdata->data_lock);
- if ((pdata->active & MMA_STATE_MASK) == MMA_STANDBY) {
- poll_dev->poll_interval = POLL_STOP_TIME;
- /* if standby ,set as 10s to slow the poll. */
- goto out;
- } else {
- if (poll_dev->poll_interval == POLL_STOP_TIME)
- poll_dev->poll_interval = pdata->poll_delay;
- }
if (mma8x5x_read_data(pdata->client, &data) != 0)
goto out;
mma8x5x_data_convert(pdata, &data);
- input_report_abs(poll_dev->input, ABS_X, data.x);
- input_report_abs(poll_dev->input, ABS_Y, data.y);
- input_report_abs(poll_dev->input, ABS_Z, data.z);
- input_sync(poll_dev->input);
+ input_report_abs(pdata->idev, ABS_X, data.x);
+ input_report_abs(pdata->idev, ABS_Y, data.y);
+ input_report_abs(pdata->idev, ABS_Z, data.z);
+ input_sync(pdata->idev);
out:
mutex_unlock(&pdata->data_lock);
}
-static void mma8x5x_dev_poll(struct input_polled_dev *dev)
+static void mma8x5x_dev_poll(struct work_struct *work)
{
- struct mma8x5x_data *pdata = (struct mma8x5x_data *)dev->private;
+ struct mma8x5x_data *pdata = container_of((struct delayed_work *)work,
+ struct mma8x5x_data, dwork);
mma8x5x_report_data(pdata);
+ schedule_delayed_work(&pdata->dwork,
+ msecs_to_jiffies(pdata->poll_delay));
}
static irqreturn_t mma8x5x_interrupt(int vec, void *data)
@@ -577,12 +570,18 @@
dev_err(&client->dev, "change device state failed!");
goto err_failed;
}
+
+ schedule_delayed_work(&pdata->dwork,
+ msecs_to_jiffies(pdata->poll_delay));
+
pdata->active = MMA_ACTIVED;
dev_dbg(&client->dev, "%s:mma enable setting active.\n",
__func__);
}
} else if (enable == 0) {
if (pdata->active == MMA_ACTIVED) {
+ cancel_delayed_work_sync(&pdata->dwork);
+
val = i2c_smbus_read_byte_data(client,
MMA8X5X_CTRL_REG1);
if (val < 0) {
@@ -616,7 +615,7 @@
static ssize_t mma8x5x_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mma8x5x_data *pdata = drv_data;
+ struct mma8x5x_data *pdata = dev_get_drvdata(dev);
struct i2c_client *client;
u8 val;
int enable;
@@ -641,7 +640,7 @@
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct mma8x5x_data *pdata = drv_data;
+ struct mma8x5x_data *pdata = dev_get_drvdata(dev);
struct i2c_client *client;
int ret;
unsigned long enable;
@@ -663,7 +662,7 @@
static ssize_t mma8x5x_position_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mma8x5x_data *pdata = drv_data;
+ struct mma8x5x_data *pdata = dev_get_drvdata(dev);
int position = 0;
if (!pdata) {
@@ -680,7 +679,7 @@
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct mma8x5x_data *pdata = drv_data;
+ struct mma8x5x_data *pdata = dev_get_drvdata(dev);
int position;
int ret;
@@ -714,7 +713,6 @@
} else {
mutex_lock(&pdata->data_lock);
pdata->poll_delay = delay_ms;
- pdata->poll_dev->poll_interval = pdata->poll_delay;
mutex_unlock(&pdata->data_lock);
}
@@ -724,7 +722,7 @@
static ssize_t mma8x5x_poll_delay_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mma8x5x_data *pdata = drv_data;
+ struct mma8x5x_data *pdata = dev_get_drvdata(dev);
if (!pdata) {
dev_err(dev, "Invalid driver private data!");
@@ -738,7 +736,7 @@
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct mma8x5x_data *pdata = drv_data;
+ struct mma8x5x_data *pdata = dev_get_drvdata(dev);
int delay;
int ret;
@@ -825,7 +823,6 @@
struct input_dev *idev;
struct mma8x5x_data *pdata;
struct i2c_adapter *adapter;
- struct input_polled_dev *poll_dev;
adapter = to_i2c_adapter(client->dev.parent);
/* power on the device */
result = mma8x5x_config_regulator(client, 1);
@@ -836,7 +833,7 @@
I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA);
if (!result)
- goto err_out;
+ goto err_check_id;
chip_id = i2c_smbus_read_byte_data(client, MMA8X5X_WHO_AM_I);
@@ -846,14 +843,14 @@
chip_id, MMA8451_ID, MMA8452_ID, MMA8453_ID,
MMA8652_ID, MMA8653_ID);
result = -EINVAL;
- goto err_out;
+ goto err_check_id;
}
/* set the private data */
pdata = kzalloc(sizeof(struct mma8x5x_data), GFP_KERNEL);
if (!pdata) {
result = -ENOMEM;
dev_err(&client->dev, "alloc data memory error!\n");
- goto err_out;
+ goto err_check_id;
}
if (client->dev.of_node) {
@@ -867,12 +864,10 @@
}
/* Initialize the MMA8X5X chip */
- drv_data = pdata;
pdata->client = client;
pdata->chip_id = chip_id;
pdata->mode = MODE_2G;
- pdata->poll_delay = POLL_STOP_TIME;
- pdata->poll_dev = NULL;
+ pdata->poll_delay = POLL_INTERVAL;
mutex_init(&pdata->data_lock);
i2c_set_clientdata(client, pdata);
@@ -894,29 +889,9 @@
if (result) {
dev_err(&client->dev,
"set_direction for irq gpio failed\n");
- goto err_set_direction;
+ goto err_set_gpio_direction;
}
}
- idev = input_allocate_device();
- if (!idev) {
- result = -ENOMEM;
- dev_err(&client->dev, "alloc input device failed!\n");
- goto err_alloc_poll_device;
- }
- input_set_drvdata(idev, pdata);
- idev->name = ACCEL_INPUT_DEV_NAME;
- idev->uniq = mma8x5x_id2name(pdata->chip_id);
- idev->id.bustype = BUS_I2C;
- idev->evbit[0] = BIT_MASK(EV_ABS);
- input_set_abs_params(idev, ABS_X, -0x7fff, 0x7fff, 0, 0);
- input_set_abs_params(idev, ABS_Y, -0x7fff, 0x7fff, 0, 0);
- input_set_abs_params(idev, ABS_Z, -0x7fff, 0x7fff, 0, 0);
- result = input_register_device(idev);
- if (result) {
- dev_err(&client->dev, "register input device failed!\n");
- goto err_register_device;
- }
- pdata->idev = idev;
device_init_wakeup(&client->dev, true);
enable_irq_wake(client->irq);
result = request_threaded_irq(client->irq, NULL,
@@ -930,34 +905,29 @@
}
mma8x5x_device_int_init(client);
} else {
- /* create the input poll device */
- poll_dev = input_allocate_polled_device();
- if (!poll_dev) {
- result = -ENOMEM;
- dev_err(&client->dev, "alloc poll device failed!\n");
- goto err_alloc_poll_device;
- }
- pdata->poll_dev = poll_dev;
- pdata->idev = NULL;
- poll_dev->poll = mma8x5x_dev_poll;
- poll_dev->poll_interval = POLL_STOP_TIME;
- poll_dev->poll_interval_min = POLL_INTERVAL_MIN;
- poll_dev->poll_interval_max = POLL_INTERVAL_MAX;
- poll_dev->private = pdata;
- idev = poll_dev->input;
- idev->name = ACCEL_INPUT_DEV_NAME;
- idev->uniq = mma8x5x_id2name(pdata->chip_id);
- idev->id.bustype = BUS_I2C;
- idev->evbit[0] = BIT_MASK(EV_ABS);
- input_set_abs_params(idev, ABS_X, -0x7fff, 0x7fff, 0, 0);
- input_set_abs_params(idev, ABS_Y, -0x7fff, 0x7fff, 0, 0);
- input_set_abs_params(idev, ABS_Z, -0x7fff, 0x7fff, 0, 0);
- result = input_register_polled_device(pdata->poll_dev);
- if (result) {
- dev_err(&client->dev, "register poll device failed!\n");
- goto err_register_device;
- }
+ INIT_DELAYED_WORK(&pdata->dwork, mma8x5x_dev_poll);
}
+ idev = input_allocate_device();
+ if (!idev) {
+ result = -ENOMEM;
+ dev_err(&client->dev, "alloc input device failed!\n");
+ goto err_alloc_poll_device;
+ }
+ input_set_drvdata(idev, pdata);
+ idev->name = ACCEL_INPUT_DEV_NAME;
+ idev->uniq = mma8x5x_id2name(pdata->chip_id);
+ idev->id.bustype = BUS_I2C;
+ idev->evbit[0] = BIT_MASK(EV_ABS);
+ input_set_abs_params(idev, ABS_X, -0x7fff, 0x7fff, 0, 0);
+ input_set_abs_params(idev, ABS_Y, -0x7fff, 0x7fff, 0, 0);
+ input_set_abs_params(idev, ABS_Z, -0x7fff, 0x7fff, 0, 0);
+ result = input_register_device(idev);
+ if (result) {
+ dev_err(&client->dev, "register input device failed!\n");
+ goto err_register_device;
+ }
+ pdata->idev = idev;
+
result = sysfs_create_group(&idev->dev.kobj, &mma8x5x_attr_group);
if (result) {
dev_err(&client->dev, "create device file failed!\n");
@@ -983,23 +953,20 @@
err_create_class_sysfs:
sysfs_remove_group(&idev->dev.kobj, &mma8x5x_attr_group);
err_create_sysfs:
- input_unregister_polled_device(pdata->poll_dev);
+ input_unregister_device(idev);
+err_register_device:
+ input_free_device(idev);
+err_alloc_poll_device:
err_register_irq:
if (pdata->use_int)
device_init_wakeup(&client->dev, false);
-err_register_device:
- if (pdata->use_int)
- input_free_device(idev);
- else
- input_free_polled_device(pdata->poll_dev);
-err_alloc_poll_device:
-err_set_direction:
+err_set_gpio_direction:
if (gpio_is_valid(pdata->int_pin) && pdata->use_int)
gpio_free(pdata->int_pin);
err_request_gpio:
err_parse_dt:
kfree(pdata);
-err_out:
+err_check_id:
mma8x5x_config_regulator(client, 0);
err_power_on:
return result;
@@ -1007,14 +974,22 @@
static int __devexit mma8x5x_remove(struct i2c_client *client)
{
struct mma8x5x_data *pdata = i2c_get_clientdata(client);
- struct input_polled_dev *poll_dev;
+ struct input_dev *idev;
+
mma8x5x_device_stop(client);
if (pdata) {
- poll_dev = pdata->poll_dev;
- input_unregister_polled_device(poll_dev);
- input_free_polled_device(poll_dev);
+ idev = pdata->idev;
+ sysfs_remove_group(&idev->dev.kobj, &mma8x5x_attr_group);
+ if (pdata->use_int) {
+ device_init_wakeup(&client->dev, false);
+ if (gpio_is_valid(pdata->int_pin))
+ gpio_free(pdata->int_pin);
+ }
+ input_unregister_device(idev);
+ input_free_device(idev);
kfree(pdata);
}
+ mma8x5x_config_regulator(client, 0);
return 0;
}
@@ -1026,8 +1001,10 @@
if (pdata->use_int && pdata->active == MMA_ACTIVED)
return 0;
- if (pdata->active == MMA_ACTIVED)
+ if (pdata->active == MMA_ACTIVED) {
mma8x5x_device_stop(client);
+ cancel_delayed_work_sync(&pdata->dwork);
+ }
if (pdata->active & MMA_SHUTTEDDOWN)
return 0;
if (!mma8x5x_config_regulator(client, 0))
@@ -1058,6 +1035,8 @@
if (pdata->active == MMA_ACTIVED) {
val = i2c_smbus_read_byte_data(client, MMA8X5X_CTRL_REG1);
i2c_smbus_write_byte_data(client, MMA8X5X_CTRL_REG1, val|0x01);
+ schedule_delayed_work(&pdata->dwork,
+ msecs_to_jiffies(pdata->poll_delay));
}
return 0;
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index d87520f..eba5ca8 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -691,7 +691,7 @@
{
int rc;
u8 val;
- int duty_us;
+ int duty_us, duty_ns, period_us;
if (led->cdev.brightness) {
if (led->cdev.brightness < led->mpp_cfg->min_brightness) {
@@ -710,13 +710,23 @@
}
}
if (led->mpp_cfg->pwm_mode == PWM_MODE) {
- pwm_disable(led->mpp_cfg->pwm_cfg->pwm_dev);
- duty_us = (led->mpp_cfg->pwm_cfg->pwm_period_us *
- led->cdev.brightness) / LED_FULL;
/*config pwm for brightness scaling*/
- rc = pwm_config_us(led->mpp_cfg->pwm_cfg->pwm_dev,
+ period_us = led->mpp_cfg->pwm_cfg->pwm_period_us;
+ if (period_us > INT_MAX / NSEC_PER_USEC) {
+ duty_us = (period_us * led->cdev.brightness) /
+ LED_FULL;
+ rc = pwm_config_us(
+ led->mpp_cfg->pwm_cfg->pwm_dev,
duty_us,
- led->mpp_cfg->pwm_cfg->pwm_period_us);
+ period_us);
+ } else {
+ duty_ns = ((period_us * NSEC_PER_USEC) /
+ LED_FULL) * led->cdev.brightness;
+ rc = pwm_config(
+ led->mpp_cfg->pwm_cfg->pwm_dev,
+ duty_ns,
+ period_us * NSEC_PER_USEC);
+ }
if (rc < 0) {
dev_err(&led->spmi_dev->dev, "Failed to " \
"configure pwm for new values\n");
@@ -1219,8 +1229,8 @@
static int qpnp_kpdbl_set(struct qpnp_led_data *led)
{
- int duty_us;
int rc;
+ int duty_us, duty_ns, period_us;
if (led->cdev.brightness) {
if (!led->kpdbl_cfg->pwm_cfg->blinking)
@@ -1237,11 +1247,22 @@
}
if (led->kpdbl_cfg->pwm_cfg->mode == PWM_MODE) {
- duty_us = (led->kpdbl_cfg->pwm_cfg->pwm_period_us *
- led->cdev.brightness) / KPDBL_MAX_LEVEL;
- rc = pwm_config_us(led->kpdbl_cfg->pwm_cfg->pwm_dev,
+ period_us = led->kpdbl_cfg->pwm_cfg->pwm_period_us;
+ if (period_us > INT_MAX / NSEC_PER_USEC) {
+ duty_us = (period_us * led->cdev.brightness) /
+ KPDBL_MAX_LEVEL;
+ rc = pwm_config_us(
+ led->kpdbl_cfg->pwm_cfg->pwm_dev,
duty_us,
- led->kpdbl_cfg->pwm_cfg->pwm_period_us);
+ period_us);
+ } else {
+ duty_ns = ((period_us * NSEC_PER_USEC) /
+ KPDBL_MAX_LEVEL) * led->cdev.brightness;
+ rc = pwm_config(
+ led->kpdbl_cfg->pwm_cfg->pwm_dev,
+ duty_ns,
+ period_us * NSEC_PER_USEC);
+ }
if (rc < 0) {
dev_err(&led->spmi_dev->dev, "pwm config failed\n");
return rc;
@@ -1262,7 +1283,7 @@
if (led->kpdbl_cfg->always_on) {
rc = pwm_config_us(led->kpdbl_cfg->pwm_cfg->pwm_dev, 0,
- led->kpdbl_cfg->pwm_cfg->pwm_period_us);
+ led->kpdbl_cfg->pwm_cfg->pwm_period_us);
if (rc < 0) {
dev_err(&led->spmi_dev->dev,
"pwm config failed\n");
@@ -1300,19 +1321,30 @@
static int qpnp_rgb_set(struct qpnp_led_data *led)
{
- int duty_us;
int rc;
+ int duty_us, duty_ns, period_us;
if (led->cdev.brightness) {
if (!led->rgb_cfg->pwm_cfg->blinking)
led->rgb_cfg->pwm_cfg->mode =
led->rgb_cfg->pwm_cfg->default_mode;
if (led->rgb_cfg->pwm_cfg->mode == PWM_MODE) {
- duty_us = (led->rgb_cfg->pwm_cfg->pwm_period_us *
- led->cdev.brightness) / LED_FULL;
- rc = pwm_config_us(led->rgb_cfg->pwm_cfg->pwm_dev,
+ period_us = led->rgb_cfg->pwm_cfg->pwm_period_us;
+ if (period_us > INT_MAX / NSEC_PER_USEC) {
+ duty_us = (period_us * led->cdev.brightness) /
+ LED_FULL;
+ rc = pwm_config_us(
+ led->rgb_cfg->pwm_cfg->pwm_dev,
duty_us,
- led->rgb_cfg->pwm_cfg->pwm_period_us);
+ period_us);
+ } else {
+ duty_ns = ((period_us * NSEC_PER_USEC) /
+ LED_FULL) * led->cdev.brightness;
+ rc = pwm_config(
+ led->rgb_cfg->pwm_cfg->pwm_dev,
+ duty_ns,
+ period_us * NSEC_PER_USEC);
+ }
if (rc < 0) {
dev_err(&led->spmi_dev->dev,
"pwm config failed\n");
diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c
index 16141b5..ab21404 100644
--- a/drivers/md/dm-req-crypt.c
+++ b/drivers/md/dm-req-crypt.c
@@ -41,7 +41,7 @@
#define MAX_ENCRYPTION_BUFFERS 1
#define MIN_IOS 16
#define MIN_POOL_PAGES 32
-#define KEY_SIZE_XTS 32
+#define KEY_SIZE_XTS 64
#define AES_XTS_IV_LEN 16
#define DM_REQ_CRYPT_ERROR -1
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 4f39838..7d97c26 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -139,7 +139,9 @@
unsigned int p;
for (p = 0; p < entity->num_pads; p++) {
- struct media_pad_desc pad = {0};
+ struct media_pad_desc pad;
+
+ memset(&pad, 0, sizeof(pad));
media_device_kpad_to_upad(&entity->pads[p], &pad);
if (copy_to_user(&links.pads[p], &pad, sizeof(pad)))
return -EFAULT;
@@ -157,6 +159,7 @@
if (entity->links[l].source->entity != entity)
continue;
+ memset(&link, 0, sizeof(link));
media_device_kpad_to_upad(entity->links[l].source,
&link.source);
media_device_kpad_to_upad(entity->links[l].sink,
diff --git a/drivers/media/platform/msm/camera_v2/camera/camera.c b/drivers/media/platform/msm/camera_v2/camera/camera.c
index 3f7ba6b..43cdcbb 100644
--- a/drivers/media/platform/msm/camera_v2/camera/camera.c
+++ b/drivers/media/platform/msm/camera_v2/camera/camera.c
@@ -673,12 +673,22 @@
return rc;
}
+#ifdef CONFIG_COMPAT
+long camera_v4l2_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENOIOCTLCMD;
+}
+#endif
static struct v4l2_file_operations camera_v4l2_fops = {
.owner = THIS_MODULE,
.open = camera_v4l2_open,
.poll = camera_v4l2_poll,
.release = camera_v4l2_close,
.ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = camera_v4l2_compat_ioctl,
+#endif
};
int camera_init_v4l2(struct device *dev, unsigned int *session)
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index d33d34b..334a293 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -459,6 +459,7 @@
uint8_t vt_enable;
void __iomem *p_avtimer_msw;
void __iomem *p_avtimer_lsw;
+ uint8_t ignore_error;
};
#endif
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 044f6f1..eb05015 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -176,15 +176,15 @@
uint32_t irq_status0, uint32_t irq_status1,
struct msm_isp_timestamp *ts)
{
+ uint32_t cnt;
if (!(irq_status0 & 0x1F))
return;
if (irq_status0 & BIT(0)) {
ISP_DBG("%s: SOF IRQ\n", __func__);
- if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
- && vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count == 0) {
- msm_isp_sof_notify(vfe_dev, VFE_PIX_0, ts);
+ cnt = vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count;
+ if (cnt > 0) {
+ msm_isp_sof_notify(vfe_dev, VFE_RAW_0, ts);
if (vfe_dev->axi_data.stream_update)
msm_isp_axi_stream_update(vfe_dev);
msm_isp_update_framedrop_reg(vfe_dev);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index d53d7f6..e443e9a 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -370,15 +370,16 @@
uint32_t irq_status0, uint32_t irq_status1,
struct msm_isp_timestamp *ts)
{
+ int cnt;
+
if (!(irq_status0 & 0xF))
return;
if (irq_status0 & (1 << 0)) {
ISP_DBG("%s: SOF IRQ\n", __func__);
- if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
- && vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count == 0) {
- msm_isp_sof_notify(vfe_dev, VFE_PIX_0, ts);
+ cnt = vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count;
+ if (cnt > 0) {
+ msm_isp_sof_notify(vfe_dev, VFE_RAW_0, ts);
if (vfe_dev->axi_data.stream_update)
msm_isp_axi_stream_update(vfe_dev);
msm_isp_update_framedrop_reg(vfe_dev);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 0264d6d..206620c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -81,6 +81,10 @@
return rc;
switch (stream_cfg_cmd->output_format) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_SBGGR8:
case V4L2_PIX_FMT_SGBRG8:
case V4L2_PIX_FMT_SGRBG8:
@@ -167,6 +171,10 @@
uint32_t size = 0;
struct msm_vfe_axi_plane_cfg *plane_cfg = stream_info->plane_cfg;
switch (stream_info->output_format) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_SBGGR8:
case V4L2_PIX_FMT_SGBRG8:
case V4L2_PIX_FMT_SGRBG8:
@@ -444,7 +452,7 @@
sof_event.frame_id = vfe_dev->axi_data.src_info[frame_src].frame_id;
sof_event.timestamp = ts->event_time;
sof_event.mono_timestamp = ts->buf_time;
- msm_isp_send_event(vfe_dev, ISP_EVENT_SOF, &sof_event);
+ msm_isp_send_event(vfe_dev, ISP_EVENT_SOF + frame_src, &sof_event);
}
void msm_isp_calculate_framedrop(
@@ -1215,6 +1223,8 @@
msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info);
stream_info->state = ACTIVE;
}
+ vfe_dev->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].frame_id = 0;
}
msm_isp_update_stream_bandwidth(vfe_dev);
vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev, wm_reload_mask);
@@ -1228,16 +1238,6 @@
update_camif_state(vfe_dev, camif_update);
}
- if (vfe_dev->axi_data.src_info[VFE_RAW_0].raw_stream_count > 0) {
- vfe_dev->axi_data.src_info[VFE_RAW_0].frame_id = 0;
- }
- else if (vfe_dev->axi_data.src_info[VFE_RAW_1].raw_stream_count > 0) {
- vfe_dev->axi_data.src_info[VFE_RAW_1].frame_id = 0;
- }
- else if (vfe_dev->axi_data.src_info[VFE_RAW_2].raw_stream_count > 0) {
- vfe_dev->axi_data.src_info[VFE_RAW_2].frame_id = 0;
- }
-
if (wait_for_complete)
rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
@@ -1308,11 +1308,13 @@
msm_isp_update_rdi_output_count(vfe_dev, stream_cfg_cmd);
cur_stream_cnt = msm_isp_get_curr_stream_cnt(vfe_dev);
if (cur_stream_cnt == 0) {
+ vfe_dev->ignore_error = 1;
if (camif_update == DISABLE_CAMIF_IMMEDIATELY) {
vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev);
}
vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, ISP_RST_SOFT);
vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
+ vfe_dev->ignore_error = 0;
}
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
@@ -1389,7 +1391,10 @@
return -EINVAL;
}
if (stream_info->state == ACTIVE &&
- stream_info->stream_type == BURST_STREAM) {
+ stream_info->stream_type == BURST_STREAM &&
+ (1 != update_cmd->num_streams ||
+ UPDATE_STREAM_FRAMEDROP_PATTERN !=
+ update_cmd->update_type)) {
pr_err("%s: Cannot update active burst stream\n",
__func__);
return -EINVAL;
@@ -1416,7 +1421,10 @@
msm_isp_get_framedrop_period(
update_info->skip_pattern);
stream_info->runtime_init_frame_drop = 0;
- stream_info->framedrop_pattern = 0x1;
+ if (update_info->skip_pattern == SKIP_ALL)
+ stream_info->framedrop_pattern = 0x0;
+ else
+ stream_info->framedrop_pattern = 0x1;
stream_info->framedrop_period = framedrop_period - 1;
vfe_dev->hw_info->vfe_ops.axi_ops.
cfg_framedrop(vfe_dev, stream_info);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 057e87f..ffe0b9c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -806,6 +806,12 @@
case V4L2_PIX_FMT_NV61:
val = CAL_WORD(pixel_per_line, 1, 8);
break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ val = CAL_WORD(pixel_per_line, 2, 8);
+ break;
/*TD: Add more image format*/
default:
msm_isp_print_fourcc_error(__func__, output_format);
@@ -995,7 +1001,8 @@
error_mask1 &= irq_status1;
irq_status0 &= ~error_mask0;
irq_status1 &= ~error_mask1;
- if ((error_mask0 != 0) || (error_mask1 != 0))
+ if (!vfe_dev->ignore_error &&
+ ((error_mask0 != 0) || (error_mask1 != 0)))
msm_isp_update_error_info(vfe_dev, error_mask0, error_mask1);
if ((irq_status0 == 0) && (irq_status1 == 0) &&
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
index 981c210..8662657 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -457,9 +457,8 @@
break;
}
for (i = 0; i < csid_params.lut_params.num_cid; i++) {
- vc_cfg = kzalloc(csid_params.lut_params.num_cid *
- sizeof(struct msm_camera_csid_vc_cfg),
- GFP_KERNEL);
+ vc_cfg = kzalloc(sizeof(struct msm_camera_csid_vc_cfg),
+ GFP_KERNEL);
if (!vc_cfg) {
pr_err("%s: %d failed\n", __func__, __LINE__);
for (i--; i >= 0; i--)
@@ -469,8 +468,7 @@
}
if (copy_from_user(vc_cfg,
(void *)csid_params.lut_params.vc_cfg[i],
- (csid_params.lut_params.num_cid *
- sizeof(struct msm_camera_csid_vc_cfg)))) {
+ sizeof(struct msm_camera_csid_vc_cfg))) {
pr_err("%s: %d failed\n", __func__, __LINE__);
kfree(vc_cfg);
for (i--; i >= 0; i--)
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index ea7d670..66d6878 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -693,11 +693,20 @@
int rc = 0;
int i;
struct hal_buffer_requirements *buff_req_buffer;
+
if (!inst || !f || !inst->core || !inst->core->device) {
dprintk(VIDC_ERR,
"Invalid input, inst = %p, format = %p\n", inst, f);
return -EINVAL;
}
+
+ rc = msm_comm_try_get_bufreqs(inst);
+ if (rc) {
+ dprintk(VIDC_ERR, "Getting buffer requirements failed: %d\n",
+ rc);
+ return rc;
+ }
+
hdev = inst->core->device;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
fmt = inst->fmts[CAPTURE_PORT];
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index d8b608437..18432dd 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -116,6 +116,7 @@
"Extradata input crop",
"Extradata digital zoom",
"Extradata aspect ratio",
+ "Extradata LTR",
"Extradata macroblock metadata",
};
@@ -2614,6 +2615,14 @@
"Invalid input, inst = %p, format = %p\n", inst, f);
return -EINVAL;
}
+
+ rc = msm_comm_try_get_bufreqs(inst);
+ if (rc) {
+ dprintk(VIDC_WARN, "Getting new buffer requirements failed: %d\n",
+ rc);
+ return rc;
+ }
+
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
fmt = inst->fmts[CAPTURE_PORT];
height = inst->prop.height[CAPTURE_PORT];
@@ -2643,8 +2652,16 @@
buff_req_buffer->buffer_size : 0;
}
for (i = 0; i < fmt->num_planes; ++i) {
- inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
- f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ inst->bufq[OUTPUT_PORT].vb2_bufq.
+ plane_sizes[i] =
+ f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ } else if (f->type ==
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ inst->bufq[CAPTURE_PORT].vb2_bufq.
+ plane_sizes[i] =
+ f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
}
} else {
dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index fe0a42d..9dbecfb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -814,6 +814,10 @@
int msm_vidc_encoder_cmd(void *instance, struct v4l2_encoder_cmd *enc)
{
struct msm_vidc_inst *inst = instance;
+ if (!inst || !inst->core || !enc) {
+ dprintk(VIDC_ERR, "%s invalid params\n", __func__);
+ return -EINVAL;
+ }
if (inst->session_type == MSM_VIDC_ENCODER)
return msm_venc_cmd(instance, enc);
return -EINVAL;
@@ -822,6 +826,10 @@
int msm_vidc_decoder_cmd(void *instance, struct v4l2_decoder_cmd *dec)
{
struct msm_vidc_inst *inst = instance;
+ if (!inst || !inst->core || !dec) {
+ dprintk(VIDC_ERR, "%s invalid params\n", __func__);
+ return -EINVAL;
+ }
if (inst->session_type == MSM_VIDC_DECODER)
return msm_vdec_cmd(instance, dec);
return -EINVAL;
@@ -1335,7 +1343,6 @@
mutex_lock(&inst->lock);
}
mutex_unlock(&inst->lock);
- msm_smem_delete_client(inst->mem_client);
debugfs_remove_recursive(inst->debugfs_root);
}
}
@@ -1393,7 +1400,9 @@
for (i = 0; i < MAX_PORT_NUM; i++)
vb2_queue_release(&inst->bufq[i].vb2_bufq);
+ msm_smem_delete_client(inst->mem_client);
pr_info(VIDC_DBG_TAG "Closed video instance: %p\n", VIDC_INFO, inst);
kfree(inst);
+
return 0;
}
diff --git a/drivers/media/platform/msm/vidc/q6_hfi.c b/drivers/media/platform/msm/vidc/q6_hfi.c
index e70635d..5404af6 100644
--- a/drivers/media/platform/msm/vidc/q6_hfi.c
+++ b/drivers/media/platform/msm/vidc/q6_hfi.c
@@ -554,6 +554,10 @@
new_session = (struct hal_session *)
kzalloc(sizeof(struct hal_session), GFP_KERNEL);
+ if (!new_session) {
+ dprintk(VIDC_ERR, "new session fail: Out of memory\n");
+ return NULL;
+ }
new_session->session_id = (u32) session_id;
if (session_type == 1)
new_session->is_decoder = 0;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 4fcd20e..008407d 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -527,6 +527,25 @@
return rc;
}
+static void venus_hfi_set_registers(struct venus_hfi_device *device)
+{
+ struct reg_set *reg_set;
+ int i;
+
+ if (!device->res) {
+ dprintk(VIDC_ERR,
+ "device resources null, cannot set registers\n");
+ return;
+ }
+
+ reg_set = &device->res->reg_set;
+ for (i = 0; i < reg_set->count; i++) {
+ venus_hfi_write_register(device,
+ reg_set->reg_tbl[i].reg,
+ reg_set->reg_tbl[i].value, 0);
+ }
+}
+
static int venus_hfi_core_start_cpu(struct venus_hfi_device *device)
{
u32 ctrl_status = 0, count = 0, rc = 0;
@@ -962,6 +981,7 @@
device->power_enabled = 0;
--device->pwr_cnt;
+ dprintk(VIDC_INFO, "entering power collapse\n");
already_disabled:
return rc;
}
@@ -1001,18 +1021,45 @@
goto err_enable_clk;
}
+
+ /*
+ * Re-program all of the registers that get reset as a result of
+ * regulator_disable() and _enable()
+ */
+ venus_hfi_set_registers(device);
+
+ venus_hfi_write_register(device, VIDC_UC_REGION_ADDR,
+ (u32)device->iface_q_table.align_device_addr, 0);
+ venus_hfi_write_register(device,
+ VIDC_UC_REGION_SIZE, SHARED_QSIZE, 0);
+ venus_hfi_write_register(device, VIDC_CPU_CS_SCIACMDARG2,
+ (u32)device->iface_q_table.align_device_addr,
+ device->iface_q_table.align_virtual_addr);
+
+ if (!IS_ERR_OR_NULL(device->sfr.align_device_addr))
+ venus_hfi_write_register(device, VIDC_SFR_ADDR,
+ (u32)device->sfr.align_device_addr, 0);
+ if (!IS_ERR_OR_NULL(device->qdss.align_device_addr))
+ venus_hfi_write_register(device, VIDC_MMAP_ADDR,
+ (u32)device->qdss.align_device_addr, 0);
+
+ /* Reboot the firmware */
rc = venus_hfi_tzbsp_set_video_state(TZBSP_VIDEO_STATE_RESUME);
if (rc) {
dprintk(VIDC_ERR, "Failed to resume video core %d\n", rc);
goto err_set_video_state;
}
+
+ /* Wait for boot completion */
rc = venus_hfi_reset_core(device);
if (rc) {
dprintk(VIDC_ERR, "Failed to reset venus core");
goto err_reset_core;
}
+
device->power_enabled = 1;
++device->pwr_cnt;
+ dprintk(VIDC_INFO, "resuming from power collapse\n");
return rc;
err_reset_core:
venus_hfi_tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
@@ -1497,25 +1544,6 @@
return -ENOMEM;
}
-static void venus_hfi_set_registers(struct venus_hfi_device *device)
-{
- struct reg_set *reg_set;
- int i;
-
- if (!device->res) {
- dprintk(VIDC_ERR,
- "device resources null, cannot set registers\n");
- return;
- }
-
- reg_set = &device->res->reg_set;
- for (i = 0; i < reg_set->count; i++) {
- venus_hfi_write_register(device,
- reg_set->reg_tbl[i].reg,
- reg_set->reg_tbl[i].value, 0);
- }
-}
-
static int venus_hfi_sys_set_debug(struct venus_hfi_device *device, int debug)
{
u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE];
@@ -1561,9 +1589,6 @@
dev->intr_status = 0;
INIT_LIST_HEAD(&dev->sess_head);
- mutex_init(&dev->read_lock);
- mutex_init(&dev->write_lock);
- mutex_init(&dev->session_lock);
venus_hfi_set_registers(dev);
if (!dev->hal_client) {
@@ -2028,6 +2053,10 @@
new_session = (struct hal_session *)
kzalloc(sizeof(struct hal_session), GFP_KERNEL);
+ if (!new_session) {
+ dprintk(VIDC_ERR, "new session fail: Out of memory\n");
+ return NULL;
+ }
new_session->session_id = (u32) session_id;
if (session_type == 1)
new_session->is_decoder = 0;
@@ -3301,7 +3330,6 @@
__func__, device);
return -EINVAL;
}
- mutex_init(&device->clk_pwr_lock);
device->clk_gating_level = VCODEC_CLK;
rc = venus_hfi_iommu_attach(device);
if (rc) {
@@ -3574,6 +3602,11 @@
goto error_createq_pm;
}
+ mutex_init(&hdevice->read_lock);
+ mutex_init(&hdevice->write_lock);
+ mutex_init(&hdevice->session_lock);
+ mutex_init(&hdevice->clk_pwr_lock);
+
if (hal_ctxt.dev_count == 0)
INIT_LIST_HEAD(&hal_ctxt.dev_head);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 1839d07..efafa23 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -164,13 +164,14 @@
uint32_t qsee_perf_client;
struct qseecom_clk qsee;
struct qseecom_clk ce_drv;
- struct cdev cdev;
bool support_bus_scaling;
uint32_t cumulative_mode;
enum qseecom_bandwidth_request_mode current_mode;
struct timer_list bw_scale_down_timer;
struct work_struct bw_inactive_req_ws;
+ struct cdev cdev;
+ bool timer_running;
};
struct qseecom_client_handle {
@@ -490,6 +491,7 @@
__qseecom_set_msm_bus_request(INACTIVE);
pr_debug("current_mode = %d, cumulative_mode = %d\n",
qseecom.current_mode, qseecom.cumulative_mode);
+ qseecom.timer_running = false;
mutex_unlock(&qsee_bw_mutex);
mutex_unlock(&app_access_lock);
return;
@@ -501,6 +503,25 @@
return;
}
+static void __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
+{
+ struct qseecom_clk *qclk;
+ mutex_lock(&clk_access_lock);
+ if (ce == CLK_QSEE)
+ qclk = &qseecom.qsee;
+ else
+ qclk = &qseecom.ce_drv;
+
+ if (qclk->clk_access_cnt == 0) {
+ mutex_unlock(&clk_access_lock);
+ return;
+ }
+ qclk->clk_access_cnt--;
+ mutex_unlock(&clk_access_lock);
+ return;
+}
+
+
static int qseecom_scale_bus_bandwidth_timer(uint32_t mode, uint32_t duration)
{
int32_t ret = 0;
@@ -515,13 +536,12 @@
} else {
request_mode = mode;
}
+
__qseecom_set_msm_bus_request(request_mode);
-
- del_timer_sync(&(qseecom.bw_scale_down_timer));
- qseecom.bw_scale_down_timer.expires = jiffies +
- msecs_to_jiffies(duration);
- add_timer(&(qseecom.bw_scale_down_timer));
-
+ if (qseecom.timer_running) {
+ __qseecom_decrease_clk_ref_count(CLK_QSEE);
+ del_timer_sync(&(qseecom.bw_scale_down_timer));
+ }
mutex_unlock(&qsee_bw_mutex);
return ret;
}
@@ -582,6 +602,14 @@
{
if (!qseecom.support_bus_scaling)
qsee_disable_clock_vote(data, CLK_SFPB);
+ else {
+ mutex_lock(&qsee_bw_mutex);
+ qseecom.bw_scale_down_timer.expires = jiffies +
+ msecs_to_jiffies(QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
+ add_timer(&(qseecom.bw_scale_down_timer));
+ qseecom.timer_running = true;
+ mutex_unlock(&qsee_bw_mutex);
+ }
return;
}
@@ -1061,16 +1089,13 @@
return -EINVAL;
}
- if (((uint32_t)req_ptr->cmd_req_buf <
- data_ptr->client.user_virt_sb_base)
- || ((uint32_t)req_ptr->cmd_req_buf >=
- (data_ptr->client.user_virt_sb_base +
- data_ptr->client.sb_length))) {
- pr_err("cmd buffer address not within shared bufffer\n");
+ /* Clients need to ensure req_buf is at base offset of shared buffer */
+ if ((uint32_t)req_ptr->cmd_req_buf !=
+ data_ptr->client.user_virt_sb_base) {
+ pr_err("cmd buf not pointing to base offset of shared buffer\n");
return -EINVAL;
}
-
if (((uint32_t)req_ptr->resp_buf < data_ptr->client.user_virt_sb_base)
|| ((uint32_t)req_ptr->resp_buf >=
(data_ptr->client.user_virt_sb_base +
@@ -1089,8 +1114,6 @@
(uint32_t)req_ptr->resp_buf));
send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
- pr_debug("CMD ID (%x), KEY_TYPE (%d)\n", send_svc_ireq_ptr->qsee_cmd_id,
- ((struct qseecom_rpmb_provision_key *)req_ptr->cmd_req_buf)->key_type);
return ret;
}
@@ -1115,6 +1138,21 @@
return -EINVAL;
}
+ if (data->client.sb_virt == NULL) {
+ pr_err("sb_virt null\n");
+ return -EINVAL;
+ }
+
+ if (data->client.user_virt_sb_base == 0) {
+ pr_err("user_virt_sb_base is null\n");
+ return -EINVAL;
+ }
+
+ if (data->client.sb_length == 0) {
+ pr_err("sb_length is 0\n");
+ return -EINVAL;
+ }
+
data->type = QSEECOM_SECURE_SERVICE;
switch (req.cmd_id) {
@@ -1164,7 +1202,16 @@
if (!qseecom.support_bus_scaling) {
qsee_disable_clock_vote(data, CLK_DFAB);
qsee_disable_clock_vote(data, CLK_SFPB);
+ } else {
+ mutex_lock(&qsee_bw_mutex);
+ qseecom.bw_scale_down_timer.expires = jiffies +
+ msecs_to_jiffies(
+ QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+ add_timer(&(qseecom.bw_scale_down_timer));
+ qseecom.timer_running = true;
+ mutex_unlock(&qsee_bw_mutex);
}
+
goto exit;
}
@@ -1188,6 +1235,18 @@
ret = -EINVAL;
break;
}
+ if (!qseecom.support_bus_scaling) {
+ qsee_disable_clock_vote(data, CLK_DFAB);
+ qsee_disable_clock_vote(data, CLK_SFPB);
+ } else {
+ mutex_lock(&qsee_bw_mutex);
+ qseecom.bw_scale_down_timer.expires = jiffies +
+ msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+ add_timer(&(qseecom.bw_scale_down_timer));
+ qseecom.timer_running = true;
+ mutex_unlock(&qsee_bw_mutex);
+ }
+
exit:
return ret;
}
@@ -2024,6 +2083,14 @@
qseecom_scale_bus_bandwidth_timer(INACTIVE,
QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
ret = __qseecom_send_cmd(data, &req);
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ qseecom.bw_scale_down_timer.expires = jiffies +
+ msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+ add_timer(&(qseecom.bw_scale_down_timer));
+ qseecom.timer_running = true;
+ mutex_unlock(&qsee_bw_mutex);
+ }
atomic_dec(&data->ioctl_count);
mutex_unlock(&app_access_lock);
@@ -3148,6 +3215,15 @@
QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
atomic_inc(&data->ioctl_count);
ret = qseecom_send_cmd(data, argp);
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ qseecom.bw_scale_down_timer.expires = jiffies +
+ msecs_to_jiffies(
+ QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+ add_timer(&(qseecom.bw_scale_down_timer));
+ qseecom.timer_running = true;
+ mutex_unlock(&qsee_bw_mutex);
+ }
atomic_dec(&data->ioctl_count);
wake_up_all(&data->abort_wq);
mutex_unlock(&app_access_lock);
@@ -3171,7 +3247,15 @@
QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
atomic_inc(&data->ioctl_count);
ret = qseecom_send_modfd_cmd(data, argp);
- atomic_dec(&data->ioctl_count);
+ if (qseecom.support_bus_scaling) {
+ mutex_lock(&qsee_bw_mutex);
+ qseecom.bw_scale_down_timer.expires = jiffies +
+ msecs_to_jiffies(
+ QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+ add_timer(&(qseecom.bw_scale_down_timer));
+ qseecom.timer_running = true;
+ mutex_unlock(&qsee_bw_mutex);
+ }
+ atomic_dec(&data->ioctl_count);
wake_up_all(&data->abort_wq);
mutex_unlock(&app_access_lock);
if (ret)
@@ -3820,6 +3904,7 @@
/* register client for bus scaling */
if (pdev->dev.of_node) {
+ qseecom.pdev->of_node = pdev->dev.of_node;
qseecom.support_bus_scaling =
of_property_read_bool((&pdev->dev)->of_node,
"qcom,support-bus-scaling");
@@ -3924,6 +4009,7 @@
qseecom.bw_scale_down_timer.function =
qseecom_scale_bus_bandwidth_timer_callback;
}
+ qseecom.timer_running = false;
qseecom.qsee_perf_client = msm_bus_scale_register_client(
qseecom_platform_support);
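
Note: the qseecom hunks above stop cancelling the bandwidth scale-down timer unconditionally; a timer_running flag, protected by qsee_bw_mutex, records whether the timer is pending so del_timer_sync() runs only when there is something to cancel, and the timer is re-armed after each crypto command. A minimal userspace sketch of that guarded arm/cancel pattern follows; struct bw_ctl, arm_scale_down() and cancel_scale_down() are illustrative names, not part of the driver.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the driver's bandwidth-scaling state. */
struct bw_ctl {
        pthread_mutex_t lock;   /* plays the role of qsee_bw_mutex */
        bool timer_running;     /* mirrors qseecom.timer_running */
        unsigned long expires;  /* pretend timer expiry */
};

/* Re-arm the scale-down "timer" after a crypto command completes. */
static void arm_scale_down(struct bw_ctl *b, unsigned long timeout)
{
        pthread_mutex_lock(&b->lock);
        b->expires = timeout;           /* add_timer() equivalent */
        b->timer_running = true;
        pthread_mutex_unlock(&b->lock);
}

/* Cancel the timer only when one is actually pending. */
static void cancel_scale_down(struct bw_ctl *b)
{
        pthread_mutex_lock(&b->lock);
        if (b->timer_running) {         /* the guard the patch introduces */
                b->expires = 0;         /* del_timer_sync() equivalent */
                b->timer_running = false;
        }
        pthread_mutex_unlock(&b->lock);
}

int main(void)
{
        struct bw_ctl b = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

        cancel_scale_down(&b);          /* harmless: nothing pending */
        arm_scale_down(&b, 2000);       /* after a send_cmd finishes */
        cancel_scale_down(&b);          /* the next request cancels it */
        printf("timer_running=%d\n", b.timer_running);
        return 0;
}
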
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 9c6bef6..6de1cde 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2638,15 +2638,13 @@
struct mmc_host *host = card->host;
unsigned long flags;
+ if (req && !mq->mqrq_prev->req) {
+ mmc_rpm_hold(host, &card->dev);
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
if (mmc_bus_needs_resume(card->host)) {
mmc_resume_bus(card->host);
- mmc_blk_set_blksize(md, card);
}
#endif
-
- if (req && !mq->mqrq_prev->req) {
- mmc_rpm_hold(host, &card->dev);
/* claim host only for the first request */
mmc_claim_host(card->host);
if (card->ext_csd.bkops_en)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index c496077..1feb26b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -983,6 +983,10 @@
*/
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ if (mmc_bus_needs_resume(host))
+ mmc_resume_bus(host);
+#endif
__mmc_start_req(host, mrq);
mmc_wait_for_req_done(host, mrq);
}
@@ -2011,9 +2015,6 @@
host->bus_ops->resume(host);
}
- if (host->bus_ops->detect && !host->bus_dead)
- host->bus_ops->detect(host);
-
mmc_bus_put(host);
printk("%s: Deferred resume completed\n", mmc_hostname(host));
return 0;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 885d0d2..63952e7 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -340,6 +340,8 @@
card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
mmc_select_card_type(card);
+ card->ext_csd.raw_drive_strength = ext_csd[EXT_CSD_DRIVE_STRENGTH];
+
card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.raw_erase_timeout_mult =
ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 32f5220..822548e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1503,7 +1503,8 @@
mrq->cmd->error = -EIO;
if (mrq->data)
mrq->data->error = -EIO;
- tasklet_schedule(&host->finish_tasklet);
+ mmc_request_done(host->mmc, mrq);
+ sdhci_runtime_pm_put(host);
return;
}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 0fde9fc..40d3d9f 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1224,7 +1224,11 @@
mtd = open_mtd_device(p->name);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- goto out_detach;
+ ubi_err("cannot open mtd %s, error %d", p->name, err);
+ /* See comment below re-ubi_is_module(). */
+ if (ubi_is_module())
+ goto out_detach;
+ continue;
}
mutex_lock(&ubi_devices_mutex);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 7c1a9bf..444e4ff 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -712,6 +712,9 @@
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
dbg_wl("no WL needed: min used EC %d, max free EC %d",
e1->ec, e2->ec);
+
+ /* Give the unused PEB back */
+ wl_tree_add(e2, &ubi->free);
goto out_cancel;
}
paranoid_check_in_wl_tree(ubi, e1, &ubi->used);
diff --git a/drivers/net/usb/rmnet_usb_data.c b/drivers/net/usb/rmnet_usb_data.c
index b0db01e..84b3324 100644
--- a/drivers/net/usb/rmnet_usb_data.c
+++ b/drivers/net/usb/rmnet_usb_data.c
@@ -550,8 +550,7 @@
break;
default:
- dev_err(&unet->intf->dev, "[%s] error: "
- "rmnet_ioct called for unsupported cmd[%d]",
+ dev_dbg(&unet->intf->dev, "[%s] error: rmnet_ioctl called for unsupported cmd[0x%x]\n",
dev->name, cmd);
return -EINVAL;
}
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 38c70a3..a3e88b4 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -36,9 +36,11 @@
#include <linux/mfd/pm8xxx/misc.h>
#include <linux/qpnp/qpnp-adc.h>
+#include <mach/board.h>
#include <mach/msm_smd.h>
#include <mach/msm_iomap.h>
#include <mach/subsystem_restart.h>
+#include <mach/subsystem_notif.h>
#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
#include "wcnss_prealloc.h"
@@ -144,6 +146,20 @@
#define MSM_PRONTO_PLL_BASE 0xfb21b1c0
#define PRONTO_PLL_STATUS_OFFSET 0x1c
+#define MSM_PRONTO_MCU_BASE 0xfb080c00
+#define MCU_CBR_CCAHB_ERR_OFFSET 0x380
+#define MCU_CBR_CAHB_ERR_OFFSET 0x384
+#define MCU_CBR_CCAHB_TIMEOUT_OFFSET 0x388
+#define MCU_CBR_CAHB_TIMEOUT_OFFSET 0x38c
+#define MCU_DBR_CDAHB_ERR_OFFSET 0x390
+#define MCU_DBR_DAHB_ERR_OFFSET 0x394
+#define MCU_DBR_CDAHB_TIMEOUT_OFFSET 0x398
+#define MCU_DBR_DAHB_TIMEOUT_OFFSET 0x39c
+#define MCU_FDBR_CDAHB_ERR_OFFSET 0x3a0
+#define MCU_FDBR_FDAHB_ERR_OFFSET 0x3a4
+#define MCU_FDBR_CDAHB_TIMEOUT_OFFSET 0x3a8
+#define MCU_FDBR_FDAHB_TIMEOUT_OFFSET 0x3ac
+
#define MSM_PRONTO_TXP_STATUS 0xfb08040c
#define MSM_PRONTO_TXP_PHY_ABORT 0xfb080488
#define MSM_PRONTO_BRDG_ERR_SRC 0xfb080fb0
@@ -188,6 +204,7 @@
/* max 20mhz channel count */
#define WCNSS_MAX_CH_NUM 45
+#define WCNSS_MAX_PIL_RETRY 3
#define VALID_VERSION(version) \
((strncmp(version, "INVALID", WCNSS_VERSION_LEN)) ? 1 : 0)
@@ -360,6 +377,7 @@
void __iomem *pronto_ccpu_base;
void __iomem *pronto_saw2_base;
void __iomem *pronto_pll_base;
+ void __iomem *pronto_mcu_base;
void __iomem *wlan_tx_status;
void __iomem *wlan_tx_phy_aborts;
void __iomem *wlan_brdg_err_source;
@@ -389,6 +407,7 @@
struct mutex vbat_monitor_mutex;
u16 unsafe_ch_count;
u16 unsafe_ch_list[WCNSS_MAX_CH_NUM];
+ void *wcnss_notif_hdle;
} *penv = NULL;
static ssize_t wcnss_wlan_macaddr_store(struct device *dev,
@@ -561,45 +580,47 @@
reg_addr = penv->msm_wcnss_base + PRONTO_PMU_SPARE_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: PRONTO_PMU_SPARE %08x\n", __func__, reg);
+ pr_err("PRONTO_PMU_SPARE %08x\n", reg);
reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_CPU_CBCR_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: PRONTO_PMU_COM_CPU_CBCR %08x\n",
- __func__, reg);
+ pr_err("PRONTO_PMU_COM_CPU_CBCR %08x\n", reg);
reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_AHB_CBCR_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: PRONTO_PMU_COM_AHB_CBCR %08x\n",
- __func__, reg);
+ pr_err("PRONTO_PMU_COM_AHB_CBCR %08x\n", reg);
reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CFG_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: PRONTO_PMU_CFG %08x\n", __func__, reg);
+ pr_err("PRONTO_PMU_CFG %08x\n", reg);
reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_CSR_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: PRONTO_PMU_COM_CSR %08x\n",
- __func__, reg);
+ pr_err("PRONTO_PMU_COM_CSR %08x\n", reg);
reg_addr = penv->msm_wcnss_base + PRONTO_PMU_SOFT_RESET_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: PRONTO_PMU_SOFT_RESET %08x\n",
- __func__, reg);
+ pr_err("PRONTO_PMU_SOFT_RESET %08x\n", reg);
reg_addr = penv->pronto_saw2_base + PRONTO_SAW2_SPM_STS_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: PRONTO_SAW2_SPM_STS %08x\n", __func__, reg);
+ pr_err("PRONTO_SAW2_SPM_STS %08x\n", reg);
+
+ reg_addr = penv->pronto_pll_base + PRONTO_PLL_STATUS_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_err("PRONTO_PLL_STATUS %08x\n", reg);
+
+ reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CPU_AHB_CMD_RCGR_OFFSET;
+ reg4 = readl_relaxed(reg_addr);
+ pr_err("PMU_CPU_CMD_RCGR %08x\n", reg4);
reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_GDSCR_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: PRONTO_PMU_COM_GDSCR %08x\n",
- __func__, reg);
+ pr_err("PRONTO_PMU_COM_GDSCR %08x\n", reg);
reg >>= 31;
if (!reg) {
- pr_info_ratelimited("%s: Cannot log, Pronto common SS is power collapsed\n",
- __func__);
+ pr_err("Cannot log, Pronto common SS is power collapsed\n");
return;
}
reg &= ~(PRONTO_PMU_COM_GDSCR_SW_COLLAPSE
@@ -613,35 +634,31 @@
reg_addr = penv->pronto_a2xb_base + A2XB_CFG_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: A2XB_CFG_OFFSET %08x\n", __func__, reg);
+ pr_err("A2XB_CFG_OFFSET %08x\n", reg);
reg_addr = penv->pronto_a2xb_base + A2XB_INT_SRC_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: A2XB_INT_SRC_OFFSET %08x\n", __func__, reg);
+ pr_err("A2XB_INT_SRC_OFFSET %08x\n", reg);
reg_addr = penv->pronto_a2xb_base + A2XB_ERR_INFO_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: A2XB_ERR_INFO_OFFSET %08x\n", __func__, reg);
+ pr_err("A2XB_ERR_INFO_OFFSET %08x\n", reg);
reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_INVALID_ADDR_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: CCU_CCPU_INVALID_ADDR %08x\n", __func__, reg);
+ pr_err("CCU_CCPU_INVALID_ADDR %08x\n", reg);
reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR0_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR0 %08x\n", __func__, reg);
+ pr_err("CCU_CCPU_LAST_ADDR0 %08x\n", reg);
reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR1_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR1 %08x\n", __func__, reg);
+ pr_err("CCU_CCPU_LAST_ADDR1 %08x\n", reg);
reg_addr = penv->pronto_ccpu_base + CCU_PRONTO_LAST_ADDR2_OFFSET;
reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg);
-
- reg_addr = penv->pronto_pll_base + PRONTO_PLL_STATUS_OFFSET;
- reg = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: PRONTO_PLL_STATUS %08x\n", __func__, reg);
+ pr_err("CCU_CCPU_LAST_ADDR2 %08x\n", reg);
tst_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_OFFSET;
tst_ctrl_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_CTRL_OFFSET;
@@ -651,24 +668,21 @@
reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_RDFIFO;
writel_relaxed(reg, tst_ctrl_addr);
reg = readl_relaxed(tst_addr);
- pr_info_ratelimited("%s: Read data FIFO testbus %08x\n",
- __func__, reg);
+ pr_err("Read data FIFO testbus %08x\n", reg);
/* command FIFO */
reg = 0;
reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_CMDFIFO;
writel_relaxed(reg, tst_ctrl_addr);
reg = readl_relaxed(tst_addr);
- pr_info_ratelimited("%s: Command FIFO testbus %08x\n",
- __func__, reg);
+ pr_err("Command FIFO testbus %08x\n", reg);
/* write data FIFO */
reg = 0;
reg = reg | WCNSS_TSTBUS_CTRL_EN | WCNSS_TSTBUS_CTRL_WRFIFO;
writel_relaxed(reg, tst_ctrl_addr);
reg = readl_relaxed(tst_addr);
- pr_info_ratelimited("%s: Rrite data FIFO testbus %08x\n",
- __func__, reg);
+ pr_err("Rrite data FIFO testbus %08x\n", reg);
/* AXIM SEL CFG0 */
reg = 0;
@@ -676,8 +690,7 @@
WCNSS_TSTBUS_CTRL_AXIM_CFG0;
writel_relaxed(reg, tst_ctrl_addr);
reg = readl_relaxed(tst_addr);
- pr_info_ratelimited("%s: AXIM SEL CFG0 testbus %08x\n",
- __func__, reg);
+ pr_err("AXIM SEL CFG0 testbus %08x\n", reg);
/* AXIM SEL CFG1 */
reg = 0;
@@ -685,8 +698,7 @@
WCNSS_TSTBUS_CTRL_AXIM_CFG1;
writel_relaxed(reg, tst_ctrl_addr);
reg = readl_relaxed(tst_addr);
- pr_info_ratelimited("%s: AXIM SEL CFG1 testbus %08x\n",
- __func__, reg);
+ pr_err("AXIM SEL CFG1 testbus %08x\n", reg);
/* CTRL SEL CFG0 */
reg = 0;
@@ -694,8 +706,7 @@
WCNSS_TSTBUS_CTRL_CTRL_CFG0;
writel_relaxed(reg, tst_ctrl_addr);
reg = readl_relaxed(tst_addr);
- pr_info_ratelimited("%s: CTRL SEL CFG0 testbus %08x\n",
- __func__, reg);
+ pr_err("CTRL SEL CFG0 testbus %08x\n", reg);
/* CTRL SEL CFG1 */
reg = 0;
@@ -703,7 +714,7 @@
WCNSS_TSTBUS_CTRL_CTRL_CFG1;
writel_relaxed(reg, tst_ctrl_addr);
reg = readl_relaxed(tst_addr);
- pr_info_ratelimited("%s: CTRL SEL CFG1 testbus %08x\n", __func__, reg);
+ pr_err("CTRL SEL CFG1 testbus %08x\n", reg);
reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_BCR_OFFSET;
@@ -714,30 +725,75 @@
reg_addr = penv->msm_wcnss_base + PRONTO_PMU_WLAN_AHB_CBCR_OFFSET;
reg3 = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: PMU_WLAN_AHB_CBCR %08x\n", __func__, reg3);
-
- reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CPU_AHB_CMD_RCGR_OFFSET;
- reg4 = readl_relaxed(reg_addr);
- pr_info_ratelimited("%s: PMU_CPU_CMD_RCGR %08x\n", __func__, reg4);
+ pr_err("PMU_WLAN_AHB_CBCR %08x\n", reg3);
if ((reg & PRONTO_PMU_WLAN_BCR_BLK_ARES) ||
(reg2 & PRONTO_PMU_WLAN_GDSCR_SW_COLLAPSE) ||
(!(reg4 & PRONTO_PMU_CPU_AHB_CMD_RCGR_ROOT_EN)) ||
(reg3 & PRONTO_PMU_WLAN_AHB_CBCR_CLK_OFF) ||
(!(reg3 & PRONTO_PMU_WLAN_AHB_CBCR_CLK_EN))) {
- pr_info_ratelimited("%s: Cannot log, wlan domain is power collapsed\n",
- __func__);
+ pr_err("Cannot log, wlan domain is power collapsed\n");
return;
}
+ msleep(50);
+
reg = readl_relaxed(penv->wlan_tx_phy_aborts);
- pr_info_ratelimited("%s: WLAN_TX_PHY_ABORTS %08x\n", __func__, reg);
+ pr_err("WLAN_TX_PHY_ABORTS %08x\n", reg);
+
+ reg_addr = penv->pronto_mcu_base + MCU_CBR_CCAHB_ERR_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_err("MCU_CBR_CCAHB_ERR %08x\n", reg);
+
+ reg_addr = penv->pronto_mcu_base + MCU_CBR_CAHB_ERR_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_err("MCU_CBR_CAHB_ERR %08x\n", reg);
+
+ reg_addr = penv->pronto_mcu_base + MCU_CBR_CCAHB_TIMEOUT_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_err("MCU_CBR_CCAHB_TIMEOUT %08x\n", reg);
+
+ reg_addr = penv->pronto_mcu_base + MCU_CBR_CAHB_TIMEOUT_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_err("MCU_CBR_CAHB_TIMEOUT %08x\n", reg);
+
+ reg_addr = penv->pronto_mcu_base + MCU_DBR_CDAHB_ERR_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_err("MCU_DBR_CDAHB_ERR %08x\n", reg);
+
+ reg_addr = penv->pronto_mcu_base + MCU_DBR_DAHB_ERR_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_err("MCU_DBR_DAHB_ERR %08x\n", reg);
+
+ reg_addr = penv->pronto_mcu_base + MCU_DBR_CDAHB_TIMEOUT_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_err("MCU_DBR_CDAHB_TIMEOUT %08x\n", reg);
+
+ reg_addr = penv->pronto_mcu_base + MCU_DBR_DAHB_TIMEOUT_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_err("MCU_DBR_DAHB_TIMEOUT %08x\n", reg);
+
+ reg_addr = penv->pronto_mcu_base + MCU_FDBR_CDAHB_ERR_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_err("MCU_FDBR_CDAHB_ERR %08x\n", reg);
+
+ reg_addr = penv->pronto_mcu_base + MCU_FDBR_FDAHB_ERR_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_err("MCU_FDBR_FDAHB_ERR %08x\n", reg);
+
+ reg_addr = penv->pronto_mcu_base + MCU_FDBR_CDAHB_TIMEOUT_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_err("MCU_FDBR_CDAHB_TIMEOUT %08x\n", reg);
+
+ reg_addr = penv->pronto_mcu_base + MCU_FDBR_FDAHB_TIMEOUT_OFFSET;
+ reg = readl_relaxed(reg_addr);
+ pr_err("MCU_FDBR_FDAHB_TIMEOUT %08x\n", reg);
reg = readl_relaxed(penv->wlan_brdg_err_source);
- pr_info_ratelimited("%s: WLAN_BRDG_ERR_SOURCE %08x\n", __func__, reg);
+ pr_err("WLAN_BRDG_ERR_SOURCE %08x\n", reg);
reg = readl_relaxed(penv->wlan_tx_status);
- pr_info_ratelimited("%s: WLAN_TX_STATUS %08x\n", __func__, reg);
+ pr_err("WLAN_TXP_STATUS %08x\n", reg);
reg = readl_relaxed(penv->alarms_txctl);
pr_err("ALARMS_TXCTL %08x\n", reg);
@@ -748,6 +804,22 @@
EXPORT_SYMBOL(wcnss_pronto_log_debug_regs);
#ifdef CONFIG_WCNSS_REGISTER_DUMP_ON_BITE
+static void wcnss_log_iris_regs(void)
+{
+ int i;
+ u32 reg_val;
+ u32 regs_array[] = {
+ 0x04, 0x05, 0x11, 0x1e, 0x40, 0x48,
+ 0x49, 0x4b, 0x00, 0x01, 0x4d};
+
+ pr_info("IRIS Registers [address] : value\n");
+
+ for (i = 0; i < ARRAY_SIZE(regs_array); i++) {
+ reg_val = wcnss_rf_read_reg(regs_array[i]);
+ pr_info("[0x%08x] : 0x%08x\n", regs_array[i], reg_val);
+ }
+}
+
void wcnss_log_debug_regs_on_bite(void)
{
struct platform_device *pdev = wcnss_get_platform_device();
@@ -768,10 +840,12 @@
clk_rate = clk_get_rate(measure);
pr_debug("wcnss: clock frequency is: %luHz\n", clk_rate);
- if (clk_rate)
+ if (clk_rate) {
wcnss_pronto_log_debug_regs();
- else
+ } else {
pr_err("clock frequency is zero, cannot access PMU or other registers\n");
+ wcnss_log_iris_regs();
+ }
}
}
#endif
@@ -2049,6 +2123,7 @@
unsigned long wcnss_phys_addr;
int size = 0;
struct resource *res;
+ int pil_retry = 0;
int has_pronto_hw = of_property_read_bool(pdev->dev.of_node,
"qcom,has-pronto-hw");
@@ -2230,6 +2305,13 @@
pr_err("%s: ioremap alarms TACTL failed\n", __func__);
goto fail_ioremap11;
}
+ penv->pronto_mcu_base = ioremap(MSM_PRONTO_MCU_BASE, SZ_1K);
+ if (!penv->pronto_mcu_base) {
+ ret = -ENOMEM;
+ pr_err("%s: ioremap wcnss physical(mcu) failed\n",
+ __func__);
+ goto fail_ioremap12;
+ }
}
penv->adc_tm_dev = qpnp_get_adc_tm(&penv->pdev->dev, "wcnss");
if (IS_ERR(penv->adc_tm_dev)) {
@@ -2240,12 +2322,17 @@
penv->fw_vbatt_state = WCNSS_CONFIG_UNSPECIFIED;
}
- /* trigger initialization of the WCNSS */
- penv->pil = subsystem_get(WCNSS_PIL_DEVICE);
- if (IS_ERR(penv->pil)) {
- dev_err(&pdev->dev, "Peripheral Loader failed on WCNSS.\n");
- ret = PTR_ERR(penv->pil);
- wcnss_pronto_log_debug_regs();
+ do {
+ /* trigger initialization of the WCNSS */
+ penv->pil = subsystem_get(WCNSS_PIL_DEVICE);
+ if (IS_ERR(penv->pil)) {
+ dev_err(&pdev->dev, "Peripheral Loader failed on WCNSS.\n");
+ ret = PTR_ERR(penv->pil);
+ wcnss_pronto_log_debug_regs();
+ }
+ } while (pil_retry++ < WCNSS_MAX_PIL_RETRY && IS_ERR(penv->pil));
+
+ if (pil_retry >= WCNSS_MAX_PIL_RETRY) {
penv->pil = NULL;
goto fail_pil;
}
@@ -2255,6 +2342,9 @@
fail_pil:
if (penv->riva_ccu_base)
iounmap(penv->riva_ccu_base);
+ if (penv->pronto_mcu_base)
+ iounmap(penv->pronto_mcu_base);
+fail_ioremap12:
if (penv->alarms_tactl)
iounmap(penv->alarms_tactl);
fail_ioremap11:
@@ -2421,6 +2511,22 @@
}
+static int wcnss_notif_cb(struct notifier_block *this, unsigned long code,
+ void *ss_handle)
+{
+ pr_debug("%s: wcnss notification event: %lu\n", __func__, code);
+
+ if (SUBSYS_POWERUP_FAILURE == code)
+ wcnss_pronto_log_debug_regs();
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block wnb = {
+ .notifier_call = wcnss_notif_cb,
+};
+
+
static const struct file_operations wcnss_node_fops = {
.owner = THIS_MODULE,
.open = wcnss_node_open,
@@ -2460,6 +2566,13 @@
return -ENOENT;
}
+ /* register wcnss event notification */
+ penv->wcnss_notif_hdle = subsys_notif_register_notifier("wcnss", &wnb);
+ if (IS_ERR(penv->wcnss_notif_hdle)) {
+ pr_err("wcnss: register event notification failed!\n");
+ return PTR_ERR(penv->wcnss_notif_hdle);
+ }
+
mutex_init(&penv->dev_lock);
mutex_init(&penv->ctrl_lock);
mutex_init(&penv->vbat_monitor_mutex);
@@ -2484,6 +2597,8 @@
static int __devexit
wcnss_wlan_remove(struct platform_device *pdev)
{
+ if (penv->wcnss_notif_hdle)
+ subsys_notif_unregister_notifier(penv->wcnss_notif_hdle, &wnb);
wcnss_remove_sysfs(&pdev->dev);
penv = NULL;
return 0;
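
Note: instead of failing on the first subsystem_get() error, the probe path above retries the WCNSS bring-up a bounded number of times (WCNSS_MAX_PIL_RETRY) and only bails out once the budget is exhausted. The sketch below shows the same bounded-retry shape with errors modelled as negative return codes; try_bring_up() and MAX_RETRY are hypothetical stand-ins, not driver symbols.

#include <stdio.h>

#define MAX_RETRY 3     /* plays the role of WCNSS_MAX_PIL_RETRY */

/* Illustrative stand-in for subsystem_get(): fails twice, then succeeds. */
static int try_bring_up(void)
{
        static int calls;

        return (++calls < 3) ? -1 : 0;
}

int main(void)
{
        int retry = 0;
        int ret;

        do {
                ret = try_bring_up();
                if (ret)
                        fprintf(stderr, "bring-up failed, attempt %d\n",
                                retry + 1);
        } while (retry++ < MAX_RETRY && ret);

        if (ret) {
                fprintf(stderr, "giving up after %d attempts\n", retry);
                return 1;
        }
        printf("brought up after %d attempt(s)\n", retry);
        return 0;
}
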
diff --git a/drivers/nfc/nfc-nci.c b/drivers/nfc/nfc-nci.c
index c6192ed..a44c06c 100644
--- a/drivers/nfc/nfc-nci.c
+++ b/drivers/nfc/nfc-nci.c
@@ -1397,18 +1397,6 @@
gpio_set_value(platform_data->dis_gpio, 1);
goto err_nfcc_not_present;
}
- regulators.regulator = regulator_get(&client->dev, regulators.name);
- if (IS_ERR(regulators.regulator)) {
- r = PTR_ERR(regulators.regulator);
- pr_err("regulator get of %s failed (%d)\n", regulators.name, r);
- } else {
- /* Enable the regulator */
- r = regulator_enable(regulators.regulator);
- if (r) {
- pr_err("vreg %s enable failed (%d)\n",
- regulators.name, r);
- }
- }
logging_level = 0;
/* request irq. The irq is set whenever the chip has data available
diff --git a/drivers/nfc/nfc-nci.h b/drivers/nfc/nfc-nci.h
index 9bfb77d..297c152 100644
--- a/drivers/nfc/nfc-nci.h
+++ b/drivers/nfc/nfc-nci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -223,9 +223,3 @@
unsigned int reg;
};
#endif
-/* enable LDO */
-struct vregs_info {
- const char * const name;
- struct regulator *regulator;
-};
-struct vregs_info regulators = {"vlogic", NULL};
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index a0d9a24..82c61c9 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -2205,7 +2205,7 @@
pr_err("adc vbat failed err = %d\n", rc);
return soc;
}
- if (soc == 0 && vbat_uv > chip->v_cutoff_uv) {
+ if (soc <= 0 && vbat_uv > chip->v_cutoff_uv) {
pr_debug("clamping soc to 1, vbat (%d) > cutoff (%d)\n",
vbat_uv, chip->v_cutoff_uv);
return 1;
@@ -2418,8 +2418,13 @@
}
mutex_unlock(&chip->soc_invalidation_mutex);
- pr_debug("SOC before adjustment = %d\n", soc);
- new_calculated_soc = adjust_soc(chip, &params, soc, batt_temp);
+ if (chip->first_time_calc_soc && !chip->shutdown_soc_invalid) {
+ pr_debug("Skip adjustment when shutdown SOC has been forced\n");
+ new_calculated_soc = soc;
+ } else {
+ pr_debug("SOC before adjustment = %d\n", soc);
+ new_calculated_soc = adjust_soc(chip, &params, soc, batt_temp);
+ }
/* always clamp soc due to BMS hw/sw immaturities */
new_calculated_soc = clamp_soc_based_on_voltage(chip,
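
Note: the clamp above now treats any non-positive calculated SOC as 1% as long as the measured battery voltage is still above the cutoff. A tiny sketch of that clamp, assuming a made-up cutoff value, is shown here; clamp_soc() and V_CUTOFF_UV are illustrative.

#include <stdio.h>

/* Illustrative cutoff; the driver reads chip->v_cutoff_uv from DT. */
#define V_CUTOFF_UV 3400000

/* Clamp a calculated SOC the way the patched helper does. */
static int clamp_soc(int soc, int vbat_uv)
{
        if (soc <= 0 && vbat_uv > V_CUTOFF_UV)
                return 1;       /* still above cutoff: report 1%, not 0% */
        return soc;
}

int main(void)
{
        printf("%d\n", clamp_soc(0, 3600000));  /* -> 1 */
        printf("%d\n", clamp_soc(-2, 3600000)); /* -> 1 (newly covered) */
        printf("%d\n", clamp_soc(0, 3300000));  /* -> 0, below cutoff */
        return 0;
}
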
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index 2dc77e6..0870202 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -3558,7 +3558,7 @@
state == ADC_TM_WARM_STATE ? "warm" : "cool");
if (state == ADC_TM_WARM_STATE) {
- if (temp > chip->warm_bat_decidegc) {
+ if (temp >= chip->warm_bat_decidegc) {
/* Normal to warm */
bat_warm = true;
bat_cool = false;
@@ -3566,7 +3566,7 @@
chip->warm_bat_decidegc - HYSTERISIS_DECIDEGC;
chip->adc_param.state_request =
ADC_TM_COOL_THR_ENABLE;
- } else if (temp >
+ } else if (temp >=
chip->cool_bat_decidegc + HYSTERISIS_DECIDEGC){
/* Cool to normal */
bat_warm = false;
@@ -3578,7 +3578,7 @@
ADC_TM_HIGH_LOW_THR_ENABLE;
}
} else {
- if (temp < chip->cool_bat_decidegc) {
+ if (temp <= chip->cool_bat_decidegc) {
/* Normal to cool */
bat_warm = false;
bat_cool = true;
@@ -3586,7 +3586,7 @@
chip->cool_bat_decidegc + HYSTERISIS_DECIDEGC;
chip->adc_param.state_request =
ADC_TM_WARM_THR_ENABLE;
- } else if (temp <
+ } else if (temp <=
chip->warm_bat_decidegc - HYSTERISIS_DECIDEGC){
/* Warm to normal */
bat_warm = false;
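
Note: the charger hunks above make the trip-point comparisons inclusive (>=, <=) and shift the opposite threshold by HYSTERISIS_DECIDEGC once a boundary is crossed, so the warm/cool state cannot chatter right at the threshold. The compilable sketch below models that hysteresis; the enum, the threshold values and the HYSTERESIS_DECIDEGC constant here are illustrative, not taken from the driver.

#include <stdio.h>

#define HYSTERESIS_DECIDEGC 20          /* illustrative: 2.0 degC */

enum batt_state { BATT_COOL, BATT_NORMAL, BATT_WARM };

/*
 * Decide the next state from the measured temperature (in decidegC),
 * using inclusive comparisons at the trip points as in the patch.
 */
static enum batt_state next_state(enum batt_state cur, int temp,
                                  int cool_decidegc, int warm_decidegc)
{
        switch (cur) {
        case BATT_NORMAL:
                if (temp >= warm_decidegc)
                        return BATT_WARM;
                if (temp <= cool_decidegc)
                        return BATT_COOL;
                return BATT_NORMAL;
        case BATT_WARM:
                /* Warm to normal only after dropping past the band. */
                if (temp <= warm_decidegc - HYSTERESIS_DECIDEGC)
                        return BATT_NORMAL;
                return BATT_WARM;
        case BATT_COOL:
                /* Cool to normal only after rising past the band. */
                if (temp >= cool_decidegc + HYSTERESIS_DECIDEGC)
                        return BATT_NORMAL;
                return BATT_COOL;
        }
        return cur;
}

int main(void)
{
        enum batt_state s = BATT_NORMAL;

        s = next_state(s, 450, 100, 450);  /* exactly at warm trip -> WARM */
        printf("state=%d\n", s);
        s = next_state(s, 440, 100, 450);  /* inside band -> stays WARM */
        printf("state=%d\n", s);
        s = next_state(s, 430, 100, 450);  /* past band -> NORMAL */
        printf("state=%d\n", s);
        return 0;
}
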
diff --git a/drivers/rtc/alarm.c b/drivers/rtc/alarm.c
index 51e176f..06749b9 100644
--- a/drivers/rtc/alarm.c
+++ b/drivers/rtc/alarm.c
@@ -520,8 +520,11 @@
spin_lock_irqsave(&alarm_slock, flags);
- if (!power_on_alarm)
+ if (!power_on_alarm) {
+ spin_unlock_irqrestore(&alarm_slock, flags);
goto disable_alarm;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
rtc_read_time(alarm_rtc_dev, &rtc_time);
getnstimeofday(&wall_time);
@@ -549,11 +552,9 @@
pr_alarm(FLOW, "Power-on alarm set to %lu\n",
alarm_time);
- spin_unlock_irqrestore(&alarm_slock, flags);
return;
disable_alarm:
- spin_unlock_irqrestore(&alarm_slock, flags);
rtc_alarm_irq_enable(alarm_rtc_dev, 0);
}
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index f858822..496b31d 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -27,7 +27,6 @@
#include <linux/timer.h>
#include <mach/sps.h>
#include "slim-msm.h"
-#include <mach/qdsp6v2/apr.h>
#define NGD_SLIM_NAME "ngd_msm_ctrl"
#define SLIM_LA_MGR 0xFF
@@ -265,9 +264,13 @@
u8 txn_mt;
u16 txn_mc = txn->mc;
u8 wbuf[SLIM_MSGQ_BUF_LEN];
+ bool report_sat = false;
+ if (txn->mc == SLIM_USR_MC_REPORT_SATELLITE &&
+ txn->mt == SLIM_MSG_MT_SRC_REFERRED_USER)
+ report_sat = true;
if (!pm_runtime_enabled(dev->dev) && dev->state == MSM_CTRL_ASLEEP &&
- txn->mc != SLIM_USR_MC_REPORT_SATELLITE) {
+ report_sat == false) {
/*
* Counter-part of system-suspend when runtime-pm is not enabled
* This way, resume can be left empty and device will be put in
@@ -295,7 +298,7 @@
return 0;
}
/* If txn is tried when controller is down, wait for ADSP to boot */
- if (txn->mc != SLIM_USR_MC_REPORT_SATELLITE) {
+ if (!report_sat) {
if (dev->state == MSM_CTRL_DOWN) {
u8 mc = (u8)txn->mc;
int timeout;
@@ -358,8 +361,7 @@
}
mutex_lock(&dev->tx_lock);
- if (txn->mc != SLIM_USR_MC_REPORT_SATELLITE &&
- (dev->state != MSM_CTRL_AWAKE)) {
+ if (report_sat == false && dev->state != MSM_CTRL_AWAKE) {
dev_err(dev->dev, "controller not ready");
mutex_unlock(&dev->tx_lock);
msm_slim_put_ctrl(dev);
@@ -436,11 +438,13 @@
puc = ((u8 *)pbuf) + 2;
if (txn->rbuf)
*(puc++) = txn->tid;
- if ((txn->mt == SLIM_MSG_MT_CORE) &&
+ if (((txn->mt == SLIM_MSG_MT_CORE) &&
((txn->mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
txn->mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
(txn->mc >= SLIM_MSG_MC_REQUEST_VALUE &&
- txn->mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
+ txn->mc <= SLIM_MSG_MC_CHANGE_VALUE))) ||
+ (txn->mc == SLIM_USR_MC_REPEAT_CHANGE_VALUE &&
+ txn->mt == SLIM_MSG_MT_DEST_REFERRED_USER)) {
*(puc++) = (txn->ec & 0xFF);
*(puc++) = (txn->ec >> 8)&0xFF;
}
@@ -540,11 +544,49 @@
}
ngd_xfer_err:
mutex_unlock(&dev->tx_lock);
- if (txn_mc != SLIM_USR_MC_REPORT_SATELLITE)
+ if (!report_sat)
msm_slim_put_ctrl(dev);
return ret ? ret : dev->err;
}
+static int ngd_user_msg(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
+ struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+ struct slim_msg_txn txn;
+
+ if (mt != SLIM_MSG_MT_DEST_REFERRED_USER ||
+ mc != SLIM_USR_MC_REPEAT_CHANGE_VALUE) {
+ return -EPROTONOSUPPORT;
+ }
+ if (len > SLIM_MAX_VE_SLC_BYTES ||
+ msg->start_offset > MSM_SLIM_VE_MAX_MAP_ADDR)
+ return -EINVAL;
+ if (len <= 4) {
+ txn.ec = len - 1;
+ } else if (len <= 8) {
+ if (len & 0x1)
+ return -EINVAL;
+ txn.ec = ((len >> 1) + 1);
+ } else {
+ if (len & 0x3)
+ return -EINVAL;
+ txn.ec = ((len >> 2) + 3);
+ }
+ txn.ec |= (0x8 | ((msg->start_offset & 0xF) << 4));
+ txn.ec |= ((msg->start_offset & 0xFF0) << 4);
+
+ txn.la = la;
+ txn.mt = mt;
+ txn.mc = mc;
+ txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+ txn.len = len;
+ txn.rl = len + 6;
+ txn.wbuf = buf;
+ txn.rbuf = NULL;
+ txn.comp = msg->comp;
+ return ngd_xfer_msg(ctrl, &txn);
+}
+
static int ngd_xferandwait_ack(struct slim_controller *ctrl,
struct slim_msg_txn *txn)
{
@@ -814,7 +856,7 @@
prev_state);
/* ADSP SSR, send device_up notifications */
if (prev_state == MSM_CTRL_DOWN)
- schedule_work(&dev->slave_notify);
+ complete(&dev->qmi.slave_notify);
} else if (ret == -EIO) {
pr_info("capability message NACKed, retrying");
if (retries < INIT_MX_RETRIES) {
@@ -1065,31 +1107,54 @@
return 0;
}
-static void ngd_laddr_lookup(struct work_struct *work)
+static int ngd_notify_slaves(void *data)
{
- struct msm_slim_ctrl *dev =
- container_of(work, struct msm_slim_ctrl, slave_notify);
+ struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
struct slim_controller *ctrl = &dev->ctrl;
struct slim_device *sbdev;
struct list_head *pos, *next;
- int i;
- slim_framer_booted(ctrl);
- mutex_lock(&ctrl->m_ctrl);
- list_for_each_safe(pos, next, &ctrl->devs) {
- int ret = 0;
- sbdev = list_entry(pos, struct slim_device, dev_list);
- mutex_unlock(&ctrl->m_ctrl);
- for (i = 0; i < LADDR_RETRY; i++) {
- ret = slim_get_logical_addr(sbdev, sbdev->e_addr,
- 6, &sbdev->laddr);
- if (!ret)
- break;
- else /* time for ADSP to assign LA */
- msleep(20);
+ int ret, i = 0;
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ ret = wait_for_completion_timeout(&dev->qmi.slave_notify,
+ HZ);
+ if (!ret) {
+ dev_err(dev->dev, "slave thread wait err:%d", ret);
+ continue;
}
+ /* Probe devices for first notification */
+ if (!i) {
+ dev->err = 0;
+ if (dev->dev->of_node)
+ of_register_slim_devices(&dev->ctrl);
+
+ /*
+ * Add devices registered with board-info now that
+ * controller is up
+ */
+ slim_ctrl_add_boarddevs(&dev->ctrl);
+ } else {
+ slim_framer_booted(ctrl);
+ }
+ i++;
mutex_lock(&ctrl->m_ctrl);
+ list_for_each_safe(pos, next, &ctrl->devs) {
+ sbdev = list_entry(pos, struct slim_device, dev_list);
+ mutex_unlock(&ctrl->m_ctrl);
+ for (i = 0; i < LADDR_RETRY; i++) {
+ ret = slim_get_logical_addr(sbdev,
+ sbdev->e_addr,
+ 6, &sbdev->laddr);
+ if (!ret)
+ break;
+ else /* time for ADSP to assign LA */
+ msleep(20);
+ }
+ mutex_lock(&ctrl->m_ctrl);
+ }
+ mutex_unlock(&ctrl->m_ctrl);
}
- mutex_unlock(&ctrl->m_ctrl);
+ return 0;
}
static void ngd_adsp_down(struct work_struct *work)
@@ -1130,18 +1195,9 @@
struct resource *bam_mem;
struct resource *slim_mem;
struct resource *irq, *bam_irq;
- enum apr_subsys_state q6_state;
bool rxreg_access = false;
bool slim_mdm = false;
- q6_state = apr_get_q6_state();
- if (q6_state == APR_SUBSYS_DOWN) {
- dev_dbg(&pdev->dev, "defering %s, adsp_state %d\n", __func__,
- q6_state);
- return -EPROBE_DEFER;
- } else
- dev_dbg(&pdev->dev, "adsp is ready\n");
-
slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"slimbus_physical");
if (!slim_mem) {
@@ -1222,6 +1278,7 @@
dev->ctrl.get_laddr = ngd_get_laddr;
dev->ctrl.allocbw = ngd_allocbw;
dev->ctrl.xfer_msg = ngd_xfer_msg;
+ dev->ctrl.xfer_user_msg = ngd_user_msg;
dev->ctrl.wakeup = ngd_clk_pause_wakeup;
dev->ctrl.alloc_port = msm_alloc_port;
dev->ctrl.dealloc_port = msm_dealloc_port;
@@ -1246,6 +1303,7 @@
dev->use_tx_msgqs = MSM_MSGQ_RESET;
init_completion(&dev->rx_msgq_notify);
+ init_completion(&dev->qmi.slave_notify);
/* Register with framework */
ret = slim_add_numbered_controller(&dev->ctrl);
@@ -1267,6 +1325,7 @@
}
init_completion(&dev->qmi.qmi_comp);
+ dev->err = -EPROBE_DEFER;
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, MSM_SLIM_AUTOSUSPEND);
pm_runtime_set_suspended(dev->dev);
@@ -1282,7 +1341,6 @@
dev->mdm.ssr);
}
- INIT_WORK(&dev->slave_notify, ngd_laddr_lookup);
INIT_WORK(&dev->qmi.ssr_down, ngd_adsp_down);
INIT_WORK(&dev->qmi.ssr_up, ngd_adsp_up);
dev->qmi.nb.notifier_call = ngd_qmi_available;
@@ -1298,23 +1356,27 @@
/* Fire up the Rx message queue thread */
dev->rx_msgq_thread = kthread_run(ngd_slim_rx_msgq_thread, dev,
- NGD_SLIM_NAME "_ngd_msgq_thread");
+ "ngd_rx_thread%d", dev->ctrl.nr);
if (IS_ERR(dev->rx_msgq_thread)) {
ret = PTR_ERR(dev->rx_msgq_thread);
- dev_err(dev->dev, "Failed to start Rx message queue thread\n");
- goto err_thread_create_failed;
+ dev_err(dev->dev, "Failed to start Rx thread:%d\n", ret);
+ goto err_rx_thread_create_failed;
}
- if (pdev->dev.of_node)
- of_register_slim_devices(&dev->ctrl);
-
- /* Add devices registered with board-info now that controller is up */
- slim_ctrl_add_boarddevs(&dev->ctrl);
-
+ /* Start thread to probe, and notify slaves */
+ dev->qmi.slave_thread = kthread_run(ngd_notify_slaves, dev,
+ "ngd_notify_sl%d", dev->ctrl.nr);
+ if (IS_ERR(dev->qmi.slave_thread)) {
+ ret = PTR_ERR(dev->qmi.slave_thread);
+ dev_err(dev->dev, "Failed to start notifier thread:%d\n", ret);
+ goto err_notify_thread_create_failed;
+ }
dev_dbg(dev->dev, "NGD SB controller is up!\n");
return 0;
-err_thread_create_failed:
+err_notify_thread_create_failed:
+ kthread_stop(dev->rx_msgq_thread);
+err_rx_thread_create_failed:
qmi_svc_event_notifier_unregister(SLIMBUS_QMI_SVC_ID,
SLIMBUS_QMI_SVC_V1,
SLIMBUS_QMI_INS_ID, &dev->qmi.nb);
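
Note: ngd_user_msg() above packs the transfer length and the value-element start offset into the SLIMbus element code (EC) before handing the transaction to ngd_xfer_msg(): up to 4 bytes go as byte slices, up to 8 bytes must be 2-byte aligned, and up to 16 bytes must be 4-byte aligned. The arithmetic can be checked in isolation with the sketch below; encode_ec() is a hypothetical helper that simply mirrors the computation in the patch.

#include <stdio.h>
#include <stdint.h>

#define MAX_VE_SLC_BYTES   16
#define MAX_VE_MAP_ADDR    0xFFF

/*
 * Hypothetical helper mirroring the EC packing done in ngd_user_msg():
 * returns the 16-bit element code, or -1 for an invalid length/offset.
 */
static int encode_ec(unsigned int len, unsigned int offset)
{
        uint16_t ec;

        if (len == 0 || len > MAX_VE_SLC_BYTES || offset > MAX_VE_MAP_ADDR)
                return -1;
        if (len <= 4) {
                ec = len - 1;                   /* byte slice, size 1..4 */
        } else if (len <= 8) {
                if (len & 0x1)
                        return -1;              /* must be 2-byte aligned */
                ec = (len >> 1) + 1;
        } else {
                if (len & 0x3)
                        return -1;              /* must be 4-byte aligned */
                ec = (len >> 2) + 3;
        }
        ec |= 0x8 | ((offset & 0xF) << 4);      /* low offset nibble + flag */
        ec |= (offset & 0xFF0) << 4;            /* remaining offset bits */
        return ec;
}

int main(void)
{
        printf("len=4  off=0x010 -> ec=0x%04x\n", encode_ec(4, 0x010));
        printf("len=8  off=0x123 -> ec=0x%04x\n", encode_ec(8, 0x123));
        printf("len=16 off=0xFFF -> ec=0x%04x\n", encode_ec(16, 0xFFF));
        printf("len=7  off=0x000 -> ec=%d (invalid)\n", encode_ec(7, 0));
        return 0;
}
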
diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
index 5d30e54..8589b9f 100644
--- a/drivers/slimbus/slim-msm.c
+++ b/drivers/slimbus/slim-msm.c
@@ -305,7 +305,7 @@
}
enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
- u8 pn, u8 **done_buf, u32 *done_len)
+ u8 pn, phys_addr_t *done_buf, u32 *done_len)
{
struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
struct sps_iovec sio;
@@ -313,7 +313,7 @@
if (done_len)
*done_len = 0;
if (done_buf)
- *done_buf = NULL;
+ *done_buf = 0;
if (!dev->pipes[pn].connected)
return SLIM_P_DISCONNECT;
ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
@@ -321,7 +321,7 @@
if (done_len)
*done_len = sio.size;
if (done_buf)
- *done_buf = (u8 *)sio.addr;
+ *done_buf = (phys_addr_t)sio.addr;
}
dev_dbg(dev->dev, "get iovec returned %d\n", ret);
return SLIM_P_INPROGRESS;
@@ -346,7 +346,7 @@
complete(comp);
}
-int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
+int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, phys_addr_t iobuf,
u32 len, struct completion *comp)
{
struct sps_register_event sreg;
@@ -366,7 +366,7 @@
dev_dbg(dev->dev, "sps register event error:%x\n", ret);
return ret;
}
- ret = sps_transfer_one(dev->pipes[pn].sps, (u32)iobuf, len, comp,
+ ret = sps_transfer_one(dev->pipes[pn].sps, iobuf, len, comp,
SPS_IOVEC_FLAG_INT);
dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
if (!ret) {
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index 5ffa300..63178cc 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -37,6 +37,10 @@
#define SLIM_USR_MC_CONNECT_SINK 0x2D
#define SLIM_USR_MC_DISCONNECT_PORT 0x2E
+#define SLIM_USR_MC_REPEAT_CHANGE_VALUE 0x0
+#define MSM_SLIM_VE_MAX_MAP_ADDR 0xFFF
+#define SLIM_MAX_VE_SLC_BYTES 16
+
#define MSM_SLIM_AUTOSUSPEND MSEC_PER_SEC
/*
@@ -199,6 +203,8 @@
struct msm_slim_qmi {
struct qmi_handle *handle;
struct task_struct *task;
+ struct task_struct *slave_thread;
+ struct completion slave_notify;
struct kthread_work kwork;
struct kthread_worker kworker;
struct completion qmi_comp;
@@ -257,7 +263,6 @@
struct completion ctrl_up;
int nsats;
u32 ver;
- struct work_struct slave_notify;
struct msm_slim_qmi qmi;
struct msm_slim_pdata pdata;
struct msm_slim_mdm mdm;
@@ -306,8 +311,8 @@
void msm_dealloc_port(struct slim_controller *ctrl, u8 pn);
int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn);
enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
- u8 pn, u8 **done_buf, u32 *done_len);
-int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, u8 *iobuf,
+ u8 pn, phys_addr_t *done_buf, u32 *done_len);
+int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, phys_addr_t iobuf,
u32 len, struct completion *comp);
int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg);
u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len);
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
index caf7a87..fecf5ec 100644
--- a/drivers/slimbus/slimbus.c
+++ b/drivers/slimbus/slimbus.c
@@ -1107,6 +1107,28 @@
EXPORT_SYMBOL_GPL(slim_xfer_msg);
/*
+ * User message:
+ * slim_user_msg: Send user message that is interpreted by destination device
+ * @sb: Client handle sending the message
+ * @la: Destination device for this user message
+ * @mt: Message Type (Source-referred, or Destination-referred)
+ * @mc: Message Code
+ * @msg: Message structure (start offset, number of bytes) to be sent
+ * @buf: data buffer to be sent
+ * @len: data buffer size in bytes
+ */
+int slim_user_msg(struct slim_device *sb, u8 la, u8 mt, u8 mc,
+ struct slim_ele_access *msg, u8 *buf, u8 len)
+{
+ if (!sb || !sb->ctrl || !msg || mt == SLIM_MSG_MT_CORE)
+ return -EINVAL;
+ if (!sb->ctrl->xfer_user_msg)
+ return -EPROTONOSUPPORT;
+ return sb->ctrl->xfer_user_msg(sb->ctrl, la, mt, mc, msg, buf, len);
+}
+EXPORT_SYMBOL(slim_user_msg);
+
+/*
* slim_alloc_mgrports: Allocate port on manager side.
* @sb: device/client handle.
* @req: Port request type.
@@ -1462,7 +1484,7 @@
* Client will call slim_port_get_xfer_status to get error and/or number of
* bytes transferred if used asynchronously.
*/
-int slim_port_xfer(struct slim_device *sb, u32 ph, u8 *iobuf, u32 len,
+int slim_port_xfer(struct slim_device *sb, u32 ph, phys_addr_t iobuf, u32 len,
struct completion *comp)
{
struct slim_controller *ctrl = sb->ctrl;
@@ -1492,7 +1514,7 @@
* processed from the multiple transfers.
*/
enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb, u32 ph,
- u8 **done_buf, u32 *done_len)
+ phys_addr_t *done_buf, u32 *done_len)
{
struct slim_controller *ctrl = sb->ctrl;
u8 pn = SLIM_HDL_TO_PORT(ph);
@@ -1505,7 +1527,7 @@
*/
if (la != SLIM_LA_MANAGER) {
if (done_buf)
- *done_buf = NULL;
+ *done_buf = 0;
if (done_len)
*done_len = 0;
return SLIM_P_NOT_OWNED;
diff --git a/drivers/spmi/qpnp-int.c b/drivers/spmi/qpnp-int.c
index 3e14333..9fc7299 100644
--- a/drivers/spmi/qpnp-int.c
+++ b/drivers/spmi/qpnp-int.c
@@ -184,6 +184,22 @@
return 0;
}
+static void qpnpint_irq_ack(struct irq_data *d)
+{
+ struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
+ int rc;
+
+ pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
+
+ rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR,
+ &irq_d->mask_shift, 1);
+ if (rc) {
+ pr_err_ratelimited("spmi write failure on irq %d, rc=%d\n",
+ d->irq, rc);
+ return;
+ }
+}
+
static void qpnpint_irq_mask(struct irq_data *d)
{
struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
@@ -223,44 +239,10 @@
static void qpnpint_irq_mask_ack(struct irq_data *d)
{
- struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
- struct q_chip_data *chip_d = irq_d->chip_d;
- struct q_perip_data *per_d = irq_d->per_d;
- int rc;
- uint8_t prev_int_en = per_d->int_en;
-
pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
- if (!chip_d->cb) {
- pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
- chip_d->bus_nr, irq_d->spmi_slave,
- irq_d->spmi_offset);
- return;
- }
-
- per_d->int_en &= ~irq_d->mask_shift;
-
- if (prev_int_en && !(per_d->int_en)) {
- /*
- * no interrupt on this peripheral is enabled
- * ask the arbiter to ignore this peripheral
- */
- qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
- }
-
- rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
- &irq_d->mask_shift, 1);
- if (rc) {
- pr_err("spmi failure on irq %d\n", d->irq);
- return;
- }
-
- rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR,
- &irq_d->mask_shift, 1);
- if (rc) {
- pr_err("spmi failure on irq %d\n", d->irq);
- return;
- }
+ qpnpint_irq_mask(d);
+ qpnpint_irq_ack(d);
}
static void qpnpint_irq_unmask(struct irq_data *d)
@@ -269,6 +251,7 @@
struct q_chip_data *chip_d = irq_d->chip_d;
struct q_perip_data *per_d = irq_d->per_d;
int rc;
+ uint8_t buf[2];
uint8_t prev_int_en = per_d->int_en;
pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
@@ -289,12 +272,29 @@
*/
qpnpint_arbiter_op(d, irq_d, chip_d->cb->unmask);
}
- rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_SET,
- &irq_d->mask_shift, 1);
+
+ /* Check the current state of the interrupt enable bit. */
+ rc = qpnpint_spmi_read(irq_d, QPNPINT_REG_EN_SET, buf, 1);
if (rc) {
- pr_err("spmi failure on irq %d\n", d->irq);
+ pr_err("SPMI read failure for IRQ %d, rc=%d\n", d->irq, rc);
return;
}
+
+ if (!(buf[0] & irq_d->mask_shift)) {
+ /*
+ * Since the interrupt is currently disabled, write to both the
+ * LATCHED_CLR and EN_SET registers so that a spurious interrupt
+ * cannot be triggered when the interrupt is enabled.
+ */
+ buf[0] = irq_d->mask_shift;
+ buf[1] = irq_d->mask_shift;
+ rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR, buf, 2);
+ if (rc) {
+ pr_err("SPMI write failure for IRQ %d, rc=%d\n", d->irq,
+ rc);
+ return;
+ }
+ }
}
static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
@@ -336,6 +336,11 @@
return rc;
}
+ if (flow_type & IRQ_TYPE_EDGE_BOTH)
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ else
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+
return 0;
}
@@ -363,6 +368,7 @@
static struct irq_chip qpnpint_chip = {
.name = "qpnp-int",
+ .irq_ack = qpnpint_irq_ack,
.irq_mask = qpnpint_irq_mask,
.irq_mask_ack = qpnpint_irq_mask_ack,
.irq_unmask = qpnpint_irq_unmask,
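
Note: the reworked qpnpint_irq_unmask() above reads EN_SET first and, only when the enable bit is currently clear, clears the latched status together with setting the enable bit, so a stale latched interrupt cannot fire the moment it is unmasked. Below is a toy model of that clear-before-enable ordering using a plain array in place of the SPMI peripheral registers; the register indices and helper names are illustrative.

#include <stdio.h>
#include <stdint.h>

/* Toy register file standing in for the SPMI interrupt peripheral. */
enum { REG_LATCHED = 0, REG_EN_SET = 1, NR_REGS = 2 };
static uint8_t regs[NR_REGS];

static uint8_t reg_read(int r)             { return regs[r]; }
static void    reg_write(int r, uint8_t v) { regs[r] = v; }

/* Unmask one interrupt bit, clearing any stale latched status first. */
static void unmask_irq(uint8_t mask)
{
        if (!(reg_read(REG_EN_SET) & mask)) {
                /*
                 * Interrupt is currently disabled: clear the latched bit
                 * before (or together with) setting the enable bit so a
                 * stale latch cannot raise a spurious interrupt.
                 */
                regs[REG_LATCHED] &= ~mask;     /* LATCHED_CLR */
                reg_write(REG_EN_SET, reg_read(REG_EN_SET) | mask);
        }
}

int main(void)
{
        regs[REG_LATCHED] = 0x04;       /* stale latched status for bit 2 */
        unmask_irq(0x04);
        printf("latched=0x%02x en=0x%02x\n",
               regs[REG_LATCHED], regs[REG_EN_SET]);
        return 0;
}
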
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 7ca247a..41ebc1c 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -365,7 +365,9 @@
if (!sc->nr_to_scan)
return lru_count;
- mutex_lock(&ashmem_mutex);
+ if (!mutex_trylock(&ashmem_mutex))
+ return -1;
+
list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
struct inode *inode = range->asma->file->f_dentry->d_inode;
loff_t start = range->pgstart * PAGE_SIZE;
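
Note: the ashmem shrinker above switches to mutex_trylock() and returns -1 when the lock is contended, because sleeping on ashmem_mutex from reclaim could deadlock against a holder that is itself waiting for memory. A userspace analogue of that non-blocking shrink attempt, using pthread_mutex_trylock(), is sketched here; shrink_cache() and struct cache are illustrative.

#include <pthread.h>
#include <stdio.h>

struct cache {
        pthread_mutex_t lock;
        int nr_items;
};

/*
 * Try to shrink the cache without blocking: if the lock is already held
 * (possibly by a thread that is itself waiting on reclaim), report
 * "cannot shrink now" instead of risking a deadlock.
 */
static int shrink_cache(struct cache *c, int nr_to_scan)
{
        int freed;

        if (pthread_mutex_trylock(&c->lock) != 0)
                return -1;              /* contended: skip this pass */

        freed = nr_to_scan < c->nr_items ? nr_to_scan : c->nr_items;
        c->nr_items -= freed;
        pthread_mutex_unlock(&c->lock);
        return freed;
}

int main(void)
{
        struct cache c = { PTHREAD_MUTEX_INITIALIZER, 10 };
        int freed;

        freed = shrink_cache(&c, 4);
        printf("freed=%d left=%d\n", freed, c.nr_items);

        pthread_mutex_lock(&c.lock);    /* simulate a contended lock */
        printf("freed=%d (contended)\n", shrink_cache(&c, 4));
        pthread_mutex_unlock(&c.lock);
        return 0;
}
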
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 517ec05..4058bec 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -3,7 +3,7 @@
* MSM 7k High speed uart driver
*
* Copyright (c) 2008 Google Inc.
- * Copyright (c) 2007-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2007-2014, The Linux Foundation. All rights reserved.
* Modified: Nick Pelly <npelly@google.com>
*
* All source code in this file is licensed under the following license
@@ -94,7 +94,6 @@
#define MSM_HS_DBG(x...) do { \
if (hs_serial_debug_mask >= DBG_LEV) { \
- pr_debug(x); \
if (ipc_msm_hs_log_ctxt) \
ipc_log_string(ipc_msm_hs_log_ctxt, x); \
} \
@@ -102,7 +101,6 @@
#define MSM_HS_INFO(x...) do { \
if (hs_serial_debug_mask >= INFO_LEV) {\
- pr_info(x); \
if (ipc_msm_hs_log_ctxt) \
ipc_log_string(ipc_msm_hs_log_ctxt, x); \
} \
@@ -141,7 +139,7 @@
FLUSH_NONE,
FLUSH_DATA_READY,
FLUSH_DATA_INVALID, /* values after this indicate invalid data */
- FLUSH_IGNORE = FLUSH_DATA_INVALID,
+ FLUSH_IGNORE,
FLUSH_STOP,
FLUSH_SHUTDOWN,
};
@@ -205,6 +203,8 @@
struct delayed_work flip_insert_work;
struct tasklet_struct tlet;
struct msm_hs_sps_ep_conn_data prod;
+ bool rx_cmd_queued;
+ bool rx_cmd_exec;
};
enum buffer_states {
NONE_PENDING = 0x0,
@@ -271,6 +271,8 @@
int rx_count_callback;
bool rx_bam_inprogress;
unsigned int *reg_ptr;
+ wait_queue_head_t bam_disconnect_wait;
+
};
unsigned int regmap_nonblsp[UART_DM_LAST] = {
@@ -344,7 +346,6 @@
#define BLSP_UART_CLK_FMAX 63160000
static struct dentry *debug_base;
-static struct msm_hs_port q_uart_port[UARTDM_NR];
static struct platform_driver msm_serial_hs_platform_driver;
static struct uart_driver msm_hs_driver;
static struct uart_ops msm_hs_ops;
@@ -352,6 +353,7 @@
static void msm_serial_hs_rx_tlet(unsigned long tlet_ptr);
static void flip_insert_work(struct work_struct *work);
static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote);
+static struct msm_hs_port *msm_hs_get_hs_port(int port_index);
#define UARTDM_TO_MSM(uart_port) \
container_of((uart_port), struct msm_hs_port, uport)
@@ -424,6 +426,7 @@
}
}
msm_uport->clk_state = MSM_HS_CLK_ON;
+ MSM_HS_DBG("%s: Clock ON successful\n", __func__);
}
@@ -432,71 +435,98 @@
static void msm_hs_clock_unvote(struct msm_hs_port *msm_uport)
{
- int rc = atomic_dec_return(&msm_uport->clk_count);
+ int rc = atomic_read(&msm_uport->clk_count);
- if (rc < 0) {
- msm_hs_bus_voting(msm_uport, BUS_RESET);
+ if (rc <= 0) {
WARN(rc, "msm_uport->clk_count < 0!");
dev_err(msm_uport->uport.dev,
- "%s: Clocks count invalid [%d]\n", __func__,
- atomic_read(&msm_uport->clk_count));
+ "%s: Clocks count invalid [%d]\n", __func__, rc);
return;
}
+ rc = atomic_dec_return(&msm_uport->clk_count);
if (0 == rc) {
- msm_hs_bus_voting(msm_uport, BUS_RESET);
/* Turn off the core clk and iface clk*/
clk_disable_unprepare(msm_uport->clk);
if (msm_uport->pclk)
clk_disable_unprepare(msm_uport->pclk);
+ /* Unvote the PNOC clock */
+ msm_hs_bus_voting(msm_uport, BUS_RESET);
msm_uport->clk_state = MSM_HS_CLK_OFF;
+ MSM_HS_DBG("%s: Clock OFF successful\n", __func__);
}
}
+/* Check if the uport line number matches with user id stored in pdata.
+ * User id information is stored during initialization. This function
+ * ensures that the same device is selected */
+
+static struct msm_hs_port *get_matching_hs_port(struct platform_device *pdev)
+{
+ struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
+ struct msm_hs_port *msm_uport = msm_hs_get_hs_port(pdev->id);
+
+ if ((!msm_uport) || (msm_uport->uport.line != pdev->id
+ && msm_uport->uport.line != pdata->userid)) {
+ MSM_HS_ERR("uport line number mismatch!");
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return msm_uport;
+}
+
static ssize_t show_clock(struct device *dev, struct device_attribute *attr,
char *buf)
{
int state = 1;
+ ssize_t ret = 0;
enum msm_hs_clk_states_e clk_state;
unsigned long flags;
-
struct platform_device *pdev = container_of(dev, struct
platform_device, dev);
- struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
+ struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
- spin_lock_irqsave(&msm_uport->uport.lock, flags);
- clk_state = msm_uport->clk_state;
- spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+ /* This check should not fail */
+ if (msm_uport) {
+ spin_lock_irqsave(&msm_uport->uport.lock, flags);
+ clk_state = msm_uport->clk_state;
+ spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
- if (clk_state <= MSM_HS_CLK_OFF)
- state = 0;
+ if (clk_state <= MSM_HS_CLK_OFF)
+ state = 0;
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", state);
+ }
- return snprintf(buf, PAGE_SIZE, "%d\n", state);
+ return ret;
}
static ssize_t set_clock(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int state;
+ ssize_t ret = 0;
struct platform_device *pdev = container_of(dev, struct
platform_device, dev);
- struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
+ struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
- state = buf[0] - '0';
- switch (state) {
- case 0: {
- msm_hs_request_clock_off(&msm_uport->uport);
- break;
+ /* This check should not fail */
+ if (msm_uport) {
+ state = buf[0] - '0';
+ switch (state) {
+ case 0:
+ msm_hs_request_clock_off(&msm_uport->uport);
+ ret = count;
+ break;
+ case 1:
+ msm_hs_request_clock_on(&msm_uport->uport);
+ ret = count;
+ break;
+ default:
+ ret = -EINVAL;
+ }
}
- case 1: {
- msm_hs_request_clock_on(&msm_uport->uport);
- break;
- }
- default: {
- return -EINVAL;
- }
- }
- return count;
+ return ret;
}
static DEVICE_ATTR(clock, S_IWUSR | S_IRUGO, show_clock, set_clock);
@@ -583,22 +613,33 @@
*/
static void dump_uart_hs_registers(struct msm_hs_port *msm_uport)
{
- msm_hs_clock_vote(msm_uport);
- MSM_HS_DBG("============= UART Registers ================\n");
- MSM_HS_DBG("UART_DM_MR1:%x\n", msm_hs_read(&(msm_uport->uport),
- UART_DM_MR1));
- MSM_HS_DBG("UART_DM_MR2:%x\n", msm_hs_read(&(msm_uport->uport),
- UART_DM_MR2));
- MSM_HS_DBG("UART_DM_IPR:%x\n", msm_hs_read(&(msm_uport->uport),
- UART_DM_IPR));
- MSM_HS_DBG("UART_DM_RFWR:%x\n", msm_hs_read(&(msm_uport->uport),
- UART_DM_RFWR));
- MSM_HS_DBG("UART_DM_SR:%x\n", msm_hs_read(&(msm_uport->uport),
- UART_DM_SR));
- MSM_HS_DBG("UART_DM_IMR: %x\n", msm_hs_read(&(msm_uport->uport),
- UART_DM_IMR));
- MSM_HS_DBG("=============================================\n");
- msm_hs_clock_unvote(msm_uport);
+ struct uart_port *uport = &(msm_uport->uport);
+ if (msm_uport->clk_state != MSM_HS_CLK_ON) {
+ MSM_HS_WARN("%s: Failed.Clocks are OFF\n", __func__);
+ return;
+ }
+
+ MSM_HS_DBG(
+ "MR1:%x MR2:%x TFWR:%x RFWR:%x DMEN:%x IMR:%x MISR:%x NCF_TX:%x\n",
+ msm_hs_read(uport, UART_DM_MR1),
+ msm_hs_read(uport, UART_DM_MR2),
+ msm_hs_read(uport, UART_DM_TFWR),
+ msm_hs_read(uport, UART_DM_RFWR),
+ msm_hs_read(uport, UART_DM_DMEN),
+ msm_hs_read(uport, UART_DM_IMR),
+ msm_hs_read(uport, UART_DM_MISR),
+ msm_hs_read(uport, UART_DM_NCF_TX));
+ MSM_HS_INFO("SR:%x ISR:%x DMRX:%x RX_SNAP:%x TXFS:%x RXFS:%x\n",
+ msm_hs_read(uport, UART_DM_SR),
+ msm_hs_read(uport, UART_DM_ISR),
+ msm_hs_read(uport, UART_DM_DMRX),
+ msm_hs_read(uport, UART_DM_RX_TOTAL_SNAP),
+ msm_hs_read(uport, UART_DM_TXFS),
+ msm_hs_read(uport, UART_DM_RXFS));
+ MSM_HS_DBG("clk_req_state:0x%x rx.flush:%u\n",
+ msm_uport->clk_req_off_state,
+ msm_uport->rx.flush);
+ MSM_HS_DBG("clk_state:%d", msm_uport->clk_state);
}
static void msm_hs_release_port(struct uart_port *port)
@@ -742,9 +783,11 @@
return -EINVAL;
}
- msm_uport = &q_uart_port[pdev->id];
- dev = msm_uport->uport.dev;
+ msm_uport = get_matching_hs_port(pdev);
+ if (!msm_uport)
+ return -EINVAL;
+ dev = msm_uport->uport.dev;
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
debugfs_remove(msm_uport->loopback_dir);
@@ -1096,6 +1139,10 @@
struct msm_hs_rx *rx = &msm_uport->rx;
struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
+ if (msm_uport->clk_state != MSM_HS_CLK_ON) {
+ MSM_HS_WARN("%s: Failed.Clocks are OFF\n", __func__);
+ return;
+ }
mutex_lock(&msm_uport->clk_mutex);
msm_hs_write(uport, UART_DM_IMR, 0);
@@ -1109,6 +1156,8 @@
data = msm_hs_read(uport, UART_DM_MR1);
data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
msm_hs_write(uport, UART_DM_MR1, data);
+ /* set RFR_N to high */
+ msm_hs_write(uport, UART_DM_CR, RFR_HIGH);
/*
* Disable Rx channel of UARTDM
@@ -1195,10 +1244,12 @@
msm_hs_write(uport, UART_DM_CR, RESET_RX);
msm_hs_write(uport, UART_DM_CR, RESET_TX);
+ /* Issue TX BAM Start IFC command */
+ msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
if (msm_uport->rx.flush == FLUSH_NONE) {
wake_lock(&msm_uport->rx.wake_lock);
- msm_uport->rx.flush = FLUSH_IGNORE;
+ msm_uport->rx.flush = FLUSH_DATA_INVALID;
/*
* Before using dmov APIs make sure that
* previous writel are completed. Hence
@@ -1215,6 +1266,7 @@
MSM_HS_ERR("%s(): sps_disconnect failed\n",
__func__);
msm_hs_spsconnect_rx(uport);
+ msm_uport->rx.flush = FLUSH_IGNORE;
msm_serial_hs_rx_tlet((unsigned long) &rx->tlet);
} else {
msm_uport->rx_discard_flush_issued = true;
@@ -1237,12 +1289,12 @@
* UART Core would trigger RFR if it is not having any space with
* RX FIFO.
*/
+ /* Pulling RFR line high */
+ msm_hs_write(uport, UART_DM_CR, RFR_LOW);
data = msm_hs_read(uport, UART_DM_MR1);
data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);
- if (c_cflag & CRTSCTS) {
data |= UARTDM_MR1_CTS_CTL_BMSK;
data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
- }
msm_hs_write(uport, UART_DM_MR1, data);
msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
@@ -1265,6 +1317,7 @@
msm_hs_clock_vote(msm_uport);
data = msm_hs_read(uport, UART_DM_SR);
msm_hs_clock_unvote(msm_uport);
+ MSM_HS_DBG("%s(): SR Reg Read 0x%x", __func__, data);
if (data & UARTDM_SR_TXEMT_BMSK)
ret = TIOCSER_TEMT;
@@ -1300,6 +1353,9 @@
wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 2);
msm_uport->rx.flush = FLUSH_SHUTDOWN;
+ MSM_HS_DBG("%s: Calling Completion\n", __func__);
+ wake_up(&msm_uport->bam_disconnect_wait);
+ MSM_HS_DBG("%s: Done Completion\n", __func__);
wake_up(&msm_uport->rx.wait);
}
@@ -1316,16 +1372,19 @@
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
unsigned int data;
- /* disable dlink */
- data = msm_hs_read(uport, UART_DM_DMEN);
- if (is_blsp_uart(msm_uport))
- data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
- else
- data &= ~UARTDM_RX_DM_EN_BMSK;
- msm_hs_write(uport, UART_DM_DMEN, data);
+ MSM_HS_DBG("In %s():\n", __func__);
+ if (msm_uport->clk_state != MSM_HS_CLK_OFF) {
+ /* disable dlink */
+ data = msm_hs_read(uport, UART_DM_DMEN);
+ if (is_blsp_uart(msm_uport))
+ data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
+ else
+ data &= ~UARTDM_RX_DM_EN_BMSK;
+ msm_hs_write(uport, UART_DM_DMEN, data);
- /* calling DMOV or CLOCK API. Hence mb() */
- mb();
+ /* calling DMOV or CLOCK API. Hence mb() */
+ mb();
+ }
/* Disable the receiver */
if (msm_uport->rx.flush == FLUSH_NONE) {
wake_lock(&msm_uport->rx.wake_lock);
@@ -1352,7 +1411,7 @@
int aligned_tx_count;
dma_addr_t src_addr;
dma_addr_t aligned_src_addr;
- u32 flags = SPS_IOVEC_FLAG_EOT;
+ u32 flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
struct msm_hs_tx *tx = &msm_uport->tx;
struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
@@ -1360,6 +1419,12 @@
if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
msm_hs_stop_tx_locked(uport);
+ if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
+ MSM_HS_DBG("%s(): Clock off requested calling WQ",
+ __func__);
+ queue_work(msm_uport->hsuart_wq,
+ &msm_uport->clock_off_w);
+ }
return;
}
@@ -1385,10 +1450,9 @@
dma_sync_single_for_device(uport->dev, aligned_src_addr,
aligned_tx_count, DMA_TO_DEVICE);
- if (is_blsp_uart(msm_uport)) {
- /* Issue TX BAM Start IFC command */
- msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
- } else {
+ if (is_blsp_uart(msm_uport))
+ tx->tx_count = tx_count;
+ else {
tx->command_ptr->num_rows =
(((tx_count + 15) >> 4) << 16) |
((tx_count + 15) >> 4);
@@ -1399,18 +1463,16 @@
*tx->command_ptr_ptr = CMD_PTR_LP |
DMOV_CMD_ADDR(tx->mapped_cmd_ptr);
+ /* Save tx_count to use in Callback */
+ tx->tx_count = tx_count;
+ msm_hs_write(uport, UART_DM_NCF_TX, tx_count);
+ msm_uport->imr_reg &= ~UARTDM_ISR_TX_READY_BMSK;
+ msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+ /* Calling next DMOV API. Hence mb() here. */
+ mb();
+
}
- /* Save tx_count to use in Callback */
- tx->tx_count = tx_count;
- msm_hs_write(uport, UART_DM_NCF_TX, tx_count);
-
- /* Disable the tx_ready interrupt */
- msm_uport->imr_reg &= ~UARTDM_ISR_TX_READY_BMSK;
- msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
- /* Calling next DMOV API. Hence mb() here. */
- mb();
-
msm_uport->tx.flush = FLUSH_NONE;
if (is_blsp_uart(msm_uport)) {
@@ -1439,6 +1501,16 @@
unsigned int buffer_pending = msm_uport->rx.buffer_pending;
unsigned int data;
+ if (msm_uport->clk_state != MSM_HS_CLK_ON) {
+ MSM_HS_WARN("%s: Failed.Clocks are OFF\n", __func__);
+ return;
+ }
+ if (rx->rx_cmd_exec) {
+ MSM_HS_DBG("%s: Rx Cmd got executed, wait for rx_tlet\n",
+ __func__);
+ rx->flush = FLUSH_IGNORE;
+ return;
+ }
msm_uport->rx.buffer_pending = 0;
if (buffer_pending && hs_serial_debug_mask)
MSM_HS_ERR("Error: rx started in buffer state = %x",
@@ -1447,8 +1519,6 @@
msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
msm_hs_write(uport, UART_DM_DMRX, UARTDM_RX_BUF_SIZE);
msm_hs_write(uport, UART_DM_CR, STALE_EVENT_ENABLE);
- msm_uport->imr_reg |= UARTDM_ISR_RXLEV_BMSK;
-
/*
* Enable UARTDM Rx Interface as previously it has been
* disable in set_termios before configuring baud rate.
@@ -1488,10 +1558,8 @@
sps_transfer_one(sps_pipe_handle, rx->rbuffer,
UARTDM_RX_BUF_SIZE, msm_uport, flags);
msm_uport->rx_bam_inprogress = false;
+ msm_uport->rx.rx_cmd_queued = true;
wake_up(&msm_uport->rx.wait);
- } else {
- msm_dmov_enqueue_cmd(msm_uport->dma_rx_channel,
- &msm_uport->rx.xfer);
}
MSM_HS_DBG("%s:Enqueue Rx Cmd\n", __func__);
dump_uart_hs_registers(msm_uport);
@@ -1551,7 +1619,7 @@
static void msm_serial_hs_rx_tlet(unsigned long tlet_ptr)
{
int retval;
- int rx_count;
+ int rx_count = 0;
unsigned long status;
unsigned long flags;
unsigned int error_f = 0;
@@ -1571,6 +1639,9 @@
notify = &msm_uport->notify;
rx = &msm_uport->rx;
+ msm_uport->rx.rx_cmd_queued = false;
+ msm_uport->rx.rx_cmd_exec = false;
+
status = msm_hs_read(uport, UART_DM_SR);
spin_lock_irqsave(&uport->lock, flags);
@@ -1619,29 +1690,30 @@
}
}
- if (error_f)
- msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
-
- if (msm_uport->clk_req_off_state == CLK_REQ_OFF_FLUSH_ISSUED)
- msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_FLUSHED;
+ if (error_f) {
+ if (msm_uport->clk_state == MSM_HS_CLK_ON)
+ msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
+ else
+ MSM_HS_WARN("%s: Failed. Clocks are OFF\n", __func__);
+ }
flush = msm_uport->rx.flush;
if (flush == FLUSH_IGNORE)
- if (!msm_uport->rx.buffer_pending)
+ if (!msm_uport->rx.buffer_pending) {
+ MSM_HS_DBG("%s: calling start_rx_locked\n", __func__);
msm_hs_start_rx_locked(uport);
-
- if (flush == FLUSH_STOP) {
- msm_uport->rx.flush = FLUSH_SHUTDOWN;
- wake_up(&msm_uport->rx.wait);
- }
+ }
if (flush >= FLUSH_DATA_INVALID)
goto out;
if (is_blsp_uart(msm_uport)) {
rx_count = msm_uport->rx_count_callback;
} else {
- rx_count = msm_hs_read(uport, UART_DM_RX_TOTAL_SNAP);
- /* order the read of rx.buffer */
- rmb();
+ if (msm_uport->clk_state == MSM_HS_CLK_ON) {
+ rx_count = msm_hs_read(uport, UART_DM_RX_TOTAL_SNAP);
+ /* order the read of rx.buffer */
+ rmb();
+ } else
+ MSM_HS_WARN("%s: Failed. Clocks are OFF\n", __func__);
}
MSM_HS_DBG("%s():[UART_RX]<%d>\n", __func__, rx_count);
@@ -1656,24 +1728,21 @@
retval << 5 | (rx_count - retval) << 16;
}
}
-
- MSM_HS_DBG("%s() read rx buffer complete", __func__);
- /* order the read of rx.buffer and the start of next rx xfer */
- wmb();
-
- if (!msm_uport->rx.buffer_pending) {
+ if (!msm_uport->rx.buffer_pending && !msm_uport->rx.rx_cmd_queued) {
if (is_blsp_uart(msm_uport)) {
msm_uport->rx.flush = FLUSH_NONE;
msm_uport->rx_bam_inprogress = true;
sps_pipe_handle = rx->prod.pipe_handle;
+ MSM_HS_DBG("Queueing BAM descriptor\n");
/* Queue transfer request to SPS */
sps_transfer_one(sps_pipe_handle, rx->rbuffer,
UARTDM_RX_BUF_SIZE, msm_uport, sps_flags);
msm_uport->rx_bam_inprogress = false;
+ msm_uport->rx.rx_cmd_queued = true;
wake_up(&msm_uport->rx.wait);
- } else {
+
+ } else
msm_hs_start_rx_locked(uport);
- }
}
out:
if (msm_uport->rx.buffer_pending) {
@@ -1695,8 +1764,12 @@
{
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ if (msm_uport->clk_state != MSM_HS_CLK_ON) {
+ MSM_HS_WARN("%s: Failed. Clocks are OFF\n", __func__);
+ }
if (msm_uport->tx.tx_ready_int_en == 0) {
- msm_uport->tx.tx_ready_int_en = 1;
+ if (!is_blsp_uart(msm_uport))
+ msm_uport->tx.tx_ready_int_en = 1;
if (msm_uport->tx.dma_in_flight == 0)
msm_hs_submit_tx_locked(uport);
}
@@ -1718,11 +1791,12 @@
((struct sps_event_notify *)notify)->user;
msm_uport->notify = *notify;
- MSM_HS_DBG("%s: sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
- __func__, notify->event_id,
- notify->data.transfer.iovec.addr,
- notify->data.transfer.iovec.size,
- notify->data.transfer.iovec.flags);
+ MSM_HS_DBG("%s: ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x, line=%d\n",
+ __func__, notify->event_id,
+ notify->data.transfer.iovec.addr,
+ notify->data.transfer.iovec.size,
+ notify->data.transfer.iovec.flags,
+ msm_uport->uport.line);
tasklet_schedule(&msm_uport->tx.tlet);
}
@@ -1756,6 +1830,35 @@
unsigned long flags;
struct msm_hs_port *msm_uport = container_of((struct tasklet_struct *)
tlet_ptr, struct msm_hs_port, tx.tlet);
+ struct uart_port *uport = &msm_uport->uport;
+ struct circ_buf *tx_buf = &uport->state->xmit;
+ struct msm_hs_tx *tx = &msm_uport->tx;
+
+ /*
+ * In BAM mode, perform the same transmit buffer
+ * bookkeeping that is done in legacy mode
+ */
+
+ if (!msm_uport->tty_flush_receive)
+ tx_buf->tail = (tx_buf->tail +
+ tx->tx_count) & ~UART_XMIT_SIZE;
+ else
+ msm_uport->tty_flush_receive = false;
+
+ tx->dma_in_flight = 0;
+
+ uport->icount.tx += tx->tx_count;
+
+ /*
+ * Submit the next chunk of data. If the circular
+ * buffer is empty, transmission stops. If a clock
+ * off was requested, the clock off sequence is
+ * kicked off.
+ */
+ msm_hs_submit_tx_locked(uport);
+
+ if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
+ uart_write_wakeup(uport);
spin_lock_irqsave(&(msm_uport->uport.lock), flags);
if (msm_uport->tx.flush == FLUSH_STOP) {
@@ -1765,10 +1868,14 @@
return;
}
- msm_uport->imr_reg |= UARTDM_ISR_TX_READY_BMSK;
- msm_hs_write(&(msm_uport->uport), UART_DM_IMR, msm_uport->imr_reg);
- /* Calling clk API. Hence mb() requires. */
- mb();
+ /* Set TX_READY_BMSK only in non-BAM mode */
+ if (!is_blsp_uart(msm_uport)) {
+ msm_uport->imr_reg |= UARTDM_ISR_TX_READY_BMSK;
+ msm_hs_write(&(msm_uport->uport), UART_DM_IMR,
+ msm_uport->imr_reg);
+ /* Calling clk API. Hence mb() requires. */
+ mb();
+ }
spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
MSM_HS_DBG("In %s()\n", __func__);
@@ -1804,6 +1911,7 @@
if (msm_uport->rx.flush == FLUSH_NONE) {
spin_lock_irqsave(&uport->lock, flags);
msm_uport->rx_count_callback = notify->data.transfer.iovec.size;
+ msm_uport->rx.rx_cmd_exec = true;
spin_unlock_irqrestore(&uport->lock, flags);
tasklet_schedule(&msm_uport->rx.tlet);
}
@@ -1873,7 +1981,12 @@
{
unsigned int set_rts;
unsigned int data;
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ if (msm_uport->clk_state != MSM_HS_CLK_ON) {
+ MSM_HS_WARN("%s: Failed. Clocks are OFF\n", __func__);
+ return;
+ }
/* RTS is active low */
set_rts = TIOCM_RTS & mctrl ? 0 : 1;
@@ -1911,6 +2024,11 @@
{
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ if (msm_uport->clk_state != MSM_HS_CLK_ON) {
+ MSM_HS_WARN("%s: Failed. Clocks are OFF\n", __func__);
+ return;
+ }
+
/* Enable DELTA_CTS Interrupt */
msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
@@ -1935,6 +2053,12 @@
static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
{
unsigned long flags;
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ if (msm_uport->clk_state != MSM_HS_CLK_ON) {
+ MSM_HS_WARN("%s: Failed. Clocks are OFF\n", __func__);
+ return;
+ }
spin_lock_irqsave(&uport->lock, flags);
msm_hs_write(uport, UART_DM_CR, ctl ? START_BREAK : STOP_BREAK);
@@ -1967,6 +2091,12 @@
/* Handle CTS changes (Called from interrupt handler) */
static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
{
+ struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+ if (msm_uport->clk_state != MSM_HS_CLK_ON) {
+ MSM_HS_WARN("%s: Failed. Clocks are OFF\n", __func__);
+ return;
+ }
/* clear interrupt */
msm_hs_write(uport, UART_DM_CR, RESET_CTS);
/* Calling CLOCK API. Hence mb() requires here. */
@@ -1986,60 +2116,47 @@
{
unsigned long sr_status;
unsigned long flags;
- int ret;
+ unsigned int data;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
struct circ_buf *tx_buf = &uport->state->xmit;
mutex_lock(&msm_uport->clk_mutex);
spin_lock_irqsave(&uport->lock, flags);
+ MSM_HS_DBG("In %s:\n", __func__);
/* Cancel if tx tty buffer is not empty, dma is in flight,
- * or tx fifo is not empty */
+ * or tx fifo is not empty
+ */
if (msm_uport->clk_state != MSM_HS_CLK_REQUEST_OFF ||
!uart_circ_empty(tx_buf) || msm_uport->tx.dma_in_flight ||
msm_uport->imr_reg & UARTDM_ISR_TXLEV_BMSK) {
spin_unlock_irqrestore(&uport->lock, flags);
mutex_unlock(&msm_uport->clk_mutex);
+ if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
+ msm_uport->clk_state = MSM_HS_CLK_ON;
+ /* Pulling RFR line high */
+ msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+ /* Enable auto RFR */
+ data = msm_hs_read(uport, UART_DM_MR1);
+ data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+ msm_hs_write(uport, UART_DM_MR1, data);
+ mb();
+ }
+ MSM_HS_DBG("%s(): clkstate %d", __func__, msm_uport->clk_state);
return -1;
}
- /* Make sure the uart is finished with the last byte */
- sr_status = msm_hs_read(uport, UARTDM_SR);
+ /* Make sure the uart is finished with the last byte;
+ * use the B-family register
+ */
+ sr_status = msm_hs_read(uport, UART_DM_SR);
if (!(sr_status & UARTDM_SR_TXEMT_BMSK)) {
spin_unlock_irqrestore(&uport->lock, flags);
mutex_unlock(&msm_uport->clk_mutex);
+ MSM_HS_DBG("%s(): SR TXEMT fail %lx", __func__, sr_status);
return 0; /* retry */
}
- /* Make sure forced RXSTALE flush complete */
- switch (msm_uport->clk_req_off_state) {
- case CLK_REQ_OFF_START:
- msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED;
-
- if (!is_blsp_uart(msm_uport)) {
- msm_hs_write(uport, UART_DM_CR, FORCE_STALE_EVENT);
- /*
- * Before returning make sure that device writel
- * completed. Hence mb() requires here.
- */
- mb();
- }
- spin_unlock_irqrestore(&uport->lock, flags);
- mutex_unlock(&msm_uport->clk_mutex);
- return 0; /* RXSTALE flush not complete - retry */
- case CLK_REQ_OFF_RXSTALE_ISSUED:
- case CLK_REQ_OFF_FLUSH_ISSUED:
- spin_unlock_irqrestore(&uport->lock, flags);
- if (is_blsp_uart(msm_uport)) {
- msm_uport->clk_req_off_state =
- CLK_REQ_OFF_RXSTALE_FLUSHED;
- }
- mutex_unlock(&msm_uport->clk_mutex);
- return 0; /* RXSTALE flush not complete - retry */
- case CLK_REQ_OFF_RXSTALE_FLUSHED:
- break; /* continue */
- }
-
if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
if (msm_uport->rx.flush == FLUSH_NONE) {
msm_hs_stop_rx_locked(uport);
@@ -2047,24 +2164,23 @@
msm_uport->rx_discard_flush_issued = true;
}
+ MSM_HS_DBG("%s: rx.flush %d clk_state %d\n", __func__,
+ msm_uport->rx.flush, msm_uport->clk_state);
spin_unlock_irqrestore(&uport->lock, flags);
- if (msm_uport->rx_discard_flush_issued) {
- MSM_HS_DBG("%s(): wainting for flush completion.\n",
- __func__);
- ret = wait_event_timeout(msm_uport->rx.wait,
- msm_uport->rx_discard_flush_issued == false,
- RX_FLUSH_COMPLETE_TIMEOUT);
- if (!ret)
- MSM_HS_ERR("%s(): Flush complete pending.\n",
- __func__);
- }
-
mutex_unlock(&msm_uport->clk_mutex);
return 0; /* come back later to really clock off */
}
spin_unlock_irqrestore(&uport->lock, flags);
+ /* Pulling RFR line high */
+ msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+ /* Enable auto RFR */
+ data = msm_hs_read(uport, UART_DM_MR1);
+ data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+ msm_hs_write(uport, UART_DM_MR1, data);
+ mb();
+
/* we really want to clock off */
msm_hs_clock_unvote(msm_uport);
@@ -2077,8 +2193,6 @@
spin_unlock_irqrestore(&uport->lock, flags);
- /* Reset PNOC Bus Scaling */
- msm_hs_bus_voting(msm_uport, BUS_RESET);
mutex_unlock(&msm_uport->clk_mutex);
return 1;
@@ -2142,11 +2256,6 @@
mb();
MSM_HS_DBG("%s:Stal Interrupt\n", __func__);
- if (msm_uport->clk_req_off_state ==
- CLK_REQ_OFF_RXSTALE_ISSUED)
- msm_uport->clk_req_off_state =
- CLK_REQ_OFF_FLUSH_ISSUED;
-
if (!is_blsp_uart(msm_uport) && (rx->flush == FLUSH_NONE)) {
rx->flush = FLUSH_DATA_READY;
msm_dmov_flush(msm_uport->dma_rx_channel, 1);
@@ -2170,7 +2279,8 @@
/* Complete DMA TX transactions and submit new transactions */
/* Do not update tx_buf.tail if uart_flush_buffer already
- called in serial core */
+ * called in serial core
+ */
if (!msm_uport->tty_flush_receive)
tx_buf->tail = (tx_buf->tail +
tx->tx_count) & ~UART_XMIT_SIZE;
@@ -2207,37 +2317,57 @@
return IRQ_HANDLED;
}
-/*
- * Find UART device port using its port index value.
+/* The following two functions provide interfaces to get the underlying
+ * port structure (struct uart_port or struct msm_hs_port) given
+ * the port index. msm_hs_get_uart_port is called by clients.
+ * The function msm_hs_get_hs_port is for internal use.
*/
+
struct uart_port *msm_hs_get_uart_port(int port_index)
{
- int i;
+ struct uart_state *state = msm_hs_driver.state + port_index;
- for (i = 0; i < UARTDM_NR; i++) {
- if (q_uart_port[i].uport.line == port_index)
- return &q_uart_port[i].uport;
- }
+ /* The uart_driver structure stores the states in an array.
+ * Thus the corresponding offset from drv->state returns
+ * the state for the requested uart_port.
+ */
+ if (port_index == state->uart_port->line)
+ return state->uart_port;
return NULL;
}
EXPORT_SYMBOL(msm_hs_get_uart_port);
+static struct msm_hs_port *msm_hs_get_hs_port(int port_index)
+{
+ struct uart_port *uport = msm_hs_get_uart_port(port_index);
+ if (uport)
+ return UARTDM_TO_MSM(uport);
+ return NULL;
+}
+
/* request to turn off uart clock once pending TX is flushed */
void msm_hs_request_clock_off(struct uart_port *uport) {
unsigned long flags;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ int data;
spin_lock_irqsave(&uport->lock, flags);
if (msm_uport->clk_state == MSM_HS_CLK_ON) {
msm_uport->clk_state = MSM_HS_CLK_REQUEST_OFF;
- msm_uport->clk_req_off_state = CLK_REQ_OFF_START;
- msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
- msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
- /*
- * Complete device write before retuning back.
- * Hence mb() requires here.
- */
+ data = msm_hs_read(uport, UART_DM_MR1);
+ /* disable auto ready-for-receiving */
+ data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
+ msm_hs_write(uport, UART_DM_MR1, data);
+ mb();
+ /* set RFR_N to high */
+ msm_hs_write(uport, UART_DM_CR, RFR_HIGH);
+
+ data = msm_hs_read(uport, UART_DM_SR);
+ MSM_HS_DBG("%s(): TXEMT, queuing clock off work\n",
+ __func__);
+ queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
+
mb();
}
spin_unlock_irqrestore(&uport->lock, flags);
@@ -2254,6 +2384,15 @@
mutex_lock(&msm_uport->clk_mutex);
spin_lock_irqsave(&uport->lock, flags);
+ if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
+ /* Pulling RFR line high */
+ msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+ /* Enable auto RFR */
+ data = msm_hs_read(uport, UART_DM_MR1);
+ data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+ msm_hs_write(uport, UART_DM_MR1, data);
+ mb();
+ }
switch (msm_uport->clk_state) {
case MSM_HS_CLK_OFF:
wake_lock(&msm_uport->dma_wake_lock);
@@ -2263,7 +2402,7 @@
ret = msm_hs_clock_vote(msm_uport);
if (ret) {
- dev_err(uport->dev, "Clock ON Failure"
+ MSM_HS_INFO("Clock ON Failure "
"For UART CLK Stalling HSUART\n");
break;
}
@@ -2271,6 +2410,22 @@
spin_lock_irqsave(&uport->lock, flags);
/* else fall-through */
case MSM_HS_CLK_REQUEST_OFF:
+ hrtimer_cancel(&msm_uport->clk_off_timer);
+ if (msm_uport->rx.flush == FLUSH_STOP) {
+ spin_unlock_irqrestore(&uport->lock, flags);
+ MSM_HS_DBG("%s: Calling wait for completion\n",
+ __func__);
+ ret = wait_event_timeout(msm_uport->bam_disconnect_wait,
+ msm_uport->rx.flush == FLUSH_SHUTDOWN, 300);
+ if (!ret)
+ MSM_HS_ERR("BAM Disconnect not happened\n");
+ spin_lock_irqsave(&uport->lock, flags);
+ MSM_HS_DBG("%s:DONE wait for completion\n", __func__);
+ }
+ MSM_HS_DBG("%s:clock state %d\n\n", __func__,
+ msm_uport->clk_state);
+ if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF)
+ msm_uport->clk_state = MSM_HS_CLK_ON;
if (msm_uport->rx.flush == FLUSH_STOP ||
msm_uport->rx.flush == FLUSH_SHUTDOWN) {
msm_hs_write(uport, UART_DM_CR, RESET_RX);
@@ -2283,7 +2438,8 @@
/* Complete above device write. Hence mb() here. */
mb();
}
- hrtimer_try_to_cancel(&msm_uport->clk_off_timer);
+
+ MSM_HS_DBG("%s: rx.flush %d\n", __func__, msm_uport->rx.flush);
if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
if (is_blsp_uart(msm_uport)) {
spin_unlock_irqrestore(&uport->lock, flags);
@@ -2294,7 +2450,7 @@
}
if (msm_uport->rx.flush == FLUSH_STOP)
msm_uport->rx.flush = FLUSH_IGNORE;
- msm_uport->clk_state = MSM_HS_CLK_ON;
+
break;
case MSM_HS_CLK_ON:
break;
@@ -2318,7 +2474,8 @@
spin_lock_irqsave(&uport->lock, flags);
if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
/* ignore the first irq - it is a pending irq that occured
- * before enable_irq() */
+ * before enable_irq()
+ */
if (msm_uport->wakeup.ignore)
msm_uport->wakeup.ignore = 0;
else
@@ -2327,7 +2484,8 @@
if (wakeup) {
/* the uart was clocked off during an rx, wake up and
- * optionally inject char into tty rx */
+ * optionally inject char into tty rx
+ */
spin_unlock_irqrestore(&uport->lock, flags);
msm_hs_request_clock_on(uport);
spin_lock_irqsave(&uport->lock, flags);
@@ -2507,6 +2665,7 @@
}
}
+ msm_hs_write(uport, UARTDM_BCR_ADDR, 0x003F);
/* Set auto RFR Level */
data = msm_hs_read(uport, UART_DM_MR1);
data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
@@ -2548,6 +2707,7 @@
/* Initialize the tx */
tx->tx_ready_int_en = 0;
tx->dma_in_flight = 0;
+ rx->rx_cmd_exec = false;
msm_uport->tty_flush_receive = false;
MSM_HS_DBG("%s: Setting tty_flush_receive to false\n", __func__);
@@ -2572,7 +2732,7 @@
msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
/* TXLEV on empty TX fifo */
- msm_hs_write(uport, UART_DM_TFWR, 0);
+ msm_hs_write(uport, UART_DM_TFWR, 4);
/*
* Complete all device write related configuration before
* queuing RX request. Hence mb() requires here.
@@ -2606,17 +2766,12 @@
}
disable_irq(msm_uport->wakeup.irq);
}
-
- msm_hs_clock_vote(msm_uport);
-
spin_lock_irqsave(&uport->lock, flags);
msm_hs_start_rx_locked(uport);
spin_unlock_irqrestore(&uport->lock, flags);
- msm_hs_clock_unvote(msm_uport);
-
pm_runtime_enable(uport->dev);
return 0;
@@ -2652,6 +2807,7 @@
init_waitqueue_head(&rx->wait);
init_waitqueue_head(&tx->wait);
+ init_waitqueue_head(&msm_uport->bam_disconnect_wait);
wake_lock_init(&rx->wake_lock, WAKE_LOCK_SUSPEND, "msm_serial_hs_rx");
wake_lock_init(&msm_uport->dma_wake_lock, WAKE_LOCK_SUSPEND,
"msm_serial_hs_dma");
@@ -2932,7 +3088,6 @@
sps_config->mode = SPS_MODE_SRC;
sps_config->src_pipe_index = msm_uport->bam_rx_ep_pipe_index;
sps_config->dest_pipe_index = 0;
- sps_config->options = SPS_O_DESC_DONE;
} else {
/* For UART consumer transfer, source is system memory
where as destination is UART peripheral */
@@ -2941,9 +3096,9 @@
sps_config->mode = SPS_MODE_DEST;
sps_config->src_pipe_index = 0;
sps_config->dest_pipe_index = msm_uport->bam_tx_ep_pipe_index;
- sps_config->options = SPS_O_EOT;
}
+ sps_config->options = SPS_O_EOT | SPS_O_DESC_DONE | SPS_O_AUTO_ENABLE;
sps_config->event_thresh = 0x10;
/* Allocate maximum descriptor fifo size */
@@ -2963,12 +3118,11 @@
if (is_producer) {
sps_event->callback = msm_hs_sps_rx_callback;
- sps_event->options = SPS_O_DESC_DONE;
} else {
sps_event->callback = msm_hs_sps_tx_callback;
- sps_event->options = SPS_O_EOT;
}
+ sps_event->options = SPS_O_DESC_DONE | SPS_O_EOT;
sps_event->user = (void *)msm_uport;
/* Now save the sps pipe handle */
@@ -3113,7 +3267,9 @@
return -EINVAL;
}
- msm_uport = &q_uart_port[pdev->id];
+ msm_uport = devm_kzalloc(&pdev->dev, sizeof(struct msm_hs_port),
+ GFP_KERNEL);
+ msm_uport->uport.type = PORT_UNKNOWN;
uport = &msm_uport->uport;
uport->dev = &pdev->dev;
@@ -3386,17 +3542,12 @@
static int __init msm_serial_hs_init(void)
{
int ret;
- int i;
ipc_msm_hs_log_ctxt = ipc_log_context_create(IPC_MSM_HS_LOG_PAGES,
"msm_serial_hs");
if (!ipc_msm_hs_log_ctxt)
MSM_HS_WARN("%s: error creating logging context", __func__);
- /* Init all UARTS as non-configured */
- for (i = 0; i < UARTDM_NR; i++)
- q_uart_port[i].uport.type = PORT_UNKNOWN;
-
ret = uart_register_driver(&msm_hs_driver);
if (unlikely(ret)) {
MSM_HS_WARN("%s failed to load\n", __func__);
@@ -3487,12 +3638,12 @@
*/
mb();
+ msm_hs_clock_unvote(msm_uport);
if (msm_uport->clk_state != MSM_HS_CLK_OFF) {
/* to balance clk_state */
msm_hs_clock_unvote(msm_uport);
wake_unlock(&msm_uport->dma_wake_lock);
}
- msm_hs_clock_unvote(msm_uport);
msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
@@ -3536,8 +3687,13 @@
{
struct platform_device *pdev = container_of(dev, struct
platform_device, dev);
- struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
- msm_hs_request_clock_on(&msm_uport->uport);
+ struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+ /* This check should not fail.
+ * During probe, we set uport->line to either pdev->id or userid */
+ if (msm_uport)
+ msm_hs_request_clock_on(&msm_uport->uport);
+
return 0;
}
@@ -3545,8 +3701,12 @@
{
struct platform_device *pdev = container_of(dev, struct
platform_device, dev);
- struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
- msm_hs_request_clock_off(&msm_uport->uport);
+ struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+ /* This check should not fail.
+ * During probe, we set uport->line to either pdev->id or userid */
+ if (msm_uport)
+ msm_hs_request_clock_off(&msm_uport->uport);
return 0;
}
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index b1ec3fc..2a66c4c 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -28,6 +28,7 @@
obj-$(CONFIG_USB_IMX21_HCD) += host/
obj-$(CONFIG_USB_FSL_MPH_DR_OF) += host/
obj-$(CONFIG_USB_PEHCI_HCD) += host/
+obj-$(CONFIG_USB_ICE40_HCD) += host/
obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
@@ -37,6 +38,7 @@
obj-$(CONFIG_USB_PRINTER) += class/
obj-$(CONFIG_USB_WDM) += class/
obj-$(CONFIG_USB_TMC) += class/
+obj-$(CONFIG_USB_CCID_BRIDGE) += class/
obj-$(CONFIG_USB_STORAGE) += storage/
obj-$(CONFIG_USB) += storage/
diff --git a/drivers/usb/class/ccid_bridge.c b/drivers/usb/class/ccid_bridge.c
index a3e100a..05483fd 100644
--- a/drivers/usb/class/ccid_bridge.c
+++ b/drivers/usb/class/ccid_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -34,7 +34,7 @@
#define CCID_BRIDGE_MSG_SZ 512
#define CCID_BRIDGE_OPEN_TIMEOUT 500 /* msec */
#define CCID_CONTROL_TIMEOUT 500 /* msec */
-#define CCID_BRIDGE_MSG_TIMEOUT 500 /* msec */
+#define CCID_BRIDGE_MSG_TIMEOUT 1000 /* msec */
struct ccid_bridge {
struct usb_device *udev;
@@ -698,6 +698,7 @@
}
usb_set_intfdata(intf, ccid);
+ usb_enable_autosuspend(ccid->udev);
mutex_lock(&ccid->open_mutex);
ccid->intf = intf;
@@ -752,6 +753,7 @@
}
ccid->intf = NULL;
+ usb_put_dev(ccid->udev);
mutex_unlock(&ccid->event_mutex);
mutex_unlock(&ccid->read_mutex);
diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c
index e218130..16f961e 100644
--- a/drivers/usb/gadget/f_diag.c
+++ b/drivers/usb/gadget/f_diag.c
@@ -26,6 +26,7 @@
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
+#include <linux/kmemleak.h>
static DEFINE_SPINLOCK(ch_lock);
static LIST_HEAD(usb_diag_ch_list);
@@ -381,6 +382,7 @@
req = usb_ep_alloc_request(ctxt->in, GFP_ATOMIC);
if (!req)
goto fail;
+ kmemleak_not_leak(req);
req->complete = diag_write_complete;
list_add_tail(&req->list, &ctxt->write_pool);
}
@@ -389,6 +391,7 @@
req = usb_ep_alloc_request(ctxt->out, GFP_ATOMIC);
if (!req)
goto fail;
+ kmemleak_not_leak(req);
req->complete = diag_read_complete;
list_add_tail(&req->list, &ctxt->read_pool);
}
diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c
index 2fa8c63..ebcec96 100644
--- a/drivers/usb/gadget/f_rmnet.c
+++ b/drivers/usb/gadget/f_rmnet.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -47,7 +47,7 @@
/* control info */
struct list_head cpkt_resp_q;
- atomic_t notify_count;
+ unsigned long notify_count;
unsigned long cpkts_len;
};
@@ -605,7 +605,7 @@
list_del(&cpkt->list);
rmnet_free_ctrl_pkt(cpkt);
}
- atomic_set(&dev->notify_count, 0);
+ dev->notify_count = 0;
spin_unlock_irqrestore(&dev->lock, flags);
}
@@ -619,6 +619,7 @@
__func__, xport_to_str(dxport),
dev, dev->port_num);
+ usb_ep_fifo_flush(dev->notify);
frmnet_purge_responses(dev);
port_num = rmnet_ports[dev->port_num].data_xport_num;
@@ -754,7 +755,7 @@
return;
}
- if (atomic_inc_return(&dev->notify_count) != 1) {
+ if (++dev->notify_count != 1) {
spin_unlock_irqrestore(&dev->lock, flags);
return;
}
@@ -772,7 +773,14 @@
if (ret) {
spin_lock_irqsave(&dev->lock, flags);
if (!list_empty(&dev->cpkt_resp_q)) {
- atomic_dec(&dev->notify_count);
+ if (dev->notify_count > 0)
+ dev->notify_count--;
+ else {
+ pr_debug("%s: Invalid notify_count=%lu to decrement\n",
+ __func__, dev->notify_count);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return;
+ }
cpkt = list_first_entry(&dev->cpkt_resp_q,
struct rmnet_ctrl_pkt, list);
list_del(&cpkt->list);
@@ -911,7 +919,9 @@
case -ECONNRESET:
case -ESHUTDOWN:
/* connection gone */
- atomic_set(&dev->notify_count, 0);
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->notify_count = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
break;
default:
pr_err("rmnet notify ep error %d\n", status);
@@ -920,14 +930,34 @@
if (!atomic_read(&dev->ctrl_online))
break;
- if (atomic_dec_and_test(&dev->notify_count))
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->notify_count > 0) {
+ dev->notify_count--;
+ if (dev->notify_count == 0) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ break;
+ }
+ } else {
+ pr_debug("%s: Invalid notify_count=%lu to decrement\n",
+ __func__, dev->notify_count);
+ spin_unlock_irqrestore(&dev->lock, flags);
break;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
if (status) {
spin_lock_irqsave(&dev->lock, flags);
if (!list_empty(&dev->cpkt_resp_q)) {
- atomic_dec(&dev->notify_count);
+ if (dev->notify_count > 0)
+ dev->notify_count--;
+ else {
+ pr_err("%s: Invalid notify_count=%lu to decrement\n",
+ __func__, dev->notify_count);
+ spin_unlock_irqrestore(&dev->lock,
+ flags);
+ break;
+ }
cpkt = list_first_entry(&dev->cpkt_resp_q,
struct rmnet_ctrl_pkt, list);
list_del(&cpkt->list);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 4357867..2a24bec 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -693,3 +693,16 @@
config USB_OCTEON2_COMMON
bool
default y if USB_OCTEON_EHCI || USB_OCTEON_OHCI
+
+config USB_ICE40_HCD
+ tristate "ICE40 FPGA based SPI to Inter-Chip USB host controller"
+ depends on USB && SPI
+ help
+ A driver for the ICE40 FPGA based SPI to Inter-Chip USB host
+ controller. This driver registers as an SPI protocol driver;
+ it interacts with the SPI subsystem on one side and with the
+ USB core on the other. Control and Bulk transfers are
+ supported.
+
+ To compile this driver as a module, choose M here: the module
+ will be called "ice40-hcd".
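+
+ The firmware blob name defaults to "ice40.bin" and can be
+ overridden via the "fw" module parameter at load time, for
+ example: "modprobe ice40-hcd fw=custom_ice40.bin".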
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 7d35f5b..7c5b452 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -42,3 +42,4 @@
obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o
obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o
obj-$(CONFIG_MIPS_ALCHEMY) += alchemy-common.o
+obj-$(CONFIG_USB_ICE40_HCD) += ice40-hcd.o
diff --git a/drivers/usb/host/ice40-hcd.c b/drivers/usb/host/ice40-hcd.c
new file mode 100644
index 0000000..4d62a3e
--- /dev/null
+++ b/drivers/usb/host/ice40-hcd.c
@@ -0,0 +1,2092 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2001-2004 by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Root HUB management and Asynchronous scheduling traversal
+ * Based on ehci-hub.c and ehci-q.c
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/ktime.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/spinlock.h>
+#include <linux/firmware.h>
+#include <linux/spi/spi.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/ch11.h>
+
+#include <asm/unaligned.h>
+#include <mach/gpiomux.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ice40.h>
+
+#define FADDR_REG 0x00 /* R/W: Device address */
+#define HCMD_REG 0x01 /* R/W: Host transfer command */
+#define XFRST_REG 0x02 /* R: Transfer status */
+#define IRQ_REG 0x03 /* R/C: IRQ status */
+#define IEN_REG 0x04 /* R/W: IRQ enable */
+#define CTRL0_REG 0x05 /* R/W: Host control command */
+#define CTRL1_REG 0x06 /* R/W: Host control command */
+#define WBUF0_REG 0x10 /* W: Tx fifo 0 */
+#define WBUF1_REG 0x11 /* W: Tx fifo 1 */
+#define SUBUF_REG 0x12 /* W: SETUP fifo */
+#define WBLEN_REG 0x13 /* W: Tx fifo size */
+#define RBUF0_REG 0x18 /* R: Rx fifo 0 */
+#define RBUF1_REG 0x19 /* R: Rx fifo 1 */
+#define RBLEN_REG 0x1B /* R: Rx fifo size */
+
+#define WRITE_CMD(addr) ((addr << 3) | 1)
+#define READ_CMD(addr) ((addr << 3) | 0)
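+
+/*
+ * For example, WRITE_CMD(FADDR_REG) encodes to 0x01 and
+ * READ_CMD(XFRST_REG) encodes to 0x10.
+ */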
+
+/* Host controller command register definitions */
+#define HCMD_EP(ep) (ep & 0xF)
+#define HCMD_BSEL(sel) (sel << 4)
+#define HCMD_TOGV(toggle) (toggle << 5)
+#define HCMD_PT(token) (token << 6)
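+
+/*
+ * For example, an IN token (PT 0) to endpoint 1 with a DATA1
+ * toggle is HCMD_PT(0) | HCMD_TOGV(1) | HCMD_BSEL(0) | HCMD_EP(1),
+ * i.e. 0x21; see ice40_xfer_in() below.
+ */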
+
+/* Transfer status register definitions */
+#define XFR_MASK(xfr) (xfr & 0xF)
+#define XFR_SUCCESS 0x0
+#define XFR_BUSY 0x1
+#define XFR_PKTERR 0x2
+#define XFR_PIDERR 0x3
+#define XFR_NAK 0x4
+#define XFR_STALL 0x5
+#define XFR_WRONGPID 0x6
+#define XFR_CRCERR 0x7
+#define XFR_TOGERR 0x8
+#define XFR_BADLEN 0x9
+#define XFR_TIMEOUT 0xA
+
+#define LINE_STATE(xfr) ((xfr & 0x30) >> 4) /* D+, D- */
+#define DPST BIT(5)
+#define DMST BIT(4)
+#define PLLOK BIT(6)
+#define R64B BIT(7)
+
+/* Interrupt enable/status register definitions */
+#define RESET_IRQ BIT(0)
+#define RESUME_IRQ BIT(1)
+#define SUSP_IRQ BIT(3)
+#define DISCONNECT_IRQ BIT(4)
+#define CONNECT_IRQ BIT(5)
+#define FRAME_IRQ BIT(6)
+#define XFR_IRQ BIT(7)
+
+/* Control 0 register definitions */
+#define RESET_CTRL BIT(0)
+#define FRAME_RESET_CTRL BIT(1)
+#define DET_BUS_CTRL BIT(2)
+#define RESUME_CTRL BIT(3)
+#define SOFEN_CTRL BIT(4)
+#define DM_PD_CTRL BIT(6)
+#define DP_PD_CTRL BIT(7)
+#define HRST_CTRL BIT(5)
+
+/* Control 1 register definitions */
+#define INT_EN_CTRL BIT(0)
+
+enum ice40_xfr_type {
+ FIRMWARE_XFR,
+ REG_WRITE_XFR,
+ REG_READ_XFR,
+ SETUP_XFR,
+ DATA_IN_XFR,
+ DATA_OUT_XFR,
+};
+
+enum ice40_ep_phase {
+ SETUP_PHASE = 1,
+ DATA_PHASE,
+ STATUS_PHASE,
+};
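+
+/*
+ * A control transfer on ep0 advances through SETUP_PHASE, an
+ * optional DATA_PHASE and then STATUS_PHASE; ice40_process_urb()
+ * walks ep0_state through these stages.
+ */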
+
+struct ice40_ep {
+ u8 xcat_err;
+ bool unlinking;
+ bool halted;
+ struct usb_host_endpoint *ep;
+ struct list_head ep_list;
+};
+
+struct ice40_hcd {
+ spinlock_t lock;
+
+ struct mutex wlock;
+ struct mutex rlock;
+
+ u8 devnum;
+ u32 port_flags;
+ u8 ctrl0;
+ u8 wblen0;
+
+ enum ice40_ep_phase ep0_state;
+ struct usb_hcd *hcd;
+
+ struct list_head async_list;
+ struct workqueue_struct *wq;
+ struct work_struct async_work;
+
+ int reset_gpio;
+ int slave_select_gpio;
+ int config_done_gpio;
+ int vcc_en_gpio;
+ int clk_en_gpio;
+
+ struct regulator *core_vcc;
+ struct regulator *spi_vcc;
+ struct regulator *gpio_vcc;
+ bool powered;
+
+ struct dentry *dbg_root;
+ bool pcd_pending;
+
+ /* SPI stuff later */
+ struct spi_device *spi;
+
+ struct spi_message *fmsg;
+ struct spi_transfer *fmsg_xfr; /* size 1 */
+
+ struct spi_message *wmsg;
+ struct spi_transfer *wmsg_xfr; /* size 1 */
+ u8 *w_tx_buf;
+ u8 *w_rx_buf;
+
+ struct spi_message *rmsg;
+ struct spi_transfer *rmsg_xfr; /* size 1 */
+ u8 *r_tx_buf;
+ u8 *r_rx_buf;
+
+ struct spi_message *setup_msg;
+ struct spi_transfer *setup_xfr; /* size 2 */
+ u8 *setup_buf; /* size 1 for SUBUF */
+
+ struct spi_message *in_msg;
+ struct spi_transfer *in_xfr; /* size 2 */
+ u8 *in_buf; /* size 2 for reading from RBUF0 */
+
+ struct spi_message *out_msg;
+ struct spi_transfer *out_xfr; /* size 2 */
+ u8 *out_buf; /* size 1 for writing WBUF0 */
+};
+
+static char fw_name[16] = "ice40.bin";
+module_param_string(fw, fw_name, sizeof(fw_name), S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fw, "firmware blob file name");
+
+static bool debugger;
+module_param(debugger, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debugger, "true to use the debug port");
+
+static inline struct ice40_hcd *hcd_to_ihcd(struct usb_hcd *hcd)
+{
+ return *((struct ice40_hcd **) hcd->hcd_priv);
+}
+
+static void ice40_spi_reg_write(struct ice40_hcd *ihcd, u8 val, u8 addr)
+{
+ int ret;
+
+ /*
+ * Register Write Pattern:
+ * TX: 1st byte is CMD (register + write), 2nd byte is value
+ * RX: Ignore
+ *
+ * The Mutex is to protect concurrent register writes as
+ * we have only 1 SPI message struct.
+ */
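+ /*
+ * For example, programming device address 5 clocks out the two
+ * bytes { WRITE_CMD(FADDR_REG), 0x05 } = { 0x01, 0x05 }; the
+ * received bytes are ignored.
+ */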
+
+ mutex_lock(&ihcd->wlock);
+
+ ihcd->w_tx_buf[0] = WRITE_CMD(addr);
+ ihcd->w_tx_buf[1] = val;
+ ret = spi_sync(ihcd->spi, ihcd->wmsg);
+ if (ret < 0) /* should not happen */
+ pr_err("failed. val = %d addr = %d\n", val, addr);
+
+ trace_ice40_reg_write(addr, val, ihcd->w_tx_buf[0],
+ ihcd->w_tx_buf[1], ret);
+
+ mutex_unlock(&ihcd->wlock);
+}
+
+static int ice40_spi_reg_read(struct ice40_hcd *ihcd, u8 addr)
+{
+ int ret;
+
+ /*
+ * Register Read Pattern:
+ * TX: 1st byte is CMD (register + read)
+ * RX: 1st, 2nd byte Ignore, 3rd byte value.
+ *
+ * The Mutex is to protect concurrent register reads as
+ * we have only 1 SPI message struct.
+ */
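+ /*
+ * For example, reading XFRST_REG clocks out READ_CMD(XFRST_REG) =
+ * 0x10 and the register value arrives in the third received byte
+ * (r_rx_buf[2]).
+ */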
+
+ mutex_lock(&ihcd->rlock);
+
+ ihcd->r_tx_buf[0] = READ_CMD(addr);
+ ret = spi_sync(ihcd->spi, ihcd->rmsg);
+ if (ret < 0)
+ pr_err("failed. addr = %d\n", addr);
+ else
+ ret = ihcd->r_rx_buf[2];
+
+ trace_ice40_reg_read(addr, ihcd->r_tx_buf[0], ret);
+
+ mutex_unlock(&ihcd->rlock);
+
+ return ret;
+}
+
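+/*
+ * Poll XFRST_REG until the transfer leaves the BUSY state or until
+ * usecs elapse. Callers in this driver poll for up to 1000 usec; the
+ * raw XFRST value is returned, or ~0 on timeout.
+ */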
+static int ice40_poll_xfer(struct ice40_hcd *ihcd, int usecs)
+{
+ ktime_t start = ktime_get();
+ u8 val, retry = 0;
+ u8 ret = ~0; /* time out */
+
+again:
+
+ /*
+ * The SPI transaction may take tens of usec. Use ktime
+ * based checks rather than loop count.
+ */
+ do {
+ val = ice40_spi_reg_read(ihcd, XFRST_REG);
+
+ if (XFR_MASK(val) != XFR_BUSY)
+ return val;
+
+ } while (ktime_us_delta(ktime_get(), start) < usecs);
+
+ /*
+ * The SPI transaction involves a context switch. For any
+ * reason, if we are scheduled out more than usecs after
+ * the 1st read, this extra read will help.
+ */
+ if (!retry) {
+ retry = 1;
+ goto again;
+ }
+
+ return ret;
+}
+
+static int
+ice40_handshake(struct ice40_hcd *ihcd, u8 reg, u8 mask, u8 done, int usecs)
+{
+ ktime_t start = ktime_get();
+ u8 val, retry = 0;
+
+again:
+ do {
+ val = ice40_spi_reg_read(ihcd, reg);
+ val &= mask;
+
+ if (val == done)
+ return 0;
+
+ } while (ktime_us_delta(ktime_get(), start) < usecs);
+
+ if (!retry) {
+ retry = 1;
+ goto again;
+ }
+
+ return -ETIMEDOUT;
+}
+
+
+static const char hcd_name[] = "ice40-hcd";
+
+static int ice40_reset(struct usb_hcd *hcd)
+{
+ struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+ u8 ctrl, status;
+ int ret = 0;
+
+ /*
+ * Program the default address 0. The device address is
+ * re-programmed after SET_ADDRESS in the URB handling path.
+ */
+ ihcd->devnum = 0;
+ ice40_spi_reg_write(ihcd, 0, FADDR_REG);
+
+ ihcd->wblen0 = ~0;
+ /*
+ * Read the line state. This driver is loaded after the
+ * UICC card insertion. So the line state should indicate
+ * that a Full-speed device is connected. Return error
+ * if there is no device connected.
+ *
+ * There can be no device connected during debug. A debugfs
+ * file is provided to sample the bus line and update the
+ * port flags accordingly.
+ */
+
+ if (debugger)
+ goto out;
+
+ ctrl = ice40_spi_reg_read(ihcd, CTRL0_REG);
+ ice40_spi_reg_write(ihcd, ctrl | DET_BUS_CTRL, CTRL0_REG);
+
+ ret = ice40_handshake(ihcd, CTRL0_REG, DET_BUS_CTRL, 0, 5000);
+ if (ret) {
+ pr_err("bus detection failed\n");
+ goto out;
+ }
+
+ status = ice40_spi_reg_read(ihcd, XFRST_REG);
+ pr_debug("line state (D+, D-) is %d\n", LINE_STATE(status));
+
+ if (status & DPST) {
+ pr_debug("Full speed device connected\n");
+ ihcd->port_flags |= USB_PORT_STAT_CONNECTION;
+ } else {
+ pr_err("No device connected\n");
+ ret = -ENODEV;
+ }
+out:
+ return ret;
+}
+
+static int ice40_run(struct usb_hcd *hcd)
+{
+ struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+
+ /*
+ * HCD_FLAG_POLL_RH flag is not set by us. Core will not poll
+ * for the port status periodically. This uses_new_polling
+ * flag tells core that this hcd will call usb_hcd_poll_rh_status
+ * upon port change.
+ */
+ hcd->uses_new_polling = 1;
+
+ /*
+ * Cache the ctrl0 register to avoid multiple reads. This register
+ * is written during reset and resume.
+ */
+ ihcd->ctrl0 = ice40_spi_reg_read(ihcd, CTRL0_REG);
+ ihcd->ctrl0 |= SOFEN_CTRL;
+ ice40_spi_reg_write(ihcd, ihcd->ctrl0, CTRL0_REG);
+
+ return 0;
+}
+
+static void ice40_stop(struct usb_hcd *hcd)
+{
+ struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+
+ cancel_work_sync(&ihcd->async_work);
+}
+
+/*
+ * The _Error suffix looks odd, but it is very helpful when
+ * searching the logs for errors.
+ */
+static char __maybe_unused *xfr_status_string(int status)
+{
+ switch (XFR_MASK(status)) {
+ case XFR_SUCCESS:
+ return "Ack";
+ case XFR_BUSY:
+ return "Busy_Error";
+ case XFR_PKTERR:
+ return "Pkt_Error";
+ case XFR_PIDERR:
+ return "PID_Error";
+ case XFR_NAK:
+ return "Nak";
+ case XFR_STALL:
+ return "Stall_Error";
+ case XFR_WRONGPID:
+ return "WrongPID_Error";
+ case XFR_CRCERR:
+ return "CRC_Error";
+ case XFR_TOGERR:
+ return "Togg_Error";
+ case XFR_BADLEN:
+ return "BadLen_Error";
+ case XFR_TIMEOUT:
+ return "Timeout_Error";
+ default:
+ return "Unknown_Error";
+ }
+}
+
+static int ice40_xfer_setup(struct ice40_hcd *ihcd, struct urb *urb)
+{
+ struct usb_host_endpoint *ep = urb->ep;
+ struct ice40_ep *iep = ep->hcpriv;
+ void *buf = urb->setup_packet;
+ int ret, status;
+ u8 cmd;
+
+ /*
+ * SETUP transaction Handling:
+ * - copy the setup buffer to SUBUF fifo
+ * - Program HCMD register to initiate the SETUP transaction.
+ * - poll for completion by reading XFRST register.
+ * - Interpret the result.
+ */
+
+ ihcd->setup_buf[0] = WRITE_CMD(SUBUF_REG);
+ ihcd->setup_xfr[1].tx_buf = buf;
+ ihcd->setup_xfr[1].len = sizeof(struct usb_ctrlrequest);
+
+ ret = spi_sync(ihcd->spi, ihcd->setup_msg);
+ if (ret < 0) {
+ pr_err("SPI transfer failed\n");
+ status = ret = -EIO;
+ goto out;
+ }
+
+ cmd = HCMD_PT(2) | HCMD_TOGV(0) | HCMD_BSEL(0) | HCMD_EP(0);
+ ice40_spi_reg_write(ihcd, cmd, HCMD_REG);
+
+ status = ice40_poll_xfer(ihcd, 1000);
+ switch (XFR_MASK(status)) {
+ case XFR_SUCCESS:
+ iep->xcat_err = 0;
+ ret = 0;
+ break;
+ case XFR_NAK: /* Device should not return Nak for SETUP */
+ case XFR_STALL:
+ iep->xcat_err = 0;
+ ret = -EPIPE;
+ break;
+ case XFR_PKTERR:
+ case XFR_PIDERR:
+ case XFR_WRONGPID:
+ case XFR_CRCERR:
+ case XFR_TIMEOUT:
+ if (++iep->xcat_err < 8)
+ ret = -EINPROGRESS;
+ else
+ ret = -EPROTO;
+ break;
+ default:
+ pr_err("transaction timed out\n");
+ ret = -EIO;
+ }
+
+out:
+ trace_ice40_setup(xfr_status_string(status), ret);
+ return ret;
+}
+
+static int ice40_xfer_in(struct ice40_hcd *ihcd, struct urb *urb)
+{
+ struct usb_host_endpoint *ep = urb->ep;
+ struct usb_device *udev = urb->dev;
+ u32 total_len = urb->transfer_buffer_length;
+ u16 maxpacket = usb_endpoint_maxp(&ep->desc);
+ u8 epnum = usb_pipeendpoint(urb->pipe);
+ bool is_out = usb_pipeout(urb->pipe);
+ struct ice40_ep *iep = ep->hcpriv;
+ u8 cmd, status, len = 0, t, expected_len;
+ void *buf;
+ int ret;
+ bool short_packet = true;
+
+ if (epnum == 0 && ihcd->ep0_state == STATUS_PHASE) {
+ expected_len = 0;
+ buf = NULL;
+ t = 1; /* STATUS PHASE is always DATA1 */
+ } else {
+ expected_len = min_t(u32, maxpacket,
+ total_len - urb->actual_length);
+ buf = urb->transfer_buffer + urb->actual_length;
+ t = usb_gettoggle(udev, epnum, is_out);
+ }
+
+ /*
+ * IN transaction Handling:
+ * - Program HCMD register to initiate the IN transaction.
+ * - poll for completion by reading XFRST register.
+ * - Interpret the result.
+ * - If ACK is received and we expect some data, read RBLEN
+ * - Read the data from RBUF
+ */
+
+ cmd = HCMD_PT(0) | HCMD_TOGV(t) | HCMD_BSEL(0) | HCMD_EP(epnum);
+ ice40_spi_reg_write(ihcd, cmd, HCMD_REG);
+
+ status = ice40_poll_xfer(ihcd, 1000);
+ switch (XFR_MASK(status)) {
+ case XFR_SUCCESS:
+ usb_dotoggle(udev, epnum, is_out);
+ iep->xcat_err = 0;
+ ret = 0;
+ if ((expected_len == 64) && (status & R64B))
+ short_packet = false;
+ break;
+ case XFR_NAK:
+ iep->xcat_err = 0;
+ ret = -EINPROGRESS;
+ break;
+ case XFR_TOGERR:
+ /*
+ * Peripheral had missed the previous Ack and sent
+ * the same packet again. Ack is sent by the hardware.
+ * As the data is received already, ignore this
+ * event.
+ */
+ ret = -EINPROGRESS;
+ break;
+ case XFR_PKTERR:
+ case XFR_PIDERR:
+ case XFR_WRONGPID:
+ case XFR_CRCERR:
+ case XFR_TIMEOUT:
+ if (++iep->xcat_err < 8)
+ ret = -EINPROGRESS;
+ else
+ ret = -EPROTO;
+ break;
+ case XFR_STALL:
+ ret = -EPIPE;
+ break;
+ case XFR_BADLEN:
+ ret = -EOVERFLOW;
+ break;
+ default:
+ pr_err("transaction timed out\n");
+ ret = -EIO;
+ }
+
+ /*
+ * Proceed further only if Ack is received and
+ * we are expecting some data.
+ */
+ if (ret || !expected_len)
+ goto out;
+
+ if (short_packet)
+ len = ice40_spi_reg_read(ihcd, RBLEN_REG);
+ else
+ len = 64;
+
+ /* babble condition */
+ if (len > expected_len) {
+ pr_err("overflow condition\n");
+ ret = -EOVERFLOW;
+ goto out;
+ }
+
+ /*
+ * Zero length packet received; nothing to read from
+ * the FIFO.
+ */
+ if (len == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ ihcd->in_buf[0] = READ_CMD(RBUF0_REG);
+
+ ihcd->in_xfr[1].rx_buf = buf;
+ ihcd->in_xfr[1].len = len;
+
+ ret = spi_sync(ihcd->spi, ihcd->in_msg);
+ if (ret < 0) {
+ pr_err("SPI transfer failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ urb->actual_length += len;
+ if ((urb->actual_length == total_len) ||
+ (len < expected_len))
+ ret = 0; /* URB completed */
+ else
+ ret = -EINPROGRESS; /* still pending */
+out:
+ trace_ice40_in(epnum, xfr_status_string(status), len,
+ expected_len, ret);
+ return ret;
+}
+
+static int ice40_xfer_out(struct ice40_hcd *ihcd, struct urb *urb)
+{
+ struct usb_host_endpoint *ep = urb->ep;
+ struct usb_device *udev = urb->dev;
+ u32 total_len = urb->transfer_buffer_length;
+ u16 maxpacket = usb_endpoint_maxp(&ep->desc);
+ u8 epnum = usb_pipeendpoint(urb->pipe);
+ bool is_out = usb_pipeout(urb->pipe);
+ struct ice40_ep *iep = ep->hcpriv;
+ u8 cmd, status, len, t;
+ void *buf;
+ int ret;
+
+ if (epnum == 0 && ihcd->ep0_state == STATUS_PHASE) {
+ len = 0;
+ buf = NULL;
+ t = 1; /* STATUS PHASE is always DATA1 */
+ } else {
+ len = min_t(u32, maxpacket, total_len - urb->actual_length);
+ buf = urb->transfer_buffer + urb->actual_length;
+ t = usb_gettoggle(udev, epnum, is_out);
+ }
+
+ /*
+ * OUT transaction Handling:
+ * - If we need to send data, write the data to WBUF Fifo
+ * - Program the WBLEN register
+ * - Program HCMD register to initiate the OUT transaction.
+ * - poll for completion by reading XFRST register.
+ * - Interpret the result.
+ */
+
+
+ if (!len)
+ goto no_data;
+
+ ihcd->out_buf[0] = WRITE_CMD(WBUF0_REG);
+
+ ihcd->out_xfr[1].tx_buf = buf;
+ ihcd->out_xfr[1].len = len;
+
+ ret = spi_sync(ihcd->spi, ihcd->out_msg);
+ if (ret < 0) {
+ pr_err("SPI transaction failed\n");
+ status = ret = -EIO;
+ goto out;
+ }
+
+no_data:
+ /*
+ * Cache the WBLEN register and update it only if it
+ * is changed from the previous value.
+ */
+ if (len != ihcd->wblen0) {
+ ice40_spi_reg_write(ihcd, len, WBLEN_REG);
+ ihcd->wblen0 = len;
+ }
+
+ cmd = HCMD_PT(1) | HCMD_TOGV(t) | HCMD_BSEL(0) | HCMD_EP(epnum);
+ ice40_spi_reg_write(ihcd, cmd, HCMD_REG);
+
+ status = ice40_poll_xfer(ihcd, 1000);
+ switch (XFR_MASK(status)) {
+ case XFR_SUCCESS:
+ usb_dotoggle(udev, epnum, is_out);
+ urb->actual_length += len;
+ iep->xcat_err = 0;
+ if (!len || (urb->actual_length == total_len))
+ ret = 0; /* URB completed */
+ else
+ ret = -EINPROGRESS; /* pending */
+ break;
+ case XFR_NAK:
+ iep->xcat_err = 0;
+ ret = -EINPROGRESS;
+ break;
+ case XFR_PKTERR:
+ case XFR_PIDERR:
+ case XFR_WRONGPID:
+ case XFR_CRCERR:
+ case XFR_TIMEOUT:
+ if (++iep->xcat_err < 8)
+ ret = -EINPROGRESS;
+ else
+ ret = -EPROTO;
+ break;
+ case XFR_STALL:
+ ret = -EPIPE;
+ break;
+ case XFR_BADLEN:
+ ret = -EOVERFLOW;
+ break;
+ default:
+ pr_err("transaction timed out\n");
+ ret = -EIO;
+ }
+
+out:
+ trace_ice40_out(epnum, xfr_status_string(status), len, ret);
+ return ret;
+}
+
+static int ice40_process_urb(struct ice40_hcd *ihcd, struct urb *urb)
+{
+ struct usb_device *udev = urb->dev;
+ u8 devnum = usb_pipedevice(urb->pipe);
+ bool is_out = usb_pipeout(urb->pipe);
+ u32 total_len = urb->transfer_buffer_length;
+ int ret = 0;
+
+ /*
+ * The USB device address can be reset to 0 by core temporarily
+ * during reset recovery process. Don't assume anything about
+ * device address. The device address is programmed as 0 by
+ * default. If the device address differs from the previously
+ * cached value, re-program it here before proceeding. The device
+ * address register (FADDR) holds the value across multiple
+ * transactions and we support only one device.
+ */
+ if (ihcd->devnum != devnum) {
+ ice40_spi_reg_write(ihcd, devnum, FADDR_REG);
+ ihcd->devnum = devnum;
+ }
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ switch (ihcd->ep0_state) {
+ case SETUP_PHASE:
+ trace_ice40_ep0("SETUP");
+ ret = ice40_xfer_setup(ihcd, urb);
+ if (ret)
+ break;
+ if (total_len) {
+ ihcd->ep0_state = DATA_PHASE;
+ /*
+ * The data stage always begins
+ * with a DATA1 PID.
+ */
+ usb_settoggle(udev, 0, is_out, 1);
+ } else {
+ ihcd->ep0_state = STATUS_PHASE;
+ goto do_status;
+ }
+ /* fall through */
+ case DATA_PHASE:
+ trace_ice40_ep0("DATA");
+ if (is_out)
+ ret = ice40_xfer_out(ihcd, urb);
+ else
+ ret = ice40_xfer_in(ihcd, urb);
+ if (ret)
+ break;
+ /* DATA Phase is completed successfully */
+ ihcd->ep0_state = STATUS_PHASE;
+ /* fall through */
+ case STATUS_PHASE:
+do_status:
+ trace_ice40_ep0("STATUS");
+ /* zero len DATA transfers have IN status */
+ if (!total_len || is_out)
+ ret = ice40_xfer_in(ihcd, urb);
+ else
+ ret = ice40_xfer_out(ihcd, urb);
+ if (ret)
+ break;
+ ihcd->ep0_state = SETUP_PHASE;
+ break;
+ default:
+ pr_err("unknown stage for a control transfer\n");
+ break;
+ }
+ break;
+ case PIPE_BULK:
+ if (is_out)
+ ret = ice40_xfer_out(ihcd, urb);
+ else
+ ret = ice40_xfer_in(ihcd, urb);
+ /*
+ * We may have to support zero len packet terminations
+ * for URB_ZERO_PACKET URBs.
+ */
+ break;
+ default:
+ pr_err("INT/ISO transfers not supported\n");
+ break;
+ }
+
+ return ret;
+}
+
+/* Must be called with spin lock and interrupts disabled */
+static void ice40_complete_urb(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+ struct usb_host_endpoint *ep = urb->ep;
+ struct ice40_ep *iep = ep->hcpriv;
+ struct urb *first_urb;
+ bool needs_update = false;
+ bool control = usb_pipecontrol(urb->pipe);
+
+ /*
+ * If the active URB, i.e. the first URB in the ep list, is being
+ * removed, clear the transaction error count. If it is a control
+ * URB, ep0_state needs to be reset to SETUP_PHASE.
+ */
+ first_urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+ if (urb == first_urb)
+ needs_update = true;
+
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock(&ihcd->lock);
+ trace_ice40_urb_done(urb, status);
+ usb_hcd_giveback_urb(ihcd->hcd, urb, status);
+ spin_lock(&ihcd->lock);
+
+ if (needs_update) {
+ iep->xcat_err = 0;
+ if (control)
+ ihcd->ep0_state = SETUP_PHASE;
+ }
+}
+
+static void ice40_async_work(struct work_struct *work)
+{
+ struct ice40_hcd *ihcd = container_of(work,
+ struct ice40_hcd, async_work);
+ struct usb_hcd *hcd = ihcd->hcd;
+ struct list_head *tmp, *uent, *utmp;
+ struct ice40_ep *iep;
+ struct usb_host_endpoint *ep;
+ struct urb *urb;
+ unsigned long flags;
+ int status;
+
+ /*
+ * Traverse the active endpoints circularly and process URBs.
+ * If any endpoint is marked for unlinking, the URBs are
+ * completed here. The endpoint is removed from active list
+ * if a URB is retired with -EPIPE/-EPROTO errors.
+ */
+
+ spin_lock_irqsave(&ihcd->lock, flags);
+
+ if (list_empty(&ihcd->async_list))
+ goto out;
+
+ iep = list_first_entry(&ihcd->async_list, struct ice40_ep, ep_list);
+ while (1) {
+ ep = iep->ep;
+
+ urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+ if (urb->unlinked) {
+ status = urb->unlinked;
+ } else {
+ spin_unlock_irqrestore(&ihcd->lock, flags);
+ status = ice40_process_urb(ihcd, urb);
+ spin_lock_irqsave(&ihcd->lock, flags);
+ }
+
+ if ((status == -EPIPE) || (status == -EPROTO))
+ iep->halted = true;
+
+ if (status != -EINPROGRESS)
+ ice40_complete_urb(hcd, urb, status);
+
+ if (iep->unlinking) {
+ list_for_each_safe(uent, utmp, &ep->urb_list) {
+ urb = list_entry(uent, struct urb, urb_list);
+ if (urb->unlinked)
+ ice40_complete_urb(hcd, urb, 0);
+ }
+ iep->unlinking = false;
+ }
+
+ tmp = iep->ep_list.next;
+ if (list_empty(&ep->urb_list) || iep->halted) {
+ list_del_init(&iep->ep_list);
+
+ if (list_empty(&ihcd->async_list))
+ break;
+ }
+
+ if (tmp == &ihcd->async_list)
+ tmp = tmp->next;
+ iep = list_entry(tmp, struct ice40_ep, ep_list);
+ }
+out:
+ spin_unlock_irqrestore(&ihcd->lock, flags);
+}
+
+static int
+ice40_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+ struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+ struct usb_device *udev = urb->dev;
+ struct usb_host_endpoint *ep = urb->ep;
+ bool is_out = usb_pipeout(urb->pipe);
+ u8 epnum = usb_pipeendpoint(urb->pipe);
+ struct ice40_ep *iep;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * This bridge chip supports only Full-speed. So ISO is not
+ * supported. Interrupt support is not implemented as there
+ * is no use case.
+ */
+ if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe)) {
+ pr_debug("iso and int xfers not supported\n");
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ spin_lock_irqsave(&ihcd->lock, flags);
+
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto rel_lock;
+
+ trace_ice40_urb_enqueue(urb);
+
+ iep = ep->hcpriv;
+ if (!iep) {
+ iep = kzalloc(sizeof(struct ice40_ep), GFP_ATOMIC);
+ if (!iep) {
+ pr_debug("fail to allocate iep\n");
+ ret = -ENOMEM;
+ goto unlink;
+ }
+ ep->hcpriv = iep;
+ INIT_LIST_HEAD(&iep->ep_list);
+ iep->ep = ep;
+ usb_settoggle(udev, epnum, is_out, 0);
+ if (usb_pipecontrol(urb->pipe))
+ ihcd->ep0_state = SETUP_PHASE;
+ }
+
+ /*
+ * We expect the interface driver to clear the stall condition
+ * before queueing another URB. For example, a mass storage
+ * device may STALL a bulk endpoint for an unsupported command.
+ * The storage driver clears the STALL condition before queueing
+ * another URB.
+ */
+ iep->halted = false;
+ if (list_empty(&iep->ep_list))
+ list_add_tail(&iep->ep_list, &ihcd->async_list);
+
+ queue_work(ihcd->wq, &ihcd->async_work);
+
+ spin_unlock_irqrestore(&ihcd->lock, flags);
+
+ return 0;
+unlink:
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+rel_lock:
+ spin_unlock_irqrestore(&ihcd->lock, flags);
+out:
+ return ret;
+}
+
+static int
+ice40_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+ struct usb_host_endpoint *ep = urb->ep;
+ struct ice40_ep *iep;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ihcd->lock, flags);
+
+ ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (ret)
+ goto rel_lock;
+
+ trace_ice40_urb_dequeue(urb);
+ iep = ep->hcpriv;
+
+ /*
+ * If the endpoint is not in asynchronous schedule, complete
+ * the URB immediately. Otherwise mark it as being unlinked.
+ * The asynchronous schedule work will take care of completing
+ * the URB when this endpoint is encountered during traversal.
+ */
+ if (list_empty(&iep->ep_list))
+ ice40_complete_urb(hcd, urb, status);
+ else
+ iep->unlinking = true;
+
+rel_lock:
+ spin_unlock_irqrestore(&ihcd->lock, flags);
+ return ret;
+}
+
+static void
+ice40_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct ice40_ep *iep = ep->hcpriv;
+
+ /*
+ * If there was no I/O on this endpoint before, ep->hcpriv
+ * will be NULL. Nothing to do in this case.
+ */
+ if (!iep)
+ return;
+
+ if (!list_empty(&ep->urb_list))
+ pr_err("trying to disable a non-empty endpoint\n");
+
+ kfree(iep);
+ ep->hcpriv = NULL;
+}
+
+
+static int ice40_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+ int ret = 0;
+
+ /*
+ * The core calls hub_status_data during suspend/resume.
+ * Return 0 if there is no port change. pcd_pending
+ * is set to true when a device is connected and the line
+ * state is sampled via a debugfs command. Clear this
+ * flag after returning the port change status.
+ */
+ if (ihcd->pcd_pending) {
+ *buf = (1 << 1);
+ ret = 1;
+ ihcd->pcd_pending = false;
+ }
+
+ return ret;
+}
+
+static void ice40_hub_descriptor(struct usb_hub_descriptor *desc)
+{
+ /* There is nothing special about us!! */
+ desc->bDescLength = 9;
+ desc->bDescriptorType = 0x29;
+ desc->bNbrPorts = 1;
+ desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_NO_LPSM |
+ HUB_CHAR_NO_OCPM);
+ desc->bPwrOn2PwrGood = 0;
+ desc->bHubContrCurrent = 0;
+ desc->u.hs.DeviceRemovable[0] = 0;
+ desc->u.hs.DeviceRemovable[1] = ~0;
+}
+
+static int
+ice40_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ int ret = 0;
+ u8 ctrl;
+ struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+
+ /*
+ * We have only 1 port. No special locking is required while
+ * handling root hub commands. The bridge chip does not maintain
+ * any port states. Maintain different port states in software.
+ */
+ switch (typeReq) {
+ case ClearPortFeature:
+ if (wIndex != 1 || wLength != 0)
+ goto error;
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ /*
+ * The device is resumed as part of the root hub
+ * resume to simplify the resume sequence, so
+ * we may simply return from here. If the device is
+ * resumed before the root hub is suspended, this
+ * flag will be cleared here.
+ */
+ if (!(ihcd->port_flags & USB_PORT_STAT_SUSPEND))
+ break;
+ ihcd->port_flags &= ~USB_PORT_STAT_SUSPEND;
+ break;
+ case USB_PORT_FEAT_ENABLE:
+ ihcd->port_flags &= ~USB_PORT_STAT_ENABLE;
+ break;
+ case USB_PORT_FEAT_POWER:
+ ihcd->port_flags &= ~USB_PORT_STAT_POWER;
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ ihcd->port_flags &= ~(USB_PORT_STAT_C_CONNECTION << 16);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ case USB_PORT_FEAT_C_RESET:
+ /* nothing special here */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case GetHubDescriptor:
+ ice40_hub_descriptor((struct usb_hub_descriptor *) buf);
+ break;
+ case GetHubStatus:
+ put_unaligned_le32(0, buf);
+ break;
+ case GetPortStatus:
+ if (wIndex != 1)
+ goto error;
+
+ /*
+ * Core resets the device and requests port status to
+ * stop the reset signaling. If there is a reset in
+ * progress, finish it here.
+ */
+ ctrl = ice40_spi_reg_read(ihcd, CTRL0_REG);
+ if (!(ctrl & RESET_CTRL))
+ ihcd->port_flags &= ~USB_PORT_STAT_RESET;
+
+ put_unaligned_le32(ihcd->port_flags, buf);
+ break;
+ case SetPortFeature:
+ if (wIndex != 1 || wLength != 0)
+ goto error;
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ if (ihcd->port_flags & USB_PORT_STAT_RESET)
+ goto error;
+ if (!(ihcd->port_flags & USB_PORT_STAT_ENABLE))
+ goto error;
+ /* SOFs will be stopped during root hub suspend */
+ ihcd->port_flags |= USB_PORT_STAT_SUSPEND;
+ break;
+ case USB_PORT_FEAT_POWER:
+ ihcd->port_flags |= USB_PORT_STAT_POWER;
+ break;
+ case USB_PORT_FEAT_RESET:
+ /* Good time to enable the port */
+ ice40_spi_reg_write(ihcd, ihcd->ctrl0 |
+ RESET_CTRL, CTRL0_REG);
+ ihcd->port_flags |= USB_PORT_STAT_RESET;
+ ihcd->port_flags |= USB_PORT_STAT_ENABLE;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+error:
+ /* "protocol stall" on error */
+ ret = -EPIPE;
+ }
+
+ trace_ice40_hub_control(typeReq, wValue, wIndex, wLength, ret);
+ return ret;
+}
+
+static void ice40_spi_power_off(struct ice40_hcd *ihcd);
+static int ice40_bus_suspend(struct usb_hcd *hcd)
+{
+ struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+
+ trace_ice40_bus_suspend(0); /* start */
+
+ /* This happens only during debugging */
+ if (!ihcd->devnum) {
+ pr_debug("device still not connected. abort suspend\n");
+ trace_ice40_bus_suspend(2); /* failure */
+ return -EAGAIN;
+ }
+ /*
+ * Stop sending SOFs on the downstream port. The device
+ * finds the bus idle and enters suspend. The device
+ * takes ~3 msec to enter suspend.
+ */
+ ihcd->ctrl0 &= ~SOFEN_CTRL;
+ ice40_spi_reg_write(ihcd, ihcd->ctrl0, CTRL0_REG);
+ usleep_range(4500, 5000);
+
+ /*
+ * Power collapse the bridge chip to avoid the leakage
+ * current.
+ */
+ ice40_spi_power_off(ihcd);
+
+ trace_ice40_bus_suspend(1); /* successful */
+ pm_relax(&ihcd->spi->dev);
+ return 0;
+}
+
+static int ice40_spi_load_fw(struct ice40_hcd *ihcd);
+static int ice40_bus_resume(struct usb_hcd *hcd)
+{
+ struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+ u8 ctrl0;
+ int ret;
+
+ pm_stay_awake(&ihcd->spi->dev);
+ trace_ice40_bus_resume(0); /* start */
+ /*
+ * Power up the bridge chip and load the configuration file.
+ * Re-program the previous settings. For now we need to
+ * update the device address only.
+ */
+ ice40_spi_load_fw(ihcd);
+ ice40_spi_reg_write(ihcd, ihcd->devnum, FADDR_REG);
+ ihcd->wblen0 = ~0;
+
+ /*
+ * Program the bridge chip to drive resume signaling. The SOFs
+ * are automatically transmitted after resume completion. It
+ * will take ~20 msec for resume completion.
+ */
+ ice40_spi_reg_write(ihcd, ihcd->ctrl0 | RESUME_CTRL, CTRL0_REG);
+ usleep_range(20000, 21000);
+ ret = ice40_handshake(ihcd, CTRL0_REG, RESUME_CTRL, 0, 5000);
+ if (ret) {
+ pr_err("resume failed\n");
+ trace_ice40_bus_resume(2); /* failure */
+ return -ENODEV;
+ }
+
+ ctrl0 = ice40_spi_reg_read(ihcd, CTRL0_REG);
+ if (!(ctrl0 & SOFEN_CTRL)) {
+ pr_err("SOFs are not transmitted after resume\n");
+ trace_ice40_bus_resume(3); /* failure */
+ return -ENODEV;
+ }
+
+ ihcd->port_flags &= ~USB_PORT_STAT_SUSPEND;
+ ihcd->ctrl0 |= SOFEN_CTRL;
+
+ trace_ice40_bus_resume(1); /* success */
+ return 0;
+}
+
+static void ice40_set_autosuspend_delay(struct usb_device *dev)
+{
+ /*
+ * Immediate suspend for root hub and 500 msec auto-suspend
+ * timeout for the card.
+ */
+ if (!dev->parent)
+ pm_runtime_set_autosuspend_delay(&dev->dev, 0);
+ else
+ pm_runtime_set_autosuspend_delay(&dev->dev, 500);
+}
+
+static const struct hc_driver ice40_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "ICE40 SPI Host Controller",
+ .hcd_priv_size = sizeof(struct ice40_hcd *),
+ .flags = HCD_USB11,
+
+ /* setup and clean up */
+ .reset = ice40_reset,
+ .start = ice40_run,
+ .stop = ice40_stop,
+
+ /* endpoint and I/O routines */
+ .urb_enqueue = ice40_urb_enqueue,
+ .urb_dequeue = ice40_urb_dequeue,
+ .endpoint_disable = ice40_endpoint_disable,
+
+ /* Root hub operations */
+ .hub_status_data = ice40_hub_status_data,
+ .hub_control = ice40_hub_control,
+ .bus_suspend = ice40_bus_suspend,
+ .bus_resume = ice40_bus_resume,
+
+ .set_autosuspend_delay = ice40_set_autosuspend_delay,
+};
+
+static int ice40_spi_parse_dt(struct ice40_hcd *ihcd)
+{
+ struct device_node *node = ihcd->spi->dev.of_node;
+ int ret = 0;
+
+ if (!node) {
+ pr_err("device specific info missing\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ihcd->reset_gpio = of_get_named_gpio(node, "lattice,reset-gpio", 0);
+ if (ihcd->reset_gpio < 0) {
+ pr_err("reset gpio is missing\n");
+ ret = ihcd->reset_gpio;
+ goto out;
+ }
+
+ ihcd->slave_select_gpio = of_get_named_gpio(node,
+ "lattice,slave-select-gpio", 0);
+ if (ihcd->slave_select_gpio < 0) {
+ pr_err("slave select gpio is missing\n");
+ ret = ihcd->slave_select_gpio;
+ goto out;
+ }
+
+ ihcd->config_done_gpio = of_get_named_gpio(node,
+ "lattice,config-done-gpio", 0);
+ if (ihcd->config_done_gpio < 0) {
+ pr_err("config done gpio is missing\n");
+ ret = ihcd->config_done_gpio;
+ goto out;
+ }
+
+ ihcd->vcc_en_gpio = of_get_named_gpio(node, "lattice,vcc-en-gpio", 0);
+ if (ihcd->vcc_en_gpio < 0) {
+ pr_err("vcc enable gpio is missing\n");
+ ret = ihcd->vcc_en_gpio;
+ goto out;
+ }
+
+ /*
+ * When clk-en-gpio is present, it is used to enable the 19.2 MHz
+ * clock from MSM to the bridge chip. Otherwise the
+ * on-board clock is used.
+ */
+ ihcd->clk_en_gpio = of_get_named_gpio(node, "lattice,clk-en-gpio", 0);
+ if (ihcd->clk_en_gpio < 0)
+ ihcd->clk_en_gpio = 0;
+out:
+ return ret;
+}
+
+static void ice40_spi_power_off(struct ice40_hcd *ihcd)
+{
+ if (!ihcd->powered)
+ return;
+
+ gpio_direction_output(ihcd->vcc_en_gpio, 0);
+ regulator_disable(ihcd->core_vcc);
+ regulator_disable(ihcd->spi_vcc);
+ if (ihcd->gpio_vcc)
+ regulator_disable(ihcd->gpio_vcc);
+ if (ihcd->clk_en_gpio)
+ gpio_direction_output(ihcd->clk_en_gpio, 0);
+
+ ihcd->powered = false;
+}
+
+static int ice40_spi_power_up(struct ice40_hcd *ihcd)
+{
+ int ret;
+
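+ /*
+ * Power-up sequence: external clock (if we control it),
+ * the 1.8 V rails, the 1.2 V core supply and finally the
+ * VCC enable GPIO.
+ */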
+ if (ihcd->clk_en_gpio) {
+ ret = gpio_direction_output(ihcd->clk_en_gpio, 1);
+ if (ret < 0) {
+ pr_err("fail to enabel clk %d\n", ret);
+ goto out;
+ }
+ }
+
+ if (ihcd->gpio_vcc) {
+ ret = regulator_enable(ihcd->gpio_vcc); /* 1.8 V */
+ if (ret < 0) {
+ pr_err("fail to enable gpio vcc\n");
+ goto disable_clk;
+ }
+ }
+
+ ret = regulator_enable(ihcd->spi_vcc); /* 1.8 V */
+ if (ret < 0) {
+ pr_err("fail to enable spi vcc\n");
+ goto disable_gpio_vcc;
+ }
+
+ ret = regulator_enable(ihcd->core_vcc); /* 1.2 V */
+ if (ret < 0) {
+ pr_err("fail to enable core vcc\n");
+ goto disable_spi_vcc;
+ }
+
+ ret = gpio_direction_output(ihcd->vcc_en_gpio, 1);
+ if (ret < 0) {
+ pr_err("fail to assert vcc gpio\n");
+ goto disable_core_vcc;
+ }
+
+ ihcd->powered = true;
+
+ return 0;
+
+disable_core_vcc:
+ regulator_disable(ihcd->core_vcc);
+disable_spi_vcc:
+ regulator_disable(ihcd->spi_vcc);
+disable_gpio_vcc:
+ if (ihcd->gpio_vcc)
+ regulator_disable(ihcd->gpio_vcc);
+disable_clk:
+ if (ihcd->clk_en_gpio)
+ gpio_direction_output(ihcd->clk_en_gpio, 0);
+out:
+ return ret;
+}
+
+static struct gpiomux_setting slave_select_setting = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_NONE,
+ .dir = GPIOMUX_OUT_LOW,
+};
+
+static int ice40_spi_cache_fw(struct ice40_hcd *ihcd)
+{
+ const struct firmware *fw;
+ void *buf;
+ size_t buf_len;
+ int ret;
+
+ ret = request_firmware(&fw, fw_name, &ihcd->spi->dev);
+ if (ret < 0) {
+ pr_err("fail to get the firmware\n");
+ goto out;
+ }
+
+ pr_debug("received firmware size = %zu\n", fw->size);
+
+ /*
+ * The bridge expects additional clock cycles after
+ * receiving the configuration data. We don't have
+ * direct control over the SPI clock, so append extra
+ * bytes to the configuration data.
+ */
+ buf_len = fw->size + 16;
+ buf = devm_kzalloc(&ihcd->spi->dev, buf_len, GFP_KERNEL);
+ if (!buf) {
+ pr_err("fail to allocate firmware buffer\n");
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ /*
+ * The firmware buffer cannot be used for DMA as it
+ * is not physically contiguous. Copy the data into a
+ * devm-allocated buffer, which is freed only during
+ * unbind or rmmod.
+ */
+ memcpy(buf, fw->data, fw->size);
+ release_firmware(fw);
+
+ /*
+ * The bridge supports only 25 MHz during configuration
+ * file loading.
+ */
+ ihcd->fmsg_xfr[0].tx_buf = buf;
+ ihcd->fmsg_xfr[0].len = buf_len;
+ ihcd->fmsg_xfr[0].speed_hz = 25000000;
+
+ return 0;
+
+release:
+ release_firmware(fw);
+out:
+ return ret;
+}
+
+static int ice40_spi_load_fw(struct ice40_hcd *ihcd)
+{
+ int ret, i;
+ struct gpiomux_setting old_setting;
+
+ ret = gpio_direction_output(ihcd->reset_gpio, 0);
+ if (ret < 0) {
+ pr_err("fail to assert reset %d\n", ret);
+ goto out;
+ }
+
+ ret = gpio_direction_output(ihcd->vcc_en_gpio, 0);
+ if (ret < 0) {
+ pr_err("fail to de-assert vcc_en gpio %d\n", ret);
+ goto out;
+ }
+
+ /*
+ * The bridge chip samples the chip select signal during
+ * power-up. If it is low, it enters SPI slave mode and
+ * accepts the configuration data from us. The chip
+ * select signal is managed by the SPI controller driver.
+ * We temporarily override the chip select config to
+ * drive it low. The SPI bus needs to be locked down during
+ * this period to prevent traffic meant for other slaves
+ * from reaching our bridge chip.
+ */
+ spi_bus_lock(ihcd->spi->master);
+
+ ret = msm_gpiomux_write(ihcd->slave_select_gpio, GPIOMUX_SUSPENDED,
+ &slave_select_setting, &old_setting);
+ if (ret < 0) {
+ pr_err("fail to select the slave %d\n", ret);
+ goto out;
+ }
+
+ ret = ice40_spi_power_up(ihcd);
+ if (ret < 0) {
+ pr_err("fail to power up the chip\n");
+ goto out;
+ }
+
+ /*
+ * The databook says 1200 usec is required before the
+ * chip becomes ready for the SPI transfer.
+ */
+ usleep_range(1200, 1250);
+
+ ret = msm_gpiomux_write(ihcd->slave_select_gpio, GPIOMUX_SUSPENDED,
+ &old_setting, NULL);
+ if (ret < 0) {
+ pr_err("fail to de-select the slave %d\n", ret);
+ goto power_off;
+ }
+
+ ret = spi_sync_locked(ihcd->spi, ihcd->fmsg);
+
+ spi_bus_unlock(ihcd->spi->master);
+
+ if (ret < 0) {
+ pr_err("spi write failed\n");
+ goto power_off;
+ }
+
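+ /* Poll CONFIG_DONE for up to ~1 ms to confirm the bridge accepted the configuration data */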
+ for (i = 0; i < 1000; i++) {
+ ret = gpio_get_value(ihcd->config_done_gpio);
+ if (ret) {
+ pr_debug("config done asserted %d\n", i);
+ break;
+ }
+ udelay(1);
+ }
+
+ if (ret <= 0) {
+ pr_err("config done not asserted\n");
+ ret = -ENODEV;
+ goto power_off;
+ }
+
+ ret = gpio_direction_output(ihcd->reset_gpio, 1);
+ if (ret < 0) {
+ pr_err("fail to assert reset %d\n", ret);
+ goto power_off;
+ }
+ udelay(50);
+
+ ret = ice40_spi_reg_read(ihcd, XFRST_REG);
+ pr_debug("XFRST val is %x\n", ret);
+ if (!(ret & PLLOK)) {
+ pr_err("The PLL2 is not synchronized\n");
+ goto power_off;
+ }
+
+ pr_info("Firmware load success\n");
+
+ return 0;
+
+power_off:
+ ice40_spi_power_off(ihcd);
+out:
+ return ret;
+}
+
+static int ice40_spi_init_regulators(struct ice40_hcd *ihcd)
+{
+ int ret;
+
+ ihcd->spi_vcc = devm_regulator_get(&ihcd->spi->dev, "spi-vcc");
+ if (IS_ERR(ihcd->spi_vcc)) {
+ ret = PTR_ERR(ihcd->spi_vcc);
+ if (ret != -EPROBE_DEFER)
+ pr_err("fail to get spi-vcc %d\n", ret);
+ goto out;
+ }
+
+ ret = regulator_set_voltage(ihcd->spi_vcc, 1800000, 1800000);
+ if (ret < 0) {
+ pr_err("fail to set spi-vcc %d\n", ret);
+ goto out;
+ }
+
+ ihcd->core_vcc = devm_regulator_get(&ihcd->spi->dev, "core-vcc");
+ if (IS_ERR(ihcd->core_vcc)) {
+ ret = PTR_ERR(ihcd->core_vcc);
+ if (ret != -EPROBE_DEFER)
+ pr_err("fail to get core-vcc %d\n", ret);
+ goto out;
+ }
+
+ ret = regulator_set_voltage(ihcd->core_vcc, 1200000, 1200000);
+ if (ret < 0) {
+ pr_err("fail to set core-vcc %d\n", ret);
+ goto out;
+ }
+
+ if (!of_get_property(ihcd->spi->dev.of_node, "gpio-supply", NULL))
+ goto out;
+
+ ihcd->gpio_vcc = devm_regulator_get(&ihcd->spi->dev, "gpio");
+ if (IS_ERR(ihcd->gpio_vcc)) {
+ ret = PTR_ERR(ihcd->gpio_vcc);
+ if (ret != -EPROBE_DEFER)
+ pr_err("fail to get gpio_vcc %d\n", ret);
+ goto out;
+ }
+
+ ret = regulator_set_voltage(ihcd->gpio_vcc, 1800000, 1800000);
+ if (ret < 0) {
+ pr_err("fail to set gpio_vcc %d\n", ret);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int ice40_spi_request_gpios(struct ice40_hcd *ihcd)
+{
+ int ret;
+
+ ret = devm_gpio_request(&ihcd->spi->dev, ihcd->reset_gpio,
+ "ice40_reset");
+ if (ret < 0) {
+ pr_err("fail to request reset gpio\n");
+ goto out;
+ }
+
+ ret = devm_gpio_request(&ihcd->spi->dev, ihcd->config_done_gpio,
+ "ice40_config_done");
+ if (ret < 0) {
+ pr_err("fail to request config_done gpio\n");
+ goto out;
+ }
+
+ ret = devm_gpio_request(&ihcd->spi->dev, ihcd->vcc_en_gpio,
+ "ice40_vcc_en");
+ if (ret < 0) {
+ pr_err("fail to request vcc_en gpio\n");
+ goto out;
+ }
+
+ if (ihcd->clk_en_gpio) {
+
+ ret = devm_gpio_request(&ihcd->spi->dev, ihcd->clk_en_gpio,
+ "ice40_clk_en");
+ if (ret < 0)
+ pr_err("fail to request clk_en gpio\n");
+ }
+
+out:
+ return ret;
+}
+
+static int
+ice40_spi_init_one_xfr(struct ice40_hcd *ihcd, enum ice40_xfr_type type)
+{
+ struct spi_message **m;
+ struct spi_transfer **t;
+ int n;
+
+ switch (type) {
+ case FIRMWARE_XFR:
+ m = &ihcd->fmsg;
+ t = &ihcd->fmsg_xfr;
+ n = 1;
+ break;
+ case REG_WRITE_XFR:
+ m = &ihcd->wmsg;
+ t = &ihcd->wmsg_xfr;
+ n = 1;
+ break;
+ case REG_READ_XFR:
+ m = &ihcd->rmsg;
+ t = &ihcd->rmsg_xfr;
+ n = 1;
+ break;
+ case SETUP_XFR:
+ m = &ihcd->setup_msg;
+ t = &ihcd->setup_xfr;
+ n = 2;
+ break;
+ case DATA_IN_XFR:
+ m = &ihcd->in_msg;
+ t = &ihcd->in_xfr;
+ n = 2;
+ break;
+ case DATA_OUT_XFR:
+ m = &ihcd->out_msg;
+ t = &ihcd->out_xfr;
+ n = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *m = devm_kzalloc(&ihcd->spi->dev, sizeof(**m), GFP_KERNEL);
+ if (*m == NULL)
+ goto out;
+
+ *t = devm_kzalloc(&ihcd->spi->dev, n * sizeof(**t), GFP_KERNEL);
+ if (*t == NULL)
+ goto out;
+
+ spi_message_init_with_transfers(*m, *t, n);
+
+ return 0;
+out:
+ return -ENOMEM;
+}
+
+static int ice40_spi_init_xfrs(struct ice40_hcd *ihcd)
+{
+ int ret = -ENOMEM;
+
+ ret = ice40_spi_init_one_xfr(ihcd, FIRMWARE_XFR);
+ if (ret < 0)
+ goto out;
+
+ ret = ice40_spi_init_one_xfr(ihcd, REG_WRITE_XFR);
+ if (ret < 0)
+ goto out;
+
+ ihcd->w_tx_buf = devm_kzalloc(&ihcd->spi->dev, 2, GFP_KERNEL);
+ if (!ihcd->w_tx_buf)
+ goto out;
+
+ ihcd->w_rx_buf = devm_kzalloc(&ihcd->spi->dev, 2, GFP_KERNEL);
+ if (!ihcd->w_rx_buf)
+ goto out;
+
+ ihcd->wmsg_xfr[0].tx_buf = ihcd->w_tx_buf;
+ ihcd->wmsg_xfr[0].rx_buf = ihcd->w_rx_buf;
+ ihcd->wmsg_xfr[0].len = 2;
+
+ ret = ice40_spi_init_one_xfr(ihcd, REG_READ_XFR);
+ if (ret < 0)
+ goto out;
+
+ ihcd->r_tx_buf = devm_kzalloc(&ihcd->spi->dev, 3, GFP_KERNEL);
+ if (!ihcd->r_tx_buf)
+ goto out;
+
+ ihcd->r_rx_buf = devm_kzalloc(&ihcd->spi->dev, 3, GFP_KERNEL);
+ if (!ihcd->r_rx_buf)
+ goto out;
+
+ ihcd->rmsg_xfr[0].tx_buf = ihcd->r_tx_buf;
+ ihcd->rmsg_xfr[0].rx_buf = ihcd->r_rx_buf;
+ ihcd->rmsg_xfr[0].len = 3;
+
+ ret = ice40_spi_init_one_xfr(ihcd, SETUP_XFR);
+ if (ret < 0)
+ goto out;
+
+ ihcd->setup_buf = devm_kzalloc(&ihcd->spi->dev, 1, GFP_KERNEL);
+ if (!ihcd->setup_buf)
+ goto out;
+ ihcd->setup_xfr[0].tx_buf = ihcd->setup_buf;
+ ihcd->setup_xfr[0].len = 1;
+
+ ret = ice40_spi_init_one_xfr(ihcd, DATA_IN_XFR);
+ if (ret < 0)
+ goto out;
+ ihcd->in_buf = devm_kzalloc(&ihcd->spi->dev, 2, GFP_KERNEL);
+ if (!ihcd->in_buf)
+ goto out;
+ ihcd->in_xfr[0].tx_buf = ihcd->in_buf;
+ ihcd->in_xfr[0].len = 2;
+
+ ret = ice40_spi_init_one_xfr(ihcd, DATA_OUT_XFR);
+ if (ret < 0)
+ goto out;
+ ihcd->out_buf = devm_kzalloc(&ihcd->spi->dev, 1, GFP_KERNEL);
+ if (!ihcd->out_buf)
+ goto out;
+ ihcd->out_xfr[0].tx_buf = ihcd->out_buf;
+ ihcd->out_xfr[0].len = 1;
+
+ return 0;
+
+out:
+ return -ENOMEM;
+}
+
+static int ice40_dbg_cmd_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, NULL, inode->i_private);
+}
+
+static ssize_t ice40_dbg_cmd_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct ice40_hcd *ihcd = s->private;
+ char buf[32];
+ int ret;
+ u8 status, addr;
+
+ memset(buf, 0x00, sizeof(buf));
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (!strcmp(buf, "poll")) {
+ if (!HCD_RH_RUNNING(ihcd->hcd)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ /*
+ * The bridge chip supports interrupts for device
+ * connect and disconnect, but we don't have a real
+ * use case for them. This debugfs
+ * interface provides a way to enumerate the
+ * attached device.
+ */
+ ice40_spi_reg_write(ihcd, ihcd->ctrl0 |
+ DET_BUS_CTRL, CTRL0_REG);
+ ice40_handshake(ihcd, CTRL0_REG, DET_BUS_CTRL, 0, 5000);
+ status = ice40_spi_reg_read(ihcd, XFRST_REG);
+ if ((status & DPST)) {
+ ihcd->port_flags |= USB_PORT_STAT_CONNECTION;
+ ihcd->port_flags |= USB_PORT_STAT_C_CONNECTION << 16;
+ ihcd->pcd_pending = true;
+ usb_hcd_poll_rh_status(ihcd->hcd);
+ } else if (ihcd->port_flags & USB_PORT_STAT_CONNECTION) {
+ ihcd->port_flags &= ~USB_PORT_STAT_ENABLE;
+ ihcd->port_flags &= ~USB_PORT_STAT_CONNECTION;
+ ihcd->port_flags |= (USB_PORT_STAT_C_CONNECTION << 16);
+ ihcd->pcd_pending = true;
+ usb_hcd_poll_rh_status(ihcd->hcd);
+ }
+ } else if (!strcmp(buf, "rwtest")) {
+ ihcd->devnum = 1;
+ ice40_spi_reg_write(ihcd, 0x1, FADDR_REG);
+ addr = ice40_spi_reg_read(ihcd, FADDR_REG);
+ pr_info("addr written was 0x1 read as %x\n", addr);
+ } else if (!strcmp(buf, "force_disconnect")) {
+ if (!HCD_RH_RUNNING(ihcd->hcd)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ /*
+ * Forcefully disconnect the device. This is required
+ * for simulating the disconnect on a USB port which
+ * does not have pull-down resistors.
+ */
+ ihcd->port_flags &= ~USB_PORT_STAT_ENABLE;
+ ihcd->port_flags &= ~USB_PORT_STAT_CONNECTION;
+ ihcd->port_flags |= (USB_PORT_STAT_C_CONNECTION << 16);
+ ihcd->pcd_pending = true;
+ usb_hcd_poll_rh_status(ihcd->hcd);
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = count;
+out:
+ return ret;
+}
+
+const struct file_operations ice40_dbg_cmd_ops = {
+ .open = ice40_dbg_cmd_open,
+ .write = ice40_dbg_cmd_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int ice40_debugfs_init(struct ice40_hcd *ihcd)
+{
+ struct dentry *dir;
+ int ret = 0;
+
+ dir = debugfs_create_dir("ice40_hcd", NULL);
+
+ if (!dir || IS_ERR(dir)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ihcd->dbg_root = dir;
+
+ dir = debugfs_create_file("command", S_IWUSR, ihcd->dbg_root, ihcd,
+ &ice40_dbg_cmd_ops);
+
+ if (!dir) {
+ debugfs_remove_recursive(ihcd->dbg_root);
+ ihcd->dbg_root = NULL;
+ ret = -ENODEV;
+ }
+
+out:
+ return ret;
+}
+
+static int ice40_spi_probe(struct spi_device *spi)
+{
+ struct ice40_hcd *ihcd;
+ int ret;
+
+ ihcd = devm_kzalloc(&spi->dev, sizeof(*ihcd), GFP_KERNEL);
+ if (!ihcd) {
+ pr_err("fail to allocate ihcd\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ ihcd->spi = spi;
+
+ ret = ice40_spi_parse_dt(ihcd);
+ if (ret) {
+ pr_err("fail to parse dt node\n");
+ goto out;
+ }
+
+ ret = ice40_spi_init_regulators(ihcd);
+ if (ret) {
+ pr_err("fail to init regulators\n");
+ goto out;
+ }
+
+ ret = ice40_spi_request_gpios(ihcd);
+ if (ret) {
+ pr_err("fail to request gpios\n");
+ goto out;
+ }
+
+ spin_lock_init(&ihcd->lock);
+ INIT_LIST_HEAD(&ihcd->async_list);
+ INIT_WORK(&ihcd->async_work, ice40_async_work);
+ mutex_init(&ihcd->wlock);
+ mutex_init(&ihcd->rlock);
+
+ /*
+ * Enable all our trace points. Useful in debugging card
+ * enumeration issues.
+ */
+ ret = trace_set_clr_event(__stringify(TRACE_SYSTEM), NULL, 1);
+ if (ret < 0)
+ pr_err("fail to enable trace points with %d\n", ret);
+
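+ /* Single-threaded workqueue used to process the async URB list */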
+ ihcd->wq = create_singlethread_workqueue("ice40_wq");
+ if (!ihcd->wq) {
+ pr_err("fail to create workqueue\n");
+ ret = -ENOMEM;
+ goto destroy_mutex;
+ }
+
+ ret = ice40_spi_init_xfrs(ihcd);
+ if (ret) {
+ pr_err("fail to init spi xfrs %d\n", ret);
+ goto destroy_wq;
+ }
+
+ ret = ice40_spi_cache_fw(ihcd);
+ if (ret) {
+ pr_err("fail to cache fw %d\n", ret);
+ goto destroy_wq;
+ }
+
+ ret = ice40_spi_load_fw(ihcd);
+ if (ret) {
+ pr_err("fail to load fw %d\n", ret);
+ goto destroy_wq;
+ }
+
+ ihcd->hcd = usb_create_hcd(&ice40_hc_driver, &spi->dev, "ice40");
+ if (!ihcd->hcd) {
+ pr_err("fail to alloc hcd\n");
+ ret = -ENOMEM;
+ goto power_off;
+ }
+ *((struct ice40_hcd **) ihcd->hcd->hcd_priv) = ihcd;
+
+ ret = usb_add_hcd(ihcd->hcd, 0, 0);
+
+ if (ret < 0) {
+ pr_err("fail to add HCD\n");
+ goto put_hcd;
+ }
+
+ ice40_debugfs_init(ihcd);
+
+ /*
+ * We manage the power states of the bridge chip
+ * as part of root hub suspend/resume. We don't
+ * need to implement any additional runtime PM
+ * methods.
+ */
+ pm_runtime_no_callbacks(&spi->dev);
+ pm_runtime_set_active(&spi->dev);
+ pm_runtime_enable(&spi->dev);
+
+ /*
+ * This does not mean the bridge chip can wake up the
+ * system from sleep. Its activity can prevent or
+ * abort system sleep. device_init_wakeup() creates
+ * the wakeup source that we use to control system
+ * sleep.
+ */
+ device_init_wakeup(&spi->dev, 1);
+ pm_stay_awake(&spi->dev);
+
+ pr_debug("success\n");
+
+ return 0;
+
+put_hcd:
+ usb_put_hcd(ihcd->hcd);
+power_off:
+ ice40_spi_power_off(ihcd);
+destroy_wq:
+ destroy_workqueue(ihcd->wq);
+destroy_mutex:
+ mutex_destroy(&ihcd->rlock);
+ mutex_destroy(&ihcd->wlock);
+out:
+ pr_info("ice40_spi_probe failed\n");
+ return ret;
+}
+
+static int ice40_spi_remove(struct spi_device *spi)
+{
+ struct usb_hcd *hcd = spi_get_drvdata(spi);
+ struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
+
+ debugfs_remove_recursive(ihcd->dbg_root);
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+ destroy_workqueue(ihcd->wq);
+ ice40_spi_power_off(ihcd);
+
+ pm_runtime_disable(&spi->dev);
+ pm_relax(&spi->dev);
+
+ return 0;
+}
+
+static struct of_device_id ice40_spi_of_match_table[] = {
+ { .compatible = "lattice,ice40-spi-usb", },
+ {},
+};
+
+static struct spi_driver ice40_spi_driver = {
+ .driver = {
+ .name = "ice40_spi",
+ .owner = THIS_MODULE,
+ .of_match_table = ice40_spi_of_match_table,
+ },
+ .probe = ice40_spi_probe,
+ .remove = ice40_spi_remove,
+};
+
+module_spi_driver(ice40_spi_driver);
+
+MODULE_DESCRIPTION("ICE40 FPGA based SPI-USB bridge HCD");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ad09139..0a82e58 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1013,9 +1013,6 @@
}
xhci = hcd_to_xhci(hcd);
- if (xhci->xhc_state & XHCI_STATE_HALTED)
- return -ENODEV;
-
if (check_virt_dev) {
if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
printk(KERN_DEBUG "xHCI %s called with unaddressed "
@@ -1031,6 +1028,9 @@
}
}
+ if (xhci->xhc_state & XHCI_STATE_HALTED)
+ return -ENODEV;
+
return 1;
}
diff --git a/drivers/video/msm/mdss/dsi_host_v2.c b/drivers/video/msm/mdss/dsi_host_v2.c
index ca7f199..884f7f2 100644
--- a/drivers/video/msm/mdss/dsi_host_v2.c
+++ b/drivers/video/msm/mdss/dsi_host_v2.c
@@ -1216,6 +1216,13 @@
mutex_unlock(&ctrl_pdata->mutex);
return ret;
}
+ pinfo->panel_power_on = 1;
+ ret = mdss_dsi_panel_reset(pdata, 1);
+ if (ret) {
+ pr_err("%s: Panel reset failed\n", __func__);
+ mutex_unlock(&ctrl_pdata->mutex);
+ return ret;
+ }
msm_dsi_ahb_ctrl(1);
msm_dsi_prepare_clocks();
diff --git a/drivers/video/msm/mdss/dsi_v2.c b/drivers/video/msm/mdss/dsi_v2.c
index ccde545..bc76fd0 100644
--- a/drivers/video/msm/mdss/dsi_v2.c
+++ b/drivers/video/msm/mdss/dsi_v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -66,6 +66,7 @@
if (enable) {
dsi_ctrl_gpio_request(ctrl_pdata);
mdss_dsi_panel_reset(pdata, 1);
+ pdata->panel_info.panel_power_on = 1;
rc = ctrl_pdata->on(pdata);
if (rc)
pr_err("dsi_panel_handler panel on failed %d\n", rc);
@@ -73,6 +74,7 @@
if (dsi_intf.op_mode_config)
dsi_intf.op_mode_config(DSI_CMD_MODE, pdata);
rc = ctrl_pdata->off(pdata);
+ pdata->panel_info.panel_power_on = 0;
mdss_dsi_panel_reset(pdata, 0);
dsi_ctrl_gpio_free(ctrl_pdata);
}
@@ -202,75 +204,23 @@
{
int rc = 0;
- if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
- rc = gpio_request(ctrl_pdata->disp_en_gpio, "disp_enable");
- if (rc)
- goto gpio_request_err4;
-
- ctrl_pdata->disp_en_gpio_requested = 1;
- }
-
- if (gpio_is_valid(ctrl_pdata->rst_gpio)) {
- rc = gpio_request(ctrl_pdata->rst_gpio, "disp_rst_n");
- if (rc)
- goto gpio_request_err3;
-
- ctrl_pdata->rst_gpio_requested = 1;
- }
-
if (gpio_is_valid(ctrl_pdata->disp_te_gpio)) {
rc = gpio_request(ctrl_pdata->disp_te_gpio, "disp_te");
if (rc)
- goto gpio_request_err2;
-
- ctrl_pdata->disp_te_gpio_requested = 1;
+ ctrl_pdata->disp_te_gpio_requested = 0;
+ else
+ ctrl_pdata->disp_te_gpio_requested = 1;
}
- if (gpio_is_valid(ctrl_pdata->mode_gpio)) {
- rc = gpio_request(ctrl_pdata->mode_gpio, "panel_mode");
- if (rc)
- goto gpio_request_err1;
-
- ctrl_pdata->mode_gpio_requested = 1;
- }
-
- return rc;
-
-gpio_request_err1:
- if (gpio_is_valid(ctrl_pdata->disp_te_gpio))
- gpio_free(ctrl_pdata->disp_te_gpio);
-gpio_request_err2:
- if (gpio_is_valid(ctrl_pdata->rst_gpio))
- gpio_free(ctrl_pdata->rst_gpio);
-gpio_request_err3:
- if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
- gpio_free(ctrl_pdata->disp_en_gpio);
-gpio_request_err4:
- ctrl_pdata->disp_en_gpio_requested = 0;
- ctrl_pdata->rst_gpio_requested = 0;
- ctrl_pdata->disp_te_gpio_requested = 0;
- ctrl_pdata->mode_gpio_requested = 0;
return rc;
}
void dsi_ctrl_gpio_free(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
- if (ctrl_pdata->disp_en_gpio_requested) {
- gpio_free(ctrl_pdata->disp_en_gpio);
- ctrl_pdata->disp_en_gpio_requested = 0;
- }
- if (ctrl_pdata->rst_gpio_requested) {
- gpio_free(ctrl_pdata->rst_gpio);
- ctrl_pdata->rst_gpio_requested = 0;
- }
if (ctrl_pdata->disp_te_gpio_requested) {
gpio_free(ctrl_pdata->disp_te_gpio);
ctrl_pdata->disp_te_gpio_requested = 0;
}
- if (ctrl_pdata->mode_gpio_requested) {
- gpio_free(ctrl_pdata->mode_gpio);
- ctrl_pdata->mode_gpio_requested = 0;
- }
}
static int dsi_parse_vreg(struct device *dev, struct dss_module_power *mp)
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
index b324130..da714ad 100644
--- a/drivers/video/msm/mdss/mdp3_ctrl.c
+++ b/drivers/video/msm/mdss/mdp3_ctrl.c
@@ -798,7 +798,7 @@
rc = mdp3_dma->stop(mdp3_dma, mdp3_session->intf);
if (rc) {
- pr_err("fail to stop the MDP3 dma\n");
+ pr_err("fail to stop the MDP3 dma %d\n", rc);
goto reset_error;
}
@@ -897,9 +897,7 @@
dma->source_config.stride = stride;
dma->output_config.pack_pattern =
mdp3_ctrl_get_pack_pattern(req->src.format);
- mdp3_clk_enable(1, 0);
- mdp3_session->dma->dma_config_source(dma);
- mdp3_clk_enable(0, 0);
+ dma->update_src_cfg = true;
}
mdp3_session->overlay.id = 1;
req->id = 1;
@@ -923,14 +921,6 @@
mutex_lock(&mdp3_session->lock);
if (mdp3_session->overlay.id == ndx && ndx == 1) {
- struct mdp3_dma *dma = mdp3_session->dma;
- dma->source_config.format = format;
- dma->source_config.stride = fix->line_length;
- dma->output_config.pack_pattern =
- mdp3_ctrl_get_pack_pattern(mfd->fb_imgType);
- mdp3_clk_enable(1, 0);
- mdp3_session->dma->dma_config_source(dma);
- mdp3_clk_enable(0, 0);
mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
mdp3_bufq_deinit(&mdp3_session->bufq_in);
} else {
@@ -1011,7 +1001,11 @@
panel = mdp3_session->panel;
if (!mdp3_iommu_is_attached(MDP3_CLIENT_DMA_P)) {
pr_debug("continuous splash screen, IOMMU not attached\n");
- mdp3_ctrl_reset(mfd);
+ rc = mdp3_ctrl_reset(mfd);
+ if (rc) {
+ pr_err("fail to reset display\n");
+ return -EINVAL;
+ }
reset_done = true;
}
@@ -1092,7 +1086,11 @@
if (!mdp3_iommu_is_attached(MDP3_CLIENT_DMA_P)) {
pr_debug("continuous splash screen, IOMMU not attached\n");
- mdp3_ctrl_reset(mfd);
+ rc = mdp3_ctrl_reset(mfd);
+ if (rc) {
+ pr_err("fail to reset display\n");
+ return;
+ }
}
mutex_lock(&mdp3_session->lock);
diff --git a/drivers/video/msm/mdss/mdp3_dma.c b/drivers/video/msm/mdss/mdp3_dma.c
index 800c4b3..8a13de2 100644
--- a/drivers/video/msm/mdss/mdp3_dma.c
+++ b/drivers/video/msm/mdss/mdp3_dma.c
@@ -18,7 +18,7 @@
#include "mdp3_hwio.h"
#define DMA_STOP_POLL_SLEEP_US 1000
-#define DMA_STOP_POLL_TIMEOUT_US 32000
+#define DMA_STOP_POLL_TIMEOUT_US 200000
#define DMA_HISTO_RESET_TIMEOUT_MS 40
#define DMA_LUT_CONFIG_MASK 0xfffffbe8
#define DMA_CCS_CONFIG_MASK 0xfffffc17
@@ -605,6 +605,13 @@
}
}
}
+ if (dma->update_src_cfg) {
+ if (dma->output_config.out_sel ==
+ MDP3_DMA_OUTPUT_SEL_DSI_VIDEO && intf->active)
+ pr_err("configuring dma source while dma is active\n");
+ dma->dma_config_source(dma);
+ dma->update_src_cfg = false;
+ }
spin_lock_irqsave(&dma->dma_lock, flag);
MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR, (u32)buf);
dma->source_config.buf = buf;
@@ -961,6 +968,7 @@
dma->vsync_client.handler = NULL;
dma->vsync_client.arg = NULL;
dma->histo_state = MDP3_DMA_HISTO_STATE_IDLE;
+ dma->update_src_cfg = false;
memset(&dma->cursor, 0, sizeof(dma->cursor));
memset(&dma->ccs_config, 0, sizeof(dma->ccs_config));
diff --git a/drivers/video/msm/mdss/mdp3_dma.h b/drivers/video/msm/mdss/mdp3_dma.h
index 207168f..80ebb9b 100644
--- a/drivers/video/msm/mdss/mdp3_dma.h
+++ b/drivers/video/msm/mdss/mdp3_dma.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -259,6 +259,7 @@
int histo_state;
struct mdp3_dma_histogram_data histo_data;
unsigned int vsync_status;
+ bool update_src_cfg;
int (*dma_config)(struct mdp3_dma *dma,
struct mdp3_dma_source *source_config,
diff --git a/drivers/video/msm/mdss/mdp3_ppp.c b/drivers/video/msm/mdss/mdp3_ppp.c
index 8cc29da8..7e590b3a 100644
--- a/drivers/video/msm/mdss/mdp3_ppp.c
+++ b/drivers/video/msm/mdss/mdp3_ppp.c
@@ -272,11 +272,11 @@
int ret = 1;
/*
- * wait 40 ms for ppp operation to complete before declaring
+ * wait 200 ms for ppp operation to complete before declaring
* the MDP hung
*/
ret = wait_for_completion_timeout(
- &ppp_stat->ppp_comp, msecs_to_jiffies(40));
+ &ppp_stat->ppp_comp, msecs_to_jiffies(200));
if (!ret)
pr_err("%s: Timed out waiting for the MDP.\n",
__func__);
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index ada1281..7e6faa8 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -192,6 +192,7 @@
int handoff_pending;
struct mdss_prefill_data prefill_data;
+ bool ulps;
};
extern struct mdss_data_type *mdss_res;
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index 2efb973..7b4b065 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -27,8 +27,6 @@
#include "mdss_dsi.h"
#include "mdss_debug.h"
-static unsigned char *mdss_dsi_base;
-
static int mdss_dsi_regulator_init(struct platform_device *pdev)
{
struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
@@ -74,13 +72,25 @@
goto error;
}
- if (pdata->panel_info.panel_power_on == 0)
- mdss_dsi_panel_reset(pdata, 1);
-
+ if (!pdata->panel_info.mipi.lp11_init) {
+ ret = mdss_dsi_panel_reset(pdata, 1);
+ if (ret) {
+ pr_err("%s: Panel reset failed. rc=%d\n",
+ __func__, ret);
+ if (msm_dss_enable_vreg(
+ ctrl_pdata->power_data.vreg_config,
+ ctrl_pdata->power_data.num_vreg, 0))
+ pr_err("Disable vregs failed\n");
+ goto error;
+ }
+ }
} else {
-
- mdss_dsi_panel_reset(pdata, 0);
-
+ ret = mdss_dsi_panel_reset(pdata, 0);
+ if (ret) {
+ pr_err("%s: Panel reset failed. rc=%d\n",
+ __func__, ret);
+ goto error;
+ }
ret = msm_dss_enable_vreg(
ctrl_pdata->power_data.vreg_config,
ctrl_pdata->power_data.num_vreg, 0);
@@ -298,7 +308,7 @@
if (!pdata->panel_info.panel_power_on) {
pr_warn("%s:%d Panel already off.\n", __func__, __LINE__);
- return -EPERM;
+ return 0;
}
pdata->panel_info.panel_power_on = 0;
@@ -337,63 +347,22 @@
return ret;
}
-int mdss_dsi_on(struct mdss_panel_data *pdata)
+static void __mdss_dsi_ctrl_setup(struct mdss_panel_data *pdata)
{
- int ret = 0;
- u32 clk_rate;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
struct mdss_panel_info *pinfo;
struct mipi_panel_info *mipi;
+ u32 clk_rate;
u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
u32 ystride, bpp, data, dst_bpp;
u32 dummy_xres, dummy_yres;
- struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
u32 hsync_period, vsync_period;
- if (pdata == NULL) {
- pr_err("%s: Invalid input data\n", __func__);
- return -EINVAL;
- }
-
- if (pdata->panel_info.panel_power_on) {
- pr_warn("%s:%d Panel already on.\n", __func__, __LINE__);
- return 0;
- }
-
ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
panel_data);
- pr_debug("%s+: ctrl=%p ndx=%d\n",
- __func__, ctrl_pdata, ctrl_pdata->ndx);
-
pinfo = &pdata->panel_info;
- ret = msm_dss_enable_vreg(ctrl_pdata->power_data.vreg_config,
- ctrl_pdata->power_data.num_vreg, 1);
- if (ret) {
- pr_err("%s:Failed to enable vregs. rc=%d\n", __func__, ret);
- return ret;
- }
-
- pdata->panel_info.panel_power_on = 1;
-
- if (!pdata->panel_info.mipi.lp11_init)
- mdss_dsi_panel_reset(pdata, 1);
-
- ret = mdss_dsi_bus_clk_start(ctrl_pdata);
- if (ret) {
- pr_err("%s: failed to enable bus clocks. rc=%d\n", __func__,
- ret);
- mdss_dsi_panel_power_on(pdata, 0);
- pdata->panel_info.panel_power_on = 0;
- return ret;
- }
-
- mdss_dsi_phy_sw_reset((ctrl_pdata->ctrl_base));
- mdss_dsi_phy_init(pdata);
- mdss_dsi_bus_clk_stop(ctrl_pdata);
-
- mdss_dsi_clk_ctrl(ctrl_pdata, 1);
-
clk_rate = pdata->panel_info.clk_rate;
clk_rate = min(clk_rate, pdata->panel_info.clk_max);
@@ -423,7 +392,7 @@
vsync_period = vspw + vbp + height + dummy_yres + vfp;
hsync_period = hspw + hbp + width + dummy_xres + hfp;
- mipi = &pdata->panel_info.mipi;
+ mipi = &pdata->panel_info.mipi;
if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x24,
((hspw + hbp + width + dummy_xres) << 16 |
@@ -461,19 +430,195 @@
MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x64, data);
MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x5C, data);
}
+}
+static inline bool __mdss_dsi_ulps_feature_enabled(
+ struct mdss_panel_data *pdata)
+{
+ return pdata->panel_info.ulps_feature_enabled;
+}
+
+static int mdss_dsi_ulps_config_sub(struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+ int enable)
+{
+ int ret = 0;
+ struct mdss_panel_data *pdata = NULL;
+ u32 lane_status = 0;
+
+ if (!ctrl_pdata) {
+ pr_err("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ pdata = &ctrl_pdata->panel_data;
+
+ if (!__mdss_dsi_ulps_feature_enabled(pdata)) {
+ pr_debug("%s: ULPS feature not supported. enable=%d\n",
+ __func__, enable);
+ return -ENOTSUPP;
+ }
+
+ if (enable && !ctrl_pdata->ulps) {
+ /* No need to configure ULPS mode when entering suspend state */
+ if (!pdata->panel_info.panel_power_on) {
+ pr_err("%s: panel off. returning\n", __func__);
+ goto error;
+ }
+
+ if (__mdss_dsi_clk_enabled(ctrl_pdata)) {
+ pr_err("%s: cannot enter ulps mode if dsi clocks are on\n",
+ __func__);
+ ret = -EPERM;
+ goto error;
+ }
+
+ mdss_dsi_clk_ctrl(ctrl_pdata, 1);
+ /*
+ * ULPS Entry Request.
+ * Wait for a short duration to ensure that the lanes
+ * enter ULP state.
+ */
+ MIPI_OUTP(ctrl_pdata->ctrl_base + 0x0AC, 0x01F);
+ usleep(100);
+
+ /* Enable MMSS DSI Clamps */
+ MIPI_OUTP(ctrl_pdata->mmss_misc_io.base + 0x14, 0x3FF);
+ MIPI_OUTP(ctrl_pdata->mmss_misc_io.base + 0x14, 0x83FF);
+
+ wmb();
+
+ MIPI_OUTP(ctrl_pdata->mmss_misc_io.base + 0x108, 0x1);
+ /* disable DSI controller */
+ mdss_dsi_controller_cfg(0, pdata);
+
+ lane_status = MIPI_INP(ctrl_pdata->ctrl_base + 0xA8);
+ mdss_dsi_clk_ctrl(ctrl_pdata, 0);
+ ctrl_pdata->ulps = true;
+ } else if (ctrl_pdata->ulps) {
+ mdss_dsi_phy_init(pdata);
+
+ __mdss_dsi_ctrl_setup(pdata);
+ mdss_dsi_sw_reset(pdata);
+ mdss_dsi_host_init(pdata);
+ mdss_dsi_op_mode_config(pdata->panel_info.mipi.mode,
+ pdata);
+
+ /* Disable MMSS DSI Clamps */
+ MIPI_OUTP(ctrl_pdata->mmss_misc_io.base + 0x14, 0x0);
+
+ /*
+ * ULPS Exit Request
+ * Hardware requirement is to wait for at least 1ms
+ */
+ MIPI_OUTP(ctrl_pdata->ctrl_base + 0x0AC, 0x1F00);
+ usleep(1000);
+ MIPI_OUTP(ctrl_pdata->ctrl_base + 0x0AC, 0x0);
+
+ /*
+ * Wait for a short duration before enabling
+ * data transmission
+ */
+ usleep(100);
+
+ lane_status = MIPI_INP(ctrl_pdata->ctrl_base + 0xA8);
+ ctrl_pdata->ulps = false;
+ }
+
+ pr_debug("%s: DSI lane status = 0x%08x. Ulps %s\n", __func__,
+ lane_status, enable ? "enabled" : "disabled");
+
+error:
+ return ret;
+}
+
+static int mdss_dsi_ulps_config(struct mdss_dsi_ctrl_pdata *ctrl,
+ int enable)
+{
+ int rc;
+ struct mdss_dsi_ctrl_pdata *sctrl = NULL;
+
+ if (ctrl->flags & DSI_FLAG_CLOCK_MASTER)
+ sctrl = mdss_dsi_ctrl_slave(ctrl);
+
+ if (sctrl) {
+ pr_debug("%s: configuring ulps (%s) for slave ctrl\n",
+ __func__, (enable ? "on" : "off"));
+ rc = mdss_dsi_ulps_config_sub(sctrl, enable);
+ if (rc)
+ return rc;
+ }
+
+ pr_debug("%s: configuring ulps (%s) for master ctrl\n",
+ __func__, (enable ? "on" : "off"));
+ return mdss_dsi_ulps_config_sub(ctrl, enable);
+}
+
+int mdss_dsi_on(struct mdss_panel_data *pdata)
+{
+ int ret = 0;
+ struct mdss_panel_info *pinfo;
+ struct mipi_panel_info *mipi;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+ if (pdata == NULL) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
+ }
+
+ if (pdata->panel_info.panel_power_on) {
+ pr_warn("%s:%d Panel already on.\n", __func__, __LINE__);
+ return 0;
+ }
+
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ pr_debug("%s+: ctrl=%p ndx=%d\n",
+ __func__, ctrl_pdata, ctrl_pdata->ndx);
+
+ pinfo = &pdata->panel_info;
+ mipi = &pdata->panel_info.mipi;
+
+ ret = mdss_dsi_panel_power_on(pdata, 1);
+ if (ret) {
+ pr_err("%s:Panel power on failed. rc=%d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = mdss_dsi_bus_clk_start(ctrl_pdata);
+ if (ret) {
+ pr_err("%s: failed to enable bus clocks. rc=%d\n", __func__,
+ ret);
+ ret = mdss_dsi_panel_power_on(pdata, 0);
+ if (ret) {
+ pr_err("%s: Panel reset failed. rc=%d\n",
+ __func__, ret);
+ return ret;
+ }
+ pdata->panel_info.panel_power_on = 0;
+ return ret;
+ }
+ pdata->panel_info.panel_power_on = 1;
+
+ mdss_dsi_phy_sw_reset((ctrl_pdata->ctrl_base));
+ mdss_dsi_phy_init(pdata);
+ mdss_dsi_bus_clk_stop(ctrl_pdata);
+
+ mdss_dsi_clk_ctrl(ctrl_pdata, 1);
+
+ __mdss_dsi_ctrl_setup(pdata);
mdss_dsi_sw_reset(pdata);
- mdss_dsi_host_init(mipi, pdata);
+ mdss_dsi_host_init(pdata);
/*
* Issue hardware reset line after enabling the DSI clocks and data
* data lanes for LP11 init
*/
- if (pdata->panel_info.mipi.lp11_init)
+ if (mipi->lp11_init)
mdss_dsi_panel_reset(pdata, 1);
- if (pdata->panel_info.mipi.init_delay)
- usleep(pdata->panel_info.mipi.init_delay);
+ if (mipi->init_delay)
+ usleep(mipi->init_delay);
if (mipi->force_clk_lane_hs) {
u32 tmp;
@@ -547,6 +692,17 @@
panel_data);
mipi = &pdata->panel_info.mipi;
+ if (__mdss_dsi_ulps_feature_enabled(pdata) &&
+ (ctrl_pdata->ulps)) {
+ /* Disable ULPS mode before blanking the panel */
+ ret = mdss_dsi_ulps_config(ctrl_pdata, 0);
+ if (ret) {
+ pr_err("%s: failed to exit ULPS mode. rc=%d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
mdss_dsi_op_mode_config(DSI_CMD_MODE, pdata);
if (pdata->panel_info.type == MIPI_CMD_PANEL) {
@@ -593,7 +749,7 @@
"Incorrect Ctrl state=0x%x\n", ctrl_pdata->ctrl_state);
mdss_dsi_sw_reset(pdata);
- mdss_dsi_host_init(mipi, pdata);
+ mdss_dsi_host_init(pdata);
mdss_dsi_op_mode_config(mipi->mode, pdata);
if (ctrl_pdata->on_cmds.link_state == DSI_LP_MODE) {
@@ -800,6 +956,9 @@
case MDSS_EVENT_ENABLE_PARTIAL_UPDATE:
rc = mdss_dsi_ctl_partial_update(pdata);
break;
+ case MDSS_EVENT_DSI_ULPS_CTRL:
+ rc = mdss_dsi_ulps_config(ctrl_pdata, (int)arg);
+ break;
default:
pr_debug("%s: unhandled event=%d\n", __func__, event);
break;
@@ -906,7 +1065,6 @@
struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
struct device_node *dsi_pan_node = NULL;
char panel_cfg[MDSS_MAX_PANEL_LEN];
- struct resource *mdss_dsi_mres;
const char *ctrl_name;
bool cmd_cfg_cont_splash = true;
@@ -956,30 +1114,13 @@
else
pdev->id = 2;
- mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mdss_dsi_mres) {
- pr_err("%s:%d unable to get the MDSS resources",
- __func__, __LINE__);
- rc = -ENOMEM;
- goto error_no_mem;
- }
-
- mdss_dsi_base = ioremap(mdss_dsi_mres->start,
- resource_size(mdss_dsi_mres));
- if (!mdss_dsi_base) {
- pr_err("%s:%d unable to remap dsi resources",
- __func__, __LINE__);
- rc = -ENOMEM;
- goto error_no_mem;
- }
-
rc = of_platform_populate(pdev->dev.of_node,
NULL, NULL, &pdev->dev);
if (rc) {
dev_err(&pdev->dev,
"%s: failed to add child nodes, rc=%d\n",
__func__, rc);
- goto error_ioremap;
+ goto error_no_mem;
}
/* Parse the regulator information */
@@ -1026,8 +1167,6 @@
of_node_put(dsi_pan_node);
error_vreg:
mdss_dsi_put_dt_vreg_data(&pdev->dev, &ctrl_pdata->power_data);
-error_ioremap:
- iounmap(mdss_dsi_base);
error_no_mem:
devm_kfree(&pdev->dev, ctrl_pdata);
@@ -1050,7 +1189,7 @@
pr_err("%s: failed to de-init vregs\n", __func__);
mdss_dsi_put_dt_vreg_data(&pdev->dev, &ctrl_pdata->power_data);
mfd = platform_get_drvdata(pdev);
- iounmap(mdss_dsi_base);
+ msm_dss_iounmap(&ctrl_pdata->mmss_misc_io);
return 0;
}
@@ -1109,6 +1248,13 @@
pr_info("%s: dsi base=%x size=%x\n",
__func__, (int)ctrl->ctrl_base, ctrl->reg_size);
+ rc = msm_dss_ioremap_byname(pdev, &ctrl->mmss_misc_io,
+ "mmss_misc_phys");
+ if (rc) {
+ pr_err("%s:%d mmss_misc IO remap failed\n", __func__, __LINE__);
+ return rc;
+ }
+
return 0;
}
@@ -1246,18 +1392,9 @@
ctrl_pdata->disp_en_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
"qcom,platform-enable-gpio", 0);
- if (!gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
+ if (!gpio_is_valid(ctrl_pdata->disp_en_gpio))
pr_err("%s:%d, Disp_en gpio not specified\n",
__func__, __LINE__);
- } else {
- rc = gpio_request(ctrl_pdata->disp_en_gpio, "disp_enable");
- if (rc) {
- pr_err("request reset gpio failed, rc=%d\n",
- rc);
- gpio_free(ctrl_pdata->disp_en_gpio);
- return -ENODEV;
- }
- }
if (pinfo->type == MIPI_CMD_PANEL) {
ctrl_pdata->disp_te_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
@@ -1274,7 +1411,6 @@
if (rc) {
pr_err("request TE gpio failed, rc=%d\n",
rc);
- gpio_free(ctrl_pdata->disp_te_gpio);
return -ENODEV;
}
rc = gpio_tlmm_config(GPIO_CFG(
@@ -1304,44 +1440,18 @@
ctrl_pdata->rst_gpio = of_get_named_gpio(ctrl_pdev->dev.of_node,
"qcom,platform-reset-gpio", 0);
- if (!gpio_is_valid(ctrl_pdata->rst_gpio)) {
+ if (!gpio_is_valid(ctrl_pdata->rst_gpio))
pr_err("%s:%d, reset gpio not specified\n",
__func__, __LINE__);
- } else {
- rc = gpio_request(ctrl_pdata->rst_gpio, "disp_rst_n");
- if (rc) {
- pr_err("request reset gpio failed, rc=%d\n",
- rc);
- gpio_free(ctrl_pdata->rst_gpio);
- if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
- gpio_free(ctrl_pdata->disp_en_gpio);
- return -ENODEV;
- }
- }
if (pinfo->mode_gpio_state != MODE_GPIO_NOT_VALID) {
ctrl_pdata->mode_gpio = of_get_named_gpio(
ctrl_pdev->dev.of_node,
"qcom,platform-mode-gpio", 0);
- if (!gpio_is_valid(ctrl_pdata->mode_gpio)) {
+ if (!gpio_is_valid(ctrl_pdata->mode_gpio))
pr_info("%s:%d, mode gpio not specified\n",
__func__, __LINE__);
- } else {
- rc = gpio_request(ctrl_pdata->mode_gpio, "panel_mode");
- if (rc) {
- pr_err("request panel mode gpio failed,rc=%d\n",
- rc);
- gpio_free(ctrl_pdata->mode_gpio);
- if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
- gpio_free(ctrl_pdata->disp_en_gpio);
- if (gpio_is_valid(ctrl_pdata->rst_gpio))
- gpio_free(ctrl_pdata->rst_gpio);
- if (gpio_is_valid(ctrl_pdata->disp_te_gpio))
- gpio_free(ctrl_pdata->disp_te_gpio);
- return -ENODEV;
- }
- }
}
if (mdss_dsi_clk_init(ctrl_pdev, ctrl_pdata)) {
@@ -1392,10 +1502,6 @@
rc = mdss_register_panel(ctrl_pdev, &(ctrl_pdata->panel_data));
if (rc) {
pr_err("%s: unable to register MIPI DSI panel\n", __func__);
- if (ctrl_pdata->rst_gpio)
- gpio_free(ctrl_pdata->rst_gpio);
- if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
- gpio_free(ctrl_pdata->disp_en_gpio);
return rc;
}
@@ -1450,7 +1556,6 @@
static void __exit mdss_dsi_driver_cleanup(void)
{
- iounmap(mdss_dsi_base);
platform_driver_unregister(&mdss_dsi_ctrl_driver);
}
module_exit(mdss_dsi_driver_cleanup);
diff --git a/drivers/video/msm/mdss/mdss_dsi.h b/drivers/video/msm/mdss/mdss_dsi.h
index 2c9c37d..57b0e75 100644
--- a/drivers/video/msm/mdss/mdss_dsi.h
+++ b/drivers/video/msm/mdss/mdss_dsi.h
@@ -238,6 +238,7 @@
int (*cmdlist_commit)(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp);
struct mdss_panel_data panel_data;
unsigned char *ctrl_base;
+ struct dss_io_data mmss_misc_io;
int reg_size;
u32 clk_cnt;
int clk_cnt_sub;
@@ -245,6 +246,7 @@
struct clk *mdp_core_clk;
struct clk *ahb_clk;
struct clk *axi_clk;
+ struct clk *mmss_misc_ahb_clk;
struct clk *byte_clk;
struct clk *esc_clk;
struct clk *pixel_clk;
@@ -256,10 +258,7 @@
int disp_en_gpio;
int disp_te_gpio;
int mode_gpio;
- int rst_gpio_requested;
- int disp_en_gpio_requested;
int disp_te_gpio_requested;
- int mode_gpio_requested;
int bklt_ctrl; /* backlight ctrl */
int pwm_period;
int pwm_pmic_gpio;
@@ -290,6 +289,8 @@
struct mutex mutex;
struct mutex cmd_mutex;
+ bool ulps;
+
struct dsi_buf tx_buf;
struct dsi_buf rx_buf;
};
@@ -303,8 +304,7 @@
int mdss_dsi_cmds_rx(struct mdss_dsi_ctrl_pdata *ctrl,
struct dsi_cmd_desc *cmds, int rlen);
-void mdss_dsi_host_init(struct mipi_panel_info *pinfo,
- struct mdss_panel_data *pdata);
+void mdss_dsi_host_init(struct mdss_panel_data *pdata);
void mdss_dsi_op_mode_config(int mode,
struct mdss_panel_data *pdata);
void mdss_dsi_cmd_mode_ctrl(int enable);
@@ -337,7 +337,7 @@
void mdss_dsi_clk_deinit(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
int mdss_dsi_enable_bus_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
void mdss_dsi_disable_bus_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
-void mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable);
+int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable);
void mdss_dsi_phy_disable(struct mdss_dsi_ctrl_pdata *ctrl);
void mdss_dsi_phy_init(struct mdss_panel_data *pdata);
void mdss_dsi_phy_sw_reset(unsigned char *ctrl_base);
@@ -351,6 +351,7 @@
int mdss_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp);
void mdss_dsi_cmdlist_kickoff(int intf);
int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl);
+bool __mdss_dsi_clk_enabled(struct mdss_dsi_ctrl_pdata *ctrl);
int mdss_dsi_panel_init(struct device_node *node,
struct mdss_dsi_ctrl_pdata *ctrl_pdata,
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index b4478ac..f342c56 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -90,6 +90,8 @@
ctrl->ndx = DSI_CTRL_1;
}
+ ctrl->panel_mode = ctrl->panel_data.panel_info.mipi.mode;
+
ctrl_list[ctrl->ndx] = ctrl; /* keep it */
if (ctrl->shared_pdata.broadcast_enable)
@@ -250,12 +252,12 @@
MIPI_OUTP((ctrl->ctrl_base) + 0x015c, 0x0);
}
-void mdss_dsi_host_init(struct mipi_panel_info *pinfo,
- struct mdss_panel_data *pdata)
+void mdss_dsi_host_init(struct mdss_panel_data *pdata)
{
u32 dsi_ctrl, intr_ctrl;
u32 data;
struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+ struct mipi_panel_info *pinfo = NULL;
if (pdata == NULL) {
pr_err("%s: Invalid input data\n", __func__);
@@ -265,9 +267,9 @@
ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
panel_data);
- pinfo->rgb_swap = DSI_RGB_SWAP_RGB;
+ pinfo = &pdata->panel_info.mipi;
- ctrl_pdata->panel_mode = pinfo->mode;
+ pinfo->rgb_swap = DSI_RGB_SWAP_RGB;
if (pinfo->mode == DSI_VIDEO_MODE) {
data = 0;
@@ -1477,7 +1479,9 @@
u32 isr0;
isr0 = MIPI_INP(left_ctrl_pdata->ctrl_base
+ 0x0110);/* DSI_INTR_CTRL */
- MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0110, isr0);
+ if (isr0 & DSI_INTR_CMD_DMA_DONE)
+ MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0110,
+ DSI_INTR_CMD_DMA_DONE);
}
pr_debug("%s: ndx=%d isr=%x\n", __func__, ctrl->ndx, isr);
diff --git a/drivers/video/msm/mdss/mdss_dsi_panel.c b/drivers/video/msm/mdss/mdss_dsi_panel.c
index 76e6d1b..5415a7e 100644
--- a/drivers/video/msm/mdss/mdss_dsi_panel.c
+++ b/drivers/video/msm/mdss/mdss_dsi_panel.c
@@ -153,15 +153,53 @@
mdss_dsi_cmdlist_put(ctrl, &cmdreq);
}
-void mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
+static int mdss_dsi_request_gpios(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+ int rc = 0;
+
+ if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
+ rc = gpio_request(ctrl_pdata->disp_en_gpio,
+ "disp_enable");
+ if (rc) {
+ pr_err("request disp_en gpio failed, rc=%d\n",
+ rc);
+ goto disp_en_gpio_err;
+ }
+ }
+ rc = gpio_request(ctrl_pdata->rst_gpio, "disp_rst_n");
+ if (rc) {
+ pr_err("request reset gpio failed, rc=%d\n",
+ rc);
+ goto rst_gpio_err;
+ }
+ if (gpio_is_valid(ctrl_pdata->mode_gpio)) {
+ rc = gpio_request(ctrl_pdata->mode_gpio, "panel_mode");
+ if (rc) {
+ pr_err("request panel mode gpio failed,rc=%d\n",
+ rc);
+ goto mode_gpio_err;
+ }
+ }
+ return rc;
+
+mode_gpio_err:
+ gpio_free(ctrl_pdata->rst_gpio);
+rst_gpio_err:
+ if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
+ gpio_free(ctrl_pdata->disp_en_gpio);
+disp_en_gpio_err:
+ return rc;
+}
+
+int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
{
struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
struct mdss_panel_info *pinfo = NULL;
- int i;
+ int i, rc = 0;
if (pdata == NULL) {
pr_err("%s: Invalid input data\n", __func__);
- return;
+ return -EINVAL;
}
ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
@@ -175,21 +213,28 @@
if (!gpio_is_valid(ctrl_pdata->rst_gpio)) {
pr_debug("%s:%d, reset line not configured\n",
__func__, __LINE__);
- return;
+ return rc;
}
pr_debug("%s: enable = %d\n", __func__, enable);
pinfo = &(ctrl_pdata->panel_data.panel_info);
if (enable) {
- if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
- gpio_set_value((ctrl_pdata->disp_en_gpio), 1);
+ rc = mdss_dsi_request_gpios(ctrl_pdata);
+ if (rc) {
+ pr_err("gpio request failed\n");
+ return rc;
+ }
+ if (!pinfo->panel_power_on) {
+ if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
+ gpio_set_value((ctrl_pdata->disp_en_gpio), 1);
- for (i = 0; i < pdata->panel_info.rst_seq_len; ++i) {
- gpio_set_value((ctrl_pdata->rst_gpio),
- pdata->panel_info.rst_seq[i]);
- if (pdata->panel_info.rst_seq[++i])
- usleep(pdata->panel_info.rst_seq[i] * 1000);
+ for (i = 0; i < pdata->panel_info.rst_seq_len; ++i) {
+ gpio_set_value((ctrl_pdata->rst_gpio),
+ pdata->panel_info.rst_seq[i]);
+ if (pdata->panel_info.rst_seq[++i])
+ usleep(pinfo->rst_seq[i] * 1000);
+ }
}
if (gpio_is_valid(ctrl_pdata->mode_gpio)) {
@@ -205,10 +250,16 @@
pr_debug("%s: Reset panel done\n", __func__);
}
} else {
- gpio_set_value((ctrl_pdata->rst_gpio), 0);
- if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
+ if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
gpio_set_value((ctrl_pdata->disp_en_gpio), 0);
+ gpio_free(ctrl_pdata->disp_en_gpio);
+ }
+ gpio_set_value((ctrl_pdata->rst_gpio), 0);
+ gpio_free(ctrl_pdata->rst_gpio);
+ if (gpio_is_valid(ctrl_pdata->mode_gpio))
+ gpio_free(ctrl_pdata->mode_gpio);
}
+ return rc;
}
static char caset[] = {0x2a, 0x00, 0x00, 0x03, 0x00}; /* DTYPE_DCS_LWRITE */
@@ -266,6 +317,17 @@
return rc;
}
+static struct mdss_dsi_ctrl_pdata *get_rctrl_data(struct mdss_panel_data *pdata)
+{
+ if (!pdata || !pdata->next) {
+ pr_err("%s: Invalid panel data\n", __func__);
+ return NULL;
+ }
+
+ return container_of(pdata->next, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+}
+
static void mdss_dsi_panel_bl_ctrl(struct mdss_panel_data *pdata,
u32 bl_level)
{
@@ -297,6 +359,16 @@
break;
case BL_DCS_CMD:
mdss_dsi_panel_bklt_dcs(ctrl_pdata, bl_level);
+ if (ctrl_pdata->shared_pdata.broadcast_enable &&
+ ctrl_pdata->ndx == DSI_CTRL_0) {
+ struct mdss_dsi_ctrl_pdata *rctrl_pdata = NULL;
+ rctrl_pdata = get_rctrl_data(pdata);
+ if (!rctrl_pdata) {
+ pr_err("%s: Right ctrl data NULL\n", __func__);
+ return;
+ }
+ mdss_dsi_panel_bklt_dcs(rctrl_pdata, bl_level);
+ }
break;
default:
pr_err("%s: Unknown bl_ctrl configuration\n",
@@ -632,6 +704,35 @@
return 0;
}
+static int mdss_dsi_parse_panel_features(struct device_node *np,
+ struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ struct mdss_panel_info *pinfo;
+
+ if (!np || !ctrl) {
+ pr_err("%s: Invalid arguments\n", __func__);
+ return -ENODEV;
+ }
+
+ pinfo = &ctrl->panel_data.panel_info;
+
+ pinfo->cont_splash_enabled = of_property_read_bool(np,
+ "qcom,cont-splash-enabled");
+
+ pinfo->partial_update_enabled = of_property_read_bool(np,
+ "qcom,partial-update-enabled");
+ pr_info("%s:%d Partial update %s\n", __func__, __LINE__,
+ (pinfo->partial_update_enabled ? "enabled" : "disabled"));
+ if (pinfo->partial_update_enabled)
+ ctrl->partial_update_fnc = mdss_dsi_panel_partial_update;
+
+ pinfo->ulps_feature_enabled = of_property_read_bool(np,
+ "qcom,ulps-enabled");
+ pr_info("%s: ulps feature %s", __func__,
+ (pinfo->ulps_feature_enabled ? "enabled" : "disabled"));
+
+ return 0;
+}
static int mdss_panel_parse_dt(struct device_node *np,
struct mdss_dsi_ctrl_pdata *ctrl_pdata)
@@ -856,6 +957,11 @@
rc = of_property_read_u32(np, "qcom,mdss-dsi-t-clk-post", &tmp);
pinfo->mipi.t_clk_post = (!rc ? tmp : 0x03);
+ pinfo->mipi.rx_eot_ignore = of_property_read_bool(np,
+ "qcom,mdss-dsi-rx-eot-ignore");
+ pinfo->mipi.tx_eot_append = of_property_read_bool(np,
+ "qcom,mdss-dsi-tx-eot-append");
+
rc = of_property_read_u32(np, "qcom,mdss-dsi-stream", &tmp);
pinfo->mipi.stream = (!rc ? tmp : 0);
@@ -906,6 +1012,12 @@
mdss_dsi_parse_dcs_cmds(np, &ctrl_pdata->off_cmds,
"qcom,mdss-dsi-off-command", "qcom,mdss-dsi-off-command-state");
+ rc = mdss_dsi_parse_panel_features(np, ctrl_pdata);
+ if (rc) {
+ pr_err("%s: failed to parse panel features\n", __func__);
+ goto error;
+ }
+
return 0;
error:
@@ -918,14 +1030,15 @@
{
int rc = 0;
static const char *panel_name;
- bool cont_splash_enabled;
- bool partial_update_enabled;
+ struct mdss_panel_info *pinfo;
- if (!node) {
- pr_err("%s: no panel node\n", __func__);
+ if (!node || !ctrl_pdata) {
+ pr_err("%s: Invalid arguments\n", __func__);
return -ENODEV;
}
+ pinfo = &ctrl_pdata->panel_data.panel_info;
+
pr_debug("%s:%d\n", __func__, __LINE__);
panel_name = of_get_property(node, "qcom,mdss-dsi-panel-name", NULL);
if (!panel_name)
@@ -940,33 +1053,10 @@
return rc;
}
- if (cmd_cfg_cont_splash)
- cont_splash_enabled = of_property_read_bool(node,
- "qcom,cont-splash-enabled");
- else
- cont_splash_enabled = false;
- if (!cont_splash_enabled) {
- pr_info("%s:%d Continuous splash flag not found.\n",
- __func__, __LINE__);
- ctrl_pdata->panel_data.panel_info.cont_splash_enabled = 0;
- } else {
- pr_info("%s:%d Continuous splash flag enabled.\n",
- __func__, __LINE__);
-
- ctrl_pdata->panel_data.panel_info.cont_splash_enabled = 1;
- }
-
- partial_update_enabled = of_property_read_bool(node,
- "qcom,partial-update-enabled");
- if (partial_update_enabled) {
- pr_info("%s:%d Partial update enabled.\n", __func__, __LINE__);
- ctrl_pdata->panel_data.panel_info.partial_update_enabled = 1;
- ctrl_pdata->partial_update_fnc = mdss_dsi_panel_partial_update;
- } else {
- pr_info("%s:%d Partial update disabled.\n", __func__, __LINE__);
- ctrl_pdata->panel_data.panel_info.partial_update_enabled = 0;
- ctrl_pdata->partial_update_fnc = NULL;
- }
+ if (!cmd_cfg_cont_splash)
+ pinfo->cont_splash_enabled = false;
+ pr_info("%s: Continuous splash %s", __func__,
+ pinfo->cont_splash_enabled ? "enabled" : "disabled");
ctrl_pdata->on = mdss_dsi_panel_on;
ctrl_pdata->off = mdss_dsi_panel_off;
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index 1df2903..252a86e 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -1214,9 +1214,11 @@
struct mdss_fb_proc_info *pinfo = NULL;
int result;
int pid = current->tgid;
+ struct task_struct *task = current->group_leader;
if (mfd->shutdown_pending) {
- pr_err("Shutdown pending. Aborting operation\n");
+ pr_err("Shutdown pending. Aborting operation. Request from pid:%d name=%s\n",
+ pid, task->comm);
return -EPERM;
}
@@ -2213,7 +2215,8 @@
mdss_fb_power_setting_idle(mfd);
if ((cmd != MSMFB_VSYNC_CTRL) && (cmd != MSMFB_OVERLAY_VSYNC_CTRL) &&
(cmd != MSMFB_ASYNC_BLIT) && (cmd != MSMFB_BLIT) &&
- (cmd != MSMFB_NOTIFY_UPDATE)) {
+ (cmd != MSMFB_NOTIFY_UPDATE) &&
+ (cmd != MSMFB_OVERLAY_PREPARE)) {
ret = mdss_fb_pan_idle(mfd);
if (ret) {
pr_debug("Shutdown pending. Aborting operation %x\n",
diff --git a/drivers/video/msm/mdss/mdss_hdmi_hdcp.c b/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
index e56e9fa..2240941 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
@@ -1050,7 +1050,10 @@
}
if (++hdcp_ctrl->auth_retries == AUTH_RETRIES_TIME) {
- hdmi_hdcp_off(hdcp_ctrl);
+ mutex_lock(hdcp_ctrl->init_data.mutex);
+ hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+ mutex_unlock(hdcp_ctrl->init_data.mutex);
+
hdcp_ctrl->auth_retries = 0;
ret = -ERANGE;
}
@@ -1077,13 +1080,6 @@
return 0;
}
- ret = hdmi_msm_if_abort_reauth(hdcp_ctrl);
-
- if (ret) {
- DEV_ERR("%s: abort reauthentication!\n", __func__);
- return ret;
- }
-
/*
* Disable HPD circuitry.
* This is needed to reset the HDCP cipher engine so that when we
@@ -1109,6 +1105,13 @@
DSS_REG_R(hdcp_ctrl->init_data.core_io,
HDMI_HPD_CTRL) | BIT(28));
+ ret = hdmi_msm_if_abort_reauth(hdcp_ctrl);
+
+ if (ret) {
+ DEV_ERR("%s: abort reauthentication!\n", __func__);
+ return ret;
+ }
+
/* Restart authentication attempt */
DEV_DBG("%s: %s: Scheduling work to start HDCP authentication",
__func__, HDCP_STATE_NAME);
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
index 79afdca..2b409f5 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.c
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1127,6 +1127,17 @@
DEV_DBG("%s: Got HPD interrupt\n", __func__);
if (hdmi_ctrl->hpd_state) {
+ /*
+ * If a downstream device or bridge chip is attached to the HDMI
+ * Tx core output, it is likely powering the HPD module ON/OFF
+ * on cable connect/disconnect, since it has its own mechanism
+ * for detecting the cable. Flushing the power-off work is
+ * needed to avoid any race condition between power off and
+ * power on during fast cable plug in/out.
+ */
+ if (hdmi_ctrl->ds_registered)
+ flush_work(&hdmi_ctrl->power_off_work);
+
if (hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM, true)) {
DEV_ERR("%s: Failed to enable ddc power\n", __func__);
return;
@@ -1143,6 +1154,13 @@
hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0, false);
hdmi_tx_wait_for_audio_engine(hdmi_ctrl);
+ if (!hdmi_ctrl->panel_power_on) {
+ if (hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_DDC_PM,
+ false))
+ DEV_WARN("%s: Failed to disable ddc power\n",
+ __func__);
+ }
+
hdmi_tx_send_cable_notification(hdmi_ctrl, 0);
DEV_INFO("%s: sense cable DISCONNECTED: state switch to %d\n",
__func__, hdmi_ctrl->sdev.state);
@@ -2344,6 +2362,8 @@
ops->set_mhl_max_pclk = hdmi_tx_set_mhl_max_pclk;
ops->set_upstream_hpd = hdmi_tx_set_mhl_hpd;
+ hdmi_ctrl->ds_registered = true;
+
return 0;
}
@@ -3872,6 +3892,7 @@
static const struct of_device_id hdmi_tx_dt_match[] = {
{.compatible = COMPATIBLE_NAME,},
+ { /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, hdmi_tx_dt_match);
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.h b/drivers/video/msm/mdss/mdss_hdmi_tx.h
index 8233ba8..54d80dc 100644
--- a/drivers/video/msm/mdss/mdss_hdmi_tx.h
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.h
@@ -83,6 +83,7 @@
struct work_struct cable_notify_work;
bool hdcp_feature_on;
+ bool ds_registered;
u32 present_hdcp;
u8 spd_vendor_name[9];
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index e1786a6..cfa594c 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -2405,8 +2405,10 @@
pr_debug("Enable MDP FS\n");
if (!mdata->fs_ena) {
regulator_enable(mdata->fs);
- mdss_mdp_cx_ctrl(mdata, true);
- mdss_mdp_batfet_ctrl(mdata, true);
+ if (!mdata->ulps) {
+ mdss_mdp_cx_ctrl(mdata, true);
+ mdss_mdp_batfet_ctrl(mdata, true);
+ }
}
mdata->fs_ena = true;
} else {
@@ -2414,13 +2416,41 @@
mdss_iommu_dettach(mdata);
if (mdata->fs_ena) {
regulator_disable(mdata->fs);
- mdss_mdp_cx_ctrl(mdata, false);
- mdss_mdp_batfet_ctrl(mdata, false);
+ if (!mdata->ulps) {
+ mdss_mdp_cx_ctrl(mdata, false);
+ mdss_mdp_batfet_ctrl(mdata, false);
+ }
}
mdata->fs_ena = false;
}
}
+/**
+ * mdss_mdp_footswitch_ctrl_ulps() - MDSS GDSC control with ULPS feature
+ * @on: 1 to turn on footswitch, 0 to turn off footswitch
+ * @dev: framebuffer device node
+ *
+ * MDSS GDSC can be voted off during idle-screen usecase for MIPI DSI command
+ * mode displays with Ultra-Low Power State (ULPS) feature enabled. Upon
+ * subsequent frame update, MDSS GDSC needs to be turned back on and hw state
+ * needs to be restored.
+ */
+void mdss_mdp_footswitch_ctrl_ulps(int on, struct device *dev)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ pr_debug("called on=%d\n", on);
+ if (on) {
+ pm_runtime_get_sync(dev);
+ mdss_iommu_attach(mdata);
+ mdss_hw_init(mdata);
+ mdata->ulps = false;
+ } else {
+ mdata->ulps = true;
+ pm_runtime_put_sync(dev);
+ }
+}
+
static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
{
mdata->suspend_fs_ena = mdata->fs_ena;
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 3afb27e..f3b7ce1 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -452,6 +452,16 @@
};
#define is_vig_pipe(_pipe_id_) ((_pipe_id_) <= MDSS_MDP_SSPP_VIG2)
+
+static inline struct mdss_mdp_ctl *mdss_mdp_get_split_ctl(
+ struct mdss_mdp_ctl *ctl)
+{
+ if (ctl && ctl->mixer_right && (ctl->mixer_right->ctl != ctl))
+ return ctl->mixer_right->ctl;
+
+ return NULL;
+}
+
static inline void mdss_mdp_ctl_write(struct mdss_mdp_ctl *ctl,
u32 reg, u32 val)
{
@@ -681,6 +691,8 @@
int mdss_mdp_wb_set_secure(struct msm_fb_data_type *mfd, int enable);
int mdss_mdp_wb_get_secure(struct msm_fb_data_type *mfd, uint8_t *enable);
+void mdss_mdp_ctl_restore(struct mdss_mdp_ctl *ctl);
+void mdss_mdp_footswitch_ctrl_ulps(int on, struct device *dev);
int mdss_mdp_pipe_program_pixel_extn(struct mdss_mdp_pipe *pipe);
#define mfd_to_mdp5_data(mfd) (mfd->mdp.private1)
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index b445afa..fe42669 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -1133,15 +1133,6 @@
return 0;
}
-static inline struct mdss_mdp_ctl *mdss_mdp_get_split_ctl(
- struct mdss_mdp_ctl *ctl)
-{
- if (ctl && ctl->mixer_right && (ctl->mixer_right->ctl != ctl))
- return ctl->mixer_right->ctl;
-
- return NULL;
-}
-
int mdss_mdp_ctl_splash_finish(struct mdss_mdp_ctl *ctl, bool handoff)
{
struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
@@ -1588,6 +1579,25 @@
return rc;
}
+/**
+ * mdss_mdp_ctl_restore() - restore mdp ctl path
+ * @ctl: mdp controller.
+ *
+ * This function is called whenever MDP comes out of a power collapse as
+ * a result of a screen update when DSI ULPS mode is enabled. It restores
+ * the MDP controller's software state to the hardware registers.
+ */
+void mdss_mdp_ctl_restore(struct mdss_mdp_ctl *ctl)
+{
+ u32 temp;
+
+ temp = readl_relaxed(ctl->mdata->mdp_base +
+ MDSS_MDP_REG_DISP_INTF_SEL);
+ temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8));
+ writel_relaxed(temp, ctl->mdata->mdp_base +
+ MDSS_MDP_REG_DISP_INTF_SEL);
+}
+
static int mdss_mdp_ctl_start_sub(struct mdss_mdp_ctl *ctl, bool handoff)
{
struct mdss_mdp_mixer *mixer;
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
index 79bdee2..78ecf16 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
@@ -27,6 +27,7 @@
#define KOFF_TIMEOUT msecs_to_jiffies(84)
#define STOP_TIMEOUT msecs_to_jiffies(16 * (VSYNC_EXPIRE_TICK + 2))
+#define ULPS_ENTER_TIME msecs_to_jiffies(100)
struct mdss_mdp_cmd_ctx {
struct mdss_mdp_ctl *ctl;
@@ -43,6 +44,7 @@
struct mutex clk_mtx;
spinlock_t clk_lock;
struct work_struct clk_work;
+ struct delayed_work ulps_work;
struct work_struct pp_done_work;
atomic_t pp_done_cnt;
@@ -53,6 +55,7 @@
u16 start_threshold;
u32 vclk_line; /* vsync clock per line */
struct mdss_panel_recovery recovery;
+ bool ulps;
};
struct mdss_mdp_cmd_ctx mdss_mdp_cmd_ctx_list[MAX_SESSIONS];
@@ -200,8 +203,19 @@
mutex_lock(&ctx->clk_mtx);
if (!ctx->clk_enabled) {
ctx->clk_enabled = 1;
+ if (cancel_delayed_work_sync(&ctx->ulps_work))
+ pr_debug("deleted pending ulps work\n");
mdss_mdp_ctl_intf_event
(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL, (void *)1);
+
+ if (ctx->ulps) {
+ if (mdss_mdp_cmd_tearcheck_setup(ctx->ctl, 1))
+ pr_warn("tearcheck setup failed\n");
+ mdss_mdp_ctl_intf_event(ctx->ctl,
+ MDSS_EVENT_DSI_ULPS_CTRL, (void *)0);
+ ctx->ulps = false;
+ }
+
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_RESUME);
}
@@ -231,6 +245,8 @@
mdss_mdp_ctl_intf_event
(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL, (void *)0);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+ if (ctx->panel_on)
+ schedule_delayed_work(&ctx->ulps_work, ULPS_ENTER_TIME);
}
mutex_unlock(&ctx->clk_mtx);
}
@@ -365,11 +381,44 @@
mdss_mdp_cmd_clk_off(ctx);
}
+static void __mdss_mdp_cmd_ulps_work(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct mdss_mdp_cmd_ctx *ctx =
+ container_of(dw, struct mdss_mdp_cmd_ctx, ulps_work);
+
+ if (!ctx) {
+ pr_err("%s: invalid ctx\n", __func__);
+ return;
+ }
+
+ mutex_lock(&ctx->clk_mtx);
+ if (ctx->clk_enabled) {
+ mutex_unlock(&ctx->clk_mtx);
+ pr_warn("Cannot enter ulps mode if DSI clocks are on\n");
+ return;
+ }
+ mutex_unlock(&ctx->clk_mtx);
+
+ if (!ctx->panel_on) {
+ pr_err("Panel is off. skipping ULPS configuration\n");
+ return;
+ }
+
+ if (!mdss_mdp_ctl_intf_event(ctx->ctl, MDSS_EVENT_DSI_ULPS_CTRL,
+ (void *)1)) {
+ ctx->ulps = true;
+ ctx->ctl->play_cnt = 0;
+ mdss_mdp_footswitch_ctrl_ulps(0, &ctx->ctl->mfd->pdev->dev);
+ }
+}
+
static int mdss_mdp_cmd_add_vsync_handler(struct mdss_mdp_ctl *ctl,
struct mdss_mdp_vsync_handler *handle)
{
struct mdss_mdp_cmd_ctx *ctx;
unsigned long flags;
+ bool enable_rdptr = false;
ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
if (!ctx) {
@@ -381,12 +430,14 @@
if (!handle->enabled) {
handle->enabled = true;
list_add(&handle->list, &ctx->vsync_handlers);
- if (!handle->cmd_post_flush)
- ctx->vsync_enabled = 1;
+
+ enable_rdptr = !handle->cmd_post_flush;
+ if (enable_rdptr)
+ ctx->vsync_enabled++;
}
spin_unlock_irqrestore(&ctx->clk_lock, flags);
- if (!handle->cmd_post_flush)
+ if (enable_rdptr)
mdss_mdp_cmd_clk_on(ctx);
return 0;
@@ -395,11 +446,8 @@
static int mdss_mdp_cmd_remove_vsync_handler(struct mdss_mdp_ctl *ctl,
struct mdss_mdp_vsync_handler *handle)
{
-
struct mdss_mdp_cmd_ctx *ctx;
unsigned long flags;
- struct mdss_mdp_vsync_handler *tmp;
- int num_rdptr_vsync = 0;
ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
if (!ctx) {
@@ -407,19 +455,17 @@
return -ENODEV;
}
-
spin_lock_irqsave(&ctx->clk_lock, flags);
if (handle->enabled) {
handle->enabled = false;
list_del_init(&handle->list);
- }
- list_for_each_entry(tmp, &ctx->vsync_handlers, list) {
- if (!tmp->cmd_post_flush)
- num_rdptr_vsync++;
- }
- if (!num_rdptr_vsync) {
- ctx->vsync_enabled = 0;
- ctx->rdptr_enabled = VSYNC_EXPIRE_TICK;
+
+ if (!handle->cmd_post_flush) {
+ if (ctx->vsync_enabled)
+ ctx->vsync_enabled--;
+ else
+ WARN(1, "unbalanced vsync disable");
+ }
}
spin_unlock_irqrestore(&ctx->clk_lock, flags);
return 0;
@@ -583,11 +629,14 @@
if (cancel_work_sync(&ctx->clk_work))
pr_debug("no pending clk work\n");
+ if (cancel_delayed_work_sync(&ctx->ulps_work))
+ pr_debug("deleted pending ulps work\n");
+
+ ctx->panel_on = 0;
mdss_mdp_cmd_clk_off(ctx);
flush_work(&ctx->pp_done_work);
- ctx->panel_on = 0;
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num,
NULL, NULL);
@@ -653,6 +702,7 @@
spin_lock_init(&ctx->clk_lock);
mutex_init(&ctx->clk_mtx);
INIT_WORK(&ctx->clk_work, clk_ctrl_work);
+ INIT_DELAYED_WORK(&ctx->ulps_work, __mdss_mdp_cmd_ulps_work);
INIT_WORK(&ctx->pp_done_work, pingpong_done_work);
atomic_set(&ctx->pp_done_cnt, 0);
INIT_LIST_HEAD(&ctx->vsync_handlers);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 55a4a4d..f8bdc04 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -298,6 +298,7 @@
{
struct mdss_mdp_video_ctx *ctx;
struct mdss_mdp_vsync_handler *tmp, *handle;
+ struct mdss_mdp_ctl *sctl;
int rc;
u32 frame_rate = 0;
@@ -335,6 +336,10 @@
mdss_mdp_irq_disable(MDSS_MDP_IRQ_INTF_UNDER_RUN,
ctl->intf_num);
+ sctl = mdss_mdp_get_split_ctl(ctl);
+ if (sctl)
+ mdss_mdp_irq_disable(MDSS_MDP_IRQ_INTF_UNDER_RUN,
+ sctl->intf_num);
}
list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list)
@@ -641,6 +646,7 @@
static int mdss_mdp_video_display(struct mdss_mdp_ctl *ctl, void *arg)
{
struct mdss_mdp_video_ctx *ctx;
+ struct mdss_mdp_ctl *sctl;
int rc;
pr_debug("kickoff ctl=%d\n", ctl->num);
@@ -674,6 +680,11 @@
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
mdss_mdp_irq_enable(MDSS_MDP_IRQ_INTF_UNDER_RUN, ctl->intf_num);
+ sctl = mdss_mdp_get_split_ctl(ctl);
+ if (sctl)
+ mdss_mdp_irq_enable(MDSS_MDP_IRQ_INTF_UNDER_RUN,
+ sctl->intf_num);
+
mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 1);
wmb();
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 5d6ecdc..34cfe23 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -159,6 +159,9 @@
} else if (req->flags & MDP_BWC_EN) {
pr_err("Decimation can't be enabled with BWC\n");
return -EINVAL;
+ } else if (fmt->tile) {
+ pr_err("Decimation can't be enabled with MacroTile format\n");
+ return -EINVAL;
}
}
@@ -276,7 +279,7 @@
* mdp clock requirement
*/
if (mdata->has_decimation && (pipe->vert_deci < MAX_DECIMATION)
- && !pipe->bwc_mode)
+ && !pipe->bwc_mode && !pipe->src_fmt->tile)
pipe->vert_deci++;
else
return -EPERM;
@@ -844,6 +847,11 @@
struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
if (ctl->power_on) {
+ if (mdp5_data->mdata->ulps) {
+ mdss_mdp_footswitch_ctrl_ulps(1, &mfd->pdev->dev);
+ mdss_mdp_ctl_restore(ctl);
+ }
+
if (!mdp5_data->mdata->batfet)
mdss_mdp_batfet_ctrl(mdp5_data->mdata, true);
if (!mfd->panel_info->cont_splash_enabled)
@@ -966,6 +974,11 @@
int ret = 0;
int sd_in_pipe = 0;
+ if (!ctl) {
+ pr_warn("kickoff on fb=%d without a ctl attched\n", mfd->index);
+ return ret;
+ }
+
if (ctl->shared_lock)
mutex_lock(ctl->shared_lock);
@@ -1039,11 +1052,13 @@
pipe->mixer = mdss_mdp_mixer_get(tmp,
MDSS_MDP_MIXER_MUX_DEFAULT);
}
+
+ /* ensure pipes are always reconfigured after power off/on */
+ if (ctl->play_cnt == 0)
+ pipe->params_changed++;
+
if (pipe->back_buf.num_planes) {
buf = &pipe->back_buf;
- } else if (ctl->play_cnt == 0 && pipe->front_buf.num_planes) {
- pipe->params_changed++;
- buf = &pipe->front_buf;
} else if (!pipe->params_changed) {
continue;
} else if (pipe->front_buf.num_planes) {
@@ -2267,10 +2282,12 @@
struct mdp_overlay_list *ovlist,
struct mdp_overlay *overlays)
{
+ struct mdss_mdp_pipe *right_plist[MDSS_MDP_MAX_STAGE] = { 0 };
+ struct mdss_mdp_pipe *left_plist[MDSS_MDP_MAX_STAGE] = { 0 };
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_mdp_pipe *pipe;
struct mdp_overlay *req;
- int ret = 0;
+ int ret = 0, left_cnt = 0, right_cnt = 0;
int i;
u32 new_reqs = 0;
@@ -2299,8 +2316,29 @@
/* keep track of the new overlays to unset in case of errors */
if (pipe->play_cnt == 0)
new_reqs |= pipe->ndx;
+
+ if (pipe->flags & MDSS_MDP_RIGHT_MIXER) {
+ if (right_cnt >= MDSS_MDP_MAX_STAGE) {
+ pr_err("too many pipes on right mixer\n");
+ ret = -EINVAL;
+ goto validate_exit;
+ }
+ right_plist[right_cnt] = pipe;
+ right_cnt++;
+ } else {
+ if (left_cnt >= MDSS_MDP_MAX_STAGE) {
+ pr_err("too many pipes on left mixer\n");
+ ret = -EINVAL;
+ goto validate_exit;
+ }
+ left_plist[left_cnt] = pipe;
+ left_cnt++;
+ }
}
+ ret = mdss_mdp_perf_bw_check(mdp5_data->ctl, left_plist, left_cnt,
+ right_plist, right_cnt);
+
validate_exit:
if (IS_ERR_VALUE(ret))
mdss_mdp_overlay_release(mfd, new_reqs);
@@ -2660,6 +2698,56 @@
return 0;
}
+static int __mdss_mdp_ctl_handoff(struct mdss_mdp_ctl *ctl,
+ struct mdss_data_type *mdata)
+{
+ int rc = 0;
+ int i, j;
+ u32 mixercfg;
+ struct mdss_mdp_pipe *pipe = NULL;
+
+ if (!ctl || !mdata)
+ return -EINVAL;
+
+ for (i = 0; i < mdata->nmixers_intf; i++) {
+ mixercfg = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_LAYER(i));
+ pr_debug("for lm%d mixercfg = 0x%09x\n", i, mixercfg);
+
+ j = MDSS_MDP_SSPP_VIG0;
+ for (; j < MDSS_MDP_MAX_SSPP && mixercfg; j++) {
+ u32 cfg = j * 3;
+ if ((j == MDSS_MDP_SSPP_VIG3) ||
+ (j == MDSS_MDP_SSPP_RGB3)) {
+ /* Add 2 to account for Cursor & Border bits */
+ cfg += 2;
+ }
+ if (mixercfg & (0x7 << cfg)) {
+ pr_debug("Pipe %d staged\n", j);
+ pipe = mdss_mdp_pipe_search(mdata, BIT(j));
+ if (!pipe) {
+ pr_warn("Invalid pipe %d staged\n", j);
+ continue;
+ }
+
+ rc = mdss_mdp_pipe_handoff(pipe);
+ if (rc) {
+ pr_err("Failed to handoff pipe%d\n",
+ pipe->num);
+ goto exit;
+ }
+
+ rc = mdss_mdp_mixer_handoff(ctl, i, pipe);
+ if (rc) {
+ pr_err("failed to handoff mix%d\n", i);
+ goto exit;
+ }
+ }
+ }
+ }
+exit:
+ return rc;
+}
+
/**
* mdss_mdp_overlay_handoff() - Read MDP registers to handoff an active ctl path
* @mfd: Msm frame buffer structure associated with the fb device.
@@ -2675,10 +2763,8 @@
int rc = 0;
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
- int i, j;
- u32 reg;
- struct mdss_mdp_pipe *pipe = NULL;
struct mdss_mdp_ctl *ctl = NULL;
+ struct mdss_mdp_ctl *sctl = NULL;
if (!mdp5_data->ctl) {
ctl = __mdss_mdp_overlay_ctl_init(mfd);
@@ -2702,38 +2788,23 @@
ctl->clk_rate = mdss_mdp_get_clk_rate(MDSS_CLK_MDP_SRC);
pr_debug("Set the ctl clock rate to %d Hz\n", ctl->clk_rate);
- for (i = 0; i < mdata->nmixers_intf; i++) {
- reg = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_LAYER(i));
- pr_debug("for lm%d reg = 0x%09x\n", i, reg);
- for (j = MDSS_MDP_SSPP_VIG0; j < MDSS_MDP_MAX_SSPP; j++) {
- u32 cfg = j * 3;
- if ((j == MDSS_MDP_SSPP_VIG3) ||
- (j == MDSS_MDP_SSPP_RGB3)) {
- /* Add 2 to account for Cursor & Border bits */
- cfg += 2;
- }
- if (reg & (0x7 << cfg)) {
- pr_debug("Pipe %d staged\n", j);
- pipe = mdss_mdp_pipe_search(mdata, BIT(j));
- if (!pipe) {
- pr_warn("Invalid pipe %d staged\n", j);
- continue;
- }
+ rc = __mdss_mdp_ctl_handoff(ctl, mdata);
+ if (rc) {
+ pr_err("primary ctl handoff failed. rc=%d\n", rc);
+ goto error;
+ }
- rc = mdss_mdp_pipe_handoff(pipe);
- if (rc) {
- pr_err("Failed to handoff pipe num %d\n"
- , pipe->num);
- goto error;
- }
-
- rc = mdss_mdp_mixer_handoff(ctl, i, pipe);
- if (rc) {
- pr_err("failed to handoff mixer num %d\n"
- , i);
- goto error;
- }
- }
+ if (mfd->split_display) {
+ sctl = mdss_mdp_get_split_ctl(ctl);
+ if (!sctl) {
+ pr_err("cannot get secondary ctl. fail the handoff\n");
+ rc = -EPERM;
+ goto error;
+ }
+ rc = __mdss_mdp_ctl_handoff(sctl, mdata);
+ if (rc) {
+ pr_err("secondary ctl handoff failed. rc=%d\n", rc);
+ goto error;
}
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_pp.c b/drivers/video/msm/mdss/mdss_mdp_pp.c
index 5c9ad9c..54ec6f8 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pp.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pp.c
@@ -1625,7 +1625,7 @@
*/
int mdss_mdp_pp_resume(struct mdss_mdp_ctl *ctl, u32 dspp_num)
{
- u32 flags = 0, disp_num, bl;
+ u32 flags = 0, disp_num, bl, ret = 0;
struct pp_sts_type pp_sts;
struct mdss_ad_info *ad;
struct mdss_data_type *mdata = ctl->mdata;
@@ -1636,7 +1636,9 @@
disp_num = ctl->mfd->index;
if (dspp_num < mdata->nad_cfgs) {
- ad = &mdata->ad_cfgs[dspp_num];
+ ret = mdss_mdp_get_ad(ctl->mfd, &ad);
+ if (ret)
+ return ret;
if (PP_AD_STATE_CFG & ad->state)
pp_ad_cfg_write(&mdata->ad_off[dspp_num], ad);
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 274c523..135a00a 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -127,6 +127,11 @@
- 1 clock enable
* @MDSS_EVENT_ENABLE_PARTIAL_UPDATE: Event to update ROI of the panel.
* @MDSS_EVENT_DSI_CMDLIST_KOFF: acquire dsi_mdp_busy lock before kickoff.
+ * @MDSS_EVENT_DSI_ULPS_CTRL: Event to configure Ultra-Low Power State (ULPS)
+ * mode for the DSI data and clock lanes. The
+ * event arguments can have one of these values:
+ * - 0: Disable ULPS mode
+ * - 1: Enable ULPS mode
*/
enum mdss_intf_events {
MDSS_EVENT_RESET = 1,
@@ -145,6 +150,7 @@
MDSS_EVENT_PANEL_CLK_CTRL,
MDSS_EVENT_DSI_CMDLIST_KOFF,
MDSS_EVENT_ENABLE_PARTIAL_UPDATE,
+ MDSS_EVENT_DSI_ULPS_CTRL,
};
struct lcd_panel_info {
@@ -300,6 +306,7 @@
int pwm_period;
u32 mode_gpio_state;
bool dynamic_fps;
+ bool ulps_feature_enabled;
char dfps_update;
int new_fps;
diff --git a/drivers/video/msm/mdss/msm_mdss_io_8974.c b/drivers/video/msm/mdss/msm_mdss_io_8974.c
index 6ebcdf6..a0663e3 100644
--- a/drivers/video/msm/mdss/msm_mdss_io_8974.c
+++ b/drivers/video/msm/mdss/msm_mdss_io_8974.c
@@ -64,6 +64,16 @@
goto mdss_dsi_clk_err;
}
+ if (ctrl_pdata->panel_data.panel_info.type == MIPI_CMD_PANEL) {
+ ctrl_pdata->mmss_misc_ahb_clk = clk_get(dev, "core_mmss_clk");
+ if (IS_ERR(ctrl_pdata->mmss_misc_ahb_clk)) {
+ rc = PTR_ERR(ctrl_pdata->mmss_misc_ahb_clk);
+ pr_err("%s: Unable to get mmss misc ahb clk. rc=%d\n",
+ __func__, rc);
+ goto mdss_dsi_clk_err;
+ }
+ }
+
ctrl_pdata->byte_clk = clk_get(dev, "byte_clk");
if (IS_ERR(ctrl_pdata->byte_clk)) {
rc = PTR_ERR(ctrl_pdata->byte_clk);
@@ -105,6 +115,8 @@
clk_put(ctrl_pdata->esc_clk);
if (ctrl_pdata->pixel_clk)
clk_put(ctrl_pdata->pixel_clk);
+ if (ctrl_pdata->mmss_misc_ahb_clk)
+ clk_put(ctrl_pdata->mmss_misc_ahb_clk);
if (ctrl_pdata->axi_clk)
clk_put(ctrl_pdata->axi_clk);
if (ctrl_pdata->ahb_clk)
@@ -275,12 +287,26 @@
goto error;
}
+ if (ctrl_pdata->mmss_misc_ahb_clk) {
+ rc = clk_prepare_enable(ctrl_pdata->mmss_misc_ahb_clk);
+ if (rc) {
+ pr_err("%s: failed to enable mmss misc ahb clk.rc=%d\n",
+ __func__, rc);
+ clk_disable_unprepare(ctrl_pdata->axi_clk);
+ clk_disable_unprepare(ctrl_pdata->ahb_clk);
+ clk_disable_unprepare(ctrl_pdata->mdp_core_clk);
+ goto error;
+ }
+ }
+
error:
return rc;
}
void mdss_dsi_bus_clk_stop(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
+ if (ctrl_pdata->mmss_misc_ahb_clk)
+ clk_disable_unprepare(ctrl_pdata->mmss_misc_ahb_clk);
clk_disable_unprepare(ctrl_pdata->axi_clk);
clk_disable_unprepare(ctrl_pdata->ahb_clk);
clk_disable_unprepare(ctrl_pdata->mdp_core_clk);
@@ -507,6 +533,16 @@
static DEFINE_MUTEX(dsi_clk_lock); /* per system */
+bool __mdss_dsi_clk_enabled(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ bool enabled;
+ mutex_lock(&dsi_clk_lock);
+ enabled = ctrl->clk_cnt ? true : false;
+ mutex_unlock(&dsi_clk_lock);
+
+ return enabled;
+}
+
void mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
{
int changed = 0;
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 272fe77..8b604e3 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -94,6 +94,7 @@
u8 raw_erased_mem_count; /* 181 */
u8 raw_ext_csd_structure; /* 194 */
u8 raw_card_type; /* 196 */
+ u8 raw_drive_strength; /* 197 */
u8 out_of_int_time; /* 198 */
u8 raw_s_a_timeout; /* 217 */
u8 raw_hc_erase_gap_size; /* 221 */
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 764beec..b626915 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -303,6 +303,7 @@
#define EXT_CSD_REV 192 /* RO */
#define EXT_CSD_STRUCTURE 194 /* RO */
#define EXT_CSD_CARD_TYPE 196 /* RO */
+#define EXT_CSD_DRIVE_STRENGTH 197 /* RO */
#define EXT_CSD_OUT_OF_INTERRUPT_TIME 198 /* RO */
#define EXT_CSD_PART_SWITCH_TIME 199 /* RO */
#define EXT_CSD_PWR_CL_52_195 200 /* RO */
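The new raw_drive_strength field is meant to cache byte 197 of EXT_CSD when the register is decoded. A minimal sketch of that caching step, assuming the usual mmc_card decode context; the helper name below is illustrative, not part of this patch:

#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

/* Illustrative helper: cache the DRIVE_STRENGTH byte while parsing EXT_CSD.
 * Only the field name and register index come from the hunks above.
 */
static void mmc_cache_drive_strength(struct mmc_card *card, const u8 *ext_csd)
{
	card->ext_csd.raw_drive_strength = ext_csd[EXT_CSD_DRIVE_STRENGTH];
}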
diff --git a/include/linux/slimbus/slimbus.h b/include/linux/slimbus/slimbus.h
index 56a3a5f..7a7b3eb 100644
--- a/include/linux/slimbus/slimbus.h
+++ b/include/linux/slimbus/slimbus.h
@@ -525,6 +525,8 @@
* @port_xfer_status: Called by framework when client calls get_xfer_status
* API. Returns how much buffer is actually processed and the port
* errors (e.g. overflow/underflow) if any.
+ * @xfer_user_msg: Send a user message to the specified logical address. The
+ * underlying controller has to support user messages. Returns an error, if any.
*/
struct slim_controller {
struct device dev;
@@ -567,10 +569,13 @@
int (*framer_handover)(struct slim_controller *ctrl,
struct slim_framer *new_framer);
int (*port_xfer)(struct slim_controller *ctrl,
- u8 pn, u8 *iobuf, u32 len,
+ u8 pn, phys_addr_t iobuf, u32 len,
struct completion *comp);
enum slim_port_err (*port_xfer_status)(struct slim_controller *ctr,
- u8 pn, u8 **done_buf, u32 *done_len);
+ u8 pn, phys_addr_t *done_buf, u32 *done_len);
+ int (*xfer_user_msg)(struct slim_controller *ctrl,
+ u8 la, u8 mt, u8 mc,
+ struct slim_ele_access *msg, u8 *buf, u8 len);
};
#define to_slim_controller(d) container_of(d, struct slim_controller, dev)
@@ -744,6 +749,20 @@
extern int slim_xfer_msg(struct slim_controller *ctrl,
struct slim_device *sbdev, struct slim_ele_access *msg,
u16 mc, u8 *rbuf, const u8 *wbuf, u8 len);
+
+/*
+ * User message:
+ * slim_user_msg: Send user message that is interpreted by destination device
+ * @sb: Client handle sending the message
+ * @la: Destination device for this user message
+ * @mt: Message Type (Source-referred, or Destination-referred)
+ * @mc: Message Code
+ * @msg: Message structure (start offset, number of bytes) to be sent
+ * @buf: data buffer to be sent
+ * @len: data buffer size in bytes
+ */
+extern int slim_user_msg(struct slim_device *sb, u8 la, u8 mt, u8 mc,
+ struct slim_ele_access *msg, u8 *buf, u8 len);
/* end of message apis */
/* Port management for manager device APIs */
@@ -783,8 +802,8 @@
* Client will call slim_port_get_xfer_status to get error and/or number of
* bytes transferred if used asynchronously.
*/
-extern int slim_port_xfer(struct slim_device *sb, u32 ph, u8 *iobuf, u32 len,
- struct completion *comp);
+extern int slim_port_xfer(struct slim_device *sb, u32 ph, phys_addr_t iobuf,
+ u32 len, struct completion *comp);
/*
* slim_port_get_xfer_status: Poll for port transfers, or get transfer status
@@ -806,7 +825,7 @@
* processed from the multiple transfers.
*/
extern enum slim_port_err slim_port_get_xfer_status(struct slim_device *sb,
- u32 ph, u8 **done_buf, u32 *done_len);
+ u32 ph, phys_addr_t *done_buf, u32 *done_len);
/*
* slim_connect_src: Connect source port to channel.
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index fa702ae..9d13091 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -588,6 +588,26 @@
list_del(&t->transfer_list);
}
+/**
+ * spi_message_init_with_transfers - Initialize spi_message and append transfers
+ * @m: spi_message to be initialized
+ * @xfers: An array of spi transfers
+ * @num_xfers: Number of items in the xfer array
+ *
+ * This function initializes the given spi_message and adds each spi_transfer in
+ * the given array to the message.
+ */
+static inline void
+spi_message_init_with_transfers(struct spi_message *m,
+struct spi_transfer *xfers, unsigned int num_xfers)
+{
+ unsigned int i;
+
+ spi_message_init(m);
+ for (i = 0; i < num_xfers; ++i)
+ spi_message_add_tail(&xfers[i], m);
+}
+
/* It's fine to embed message and transaction structures in other data
* structures so long as you don't free them while they're in use.
*/
@@ -680,6 +700,30 @@
return spi_sync(spi, &m);
}
+/**
+ * spi_sync_transfer - synchronous SPI data transfer
+ * @spi: device with which data will be exchanged
+ * @xfers: An array of spi_transfers
+ * @num_xfers: Number of items in the xfer array
+ * Context: can sleep
+ *
+ * Does a synchronous SPI data transfer of the given spi_transfer array.
+ *
+ * For more specific semantics see spi_sync().
+ *
+ * It returns zero on success, else a negative error code.
+ */
+static inline int
+spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
+ unsigned int num_xfers)
+{
+ struct spi_message msg;
+
+ spi_message_init_with_transfers(&msg, xfers, num_xfers);
+
+ return spi_sync(spi, &msg);
+}
+
/* this copies txbuf and rxbuf data; for small transfers only! */
extern int spi_write_then_read(struct spi_device *spi,
const void *txbuf, unsigned n_tx,
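A minimal sketch of how a driver could use the new spi_sync_transfer() helper for a one-byte register read; the device, register layout and function name are illustrative assumptions:

#include <linux/spi/spi.h>

/* Illustrative: one write transfer (register address) followed by one read
 * transfer, issued synchronously via the new helper.
 */
static int example_spi_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	struct spi_transfer xfers[] = {
		{ .tx_buf = &reg, .len = 1, },
		{ .rx_buf = val,  .len = 1, },
	};

	return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
}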
diff --git a/include/linux/tick.h b/include/linux/tick.h
index dc15221..78ae909 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -92,7 +92,16 @@
# ifdef CONFIG_TICK_ONESHOT
extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
# endif
+#else
+static inline struct tick_device *tick_get_broadcast_device(void)
+{
+ return NULL;
+}
+static inline struct cpumask *tick_get_broadcast_mask(void)
+{
+ return NULL;
+}
# endif /* BROADCAST */
# ifdef CONFIG_TICK_ONESHOT
@@ -109,6 +118,10 @@
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_check_idle(int cpu) { }
static inline int tick_oneshot_mode_active(void) { return 0; }
+static inline struct cpumask *tick_get_broadcast_oneshot_mask(void)
+{
+ return NULL;
+}
# endif
#else /* CONFIG_GENERIC_CLOCKEVENTS */
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 3ba0abe..3828221 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -1,3 +1,14 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef __MSMB_ISP__
#define __MSMB_ISP__
@@ -302,15 +313,16 @@
ISP_WM_BUS_OVERFLOW = 4,
ISP_STATS_OVERFLOW = 5,
ISP_CAMIF_ERROR = 6,
- ISP_SOF = 7,
- ISP_EOF = 8,
- ISP_EVENT_MAX = 9
+ ISP_BUF_DONE = 9,
+ ISP_EVENT_MAX = 10
};
#define ISP_EVENT_OFFSET 8
#define ISP_EVENT_BASE (V4L2_EVENT_PRIVATE_START)
#define ISP_BUF_EVENT_BASE (ISP_EVENT_BASE + (1 << ISP_EVENT_OFFSET))
#define ISP_STATS_EVENT_BASE (ISP_EVENT_BASE + (2 << ISP_EVENT_OFFSET))
+#define ISP_SOF_EVENT_BASE (ISP_EVENT_BASE + (3 << ISP_EVENT_OFFSET))
+#define ISP_EOF_EVENT_BASE (ISP_EVENT_BASE + (4 << ISP_EVENT_OFFSET))
#define ISP_EVENT_REG_UPDATE (ISP_EVENT_BASE + ISP_REG_UPDATE)
#define ISP_EVENT_START_ACK (ISP_EVENT_BASE + ISP_START_ACK)
#define ISP_EVENT_STOP_ACK (ISP_EVENT_BASE + ISP_STOP_ACK)
@@ -318,8 +330,9 @@
#define ISP_EVENT_WM_BUS_OVERFLOW (ISP_EVENT_BASE + ISP_WM_BUS_OVERFLOW)
#define ISP_EVENT_STATS_OVERFLOW (ISP_EVENT_BASE + ISP_STATS_OVERFLOW)
#define ISP_EVENT_CAMIF_ERROR (ISP_EVENT_BASE + ISP_CAMIF_ERROR)
-#define ISP_EVENT_SOF (ISP_EVENT_BASE + ISP_SOF)
-#define ISP_EVENT_EOF (ISP_EVENT_BASE + ISP_EOF)
+#define ISP_EVENT_SOF (ISP_SOF_EVENT_BASE)
+#define ISP_EVENT_EOF (ISP_EOF_EVENT_BASE)
+#define ISP_EVENT_BUF_DONE (ISP_EVENT_BASE + ISP_BUF_DONE)
#define ISP_EVENT_BUF_DIVERT (ISP_BUF_EVENT_BASE)
#define ISP_EVENT_STATS_NOTIFY (ISP_STATS_EVENT_BASE)
#define ISP_EVENT_COMP_STATS_NOTIFY (ISP_EVENT_STATS_NOTIFY + MSM_ISP_STATS_MAX)
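With SOF and EOF moved onto their own event bases, user space still subscribes to them by type; a hedged user-space sketch, assuming an already-open VFE subdev fd:

#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <media/msmb_isp.h>

/* Illustrative: subscribe to start-of-frame events on the new SOF base. */
static int example_subscribe_sof(int vfe_fd)
{
	struct v4l2_event_subscription sub = {
		.type = ISP_EVENT_SOF,	/* now equals ISP_SOF_EVENT_BASE */
	};

	return ioctl(vfe_fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
}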
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9f0d486..ecaef21 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -3224,8 +3224,8 @@
static inline struct sk_buff *
cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
{
- return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_TESTMODE,
- NL80211_ATTR_TESTDATA, approxlen);
+ return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_VENDOR,
+ NL80211_ATTR_VENDOR_DATA, approxlen);
}
/**
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 50660b3..dce56a6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -131,6 +131,8 @@
* most likely due to retrans in 3WHS.
*/
+#define TCP_DELACK_SEG 1 /* Number of full MSS to receive before ACKing (RFC 2581) */
+
#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
* for local resources.
*/
@@ -253,6 +255,10 @@
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
+/* sysctl variables for controlling various tcp parameters */
+extern int sysctl_tcp_delack_seg;
+extern int sysctl_tcp_use_userconfig;
+
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;
@@ -346,6 +352,10 @@
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
+extern int tcp_use_userconfig_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+extern int tcp_proc_delayed_ack_control(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
{
diff --git a/include/sound/Kbuild b/include/sound/Kbuild
index 60847b0..aeccfed 100644
--- a/include/sound/Kbuild
+++ b/include/sound/Kbuild
@@ -13,3 +13,4 @@
header-y += compress_offload.h
header-y += lsm_params.h
header-y += voice_params.h
+header-y += voice_svc.h
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 2c969cd..1c6ea04 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -579,6 +579,15 @@
/* Clients must set this field to zero.*/
} __packed;
+#define ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG_V2 (0x00010DD8)
+
+struct asm_aac_stereo_mix_coeff_selection_param_v2 {
+ struct apr_hdr hdr;
+ u32 param_id;
+ u32 param_size;
+ u32 aac_stereo_mix_coeff_flag;
+} __packed;
+
/* Allows a client to connect the desired stream to
* the desired AFE port through the stream router
*
@@ -6874,6 +6883,7 @@
#define Q6AFE_LPASS_IBIT_CLK_1_P024_MHZ 0xFA000
#define Q6AFE_LPASS_IBIT_CLK_768_KHZ 0xBB800
#define Q6AFE_LPASS_IBIT_CLK_512_KHZ 0x7D000
+#define Q6AFE_LPASS_IBIT_CLK_256_KHZ 0x3E800
#define Q6AFE_LPASS_IBIT_CLK_DISABLE 0x0
/* Supported LPASS CLK sources */
diff --git a/include/sound/voice_svc.h b/include/sound/voice_svc.h
new file mode 100644
index 0000000..7045018
--- /dev/null
+++ b/include/sound/voice_svc.h
@@ -0,0 +1,46 @@
+#ifndef __VOICE_SVC_H__
+#define __VOICE_SVC_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define VOICE_SVC_DRIVER_NAME "voice_svc"
+
+#define VOICE_SVC_MVM_STR "MVM"
+#define VOICE_SVC_CVS_STR "CVS"
+#define MAX_APR_SERVICE_NAME_LEN 64
+
+struct voice_svc_register {
+ char svc_name[MAX_APR_SERVICE_NAME_LEN];
+ __u32 src_port;
+ __u8 reg_flag;
+};
+
+struct voice_svc_cmd_response {
+ __u32 src_port;
+ __u32 dest_port;
+ __u32 token;
+ __u32 opcode;
+ __u32 payload_size;
+ __u8 payload[0];
+};
+
+struct voice_svc_cmd_request {
+ char svc_name[MAX_APR_SERVICE_NAME_LEN];
+ __u32 src_port;
+ __u32 dest_port;
+ __u32 token;
+ __u32 opcode;
+ __u32 payload_size;
+ __u8 payload[0];
+};
+
+#define VOICE_SVC_MAGIC 'N'
+
+#define SNDRV_VOICE_SVC_REGISTER_SVC _IOWR(VOICE_SVC_MAGIC, \
+ 0x01, struct voice_svc_register)
+#define SNDRV_VOICE_SVC_CMD_RESPONSE _IOWR(VOICE_SVC_MAGIC, \
+ 0x02, struct voice_svc_cmd_response)
+#define SNDRV_VOICE_SVC_CMD_REQUEST _IOWR(VOICE_SVC_MAGIC, \
+ 0x03, struct voice_svc_cmd_request)
+#endif
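A hedged user-space sketch of registering an MVM client through the new header; the device node path and the meaning of reg_flag = 1 are assumptions, everything else comes from the header above:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/voice_svc.h>

/* Illustrative: open the voice service node and register for MVM traffic. */
static int example_register_mvm(void)
{
	struct voice_svc_register reg = {
		.src_port = 1,	/* arbitrary client port for illustration */
		.reg_flag = 1,	/* assumed: 1 = register, 0 = deregister */
	};
	int fd = open("/dev/voice_svc", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return -1;

	strncpy(reg.svc_name, VOICE_SVC_MVM_STR, sizeof(reg.svc_name) - 1);

	if (ioctl(fd, SNDRV_VOICE_SVC_REGISTER_SVC, &reg) < 0) {
		close(fd);
		return -1;
	}

	return fd;
}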
diff --git a/include/trace/events/ice40.h b/include/trace/events/ice40.h
new file mode 100644
index 0000000..c0649a8
--- /dev/null
+++ b/include/trace/events/ice40.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ice40
+
+#if !defined(_TRACE_ICE40_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ICE40_H
+
+#include <linux/tracepoint.h>
+#include <linux/usb.h>
+
+TRACE_EVENT(ice40_reg_write,
+
+ TP_PROTO(u8 addr, u8 val, u8 cmd0, u8 cmd1, int ret),
+
+ TP_ARGS(addr, val, cmd0, cmd1, ret),
+
+ TP_STRUCT__entry(
+ __field(u8, addr)
+ __field(u8, val)
+ __field(u8, cmd0)
+ __field(u8, cmd1)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->val = val;
+ __entry->cmd0 = cmd0;
+ __entry->cmd1 = cmd1;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("addr = %x val = %x cmd0 = %x cmd1 = %x ret = %d",
+ __entry->addr, __entry->val, __entry->cmd0,
+ __entry->cmd1, __entry->ret)
+);
+
+TRACE_EVENT(ice40_reg_read,
+
+ TP_PROTO(u8 addr, u8 cmd0, int ret),
+
+ TP_ARGS(addr, cmd0, ret),
+
+ TP_STRUCT__entry(
+ __field(u8, addr)
+ __field(u8, cmd0)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->cmd0 = cmd0;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("addr = %x cmd0 = %x ret = %x", __entry->addr,
+ __entry->cmd0, __entry->ret)
+);
+
+TRACE_EVENT(ice40_hub_control,
+
+ TP_PROTO(u16 req, u16 val, u16 index, u16 len, int ret),
+
+ TP_ARGS(req, val, index, len, ret),
+
+ TP_STRUCT__entry(
+ __field(u16, req)
+ __field(u16, val)
+ __field(u16, index)
+ __field(u16, len)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->val = val;
+ __entry->index = index;
+ __entry->len = len;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("req = %x val = %x index = %x len = %x ret = %d",
+ __entry->req, __entry->val, __entry->index,
+ __entry->len, __entry->ret)
+);
+
+TRACE_EVENT(ice40_ep0,
+
+ TP_PROTO(const char *state),
+
+ TP_ARGS(state),
+
+ TP_STRUCT__entry(
+ __string(state, state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(state, state);
+ ),
+
+ TP_printk("ep0 state: %s", __get_str(state))
+);
+
+TRACE_EVENT(ice40_urb_enqueue,
+
+ TP_PROTO(struct urb *urb),
+
+ TP_ARGS(urb),
+
+ TP_STRUCT__entry(
+ __field(u16, epnum)
+ __field(u8, dir)
+ __field(u8, type)
+ __field(u32, len)
+ ),
+
+ TP_fast_assign(
+ __entry->epnum = usb_pipeendpoint(urb->pipe);
+ __entry->dir = usb_urb_dir_in(urb);
+ __entry->type = usb_pipebulk(urb->pipe);
+ __entry->len = urb->transfer_buffer_length;
+ ),
+
+ TP_printk("URB_LOG: E: ep %d %s %s len %d", __entry->epnum,
+ __entry->dir ? "In" : "Out",
+ __entry->type ? "Bulk" : "ctrl",
+ __entry->len)
+);
+
+TRACE_EVENT(ice40_urb_dequeue,
+
+ TP_PROTO(struct urb *urb),
+
+ TP_ARGS(urb),
+
+ TP_STRUCT__entry(
+ __field(u16, epnum)
+ __field(u8, dir)
+ __field(u8, type)
+ __field(u32, len)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->epnum = usb_pipeendpoint(urb->pipe);
+ __entry->dir = usb_urb_dir_in(urb);
+ __entry->type = usb_pipebulk(urb->pipe);
+ __entry->len = urb->transfer_buffer_length;
+ __entry->reason = urb->unlinked;
+ ),
+
+ TP_printk("URB_LOG: D: ep %d %s %s len %d reason %d",
+ __entry->epnum,
+ __entry->dir ? "In" : "Out",
+ __entry->type ? "Bulk" : "ctrl",
+ __entry->len, __entry->reason)
+);
+
+TRACE_EVENT(ice40_urb_done,
+
+ TP_PROTO(struct urb *urb, int result),
+
+ TP_ARGS(urb, result),
+
+ TP_STRUCT__entry(
+ __field(int, result)
+ __field(u16, epnum)
+ __field(u8, dir)
+ __field(u8, type)
+ __field(u32, len)
+ __field(u32, actual)
+ ),
+
+ TP_fast_assign(
+ __entry->result = result;
+ __entry->epnum = usb_pipeendpoint(urb->pipe);
+ __entry->dir = usb_urb_dir_in(urb);
+ __entry->type = usb_pipebulk(urb->pipe);
+ __entry->len = urb->transfer_buffer_length;
+ __entry->actual = urb->actual_length;
+ ),
+
+ TP_printk("URB_LOG: C: ep %d %s %s len %d actual %d result %d",
+ __entry->epnum, __entry->dir ? "In" : "Out",
+ __entry->type ? "Bulk" : "ctrl", __entry->len,
+ __entry->actual, __entry->result)
+);
+
+TRACE_EVENT(ice40_bus_suspend,
+
+ TP_PROTO(u8 status),
+
+ TP_ARGS(status),
+
+ TP_STRUCT__entry(
+ __field(u8, status)
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ ),
+
+ TP_printk("bus_suspend status %d", __entry->status)
+);
+
+TRACE_EVENT(ice40_bus_resume,
+
+ TP_PROTO(u8 status),
+
+ TP_ARGS(status),
+
+ TP_STRUCT__entry(
+ __field(u8, status)
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ ),
+
+ TP_printk("bus_resume status %d", __entry->status)
+);
+
+TRACE_EVENT(ice40_setup,
+
+ TP_PROTO(const char *token, int ret),
+
+ TP_ARGS(token, ret),
+
+ TP_STRUCT__entry(
+ __string(token, token)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __assign_str(token, token);
+ __entry->ret = ret;
+ ),
+
+ TP_printk("Trace: SETUP %s ret %d",
+ __get_str(token), __entry->ret)
+);
+
+TRACE_EVENT(ice40_in,
+
+ TP_PROTO(u16 ep, const char *token, u8 len, u8 expected, int ret),
+
+ TP_ARGS(ep, token, len, expected, ret),
+
+ TP_STRUCT__entry(
+ __field(u16, ep)
+ __string(token, token)
+ __field(u8, len)
+ __field(u8, expected)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->ep = ep;
+ __assign_str(token, token);
+ __entry->len = len;
+ __entry->expected = expected;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("Trace: %d IN %s len %d expected %d ret %d",
+ __entry->ep, __get_str(token),
+ __entry->len, __entry->expected,
+ __entry->ret)
+);
+
+TRACE_EVENT(ice40_out,
+
+ TP_PROTO(u16 ep, const char *token, u8 len, int ret),
+
+ TP_ARGS(ep, token, len, ret),
+
+ TP_STRUCT__entry(
+ __field(u16, ep)
+ __string(token, token)
+ __field(u8, len)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->ep = ep;
+ __assign_str(token, token);
+ __entry->len = len;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("Trace: %d OUT %s len %d ret %d",
+ __entry->ep, __get_str(token),
+ __entry->len, __entry->ret)
+);
+#endif /* if !defined(_TRACE_ICE40_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 31b6f25..e10e171 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -35,6 +35,7 @@
u64 elapsed_csecs64;
unsigned int elapsed_csecs;
bool wakeup = false;
+ int sleep_usecs = USEC_PER_MSEC;
do_gettimeofday(&start);
@@ -81,9 +82,12 @@
/*
* We need to retry, but first give the freezing tasks some
- * time to enter the regrigerator.
+ * time to enter the refrigerator. Start with an initial
+ * 1 ms sleep followed by exponential backoff until 8 ms.
*/
- msleep(10);
+ usleep_range(sleep_usecs / 2, sleep_usecs);
+ if (sleep_usecs < 8 * USEC_PER_MSEC)
+ sleep_usecs *= 2;
}
do_gettimeofday(&end);
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 0218f4b..fd699ca 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -177,7 +177,7 @@
struct gen_pool_chunk *chunk;
int nbits = size >> pool->min_alloc_order;
int nbytes = sizeof(struct gen_pool_chunk) +
- (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
+ BITS_TO_LONGS(nbits) * sizeof(long);
if (nbytes <= PAGE_SIZE)
chunk = kmalloc_node(nbytes, __GFP_ZERO, nid);
diff --git a/mm/compaction.c b/mm/compaction.c
index 673142d..35bb243 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -243,7 +243,6 @@
{
int nr_scanned = 0, total_isolated = 0;
struct page *cursor, *valid_page = NULL;
- unsigned long nr_strict_required = end_pfn - blockpfn;
unsigned long flags;
bool locked = false;
@@ -256,11 +255,12 @@
nr_scanned++;
if (!pfn_valid_within(blockpfn))
- continue;
+ goto isolate_fail;
+
if (!valid_page)
valid_page = page;
if (!PageBuddy(page))
- continue;
+ goto isolate_fail;
/*
* The zone lock must be held to isolate freepages.
@@ -281,12 +281,10 @@
/* Recheck this is a buddy page under lock */
if (!PageBuddy(page))
- continue;
+ goto isolate_fail;
/* Found a free page, break it into order-0 pages */
isolated = split_free_page(page);
- if (!isolated && strict)
- break;
total_isolated += isolated;
for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist);
@@ -297,7 +295,13 @@
if (isolated) {
blockpfn += isolated - 1;
cursor += isolated - 1;
+ continue;
}
+
+isolate_fail:
+ if (strict)
+ break;
+
}
trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
@@ -307,7 +311,7 @@
* pages requested were isolated. If there were any failures, 0 is
* returned and CMA will fail.
*/
- if (strict && nr_strict_required > total_isolated)
+ if (strict && blockpfn < end_pfn)
total_isolated = 0;
if (locked)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 7f38d35..a8d7ed0 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -896,13 +896,13 @@
sin6->sin6_port = 0;
sin6->sin6_addr = ip6->saddr;
+ sin6->sin6_flowinfo = 0;
if (np->sndflow)
sin6->sin6_flowinfo =
*(__be32 *)ip6 & IPV6_FLOWINFO_MASK;
- if (__ipv6_addr_needs_scope_id(
- ipv6_addr_type(&sin6->sin6_addr)))
- sin6->sin6_scope_id = IP6CB(skb)->iif;
+ sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
+ IP6CB(skb)->iif);
if (inet6_sk(sk)->rxopt.all)
pingv6_ops.datagram_recv_ctl(sk, msg, skb);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 7a7724d..6bd622f 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -36,6 +36,10 @@
static int ip_ttl_max = 255;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static int tcp_delack_seg_min = TCP_DELACK_MIN;
+static int tcp_delack_seg_max = 60;
+static int tcp_use_userconfig_min;
+static int tcp_use_userconfig_max = 1;
/* Update system visible IP port range */
static void set_local_port_range(int range[2])
@@ -699,6 +703,25 @@
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero
},
+ {
+ .procname = "tcp_delack_seg",
+ .data = &sysctl_tcp_delack_seg,
+ .maxlen = sizeof(sysctl_tcp_delack_seg),
+ .mode = 0644,
+ .proc_handler = tcp_proc_delayed_ack_control,
+ .extra1 = &tcp_delack_seg_min,
+ .extra2 = &tcp_delack_seg_max,
+ },
+ {
+ .procname = "tcp_use_userconfig",
+ .data = &sysctl_tcp_use_userconfig,
+ .maxlen = sizeof(sysctl_tcp_use_userconfig),
+ .mode = 0644,
+ .proc_handler = tcp_use_userconfig_sysctl_handler,
+ .extra1 = &tcp_use_userconfig_min,
+ .extra2 = &tcp_use_userconfig_max,
+ },
+
{ }
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 74a286c..706899e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -294,6 +294,12 @@
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);
+int sysctl_tcp_delack_seg __read_mostly = TCP_DELACK_SEG;
+EXPORT_SYMBOL(sysctl_tcp_delack_seg);
+
+int sysctl_tcp_use_userconfig __read_mostly;
+EXPORT_SYMBOL(sysctl_tcp_use_userconfig);
+
atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
@@ -1213,8 +1219,11 @@
/* Delayed ACKs frequently hit locked sockets during bulk
* receive. */
if (icsk->icsk_ack.blocked ||
- /* Once-per-two-segments ACK was not sent by tcp_input.c */
- tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
+ /* Once-per-sysctl_tcp_delack_seg segments
+ * ACK was not sent by tcp_input.c
+ */
+ tp->rcv_nxt - tp->rcv_wup > (icsk->icsk_ack.rcv_mss) *
+ sysctl_tcp_delack_seg ||
/*
* If this read emptied read buffer, we send ACK, if
* connection is not bidirectional, user drained
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 257b617..7c3612b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5047,7 +5047,8 @@
struct tcp_sock *tp = tcp_sk(sk);
/* More than one full frame received... */
- if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
+ if (((tp->rcv_nxt - tp->rcv_wup) > (inet_csk(sk)->icsk_ack.rcv_mss) *
+ sysctl_tcp_delack_seg &&
/* ... and right edge of window advances far enough.
* (tcp_recvmsg() will send ACK otherwise). Or...
*/
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 34d4a02..d1b4792 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -34,7 +34,39 @@
static void tcp_write_timer(unsigned long);
static void tcp_delack_timer(unsigned long);
-static void tcp_keepalive_timer (unsigned long data);
+static void tcp_keepalive_timer(unsigned long data);
+
+/* Reset tcp_ack related sysctls to their defaults when the master control is cleared */
+void set_tcp_default(void)
+{
+ sysctl_tcp_delack_seg = TCP_DELACK_SEG;
+}
+
+/* sysctl handler for the tcp_ack related tcp_delack_seg control */
+int tcp_proc_delayed_ack_control(ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+ /* The ret value will be 0 if the input validation is successful
+ * and the values are written to the sysctl table. If not, the stack
+ * will continue to work with the currently configured values.
+ */
+ return ret;
+}
+
+/* sysctl handler for the tcp_ack related master control (tcp_use_userconfig) */
+int tcp_use_userconfig_sysctl_handler(ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+ if (write && ret == 0) {
+ if (!sysctl_tcp_use_userconfig)
+ set_tcp_default();
+ }
+ return ret;
+}
void tcp_init_xmit_timers(struct sock *sk)
{
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index c27f165..f6c74c9 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -18,7 +18,7 @@
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 80), (3, 17)
(5250 - 5330 @ 80), (3, 24), DFS
- (5490 - 5710 @ 80), (3, 24), DFS
+ (5490 - 5730 @ 80), (3, 24), DFS
(5735 - 5835 @ 80), (3, 30)
country AL:
@@ -42,7 +42,7 @@
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 80), (3, 17)
(5250 - 5330 @ 80), (3, 24), DFS
- (5490 - 5710 @ 80), (3, 24), DFS
+ (5490 - 5730 @ 80), (3, 24), DFS
(5735 - 5835 @ 80), (3, 30)
country AS:
@@ -64,7 +64,7 @@
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 80), (3, 17)
(5250 - 5330 @ 80), (3, 24), DFS
- (5490 - 5710 @ 80), (3, 24), DFS
+ (5490 - 5730 @ 80), (3, 24), DFS
(5735 - 5835 @ 80), (3, 30)
country AW:
@@ -139,7 +139,7 @@
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 80), (3, 17)
(5250 - 5330 @ 80), (3, 24), DFS
- (5490 - 5710 @ 80), (3, 24), DFS
+ (5490 - 5730 @ 80), (3, 24), DFS
(5735 - 5835 @ 80), (3, 30)
country BS:
@@ -163,7 +163,7 @@
(2402 - 2472 @ 40), (N/A, 27)
(5170 - 5250 @ 80), (3, 17)
(5250 - 5330 @ 80), (3, 24), DFS
- (5490 - 5710 @ 80), (3, 24), DFS
+ (5490 - 5730 @ 80), (3, 24), DFS
(5735 - 5835 @ 80), (3, 30)
country CH: DFS-ETSI
@@ -196,14 +196,14 @@
(2402 - 2472 @ 40), (N/A, 27)
(5170 - 5250 @ 80), (3, 17)
(5250 - 5330 @ 80), (3, 24), DFS
- (5490 - 5710 @ 80), (3, 24), DFS
+ (5490 - 5730 @ 80), (3, 24), DFS
(5735 - 5835 @ 80), (3, 30)
country CR:
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 20), (3, 17)
(5250 - 5330 @ 20), (3, 24), DFS
- (5490 - 5710 @ 20), (3, 24), DFS
+ (5490 - 5730 @ 20), (3, 24), DFS
(5735 - 5835 @ 20), (3, 30)
country CY: DFS-ETSI
@@ -271,7 +271,7 @@
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 20), (3, 17)
(5250 - 5330 @ 20), (3, 24), DFS
- (5490 - 5710 @ 20), (3, 24), DFS
+ (5490 - 5730 @ 20), (3, 24), DFS
(5735 - 5835 @ 20), (3, 30)
country EE: DFS-ETSI
@@ -342,7 +342,7 @@
(2402 - 2472 @ 40), (3, 30)
(5170 - 5250 @ 80), (6, 17)
(5250 - 5330 @ 80), (6, 24), DFS
- (5490 - 5710 @ 80), (6, 24), DFS
+ (5490 - 5730 @ 80), (6, 24), DFS
(5735 - 5835 @ 80), (6, 30)
country GP:
@@ -375,21 +375,21 @@
(2402 - 2472 @ 40), (3, 30)
(5170 - 5250 @ 20), (6, 17)
(5250 - 5330 @ 20), (6, 24), DFS
- (5490 - 5710 @ 20), (6, 24), DFS
+ (5490 - 5730 @ 20), (6, 24), DFS
(5735 - 5835 @ 20), (6, 30)
country HN:
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 80), (6, 17)
(5250 - 5330 @ 80), (6, 24), DFS
- (5490 - 5710 @ 80), (6, 24), DFS
+ (5490 - 5730 @ 80), (6, 24), DFS
(5735 - 5835 @ 80), (6, 30)
country HK:
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 80), (6, 17)
(5250 - 5330 @ 80), (6, 24), DFS
- (5490 - 5710 @ 80), (6, 24), DFS
+ (5490 - 5730 @ 80), (6, 24), DFS
(5735 - 5835 @ 80), (6, 30)
country HR: DFS-ETSI
@@ -462,7 +462,7 @@
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 80), (6, 17)
(5250 - 5330 @ 80), (6, 24), DFS
- (5490 - 5710 @ 80), (6, 24), DFS
+ (5490 - 5730 @ 80), (6, 24), DFS
(5735 - 5835 @ 80), (6, 30)
country JP:
@@ -495,7 +495,7 @@
(2402 - 2472 @ 40), (N/A, 27)
(5170 - 5250 @ 80), (3, 17)
(5250 - 5330 @ 80), (3, 24), DFS
- (5490 - 5710 @ 80), (3, 24), DFS
+ (5490 - 5730 @ 80), (3, 24), DFS
(5735 - 5835 @ 80), (3, 30)
country KP:
@@ -537,7 +537,7 @@
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 20), (3, 17)
(5250 - 5330 @ 20), (3, 20), DFS
- (5490 - 5710 @ 20), (3, 20), DFS
+ (5490 - 5730 @ 20), (3, 20), DFS
(5735 - 5835 @ 20), (3, 30)
country LT: DFS-ETSI
@@ -634,7 +634,7 @@
(2402 - 2472 @ 40), (3, 27)
(5170 - 5250 @ 80), (3, 17)
(5250 - 5330 @ 80), (3, 24), DFS
- (5490 - 5710 @ 80), (3, 24), DFS
+ (5490 - 5730 @ 80), (3, 24), DFS
(5735 - 5835 @ 80), (3, 30)
country MW:
@@ -681,7 +681,7 @@
(2402 - 2482 @ 40), (N/A, 30)
(5170 - 5250 @ 80), (6, 17)
(5250 - 5330 @ 80), (6, 24), DFS
- (5490 - 5710 @ 80), (6, 24), DFS
+ (5490 - 5730 @ 80), (6, 24), DFS
(5735 - 5835 @ 80), (6, 30)
country OM:
@@ -700,7 +700,7 @@
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 80), (6, 20)
(5250 - 5330 @ 80), (6, 20), DFS
- (5490 - 5710 @ 80), (6, 27), DFS
+ (5490 - 5730 @ 80), (6, 27), DFS
(5735 - 5835 @ 80), (6, 30)
country PF:
@@ -720,7 +720,7 @@
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 80), (6, 17)
(5250 - 5330 @ 80), (6, 24), DFS
- (5490 - 5710 @ 80), (6, 24), DFS
+ (5490 - 5730 @ 80), (6, 24), DFS
(5735 - 5835 @ 80), (6, 30)
country PK:
@@ -747,7 +747,7 @@
(2402 - 2472 @ 40), (3, 30)
(5170 - 5250 @ 80), (6, 17)
(5250 - 5330 @ 80), (6, 24), DFS
- (5490 - 5710 @ 80), (6, 24), DFS
+ (5490 - 5730 @ 80), (6, 24), DFS
(5735 - 5835 @ 80), (6, 30)
country PY:
@@ -819,7 +819,7 @@
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 80), (6, 17)
(5250 - 5330 @ 80), (6, 24), DFS
- (5490 - 5710 @ 80), (6, 24), DFS
+ (5490 - 5730 @ 80), (6, 24), DFS
(5735 - 5835 @ 80), (6, 30)
country SI: DFS-ETSI
@@ -856,21 +856,21 @@
country TW:
(2402 - 2472 @ 40), (3, 27)
(5270 - 5330 @ 40), (6, 17), DFS
- (5490 - 5710 @ 80), (6, 30), DFS
+ (5490 - 5730 @ 80), (6, 30), DFS
(5735 - 5815 @ 80), (6, 30)
country TH:
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 80), (3, 17)
(5250 - 5330 @ 80), (3, 24), DFS
- (5490 - 5710 @ 80), (3, 24), DFS
+ (5490 - 5730 @ 80), (3, 24), DFS
(5735 - 5835 @ 80), (3, 30)
country TT:
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 40), (3, 17)
(5250 - 5330 @ 40), (3, 20), DFS
- (5490 - 5710 @ 40), (3, 20), DFS
+ (5490 - 5730 @ 40), (3, 20), DFS
(5735 - 5835 @ 40), (3, 30)
country TN:
@@ -914,8 +914,8 @@
(5170 - 5250 @ 80), (3, 17)
(5250 - 5330 @ 80), (3, 24), DFS
(5490 - 5600 @ 80), (3, 24), DFS
- (5650 - 5710 @ 40), (3, 24), DFS
- (5710 - 5835 @ 80), (3, 30)
+ (5650 - 5730 @ 40), (3, 24), DFS
+ (5735 - 5835 @ 80), (3, 30)
# 60g band
# reference: http://cfr.regstoday.com/47cfr15.aspx#47_CFR_15p255
# channels 1,2,3, EIRP=40dBm(43dBm peak)
@@ -935,14 +935,14 @@
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 40), (3, 17)
(5250 - 5330 @ 40), (3, 20), DFS
- (5490 - 5710 @ 40), (3, 20), DFS
+ (5490 - 5730 @ 40), (3, 20), DFS
(5735 - 5835 @ 40), (3, 30)
country UZ:
(2402 - 2472 @ 40), (3, 27)
(5170 - 5250 @ 40), (3, 17)
(5250 - 5330 @ 40), (3, 20), DFS
- (5490 - 5710 @ 40), (3, 20), DFS
+ (5490 - 5730 @ 40), (3, 20), DFS
(5735 - 5835 @ 40), (3, 30)
country VE:
@@ -956,7 +956,7 @@
(2402 - 2482 @ 40), (N/A, 20)
(5170 - 5250 @ 80), (3, 17)
(5250 - 5330 @ 80), (3, 24), DFS
- (5490 - 5710 @ 80), (3, 24), DFS
+ (5490 - 5730 @ 80), (3, 24), DFS
(5735 - 5835 @ 80), (3, 30)
country VI:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index e90ef68..b73cfe5 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5431,6 +5431,8 @@
return err;
}
+#endif
+
struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
enum nl80211_commands cmd,
enum nl80211_attrs attr,
@@ -5478,7 +5480,7 @@
nl80211_testmode_mcgrp.id, gfp);
}
EXPORT_SYMBOL(__cfg80211_send_event_skb);
-#endif
+
static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
{
diff --git a/sound/soc/codecs/msm8x10-wcd.c b/sound/soc/codecs/msm8x10-wcd.c
index 452bbab..c73b2c8 100644
--- a/sound/soc/codecs/msm8x10-wcd.c
+++ b/sound/soc/codecs/msm8x10-wcd.c
@@ -1200,6 +1200,13 @@
MSM8X10_WCD_A_CDC_TX2_VOL_CTL_GAIN,
-84, 40, digital_gain),
+ SOC_SINGLE_TLV("ADC1 Volume", MSM8X10_WCD_A_TX_1_EN, 2,
+ 19, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC2 Volume", MSM8X10_WCD_A_TX_2_EN, 2,
+ 19, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC3 Volume", MSM8X10_WCD_A_TX_3_EN, 2,
+ 19, 0, analog_gain),
+
SOC_SINGLE_S8_TLV("IIR1 INP1 Volume",
MSM8X10_WCD_A_CDC_IIR1_GAIN_B1_CTL,
-84, 40, digital_gain),
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index dd50020..95f2041 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -43,6 +43,8 @@
#define TAPAN_HPH_PA_SETTLE_COMP_OFF 13000
#define DAPM_MICBIAS2_EXTERNAL_STANDALONE "MIC BIAS2 External Standalone"
+#define TAPAN_VALIDATE_RX_SBPORT_RANGE(port) ((port >= 16) && (port <= 20))
+#define TAPAN_CONVERT_RX_SBPORT_ID(port) (port - 16) /* RX1 port ID = 0 */
#define TAPAN_VDD_CX_OPTIMAL_UA 10000
#define TAPAN_VDD_CX_SLEEP_UA 2000
@@ -1110,14 +1112,14 @@
SOC_ENUM_EXT("EAR PA Gain", tapan_ear_pa_gain_enum[0],
tapan_pa_gain_get, tapan_pa_gain_put),
- SOC_SINGLE_TLV("HPHL Volume", TAPAN_A_RX_HPH_L_GAIN, 0, 14, 1,
+ SOC_SINGLE_TLV("HPHL Volume", TAPAN_A_RX_HPH_L_GAIN, 0, 20, 1,
line_gain),
- SOC_SINGLE_TLV("HPHR Volume", TAPAN_A_RX_HPH_R_GAIN, 0, 14, 1,
+ SOC_SINGLE_TLV("HPHR Volume", TAPAN_A_RX_HPH_R_GAIN, 0, 20, 1,
line_gain),
- SOC_SINGLE_TLV("LINEOUT1 Volume", TAPAN_A_RX_LINE_1_GAIN, 0, 14, 1,
+ SOC_SINGLE_TLV("LINEOUT1 Volume", TAPAN_A_RX_LINE_1_GAIN, 0, 20, 1,
line_gain),
- SOC_SINGLE_TLV("LINEOUT2 Volume", TAPAN_A_RX_LINE_2_GAIN, 0, 14, 1,
+ SOC_SINGLE_TLV("LINEOUT2 Volume", TAPAN_A_RX_LINE_2_GAIN, 0, 20, 1,
line_gain),
SOC_SINGLE_TLV("SPK DRV Volume", TAPAN_A_SPKR_DRV_GAIN, 3, 8, 1,
@@ -3236,6 +3238,8 @@
}
#define TAPAN_FORMATS (SNDRV_PCM_FMTBIT_S16_LE)
+#define TAPAN_FORMATS_S16_S24_LE (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
static int tapan_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
@@ -3643,6 +3647,68 @@
return 0;
}
+static void tapan_set_rxsb_port_format(struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct tapan_priv *tapan_p = snd_soc_codec_get_drvdata(codec);
+ struct wcd9xxx_codec_dai_data *cdc_dai;
+ struct wcd9xxx_ch *ch;
+ int port;
+ u8 bit_sel;
+ u16 sb_ctl_reg, field_shift;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ bit_sel = 0x2;
+ tapan_p->dai[dai->id].bit_width = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ bit_sel = 0x0;
+ tapan_p->dai[dai->id].bit_width = 24;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid format %x\n",
+ params_format(params));
+ return;
+ }
+
+ cdc_dai = &tapan_p->dai[dai->id];
+
+ list_for_each_entry(ch, &cdc_dai->wcd9xxx_ch_list, list) {
+ port = wcd9xxx_get_slave_port(ch->ch_num);
+
+ if (IS_ERR_VALUE(port) ||
+ !TAPAN_VALIDATE_RX_SBPORT_RANGE(port)) {
+ dev_warn(codec->dev,
+ "%s: invalid port ID %d returned for RX DAI\n",
+ __func__, port);
+ return;
+ }
+
+ port = TAPAN_CONVERT_RX_SBPORT_ID(port);
+
+ if (port <= 3) {
+ sb_ctl_reg = TAPAN_A_CDC_CONN_RX_SB_B1_CTL;
+ field_shift = port << 1;
+ } else if (port <= 4) {
+ sb_ctl_reg = TAPAN_A_CDC_CONN_RX_SB_B2_CTL;
+ field_shift = (port - 4) << 1;
+ } else { /* should not happen */
+ dev_warn(codec->dev,
+ "%s: bad port ID %d\n", __func__, port);
+ return;
+ }
+
+ dev_dbg(codec->dev, "%s: sb_ctl_reg %x field_shift %x\n"
+ "bit_sel %x\n", __func__, sb_ctl_reg, field_shift,
+ bit_sel);
+ snd_soc_update_bits(codec, sb_ctl_reg, 0x3 << field_shift,
+ bit_sel << field_shift);
+ }
+}
+
+
static int tapan_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -3755,29 +3821,7 @@
snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_I2S_CTL,
0x03, (rx_fs_rate >> 0x05));
} else {
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- snd_soc_update_bits(codec,
- TAPAN_A_CDC_CONN_RX_SB_B1_CTL,
- 0xFF, 0xAA);
- snd_soc_update_bits(codec,
- TAPAN_A_CDC_CONN_RX_SB_B2_CTL,
- 0xFF, 0x2A);
- tapan->dai[dai->id].bit_width = 16;
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- snd_soc_update_bits(codec,
- TAPAN_A_CDC_CONN_RX_SB_B1_CTL,
- 0xFF, 0x00);
- snd_soc_update_bits(codec,
- TAPAN_A_CDC_CONN_RX_SB_B2_CTL,
- 0xFF, 0x00);
- tapan->dai[dai->id].bit_width = 24;
- break;
- default:
- dev_err(codec->dev, "Invalid format\n");
- break;
- }
+ tapan_set_rxsb_port_format(params, dai);
tapan->dai[dai->id].rate = params_rate(params);
}
break;
@@ -3894,7 +3938,7 @@
.playback = {
.stream_name = "AIF1 Playback",
.rates = WCD9306_RATES,
- .formats = TAPAN_FORMATS,
+ .formats = TAPAN_FORMATS_S16_S24_LE,
.rate_max = 192000,
.rate_min = 8000,
.channels_min = 1,
@@ -3922,7 +3966,7 @@
.playback = {
.stream_name = "AIF2 Playback",
.rates = WCD9306_RATES,
- .formats = TAPAN_FORMATS,
+ .formats = TAPAN_FORMATS_S16_S24_LE,
.rate_min = 8000,
.rate_max = 192000,
.channels_min = 1,
@@ -3964,7 +4008,7 @@
.playback = {
.stream_name = "AIF3 Playback",
.rates = WCD9306_RATES,
- .formats = TAPAN_FORMATS,
+ .formats = TAPAN_FORMATS_S16_S24_LE,
.rate_min = 8000,
.rate_max = 192000,
.channels_min = 1,
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 5dedec8..b72590f 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -1652,6 +1652,21 @@
"DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
};
+static const char * const iir_inp2_text[] = {
+ "ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
+ "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
+};
+
+static const char * const iir_inp3_text[] = {
+ "ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
+ "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
+};
+
+static const char * const iir_inp4_text[] = {
+ "ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
+ "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
+};
+
static const struct soc_enum rx_mix1_inp1_chain_enum =
SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_RX1_B1_CTL, 0, 12, rx_mix1_text);
@@ -1800,6 +1815,24 @@
static const struct soc_enum iir2_inp1_mux_enum =
SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B1_CTL, 0, 18, iir_inp1_text);
+static const struct soc_enum iir1_inp2_mux_enum =
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ1_B2_CTL, 0, 18, iir_inp2_text);
+
+static const struct soc_enum iir2_inp2_mux_enum =
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B2_CTL, 0, 18, iir_inp2_text);
+
+static const struct soc_enum iir1_inp3_mux_enum =
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ1_B3_CTL, 0, 18, iir_inp3_text);
+
+static const struct soc_enum iir2_inp3_mux_enum =
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B3_CTL, 0, 18, iir_inp3_text);
+
+static const struct soc_enum iir1_inp4_mux_enum =
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ1_B4_CTL, 0, 18, iir_inp4_text);
+
+static const struct soc_enum iir2_inp4_mux_enum =
+ SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B4_CTL, 0, 18, iir_inp4_text);
+
static const struct snd_kcontrol_new rx_mix1_inp1_mux =
SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
@@ -2025,6 +2058,24 @@
static const struct snd_kcontrol_new iir2_inp1_mux =
SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
+static const struct snd_kcontrol_new iir1_inp2_mux =
+ SOC_DAPM_ENUM("IIR1 INP2 Mux", iir1_inp2_mux_enum);
+
+static const struct snd_kcontrol_new iir2_inp2_mux =
+ SOC_DAPM_ENUM("IIR2 INP2 Mux", iir2_inp2_mux_enum);
+
+static const struct snd_kcontrol_new iir1_inp3_mux =
+ SOC_DAPM_ENUM("IIR1 INP3 Mux", iir1_inp3_mux_enum);
+
+static const struct snd_kcontrol_new iir2_inp3_mux =
+ SOC_DAPM_ENUM("IIR2 INP3 Mux", iir2_inp3_mux_enum);
+
+static const struct snd_kcontrol_new iir1_inp4_mux =
+ SOC_DAPM_ENUM("IIR1 INP4 Mux", iir1_inp4_mux_enum);
+
+static const struct snd_kcontrol_new iir2_inp4_mux =
+ SOC_DAPM_ENUM("IIR2 INP4 Mux", iir2_inp4_mux_enum);
+
static const struct snd_kcontrol_new anc1_mux =
SOC_DAPM_ENUM("ANC1 MUX Mux", anc1_mux_enum);
@@ -4012,6 +4063,120 @@
{"IIR2 INP1 MUX", "RX6", "SLIM RX6"},
{"IIR2 INP1 MUX", "RX7", "SLIM RX7"},
+ {"IIR1", NULL, "IIR1 INP2 MUX"},
+ {"IIR1 INP2 MUX", "DEC1", "DEC1 MUX"},
+ {"IIR1 INP2 MUX", "DEC2", "DEC2 MUX"},
+ {"IIR1 INP2 MUX", "DEC3", "DEC3 MUX"},
+ {"IIR1 INP2 MUX", "DEC4", "DEC4 MUX"},
+ {"IIR1 INP2 MUX", "DEC5", "DEC5 MUX"},
+ {"IIR1 INP2 MUX", "DEC6", "DEC6 MUX"},
+ {"IIR1 INP2 MUX", "DEC7", "DEC7 MUX"},
+ {"IIR1 INP2 MUX", "DEC8", "DEC8 MUX"},
+ {"IIR1 INP2 MUX", "DEC9", "DEC9 MUX"},
+ {"IIR1 INP2 MUX", "DEC10", "DEC10 MUX"},
+ {"IIR1 INP2 MUX", "RX1", "SLIM RX1"},
+ {"IIR1 INP2 MUX", "RX2", "SLIM RX2"},
+ {"IIR1 INP2 MUX", "RX3", "SLIM RX3"},
+ {"IIR1 INP2 MUX", "RX4", "SLIM RX4"},
+ {"IIR1 INP2 MUX", "RX5", "SLIM RX5"},
+ {"IIR1 INP2 MUX", "RX6", "SLIM RX6"},
+ {"IIR1 INP2 MUX", "RX7", "SLIM RX7"},
+
+ {"IIR2", NULL, "IIR2 INP2 MUX"},
+ {"IIR2 INP2 MUX", "DEC1", "DEC1 MUX"},
+ {"IIR2 INP2 MUX", "DEC2", "DEC2 MUX"},
+ {"IIR2 INP2 MUX", "DEC3", "DEC3 MUX"},
+ {"IIR2 INP2 MUX", "DEC4", "DEC4 MUX"},
+ {"IIR2 INP2 MUX", "DEC5", "DEC5 MUX"},
+ {"IIR2 INP2 MUX", "DEC6", "DEC6 MUX"},
+ {"IIR2 INP2 MUX", "DEC7", "DEC7 MUX"},
+ {"IIR2 INP2 MUX", "DEC8", "DEC8 MUX"},
+ {"IIR2 INP2 MUX", "DEC9", "DEC9 MUX"},
+ {"IIR2 INP2 MUX", "DEC10", "DEC10 MUX"},
+ {"IIR2 INP2 MUX", "RX1", "SLIM RX1"},
+ {"IIR2 INP2 MUX", "RX2", "SLIM RX2"},
+ {"IIR2 INP2 MUX", "RX3", "SLIM RX3"},
+ {"IIR2 INP2 MUX", "RX4", "SLIM RX4"},
+ {"IIR2 INP2 MUX", "RX5", "SLIM RX5"},
+ {"IIR2 INP2 MUX", "RX6", "SLIM RX6"},
+ {"IIR2 INP2 MUX", "RX7", "SLIM RX7"},
+
+ {"IIR1", NULL, "IIR1 INP3 MUX"},
+ {"IIR1 INP3 MUX", "DEC1", "DEC1 MUX"},
+ {"IIR1 INP3 MUX", "DEC2", "DEC2 MUX"},
+ {"IIR1 INP3 MUX", "DEC3", "DEC3 MUX"},
+ {"IIR1 INP3 MUX", "DEC4", "DEC4 MUX"},
+ {"IIR1 INP3 MUX", "DEC5", "DEC5 MUX"},
+ {"IIR1 INP3 MUX", "DEC6", "DEC6 MUX"},
+ {"IIR1 INP3 MUX", "DEC7", "DEC7 MUX"},
+ {"IIR1 INP3 MUX", "DEC8", "DEC8 MUX"},
+ {"IIR1 INP3 MUX", "DEC9", "DEC9 MUX"},
+ {"IIR1 INP3 MUX", "DEC10", "DEC10 MUX"},
+ {"IIR1 INP3 MUX", "RX1", "SLIM RX1"},
+ {"IIR1 INP3 MUX", "RX2", "SLIM RX2"},
+ {"IIR1 INP3 MUX", "RX3", "SLIM RX3"},
+ {"IIR1 INP3 MUX", "RX4", "SLIM RX4"},
+ {"IIR1 INP3 MUX", "RX5", "SLIM RX5"},
+ {"IIR1 INP3 MUX", "RX6", "SLIM RX6"},
+ {"IIR1 INP3 MUX", "RX7", "SLIM RX7"},
+
+ {"IIR2", NULL, "IIR2 INP3 MUX"},
+ {"IIR2 INP3 MUX", "DEC1", "DEC1 MUX"},
+ {"IIR2 INP3 MUX", "DEC2", "DEC2 MUX"},
+ {"IIR2 INP3 MUX", "DEC3", "DEC3 MUX"},
+ {"IIR2 INP3 MUX", "DEC4", "DEC4 MUX"},
+ {"IIR2 INP3 MUX", "DEC5", "DEC5 MUX"},
+ {"IIR2 INP3 MUX", "DEC6", "DEC6 MUX"},
+ {"IIR2 INP3 MUX", "DEC7", "DEC7 MUX"},
+ {"IIR2 INP3 MUX", "DEC8", "DEC8 MUX"},
+ {"IIR2 INP3 MUX", "DEC9", "DEC9 MUX"},
+ {"IIR2 INP3 MUX", "DEC10", "DEC10 MUX"},
+ {"IIR2 INP3 MUX", "RX1", "SLIM RX1"},
+ {"IIR2 INP3 MUX", "RX2", "SLIM RX2"},
+ {"IIR2 INP3 MUX", "RX3", "SLIM RX3"},
+ {"IIR2 INP3 MUX", "RX4", "SLIM RX4"},
+ {"IIR2 INP3 MUX", "RX5", "SLIM RX5"},
+ {"IIR2 INP3 MUX", "RX6", "SLIM RX6"},
+ {"IIR2 INP3 MUX", "RX7", "SLIM RX7"},
+
+ {"IIR1", NULL, "IIR1 INP4 MUX"},
+ {"IIR1 INP4 MUX", "DEC1", "DEC1 MUX"},
+ {"IIR1 INP4 MUX", "DEC2", "DEC2 MUX"},
+ {"IIR1 INP4 MUX", "DEC3", "DEC3 MUX"},
+ {"IIR1 INP4 MUX", "DEC4", "DEC4 MUX"},
+ {"IIR1 INP4 MUX", "DEC5", "DEC5 MUX"},
+ {"IIR1 INP4 MUX", "DEC6", "DEC6 MUX"},
+ {"IIR1 INP4 MUX", "DEC7", "DEC7 MUX"},
+ {"IIR1 INP4 MUX", "DEC8", "DEC8 MUX"},
+ {"IIR1 INP4 MUX", "DEC9", "DEC9 MUX"},
+ {"IIR1 INP4 MUX", "DEC10", "DEC10 MUX"},
+ {"IIR1 INP4 MUX", "RX1", "SLIM RX1"},
+ {"IIR1 INP4 MUX", "RX2", "SLIM RX2"},
+ {"IIR1 INP4 MUX", "RX3", "SLIM RX3"},
+ {"IIR1 INP4 MUX", "RX4", "SLIM RX4"},
+ {"IIR1 INP4 MUX", "RX5", "SLIM RX5"},
+ {"IIR1 INP4 MUX", "RX6", "SLIM RX6"},
+ {"IIR1 INP4 MUX", "RX7", "SLIM RX7"},
+
+ {"IIR2", NULL, "IIR2 INP4 MUX"},
+ {"IIR2 INP4 MUX", "DEC1", "DEC1 MUX"},
+ {"IIR2 INP4 MUX", "DEC2", "DEC2 MUX"},
+ {"IIR2 INP4 MUX", "DEC3", "DEC3 MUX"},
+ {"IIR2 INP4 MUX", "DEC4", "DEC4 MUX"},
+ {"IIR2 INP4 MUX", "DEC5", "DEC5 MUX"},
+ {"IIR2 INP4 MUX", "DEC6", "DEC6 MUX"},
+ {"IIR2 INP4 MUX", "DEC7", "DEC7 MUX"},
+ {"IIR2 INP4 MUX", "DEC8", "DEC8 MUX"},
+ {"IIR2 INP4 MUX", "DEC9", "DEC9 MUX"},
+ {"IIR2 INP4 MUX", "DEC10", "DEC10 MUX"},
+ {"IIR2 INP4 MUX", "RX1", "SLIM RX1"},
+ {"IIR2 INP4 MUX", "RX2", "SLIM RX2"},
+ {"IIR2 INP4 MUX", "RX3", "SLIM RX3"},
+ {"IIR2 INP4 MUX", "RX4", "SLIM RX4"},
+ {"IIR2 INP4 MUX", "RX5", "SLIM RX5"},
+ {"IIR2 INP4 MUX", "RX6", "SLIM RX6"},
+ {"IIR2 INP4 MUX", "RX7", "SLIM RX7"},
+
{"MIC BIAS1 Internal1", NULL, "LDO_H"},
{"MIC BIAS1 Internal2", NULL, "LDO_H"},
{"MIC BIAS1 External", NULL, "LDO_H"},
@@ -5667,12 +5832,36 @@
&iir1_inp1_mux, taiko_codec_iir_mux_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX_E("IIR1 INP2 MUX", TAIKO_A_CDC_IIR1_GAIN_B2_CTL, 0, 0,
+ &iir1_inp2_mux, taiko_codec_iir_mux_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX_E("IIR1 INP3 MUX", TAIKO_A_CDC_IIR1_GAIN_B3_CTL, 0, 0,
+ &iir1_inp3_mux, taiko_codec_iir_mux_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX_E("IIR1 INP4 MUX", TAIKO_A_CDC_IIR1_GAIN_B4_CTL, 0, 0,
+ &iir1_inp4_mux, taiko_codec_iir_mux_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
SND_SOC_DAPM_MIXER("IIR1", TAIKO_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0),
SND_SOC_DAPM_MUX_E("IIR2 INP1 MUX", TAIKO_A_CDC_IIR2_GAIN_B1_CTL, 0, 0,
&iir2_inp1_mux, taiko_codec_iir_mux_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX_E("IIR2 INP2 MUX", TAIKO_A_CDC_IIR2_GAIN_B2_CTL, 0, 0,
+ &iir2_inp2_mux, taiko_codec_iir_mux_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX_E("IIR2 INP3 MUX", TAIKO_A_CDC_IIR2_GAIN_B3_CTL, 0, 0,
+ &iir2_inp3_mux, taiko_codec_iir_mux_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX_E("IIR2 INP4 MUX", TAIKO_A_CDC_IIR2_GAIN_B4_CTL, 0, 0,
+ &iir2_inp4_mux, taiko_codec_iir_mux_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
SND_SOC_DAPM_MIXER("IIR2", TAIKO_A_CDC_CLK_SD_CTL, 1, 0, NULL, 0),
/* AUX PGA */
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 2b43ab6..045a6d0 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -899,6 +899,30 @@
.ops = &msm_fe_dai_ops,
.name = "LSM8",
},
+ {
+ .playback = {
+ .stream_name = "VoWLAN Playback",
+ .aif_name = "VoWLAN_DL",
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .capture = {
+ .stream_name = "VoWLAN Capture",
+ .aif_name = "VoWLAN_UL",
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &msm_fe_dai_ops,
+ .name = "VoWLAN",
+ },
};
static __devinit int msm_fe_dai_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/msm/msm8226.c b/sound/soc/msm/msm8226.c
index b512bb5..cc27fc0 100644
--- a/sound/soc/msm/msm8226.c
+++ b/sound/soc/msm/msm8226.c
@@ -32,6 +32,10 @@
#include "../codecs/wcd9xxx-common.h"
#include "../codecs/wcd9306.h"
+#define SAMPLING_RATE_48KHZ 48000
+#define SAMPLING_RATE_96KHZ 96000
+#define SAMPLING_RATE_192KHZ 192000
+
#define DRV_NAME "msm8226-asoc-tapan"
#define MSM_SLIM_0_RX_MAX_CHANNELS 2
@@ -154,6 +158,8 @@
static int ext_spk_amp_gpio = -1;
static int vdd_spkr_gpio = -1;
static int msm_proxy_rx_ch = 2;
+
+static int slim0_rx_sample_rate = SAMPLING_RATE_48KHZ;
static int slim0_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static inline int param_is_mask(int p)
@@ -388,6 +394,9 @@
static const char *const slim0_tx_ch_text[] = {"One", "Two", "Three", "Four"};
static const char *const proxy_rx_ch_text[] = {"One", "Two", "Three", "Four",
"Five", "Six", "Seven", "Eight"};
+static const char *const rx_bit_format_text[] = {"S16_LE", "S24_LE"};
+static const char *const slim0_rx_sample_rate_text[] = {"KHZ_48", "KHZ_96",
+ "KHZ_192"};
static const struct soc_enum msm_enum[] = {
SOC_ENUM_SINGLE_EXT(2, slim0_rx_ch_text),
@@ -399,6 +408,58 @@
SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
};
+static int slim0_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int sample_rate_val = 0;
+
+ switch (slim0_rx_sample_rate) {
+ case SAMPLING_RATE_192KHZ:
+ sample_rate_val = 2;
+ break;
+
+ case SAMPLING_RATE_96KHZ:
+ sample_rate_val = 1;
+ break;
+
+ case SAMPLING_RATE_48KHZ:
+ default:
+ sample_rate_val = 0;
+ break;
+ }
+
+ ucontrol->value.integer.value[0] = sample_rate_val;
+ pr_debug("%s: slim0_rx_sample_rate = %d\n", __func__,
+ slim0_rx_sample_rate);
+
+ return 0;
+}
+
+static int slim0_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: ucontrol value = %ld\n", __func__,
+ ucontrol->value.integer.value[0]);
+
+ switch (ucontrol->value.integer.value[0]) {
+ case 2:
+ slim0_rx_sample_rate = SAMPLING_RATE_192KHZ;
+ break;
+ case 1:
+ slim0_rx_sample_rate = SAMPLING_RATE_96KHZ;
+ break;
+ case 0:
+ default:
+ slim0_rx_sample_rate = SAMPLING_RATE_48KHZ;
+ break;
+ }
+
+ pr_debug("%s: slim0_rx_sample_rate = %d\n", __func__,
+ slim0_rx_sample_rate);
+
+ return 0;
+}
+
static int msm_slim_0_rx_ch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
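A user-space sketch of the enum-index-to-rate mapping used by slim0_rx_sample_rate_get()/put() above, written table-driven for clarity (the control text entries are "KHZ_48", "KHZ_96" and "KHZ_192"):

#include <stdio.h>

static const int slim0_rx_rates[] = { 48000, 96000, 192000 };

/* put(): enum index -> rate, defaulting to 48 kHz like the driver */
static int index_to_rate(long idx)
{
	if (idx < 0 || idx > 2)
		idx = 0;
	return slim0_rx_rates[idx];
}

/* get(): rate -> enum index, again defaulting to index 0 */
static int rate_to_index(int rate)
{
	int i;

	for (i = 0; i < 3; i++)
		if (slim0_rx_rates[i] == rate)
			return i;
	return 0;
}

int main(void)
{
	printf("index 2 -> %d Hz\n", index_to_rate(2));
	printf("96000 Hz -> index %d\n", rate_to_index(96000));
	return 0;
}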
@@ -725,8 +786,7 @@
pr_debug("%s()\n", __func__);
param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
slim0_rx_bit_format);
-
- rate->min = rate->max = 48000;
+ rate->min = rate->max = slim0_rx_sample_rate;
channels->min = channels->max = msm_slim_0_rx_ch;
return 0;
@@ -780,6 +840,8 @@
SOC_ENUM_SINGLE_EXT(2, slim0_rx_ch_text),
SOC_ENUM_SINGLE_EXT(4, slim0_tx_ch_text),
SOC_ENUM_SINGLE_EXT(8, proxy_rx_ch_text),
+ SOC_ENUM_SINGLE_EXT(2, rx_bit_format_text),
+ SOC_ENUM_SINGLE_EXT(3, slim0_rx_sample_rate_text),
};
static const struct snd_kcontrol_new msm_snd_controls[] = {
@@ -795,7 +857,8 @@
msm_proxy_rx_ch_get, msm_proxy_rx_ch_put),
SOC_ENUM_EXT("SLIM_0_RX Format", msm_snd_enum[3],
slim0_rx_bit_format_get, slim0_rx_bit_format_put),
-
+ SOC_ENUM_EXT("SLIM_0_RX SampleRate", msm_snd_enum[4],
+ slim0_rx_sample_rate_get, slim0_rx_sample_rate_put),
};
static int msm_afe_set_config(struct snd_soc_codec *codec)
@@ -1544,7 +1607,7 @@
.codec_name = "snd-soc-dummy",
.be_id = MSM_FRONTEND_DAI_LSM8,
},
- {
+ {/* hw:x,28 */
.name = "INT_HFP_BT Hostless",
.stream_name = "INT_HFP_BT Hostless",
.cpu_dai_name = "INT_HFP_BT_HOSTLESS",
@@ -1559,7 +1622,7 @@
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
},
- {
+ {/* hw:x,29 */
.name = "MSM8226 HFP TX",
.stream_name = "MultiMedia6",
.cpu_dai_name = "MultiMedia6",
@@ -1575,6 +1638,21 @@
.ignore_pmdown_time = 1,
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA6,
},
+ {/* hw:x,30 */
+ .name = "VoWLAN",
+ .stream_name = "VoWLAN",
+ .cpu_dai_name = "VoWLAN",
+ .platform_name = "msm-pcm-voice",
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .be_id = MSM_FRONTEND_DAI_VOWLAN,
+ },
/* Backend BT/FM DAI Links */
{
.name = LPASS_BE_INT_BT_SCO_RX,
diff --git a/sound/soc/msm/msm8x10.c b/sound/soc/msm/msm8x10.c
index 7b3a028..89df806 100644
--- a/sound/soc/msm/msm8x10.c
+++ b/sound/soc/msm/msm8x10.c
@@ -624,8 +624,8 @@
btn_low[5] = 190;
btn_high[5] = 228;
btn_low[6] = 229;
- btn_high[6] = 269;
- btn_low[7] = 270;
+ btn_high[6] = 264;
+ btn_low[7] = 265;
btn_high[7] = 500;
n_ready = wcd9xxx_mbhc_cal_btn_det_mp(btn_cfg, MBHC_BTN_DET_N_READY);
n_ready[0] = 80;
@@ -894,6 +894,21 @@
.codec_name = "snd-soc-dummy",
.be_id = MSM_FRONTEND_DAI_QCHAT,
},
+ {/* hw:x,15 */
+ .name = "MSM8X10 Media9",
+ .stream_name = "MultiMedia9",
+ .cpu_dai_name = "MultiMedia9",
+ .platform_name = "msm-pcm-dsp.0",
+ .dynamic = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .ignore_suspend = 1,
+ /* this dai-link has playback support */
+ .ignore_pmdown_time = 1,
+ .be_id = MSM_FRONTEND_DAI_MULTIMEDIA9
+ },
/* Backend I2S DAI Links */
{
.name = LPASS_BE_SEC_MI2S_RX,
@@ -1039,6 +1054,19 @@
.be_hw_params_fixup = msm_be_hw_params_fixup,
.ignore_suspend = 1,
},
+ /* Incall Music 2 BACK END DAI Link */
+ {
+ .name = LPASS_BE_VOICE2_PLAYBACK_TX,
+ .stream_name = "Voice2 Farend Playback",
+ .cpu_dai_name = "msm-dai-q6-dev.32770",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .be_id = MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ignore_suspend = 1,
+ },
};
struct snd_soc_card snd_soc_card_msm8x10 = {
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 1434970..8b2c443 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -57,6 +57,7 @@
struct msm_dai_q6_dai_data {
DECLARE_BITMAP(status_mask, STATUS_MAX);
+ DECLARE_BITMAP(hwfree_status, STATUS_MAX);
u32 rate;
u32 channels;
u32 bitwidth;
@@ -1510,6 +1511,11 @@
set_bit(STATUS_PORT_STARTED,
dai_data->status_mask);
}
+ if (!test_bit(STATUS_PORT_STARTED, dai_data->hwfree_status)) {
+ set_bit(STATUS_PORT_STARTED, dai_data->hwfree_status);
+ dev_dbg(dai->dev, "%s: set hwfree_status to started\n",
+ __func__);
+ }
return rc;
}
@@ -1525,7 +1531,6 @@
struct msm_dai_q6_dai_data *dai_data = &mi2s_dai_config->mi2s_dai_data;
struct afe_param_id_i2s_cfg *i2s = &dai_data->port_config.i2s;
-
dai_data->channels = params_channels(params);
switch (dai_data->channels) {
case 8:
@@ -1602,10 +1607,14 @@
dai_data->port_config.i2s.i2s_cfg_minor_version =
AFE_API_VERSION_I2S_CONFIG;
dai_data->port_config.i2s.sample_rate = dai_data->rate;
- if (test_bit(STATUS_PORT_STARTED,
- mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask) ||
+ if ((test_bit(STATUS_PORT_STARTED,
+ mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask) &&
test_bit(STATUS_PORT_STARTED,
- mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask)) {
+ mi2s_dai_data->rx_dai.mi2s_dai_data.hwfree_status)) ||
+ (test_bit(STATUS_PORT_STARTED,
+ mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask) &&
+ test_bit(STATUS_PORT_STARTED,
+ mi2s_dai_data->tx_dai.mi2s_dai_data.hwfree_status))) {
if ((mi2s_dai_data->tx_dai.mi2s_dai_data.rate !=
mi2s_dai_data->rx_dai.mi2s_dai_data.rate) ||
(mi2s_dai_data->rx_dai.mi2s_dai_data.bitwidth !=
@@ -1669,6 +1678,23 @@
return 0;
}
+static int msm_dai_q6_mi2s_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct msm_dai_q6_mi2s_dai_data *mi2s_dai_data =
+ dev_get_drvdata(dai->dev);
+ struct msm_dai_q6_dai_data *dai_data =
+ (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ &mi2s_dai_data->rx_dai.mi2s_dai_data :
+ &mi2s_dai_data->tx_dai.mi2s_dai_data);
+
+ if (test_bit(STATUS_PORT_STARTED, dai_data->hwfree_status)) {
+ clear_bit(STATUS_PORT_STARTED, dai_data->hwfree_status);
+ dev_dbg(dai->dev, "%s: clear hwfree_status\n", __func__);
+ }
+ return 0;
+}
+
static void msm_dai_q6_mi2s_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
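A boolean model of the gating added above: the MI2S hw_params mismatch check now only compares against the opposite direction when that side is both started and still holds un-freed hw params, so a stream that has gone through hw_free no longer blocks reconfiguration. Sketch (user space, stand-in types):

#include <stdbool.h>
#include <stdio.h>

struct mi2s_side {
	bool port_started;	/* STATUS_PORT_STARTED in status_mask */
	bool hw_params_valid;	/* STATUS_PORT_STARTED in hwfree_status */
	unsigned int rate;
	unsigned int bitwidth;
};

static bool params_conflict(const struct mi2s_side *rx, const struct mi2s_side *tx)
{
	bool rx_active = rx->port_started && rx->hw_params_valid;
	bool tx_active = tx->port_started && tx->hw_params_valid;

	if (!rx_active && !tx_active)
		return false;			/* nothing to compare against */
	return rx->rate != tx->rate || rx->bitwidth != tx->bitwidth;
}

int main(void)
{
	/* RX was started earlier but hw_free already ran on it, so its stale
	 * rate no longer rejects a new TX configuration. */
	struct mi2s_side rx = { true, false, 48000, 16 };
	struct mi2s_side tx = { false, false, 44100, 16 };

	printf("conflict: %d\n", params_conflict(&rx, &tx));	/* prints 0 */
	return 0;
}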
@@ -1696,12 +1722,15 @@
dev_err(dai->dev, "fail to close AFE port\n");
clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
}
+ if (test_bit(STATUS_PORT_STARTED, dai_data->hwfree_status))
+ clear_bit(STATUS_PORT_STARTED, dai_data->hwfree_status);
}
static struct snd_soc_dai_ops msm_dai_q6_mi2s_ops = {
.startup = msm_dai_q6_mi2s_startup,
.prepare = msm_dai_q6_mi2s_prepare,
.hw_params = msm_dai_q6_mi2s_hw_params,
+ .hw_free = msm_dai_q6_mi2s_hw_free,
.set_fmt = msm_dai_q6_mi2s_set_fmt,
.shutdown = msm_dai_q6_mi2s_shutdown,
};
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
index 63ac5d3..161904c 100644
--- a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
@@ -211,6 +211,14 @@
DOLBY_ENDDEP_PARAM_VMB_OFFSET},
{-320, -320, 144}
},
+ {PROXY, 6, DOLBY_ENDP_EXT_SPEAKERS,
+ {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
+ {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
+ DOLBY_ENDDEP_PARAM_VMB_LENGTH},
+ {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
+ DOLBY_ENDDEP_PARAM_VMB_OFFSET},
+ {-320, -320, 144}
+ },
{FM, 2, DOLBY_ENDP_HDMI,
{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
@@ -409,7 +417,8 @@
for (idx = 0; idx < NUM_DOLBY_ENDP_DEVICE; idx++) {
if (dolby_dap_endp_params[idx].device ==
dolby_dap_params_states.device) {
- if (dolby_dap_params_states.device == AUX_DIGITAL) {
+ if (dolby_dap_params_states.device == AUX_DIGITAL ||
+ dolby_dap_params_states.device == PROXY) {
if (dolby_dap_endp_params[idx].device_ch_caps ==
device_channels)
break;
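A sketch of the endpoint lookup extended above: the device field must match, and for devices that have more than one table entry (HDMI/AUX_DIGITAL and now PROXY) the channel count is compared as well. Stand-alone model with placeholder device IDs and table contents:

#include <stdio.h>

enum { DEV_SPEAKER = 0, DEV_AUX_DIGITAL = 1, DEV_PROXY = 2 };	/* placeholders */

struct endp { int device; int channels; };

static const struct endp endp_table[] = {
	{ DEV_AUX_DIGITAL, 2 },
	{ DEV_AUX_DIGITAL, 6 },
	{ DEV_PROXY, 2 },
	{ DEV_PROXY, 6 },	/* corresponds to the entry added by this change */
	{ DEV_SPEAKER, 2 },
};

static int find_endp(int device, int channels)
{
	unsigned int i;

	for (i = 0; i < sizeof(endp_table) / sizeof(endp_table[0]); i++) {
		if (endp_table[i].device != device)
			continue;
		if (device == DEV_AUX_DIGITAL || device == DEV_PROXY) {
			if (endp_table[i].channels == channels)
				return i;
		} else {
			return i;
		}
	}
	return -1;
}

int main(void)
{
	printf("PROXY/6ch entry: %d\n", find_endp(DEV_PROXY, 6));	/* 3 */
	return 0;
}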
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h
index 4544fea..14586f4 100644
--- a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h
+++ b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h
@@ -249,7 +249,7 @@
#define DOLBY_AUTO_ENDDEP_IDX (MAX_DOLBY_PARAMS+4)
#define TOTAL_LENGTH_DOLBY_PARAM 745
-#define NUM_DOLBY_ENDP_DEVICE 23
+#define NUM_DOLBY_ENDP_DEVICE 24
#define DOLBY_VIS_PARAM_HEADER_SIZE 25
#define DOLBY_INVALID_PORT_ID -1
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index e72502c..8e69a2b 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -430,7 +430,7 @@
path_type,
msm_bedais[i].sample_rate,
msm_bedais[i].channel,
- topology, false,
+ topology, perf_mode,
bits_per_sample);
payload.copp_ids[payload.num_copps++] =
@@ -688,6 +688,8 @@
session_id = voc_get_session_id(VOICE_SESSION_NAME);
else if (val == MSM_FRONTEND_DAI_VOLTE)
session_id = voc_get_session_id(VOLTE_SESSION_NAME);
+ else if (val == MSM_FRONTEND_DAI_VOWLAN)
+ session_id = voc_get_session_id(VOWLAN_SESSION_NAME);
else if (val == MSM_FRONTEND_DAI_VOICE2)
session_id = voc_get_session_id(VOICE2_SESSION_NAME);
else if (val == MSM_FRONTEND_DAI_QCHAT)
@@ -2110,6 +2112,9 @@
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_PRI_I2S_RX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
@@ -2131,6 +2136,9 @@
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SEC_I2S_RX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SEC_I2S_RX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SEC_I2S_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
@@ -2152,6 +2160,9 @@
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
@@ -2176,6 +2187,9 @@
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SLIMBUS_0_RX ,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
@@ -2200,6 +2214,9 @@
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_INT_BT_SCO_RX ,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
@@ -2224,6 +2241,9 @@
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_MI2S_RX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_MI2S_RX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_MI2S_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
@@ -2248,6 +2268,9 @@
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_AFE_PCM_RX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_AFE_PCM_RX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_AFE_PCM_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
@@ -2272,6 +2295,9 @@
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_AUXPCM_RX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_AUXPCM_RX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_AUXPCM_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
@@ -2293,6 +2319,9 @@
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("DTMF", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
MSM_FRONTEND_DAI_DTMF_RX, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
@@ -2314,6 +2343,9 @@
SOC_SINGLE_EXT("VoLTE", MSM_BACKEND_DAI_HDMI_RX,
MSM_FRONTEND_DAI_VOLTE, 1, 0, msm_routing_get_voice_mixer,
msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("VoWLAN", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
SOC_SINGLE_EXT("Voice Stub", MSM_BACKEND_DAI_HDMI_RX,
MSM_FRONTEND_DAI_VOICE_STUB, 1, 0, msm_routing_get_voice_stub_mixer,
msm_routing_put_voice_stub_mixer),
@@ -2418,6 +2450,33 @@
msm_routing_put_voice_mixer),
};
+static const struct snd_kcontrol_new tx_vowlan_mixer_controls[] = {
+ SOC_SINGLE_EXT("PRI_TX_VoWLAN", MSM_BACKEND_DAI_PRI_I2S_TX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("SLIM_0_TX_VoWLAN", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX_VoWLAN",
+ MSM_BACKEND_DAI_INT_BT_SCO_TX, MSM_FRONTEND_DAI_VOWLAN, 1, 0,
+ msm_routing_get_voice_mixer, msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("AFE_PCM_TX_VoWLAN", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("AUX_PCM_TX_VoWLAN", MSM_BACKEND_DAI_AUXPCM_TX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("SEC_AUX_PCM_TX_VoWLAN", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("MI2S_TX_VoWLAN", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+ SOC_SINGLE_EXT("PRI_MI2S_TX_VoWLAN", MSM_BACKEND_DAI_PRI_MI2S_TX,
+ MSM_FRONTEND_DAI_VOWLAN, 1, 0, msm_routing_get_voice_mixer,
+ msm_routing_put_voice_mixer),
+};
+
static const struct snd_kcontrol_new tx_voip_mixer_controls[] = {
SOC_SINGLE_EXT("PRI_TX_Voip", MSM_BACKEND_DAI_PRI_I2S_TX,
MSM_FRONTEND_DAI_VOIP, 1, 0, msm_routing_get_voice_mixer,
@@ -2611,6 +2670,11 @@
0, 1, 0, msm_routing_get_fm_pcmrx_switch_mixer,
msm_routing_put_fm_pcmrx_switch_mixer);
+static const struct snd_kcontrol_new pri_mi2s_rx_switch_mixer_controls =
+ SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+ 0, 1, 0, msm_routing_get_switch_mixer,
+ msm_routing_put_switch_mixer);
+
static const struct soc_enum lsm_mux_enum =
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mad_audio_mux_text), mad_audio_mux_text);
@@ -3151,6 +3215,8 @@
SND_SOC_DAPM_AIF_OUT("VOICE2_UL", "Voice2 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("VoLTE_DL", "VoLTE Playback", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("VoLTE_UL", "VoLTE Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("VoWLAN_DL", "VoWLAN Playback", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("VoWLAN_UL", "VoWLAN Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("VOIP_UL", "VoIP Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_IN("SLIM0_DL_HL", "SLIMBUS0_HOSTLESS Playback",
0, 0, 0, 0),
@@ -3192,6 +3258,9 @@
SND_SOC_DAPM_AIF_OUT("PRI_MI2S_UL_HL",
"Primary MI2S_TX Hostless Capture",
0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PRI_MI2S_DL_HL",
+ "Primary MI2S_RX Hostless Playback",
+ 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MI2S_DL_HL", "MI2S_RX_HOSTLESS Playback",
0, 0, 0, 0),
@@ -3298,6 +3367,8 @@
&fm_switch_mixer_controls),
SND_SOC_DAPM_SWITCH("PCM_RX_DL_HL", SND_SOC_NOPM, 0, 0,
&pcm_rx_switch_mixer_controls),
+ SND_SOC_DAPM_SWITCH("PRI_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+ &pri_mi2s_rx_switch_mixer_controls),
/* Mux Definitions */
SND_SOC_DAPM_MUX("LSM1 MUX", SND_SOC_NOPM, 0, 0, &lsm1_mux),
@@ -3415,6 +3486,9 @@
SND_SOC_DAPM_MIXER("VoLTE_Tx Mixer",
SND_SOC_NOPM, 0, 0, tx_volte_mixer_controls,
ARRAY_SIZE(tx_volte_mixer_controls)),
+ SND_SOC_DAPM_MIXER("VoWLAN_Tx Mixer",
+ SND_SOC_NOPM, 0, 0, tx_vowlan_mixer_controls,
+ ARRAY_SIZE(tx_vowlan_mixer_controls)),
SND_SOC_DAPM_MIXER("INTERNAL_BT_SCO_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
int_bt_sco_rx_mixer_controls, ARRAY_SIZE(int_bt_sco_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("INTERNAL_FM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -3696,6 +3770,7 @@
{"PRI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"PRI_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"PRI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+ {"PRI_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
{"PRI_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"PRI_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"PRI_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
@@ -3704,6 +3779,7 @@
{"SEC_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"SEC_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"SEC_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+ {"SEC_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
{"SEC_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"SEC_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"SEC_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
@@ -3712,6 +3788,7 @@
{"SEC_MI2S_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"SEC_MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"SEC_MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+ {"SEC_MI2S_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
{"SEC_MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"SEC_MI2S_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"SEC_MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
@@ -3720,6 +3797,7 @@
{"SLIM_0_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"SLIM_0_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"SLIM_0_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+ {"SLIM_0_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
{"SLIM_0_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"SLIM_0_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"SLIM_0_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
@@ -3729,6 +3807,7 @@
{"INTERNAL_BT_SCO_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+ {"INTERNAL_BT_SCO_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"INTERNAL_BT_SCO_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
@@ -3737,6 +3816,7 @@
{"AFE_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"AFE_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"AFE_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+ {"AFE_PCM_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
{"AFE_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"AFE_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"AFE_PCM_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
@@ -3745,6 +3825,7 @@
{"AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"AUX_PCM_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+ {"AUX_PCM_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
{"AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"AUX_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
@@ -3753,6 +3834,7 @@
{"SEC_AUX_PCM_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"SEC_AUX_PCM_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+ {"SEC_AUX_PCM_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
{"SEC_AUX_PCM_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"SEC_AUX_PCM_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"SEC_AUX_PCM_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
@@ -3762,6 +3844,7 @@
{"HDMI_RX_Voice Mixer", "CSVoice", "CS-VOICE_DL1"},
{"HDMI_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"HDMI_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+ {"HDMI_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
{"HDMI_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"HDMI_RX_Voice Mixer", "DTMF", "DTMF_DL_HL"},
{"HDMI_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
@@ -3772,6 +3855,7 @@
{"MI2S_RX_Voice Mixer", "Voice2", "VOICE2_DL"},
{"MI2S_RX_Voice Mixer", "Voip", "VOIP_DL"},
{"MI2S_RX_Voice Mixer", "VoLTE", "VoLTE_DL"},
+ {"MI2S_RX_Voice Mixer", "VoWLAN", "VoWLAN_DL"},
{"MI2S_RX_Voice Mixer", "Voice Stub", "VOICE_STUB_DL"},
{"MI2S_RX_Voice Mixer", "QCHAT", "QCHAT_DL"},
{"MI2S_RX", NULL, "MI2S_RX_Voice Mixer"},
@@ -3809,6 +3893,16 @@
{"VoLTE_Tx Mixer", "SEC_AUX_PCM_TX_VoLTE", "SEC_AUX_PCM_TX"},
{"VoLTE_Tx Mixer", "MI2S_TX_VoLTE", "MI2S_TX"},
{"VoLTE_UL", NULL, "VoLTE_Tx Mixer"},
+
+ {"VoWLAN_Tx Mixer", "PRI_TX_VoWLAN", "PRI_I2S_TX"},
+ {"VoWLAN_Tx Mixer", "SLIM_0_TX_VoWLAN", "SLIMBUS_0_TX"},
+ {"VoWLAN_Tx Mixer", "INTERNAL_BT_SCO_TX_VoWLAN", "INT_BT_SCO_TX"},
+ {"VoWLAN_Tx Mixer", "AFE_PCM_TX_VoWLAN", "PCM_TX"},
+ {"VoWLAN_Tx Mixer", "AUX_PCM_TX_VoWLAN", "AUX_PCM_TX"},
+ {"VoWLAN_Tx Mixer", "SEC_AUX_PCM_TX_VoWLAN", "SEC_AUX_PCM_TX"},
+ {"VoWLAN_Tx Mixer", "MI2S_TX_VoWLAN", "MI2S_TX"},
+ {"VoWLAN_UL", NULL, "VoWLAN_Tx Mixer"},
+
{"Voip_Tx Mixer", "PRI_TX_Voip", "PRI_I2S_TX"},
{"Voip_Tx Mixer", "MI2S_TX_Voip", "MI2S_TX"},
{"Voip_Tx Mixer", "SLIM_0_TX_Voip", "SLIMBUS_0_TX"},
@@ -3914,6 +4008,8 @@
{"MI2S_UL_HL", NULL, "MI2S_TX"},
{"PCM_RX_DL_HL", "Switch", "SLIM0_DL_HL"},
{"PCM_RX", NULL, "PCM_RX_DL_HL"},
+ {"PRI_MI2S_RX_DL_HL", "Switch", "PRI_MI2S_DL_HL"},
+ {"PRI_MI2S_RX", NULL, "PRI_MI2S_RX_DL_HL"},
{"MI2S_UL_HL", NULL, "TERT_MI2S_TX"},
{"SEC_I2S_RX", NULL, "SEC_I2S_DL_HL"},
{"PRI_MI2S_UL_HL", NULL, "PRI_MI2S_TX"},
@@ -4171,7 +4267,7 @@
path_type,
bedai->sample_rate,
channels,
- topology, false,
+ topology, fe_dai_perf_mode[i][session_type],
bits_per_sample);
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index f2b0436..87e44b2 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -89,6 +89,7 @@
MSM_FRONTEND_DAI_LSM6,
MSM_FRONTEND_DAI_LSM7,
MSM_FRONTEND_DAI_LSM8,
+ MSM_FRONTEND_DAI_VOWLAN,
MSM_FRONTEND_DAI_MAX,
};
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
index 1074d76..fac5845 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.c
@@ -75,6 +75,14 @@
return false;
}
+static bool is_vowlan(struct msm_voice *pvowlan)
+{
+ if (pvowlan == &voice_info[VOWLAN_SESSION_INDEX])
+ return true;
+ else
+ return false;
+}
+
static uint32_t get_session_id(struct msm_voice *pvoc)
{
uint32_t session_id = 0;
@@ -85,6 +93,8 @@
session_id = voc_get_session_id(VOICE2_SESSION_NAME);
else if (is_qchat(pvoc))
session_id = voc_get_session_id(QCHAT_SESSION_NAME);
+ else if (is_vowlan(pvoc))
+ session_id = voc_get_session_id(VOWLAN_SESSION_NAME);
else
session_id = voc_get_session_id(VOICE_SESSION_NAME);
@@ -134,6 +144,10 @@
voice = &voice_info[QCHAT_SESSION_INDEX];
pr_debug("%s: Open QCHAT Substream Id=%s\n",
__func__, substream->pcm->id);
+ } else if (!strncmp("VoWLAN", substream->pcm->id, 6)) {
+ voice = &voice_info[VOWLAN_SESSION_INDEX];
+ pr_debug("%s: Open VoWLAN Substream Id=%s\n",
+ __func__, substream->pcm->id);
} else {
voice = &voice_info[VOICE_SESSION_INDEX];
pr_debug("%s: Open VOICE Substream Id=%s\n",
@@ -448,6 +462,7 @@
voc_set_tty_mode(voc_get_session_id(VOICE_SESSION_NAME), tty_mode);
voc_set_tty_mode(voc_get_session_id(VOICE2_SESSION_NAME), tty_mode);
voc_set_tty_mode(voc_get_session_id(VOLTE_SESSION_NAME), tty_mode);
+ voc_set_tty_mode(voc_get_session_id(VOWLAN_SESSION_NAME), tty_mode);
return 0;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h
index f199be6..62c5732 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voice-v2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,7 @@
VOLTE_SESSION_INDEX,
VOICE2_SESSION_INDEX,
QCHAT_SESSION_INDEX,
+ VOWLAN_SESSION_INDEX,
VOICE_SESSION_INDEX_MAX,
};
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
index e3c8944..6b32064 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
@@ -47,8 +47,27 @@
#define MODE_AMR_WB 0xD
#define MODE_PCM 0xC
#define MODE_4GV_NW 0xE
+#define MODE_G711 0xA
+#define MODE_G711A 0xF
-#define VOIP_MODE_MAX MODE_4GV_NW
+enum msm_audio_g711a_frame_type {
+ MVS_G711A_SPEECH_GOOD,
+ MVS_G711A_SID,
+ MVS_G711A_NO_DATA,
+ MVS_G711A_ERASURE
+};
+
+enum msm_audio_g711a_mode {
+ MVS_G711A_MODE_MULAW,
+ MVS_G711A_MODE_ALAW
+};
+
+enum msm_audio_g711_mode {
+ MVS_G711_MODE_MULAW,
+ MVS_G711_MODE_ALAW
+};
+
+#define VOIP_MODE_MAX MODE_G711A
#define VOIP_RATE_MAX 23850
enum format {
@@ -153,7 +172,7 @@
uint32_t evrc_max_rate;
};
-static int voip_get_media_type(uint32_t mode,
+static int voip_get_media_type(uint32_t mode, uint32_t rate_type,
unsigned int samp_rate,
unsigned int *media_type);
static int voip_get_rate_type(uint32_t mode,
@@ -358,6 +377,81 @@
list_add_tail(&buf_node->list, &prtd->out_queue);
break;
}
+ case MODE_G711:
+ case MODE_G711A:{
+ /* G711 frames are 10ms each, but the DSP works with
+ * 20ms frames and sends two 10ms frames per buffer.
+ * Extract the two frames and put them in separate
+ * buffers.
+ */
+ /* Remove the first DSP frame info header.
+ * Header format: G711A
+ * Bits 0-1: Frame type
+ * Bits 2-3: Frame rate
+ *
+ * Header format: G711
+ * Bits 2-3: Frame rate
+ */
+ if (prtd->mode == MODE_G711A)
+ buf_node->frame.frm_hdr.frame_type =
+ (*voc_pkt) & 0x03;
+ buf_node->frame.frm_hdr.timestamp = timestamp;
+ voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+
+ /* There are two frames in the buffer. Length of the
+ * first frame:
+ */
+ buf_node->frame.pktlen = (pkt_len -
+ 2 * DSP_FRAME_HDR_LEN) / 2;
+
+ memcpy(&buf_node->frame.voc_pkt[0],
+ voc_pkt,
+ buf_node->frame.pktlen);
+ voc_pkt = voc_pkt + buf_node->frame.pktlen;
+
+ list_add_tail(&buf_node->list, &prtd->out_queue);
+
+ /* Get another buffer from the free Q and fill in the
+ * second frame.
+ */
+ if (!list_empty(&prtd->free_out_queue)) {
+ buf_node =
+ list_first_entry(&prtd->free_out_queue,
+ struct voip_buf_node,
+ list);
+ list_del(&buf_node->list);
+
+ /* Remove the second DSP frame info header.
+ * Header format:
+ * Bits 0-1: Frame type
+ * Bits 2-3: Frame rate
+ */
+
+ if (prtd->mode == MODE_G711A)
+ buf_node->frame.frm_hdr.frame_type =
+ (*voc_pkt) & 0x03;
+ buf_node->frame.frm_hdr.timestamp = timestamp;
+ voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+
+ /* There are two frames in the buffer. Length
+ * of the second frame:
+ */
+ buf_node->frame.pktlen = (pkt_len -
+ 2 * DSP_FRAME_HDR_LEN) / 2;
+
+ memcpy(&buf_node->frame.voc_pkt[0],
+ voc_pkt,
+ buf_node->frame.pktlen);
+
+ list_add_tail(&buf_node->list,
+ &prtd->out_queue);
+ } else {
+ /* Drop the second frame */
+ pr_err("%s: UL data dropped, read is slow\n",
+ __func__);
+ }
+ break;
+ }
default: {
buf_node->frame.frm_hdr.timestamp = timestamp;
buf_node->frame.pktlen = pkt_len;
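The arithmetic behind the uplink split above, as a stand-alone sketch (DSP_FRAME_HDR_LEN is assumed to be 1 byte here, matching this driver; a 10 ms G.711 frame at 8 kHz carries 80 bytes):

#include <stdio.h>

#define DSP_FRAME_HDR_LEN 1	/* assumed value */

int main(void)
{
	/* one 20 ms DSP buffer = two headers + two 10 ms payloads */
	unsigned int pkt_len = 2 * DSP_FRAME_HDR_LEN + 2 * 80;
	unsigned int per_frame = (pkt_len - 2 * DSP_FRAME_HDR_LEN) / 2;

	printf("per-frame payload: %u bytes\n", per_frame);	/* 80 */

	/* layout: [hdr1][frame1 (80)][hdr2][frame2 (80)]; frame2 starts
	 * right after the second header */
	printf("frame2 offset: %u\n", 2 * DSP_FRAME_HDR_LEN + per_frame);
	return 0;
}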
@@ -389,6 +483,8 @@
unsigned long dsp_flags;
uint32_t rate_type;
uint32_t frame_rate;
+ u32 pkt_len;
+ u8 *voc_addr = NULL;
if (prtd->playback_substream == NULL)
return;
@@ -454,6 +550,70 @@
list_add_tail(&buf_node->list, &prtd->free_in_queue);
break;
}
+ case MODE_G711:
+ case MODE_G711A:{
+ /* G711 frames are 10ms each but the DSP expects 20ms
+ * worth of data, so send two 10ms frames per buffer.
+ */
+ /* Add the first DSP frame info header. Header format:
+ * Bits 0-1: Frame type
+ * Bits 2-3: Frame rate
+ */
+ voc_addr = voc_pkt;
+ voc_pkt = voc_pkt + sizeof(uint32_t);
+
+ *voc_pkt = ((prtd->rate_type & 0x0F) << 2) |
+ (buf_node->frame.frm_hdr.frame_type & 0x03);
+ voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+
+ pkt_len = buf_node->frame.pktlen + DSP_FRAME_HDR_LEN;
+
+ memcpy(voc_pkt,
+ &buf_node->frame.voc_pkt[0],
+ buf_node->frame.pktlen);
+ voc_pkt = voc_pkt + buf_node->frame.pktlen;
+
+ list_add_tail(&buf_node->list, &prtd->free_in_queue);
+
+ if (!list_empty(&prtd->in_queue)) {
+ /* Get the second buffer. */
+ buf_node = list_first_entry(&prtd->in_queue,
+ struct voip_buf_node,
+ list);
+ list_del(&buf_node->list);
+
+ /* Add the second DSP frame info header.
+ * Header format:
+ * Bits 0-1: Frame type
+ * Bits 2-3: Frame rate
+ */
+ *voc_pkt = ((prtd->rate_type & 0x0F) << 2) |
+ (buf_node->frame.frm_hdr.frame_type & 0x03);
+ voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
+
+ pkt_len = pkt_len + buf_node->frame.pktlen +
+ DSP_FRAME_HDR_LEN;
+
+ memcpy(voc_pkt,
+ &buf_node->frame.voc_pkt[0],
+ buf_node->frame.pktlen);
+
+ list_add_tail(&buf_node->list,
+ &prtd->free_in_queue);
+ } else {
+ /* Only 10ms worth of data is available, signal
+ * erasure frame.
+ */
+ *voc_pkt = ((prtd->rate_type & 0x0F) << 2) |
+ (MVS_G711A_ERASURE & 0x03);
+
+ pkt_len = pkt_len + DSP_FRAME_HDR_LEN;
+ pr_debug("%s, Only 10ms read, erase 2nd frame\n",
+ __func__);
+ }
+ *((uint32_t *)voc_addr) = pkt_len;
+ break;
+ }
default: {
*((uint32_t *)voc_pkt) = buf_node->frame.pktlen;
voc_pkt = voc_pkt + sizeof(uint32_t);
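And the downlink counterpart: each 10 ms frame is prefixed with a one-byte header carrying the rate type in the upper bits (masked with 0x0F, shifted left by 2) and the frame type in bits 0-1, and the 32-bit word written back at voc_addr is the total of headers plus payloads. Sketch of the header packing (values follow the enums above; frame type 3 is the erasure marker):

#include <stdint.h>
#include <stdio.h>

static uint8_t g711_dsp_hdr(uint32_t rate_type, uint32_t frame_type)
{
	return (uint8_t)(((rate_type & 0x0F) << 2) | (frame_type & 0x03));
}

int main(void)
{
	/* MULAW (rate_type 0) + SPEECH_GOOD (frame_type 0) */
	printf("good frame hdr:    0x%02x\n", g711_dsp_hdr(0, 0));
	/* ALAW (rate_type 1) + erasure, as sent when only 10 ms was queued */
	printf("erasure frame hdr: 0x%02x\n", g711_dsp_hdr(1, 3));

	/* total length word when both frames are present: 2 x (1 + 80) */
	printf("pkt_len: %u\n", 2 * (1 + 80));
	return 0;
}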
@@ -829,10 +989,12 @@
pr_debug("%s(): mode=%d, playback sample rate=%d, capture sample rate=%d\n",
__func__, prtd->mode, prtd->play_samp_rate, prtd->cap_samp_rate);
- if ((runtime->format != FORMAT_S16_LE) && ((prtd->mode == MODE_PCM) ||
- (prtd->mode == MODE_AMR) || (prtd->mode == MODE_AMR_WB) ||
+ if ((runtime->format != FORMAT_S16_LE &&
+ runtime->format != FORMAT_SPECIAL) &&
+ ((prtd->mode == MODE_AMR) || (prtd->mode == MODE_AMR_WB) ||
(prtd->mode == MODE_IS127) || (prtd->mode == MODE_4GV_NB) ||
- (prtd->mode == MODE_4GV_WB) || (prtd->mode == MODE_4GV_NW))) {
+ (prtd->mode == MODE_4GV_WB) || (prtd->mode == MODE_4GV_NW) ||
+ (prtd->mode == MODE_G711) || (prtd->mode == MODE_G711A))) {
pr_err("%s(): mode:%d and format:%u are not matched\n",
__func__, prtd->mode, (uint32_t)runtime->format);
@@ -840,21 +1002,19 @@
goto done;
}
- ret = voip_get_media_type(prtd->mode,
- prtd->play_samp_rate,
- &media_type);
- if (ret < 0) {
- pr_err("%s(): fail at getting media_type, ret=%d\n",
- __func__, ret);
+ if (runtime->format != FORMAT_S16_LE && (prtd->mode == MODE_PCM)) {
+ pr_err("%s(): mode:%d and format:%u are not matched\n",
+ __func__, prtd->mode, (uint32_t)runtime->format);
- ret = -EINVAL;
+ ret = -EINVAL;
goto done;
}
- pr_debug("%s(): media_type=%d\n", __func__, media_type);
if ((prtd->mode == MODE_PCM) ||
(prtd->mode == MODE_AMR) ||
- (prtd->mode == MODE_AMR_WB)) {
+ (prtd->mode == MODE_AMR_WB) ||
+ (prtd->mode == MODE_G711) ||
+ (prtd->mode == MODE_G711A)) {
ret = voip_get_rate_type(prtd->mode,
prtd->rate,
&rate_type);
@@ -909,6 +1069,19 @@
pr_debug("%s(): min rate=%d, max rate=%d\n",
__func__, evrc_min_rate_type, evrc_max_rate_type);
}
+ ret = voip_get_media_type(prtd->mode,
+ prtd->rate_type,
+ prtd->play_samp_rate,
+ &media_type);
+ if (ret < 0) {
+ pr_err("%s(): fail at getting media_type, ret=%d\n",
+ __func__, ret);
+
+ ret = -EINVAL;
+ goto done;
+ }
+ pr_debug("%s(): media_type=%d\n", __func__, media_type);
+
if ((prtd->play_samp_rate == 8000) &&
(prtd->cap_samp_rate == 8000))
voc_config_vocoder(media_type, rate_type,
@@ -1285,6 +1458,10 @@
}
break;
}
+ case MODE_G711:
+ case MODE_G711A:
+ *rate_type = rate;
+ break;
default:
pr_err("wrong mode type.\n");
ret = -EINVAL;
@@ -1294,9 +1471,9 @@
return ret;
}
-static int voip_get_media_type(uint32_t mode,
- unsigned int samp_rate,
- unsigned int *media_type)
+static int voip_get_media_type(uint32_t mode, uint32_t rate_type,
+ unsigned int samp_rate,
+ unsigned int *media_type)
{
int ret = 0;
@@ -1327,6 +1504,13 @@
case MODE_4GV_NW: /* EVRC-NW */
*media_type = VSS_MEDIA_ID_4GV_NW_MODEM;
break;
+ case MODE_G711:
+ case MODE_G711A:
+ if (rate_type == MVS_G711A_MODE_MULAW)
+ *media_type = VSS_MEDIA_ID_G711_MULAW;
+ else
+ *media_type = VSS_MEDIA_ID_G711_ALAW;
+ break;
default:
pr_debug(" input mode is not supported\n");
ret = -EINVAL;
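For G.711 the rate type chosen earlier doubles as the companding law, so the media type follows directly from it; a minimal sketch of that selection (the MEDIA_* values here are placeholders for the VSS_MEDIA_ID_G711_* constants):

#include <stdio.h>

enum { G711_MULAW = 0, G711_ALAW = 1 };			/* MVS_G711A_MODE_* order */
enum { MEDIA_G711_MULAW = 1, MEDIA_G711_ALAW = 2 };	/* placeholder IDs */

static int g711_media_type(int rate_type)
{
	return (rate_type == G711_MULAW) ? MEDIA_G711_MULAW : MEDIA_G711_ALAW;
}

int main(void)
{
	printf("A-law media type: %d\n", g711_media_type(G711_ALAW));
	return 0;
}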
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 74b79dd..badc3c3 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -453,7 +453,7 @@
q6asm_add_hdr_custom_topology(ac, &asm_top.hdr,
APR_PKT_SIZE(APR_HDR_SIZE,
sizeof(asm_top)), TRUE);
-
+ atomic_set(&ac->cmd_state, 1);
asm_top.hdr.opcode = ASM_CMD_ADD_TOPOLOGIES;
asm_top.payload_addr_lsw = cal_block.cal_paddr;
asm_top.payload_addr_msw = 0;
@@ -1626,7 +1626,6 @@
hdr->dest_port = ((ac->session << 8) & 0xFF00) | (stream_id);
if (cmd_flg) {
hdr->token = ac->session;
- atomic_set(&ac->cmd_state, 1);
}
hdr->pkt_size = pkt_size;
mutex_unlock(&ac->cmd_lock);
@@ -1667,7 +1666,6 @@
hdr->dest_port = ((ac->session << 8) & 0xFF00) | (stream_id);
if (cmd_flg) {
hdr->token = ac->session;
- atomic_set(&ac->cmd_state, 1);
}
hdr->pkt_size = pkt_size;
return;
@@ -1711,7 +1709,6 @@
hdr->dest_port = 0;
if (cmd_flg) {
hdr->token = ((ac->session << 8) | 0x0001) ;
- atomic_set(&ac->cmd_state, 1);
}
hdr->pkt_size = pkt_size;
mutex_unlock(&ac->cmd_lock);
@@ -1728,7 +1725,6 @@
hdr->dest_port = 0;
if (cmd_flg) {
hdr->token = token;
- atomic_set(&ac->cmd_state, 1);
}
hdr->pkt_size = pkt_size;
return;
@@ -1748,6 +1744,7 @@
pr_debug("%s:session[%d]", __func__, ac->session);
q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+ atomic_set(&ac->cmd_state, 1);
open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_V3;
/* Stream prio : High, provide meta info with encoded frames */
open.src_endpointype = ASM_END_POINT_DEVICE_MATRIX;
@@ -1841,7 +1838,7 @@
format);
q6asm_stream_add_hdr(ac, &open.hdr, sizeof(open), TRUE, stream_id);
-
+ atomic_set(&ac->cmd_state, 1);
/*
* Updated the token field with stream/session for compressed playback
* Platform driver must know the the stream with which the command is
@@ -1959,6 +1956,7 @@
ac->io_mode |= NT_MODE;
q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+ atomic_set(&ac->cmd_state, 1);
open.hdr.opcode = ASM_STREAM_CMD_OPEN_READWRITE_V2;
open.mode_flags = BUFFER_META_ENABLE;
@@ -2068,6 +2066,7 @@
pr_debug("%s: session[%d]", __func__, ac->session);
q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+ atomic_set(&ac->cmd_state, 1);
open.hdr.opcode = ASM_STREAM_CMD_OPEN_LOOPBACK_V2;
open.mode_flags = 0;
@@ -2110,6 +2109,7 @@
}
pr_debug("%s session[%d]", __func__, ac->session);
q6asm_add_hdr(ac, &run.hdr, sizeof(run), TRUE);
+ atomic_set(&ac->cmd_state, 1);
run.hdr.opcode = ASM_SESSION_CMD_RUN_V2;
run.flags = flags;
@@ -2147,7 +2147,7 @@
}
pr_debug("session[%d]", ac->session);
q6asm_stream_add_hdr_async(ac, &run.hdr, sizeof(run), TRUE, stream_id);
-
+ atomic_set(&ac->cmd_state, 1);
run.hdr.opcode = ASM_SESSION_CMD_RUN_V2;
run.flags = flags;
run.time_lsw = lsw_ts;
@@ -2189,6 +2189,7 @@
sample_rate, channels, bit_rate, mode, format);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+ atomic_set(&ac->cmd_state, 1);
enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
@@ -2229,6 +2230,7 @@
pr_debug("%s: Session %d, num_channels = %d\n",
__func__, ac->session, num_channels);
q6asm_add_hdr(ac, &chan_map.hdr, sizeof(chan_map), TRUE);
+ atomic_set(&ac->cmd_state, 1);
chan_map.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
chan_map.encdec.param_id = ASM_PARAM_ID_DEC_OUTPUT_CHAN_MAP;
chan_map.encdec.param_size = sizeof(struct asm_dec_out_chan_map_param) -
@@ -2273,6 +2275,7 @@
ac->session, rate, channels);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+ atomic_set(&ac->cmd_state, 1);
enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
@@ -2334,7 +2337,7 @@
ac->session, rate, channels);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
-
+ atomic_set(&ac->cmd_state, 1);
enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
enc_cfg.encdec.param_size = sizeof(enc_cfg) - sizeof(enc_cfg.hdr) -
@@ -2433,6 +2436,7 @@
pr_debug("%s: Session %d\n", __func__, ac->session);
q6asm_add_hdr(ac, &sbrps.hdr, sizeof(sbrps), TRUE);
+ atomic_set(&ac->cmd_state, 1);
sbrps.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
sbrps.encdec.param_id = ASM_PARAM_ID_AAC_SBR_PS_FLAG;
@@ -2474,6 +2478,7 @@
__func__, ac->session, sce_left, sce_right);
q6asm_add_hdr(ac, &dual_mono.hdr, sizeof(dual_mono), TRUE);
+ atomic_set(&ac->cmd_state, 1);
dual_mono.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
dual_mono.encdec.param_id = ASM_PARAM_ID_AAC_DUAL_MONO_MAPPING;
@@ -2505,8 +2510,36 @@
/* Support for selecting stereo mixing coefficients for B family not done */
int q6asm_cfg_aac_sel_mix_coef(struct audio_client *ac, uint32_t mix_coeff)
{
- /* To Be Done */
+ struct asm_aac_stereo_mix_coeff_selection_param_v2 aac_mix_coeff;
+ int rc = 0;
+
+ q6asm_add_hdr(ac, &aac_mix_coeff.hdr, sizeof(aac_mix_coeff), TRUE);
+ atomic_set(&ac->cmd_state, 1);
+ aac_mix_coeff.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ aac_mix_coeff.param_id =
+ ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG_V2;
+ aac_mix_coeff.param_size =
+ sizeof(struct asm_aac_stereo_mix_coeff_selection_param_v2);
+ aac_mix_coeff.aac_stereo_mix_coeff_flag = mix_coeff;
+ pr_debug("%s, mix_coeff = %u", __func__, mix_coeff);
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &aac_mix_coeff);
+ if (rc < 0) {
+ pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n",
+ __func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+ ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG_V2);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s:timeout opcode[0x%x]\n",
+ __func__, aac_mix_coeff.hdr.opcode);
+ rc = -ETIMEDOUT;
+ goto fail_cmd;
+ }
return 0;
+fail_cmd:
+ return rc;
}
int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf,
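The pattern repeated throughout q6asm.c in this change: cmd_state is armed immediately before each command is sent, rather than inside the header helpers, and the APR response handler clears it before the waiter times out. A user-space model of that handshake (the real driver uses wait_event_timeout() on ac->cmd_wait with a 5*HZ timeout; the ack here is simulated):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int cmd_state;

/* in the driver this runs from the APR callback when the DSP acks */
static void apr_ack(void)
{
	atomic_store(&cmd_state, 0);
}

static int send_cmd_and_wait(void)
{
	atomic_store(&cmd_state, 1);	/* arm right before apr_send_pkt() */
	/* apr_send_pkt(ac->apr, pkt) would go here */
	apr_ack();			/* simulated response */

	/* stand-in for wait_event_timeout(ac->cmd_wait, cmd_state == 0, 5*HZ) */
	return (atomic_load(&cmd_state) == 0) ? 0 : -1;
}

int main(void)
{
	printf("result: %d\n", send_cmd_and_wait());
	return 0;
}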
@@ -2522,6 +2555,7 @@
reduced_rate_level, rate_modulation_cmd);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+ atomic_set(&ac->cmd_state, 1);
enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
enc_cfg.encdec.param_size = sizeof(struct asm_v13k_enc_cfg) -
@@ -2563,6 +2597,7 @@
frames_per_buf, min_rate, max_rate, rate_modulation_cmd);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+ atomic_set(&ac->cmd_state, 1);
enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
enc_cfg.encdec.param_size = sizeof(struct asm_evrc_enc_cfg) -
@@ -2602,6 +2637,7 @@
__func__, ac->session, frames_per_buf, band_mode, dtx_enable);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+ atomic_set(&ac->cmd_state, 1);
enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
enc_cfg.encdec.param_size = sizeof(struct asm_amrnb_enc_cfg) -
@@ -2639,6 +2675,7 @@
__func__, ac->session, frames_per_buf, band_mode, dtx_enable);
q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+ atomic_set(&ac->cmd_state, 1);
enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2;
enc_cfg.encdec.param_size = sizeof(struct asm_amrwb_enc_cfg) -
@@ -2679,6 +2716,7 @@
channels);
q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+ atomic_set(&ac->cmd_state, 1);
fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
@@ -2739,6 +2777,7 @@
channels);
q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+ atomic_set(&ac->cmd_state, 1);
fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
@@ -2807,7 +2846,7 @@
cfg->sample_rate, cfg->ch_cfg);
q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id);
-
+ atomic_set(&ac->cmd_state, 1);
/*
* Updated the token field with stream/session for compressed playback
* Platform driver must know the the stream with which the command is
@@ -2883,6 +2922,7 @@
wma_cfg->ch_mask, wma_cfg->encode_opt);
q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+ atomic_set(&ac->cmd_state, 1);
fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
@@ -2928,6 +2968,7 @@
wmapro_cfg->adv_encode_opt, wmapro_cfg->adv_encode_opt2);
q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+ atomic_set(&ac->cmd_state, 1);
fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
@@ -2975,6 +3016,7 @@
cfg->num_channels);
q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+ atomic_set(&ac->cmd_state, 1);
fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
fmt.fmtblk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) -
@@ -3006,6 +3048,7 @@
pr_debug("%s: session[%d]param_id[%d]param_value[%d]", __func__,
ac->session, param_id, param_value);
q6asm_add_hdr(ac, &ddp_cfg.hdr, sizeof(ddp_cfg), TRUE);
+ atomic_set(&ac->cmd_state, 1);
ddp_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
ddp_cfg.encdec.param_id = param_id;
ddp_cfg.encdec.param_size = sizeof(struct asm_dec_ddp_endp_param_v2) -
@@ -3066,6 +3109,7 @@
mmap_region_cmd;
q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size,
TRUE, ((ac->session << 8) | dir));
+ atomic_set(&ac->cmd_state, 1);
mmap_regions->hdr.opcode = ASM_CMD_SHARED_MEM_MAP_REGIONS;
mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
mmap_regions->num_regions = bufcnt & 0x00ff;
@@ -3129,7 +3173,7 @@
q6asm_add_mmaphdr(ac, &mem_unmap.hdr,
sizeof(struct avs_cmd_shared_mem_unmap_regions),
TRUE, ((ac->session << 8) | dir));
-
+ atomic_set(&ac->cmd_state, 1);
mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
list_for_each_safe(ptr, next, &ac->port[dir].mem_map_handle) {
buf_node = list_entry(ptr, struct asm_buffer_node,
@@ -3227,6 +3271,7 @@
mmap_region_cmd;
q6asm_add_mmaphdr(ac, &mmap_regions->hdr, cmd_size, TRUE,
((ac->session << 8) | dir));
+ atomic_set(&ac->cmd_state, 1);
pr_debug("mmap_region=0x%p token=0x%x\n",
mmap_regions, ((ac->session << 8) | dir));
@@ -3308,6 +3353,7 @@
cmd_size = sizeof(struct avs_cmd_shared_mem_unmap_regions);
q6asm_add_mmaphdr(ac, &mem_unmap.hdr, cmd_size,
TRUE, ((ac->session << 8) | dir));
+ atomic_set(&ac->cmd_state, 1);
port = &ac->port[dir];
buf_add = (uint32_t)port->buf->phys;
mem_unmap.hdr.opcode = ASM_CMD_SHARED_MEM_UNMAP_REGIONS;
@@ -3365,6 +3411,7 @@
sz = sizeof(struct asm_volume_ctrl_lr_chan_gain);
q6asm_add_hdr_async(ac, &lrgain.hdr, sz, TRUE);
+ atomic_set(&ac->cmd_state, 1);
lrgain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
lrgain.param.data_payload_addr_lsw = 0;
lrgain.param.data_payload_addr_msw = 0;
@@ -3413,6 +3460,7 @@
sz = sizeof(struct asm_volume_ctrl_mute_config);
q6asm_add_hdr_async(ac, &mute.hdr, sz, TRUE);
+ atomic_set(&ac->cmd_state, 1);
mute.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
mute.param.data_payload_addr_lsw = 0;
mute.param.data_payload_addr_msw = 0;
@@ -3460,6 +3508,7 @@
sz = sizeof(struct asm_volume_ctrl_master_gain);
q6asm_add_hdr_async(ac, &vol.hdr, sz, TRUE);
+ atomic_set(&ac->cmd_state, 1);
vol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
vol.param.data_payload_addr_lsw = 0;
vol.param.data_payload_addr_msw = 0;
@@ -3508,6 +3557,7 @@
sz = sizeof(struct asm_soft_pause_params);
q6asm_add_hdr_async(ac, &softpause.hdr, sz, TRUE);
+ atomic_set(&ac->cmd_state, 1);
softpause.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
softpause.param.data_payload_addr_lsw = 0;
@@ -3561,6 +3611,7 @@
sz = sizeof(struct asm_soft_step_volume_params);
q6asm_add_hdr_async(ac, &softvol.hdr, sz, TRUE);
+ atomic_set(&ac->cmd_state, 1);
softvol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
softvol.param.data_payload_addr_lsw = 0;
softvol.param.data_payload_addr_msw = 0;
@@ -3619,6 +3670,7 @@
sz = sizeof(struct asm_eq_params);
eq_params = (struct msm_audio_eq_stream_config *) eq_p;
q6asm_add_hdr(ac, &eq.hdr, sz, TRUE);
+ atomic_set(&ac->cmd_state, 1);
eq.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
eq.param.data_payload_addr_lsw = 0;
@@ -3821,7 +3873,6 @@
q6asm_stream_add_hdr_async(
ac, &write.hdr, sizeof(write), FALSE, ac->stream_id);
-
port = &ac->port[IN];
ab = &port->buf[port->dsp_buf];
@@ -4130,6 +4181,7 @@
q6asm_add_hdr_async(ac, &hdr, (sizeof(struct apr_hdr) +
sizeof(struct asm_stream_cmd_set_pp_params_v2) +
params_length), TRUE);
+ atomic_set(&ac->cmd_state, 1);
hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
payload_params.data_payload_addr_lsw = 0;
payload_params.data_payload_addr_msw = 0;
@@ -4170,7 +4222,7 @@
return -EINVAL;
}
q6asm_stream_add_hdr(ac, &hdr, sizeof(hdr), TRUE, stream_id);
-
+ atomic_set(&ac->cmd_state, 1);
/*
* Updated the token field with stream/session for compressed playback
* Platform driver must know the stream with which the command is
@@ -4279,7 +4331,7 @@
return -EINVAL;
}
q6asm_stream_add_hdr_async(ac, &hdr, sizeof(hdr), TRUE, stream_id);
-
+ atomic_set(&ac->cmd_state, 1);
/*
* Updated the token field with stream/session for compressed playback
* Platform driver must know the stream with which the command is
@@ -4437,6 +4489,7 @@
pr_debug("%s:session[%d]enable[%d]\n", __func__,
ac->session, enable);
q6asm_add_hdr(ac, &tx_overflow.hdr, sizeof(tx_overflow), TRUE);
+ atomic_set(&ac->cmd_state, 1);
tx_overflow.hdr.opcode = \
ASM_SESSION_CMD_REGISTER_FORX_OVERFLOW_EVENTS;
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 61a262f..ac8b018 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -141,6 +141,7 @@
case VOLTE_SESSION_VSID:
case VOIP_SESSION_VSID:
case QCHAT_SESSION_VSID:
+ case VOWLAN_SESSION_VSID:
case ALL_SESSION_VSID:
ret = true;
break;
@@ -234,6 +235,9 @@
} else if (session_id ==
common.voice[VOC_PATH_QCHAT_PASSIVE].session_id) {
session_name = QCHAT_SESSION_NAME;
+ } else if (session_id ==
+ common.voice[VOC_PATH_VOWLAN_PASSIVE].session_id) {
+ session_name = VOWLAN_SESSION_NAME;
} else if (session_id == common.voice[VOC_PATH_FULL].session_id) {
session_name = VOIP_SESSION_NAME;
}
@@ -256,6 +260,9 @@
else if (!strncmp(name, "QCHAT session", 13))
session_id =
common.voice[VOC_PATH_QCHAT_PASSIVE].session_id;
+ else if (!strncmp(name, "VoWLAN session", 14))
+ session_id =
+ common.voice[VOC_PATH_VOWLAN_PASSIVE].session_id;
else
session_id = common.voice[VOC_PATH_FULL].session_id;
@@ -291,6 +298,10 @@
v = &common.voice[VOC_PATH_QCHAT_PASSIVE];
break;
+ case VOWLAN_SESSION_VSID:
+ v = &common.voice[VOC_PATH_VOWLAN_PASSIVE];
+ break;
+
case ALL_SESSION_VSID:
break;
@@ -331,6 +342,10 @@
idx = VOC_PATH_QCHAT_PASSIVE;
break;
+ case VOWLAN_SESSION_VSID:
+ idx = VOC_PATH_VOWLAN_PASSIVE;
+ break;
+
case ALL_SESSION_VSID:
idx = MAX_VOC_SESSIONS - 1;
break;
@@ -375,6 +390,11 @@
return (session_id == common.voice[VOC_PATH_QCHAT_PASSIVE].session_id);
}
+static bool is_vowlan_session(u32 session_id)
+{
+ return (session_id == common.voice[VOC_PATH_VOWLAN_PASSIVE].session_id);
+}
+
static bool is_voc_state_active(int voc_state)
{
if ((voc_state == VOC_RUN) ||
@@ -433,6 +453,7 @@
common.voice[VOC_PATH_VOICE2_PASSIVE].session_id = VOICE2_SESSION_VSID;
common.voice[VOC_PATH_FULL].session_id = VOIP_SESSION_VSID;
common.voice[VOC_PATH_QCHAT_PASSIVE].session_id = QCHAT_SESSION_VSID;
+ common.voice[VOC_PATH_VOWLAN_PASSIVE].session_id = VOWLAN_SESSION_VSID;
}
static int voice_apr_register(void)
@@ -661,6 +682,10 @@
strlcpy(mvm_session_cmd.mvm_session.name,
QCHAT_SESSION_VSID_STR,
sizeof(mvm_session_cmd.mvm_session.name));
+ } else if (is_vowlan_session(v->session_id)) {
+ strlcpy(mvm_session_cmd.mvm_session.name,
+ VOWLAN_SESSION_VSID_STR,
+ sizeof(mvm_session_cmd.mvm_session.name));
} else {
strlcpy(mvm_session_cmd.mvm_session.name,
"default modem voice",
@@ -753,6 +778,10 @@
strlcpy(cvs_session_cmd.cvs_session.name,
QCHAT_SESSION_VSID_STR,
sizeof(cvs_session_cmd.cvs_session.name));
+ } else if (is_vowlan_session(v->session_id)) {
+ strlcpy(cvs_session_cmd.cvs_session.name,
+ VOWLAN_SESSION_VSID_STR,
+ sizeof(cvs_session_cmd.cvs_session.name));
} else {
strlcpy(cvs_session_cmd.cvs_session.name,
"default modem voice",
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index 9efc9fc..59c86cd 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -1319,7 +1319,7 @@
void *buf;
};
-#define MAX_VOC_SESSIONS 5
+#define MAX_VOC_SESSIONS 6
struct common_data {
/* these default values are for all devices */
@@ -1393,21 +1393,25 @@
#define VOC_PATH_VOLTE_PASSIVE 2
#define VOC_PATH_VOICE2_PASSIVE 3
#define VOC_PATH_QCHAT_PASSIVE 4
+#define VOC_PATH_VOWLAN_PASSIVE 5
#define MAX_SESSION_NAME_LEN 32
-#define VOICE_SESSION_NAME "Voice session"
-#define VOIP_SESSION_NAME "VoIP session"
-#define VOLTE_SESSION_NAME "VoLTE session"
-#define VOICE2_SESSION_NAME "Voice2 session"
-#define QCHAT_SESSION_NAME "QCHAT session"
+#define VOICE_SESSION_NAME "Voice session"
+#define VOIP_SESSION_NAME "VoIP session"
+#define VOLTE_SESSION_NAME "VoLTE session"
+#define VOICE2_SESSION_NAME "Voice2 session"
+#define QCHAT_SESSION_NAME "QCHAT session"
+#define VOWLAN_SESSION_NAME "VoWLAN session"
#define VOICE2_SESSION_VSID_STR "10DC1000"
#define QCHAT_SESSION_VSID_STR "10803000"
+#define VOWLAN_SESSION_VSID_STR "10002000"
#define VOICE_SESSION_VSID 0x10C01000
#define VOICE2_SESSION_VSID 0x10DC1000
#define VOLTE_SESSION_VSID 0x10C02000
#define VOIP_SESSION_VSID 0x10004000
#define QCHAT_SESSION_VSID 0x10803000
+#define VOWLAN_SESSION_VSID 0x10002000
#define ALL_SESSION_VSID 0xFFFFFFFF
#define VSID_MAX ALL_SESSION_VSID
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 2f9e319..56efb97 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2308,6 +2308,7 @@
if (ret < 0) {
dev_err(dapm->dev, "Failed to add route %s->%s\n",
route->source, route->sink);
+ mutex_unlock(&dapm->card->dapm_mutex);
return ret;
}
route++;
@@ -3017,6 +3018,7 @@
dev_err(dapm->dev,
"ASoC: Failed to create DAPM control %s: %d\n",
widget->name, ret);
+ mutex_unlock(&dapm->card->dapm_mutex);
return ret;
}
widget++;