Merge "tty: serial: qcom_geni_serial: Fix wrap around of TX buffer"
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 4a5a887..ee4cc72 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -314,6 +314,8 @@
- Hotpluggable memory support, how to use and current status.
metag/
- directory with info about Linux on Meta architecture.
+mhi.txt
+ - Modem Host Interface
mips/
- directory with info about Linux on MIPS architecture.
misc-devices/
diff --git a/Documentation/devicetree/bindings/bus/mhi.txt b/Documentation/devicetree/bindings/bus/mhi.txt
new file mode 100644
index 0000000..dfbd846
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/mhi.txt
@@ -0,0 +1,320 @@
+MHI Host Interface
+
+MHI used by the host to control and communicate with modem over
+high speed peripheral bus.
+
+==============
+Node Structure
+==============
+
+Main node properties:
+
+- mhi,max-channels
+ Usage: required
+ Value type: <u32>
+ Definition: Maximum number of channels supported by this controller
+
+- mhi,timeout
+ Usage: optional
+ Value type: <u32>
+ Definition: Maximum timeout in ms wait for state and cmd completion
+
+- mhi,use-bb
+ Usage: optional
+ Value type: <bool>
+ Definition: Set true, if PCIe controller does not have full access to host
+ DDR, and we're using a dedicated memory pool like cma, or
+ carveout pool. Pool must support atomic allocation.
+
+- mhi,buffer-len
+ Usage: optional
+  Value type: <u32>
+  Definition: MHI automatically pre-allocates buffers for some channels.
+	Set the length of the buffer to allocate. If not set, the default
+	size MHI_MAX_MTU will be used.
+
+============================
+mhi channel node properties:
+============================
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: physical channel number
+
+- label
+ Usage: required
+ Value type: <string>
+ Definition: given name for the channel
+
+- mhi,num-elements
+ Usage: optional
+ Value type: <u32>
+ Definition: Number of elements transfer ring support
+
+- mhi,event-ring
+ Usage: required
+ Value type: <u32>
+ Definition: Event ring index associated with this channel
+
+- mhi,chan-dir
+ Usage: required
+ Value type: <u32>
+ Definition: Channel direction as defined by enum dma_data_direction
+ 0 = Bidirectional data transfer
+ 1 = UL data transfer
+ 2 = DL data transfer
+ 3 = No direction, not a regular data transfer channel
+
+- mhi,ee
+ Usage: required
+ Value type: <u32>
+  Definition: Channel execution environment (EE) mask as defined by enum
+ mhi_ch_ee_mask
+ BIT(0) = Channel supported in PBL EE
+ BIT(1) = Channel supported in SBL EE
+ BIT(2) = Channel supported in AMSS EE
+ BIT(3) = Channel supported in RDDM EE
+ BIT(4) = Channel supported in WFW EE
+ BIT(5) = Channel supported in PTHRU EE
+ BIT(6) = Channel supported in EDL EE
+
+- mhi,pollcfg
+ Usage: optional
+ Value type: <u32>
+ Definition: MHI poll configuration, valid only when burst mode is enabled
+ 0 = Use default (device specific) polling configuration
+ For UL channels, value specifies the timer to poll MHI context in
+ milliseconds.
+ For DL channels, the threshold to poll the MHI context in multiple of
+ eight ring element.
+
+- mhi,data-type
+ Usage: required
+ Value type: <u32>
+ Definition: Data transfer type accepted as defined by enum MHI_XFER_TYPE
+ 0 = accept cpu address for buffer
+ 1 = accept skb
+ 2 = accept scatterlist
+ 3 = offload channel, does not accept any transfer type
+ 4 = accept pre-mapped buffers
+ 5 = rsc channel type, accept pre-mapped buffers
+
+- mhi,doorbell-mode
+ Usage: required
+ Value type: <u32>
+ Definition: Channel doorbell mode configuration as defined by enum
+ MHI_BRSTMODE
+ 2 = burst mode disabled
+ 3 = burst mode enabled
+
+- mhi,lpm-notify
+ Usage: optional
+ Value type: <bool>
+ Definition: This channel master require low power mode enter and exit
+ notifications from mhi bus master.
+
+- mhi,offload-chan
+ Usage: optional
+ Value type: <bool>
+ Definition: Client managed channel, MHI host only involved in setting up
+ the data path, not involved in active data path.
+
+- mhi,db-mode-switch
+ Usage: optional
+ Value type: <bool>
+ Definition: Must switch to doorbell mode whenever MHI M0 state transition
+ happens.
+
+- mhi,auto-queue
+ Usage: optional
+ Value type: <bool>
+ Definition: MHI bus driver will pre-allocate buffers for this channel and
+ queue to hardware. If set, client not allowed to queue buffers. Valid
+ only for downlink direction.
+
+- mhi,auto-start
+ Usage: optional
+ Value type: <bool>
+  Definition: MHI host driver to automatically start channels once mhi device
+	driver probe is complete. This should only be set true if the initial
+	handshake is initiated by the external modem.
+
+- mhi,wake-capable
+ Usage: optional
+ Value type: <bool>
+ Definition: Time sensitive data channel, host should process all pending data
+ before system suspend.
+
+- mhi,chan-type
+ Usage: optional
+ Value type: <u32>
+  Definition: By default, chan-type is same as 'mhi,chan-dir' property except
+ in some special channels, chan type supplement chan direction.
+ 3 = default no direction, or inbound coalesced channel
+
+==========================
+mhi event node properties:
+==========================
+
+- mhi,num-elements
+ Usage: required
+ Value type: <u32>
+ Definition: Number of elements event ring support
+
+- mhi,intmod
+ Usage: required
+ Value type: <u32>
+ Definition: interrupt moderation time in ms
+
+- mhi,msi
+ Usage: required
+ Value type: <u32>
+ Definition: MSI associated with this event ring
+
+- mhi,chan
+ Usage: optional
+ Value type: <u32>
+ Definition: Dedicated channel number, if it's a dedicated event ring
+
+- mhi,priority
+ Usage: required
+ Value type: <u32>
+ Definition: Event ring priority, set to 1 for now
+
+- mhi,brstmode
+ Usage: required
+ Value type: <u32>
+ Definition: Event doorbell mode configuration as defined by
+ enum MHI_BRSTMODE
+ 2 = burst mode disabled
+ 3 = burst mode enabled
+
+- mhi,data-type
+ Usage: optional
+ Value type: <u32>
+ Definition: Type of data this event ring will process as defined
+ by enum mhi_er_data_type
+ 0 = process data packets (default)
+ 1 = process mhi control packets
+
+- mhi,hw-ev
+ Usage: optional
+ Value type: <bool>
+ Definition: Event ring associated with hardware channels
+
+- mhi,client-manage
+ Usage: optional
+ Value type: <bool>
+ Definition: Client manages the event ring (use by napi_poll)
+
+- mhi,offload
+ Usage: optional
+ Value type: <bool>
+ Definition: Event ring associated with offload channel
+
+
+Children node properties:
+
+MHI drivers that require DT can add driver specific information as a child node.
+
+- mhi,chan
+ Usage: Required
+ Value type: <string>
+ Definition: Channel name
+
+========
+Example:
+========
+mhi_controller {
+ mhi,max-channels = <105>;
+
+ mhi_chan@0 {
+ reg = <0>;
+ label = "LOOPBACK";
+ mhi,num-elements = <64>;
+ mhi,event-ring = <2>;
+ mhi,chan-dir = <1>;
+ mhi,data-type = <0>;
+ mhi,doorbell-mode = <2>;
+ mhi,ee = <0x4>;
+ };
+
+ mhi_chan@1 {
+ reg = <1>;
+ label = "LOOPBACK";
+ mhi,num-elements = <64>;
+ mhi,event-ring = <2>;
+ mhi,chan-dir = <2>;
+ mhi,data-type = <0>;
+ mhi,doorbell-mode = <2>;
+ mhi,ee = <0x4>;
+ };
+
+ mhi_event@0 {
+ mhi,num-elements = <32>;
+ mhi,intmod = <1>;
+ mhi,msi = <1>;
+ mhi,chan = <0>;
+ mhi,priority = <1>;
+		mhi,brstmode = <2>;
+ mhi,data-type = <1>;
+ };
+
+ mhi_event@1 {
+ mhi,num-elements = <256>;
+ mhi,intmod = <1>;
+ mhi,msi = <2>;
+ mhi,chan = <0>;
+ mhi,priority = <1>;
+		mhi,brstmode = <2>;
+ };
+
+ mhi,timeout = <500>;
+
+ children_node {
+ mhi,chan = "LOOPBACK"
+ <driver specific properties>
+ };
+};
+
+================
+Children Devices
+================
+
+MHI netdev properties
+
+- mhi,chan
+ Usage: required
+ Value type: <string>
+ Definition: Channel name MHI netdev support
+
+- mhi,mru
+ Usage: required
+ Value type: <u32>
+ Definition: Largest packet size interface can receive in bytes.
+
+- mhi,interface-name
+ Usage: optional
+ Value type: <string>
+ Definition: Interface name to be given so clients can identify it
+
+- aliases
+ Usage: required
+ Value type: <string>
+ Definition: mhi net_device should have numbered alias in the alias node,
+ in the form of mhi_netdevN, N = 0, 1..n for each network interface.
+
+========
+Example:
+========
+
+aliases {
+ mhi_netdev0 = &mhi_netdev_0;
+};
+
+mhi_netdev_0: mhi_rmnet@0 {
+ mhi,chan = "IP_HW0";
+ mhi,interface-name = "rmnet_mhi";
+ mhi,mru = <0x4000>;
+};
diff --git a/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt b/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt
index d24314a..970dae2 100644
--- a/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt
@@ -124,3 +124,21 @@
qcom,max-current = <1500>;
qcom,max-duration = <1200>;
};
+
+
+Example:
+
+flash0: qcom,camera-flash {
+ cell-index = <0>;
+ compatible = "qcom,qm215-gpio-flash";
+ qcom,flash-type = <2>;
+	gpios = <&tlmm 34 0>,
+		<&tlmm 33 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
+ qcom,gpio-flash-en = <0>;
+ qcom,gpio-flash-now = <1>;
+ qcom,gpio-req-tbl-label = "CAM_FLASH",
+ "CAM_TORCH";
+ status = "ok";
+ };
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index 7652aa4..eded4f4 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -89,6 +89,7 @@
monitoring of holb via IPA uc is required.
- qcom,wlan-ce-db-over-pcie: Boolean context flag to represent WLAN CE DB
over pcie bus or not.
+- qcom,use-xbl-boot: Boolean to indicate XBL boot loading for IPA FW
IPA pipe sub nodes (A2 static pipes configurations):
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 039f881..b23d058 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -2580,7 +2580,7 @@
<&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
<&afe_proxy_tx>, <&incall_record_rx>,
<&incall_record_tx>, <&incall_music_rx>,
- <&dai_sec_auxpcm>;
+ <&dai_sec_auxpcm>, <&incall_music_dl_rx>;
asoc-cpu-names = "msm-dai-q6-auxpcm.1",
"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
"msm-dai-stub-dev.4", "msm-dai-stub-dev.5",
@@ -2589,7 +2589,7 @@
"msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
"msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
"msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
- "msm-dai-q6-auxpcm.2";
+ "msm-dai-q6-auxpcm.2", "msm-dai-q6-dev.32774";
};
* SDX ASoC Auto Machine driver
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 431c32a..f1130e8 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -191,6 +191,8 @@
both "USB" and "USB-HOST" events.
- qcom,phy-id-high-as-peripheral: If present, specifies device to switch to device mode
if PHY ID state is high or host mode if PHY ID state is low.
+- qcom,enumeration-check-for-sdp: If present, start timer for SDP charger to check enumeration
+ happen or not.
Example HSUSB OTG controller device node :
usb@f9690000 {
diff --git a/Documentation/mhi.txt b/Documentation/mhi.txt
new file mode 100644
index 0000000..9287899
--- /dev/null
+++ b/Documentation/mhi.txt
@@ -0,0 +1,276 @@
+Overview of Linux kernel MHI support
+====================================
+
+Modem-Host Interface (MHI)
+=========================
+MHI used by the host to control and communicate with modem over high speed
+peripheral bus. Even though MHI can be easily adapted to any peripheral bus,
+primarily used with PCIe based devices. The host has one or more PCIe root
+ports connected to modem device. The host has limited access to device memory
+space, including register configuration and control the device operation.
+Data transfers are invoked from the device.
+
+All data structures used by MHI are in the host system memory. Using PCIe
+interface, the device accesses those data structures. MHI data structures and
+data buffers in the host system memory regions are mapped for device.
+
+Memory spaces
+-------------
+PCIe Configurations : Used for enumeration and resource management, such as
+interrupt and base addresses. This is done by mhi control driver.
+
+MMIO
+----
+MHI MMIO : Memory mapped IO consists of set of registers in the device hardware,
+which are mapped to the host memory space through PCIe base address register
+(BAR)
+
+MHI control registers : Access to MHI configurations registers
+(struct mhi_controller.regs).
+
+MHI BHI register: Boot host interface registers (struct mhi_controller.bhi) used
+for firmware download before MHI initialization.
+
+Channel db array : Doorbell registers (struct mhi_chan.tre_ring.db_addr) used by
+host to notify device there is new work to do.
+
+Event db array : Associated with event context array
+(struct mhi_event.ring.db_addr), host uses to notify device free events are
+available.
+
+Data structures
+---------------
+Host memory : Directly accessed by the host to manage the MHI data structures
+and buffers. The device accesses the host memory over the PCIe interface.
+
+Channel context array : All channel configurations are organized in channel
+context data array.
+
+struct __packed mhi_chan_ctxt;
+struct mhi_ctxt.chan_ctxt;
+
+Transfer rings : Used by host to schedule work items for a channel and organized
+as a circular queue of transfer descriptors (TD).
+
+struct __packed mhi_tre;
+struct mhi_chan.tre_ring;
+
+Event context array : All event configurations are organized in event context
+data array.
+
+struct mhi_ctxt.er_ctxt;
+struct __packed mhi_event_ctxt;
+
+Event rings: Used by device to send completion and state transition messages to
+host
+
+struct mhi_event.ring;
+struct __packed mhi_tre;
+
+Command context array: All command configurations are organized in command
+context data array.
+
+struct __packed mhi_cmd_ctxt;
+struct mhi_ctxt.cmd_ctxt;
+
+Command rings: Used by host to send MHI commands to device
+
+struct __packed mhi_tre;
+struct mhi_cmd.ring;
+
+Transfer rings
+--------------
+MHI channels are logical, unidirectional data pipes between host and device.
+Each channel associated with a single transfer ring. The data direction can be
+either inbound (device to host) or outbound (host to device). Transfer
+descriptors are managed by using transfer rings, which are defined for each
+channel between device and host and resides in the host memory.
+
+Transfer ring Pointer: Transfer Ring Array
+[Read Pointer (RP)] ----------->[Ring Element] } TD
+[Write Pointer (WP)]- [Ring Element]
+ - [Ring Element]
+ --------->[Ring Element]
+ [Ring Element]
+
+1. Host allocates memory for transfer ring
+2. Host sets base, read pointer, write pointer in corresponding channel context
+3. Ring is considered empty when RP == WP
+4. Ring is considered full when WP + 1 == RP
+5. RP indicates the next element to be serviced by device
+6. When host has a new buffer to send, host updates the ring element with buffer
+   information
+7. Host increments the WP to next element
+8. Ring the associated channel DB.
+
+Event rings
+-----------
+Events from the device to host are organized in event rings and defined in event
+descriptors. Event rings are array of EDs that resides in the host memory.
+
+Transfer ring Pointer: Event Ring Array
+[Read Pointer (RP)] ----------->[Ring Element] } ED
+[Write Pointer (WP)]- [Ring Element]
+ - [Ring Element]
+ --------->[Ring Element]
+ [Ring Element]
+
+1. Host allocates memory for event ring
+2. Host sets base, read pointer, write pointer in corresponding channel context
+3. Both host and device have a local copy of RP, WP
+4. Ring is considered empty (no events to service) when WP + 1 == RP
+5. Ring is full of events when RP == WP
+6. RP - 1 = last event device programmed
+7. When there is a new event to send, device updates the ED pointed to by RP
+8. Device increments RP to next element
+9. Device triggers an interrupt
+
+Example Operation for data transfer:
+
+1. Host prepare TD with buffer information
+2. Host increment Chan[id].ctxt.WP
+3. Host ring channel DB register
+4. Device wakes up process the TD
+5. Device generate a completion event for that TD by updating ED
+6. Device increment Event[id].ctxt.RP
+7. Device trigger MSI to wake host
+8. Host wakes up and check event ring for completion event
+9. Host updates the Event[i].ctxt.WP to indicate processing of the completion event.
+
+Time sync
+---------
+To synchronize two applications between host and external modem, MHI provide
+native support to get external modems free running timer value in a fast
+reliable method. MHI clients do not need to create client specific methods to
+get modem time.
+
+When client requests modem time, MHI host will automatically capture host time
+at that moment so clients are able to do accurate drift adjustment.
+
+Example:
+
+Client request time @ time T1
+
+Host Time: Tx
+Modem Time: Ty
+
+Client request time @ time T2
+Host Time: Txx
+Modem Time: Tyy
+
+Then drift is:
+Tyy - Ty + <drift> == Txx - Tx
+
+Clients are free to implement their own drift algorithms, what MHI host provide
+is a way to accurately correlate host time with external modem time.
+
+To avoid link level latencies, controller must support capabilities to disable
+any link level latency.
+
+During Time capture host will:
+ 1. Capture host time
+ 2. Trigger doorbell to capture modem time
+
+It's important that the time between Step 1 and Step 2 is as deterministic as possible.
+Therefore, MHI host will:
+  1. Disable any MHI related low power modes.
+ 2. Disable preemption
+ 3. Request bus master to disable any link level latencies. Controller
+ should disable all low power modes such as L0s, L1, L1ss.
+
+MHI States
+----------
+
+enum MHI_STATE {
+MHI_STATE_RESET : MHI is in reset state, POR state. Host is not allowed to
+ access device MMIO register space.
+MHI_STATE_READY : Device is ready for initialization. Host can start MHI
+ initialization by programming MMIO
+MHI_STATE_M0 : MHI is in fully active state, data transfer is active
+MHI_STATE_M1 : Device in a suspended state
+MHI_STATE_M2 : MHI in low power mode, device may enter lower power mode.
+MHI_STATE_M3 : Both host and device in suspended state. PCIe link is not
+ accessible to device.
+
+MHI Initialization
+------------------
+
+1. After system boots, the device is enumerated over PCIe interface
+2. Host allocates MHI context for event, channel and command arrays
+3. Initialize context array, and prepare interrupts
+4. Host waits until device enters READY state
+5. Program MHI MMIO registers and set device into MHI_M0 state
+6. Wait for device to enter M0 state
+
+Linux Software Architecture
+===========================
+
+MHI Controller
+--------------
+MHI controller is also the MHI bus master. In charge of managing the physical
+link between host and device. Not involved in actual data transfer. At least
+for PCIe based buses, for other type of bus, we can expand to add support.
+
+Roles:
+1. Turn on PCIe bus and configure the link
+2. Configure MSI, SMMU, and IOMEM
+3. Allocate struct mhi_controller and register with MHI bus framework
+4. Initiate power on and shutdown sequence
+5. Initiate suspend and resume
+
+Usage
+-----
+
+1. Allocate control data structure by calling mhi_alloc_controller()
+2. Initialize mhi_controller with all the known information such as:
+ - Device Topology
+ - IOMMU window
+ - IOMEM mapping
+ - Device to use for memory allocation, and of_node with DT configuration
+ - Configure asynchronous callback functions
+3. Register MHI controller with MHI bus framework by calling
+ of_register_mhi_controller()
+
+After successfully registering controller can initiate any of these power modes:
+
+1. Power up sequence
+ - mhi_prepare_for_power_up()
+ - mhi_async_power_up()
+ - mhi_sync_power_up()
+2. Power down sequence
+ - mhi_power_down()
+ - mhi_unprepare_after_power_down()
+3. Initiate suspend
+ - mhi_pm_suspend()
+4. Initiate resume
+ - mhi_pm_resume()
+
+MHI Devices
+-----------
+Logical device that bind to maximum of two physical MHI channels. Once MHI is in
+powered on state, each supported channel by controller will be allocated as a
+mhi_device.
+
+Each supported device would be enumerated under
+/sys/bus/mhi/devices/
+
+struct mhi_device;
+
+MHI Driver
+----------
+Each MHI driver can bind to one or more MHI devices. MHI host driver will bind
+mhi_device to mhi_driver.
+
+All registered drivers are visible under
+/sys/bus/mhi/drivers/
+
+struct mhi_driver;
+
+Usage
+-----
+
+1. Register driver using mhi_driver_register
+2. Before sending data, prepare device for transfer by calling
+ mhi_prepare_for_transfer
+3. Initiate data transfer by calling mhi_queue_transfer
+4. After finish, call mhi_unprepare_from_transfer to end data transfer
diff --git a/Makefile b/Makefile
index 1cfb7b3..24cdf7e 100755
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 193
+SUBLEVEL = 194
EXTRAVERSION =
NAME = Roaring Lionus
@@ -306,6 +306,11 @@
HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
HOSTCXXFLAGS = -O2
+ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
+HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
+ -Wno-missing-field-initializers
+endif
+
# Decide whether to build built-in, modular, or both.
# Normally, just do built-in.
@@ -532,8 +537,13 @@
KBUILD_CFLAGS += $(call cc-disable-warning, duplicate-decl-specifier)
KBUILD_CFLAGS += -Wno-undefined-optimized
KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
+KBUILD_CFLAGS += $(call cc-option, -Wno-sometimes-uninitialized)
+KBUILD_CFLAGS += -Wno-asm-operand-widths
+KBUILD_CFLAGS += -Wno-initializer-overrides
+KBUILD_CFLAGS += -fno-builtin
# Quiet clang warning: comparison of unsigned expression < 0 is always false
+
KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
# source of a reference will be _MergedGlobals and not on of the whitelisted names.
@@ -546,6 +556,7 @@
KBUILD_AFLAGS += $(CLANG_FLAGS)
else
+KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
# These warnings generated too much noise in a regular build.
# Use make W=1 to enable them (see scripts/Makefile.build)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index 2fb0cd3..cd6e361 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -163,3 +163,4 @@
{
__asm__ __volatile__("trap_s 5\n");
}
+EXPORT_SYMBOL(abort);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 628715e..8f70fb1 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -573,8 +573,6 @@
select USE_OF
select PINCTRL
select ARCH_WANT_KMAP_ATOMIC_FLUSH
- select SND_SOC_COMPRESS
- select SND_HWDEP
help
Support for Qualcomm MSM/QSD based systems. This runs on the
apps processor of the MSM/QSD and depends on a shared memory
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 82945ce..1369444 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -35,6 +35,9 @@
# This should work on most of the modern platforms
KBUILD_DEFCONFIG := multi_v7_defconfig
+ifeq ($(COMPILER),clang)
+KBUILD_CFLAGS += -Wa,-mno-warn-deprecated
+endif
# defines filename extension depending memory management type.
ifeq ($(CONFIG_MMU),)
@@ -43,15 +46,17 @@
endif
ifeq ($(CONFIG_FRAME_POINTER),y)
-KBUILD_CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
+KBUILD_CFLAGS +=-fno-omit-frame-pointer $(call cc-option,-mapcs,) $(call cc-option,-mno-sched-prolog,)
endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
-KBUILD_CPPFLAGS += -mbig-endian
+KBUILD_CPPFLAGS += $(call cc-option,-mbig-endian)
+CHECKFLAGS += -D__ARMEB__
AS += -EB
LD += -EB
else
-KBUILD_CPPFLAGS += -mlittle-endian
+KBUILD_CPPFLAGS += $(call cc-option,-mlittle-endian)
+CHECKFLAGS += -D__ARMEL__
AS += -EL
LD += -EL
endif
@@ -108,7 +113,7 @@
tune-y := $(tune-y)
ifeq ($(CONFIG_AEABI),y)
-CFLAGS_ABI :=-mabi=aapcs-linux -mfpu=vfp
+CFLAGS_ABI :=-mabi=aapcs-linux $(call cc-option,-mno-thumb-interwork,) -mfpu=vfp
else
CFLAGS_ABI :=$(call cc-option,-mapcs-32,-mabi=apcs-gnu) $(call cc-option,-mno-thumb-interwork,)
endif
@@ -132,7 +137,7 @@
endif
# Need -Uarm for gcc < 3.x
-KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
+KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float $(call cc-option, -Uarm,)
KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
CHECKFLAGS += -D__arm__ -m32
diff --git a/arch/arm/boot/dts/qcom/sa415m-ccard.dtsi b/arch/arm/boot/dts/qcom/sa415m-ccard.dtsi
index 42676f0..00564c7 100644
--- a/arch/arm/boot/dts/qcom/sa415m-ccard.dtsi
+++ b/arch/arm/boot/dts/qcom/sa415m-ccard.dtsi
@@ -51,6 +51,8 @@
&i2c_4 {
asm330@6a {
reg = <0x6b>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sensor_enable_default>;
};
};
@@ -122,7 +124,7 @@
<&incall_record_tx>, <&incall_music_rx>,
<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
- <&dai_sec_auxpcm>;
+ <&dai_sec_auxpcm>, <&incall_music_dl_rx>;
asoc-cpu-names = "msm-dai-q6-auxpcm.1",
"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
"msm-dai-stub-dev.4", "msm-dai-stub-dev.5",
@@ -133,7 +135,7 @@
"msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
"msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
- "msm-dai-q6-auxpcm.2";
+ "msm-dai-q6-auxpcm.2", "msm-dai-q6-dev.32774";
asoc-codec = <&tlv320aic3x_codec>, <&stub_codec>;
asoc-codec-names = "tlv320aic3x-codec", "msm-stub-codec.1";
};
diff --git a/arch/arm/boot/dts/qcom/sa415m-cdp.dtsi b/arch/arm/boot/dts/qcom/sa415m-cdp.dtsi
index edc08cc..b7996e5 100644
--- a/arch/arm/boot/dts/qcom/sa415m-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sa415m-cdp.dtsi
@@ -16,6 +16,10 @@
status = "okay";
};
+&qcom_seecom {
+ status = "okay";
+};
+
&usb {
status = "okay";
qcom,connector-type-uAB;
@@ -28,3 +32,17 @@
&smb138x {
status = "disabled";
};
+
+&thermal_zones {
+ mdm-core-step {
+ trips {
+ mdm-step-trip-0 {
+ temperature = <105000>;
+ };
+
+ mdm-step-trip-1 {
+ temperature = <115000>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sa415m-mtp-256.dts b/arch/arm/boot/dts/qcom/sa415m-mtp-256.dts
index 39ed5c6..913b3a8 100644
--- a/arch/arm/boot/dts/qcom/sa415m-mtp-256.dts
+++ b/arch/arm/boot/dts/qcom/sa415m-mtp-256.dts
@@ -32,3 +32,17 @@
&smb138x {
status = "disabled";
};
+
+&thermal_zones {
+ mdm-core-step {
+ trips {
+ mdm-step-trip-0 {
+ temperature = <105000>;
+ };
+
+ mdm-step-trip-1 {
+ temperature = <115000>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sa415m-ttp.dtsi b/arch/arm/boot/dts/qcom/sa415m-ttp.dtsi
index f918935..29091d3 100644
--- a/arch/arm/boot/dts/qcom/sa415m-ttp.dtsi
+++ b/arch/arm/boot/dts/qcom/sa415m-ttp.dtsi
@@ -83,6 +83,10 @@
};
};
+&ipa_hw {
+ qcom,use-xbl-boot;
+};
+
&i2c_3 {
eeprom@52 {
compatible = "atmel,24c128";
@@ -95,6 +99,20 @@
status = "disabled";
};
+&thermal_zones {
+ mdm-core-step {
+ trips {
+ mdm-step-trip-0 {
+ temperature = <105000>;
+ };
+
+ mdm-step-trip-1 {
+ temperature = <115000>;
+ };
+ };
+ };
+};
+
&i2c_4 {
status = "okay";
diff --git a/arch/arm/boot/dts/qcom/sa415m-v2-mtp.dts b/arch/arm/boot/dts/qcom/sa415m-v2-mtp.dts
index fe59e05..4e0aefe 100644
--- a/arch/arm/boot/dts/qcom/sa415m-v2-mtp.dts
+++ b/arch/arm/boot/dts/qcom/sa415m-v2-mtp.dts
@@ -37,3 +37,17 @@
&smb138x {
status = "disabled";
};
+
+&thermal_zones {
+ mdm-core-step {
+ trips {
+ mdm-step-trip-0 {
+ temperature = <105000>;
+ };
+
+ mdm-step-trip-1 {
+ temperature = <115000>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi b/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi
index eade6b7..8e358b6 100644
--- a/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi
+++ b/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi
@@ -307,6 +307,7 @@
};
prim_slave: prim_slave_pinctrl {
+ status = "disabled";
compatible = "qcom,msm-cdc-pinctrl";
pinctrl-names = "aud_active", "aud_sleep";
pinctrl-0 = <&pri_ws_active_slave
@@ -335,6 +336,7 @@
};
sec_slave: sec_slave_pinctrl {
+ status = "disabled";
compatible = "qcom,msm-cdc-pinctrl";
pinctrl-names = "aud_active", "aud_sleep";
pinctrl-0 = <&sec_ws_active_slave
diff --git a/arch/arm/boot/dts/qcom/sdx-wsa881x.dtsi b/arch/arm/boot/dts/qcom/sdx-wsa881x.dtsi
index a294e6c..7c761b5 100644
--- a/arch/arm/boot/dts/qcom/sdx-wsa881x.dtsi
+++ b/arch/arm/boot/dts/qcom/sdx-wsa881x.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,18 +18,21 @@
#size-cells = <0>;
wsa881x_0211: wsa881x@20170211 {
+ status = "disabled";
compatible = "qcom,wsa881x";
reg = <0x00 0x20170211>;
qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
};
wsa881x_0212: wsa881x@20170212 {
+ status = "disabled";
compatible = "qcom,wsa881x";
reg = <0x00 0x20170212>;
qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
};
wsa881x_0213: wsa881x@21170213 {
+ status = "disabled";
compatible = "qcom,wsa881x";
reg = <0x00 0x21170213>;
qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
index f441b3f..033f8a3 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
@@ -1751,6 +1751,20 @@
output-high;
};
};
+
+ sensor_enable_default: sensor_enable_default {
+ mux {
+ pins = "gpio90";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio90";
+ bias-pull-up;
+ drive-strength = <16>;
+ output-high;
+ };
+ };
};
};
diff --git a/arch/arm/configs/sdw3300-perf_defconfig b/arch/arm/configs/msm8909-minimal-perf_defconfig
similarity index 63%
copy from arch/arm/configs/sdw3300-perf_defconfig
copy to arch/arm/configs/msm8909-minimal-perf_defconfig
index bb2bb1d..bd3cae3 100644
--- a/arch/arm/configs/sdw3300-perf_defconfig
+++ b/arch/arm/configs/msm8909-minimal-perf_defconfig
@@ -1,6 +1,5 @@
CONFIG_LOCALVERSION="-perf"
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_FHANDLE is not set
+CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -11,9 +10,10 @@
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_RCU_EXPERT=y
CONFIG_RCU_FAST_NO_HZ=y
-CONFIG_RCU_NOCB_CPU=y
-CONFIG_RCU_NOCB_CPU_ALL=y
-CONFIG_LOG_CPU_MAX_BUF_SHIFT=13
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
@@ -21,8 +21,6 @@
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
CONFIG_DEFAULT_USE_ENERGY_AWARE=y
@@ -32,6 +30,7 @@
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
# CONFIG_MEMBARRIER is not set
@@ -39,7 +38,8 @@
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_OPROFILE=m
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
CONFIG_ARCH_MMAP_RND_BITS=16
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -49,35 +49,20 @@
CONFIG_MODULE_SIG_FORCE=y
CONFIG_MODULE_SIG_SHA512=y
CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MSM8937=y
-CONFIG_ARCH_MSM8917=y
-CONFIG_ARCH_SDM439=y
-CONFIG_ARCH_SDM429=y
-# CONFIG_VDSO is not set
+CONFIG_ARCH_MSM8909=y
CONFIG_SMP=y
CONFIG_SCHED_MC=y
-CONFIG_NR_CPUS=8
-CONFIG_ARM_PSCI=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
-CONFIG_HIGHMEM=y
CONFIG_ARM_MODULE_PLTS=y
CONFIG_CMA=y
-CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_POWERSAVE=y
-CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-CONFIG_CPU_FREQ_MSM=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
@@ -100,10 +85,8 @@
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
-CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
-CONFIG_INET_IPCOMP=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -112,9 +95,10 @@
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
-CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
+CONFIG_NETLABEL=y
+CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CONNTRACK_SECMARK=y
@@ -134,14 +118,12 @@
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
-CONFIG_NETFILTER_XT_TARGET_TEE=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@@ -155,16 +137,17 @@
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
CONFIG_NETFILTER_XT_MATCH_HELPER=y
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
-# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MAC=y
CONFIG_NETFILTER_XT_MATCH_MARK=y
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -199,7 +182,6 @@
CONFIG_BRIDGE_NF_EBTABLES=y
CONFIG_BRIDGE_EBT_BROUTE=y
CONFIG_L2TP=y
-CONFIG_L2TP_DEBUGFS=y
CONFIG_L2TP_V3=y
CONFIG_L2TP_IP=y
CONFIG_L2TP_ETH=y
@@ -209,7 +191,6 @@
CONFIG_NET_SCH_PRIO=y
CONFIG_NET_CLS_FW=y
CONFIG_NET_CLS_U32=y
-CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_CMP=y
@@ -218,22 +199,21 @@
CONFIG_NET_EMATCH_META=y
CONFIG_NET_EMATCH_TEXT=y
CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
CONFIG_RMNET_DATA=y
CONFIG_RMNET_DATA_FC=y
CONFIG_RMNET_DATA_DEBUG_PKT=y
CONFIG_BT=y
-# CONFIG_BT_BREDR is not set
-# CONFIG_BT_LE is not set
CONFIG_MSM_BT_POWER=y
CONFIG_CFG80211=y
CONFIG_CFG80211_INTERNAL_REGDB=y
-# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_RFKILL=y
-CONFIG_NFC_NQ=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
-CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
@@ -241,28 +221,21 @@
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_HDCP_QSEECOM=y
CONFIG_QSEECOM=y
-CONFIG_UID_SYS_STATS=y
-CONFIG_QPNP_MISC=y
+CONFIG_MEMORY_STATE_TIME=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
-CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_TUN=y
-# CONFIG_NET_VENDOR_AMAZON is not set
-# CONFIG_NET_CADENCE is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HISILICON is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
@@ -275,42 +248,19 @@
CONFIG_PPPOPNS=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
-CONFIG_USB_USBNET=y
-# CONFIG_WLAN_VENDOR_ADMTEK is not set
-# CONFIG_WLAN_VENDOR_ATH is not set
-# CONFIG_WLAN_VENDOR_ATMEL is not set
-# CONFIG_WLAN_VENDOR_BROADCOM is not set
-# CONFIG_WLAN_VENDOR_CISCO is not set
-# CONFIG_WLAN_VENDOR_INTEL is not set
-# CONFIG_WLAN_VENDOR_INTERSIL is not set
-# CONFIG_WLAN_VENDOR_MARVELL is not set
-# CONFIG_WLAN_VENDOR_MEDIATEK is not set
-# CONFIG_WLAN_VENDOR_RALINK is not set
-# CONFIG_WLAN_VENDOR_REALTEK is not set
-# CONFIG_WLAN_VENDOR_RSI is not set
-# CONFIG_WLAN_VENDOR_ST is not set
-# CONFIG_WLAN_VENDOR_TI is not set
-# CONFIG_WLAN_VENDOR_ZYDAS is not set
CONFIG_WCNSS_MEM_PRE_ALLOC=y
-CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS=y
+CONFIG_CNSS_SDIO=y
+CONFIG_CLD_HL_SDIO_CORE=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
-# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
-CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_QPNP_POWER_ON=y
-CONFIG_INPUT_QTI_HAPTICS=y
+CONFIG_STMVL53L0X=y
CONFIG_INPUT_UINPUT=y
-# CONFIG_SERIO_SERPORT is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVMEM is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_MSM_HS=y
CONFIG_SERIAL_MSM_SMD=y
CONFIG_DIAG_CHAR=y
@@ -319,27 +269,28 @@
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_SMD_PKT=y
CONFIG_MSM_ADSPRPC=y
-CONFIG_MSM_RDBG=m
+CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
CONFIG_I2C_MSM_V2=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=y
CONFIG_SLIMBUS_MSM_NGD=y
CONFIG_SPMI=y
-CONFIG_PINCTRL_MSM8937=y
-CONFIG_PINCTRL_MSM8917=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_MSM8909=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_QPNP_PIN=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_QCOM=y
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
CONFIG_POWER_SUPPLY=y
-CONFIG_QPNP_FG_GEN3=y
-CONFIG_QPNP_SMB2=y
-CONFIG_MSM_APM=y
+CONFIG_SMB1360_CHARGER_FG=y
+CONFIG_QPNP_VM_BMS=y
+CONFIG_QPNP_LINEAR_CHARGER=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
CONFIG_THERMAL_WRITABLE_TRIPS=y
@@ -350,97 +301,43 @@
CONFIG_THERMAL_QPNP=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_THERMAL_TSENS=y
-CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
CONFIG_QTI_QMI_COOLING_DEVICE=y
CONFIG_REGULATOR_COOLING_DEVICE=y
-CONFIG_QTI_BCL_PMIC5=y
-CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_QCOM_RPM=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_PROXY_CONSUMER=y
-CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_QCOM_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
CONFIG_REGULATOR_CPR=y
-CONFIG_REGULATOR_CPR4_APSS=y
-CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_MEM_ACC=y
CONFIG_REGULATOR_MSM_GFX_LDO=y
CONFIG_REGULATOR_QPNP=y
CONFIG_REGULATOR_RPM_SMD=y
CONFIG_REGULATOR_SPM=y
CONFIG_REGULATOR_STUB=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_MEDIA_CAMERA_SUPPORT=y
-CONFIG_MEDIA_CONTROLLER=y
-CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_MEDIA_USB_SUPPORT=y
-CONFIG_USB_VIDEO_CLASS=y
-CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_MSM_CAMERA=y
-CONFIG_MSM_CAMERA_DEBUG=y
-CONFIG_MSMB_CAMERA=y
-CONFIG_MSMB_CAMERA_DEBUG=y
-CONFIG_MSM_CAMERA_SENSOR=y
-CONFIG_MSM_CPP=y
-CONFIG_MSM_CCI=y
-CONFIG_MSM_CSI20_HEADER=y
-CONFIG_MSM_CSI22_HEADER=y
-CONFIG_MSM_CSI30_HEADER=y
-CONFIG_MSM_CSI31_HEADER=y
-CONFIG_MSM_CSIPHY=y
-CONFIG_MSM_CSID=y
-CONFIG_MSM_EEPROM=y
-CONFIG_MSM_ISPIF_V2=y
-CONFIG_IMX134=y
-CONFIG_IMX132=y
-CONFIG_OV9724=y
-CONFIG_OV5648=y
-CONFIG_GC0339=y
-CONFIG_OV8825=y
-CONFIG_OV8865=y
-CONFIG_s5k4e1=y
-CONFIG_OV12830=y
-CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
-CONFIG_MSMB_JPEG=y
-CONFIG_MSM_FD=y
-CONFIG_MSM_VIDC_3X_V4L2=y
-CONFIG_MSM_VIDC_3X_GOVERNORS=y
-CONFIG_RADIO_IRIS=y
-CONFIG_RADIO_IRIS_TRANSPORT=y
-CONFIG_QCOM_KGSL=y
-CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
-CONFIG_SND_USB_AUDIO=y
CONFIG_SND_SOC=y
-CONFIG_SND_SOC_AW8896=y
CONFIG_UHID=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_ELECOM=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MULTITOUCH=y
-CONFIG_USB_HIDDEV=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_MONTEREY=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_MSM=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ACM=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
-CONFIG_USB_EHSET_TEST_FIXTURE=y
-CONFIG_NOP_USB_XCEIV=y
-CONFIG_DUAL_ROLE_USB_INTF=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
CONFIG_USB_GADGET_DEBUG_FS=y
@@ -448,26 +345,13 @@
CONFIG_USB_CI13XXX_MSM=y
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_SERIAL=y
-CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_RNDIS=y
-CONFIG_USB_CONFIGFS_RMNET_BAM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_MTP=y
-CONFIG_USB_CONFIGFS_F_PTP=y
-CONFIG_USB_CONFIGFS_F_ACC=y
-CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_UEVENT=y
-CONFIG_USB_CONFIGFS_F_MIDI=y
-CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
-CONFIG_USB_CONFIGFS_F_CCID=y
-CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
CONFIG_MMC_PERF_PROFILING=y
-# CONFIG_PWRSEQ_EMMC is not set
-# CONFIG_PWRSEQ_SIMPLE is not set
CONFIG_MMC_PARANOID_SD_INIT=y
CONFIG_MMC_CLKGATE=y
CONFIG_MMC_BLOCK_MINORS=32
@@ -475,17 +359,17 @@
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
-CONFIG_MMC_SDHCI_MSM_ICE=y
-CONFIG_MMC_CQ_HCI=y
-CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_CLASS_FLASH=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_VIBRATOR=y
CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_QPNP=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_SPS_DMA=y
+CONFIG_SYNC_FILE=y
CONFIG_UIO=y
CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
@@ -493,27 +377,21 @@
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ION=y
CONFIG_ION_MSM=y
-CONFIG_IPA=y
-CONFIG_RMNET_IPA=y
-CONFIG_RNDIS_IPA=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
-CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
-CONFIG_MSM_RMNET_BAM=y
-CONFIG_MSM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_LAZY_MAPPING=y
-CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
-CONFIG_MSM_RPM_SMD=y
CONFIG_QCOM_BUS_SCALING=y
CONFIG_QCOM_SECURE_BUFFER=y
CONFIG_QCOM_EARLY_RANDOM=y
@@ -521,20 +399,15 @@
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_TZ_SMMU=y
+CONFIG_TRACER_PKT=y
CONFIG_MSM_SMP2P=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_QMI_INTERFACE=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
-CONFIG_MSM_SYSMON_COMM=y
CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
-CONFIG_ICNSS=y
-CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_EVENT_TIMER=y
-CONFIG_MSM_AVTIMER=y
-CONFIG_MSM_PM=y
-CONFIG_QCOM_DCC=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
@@ -542,63 +415,55 @@
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
-CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
-CONFIG_QCOM_BIMC_BWMON=y
-CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
-CONFIG_DEVFREQ_SIMPLE_DEV=y
-CONFIG_QCOM_DEVFREQ_DEVBW=y
-CONFIG_SPDM_SCM=y
-CONFIG_DEVFREQ_SPDM=y
+CONFIG_CNSS_CRYPTO=y
CONFIG_IIO=y
-CONFIG_QCOM_RRADC=y
+CONFIG_INV_ICM20602_IIO=y
CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_QTI_MPM=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDER_IPC_32BIT=y
+CONFIG_STM=y
CONFIG_SENSORS_SSC=y
CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
-CONFIG_EXT4_FS_ENCRYPTION=y
-CONFIG_EXT4_FS_ICE_ENCRYPTION=y
-CONFIG_F2FS_FS=y
-CONFIG_F2FS_FS_SECURITY=y
-CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
-CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_SDCARD_FS=y
-# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_FRAME_WARN=2048
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_TIMEOUT=5
-# CONFIG_SCHED_DEBUG is not set
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
# CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_STACKTRACE=y
# CONFIG_FTRACE is not set
-# CONFIG_ARM_UNWIND is not set
-CONFIG_PFK=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
-CONFIG_LSM_MMAP_MIN_ADDR=4096
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_PATH=y
CONFIG_HARDENED_USERCOPY=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_CRC32=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
-CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
diff --git a/arch/arm/configs/sdw3300_defconfig b/arch/arm/configs/msm8909-minimal_defconfig
similarity index 68%
copy from arch/arm/configs/sdw3300_defconfig
copy to arch/arm/configs/msm8909-minimal_defconfig
index 5e6fafb..7862d2f 100644
--- a/arch/arm/configs/sdw3300_defconfig
+++ b/arch/arm/configs/msm8909-minimal_defconfig
@@ -1,5 +1,4 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_FHANDLE is not set
+CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -16,6 +15,8 @@
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
@@ -23,26 +24,18 @@
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
CONFIG_DEFAULT_USE_ENERGY_AWARE=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_RD_BZIP2 is not set
-# CONFIG_RD_LZMA is not set
-# CONFIG_RD_XZ is not set
-# CONFIG_RD_LZO is not set
-# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
-# CONFIG_MEMBARRIER is not set
CONFIG_EMBEDDED=y
-# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
CONFIG_KPROBES=y
-CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
CONFIG_ARCH_MMAP_RND_BITS=16
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -52,17 +45,10 @@
CONFIG_MODULE_SIG_FORCE=y
CONFIG_MODULE_SIG_SHA512=y
CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MSM8937=y
-CONFIG_ARCH_MSM8917=y
-CONFIG_ARCH_SDM439=y
-CONFIG_ARCH_SDM429=y
-# CONFIG_VDSO is not set
+CONFIG_ARCH_MSM8909=y
CONFIG_SMP=y
CONFIG_SCHED_MC=y
-CONFIG_NR_CPUS=8
-CONFIG_ARM_PSCI=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
@@ -70,26 +56,18 @@
CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_POWERSAVE=y
-CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-CONFIG_CPU_FREQ_MSM=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_KERNEL_MODE_NEON=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_PM_AUTOSLEEP=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
-# CONFIG_PM_WAKELOCKS_GC is not set
CONFIG_PM_DEBUG=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -104,7 +82,6 @@
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
-CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
@@ -116,9 +93,10 @@
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
-CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
+CONFIG_NETLABEL=y
+CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CONNTRACK_SECMARK=y
@@ -150,7 +128,6 @@
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
-CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -160,7 +137,6 @@
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
CONFIG_NETFILTER_XT_MATCH_HELPER=y
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
-# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MAC=y
@@ -171,6 +147,7 @@
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -213,6 +190,8 @@
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_CLS_FW=y
CONFIG_NET_CLS_U32=y
CONFIG_CLS_U32_MARK=y
@@ -224,23 +203,22 @@
CONFIG_NET_EMATCH_META=y
CONFIG_NET_EMATCH_TEXT=y
CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
CONFIG_DNS_RESOLVER=y
CONFIG_RMNET_DATA=y
CONFIG_RMNET_DATA_FC=y
CONFIG_RMNET_DATA_DEBUG_PKT=y
CONFIG_BT=y
-# CONFIG_BT_BREDR is not set
-# CONFIG_BT_LE is not set
CONFIG_MSM_BT_POWER=y
CONFIG_CFG80211=y
CONFIG_CFG80211_INTERNAL_REGDB=y
-# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_RFKILL=y
CONFIG_NFC_NQ=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
-CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
@@ -249,27 +227,29 @@
CONFIG_HDCP_QSEECOM=y
CONFIG_QSEECOM=y
CONFIG_UID_SYS_STATS=y
-CONFIG_QPNP_MISC=y
+CONFIG_MEMORY_STATE_TIME=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
CONFIG_DM_CRYPT=y
-CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_TUN=y
-# CONFIG_NET_VENDOR_AMAZON is not set
-# CONFIG_NET_CADENCE is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HISILICON is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
@@ -282,39 +262,21 @@
CONFIG_PPPOPNS=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
-CONFIG_USB_USBNET=y
-# CONFIG_WLAN_VENDOR_ADMTEK is not set
-# CONFIG_WLAN_VENDOR_ATH is not set
-# CONFIG_WLAN_VENDOR_ATMEL is not set
-# CONFIG_WLAN_VENDOR_BROADCOM is not set
-# CONFIG_WLAN_VENDOR_CISCO is not set
-# CONFIG_WLAN_VENDOR_INTEL is not set
-# CONFIG_WLAN_VENDOR_INTERSIL is not set
-# CONFIG_WLAN_VENDOR_MARVELL is not set
-# CONFIG_WLAN_VENDOR_MEDIATEK is not set
-# CONFIG_WLAN_VENDOR_RALINK is not set
-# CONFIG_WLAN_VENDOR_REALTEK is not set
-# CONFIG_WLAN_VENDOR_RSI is not set
-# CONFIG_WLAN_VENDOR_ST is not set
-# CONFIG_WLAN_VENDOR_TI is not set
-# CONFIG_WLAN_VENDOR_ZYDAS is not set
CONFIG_WCNSS_MEM_PRE_ALLOC=y
-CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS=y
+CONFIG_CNSS_SDIO=y
+CONFIG_CLD_HL_SDIO_CORE=y
CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
-CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
+CONFIG_JOYSTICK_XPAD=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_QPNP_POWER_ON=y
-CONFIG_INPUT_QTI_HAPTICS=y
+CONFIG_STMVL53L0X=y
CONFIG_INPUT_UINPUT=y
-# CONFIG_SERIO_SERPORT is not set
-# CONFIG_VT is not set
+CONFIG_INPUT_GPIO=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVMEM is not set
# CONFIG_DEVKMEM is not set
@@ -328,27 +290,29 @@
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_SMD_PKT=y
CONFIG_MSM_ADSPRPC=y
-CONFIG_MSM_RDBG=m
+CONFIG_MSM_RDBG=y
+CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
CONFIG_I2C_MSM_V2=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=y
CONFIG_SLIMBUS_MSM_NGD=y
CONFIG_SPMI=y
-CONFIG_PINCTRL_MSM8937=y
-CONFIG_PINCTRL_MSM8917=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_MSM8909=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_QPNP_PIN=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_QCOM=y
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
CONFIG_POWER_SUPPLY=y
-CONFIG_QPNP_FG_GEN3=y
-CONFIG_QPNP_SMB2=y
-CONFIG_MSM_APM=y
+CONFIG_SMB1360_CHARGER_FG=y
+CONFIG_QPNP_VM_BMS=y
+CONFIG_QPNP_LINEAR_CHARGER=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
CONFIG_THERMAL_WRITABLE_TRIPS=y
@@ -359,97 +323,53 @@
CONFIG_THERMAL_QPNP=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_THERMAL_TSENS=y
-CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
CONFIG_QTI_QMI_COOLING_DEVICE=y
CONFIG_REGULATOR_COOLING_DEVICE=y
-CONFIG_QTI_BCL_PMIC5=y
-CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_QCOM_RPM=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_PROXY_CONSUMER=y
-CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_QCOM_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
CONFIG_REGULATOR_CPR=y
-CONFIG_REGULATOR_CPR4_APSS=y
-CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_MEM_ACC=y
CONFIG_REGULATOR_MSM_GFX_LDO=y
CONFIG_REGULATOR_QPNP=y
CONFIG_REGULATOR_RPM_SMD=y
CONFIG_REGULATOR_SPM=y
CONFIG_REGULATOR_STUB=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_MEDIA_CAMERA_SUPPORT=y
-CONFIG_MEDIA_CONTROLLER=y
-CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_MEDIA_USB_SUPPORT=y
-CONFIG_USB_VIDEO_CLASS=y
-CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_MSM_CAMERA=y
-CONFIG_MSM_CAMERA_DEBUG=y
-CONFIG_MSMB_CAMERA=y
-CONFIG_MSMB_CAMERA_DEBUG=y
-CONFIG_MSM_CAMERA_SENSOR=y
-CONFIG_MSM_CPP=y
-CONFIG_MSM_CCI=y
-CONFIG_MSM_CSI20_HEADER=y
-CONFIG_MSM_CSI22_HEADER=y
-CONFIG_MSM_CSI30_HEADER=y
-CONFIG_MSM_CSI31_HEADER=y
-CONFIG_MSM_CSIPHY=y
-CONFIG_MSM_CSID=y
-CONFIG_MSM_EEPROM=y
-CONFIG_MSM_ISPIF_V2=y
-CONFIG_IMX134=y
-CONFIG_IMX132=y
-CONFIG_OV9724=y
-CONFIG_OV5648=y
-CONFIG_GC0339=y
-CONFIG_OV8825=y
-CONFIG_OV8865=y
-CONFIG_s5k4e1=y
-CONFIG_OV12830=y
-CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
-CONFIG_MSMB_JPEG=y
-CONFIG_MSM_FD=y
-CONFIG_MSM_VIDC_3X_V4L2=y
-CONFIG_MSM_VIDC_3X_GOVERNORS=y
-CONFIG_RADIO_IRIS=y
-CONFIG_RADIO_IRIS_TRANSPORT=y
-CONFIG_QCOM_KGSL=y
-CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
-CONFIG_SND_USB_AUDIO=y
CONFIG_SND_SOC=y
-CONFIG_SND_SOC_AW8896=y
CONFIG_UHID=y
+CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
CONFIG_HID_MULTITOUCH=y
-CONFIG_USB_HIDDEV=y
-CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_MSM=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_ACM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CP210X=y
+CONFIG_USB_SERIAL_FTDI_SIO=y
CONFIG_USB_EHSET_TEST_FIXTURE=y
-CONFIG_NOP_USB_XCEIV=y
-CONFIG_DUAL_ROLE_USB_INTF=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
CONFIG_USB_GADGET_DEBUG_FS=y
@@ -458,45 +378,36 @@
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_QCRNDIS=y
-CONFIG_USB_CONFIGFS_RNDIS=y
CONFIG_USB_CONFIGFS_RMNET_BAM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
-CONFIG_USB_CONFIGFS_F_ACC=y
-CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_UEVENT=y
-CONFIG_USB_CONFIGFS_F_MIDI=y
-CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
-CONFIG_USB_CONFIGFS_F_CCID=y
-CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
CONFIG_MMC_PERF_PROFILING=y
-# CONFIG_PWRSEQ_EMMC is not set
-# CONFIG_PWRSEQ_SIMPLE is not set
CONFIG_MMC_RING_BUFFER=y
CONFIG_MMC_PARANOID_SD_INIT=y
CONFIG_MMC_CLKGATE=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
-CONFIG_MMC_SDHCI_MSM_ICE=y
-CONFIG_MMC_CQ_HCI=y
-CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_CLASS_FLASH=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_VIBRATOR=y
CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_QPNP=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_SPS_DMA=y
+CONFIG_SYNC_FILE=y
CONFIG_UIO=y
CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
@@ -506,95 +417,78 @@
CONFIG_ION_MSM=y
CONFIG_IPA=y
CONFIG_RMNET_IPA=y
-CONFIG_RNDIS_IPA=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
-CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
-CONFIG_MSM_RMNET_BAM=y
-CONFIG_MSM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_LAZY_MAPPING=y
-CONFIG_IOMMU_DEBUG=y
-CONFIG_IOMMU_DEBUG_TRACKING=y
-CONFIG_IOMMU_TESTS=y
-CONFIG_QCOM_CPUSS_DUMP=y
-CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
-CONFIG_MSM_DEBUG_LAR_UNLOCK=y
-CONFIG_MSM_RPM_SMD=y
CONFIG_QCOM_BUS_SCALING=y
CONFIG_QCOM_SECURE_BUFFER=y
CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_MSM_SMEM=y
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_GLINK=y
CONFIG_MSM_TZ_SMMU=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
CONFIG_TRACER_PKT=y
CONFIG_MSM_SMP2P=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
-CONFIG_MSM_SYSMON_COMM=y
CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
-CONFIG_ICNSS=y
-CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_EVENT_TIMER=y
-CONFIG_MSM_AVTIMER=y
-CONFIG_MSM_PM=y
-CONFIG_QCOM_DCC=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
-# CONFIG_MSM_JTAGV8 is not set
CONFIG_MSM_BAM_DMUX=y
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
-CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
-CONFIG_QCOM_BIMC_BWMON=y
-CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
-CONFIG_DEVFREQ_SIMPLE_DEV=y
-CONFIG_QCOM_DEVFREQ_DEVBW=y
-CONFIG_SPDM_SCM=y
-CONFIG_DEVFREQ_SPDM=y
+CONFIG_CNSS_CRYPTO=y
CONFIG_IIO=y
-CONFIG_QCOM_RRADC=y
+CONFIG_INV_ICM20602_IIO=y
CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_QTI_MPM=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDER_IPC_32BIT=y
CONFIG_SENSORS_SSC=y
CONFIG_MSM_TZ_LOG=y
-CONFIG_EXT4_FS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
-CONFIG_EXT4_FS_ENCRYPTION=y
-CONFIG_EXT4_FS_ICE_ENCRYPTION=y
-CONFIG_F2FS_FS=y
-CONFIG_F2FS_FS_SECURITY=y
-CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_QFMT_V2=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_SDCARD_FS=y
-# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
@@ -606,8 +500,6 @@
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_SLUB_DEBUG_PANIC_ON=y
CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
-CONFIG_PAGE_POISONING=y
-CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_FREE=y
CONFIG_DEBUG_OBJECTS_TIMERS=y
@@ -621,13 +513,14 @@
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+# CONFIG_DETECT_HUNG_TASK is not set
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_PANIC_ON_SCHED_BUG=y
CONFIG_PANIC_ON_RT_THROTTLING=y
CONFIG_SCHEDSTATS=y
CONFIG_SCHED_STACK_END_CHECK=y
-# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LIST=y
@@ -647,12 +540,12 @@
CONFIG_MEMTEST=y
CONFIG_PANIC_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_USER=y
-CONFIG_FORCE_PAGES=y
CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
-CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_SINK_TPIU=y
+CONFIG_CORESIGHT_SOURCE_ETM3X=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_QCOM_REPLICATOR=y
@@ -663,17 +556,18 @@
CONFIG_CORESIGHT_CTI=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_CORESIGHT_HWEVENT=y
-CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
-CONFIG_LSM_MMAP_MIN_ADDR=4096
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_PATH=y
CONFIG_HARDENED_USERCOPY=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_CRC32=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
diff --git a/arch/arm/configs/msm8909-perf_defconfig b/arch/arm/configs/msm8909-perf_defconfig
index 458d60a..4ad91f3 100755
--- a/arch/arm/configs/msm8909-perf_defconfig
+++ b/arch/arm/configs/msm8909-perf_defconfig
@@ -400,6 +400,7 @@
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
+CONFIG_MSM_RMNET_BAM=y
CONFIG_MSM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
diff --git a/arch/arm/configs/msm8909_defconfig b/arch/arm/configs/msm8909_defconfig
index 38d48ac..61e623d 100644
--- a/arch/arm/configs/msm8909_defconfig
+++ b/arch/arm/configs/msm8909_defconfig
@@ -442,6 +442,7 @@
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
+CONFIG_MSM_RMNET_BAM=y
CONFIG_MSM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
diff --git a/arch/arm/configs/sa415m-perf_defconfig b/arch/arm/configs/sa415m-perf_defconfig
index b095036..ff8219a 100644
--- a/arch/arm/configs/sa415m-perf_defconfig
+++ b/arch/arm/configs/sa415m-perf_defconfig
@@ -185,6 +185,9 @@
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=12
+CONFIG_MHI_BUS=y
+CONFIG_MHI_NETDEV=y
+CONFIG_MHI_UCI=y
CONFIG_MTD=y
CONFIG_MTD_TESTS=m
CONFIG_MTD_CMDLINE_PARTS=y
@@ -214,14 +217,12 @@
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+CONFIG_PHYLIB=y
CONFIG_AT803X_PHY=y
CONFIG_MICREL_PHY=y
CONFIG_PPP=y
CONFIG_PPPOL2TP=y
CONFIG_PPP_ASYNC=y
-CONFIG_USB_USBNET=y
-CONFIG_USB_NET_SMSC75XX=y
-CONFIG_USB_NET_SMSC95XX=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS2=y
CONFIG_CNSS2_DEBUG=y
@@ -291,23 +292,16 @@
CONFIG_REGULATOR_QPNP=y
CONFIG_REGULATOR_RPMH=y
# CONFIG_VGA_ARB is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
+CONFIG_SOUND=m
+CONFIG_SND=m
CONFIG_SND_DYNAMIC_MINORS=y
-CONFIG_SND_USB_AUDIO=y
-CONFIG_SND_USB_AUDIO_QMI=y
-CONFIG_SND_SOC=y
-CONFIG_SND_SOC_TLV320AIC3X=y
+# CONFIG_SND_USB is not set
+CONFIG_SND_SOC=m
+CONFIG_SND_SOC_TLV320AIC3X=m
# CONFIG_HID_GENERIC is not set
CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_XHCI_HCD=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_ACM=y
CONFIG_USB_DWC3=y
CONFIG_USB_DWC3_MSM=y
-CONFIG_USB_EHSET_TEST_FIXTURE=y
-CONFIG_USB_LINK_LAYER_TEST=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_MSM_SSPHY_QMP=y
CONFIG_MSM_HSUSB_PHY=y
@@ -454,10 +448,12 @@
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
# CONFIG_SECURITY_SELINUX_AVC_STATS is not set
+CONFIG_CRYPTO_AUTHENC=y
CONFIG_CRYPTO_CRC32C=y
-CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
-CONFIG_CRYPTO_DEV_QCRYPTO=y
-CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=m
+CONFIG_CRYPTO_DEV_QCRYPTO=m
+CONFIG_CRYPTO_DEV_QCEDEV=m
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_POWERPC is not set
# CONFIG_XZ_DEC_IA64 is not set
diff --git a/arch/arm/configs/sa415m_defconfig b/arch/arm/configs/sa415m_defconfig
index 489d4b3..72f4ea3 100644
--- a/arch/arm/configs/sa415m_defconfig
+++ b/arch/arm/configs/sa415m_defconfig
@@ -185,6 +185,10 @@
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=12
+CONFIG_MHI_BUS=y
+CONFIG_MHI_DEBUG=y
+CONFIG_MHI_NETDEV=y
+CONFIG_MHI_UCI=y
CONFIG_MTD=y
CONFIG_MTD_TESTS=m
CONFIG_MTD_CMDLINE_PARTS=y
@@ -214,14 +218,12 @@
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
+CONFIG_PHYLIB=y
CONFIG_AT803X_PHY=y
CONFIG_MICREL_PHY=y
CONFIG_PPP=y
CONFIG_PPPOL2TP=y
CONFIG_PPP_ASYNC=y
-CONFIG_USB_USBNET=y
-CONFIG_USB_NET_SMSC75XX=y
-CONFIG_USB_NET_SMSC95XX=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS2=y
CONFIG_CNSS2_DEBUG=y
@@ -297,13 +299,12 @@
CONFIG_FB_MSM=y
CONFIG_FB_MSM_MDP_NONE=y
CONFIG_FB_MSM_QPIC_PANEL_DETECT=y
-CONFIG_SOUND=y
-CONFIG_SND=y
+CONFIG_SOUND=m
+CONFIG_SND=m
CONFIG_SND_DYNAMIC_MINORS=y
-CONFIG_SND_USB_AUDIO=y
-CONFIG_SND_USB_AUDIO_QMI=y
-CONFIG_SND_SOC=y
-CONFIG_SND_SOC_TLV320AIC3X=y
+# CONFIG_SND_USB is not set
+CONFIG_SND_SOC=m
+CONFIG_SND_SOC_TLV320AIC3X=m
CONFIG_UHID=y
CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
@@ -311,27 +312,8 @@
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MULTITOUCH=y
CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_XHCI_HCD=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_ACM=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DEBUG=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_ISD200=y
-CONFIG_USB_STORAGE_USBAT=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_USB_STORAGE_ALAUDA=y
-CONFIG_USB_STORAGE_ONETOUCH=y
-CONFIG_USB_STORAGE_KARMA=y
-CONFIG_USB_STORAGE_CYPRESS_ATACB=y
CONFIG_USB_DWC3=y
CONFIG_USB_DWC3_MSM=y
-CONFIG_USB_EHSET_TEST_FIXTURE=y
-CONFIG_USB_LINK_LAYER_TEST=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_MSM_SSPHY_QMP=y
CONFIG_MSM_HSUSB_PHY=y
@@ -505,8 +487,10 @@
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
# CONFIG_SECURITY_SELINUX_AVC_STATS is not set
-CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
-CONFIG_CRYPTO_DEV_QCRYPTO=y
-CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=m
+CONFIG_CRYPTO_DEV_QCRYPTO=m
+CONFIG_CRYPTO_DEV_QCEDEV=m
CONFIG_XZ_DEC=y
CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm/configs/sdw3300-perf_defconfig b/arch/arm/configs/sdm429-bg-perf_defconfig
similarity index 88%
rename from arch/arm/configs/sdw3300-perf_defconfig
rename to arch/arm/configs/sdm429-bg-perf_defconfig
index bb2bb1d..7bb5cf9 100644
--- a/arch/arm/configs/sdw3300-perf_defconfig
+++ b/arch/arm/configs/sdm429-bg-perf_defconfig
@@ -14,6 +14,7 @@
CONFIG_RCU_NOCB_CPU=y
CONFIG_RCU_NOCB_CPU_ALL=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=13
+CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
@@ -32,6 +33,7 @@
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
# CONFIG_MEMBARRIER is not set
@@ -135,13 +137,9 @@
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
-CONFIG_NETFILTER_XT_TARGET_TEE=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@@ -151,11 +149,9 @@
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_DSCP=y
-CONFIG_NETFILTER_XT_MATCH_ESP=y
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
CONFIG_NETFILTER_XT_MATCH_HELPER=y
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
-# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MAC=y
@@ -196,13 +192,6 @@
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
-CONFIG_BRIDGE_NF_EBTABLES=y
-CONFIG_BRIDGE_EBT_BROUTE=y
-CONFIG_L2TP=y
-CONFIG_L2TP_DEBUGFS=y
-CONFIG_L2TP_V3=y
-CONFIG_L2TP_IP=y
-CONFIG_L2TP_ETH=y
CONFIG_BRIDGE=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
@@ -211,6 +200,7 @@
CONFIG_NET_CLS_U32=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_CMP=y
CONFIG_NET_EMATCH_NBYTE=y
@@ -234,6 +224,7 @@
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_REGMAP_SND_DIGCDC=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
@@ -243,12 +234,9 @@
CONFIG_QSEECOM=y
CONFIG_UID_SYS_STATS=y
CONFIG_QPNP_MISC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
-CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
@@ -263,6 +251,7 @@
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SYNOPSYS is not set
+CONFIG_PHYLIB=y
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
@@ -270,12 +259,10 @@
CONFIG_PPP_MPPE=y
CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=y
-CONFIG_PPPOL2TP=y
CONFIG_PPPOLAC=y
CONFIG_PPPOPNS=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
-CONFIG_USB_USBNET=y
# CONFIG_WLAN_VENDOR_ADMTEK is not set
# CONFIG_WLAN_VENDOR_ATH is not set
# CONFIG_WLAN_VENDOR_ATMEL is not set
@@ -350,22 +337,18 @@
CONFIG_THERMAL_QPNP=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
CONFIG_QTI_VIRTUAL_SENSOR=y
CONFIG_QTI_QMI_COOLING_DEVICE=y
CONFIG_REGULATOR_COOLING_DEVICE=y
-CONFIG_QTI_BCL_PMIC5=y
-CONFIG_QTI_BCL_SOC_DRIVER=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_GPIO=y
CONFIG_REGULATOR_CPR=y
-CONFIG_REGULATOR_CPR4_APSS=y
-CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_MEM_ACC=y
CONFIG_REGULATOR_MSM_GFX_LDO=y
-CONFIG_REGULATOR_QPNP=y
CONFIG_REGULATOR_RPM_SMD=y
CONFIG_REGULATOR_SPM=y
CONFIG_REGULATOR_STUB=y
@@ -373,73 +356,33 @@
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_MEDIA_USB_SUPPORT=y
-CONFIG_USB_VIDEO_CLASS=y
CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_MSM_CAMERA=y
-CONFIG_MSM_CAMERA_DEBUG=y
-CONFIG_MSMB_CAMERA=y
-CONFIG_MSMB_CAMERA_DEBUG=y
-CONFIG_MSM_CAMERA_SENSOR=y
-CONFIG_MSM_CPP=y
-CONFIG_MSM_CCI=y
-CONFIG_MSM_CSI20_HEADER=y
-CONFIG_MSM_CSI22_HEADER=y
-CONFIG_MSM_CSI30_HEADER=y
-CONFIG_MSM_CSI31_HEADER=y
-CONFIG_MSM_CSIPHY=y
-CONFIG_MSM_CSID=y
-CONFIG_MSM_EEPROM=y
-CONFIG_MSM_ISPIF_V2=y
-CONFIG_IMX134=y
-CONFIG_IMX132=y
-CONFIG_OV9724=y
-CONFIG_OV5648=y
-CONFIG_GC0339=y
-CONFIG_OV8825=y
-CONFIG_OV8865=y
-CONFIG_s5k4e1=y
-CONFIG_OV12830=y
-CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
-CONFIG_MSMB_JPEG=y
-CONFIG_MSM_FD=y
CONFIG_MSM_VIDC_3X_V4L2=y
CONFIG_MSM_VIDC_3X_GOVERNORS=y
-CONFIG_RADIO_IRIS=y
-CONFIG_RADIO_IRIS_TRANSPORT=y
+CONFIG_ADSP_SHMEM=y
CONFIG_QCOM_KGSL=y
CONFIG_FB=y
+CONFIG_FB_VIRTUAL=y
CONFIG_FB_MSM=y
CONFIG_FB_MSM_MDSS=y
CONFIG_FB_MSM_MDSS_WRITEBACK=y
CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_USB_AUDIO=y
CONFIG_SND_SOC=y
-CONFIG_SND_SOC_AW8896=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MULTITOUCH=y
-CONFIG_USB_HIDDEV=y
CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MSM=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ACM=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
-CONFIG_USB_EHSET_TEST_FIXTURE=y
-CONFIG_NOP_USB_XCEIV=y
CONFIG_DUAL_ROLE_USB_INTF=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
@@ -449,8 +392,6 @@
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_RNDIS=y
-CONFIG_USB_CONFIGFS_RMNET_BAM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_MTP=y
@@ -463,7 +404,6 @@
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_CCID=y
-CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
CONFIG_MMC_PERF_PROFILING=y
# CONFIG_PWRSEQ_EMMC is not set
@@ -476,7 +416,8 @@
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
-CONFIG_MMC_CQ_HCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
CONFIG_LEDS_QPNP_HAPTICS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
@@ -493,12 +434,8 @@
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ION=y
CONFIG_ION_MSM=y
-CONFIG_IPA=y
-CONFIG_RMNET_IPA=y
-CONFIG_RNDIS_IPA=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
-CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
CONFIG_MSM_RMNET_BAM=y
@@ -520,10 +457,12 @@
CONFIG_MSM_SMEM=y
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_GLINK=y
CONFIG_MSM_TZ_SMMU=y
CONFIG_MSM_SMP2P=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_SYSMON_COMM=y
CONFIG_MSM_PIL=y
@@ -539,6 +478,11 @@
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_MSM_BAM_DMUX=y
+CONFIG_MSM_GLINK_BGCOM_XPRT=y
+CONFIG_MSM_BGCOM_INTERFACE=y
+CONFIG_MSM_BGRSB=y
+CONFIG_MSM_PIL_SSR_BG=y
+CONFIG_MSM_BGCOM=y
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
@@ -593,6 +537,7 @@
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm/configs/sdw3300_defconfig b/arch/arm/configs/sdm429-bg_defconfig
similarity index 89%
rename from arch/arm/configs/sdw3300_defconfig
rename to arch/arm/configs/sdm429-bg_defconfig
index 5e6fafb..a1cf1bb 100644
--- a/arch/arm/configs/sdw3300_defconfig
+++ b/arch/arm/configs/sdm429-bg_defconfig
@@ -16,6 +16,7 @@
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
@@ -34,6 +35,7 @@
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
# CONFIG_MEMBARRIER is not set
@@ -139,13 +141,9 @@
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
-CONFIG_NETFILTER_XT_TARGET_TEE=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@@ -156,11 +154,9 @@
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_DSCP=y
-CONFIG_NETFILTER_XT_MATCH_ESP=y
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
CONFIG_NETFILTER_XT_MATCH_HELPER=y
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
-# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MAC=y
@@ -171,6 +167,7 @@
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -202,13 +199,6 @@
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
-CONFIG_BRIDGE_NF_EBTABLES=y
-CONFIG_BRIDGE_EBT_BROUTE=y
-CONFIG_L2TP=y
-CONFIG_L2TP_DEBUGFS=y
-CONFIG_L2TP_V3=y
-CONFIG_L2TP_IP=y
-CONFIG_L2TP_ETH=y
CONFIG_BRIDGE=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
@@ -217,6 +207,7 @@
CONFIG_NET_CLS_U32=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_CMP=y
CONFIG_NET_EMATCH_NBYTE=y
@@ -241,6 +232,7 @@
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_REGMAP_SND_DIGCDC=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
@@ -250,12 +242,9 @@
CONFIG_QSEECOM=y
CONFIG_UID_SYS_STATS=y
CONFIG_QPNP_MISC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
-CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
@@ -270,6 +259,7 @@
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SYNOPSYS is not set
+CONFIG_PHYLIB=y
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
@@ -277,12 +267,10 @@
CONFIG_PPP_MPPE=y
CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=y
-CONFIG_PPPOL2TP=y
CONFIG_PPPOLAC=y
CONFIG_PPPOPNS=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
-CONFIG_USB_USBNET=y
# CONFIG_WLAN_VENDOR_ADMTEK is not set
# CONFIG_WLAN_VENDOR_ATH is not set
# CONFIG_WLAN_VENDOR_ATMEL is not set
@@ -359,22 +347,18 @@
CONFIG_THERMAL_QPNP=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
CONFIG_QTI_VIRTUAL_SENSOR=y
CONFIG_QTI_QMI_COOLING_DEVICE=y
CONFIG_REGULATOR_COOLING_DEVICE=y
-CONFIG_QTI_BCL_PMIC5=y
-CONFIG_QTI_BCL_SOC_DRIVER=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_GPIO=y
CONFIG_REGULATOR_CPR=y
-CONFIG_REGULATOR_CPR4_APSS=y
-CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_MEM_ACC=y
CONFIG_REGULATOR_MSM_GFX_LDO=y
-CONFIG_REGULATOR_QPNP=y
CONFIG_REGULATOR_RPM_SMD=y
CONFIG_REGULATOR_SPM=y
CONFIG_REGULATOR_STUB=y
@@ -382,73 +366,33 @@
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_MEDIA_USB_SUPPORT=y
-CONFIG_USB_VIDEO_CLASS=y
CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_MSM_CAMERA=y
-CONFIG_MSM_CAMERA_DEBUG=y
-CONFIG_MSMB_CAMERA=y
-CONFIG_MSMB_CAMERA_DEBUG=y
-CONFIG_MSM_CAMERA_SENSOR=y
-CONFIG_MSM_CPP=y
-CONFIG_MSM_CCI=y
-CONFIG_MSM_CSI20_HEADER=y
-CONFIG_MSM_CSI22_HEADER=y
-CONFIG_MSM_CSI30_HEADER=y
-CONFIG_MSM_CSI31_HEADER=y
-CONFIG_MSM_CSIPHY=y
-CONFIG_MSM_CSID=y
-CONFIG_MSM_EEPROM=y
-CONFIG_MSM_ISPIF_V2=y
-CONFIG_IMX134=y
-CONFIG_IMX132=y
-CONFIG_OV9724=y
-CONFIG_OV5648=y
-CONFIG_GC0339=y
-CONFIG_OV8825=y
-CONFIG_OV8865=y
-CONFIG_s5k4e1=y
-CONFIG_OV12830=y
-CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
-CONFIG_MSMB_JPEG=y
-CONFIG_MSM_FD=y
CONFIG_MSM_VIDC_3X_V4L2=y
CONFIG_MSM_VIDC_3X_GOVERNORS=y
-CONFIG_RADIO_IRIS=y
-CONFIG_RADIO_IRIS_TRANSPORT=y
+CONFIG_ADSP_SHMEM=y
CONFIG_QCOM_KGSL=y
CONFIG_FB=y
+CONFIG_FB_VIRTUAL=y
CONFIG_FB_MSM=y
CONFIG_FB_MSM_MDSS=y
CONFIG_FB_MSM_MDSS_WRITEBACK=y
CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_USB_AUDIO=y
CONFIG_SND_SOC=y
-CONFIG_SND_SOC_AW8896=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MULTITOUCH=y
-CONFIG_USB_HIDDEV=y
CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MSM=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ACM=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
-CONFIG_USB_EHSET_TEST_FIXTURE=y
-CONFIG_NOP_USB_XCEIV=y
CONFIG_DUAL_ROLE_USB_INTF=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
@@ -458,9 +402,6 @@
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_QCRNDIS=y
-CONFIG_USB_CONFIGFS_RNDIS=y
-CONFIG_USB_CONFIGFS_RMNET_BAM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_MTP=y
@@ -473,7 +414,6 @@
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_CCID=y
-CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
CONFIG_MMC_PERF_PROFILING=y
# CONFIG_PWRSEQ_EMMC is not set
@@ -487,7 +427,8 @@
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
-CONFIG_MMC_CQ_HCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
CONFIG_LEDS_QPNP_HAPTICS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
@@ -504,12 +445,8 @@
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ION=y
CONFIG_ION_MSM=y
-CONFIG_IPA=y
-CONFIG_RMNET_IPA=y
-CONFIG_RNDIS_IPA=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
-CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
CONFIG_MSM_RMNET_BAM=y
@@ -537,17 +474,18 @@
CONFIG_MSM_SMEM=y
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_GLINK=y
CONFIG_MSM_TZ_SMMU=y
CONFIG_TRACER_PKT=y
CONFIG_MSM_SMP2P=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_SYSMON_COMM=y
CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
-CONFIG_ICNSS=y
CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_AVTIMER=y
@@ -558,6 +496,11 @@
CONFIG_MEM_SHARE_QMI_SERVICE=y
# CONFIG_MSM_JTAGV8 is not set
CONFIG_MSM_BAM_DMUX=y
+CONFIG_MSM_GLINK_BGCOM_XPRT=y
+CONFIG_MSM_BGCOM_INTERFACE=y
+CONFIG_MSM_BGRSB=y
+CONFIG_MSM_PIL_SSR_BG=y
+CONFIG_MSM_BGCOM=y
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
@@ -670,6 +613,7 @@
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 2b02a48..11b9d90 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -242,6 +242,8 @@
CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
CONFIG_PINCTRL_SDXPOORWILLS=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_QCOM=y
CONFIG_QCOM_DLOAD_MODE=y
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index cf65ab8..e5dcbda 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -131,6 +131,9 @@
struct device_node *np;
struct gen_pool *sram_pool;
+ if (!soc_is_omap44xx() && !soc_is_omap54xx())
+ return 0;
+
np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
if (!np)
pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 1ab7096..f850fc3 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -387,7 +387,8 @@
static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
.rev_offs = 0x0,
.sysc_offs = 0x4,
- .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET,
+ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_RESET_STATUS,
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type2,
};
diff --git a/arch/arm/mach-qcom/board-qm215.c b/arch/arm/mach-qcom/board-qm215.c
index 62f9175..a1682f1d 100644
--- a/arch/arm/mach-qcom/board-qm215.c
+++ b/arch/arm/mach-qcom/board-qm215.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
static const char *qm215_dt_match[] __initconst = {
"qcom,qm215",
+ "qcom,qcm2150",
NULL
};
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index efbc7d5..1dee963 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -805,7 +805,8 @@
if (t->flags & PF_KTHREAD)
continue;
for_each_thread(t, s)
- set_section_perms(perms, n, true, s->mm);
+ if (s->mm)
+ set_section_perms(perms, n, true, s->mm);
}
read_unlock(&tasklist_lock);
set_section_perms(perms, n, true, current->active_mm);
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 4f15dc1..eed610c 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -364,8 +364,7 @@
msm8953-iot-mtp-overlay.dtbo \
msm8953-ext-codec-mtp-overlay.dtbo \
msm8953-ext-codec-rcm-overlay.dtbo \
- msm8953-cdp-1200p-overlay.dtbo \
- msm8953-no-pmi-overlay.dtbo
+ msm8953-cdp-1200p-overlay.dtbo
dtbo-$(CONFIG_ARCH_SDM450) += msm8953-mtp-overlay.dtbo \
msm8953-cdp-overlay.dtbo \
@@ -374,8 +373,7 @@
msm8953-iot-mtp-overlay.dtbo \
sdm450-cdp-s2-overlay.dtbo \
sdm450-mtp-s3-overlay.dtbo \
- sdm450-qrd-sku4-overlay.dtbo\
- sdm450-no-pmi-mtp-overlay.dtbo
+ sdm450-qrd-sku4-overlay.dtbo
dtbo-$(CONFIG_ARCH_SDM632) += sdm632-rumi-overlay.dtbo \
sdm450-cdp-s2-overlay.dtbo \
@@ -398,7 +396,11 @@
sdm429-spyro-qrd-evt-overlay.dtbo \
sdm429-spyro-qrd-dvt-overlay.dtbo \
sda429-spyro-qrd-dvt-overlay.dtbo \
- sdm429-spyro-qrd-wdp-overlay.dtbo
+ sdm429-spyro-qrd-wdp-overlay.dtbo \
+ sdm429-bg-wtp-overlay.dtbo \
+ sdm429-bg-wdp-overlay.dtbo \
+ sda429-bg-wdp-overlay.dtbo \
+ sda429-bg-wtp-overlay.dtbo
msm8940-mtp-overlay.dtbo-base := msm8940-pmi8950.dtb \
msm8940-pmi8937.dtb \
@@ -471,9 +473,6 @@
msm8953-ext-codec-rcm-overlay.dtbo-base := msm8953.dtb \
apq8053.dtb
msm8953-cdp-1200p-overlay.dtbo-base := msm8953.dtb
-
-msm8953-no-pmi-overlay.dtbo-base := msm8953-no-pmi.dtb
-
sdm450-cdp-s2-overlay.dtbo-base := sdm450-pmi632.dtb \
sdm632.dtb \
sdm632-pm8004.dtb \
@@ -486,9 +485,6 @@
sdm450-qrd-sku4-overlay.dtbo-base := sdm450-pmi632.dtb \
sdm632.dtb \
sdm632-pm8004.dtb
-
-sdm450-no-pmi-mtp-overlay.dtbo-base := sdm450-no-pmi.dtb
-
sdm632-rumi-overlay.dtbo-base := sdm632.dtb
sdm632-ext-codec-cdp-s3-overlay.dtbo-base := sdm632.dtb \
sdm632-pm8004.dtb
@@ -522,6 +518,10 @@
sdm429-spyro-qrd-dvt-overlay.dtbo-base := sdm429-spyro-dvt.dtb
sda429-spyro-qrd-dvt-overlay.dtbo-base := sda429-spyro-dvt.dtb
sdm429-spyro-qrd-wdp-overlay.dtbo-base := sdm429-spyro-wdp.dtb
+sdm429-bg-wtp-overlay.dtbo-base := sdm429-bg-wtp.dtb
+sda429-bg-wtp-overlay.dtbo-base := sda429-bg-wtp.dtb
+sdm429-bg-wdp-overlay.dtbo-base := sdm429-bg-wdp.dtb
+sda429-bg-wdp-overlay.dtbo-base := sda429-bg-wdp.dtb
else
dtb-$(CONFIG_ARCH_MSM8953) += msm8953-cdp.dtb \
msm8953-mtp.dtb \
@@ -555,8 +555,7 @@
msm8953-pmi8940-ext-codec-mtp.dtb \
msm8953-pmi8937-ext-codec-mtp.dtb \
msm8953-pmi632-cdp-s2.dtb \
- apq8053-batcam.dtb \
- msm8953-no-pmi.dtb
+ apq8053-batcam.dtb
dtb-$(CONFIG_ARCH_MSM8937) += msm8937-pmi8950-mtp.dtb \
msm8937-interposer-sdm439-cdp.dtb \
@@ -620,8 +619,7 @@
sdm450-pmi632-cdp-s2.dtb \
sdm450-pmi632-mtp-s3.dtb \
sda450-pmi632-cdp-s2.dtb \
- sda450-pmi632-mtp-s3.dtb \
- sdm450-no-pmi-mtp.dtb
+ sda450-pmi632-mtp-s3.dtb
dtb-$(CONFIG_ARCH_SDM632) += sdm632-rumi.dtb \
sdm632-cdp-s2.dtb \
@@ -659,7 +657,7 @@
sdm429-spyro-dvt.dtb \
sdm429-spyro-wdp.dtb \
sda429-spyro-dvt.dtb \
- sdw3300-bg-1gb-wtp.dtb
+ sdm429-bg-iot-wtp.dtb
endif
diff --git a/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts b/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
index 103ef7f..f57383c 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
@@ -387,6 +387,10 @@
status = "okay";
};
+&mdss_fb0 {
+ /delete-node/ qcom,cont-splash-memory;
+};
+
&mdss_mdp {
qcom,mdss-pref-prim-intf = "dsi";
status = "okay";
@@ -436,3 +440,5 @@
status = "okay";
};
};
+
+/delete-node/ &cont_splash_mem;
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pm8916-mtp.dts b/arch/arm64/boot/dts/qcom/msm8909-pm8916-mtp.dts
index f1caab6..2dd300c 100644
--- a/arch/arm64/boot/dts/qcom/msm8909-pm8916-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8909-pm8916-mtp.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016,2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016,2018,2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,7 +22,7 @@
model = "Qualcomm Technologies, Inc. MSM8909-PM8916 1GB MTP";
compatible = "qcom,msm8909-mtp", "qcom,msm8909", "qcom,mtp";
qcom,msm-id = <245 0x20000>, <245 0x0>;
- qcom,board-id= <0x02010008 0x102>;
+ qcom,board-id= <0x8 0x2>;
qcom,pmic-id = <0x1000B 0x0 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8909w.dtsi b/arch/arm64/boot/dts/qcom/msm8909w.dtsi
index 7229564..c023650 100644
--- a/arch/arm64/boot/dts/qcom/msm8909w.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909w.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -75,6 +75,10 @@
};
};
+&usb_otg {
+ qcom,enumeration-check-for-sdp;
+};
+
&qcom_crypto {
qcom,msm-bus,vectors-KBps =
<55 512 0 0>,
diff --git a/arch/arm64/boot/dts/qcom/msm8917.dtsi b/arch/arm64/boot/dts/qcom/msm8917.dtsi
index 0673013..4ee2500 100644
--- a/arch/arm64/boot/dts/qcom/msm8917.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917.dtsi
@@ -142,6 +142,12 @@
cont_splash_mem: splash_region@83000000 {
reg = <0x0 0x90000000 0x0 0x1400000>;
};
+
+ dump_mem: mem_dump_region {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0 0x400000>;
+ };
};
soc: soc { };
@@ -229,6 +235,47 @@
thermal_zones: thermal-zones {};
+ mem_dump {
+ compatible = "qcom,mem-dump";
+ memory-region = <&dump_mem>;
+
+ rpm_sw_dump {
+ qcom,dump-size = <0x28000>;
+ qcom,dump-id = <0xea>;
+ };
+
+ pmic_dump {
+ qcom,dump-size = <0x10000>;
+ qcom,dump-id = <0xe4>;
+ };
+
+ vsense_dump {
+ qcom,dump-size = <0x1000>;
+ qcom,dump-id = <0xe9>;
+ };
+
+ tmc_etf_dump {
+ qcom,dump-size = <0x10000>;
+ qcom,dump-id = <0xf0>;
+ };
+
+ tmc_etr_reg_dump {
+ qcom,dump-size = <0x1000>;
+ qcom,dump-id = <0x100>;
+ };
+
+ tmc_etf_reg_dump {
+ qcom,dump-size = <0x1000>;
+ qcom,dump-id = <0x101>;
+ };
+
+ misc_data_dump {
+ qcom,dump-size = <0x1000>;
+ qcom,dump-id = <0xe8>;
+ };
+
+ };
+
tsens0: tsens@4a8000 {
compatible = "qcom,msm8937-tsens";
reg = <0x4a8000 0x1000>,
diff --git a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
index b9c901c4..f3e13c0 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
@@ -159,6 +159,7 @@
qcom,csiphy-sd-index = <0>;
qcom,csid-sd-index = <0>;
qcom,mount-angle = <270>;
+ qcom,led-flash-src = <&led_flash0>;
qcom,eeprom-src = <&eeprom0>;
qcom,actuator-src = <&actuator0>;
cam_vio-supply = <&pm8953_l6>;
diff --git a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-qrd.dtsi
index be954d7..051b8b7 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-qrd.dtsi
@@ -33,7 +33,7 @@
qcom,cci-master = <0>;
reg = <0x0>;
cam_vio-supply = <&pm8953_l6>;
- cam_vdig-supply = <&pm8953_l23>;
+ cam_vdig-supply = <&pm8953_l2>;
cam_vaf-supply = <&pm8953_l17>;
qcom,cam-vreg-name = "cam_vio", "cam_vdig", "cam_vaf";
qcom,cam-vreg-min-voltage = <0 1200000 2850000>;
@@ -111,7 +111,7 @@
qcom,eeprom-src = <&eeprom0>;
qcom,actuator-src = <&actuator0>;
cam_vio-supply = <&pm8953_l6>;
- cam_vdig-supply = <&pm8953_l23>;
+ cam_vdig-supply = <&pm8953_l2>;
cam_vaf-supply = <&pm8953_l17>;
cam_vana-supply = <&pm8953_l22>;
qcom,cam-vreg-name = "cam_vio", "cam_vdig", "cam_vaf",
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
index 1b09b3c..350d88a 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@
qcom,battery-data = <&mtp_batterydata>;
qcom,chg-led-sw-controls;
qcom,chg-led-support;
+ qcom,usbin-vadc = <&pmi8950_vadc>;
};
&int_codec {
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
index 00614b2..92cd98d 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -44,4 +44,5 @@
qcom,battery-data = <&mtp_batterydata>;
qcom,chg-led-sw-controls;
qcom,chg-led-support;
+ qcom,usbin-vadc = <&pmi8950_vadc>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
index 9b20013..3dfe202 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
@@ -48,9 +48,5 @@
qcom,battery-data = <&mtp_batterydata>;
qcom,chg-led-sw-controls;
qcom,chg-led-support;
-};
-&cci {
- qcom,camera@0 {
- qcom,led-flash-src = <&led_flash0>;
- };
+ qcom,usbin-vadc = <&pmi8950_vadc>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dts b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dts
index 5d892fd..fc8493e 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, 2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
/dts-v1/;
#include "msm8953.dtsi"
+#include "pmi8950.dtsi"
#include "msm8953-qrd-sku3.dtsi"
+#include "msm8953-pmi8950.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 QRD SKU3";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dtsi b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dtsi
index 96e185b..27107e4 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, 2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -12,4 +12,105 @@
*/
#include "msm8953-qrd.dtsi"
+#include "msm8953-camera-sensor-qrd.dtsi"
+&spmi_bus {
+ qcom,pmi8950@3 {
+ labibb: qpnp-labibb-regulator {
+ ibb_regulator: qcom,ibb@dc00 {
+ /delete-property/
+ qcom,qpnp-ibb-use-default-voltage;
+ qcom,qpnp-ibb-init-lcd-voltage = <5700000>;
+ };
+
+ lab_regulator: qcom,lab@de00 {
+ /delete-property/
+ qcom,qpnp-lab-use-default-voltage;
+ qcom,qpnp-lab-init-lcd-voltage = <5700000>;
+ };
+ };
+ };
+};
+
+#include "msm8953-mdss-panels.dtsi"
+
+&tlmm {
+ pmx_mdss {
+ mdss_dsi_active: mdss_dsi_active {
+ mux {
+ pins = "gpio61";
+ };
+ config {
+ pins = "gpio61";
+ };
+ };
+ mdss_dsi_suspend: mdss_dsi_suspend {
+ mux {
+ pins = "gpio61";
+ };
+ config {
+ pins = "gpio61";
+ };
+ };
+ };
+};
+
+&dsi_panel_pwr_supply {
+ qcom,panel-supply-entry@2 {
+ reg = <2>;
+ qcom,supply-name = "lab";
+ qcom,supply-min-voltage = <4600000>;
+ qcom,supply-max-voltage = <6000000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+
+ qcom,panel-supply-entry@3 {
+ reg = <3>;
+ qcom,supply-name = "ibb";
+ qcom,supply-min-voltage = <4600000>;
+ qcom,supply-max-voltage = <6000000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ qcom,supply-post-on-sleep = <10>;
+ };
+};
+
+&mdss_mdp {
+ qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi {
+ hw-config = "single_dsi";
+};
+
+&mdss_dsi0 {
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+ /delete-property/ vdd-supply;
+
+ qcom,dsi-pref-prim-pan = <&dsi_r69006_1080p_cmd>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-te-gpio = <&tlmm 24 0>;
+ qcom,platform-reset-gpio = <&tlmm 61 0>;
+};
+
+&mdss_dsi1 {
+ status = "disabled";
+};
+
+&labibb {
+ status = "ok";
+ qpnp,qpnp-labibb-mode = "lcd";
+};
+
+&dsi_r69006_1080p_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,esd-check-enabled;
+};
+
+&dsi_r69006_1080p_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
index 2115dcb..dcdfd10 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
@@ -16,7 +16,32 @@
&soc {
i2c@78b7000 { /* BLSP1 QUP3 */
- /delete-node/ synaptics@4b;
+ status = "okay";
+ synaptics@4b {
+ compatible = "synaptics,dsx-i2c";
+ reg = <0x4b>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <65 0x2008>;
+ vdd_ana-supply = <&vdd_vreg>;
+ vcc_i2c-supply = <&pm8953_l6>;
+ synaptics,pwr-reg-name = "vdd_ana";
+ synaptics,bus-reg-name = "vcc_i2c";
+ synaptics,irq-gpio = <&tlmm 65 0x2008>;
+ synaptics,irq-on-state = <0>;
+ synaptics,irq-flags = <0x2008>;
+ synaptics,power-delay-ms = <200>;
+ synaptics,reset-delay-ms = <200>;
+ synaptics,max-y-for-2d = <1919>;
+ synaptics,cap-button-codes = <139 158 172>;
+ synaptics,vir-button-codes = <139 180 2000 320 160
+ 158 540 2000 320 160
+ 172 900 2000 320 160>;
+ synaptics,resume-in-workqueue;
+ /* Underlying clocks used by secure touch */
+ clock-names = "iface_clk", "core_clk";
+ clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+ <&clock_gcc clk_gcc_blsp1_qup3_i2c_apps_clk>;
+ };
};
vdd_vreg: vdd_vreg {
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index 6c9a989..1d78b6b 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -137,8 +137,12 @@
};
adsp_shmem_device_mem: adsp_shmem_device_region@0xc0100000 {
- reg = <0x0 0xc0100000 0x0 0x08200000>;
+ compatible = "shared-dma-pool";
label = "adsp_shmem_device_mem";
+ reusable;
+ alloc-ranges = <0x0 0xc0100000 0x0 0x08200000>;
+ alignment = <0 0x400000>;
+ size = <0 0x800000>;
};
gpu_mem: gpu_region@0 {
diff --git a/arch/arm64/boot/dts/qcom/pm660-rpm-regulator.dtsi b/arch/arm64/boot/dts/qcom/pm660-rpm-regulator.dtsi
index 010694d..5896615 100644
--- a/arch/arm64/boot/dts/qcom/pm660-rpm-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660-rpm-regulator.dtsi
@@ -112,7 +112,7 @@
qcom,resource-name = "ldoa";
qcom,resource-id = <1>;
qcom,regulator-type = <0>;
- qcom,hpm-min-load = <10000>;
+ qcom,hpm-min-load = <30000>;
status = "disabled";
regulator-l1 {
@@ -128,7 +128,7 @@
qcom,resource-name = "ldoa";
qcom,resource-id = <2>;
qcom,regulator-type = <0>;
- qcom,hpm-min-load = <10000>;
+ qcom,hpm-min-load = <30000>;
status = "disabled";
regulator-l2 {
@@ -144,7 +144,7 @@
qcom,resource-name = "ldoa";
qcom,resource-id = <3>;
qcom,regulator-type = <0>;
- qcom,hpm-min-load = <10000>;
+ qcom,hpm-min-load = <30000>;
status = "disabled";
regulator-l3 {
@@ -160,7 +160,7 @@
qcom,resource-name = "ldoa";
qcom,resource-id = <5>;
qcom,regulator-type = <0>;
- qcom,hpm-min-load = <10000>;
+ qcom,hpm-min-load = <30000>;
status = "disabled";
regulator-l5 {
@@ -176,7 +176,7 @@
qcom,resource-name = "ldoa";
qcom,resource-id = <6>;
qcom,regulator-type = <0>;
- qcom,hpm-min-load = <10000>;
+ qcom,hpm-min-load = <30000>;
status = "disabled";
regulator-l6 {
@@ -192,7 +192,7 @@
qcom,resource-name = "ldoa";
qcom,resource-id = <7>;
qcom,regulator-type = <0>;
- qcom,hpm-min-load = <10000>;
+ qcom,hpm-min-load = <30000>;
status = "disabled";
regulator-l7 {
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
index ed6ca20..3f3c8da 100644
--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -18,52 +18,40 @@
qcom,msm-id = <347 0x0>;
};
-&removed_region {
- reg = <0 0x85fc0000 0 0x1540000>;
-};
-
-&pil_camera_mem {
- reg = <0 0x8b800000 0 0x500000>;
-};
-
&pil_modem_mem {
- reg = <0 0x8bd00000 0 0x3100000>;
+ reg = <0 0x8b000000 0 0x3100000>;
};
&pil_video_mem {
- reg = <0 0x8ee00000 0 0x500000>;
+ reg = <0 0x8e100000 0 0x500000>;
};
&wlan_msa_mem {
- reg = <0 0x8f300000 0 0x100000>;
+ reg = <0 0x8e600000 0 0x100000>;
};
&pil_cdsp_mem {
- reg = <0 0x8f400000 0 0x800000>;
+ reg = <0 0x8e700000 0 0x800000>;
};
&pil_mba_mem {
- reg = <0 0x8fc00000 0 0x200000>;
+ reg = <0 0x8ef00000 0 0x200000>;
};
&pil_adsp_mem {
- reg = <0 0x8fe00000 0 0x1e00000>;
+ reg = <0 0x8f100000 0 0x1e00000>;
};
&pil_ipa_fw_mem {
- reg = <0 0x91c00000 0 0x10000>;
+ reg = <0 0x90f00000 0 0x10000>;
};
&pil_ipa_gsi_mem {
- reg = <0 0x91c10000 0 0x5000>;
+ reg = <0 0x90f10000 0 0x5000>;
};
&pil_gpu_mem {
- reg = <0 0x91c15000 0 0x2000>;
-};
-
-&qseecom_mem {
- reg = <0 0x9e800000 0 0x1000000>;
+ reg = <0 0x90f15000 0 0x2000>;
};
&adsp_mem {
@@ -71,7 +59,7 @@
};
&secure_display_memory {
- status = "disabled";
+ size = <0 0xa000000>;
};
&qcom_seecom {
diff --git a/arch/arm64/boot/dts/qcom/qm215-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/qm215-camera-sensor-qrd.dtsi
index 08658dc..ae928ce 100644
--- a/arch/arm64/boot/dts/qcom/qm215-camera-sensor-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/qm215-camera-sensor-qrd.dtsi
@@ -100,6 +100,21 @@
qcom,clock-rates = <19200000 0>;
};
+ flash0: qcom,camera-flash {
+ cell-index = <0>;
+ compatible = "qcom,qm215-gpio-flash";
+ qcom,flash-type = <2>;
+ gpios = <&tlmm 34 0>,
+ <&tlmm 33 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <1 0>;
+ qcom,gpio-flash-en = <0>;
+ qcom,gpio-flash-now = <1>;
+ qcom,gpio-req-tbl-label = "CAM_FLASH",
+ "CAM_TORCH";
+ status = "ok";
+ };
+
qcom,camera@0 {
cell-index = <0>;
compatible = "qcom,camera";
@@ -107,6 +122,7 @@
qcom,csiphy-sd-index = <0>;
qcom,csid-sd-index = <0>;
qcom,mount-angle = <90>;
+ qcom,led-flash-src = <&flash0>;
qcom,actuator-src = <&actuator0>;
qcom,eeprom-src = <&eeprom0>;
cam_vana-supply = <&pm8916_l16>;
diff --git a/arch/arm64/boot/dts/qcom/sda429-bg-wdp-overlay.dts b/arch/arm64/boot/dts/qcom/sda429-bg-wdp-overlay.dts
new file mode 100644
index 0000000..f895f17
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda429-bg-wdp-overlay.dts
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include "sdm429-spyro-qrd-evt.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDA429 QRD Spyro BG WDP Overlay";
+ compatible = "qcom,sdm429w-qrd", "qcom,sdm429w", "qcom,qrd";
+ qcom,msm-id = <437 0x0>;
+ qcom,board-id = <0x01000b 9>;
+ qcom,pmic-id = <0x0002001b 0x0 0x0 0x0>;
+};
+
+&usb_otg {
+ HSUSB_3p3-supply = <&L16A>;
+};
+
+&msm_dig_codec {
+ cdc-vdd-digital-supply = <&pm660_l11>;
+};
+
+&ext_smart_pa {
+ dvdd-supply = <&pm660_l11>;
+};
+
+&sdhc_2 {
+ cd-gpios = <&tlmm 67 0x1>;
+};
+
+&mdss_dsi {
+ vddio-supply = <&L12A>; /* 1.8v */
+};
+
+&mdss_dsi0_pll {
+ vddio-supply = <&L12A>; /* 1.8V */
+};
+
+&mdss_dsi1_pll {
+ vddio-supply = <&L12A>; /* 1.8V */
+};
+
+&mdss_dsi0 {
+ qcom,platform-enable-gpio = <&pm660_gpios 12 0>;
+ /delete-property/ vdd-supply;
+ qcom,dsi-pref-prim-pan = <&dsi_truly_rm69090_qvga_cmd>;
+};
+&dsi_pm660_panel_pwr_supply {
+ /delete-node/ qcom,panel-supply-entry@0;
+};
+
+&pm660_gpios {
+ gpio@cb00 {
+ status = "ok";
+ qcom,mode = <1>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,master-en = <1>;
+ qcom,out-strength = <2>;
+ };
+};
+
+&i2c_4 {
+ status = "ok";
+
+ tsc@24 {
+ cy,core {
+ cy,mt {
+ cy,name = "cyttsp5_mt";
+
+ cy,inp_dev_name = "cyttsp5_mt";
+ cy,flags = <0x8>;
+ cy,abs =
+ <0x35 0 368 0 0
+ 0x36 0 448 0 0
+ 0x3a 0 255 0 0
+ 0xffff 0 255 0 0
+ 0x39 0 15 0 0
+ 0x30 0 255 0 0
+ 0x31 0 255 0 0
+ 0x34 0xffffff81 127 0 0
+ 0x37 0 1 0 0
+ 0x3b 0 255 0 0>;
+
+ };
+
+ };
+ };
+
+};
+
+&firmware {
+ android {
+ fstab {
+ system {
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts b/arch/arm64/boot/dts/qcom/sda429-bg-wdp.dts
similarity index 65%
copy from arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts
copy to arch/arm64/boot/dts/qcom/sda429-bg-wdp.dts
index fa9ccbb..9746c0c 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts
+++ b/arch/arm64/boot/dts/qcom/sda429-bg-wdp.dts
@@ -13,11 +13,13 @@
/dts-v1/;
-#include "msm8953.dtsi"
-#include "msm8953-mtp.dtsi"
+#include "sdm429-spyro.dtsi"
+#include "sdm429w-bg-pm660.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. msm8953 + NO PMI SOC";
- compatible = "qcom,msm8953";
- qcom,pmic-id = <0x010016 0x0 0x0 0x0>;
+ model = "Qualcomm Technologies, Inc. SDA429 QRD BG WDP Spyro";
+ compatible = "qcom,sdm429w-qrd","qcom,sdm429w","qcom,sda429w";
+ qcom,msm-id = <437 0x0>;
+ qcom,board-id = <0x00010b 9>;
+ qcom,pmic-id = <0x0002001b 0x0 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sda429-bg-wtp-overlay.dts b/arch/arm64/boot/dts/qcom/sda429-bg-wtp-overlay.dts
new file mode 100644
index 0000000..d12b346
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda429-bg-wtp-overlay.dts
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include "sdm429-spyro-qrd-evt.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDA429 QRD BG WTP Overlay";
+ compatible = "qcom,sdm429w-qrd", "qcom,sdm429w", "qcom,qrd";
+ qcom,msm-id = <437 0x0>;
+ qcom,board-id = <0x00010b 8>;
+ qcom,pmic-id = <0x0002001b 0x0 0x0 0x0>;
+};
+
+&soc {
+
+ qcom,blackghost {
+ compatible = "qcom,pil-blackghost";
+ qcom,pil-force-shutdown;
+ qcom,firmware-name = "bg-wear";
+ /* GPIO inputs from blackghost */
+ qcom,bg2ap-status-gpio = <&msm_gpio 44 0>;
+ qcom,bg2ap-errfatal-gpio = <&msm_gpio 72 0>;
+ /* GPIO output to blackghost */
+ qcom,ap2bg-status-gpio = <&msm_gpio 61 0>;
+ qcom,ap2bg-errfatal-gpio = <&msm_gpio 62 0>;
+ };
+
+ qcom,msm-ssc-sensors {
+ compatible = "qcom,msm-ssc-sensors";
+ };
+
+ qcom,glink-bgcom-xprt-bg {
+ compatible = "qcom,glink-bgcom-xprt";
+ label = "bg";
+ qcom,qos-config = <&glink_qos_bg>;
+ qcom,ramp-time = <0x10>,
+ <0x20>,
+ <0x30>,
+ <0x40>;
+ };
+
+ glink_qos_bg: qcom,glink-qos-config-bg {
+ compatible = "qcom,glink-qos-config";
+ qcom,flow-info = <0x80 0x0>,
+ <0x70 0x1>,
+ <0x60 0x2>,
+ <0x50 0x3>;
+ qcom,mtu-size = <0x800>;
+ qcom,tput-stats-cycle = <0xa>;
+ };
+
+ qcom,glink_pkt {
+ compatible = "qcom,glinkpkt";
+
+ qcom,glinkpkt-bg-daemon {
+ qcom,glinkpkt-transport = "bgcom";
+ qcom,glinkpkt-edge = "bg";
+ qcom,glinkpkt-ch-name = "bg-daemon";
+ qcom,glinkpkt-dev-name = "glink_pkt_bg_daemon";
+ };
+
+ qcom,glinkpkt-bg-display-ctrl {
+ qcom,glinkpkt-transport = "bgcom";
+ qcom,glinkpkt-edge = "bg";
+ qcom,glinkpkt-ch-name = "display-ctrl";
+ qcom,glinkpkt-dev-name = "glink_pkt_bg_display_ctrl";
+ };
+
+ qcom,glinkpkt-bg-display-data {
+ qcom,glinkpkt-transport = "bgcom";
+ qcom,glinkpkt-edge = "bg";
+ qcom,glinkpkt-ch-name = "display-data";
+ qcom,glinkpkt-dev-name = "glink_pkt_bg_display_data";
+ };
+
+ qcom,glinkpkt-bg-rsb-ctrl {
+ qcom,glinkpkt-transport = "bgcom";
+ qcom,glinkpkt-edge = "bg";
+ qcom,glinkpkt-ch-name = "RSB_CTRL";
+ qcom,glinkpkt-dev-name = "glink_pkt_bg_rsb_ctrl";
+ };
+
+ qcom,glinkpkt-bg-sso-ctrl {
+ qcom,glinkpkt-transport = "bgcom";
+ qcom,glinkpkt-edge = "bg";
+ qcom,glinkpkt-ch-name = "sso-ctrl";
+ qcom,glinkpkt-dev-name = "glink_pkt_bg_sso_ctrl";
+ };
+
+ qcom,glinkpkt-bg-buzzer-ctrl {
+ qcom,glinkpkt-transport = "bgcom";
+ qcom,glinkpkt-edge = "bg";
+ qcom,glinkpkt-ch-name = "buzzer-ctrl";
+ qcom,glinkpkt-dev-name = "glink_pkt_bg_buzzer_ctrl";
+ };
+ };
+
+ spi@78B8000 { /* BLSP1 QUP4 */
+ status = "ok";
+ qcom,bg-spi {
+ compatible = "qcom,bg-spi";
+ reg = <0>;
+ spi-max-frequency = <16000000>;
+ interrupt-parent = <&msm_gpio>;
+ qcom,irq-gpio = <&msm_gpio 43 1>;
+ };
+ };
+
+ qcom,bg-rsb {
+ compatible = "qcom,bg-rsb";
+ vdd-ldo1-supply = <&pm660_l11>;
+ vdd-ldo2-supply = <&pm660_l15>;
+ };
+
+ qcom,bg-daemon {
+ compatible = "qcom,bg-daemon";
+ qcom,bg-reset-gpio = <&pm660_gpios 5 0>;
+ ssr-reg1-supply = <&pm660_l3>;
+ ssr-reg2-supply = <&pm660_l9>;
+ };
+};
+
+&usb_otg {
+ HSUSB_3p3-supply = <&L16A>;
+};
+
+&msm_dig_codec {
+ cdc-vdd-digital-supply = <&pm660_l11>;
+};
+
+&ext_smart_pa {
+ dvdd-supply = <&pm660_l11>;
+};
+
+&firmware {
+ android {
+ fstab {
+ system {
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts b/arch/arm64/boot/dts/qcom/sda429-bg-wtp.dts
similarity index 65%
copy from arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts
copy to arch/arm64/boot/dts/qcom/sda429-bg-wtp.dts
index fa9ccbb..2312951 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts
+++ b/arch/arm64/boot/dts/qcom/sda429-bg-wtp.dts
@@ -13,11 +13,13 @@
/dts-v1/;
-#include "msm8953.dtsi"
-#include "msm8953-mtp.dtsi"
+#include "sdm429-spyro.dtsi"
+#include "sdm429w-bg-pm660.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. msm8953 + NO PMI SOC";
- compatible = "qcom,msm8953";
- qcom,pmic-id = <0x010016 0x0 0x0 0x0>;
+ model = "Qualcomm Technologies, Inc. SDM429 QRD DVT Spyro";
+ compatible = "qcom,sdm429w-qrd", "qcom,sdm429w", "qcom,qrd";
+ qcom,msm-id = <437 0x0>;
+ qcom,board-id = <0x00010b 8>;
+ qcom,pmic-id = <0x0002001b 0x0 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdw3300-bg-1gb-wtp.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-iot-wtp.dts
similarity index 93%
rename from arch/arm64/boot/dts/qcom/sdw3300-bg-1gb-wtp.dts
rename to arch/arm64/boot/dts/qcom/sdm429-bg-iot-wtp.dts
index cf24032..46c7469 100644
--- a/arch/arm64/boot/dts/qcom/sdw3300-bg-1gb-wtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-bg-iot-wtp.dts
@@ -15,8 +15,10 @@
#include "sdm429-spyro.dtsi"
#include "sdm429-spyro-qrd-evt.dtsi"
+#include "sdm429w-bg-pm660.dtsi"
+
/ {
- model = "Qualcomm Technologies, Inc. SDM429W BG 1GB WTP";
+ model = "Qualcomm Technologies, Inc. SDM429W BG IOT WTP";
compatible = "qcom,sdm429w-qrd", "qcom,sdm429w", "qcom,qrd";
qcom,msm-id = <416 0x0>;
qcom,board-id = <0x00010b 8>;
@@ -35,10 +37,6 @@
dvdd-supply = <&pm660_l11>;
};
-&blsp1_uart2 {
- status = "okay";
-};
-
&thermal_zones {
/delete-node/ emmc-therm-adc;
/delete-node/ aoss0-lowf;
diff --git a/arch/arm64/boot/dts/qcom/sdm429-bg-wdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-wdp-overlay.dts
new file mode 100644
index 0000000..53229a0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm429-bg-wdp-overlay.dts
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include "sdm429-spyro-qrd-evt.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM429 QRD BG WDP Overlay";
+ compatible = "qcom,sdm429w-qrd", "qcom,sdm429w", "qcom,qrd";
+ qcom,msm-id = <416 0x0>;
+ qcom,board-id = <0x01000b 9>;
+ qcom,pmic-id = <0x0002001b 0x0 0x0 0x0>;
+};
+
+&soc {
+
+ qcom,blackghost {
+ compatible = "qcom,pil-blackghost";
+ qcom,pil-force-shutdown;
+ qcom,firmware-name = "bg-wear";
+ /* GPIO inputs from blackghost */
+ qcom,bg2ap-status-gpio = <&msm_gpio 44 0>;
+ qcom,bg2ap-errfatal-gpio = <&msm_gpio 72 0>;
+ /* GPIO output to blackghost */
+ qcom,ap2bg-status-gpio = <&msm_gpio 61 0>;
+ qcom,ap2bg-errfatal-gpio = <&msm_gpio 62 0>;
+ };
+
+ qcom,msm-ssc-sensors {
+ compatible = "qcom,msm-ssc-sensors";
+ };
+
+ qcom,glink-bgcom-xprt-bg {
+ compatible = "qcom,glink-bgcom-xprt";
+ label = "bg";
+ qcom,qos-config = <&glink_qos_bg>;
+ qcom,ramp-time = <0x10>,
+ <0x20>,
+ <0x30>,
+ <0x40>;
+ };
+
+ glink_qos_bg: qcom,glink-qos-config-bg {
+ compatible = "qcom,glink-qos-config";
+ qcom,flow-info = <0x80 0x0>,
+ <0x70 0x1>,
+ <0x60 0x2>,
+ <0x50 0x3>;
+ qcom,mtu-size = <0x800>;
+ qcom,tput-stats-cycle = <0xa>;
+ };
+
+ qcom,glink_pkt {
+ compatible = "qcom,glinkpkt";
+
+ qcom,glinkpkt-bg-daemon {
+ qcom,glinkpkt-transport = "bgcom";
+ qcom,glinkpkt-edge = "bg";
+ qcom,glinkpkt-ch-name = "bg-daemon";
+ qcom,glinkpkt-dev-name = "glink_pkt_bg_daemon";
+ };
+
+ qcom,glinkpkt-bg-display-ctrl {
+ qcom,glinkpkt-transport = "bgcom";
+ qcom,glinkpkt-edge = "bg";
+ qcom,glinkpkt-ch-name = "display-ctrl";
+ qcom,glinkpkt-dev-name = "glink_pkt_bg_display_ctrl";
+ };
+
+ qcom,glinkpkt-bg-display-data {
+ qcom,glinkpkt-transport = "bgcom";
+ qcom,glinkpkt-edge = "bg";
+ qcom,glinkpkt-ch-name = "display-data";
+ qcom,glinkpkt-dev-name = "glink_pkt_bg_display_data";
+ };
+
+ qcom,glinkpkt-bg-rsb-ctrl {
+ qcom,glinkpkt-transport = "bgcom";
+ qcom,glinkpkt-edge = "bg";
+ qcom,glinkpkt-ch-name = "RSB_CTRL";
+ qcom,glinkpkt-dev-name = "glink_pkt_bg_rsb_ctrl";
+ };
+
+ qcom,glinkpkt-bg-sso-ctrl {
+ qcom,glinkpkt-transport = "bgcom";
+ qcom,glinkpkt-edge = "bg";
+ qcom,glinkpkt-ch-name = "sso-ctrl";
+ qcom,glinkpkt-dev-name = "glink_pkt_bg_sso_ctrl";
+ };
+
+ qcom,glinkpkt-bg-buzzer-ctrl {
+ qcom,glinkpkt-transport = "bgcom";
+ qcom,glinkpkt-edge = "bg";
+ qcom,glinkpkt-ch-name = "buzzer-ctrl";
+ qcom,glinkpkt-dev-name = "glink_pkt_bg_buzzer_ctrl";
+ };
+ };
+
+ spi@78B8000 { /* BLSP1 QUP4 */
+ status = "ok";
+ qcom,bg-spi {
+ compatible = "qcom,bg-spi";
+ reg = <0>;
+ spi-max-frequency = <16000000>;
+ interrupt-parent = <&msm_gpio>;
+ qcom,irq-gpio = <&msm_gpio 43 1>;
+ };
+ };
+
+ qcom,bg-rsb {
+ compatible = "qcom,bg-rsb";
+ vdd-ldo1-supply = <&pm660_l11>;
+ vdd-ldo2-supply = <&pm660_l15>;
+ };
+
+ qcom,bg-daemon {
+ compatible = "qcom,bg-daemon";
+ qcom,bg-reset-gpio = <&pm660_gpios 5 0>;
+ ssr-reg1-supply = <&pm660_l3>;
+ ssr-reg2-supply = <&pm660_l9>;
+ };
+};
+
+&usb_otg {
+ HSUSB_3p3-supply = <&L16A>;
+};
+
+&msm_dig_codec {
+ cdc-vdd-digital-supply = <&pm660_l11>;
+};
+
+&ext_smart_pa {
+ dvdd-supply = <&pm660_l11>;
+};
+
+&sdhc_2 {
+ cd-gpios = <&tlmm 67 0x1>;
+};
+
+&mdss_dsi {
+ vddio-supply = <&L12A>; /* 1.8v */
+};
+
+&mdss_dsi0_pll {
+ vddio-supply = <&L12A>; /* 1.8V */
+};
+
+&mdss_dsi1_pll {
+ vddio-supply = <&L12A>; /* 1.8V */
+};
+
+&mdss_dsi0 {
+ qcom,platform-enable-gpio = <&pm660_gpios 12 0>;
+ /delete-property/ vdd-supply;
+ qcom,dsi-pref-prim-pan = <&dsi_truly_rm69090_qvga_cmd>;
+};
+&dsi_pm660_panel_pwr_supply {
+ /delete-node/ qcom,panel-supply-entry@0;
+};
+
+&pm660_gpios {
+ gpio@cb00 {
+ status = "ok";
+ qcom,mode = <1>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,master-en = <1>;
+ qcom,out-strength = <2>;
+ };
+};
+
+&i2c_4 {
+ status = "ok";
+
+ tsc@24 {
+ cy,core {
+ cy,mt {
+ cy,name = "cyttsp5_mt";
+
+ cy,inp_dev_name = "cyttsp5_mt";
+ cy,flags = <0x8>;
+ cy,abs =
+ <0x35 0 368 0 0
+ 0x36 0 448 0 0
+ 0x3a 0 255 0 0
+ 0xffff 0 255 0 0
+ 0x39 0 15 0 0
+ 0x30 0 255 0 0
+ 0x31 0 255 0 0
+ 0x34 0xffffff81 127 0 0
+ 0x37 0 1 0 0
+ 0x3b 0 255 0 0>;
+
+ };
+
+ };
+ };
+
+};
+
+&firmware {
+ android {
+ fstab {
+ system {
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-wdp.dts
similarity index 66%
copy from arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts
copy to arch/arm64/boot/dts/qcom/sdm429-bg-wdp.dts
index fa9ccbb..26ed8a7 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-bg-wdp.dts
@@ -13,11 +13,13 @@
/dts-v1/;
-#include "msm8953.dtsi"
-#include "msm8953-mtp.dtsi"
+#include "sdm429-spyro.dtsi"
+#include "sdm429w-bg-pm660.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. msm8953 + NO PMI SOC";
- compatible = "qcom,msm8953";
- qcom,pmic-id = <0x010016 0x0 0x0 0x0>;
+ model = "Qualcomm Technologies, Inc. SDM429 QRD BG WDP";
+ compatible = "qcom,sdm429w-qrd", "qcom,sdm429w", "qcom,qrd";
+ qcom,msm-id = <416 0x0>;
+ qcom,board-id = <0x01000b 9>;
+ qcom,pmic-id = <0x0002001b 0x0 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdw3300-bg-1gb-wtp.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-wtp-overlay.dts
similarity index 64%
copy from arch/arm64/boot/dts/qcom/sdw3300-bg-1gb-wtp.dts
copy to arch/arm64/boot/dts/qcom/sdm429-bg-wtp-overlay.dts
index cf24032..96c2385 100644
--- a/arch/arm64/boot/dts/qcom/sdw3300-bg-1gb-wtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-bg-wtp-overlay.dts
@@ -12,11 +12,12 @@
*/
/dts-v1/;
+/plugin/;
-#include "sdm429-spyro.dtsi"
#include "sdm429-spyro-qrd-evt.dtsi"
+
/ {
- model = "Qualcomm Technologies, Inc. SDM429W BG 1GB WTP";
+ model = "Qualcomm Technologies, Inc. SDM429 QRD BG WTP Overlay";
compatible = "qcom,sdm429w-qrd", "qcom,sdm429w", "qcom,qrd";
qcom,msm-id = <416 0x0>;
qcom,board-id = <0x00010b 8>;
@@ -35,29 +36,12 @@
dvdd-supply = <&pm660_l11>;
};
-&blsp1_uart2 {
- status = "okay";
-};
-
-&thermal_zones {
- /delete-node/ emmc-therm-adc;
- /delete-node/ aoss0-lowf;
- /delete-node/ mdm-core-lowf;
- /delete-node/ lpass-lowf;
- /delete-node/ camera-lowf;
- /delete-node/ cpuss1-lowf;
- /delete-node/ apc1-cpu0-lowf;
- /delete-node/ apc1-cpu1-lowf;
- /delete-node/ apc1-cpu2-lowf;
- /delete-node/ apc1-cpu3-lowf;
- /delete-node/ cpuss0-lowf;
- /delete-node/ gpu-lowf;
-};
-
&firmware {
android {
fstab {
- /delete-node/ system ;
+ system {
+ status = "disabled";
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-wtp.dts
similarity index 66%
copy from arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts
copy to arch/arm64/boot/dts/qcom/sdm429-bg-wtp.dts
index fa9ccbb..62294f0 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-bg-wtp.dts
@@ -13,11 +13,13 @@
/dts-v1/;
-#include "msm8953.dtsi"
-#include "msm8953-mtp.dtsi"
+#include "sdm429-spyro.dtsi"
+#include "sdm429w-bg-pm660.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. msm8953 + NO PMI SOC";
- compatible = "qcom,msm8953";
- qcom,pmic-id = <0x010016 0x0 0x0 0x0>;
+ model = "Qualcomm Technologies, Inc. SDM429 BG WTP";
+ compatible = "qcom,sdm429w-qrd", "qcom,sdm429w", "qcom,qrd";
+ qcom,msm-id = <416 0x0>;
+ qcom,board-id = <0x00010b 8>;
+ qcom,pmic-id = <0x0002001b 0x0 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm429-spyro-qrd-evt.dtsi b/arch/arm64/boot/dts/qcom/sdm429-spyro-qrd-evt.dtsi
index 9014166..567389a 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-spyro-qrd-evt.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm429-spyro-qrd-evt.dtsi
@@ -190,37 +190,6 @@
status = "ok";
};
-&firmware {
- android {
- compatible = "android,firmware";
- vbmeta {
- compatible = "android,vbmeta";
- parts = "vbmeta,boot,system,vendor,dtbo,recovery";
- };
- fstab {
- compatible = "android,fstab";
- vendor {
- compatible = "android,vendor";
- dev =
- "/dev/block/platform/soc/7824900.sdhci/by-name/vendor";
- type = "ext4";
- mnt_flags = "ro,barrier=1,discard";
- fsmgr_flags = "wait";
- status = "ok";
- };
- system {
- compatible = "android,system";
- dev =
- "/dev/block/platform/soc/7824900.sdhci/by-name/system";
- type = "ext4";
- mnt_flags = "ro,barrier=1,discard";
- fsmgr_flags = "wait";
- status = "ok";
- };
- };
- };
-};
-
&soc {
qcom,ion {
qcom,ion-heap@8 { /* CP_MM HEAP */
@@ -967,7 +936,7 @@
type = "passive";
};
- quiet_439_batt_trip6_mdm_trip3: quiet-bt-trp6-mdm-trp3 {
+ quiet_modem_439_trip3: quiet-modem-trp3 {
temperature = <48000>;
hysteresis = <2000>;
type = "passive";
@@ -985,8 +954,8 @@
type = "passive";
};
- quiet_batt_439_trip7: quiet-batt-trip7 {
- temperature = <50000>;
+ quiet_batt_439_trip6: quiet-batt-trip6 {
+ temperature = <55000>;
hysteresis = <2000>;
type = "passive";
};
@@ -1047,7 +1016,7 @@
};
modem_lvl2 {
- trip = <&quiet_439_batt_trip6_mdm_trip3>;
+ trip = <&quiet_modem_439_trip3>;
cooling-device = <&modem_pa 2 2>;
};
@@ -1082,14 +1051,10 @@
};
battery_lvl6 {
- trip = <&quiet_439_batt_trip6_mdm_trip3>;
+ trip = <&quiet_batt_439_trip6>;
cooling-device = <&pm660_charger 6 6>;
};
- battery_lvl7 {
- trip = <&quiet_batt_439_trip7>;
- cooling-device = <&pm660_charger 7 7>;
- };
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm429-spyro.dtsi b/arch/arm64/boot/dts/qcom/sdm429-spyro.dtsi
index a951c7d..004011c 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-spyro.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm429-spyro.dtsi
@@ -17,3 +17,34 @@
&pil_mss {
qcom,sequential-fw-load;
};
+
+&firmware {
+ android {
+ compatible = "android,firmware";
+ vbmeta {
+ compatible = "android,vbmeta";
+ parts = "vbmeta,boot,system,vendor,dtbo,recovery";
+ };
+ fstab {
+ compatible = "android,fstab";
+ vendor {
+ compatible = "android,vendor";
+ dev =
+ "/dev/block/platform/soc/7824900.sdhci/by-name/vendor";
+ type = "ext4";
+ mnt_flags = "ro,barrier=1,discard";
+ fsmgr_flags = "wait";
+ status = "ok";
+ };
+ system {
+ compatible = "android,system";
+ dev =
+ "/dev/block/platform/soc/7824900.sdhci/by-name/system";
+ type = "ext4";
+ mnt_flags = "ro,barrier=1,discard";
+ fsmgr_flags = "wait";
+ status = "ok";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts b/arch/arm64/boot/dts/qcom/sdm429w-bg-pm660.dtsi
similarity index 70%
rename from arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts
rename to arch/arm64/boot/dts/qcom/sdm429w-bg-pm660.dtsi
index fa9ccbb..7c389ff 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429w-bg-pm660.dtsi
@@ -11,13 +11,22 @@
* GNU General Public License for more details.
*/
-/dts-v1/;
+#include "sdm429w-pm660.dtsi"
-#include "msm8953.dtsi"
-#include "msm8953-mtp.dtsi"
-
-/ {
- model = "Qualcomm Technologies, Inc. msm8953 + NO PMI SOC";
- compatible = "qcom,msm8953";
- qcom,pmic-id = <0x010016 0x0 0x0 0x0>;
+&pm660_misc {
+ qcom,support-twm-config;
};
+
+&pm660_pbs {
+ status = "okay";
+};
+
+&pm660_pon {
+ qcom,support-twm-config;
+ qcom,pbs-client = <&pm660_pbs>;
+};
+
+&pm660_fg {
+ qcom,fg-disable-in-twm;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi
index f6d85f4..0dd6035 100644
--- a/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi
@@ -371,9 +371,10 @@
/* over-write the PM660 GPIO mappings for 429w */
&pm660_gpios {
interrupts = <0x0 0xc3 0 IRQ_TYPE_NONE>,
+ <0x0 0xc4 0 IRQ_TYPE_NONE>,
<0x0 0xcb 0 IRQ_TYPE_NONE>;
- interrupt-names = "pm660_gpio4", "pm660_gpio12";
- qcom,gpios-disallowed = <1 2 3 5 6 7 8 9 10 11 13>;
+ interrupt-names = "pm660_gpio4", "pm660_gpio5", "pm660_gpio12";
+ qcom,gpios-disallowed = <1 2 3 6 7 8 9 10 11 13>;
};
&pm660_vadc {
@@ -761,4 +762,3 @@
/delete-property/ vddio-supply;
vddio-supply = <&L13A>;
};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi
index db76a24..6e51013 100644
--- a/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi
@@ -265,6 +265,7 @@
regulator-min-microvolt = <1600000>;
regulator-max-microvolt = <2040000>;
qcom,init-voltage = <1600000>;
+ regulator-system-load = <20000>;
status = "okay";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp-overlay.dts
deleted file mode 100644
index cd78c2c..0000000
--- a/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp-overlay.dts
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/dts-v1/;
-/plugin/;
-
-/ {
- model = " SDM450 NOPMI MTP";
- qcom,board-id = <8 3>;
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp.dts
deleted file mode 100644
index 71a9eef..0000000
--- a/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp.dts
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/dts-v1/;
-
-#include "sdm450.dtsi"
-#include "msm8953-mtp.dtsi"
-/ {
- model = "Qualcomm Technologies, Inc. SDM450 + NO PMI MTP S3";
- compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp";
- qcom,board-id = <8 3>;
- qcom,pmic-id = <0x010016 0x0 0x0 0x0>;
-};
-
-&eeprom0 {
- cam_vdig-supply = <&pm8953_l23>;
-};
-
-&camera0 {
- cam_vdig-supply = <&pm8953_l23>;
-};
-
-&pm8953_gpios {
- bklt_en {
- bklt_en_default: bklt_en_default {
- pins = "gpio4";
- function = "normal";
- power-source = <0>;
- output-high;
- };
- };
-};
-
-&pm8953_pwm {
- status = "ok";
-};
-
-&mdss_dsi0 {
- qcom,dsi-pref-prim-pan = <&dsi_hx8399c_truly_vid>;
- pinctrl-names = "mdss_default", "mdss_sleep";
- pinctrl-0 = <&mdss_dsi_active &mdss_te_active &bklt_en_default>;
- pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
- qcom,platform-bklight-en-gpio = <&pm8953_gpios 4 0>;
-
-};
-
-&dsi_truly_1080_vid {
- qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
- qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
- qcom,mdss-dsi-bl-pmic-bank-select = <0>;
- qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
-};
-
-&dsi_hx8399c_truly_vid {
- qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
- qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
- qcom,mdss-dsi-bl-pmic-bank-select = <0>;
- qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-no-pmi.dts b/arch/arm64/boot/dts/qcom/sdm450-no-pmi.dts
deleted file mode 100644
index afb3554..0000000
--- a/arch/arm64/boot/dts/qcom/sdm450-no-pmi.dts
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/dts-v1/;
-
-#include "sdm450.dtsi"
-#include "msm8953-mtp.dtsi"
-/ {
- model = "Qualcomm Technologies, Inc. SDM450 + NO PMI MTP S3";
- compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp";
- qcom,pmic-id = <0x010016 0x0 0x0 0x0>;
-};
-
-&eeprom0 {
- cam_vdig-supply = <&pm8953_l23>;
-};
-
-&camera0 {
- cam_vdig-supply = <&pm8953_l23>;
-};
-
-&pm8953_gpios {
- bklt_en {
- bklt_en_default: bklt_en_default {
- pins = "gpio4";
- function = "normal";
- power-source = <0>;
- output-high;
- };
- };
-};
-
-&pm8953_pwm {
- status = "ok";
-};
-
-&mdss_dsi0 {
- qcom,dsi-pref-prim-pan = <&dsi_hx8399c_truly_vid>;
- pinctrl-names = "mdss_default", "mdss_sleep";
- pinctrl-0 = <&mdss_dsi_active &mdss_te_active &bklt_en_default>;
- pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
- qcom,platform-bklight-en-gpio = <&pm8953_gpios 4 0>;
-
-};
-
-&dsi_truly_1080_vid {
- qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
- qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
- qcom,mdss-dsi-bl-pmic-bank-select = <0>;
- qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
-};
-
-&dsi_hx8399c_truly_vid {
- qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
- qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
- qcom,mdss-dsi-bl-pmic-bank-select = <0>;
- qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts
index cb01d39..4e9a217 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts
@@ -19,6 +19,6 @@
/ {
model = "Qualcomm Technologies, Inc. SDM450 + PMI632 SOC";
compatible = "qcom,sdm450";
- qcom,pmic-id = <0x16 0x25 0x0 0x0>;
+ qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
qcom,pmic-name = "PMI632";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
index 1259893..4040623 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
@@ -80,7 +80,7 @@
<&apps_smmu 0x10a0 0x8>,
<&apps_smmu 0x10b0 0x0>;
buffer-types = <0xfff>;
- virtual-addr-pool = <0x79000000 0x87000000>;
+ virtual-addr-pool = <0x79000000 0x67000000>;
};
secure_bitstream_cb {
@@ -180,7 +180,7 @@
<&apps_smmu 0x10a0 0x8>,
<&apps_smmu 0x10b0 0x0>;
buffer-types = <0xfff>;
- virtual-addr-pool = <0x79000000 0x87000000>;
+ virtual-addr-pool = <0x79000000 0x67000000>;
};
secure_bitstream_cb {
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index 4374ef3..78f1f97 100755
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -321,6 +321,10 @@
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index 66ae8cf..f30592c 100755
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -328,6 +328,10 @@
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
diff --git a/arch/arm/configs/sdw3300-perf_defconfig b/arch/arm64/configs/qcs605-perf_defconfig
similarity index 65%
copy from arch/arm/configs/sdw3300-perf_defconfig
copy to arch/arm64/configs/qcs605-perf_defconfig
index bb2bb1d..775ac0c 100644
--- a/arch/arm/configs/sdw3300-perf_defconfig
+++ b/arch/arm64/configs/qcs605-perf_defconfig
@@ -2,6 +2,7 @@
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_FHANDLE is not set
CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
@@ -13,10 +14,14 @@
CONFIG_RCU_FAST_NO_HZ=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_RCU_NOCB_CPU_ALL=y
-CONFIG_LOG_CPU_MAX_BUF_SHIFT=13
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_BLK_CGROUP=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_CORE_CTL=y
@@ -27,20 +32,17 @@
CONFIG_SCHED_TUNE=y
CONFIG_DEFAULT_USE_ENERGY_AWARE=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_RD_BZIP2 is not set
-# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
-# CONFIG_MEMBARRIER is not set
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_CC_STACKPROTECTOR_STRONG=y
-CONFIG_ARCH_MMAP_RND_BITS=16
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
@@ -49,48 +51,50 @@
CONFIG_MODULE_SIG_FORCE=y
CONFIG_MODULE_SIG_SHA512=y
CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MSM8937=y
-CONFIG_ARCH_MSM8917=y
-CONFIG_ARCH_SDM439=y
-CONFIG_ARCH_SDM429=y
-# CONFIG_VDSO is not set
-CONFIG_SMP=y
+CONFIG_ARCH_SDM670=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
-CONFIG_ARM_PSCI=y
CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_HIGHMEM=y
-CONFIG_ARM_MODULE_PLTS=y
+CONFIG_HZ_100=y
CONFIG_CMA=y
-CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
-CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
+CONFIG_HARDEN_BRANCH_PREDICTOR=y
+CONFIG_PSCI_BP_HARDENING=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_IDLE=y
CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-CONFIG_CPU_FREQ_MSM=y
-CONFIG_CPU_IDLE=y
-CONFIG_VFP=y
-CONFIG_NEON=y
-CONFIG_KERNEL_MODE_NEON=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_AUTOSLEEP=y
-CONFIG_PM_WAKELOCKS=y
-CONFIG_PM_WAKELOCKS_LIMIT=0
-# CONFIG_PM_WAKELOCKS_GC is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -100,10 +104,13 @@
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -120,7 +127,6 @@
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_SCTP=y
CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=y
CONFIG_NF_CONNTRACK_FTP=y
@@ -146,6 +152,7 @@
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -161,10 +168,12 @@
CONFIG_NETFILTER_XT_MATCH_MAC=y
CONFIG_NETFILTER_XT_MATCH_MARK=y
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -198,8 +207,8 @@
CONFIG_IP6_NF_RAW=y
CONFIG_BRIDGE_NF_EBTABLES=y
CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
CONFIG_L2TP=y
-CONFIG_L2TP_DEBUGFS=y
CONFIG_L2TP_V3=y
CONFIG_L2TP_IP=y
CONFIG_L2TP_ETH=y
@@ -207,10 +216,13 @@
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_CLS_FW=y
CONFIG_NET_CLS_U32=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_CMP=y
CONFIG_NET_EMATCH_NBYTE=y
@@ -218,51 +230,63 @@
CONFIG_NET_EMATCH_META=y
CONFIG_NET_EMATCH_TEXT=y
CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
CONFIG_RMNET_DATA=y
CONFIG_RMNET_DATA_FC=y
CONFIG_RMNET_DATA_DEBUG_PKT=y
CONFIG_BT=y
-# CONFIG_BT_BREDR is not set
-# CONFIG_BT_LE is not set
CONFIG_MSM_BT_POWER=y
CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
CONFIG_CFG80211_INTERNAL_REGDB=y
-# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_RFKILL=y
CONFIG_NFC_NQ=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_AQT_REGMAP=y
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_HDCP_QSEECOM=y
CONFIG_QSEECOM=y
CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
CONFIG_QPNP_MISC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
CONFIG_DM_CRYPT=y
CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_BOW=y
CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
CONFIG_DUMMY=y
CONFIG_TUN=y
-# CONFIG_NET_VENDOR_AMAZON is not set
-# CONFIG_NET_CADENCE is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HISILICON is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
+CONFIG_SKY2=y
+CONFIG_SMSC911X=y
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
@@ -270,187 +294,154 @@
CONFIG_PPP_MPPE=y
CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=y
+CONFIG_PPTP=y
CONFIG_PPPOL2TP=y
CONFIG_PPPOLAC=y
CONFIG_PPPOPNS=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_LAN78XX=y
CONFIG_USB_USBNET=y
-# CONFIG_WLAN_VENDOR_ADMTEK is not set
-# CONFIG_WLAN_VENDOR_ATH is not set
-# CONFIG_WLAN_VENDOR_ATMEL is not set
-# CONFIG_WLAN_VENDOR_BROADCOM is not set
-# CONFIG_WLAN_VENDOR_CISCO is not set
-# CONFIG_WLAN_VENDOR_INTEL is not set
-# CONFIG_WLAN_VENDOR_INTERSIL is not set
-# CONFIG_WLAN_VENDOR_MARVELL is not set
-# CONFIG_WLAN_VENDOR_MEDIATEK is not set
-# CONFIG_WLAN_VENDOR_RALINK is not set
-# CONFIG_WLAN_VENDOR_REALTEK is not set
-# CONFIG_WLAN_VENDOR_RSI is not set
-# CONFIG_WLAN_VENDOR_ST is not set
-# CONFIG_WLAN_VENDOR_TI is not set
-# CONFIG_WLAN_VENDOR_ZYDAS is not set
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
-CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
-CONFIG_INPUT_QTI_HAPTICS=y
CONFIG_INPUT_UINPUT=y
# CONFIG_SERIO_SERPORT is not set
-# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVMEM is not set
# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_MSM_HS=y
-CONFIG_SERIAL_MSM_SMD=y
+CONFIG_SERIAL_MSM_GENI=y
CONFIG_DIAG_CHAR=y
-CONFIG_DIAG_USES_SMD=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
-CONFIG_MSM_SMD_PKT=y
+# CONFIG_DEVPORT is not set
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_MSM_V2=y
+CONFIG_I2C_QCOM_GENI=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
CONFIG_SPI_SPIDEV=y
CONFIG_SLIMBUS_MSM_NGD=y
CONFIG_SPMI=y
-CONFIG_PINCTRL_MSM8937=y
-CONFIG_PINCTRL_MSM8917=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_SDM670=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_QPNP_PIN=y
-CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_QCOM=y
CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
-CONFIG_POWER_SUPPLY=y
+CONFIG_NX30P6093=y
CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_QPNP_SMB2=y
-CONFIG_MSM_APM=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_SMB1390_CHARGE_PUMP=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
CONFIG_THERMAL_WRITABLE_TRIPS=y
-CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
CONFIG_THERMAL_GOV_LOW_LIMITS=y
CONFIG_CPU_THERMAL=y
CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
CONFIG_THERMAL_QPNP=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_AOP_REG_COOLING_DEVICE=y
CONFIG_QTI_QMI_COOLING_DEVICE=y
CONFIG_REGULATOR_COOLING_DEVICE=y
-CONFIG_QTI_BCL_PMIC5=y
-CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
-CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_PROXY_CONSUMER=y
-CONFIG_REGULATOR_GPIO=y
-CONFIG_REGULATOR_CPR=y
-CONFIG_REGULATOR_CPR4_APSS=y
-CONFIG_REGULATOR_CPRH_KBSS=y
-CONFIG_REGULATOR_MEM_ACC=y
-CONFIG_REGULATOR_MSM_GFX_LDO=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_QPNP_OLEDB=y
CONFIG_REGULATOR_QPNP=y
-CONFIG_REGULATOR_RPM_SMD=y
-CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_REFGEN=y
+CONFIG_REGULATOR_RPMH=y
CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_MEDIA_USB_SUPPORT=y
-CONFIG_USB_VIDEO_CLASS=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_MSM_CAMERA=y
-CONFIG_MSM_CAMERA_DEBUG=y
-CONFIG_MSMB_CAMERA=y
-CONFIG_MSMB_CAMERA_DEBUG=y
-CONFIG_MSM_CAMERA_SENSOR=y
-CONFIG_MSM_CPP=y
-CONFIG_MSM_CCI=y
-CONFIG_MSM_CSI20_HEADER=y
-CONFIG_MSM_CSI22_HEADER=y
-CONFIG_MSM_CSI30_HEADER=y
-CONFIG_MSM_CSI31_HEADER=y
-CONFIG_MSM_CSIPHY=y
-CONFIG_MSM_CSID=y
-CONFIG_MSM_EEPROM=y
-CONFIG_MSM_ISPIF_V2=y
-CONFIG_IMX134=y
-CONFIG_IMX132=y
-CONFIG_OV9724=y
-CONFIG_OV5648=y
-CONFIG_GC0339=y
-CONFIG_OV8825=y
-CONFIG_OV8865=y
-CONFIG_s5k4e1=y
-CONFIG_OV12830=y
-CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
-CONFIG_MSMB_JPEG=y
-CONFIG_MSM_FD=y
-CONFIG_MSM_VIDC_3X_V4L2=y
-CONFIG_MSM_VIDC_3X_GOVERNORS=y
-CONFIG_RADIO_IRIS=y
-CONFIG_RADIO_IRIS_TRANSPORT=y
+CONFIG_SPECTRA_CAMERA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_SW=y
CONFIG_QCOM_KGSL=y
-CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_DRM=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
+CONFIG_DRM_SDE_RSC=y
+CONFIG_DRM_LT_LT9611=y
+CONFIG_FB_VIRTUAL=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
CONFIG_SND_SOC=y
-CONFIG_SND_SOC_AW8896=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MULTITOUCH=y
-CONFIG_USB_HIDDEV=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SONY=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MSM=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ACM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_DEBUG_FILES=y
-CONFIG_USB_GADGET_DEBUG_FS=y
CONFIG_USB_GADGET_VBUS_DRAW=500
-CONFIG_USB_CI13XXX_MSM=y
CONFIG_USB_CONFIGFS=y
-CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_RNDIS=y
-CONFIG_USB_CONFIGFS_RMNET_BAM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_MTP=y
@@ -458,34 +449,39 @@
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_UAC2=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_UVC=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_GSI=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
CONFIG_MMC_PERF_PROFILING=y
-# CONFIG_PWRSEQ_EMMC is not set
-# CONFIG_PWRSEQ_SIMPLE is not set
CONFIG_MMC_PARANOID_SD_INIT=y
CONFIG_MMC_CLKGATE=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
CONFIG_LEDS_QPNP_HAPTICS=y
CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_QPNP=y
CONFIG_DMADEVICES=y
-CONFIG_QCOM_SPS_DMA=y
+CONFIG_QCOM_GPI_DMA=y
CONFIG_UIO=y
CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
@@ -493,68 +489,106 @@
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ION=y
CONFIG_ION_MSM=y
-CONFIG_IPA=y
-CONFIG_RMNET_IPA=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
-CONFIG_MSM_RMNET_BAM=y
-CONFIG_MSM_MDSS_PLL=y
+CONFIG_MSM_11AD=m
+CONFIG_SEEMP_CORE=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_MSM_GCC_SDM845=y
+CONFIG_MSM_VIDEOCC_SDM845=y
+CONFIG_MSM_CAMCC_SDM845=y
+CONFIG_MSM_DISPCC_SDM845=y
+CONFIG_CLOCK_QPNP_DIV=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_QCOM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
-CONFIG_MAILBOX=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
-CONFIG_MSM_SPM=y
-CONFIG_MSM_L2_SPM=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_SDM670_LLCC=y
+CONFIG_QCOM_QCS605_LLCC=y
+CONFIG_QCOM_LLCC_PERFMON=m
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_EUD=y
CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QPNP_PBS=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
-CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_MINIDUMP=y
CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
CONFIG_QCOM_SECURE_BUFFER=y
CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_MSM_SMEM=y
-CONFIG_MSM_SMD=y
-CONFIG_MSM_SMD_DEBUG=y
-CONFIG_MSM_TZ_SMMU=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_TRACER_PKT=y
+CONFIG_QTI_RPMH_API=y
CONFIG_MSM_SMP2P=y
-CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
-CONFIG_MSM_SYSMON_COMM=y
CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_ICNSS=y
+CONFIG_QCOM_COMMAND_DB=y
CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
CONFIG_MSM_EVENT_TIMER=y
-CONFIG_MSM_AVTIMER=y
CONFIG_MSM_PM=y
-CONFIG_QCOM_DCC=y
+CONFIG_MSM_QBT1000=y
+CONFIG_QCOM_DCC_V2=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
-CONFIG_MSM_BAM_DMUX=y
-CONFIG_WCNSS_CORE=y
-CONFIG_WCNSS_CORE_PRONTO=y
-CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
-CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
+CONFIG_MSM_REMOTEQDSS=y
CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_QCOMCCI_HWMON=y
+CONFIG_QCOM_M4M_HWMON=y
CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
CONFIG_DEVFREQ_SIMPLE_DEV=y
CONFIG_QCOM_DEVFREQ_DEVBW=y
-CONFIG_SPDM_SCM=y
-CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON_USB_GPIO=y
CONFIG_IIO=y
CONFIG_QCOM_RRADC=y
CONFIG_PWM=y
-CONFIG_QTI_MPM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_NVMEM=y
+# CONFIG_NVMEM_SYSFS is not set
+CONFIG_QCOM_QFPROM=y
CONFIG_SENSORS_SSC=y
CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
@@ -569,30 +603,42 @@
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_SDCARD_FS=y
-# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
-CONFIG_FRAME_WARN=2048
-CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_PANIC_TIMEOUT=5
-# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_FTRACE is not set
-# CONFIG_ARM_UNWIND is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
-CONFIG_LSM_MMAP_MIN_ADDR=4096
CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
@@ -600,11 +646,13 @@
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
-CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
-CONFIG_ARM_CRYPTO=y
-CONFIG_CRYPTO_SHA1_ARM_NEON=y
-CONFIG_CRYPTO_SHA2_ARM_CE=y
-CONFIG_CRYPTO_AES_ARM_BS=y
-CONFIG_CRYPTO_AES_ARM_CE=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm/configs/sdw3300_defconfig b/arch/arm64/configs/qcs605_defconfig
similarity index 69%
copy from arch/arm/configs/sdw3300_defconfig
copy to arch/arm64/configs/qcs605_defconfig
index 5e6fafb..3b7319c 100644
--- a/arch/arm/configs/sdw3300_defconfig
+++ b/arch/arm64/configs/qcs605_defconfig
@@ -1,6 +1,7 @@
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_FHANDLE is not set
CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
@@ -16,9 +17,13 @@
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_BLK_CGROUP=y
+CONFIG_DEBUG_BLK_CGROUP=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_CORE_CTL=y
@@ -29,21 +34,16 @@
CONFIG_SCHED_TUNE=y
CONFIG_DEFAULT_USE_ENERGY_AWARE=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_RD_BZIP2 is not set
-# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
-# CONFIG_MEMBARRIER is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
CONFIG_CC_STACKPROTECTOR_STRONG=y
-CONFIG_ARCH_MMAP_RND_BITS=16
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
@@ -51,50 +51,55 @@
CONFIG_MODULE_SIG=y
CONFIG_MODULE_SIG_FORCE=y
CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MSM8937=y
-CONFIG_ARCH_MSM8917=y
-CONFIG_ARCH_SDM439=y
-CONFIG_ARCH_SDM429=y
-# CONFIG_VDSO is not set
-CONFIG_SMP=y
+CONFIG_ARCH_SDM670=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
-CONFIG_ARM_PSCI=y
CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_HIGHMEM=y
-CONFIG_ARM_MODULE_PLTS=y
+CONFIG_HZ_100=y
+CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
-CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
+CONFIG_HARDEN_BRANCH_PREDICTOR=y
+CONFIG_PSCI_BP_HARDENING=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_IDLE=y
CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-CONFIG_CPU_FREQ_MSM=y
-CONFIG_CPU_IDLE=y
-CONFIG_VFP=y
-CONFIG_NEON=y
-CONFIG_KERNEL_MODE_NEON=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_AUTOSLEEP=y
-CONFIG_PM_WAKELOCKS=y
-CONFIG_PM_WAKELOCKS_LIMIT=0
-# CONFIG_PM_WAKELOCKS_GC is not set
-CONFIG_PM_DEBUG=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -104,10 +109,13 @@
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -124,7 +132,6 @@
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_SCTP=y
CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=y
CONFIG_NF_CONNTRACK_FTP=y
@@ -171,6 +178,8 @@
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -204,6 +213,7 @@
CONFIG_IP6_NF_RAW=y
CONFIG_BRIDGE_NF_EBTABLES=y
CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
CONFIG_L2TP=y
CONFIG_L2TP_DEBUGFS=y
CONFIG_L2TP_V3=y
@@ -213,10 +223,13 @@
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_CLS_FW=y
CONFIG_NET_CLS_U32=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_CMP=y
CONFIG_NET_EMATCH_NBYTE=y
@@ -224,15 +237,18 @@
CONFIG_NET_EMATCH_META=y
CONFIG_NET_EMATCH_TEXT=y
CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
CONFIG_DNS_RESOLVER=y
CONFIG_RMNET_DATA=y
CONFIG_RMNET_DATA_FC=y
CONFIG_RMNET_DATA_DEBUG_PKT=y
CONFIG_BT=y
-# CONFIG_BT_BREDR is not set
-# CONFIG_BT_LE is not set
CONFIG_MSM_BT_POWER=y
CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
CONFIG_CFG80211_INTERNAL_REGDB=y
# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_RFKILL=y
@@ -240,36 +256,43 @@
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_AQT_REGMAP=y
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_HDCP_QSEECOM=y
CONFIG_QSEECOM=y
CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
CONFIG_QPNP_MISC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_BOW=y
CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
CONFIG_DUMMY=y
CONFIG_TUN=y
-# CONFIG_NET_VENDOR_AMAZON is not set
-# CONFIG_NET_CADENCE is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_HISILICON is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
@@ -277,190 +300,155 @@
CONFIG_PPP_MPPE=y
CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=y
+CONFIG_PPTP=y
CONFIG_PPPOL2TP=y
CONFIG_PPPOLAC=y
CONFIG_PPPOPNS=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_LAN78XX=y
CONFIG_USB_USBNET=y
-# CONFIG_WLAN_VENDOR_ADMTEK is not set
-# CONFIG_WLAN_VENDOR_ATH is not set
-# CONFIG_WLAN_VENDOR_ATMEL is not set
-# CONFIG_WLAN_VENDOR_BROADCOM is not set
-# CONFIG_WLAN_VENDOR_CISCO is not set
-# CONFIG_WLAN_VENDOR_INTEL is not set
-# CONFIG_WLAN_VENDOR_INTERSIL is not set
-# CONFIG_WLAN_VENDOR_MARVELL is not set
-# CONFIG_WLAN_VENDOR_MEDIATEK is not set
-# CONFIG_WLAN_VENDOR_RALINK is not set
-# CONFIG_WLAN_VENDOR_REALTEK is not set
-# CONFIG_WLAN_VENDOR_RSI is not set
-# CONFIG_WLAN_VENDOR_ST is not set
-# CONFIG_WLAN_VENDOR_TI is not set
-# CONFIG_WLAN_VENDOR_ZYDAS is not set
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
-CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
-CONFIG_INPUT_QTI_HAPTICS=y
CONFIG_INPUT_UINPUT=y
# CONFIG_SERIO_SERPORT is not set
-# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVMEM is not set
# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_MSM=y
-CONFIG_SERIAL_MSM_CONSOLE=y
-CONFIG_SERIAL_MSM_HS=y
-CONFIG_SERIAL_MSM_SMD=y
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_CONSOLE=y
CONFIG_DIAG_CHAR=y
-CONFIG_DIAG_USES_SMD=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
-CONFIG_MSM_SMD_PKT=y
+# CONFIG_DEVPORT is not set
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_MSM_V2=y
+CONFIG_I2C_QCOM_GENI=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
CONFIG_SPI_SPIDEV=y
CONFIG_SLIMBUS_MSM_NGD=y
CONFIG_SPMI=y
-CONFIG_PINCTRL_MSM8937=y
-CONFIG_PINCTRL_MSM8917=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_SDM670=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_QPNP_PIN=y
-CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_QCOM=y
CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
-CONFIG_POWER_SUPPLY=y
+CONFIG_NX30P6093=y
CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_QPNP_SMB2=y
-CONFIG_MSM_APM=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_SMB1390_CHARGE_PUMP=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
CONFIG_THERMAL_WRITABLE_TRIPS=y
-CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
CONFIG_THERMAL_GOV_LOW_LIMITS=y
CONFIG_CPU_THERMAL=y
CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
CONFIG_THERMAL_QPNP=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_AOP_REG_COOLING_DEVICE=y
CONFIG_QTI_QMI_COOLING_DEVICE=y
CONFIG_REGULATOR_COOLING_DEVICE=y
-CONFIG_QTI_BCL_PMIC5=y
-CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
-CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_PROXY_CONSUMER=y
-CONFIG_REGULATOR_GPIO=y
-CONFIG_REGULATOR_CPR=y
-CONFIG_REGULATOR_CPR4_APSS=y
-CONFIG_REGULATOR_CPRH_KBSS=y
-CONFIG_REGULATOR_MEM_ACC=y
-CONFIG_REGULATOR_MSM_GFX_LDO=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_QPNP_OLEDB=y
CONFIG_REGULATOR_QPNP=y
-CONFIG_REGULATOR_RPM_SMD=y
-CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_REFGEN=y
+CONFIG_REGULATOR_RPMH=y
CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_MEDIA_USB_SUPPORT=y
-CONFIG_USB_VIDEO_CLASS=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_MSM_CAMERA=y
-CONFIG_MSM_CAMERA_DEBUG=y
-CONFIG_MSMB_CAMERA=y
-CONFIG_MSMB_CAMERA_DEBUG=y
-CONFIG_MSM_CAMERA_SENSOR=y
-CONFIG_MSM_CPP=y
-CONFIG_MSM_CCI=y
-CONFIG_MSM_CSI20_HEADER=y
-CONFIG_MSM_CSI22_HEADER=y
-CONFIG_MSM_CSI30_HEADER=y
-CONFIG_MSM_CSI31_HEADER=y
-CONFIG_MSM_CSIPHY=y
-CONFIG_MSM_CSID=y
-CONFIG_MSM_EEPROM=y
-CONFIG_MSM_ISPIF_V2=y
-CONFIG_IMX134=y
-CONFIG_IMX132=y
-CONFIG_OV9724=y
-CONFIG_OV5648=y
-CONFIG_GC0339=y
-CONFIG_OV8825=y
-CONFIG_OV8865=y
-CONFIG_s5k4e1=y
-CONFIG_OV12830=y
-CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
-CONFIG_MSMB_JPEG=y
-CONFIG_MSM_FD=y
-CONFIG_MSM_VIDC_3X_V4L2=y
-CONFIG_MSM_VIDC_3X_GOVERNORS=y
-CONFIG_RADIO_IRIS=y
-CONFIG_RADIO_IRIS_TRANSPORT=y
+CONFIG_SPECTRA_CAMERA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_SW=y
CONFIG_QCOM_KGSL=y
-CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_DRM=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
+CONFIG_DRM_SDE_RSC=y
+CONFIG_DRM_LT_LT9611=y
+CONFIG_FB_VIRTUAL=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
CONFIG_SND_SOC=y
-CONFIG_SND_SOC_AW8896=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MULTITOUCH=y
-CONFIG_USB_HIDDEV=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SONY=y
CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MSM=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ACM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_DEBUG_FILES=y
-CONFIG_USB_GADGET_DEBUG_FS=y
CONFIG_USB_GADGET_VBUS_DRAW=500
-CONFIG_USB_CI13XXX_MSM=y
CONFIG_USB_CONFIGFS=y
-CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_QCRNDIS=y
-CONFIG_USB_CONFIGFS_RNDIS=y
-CONFIG_USB_CONFIGFS_RMNET_BAM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_MTP=y
@@ -468,35 +456,45 @@
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_UAC2=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_UVC=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_GSI=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
CONFIG_MMC_PERF_PROFILING=y
-# CONFIG_PWRSEQ_EMMC is not set
-# CONFIG_PWRSEQ_SIMPLE is not set
CONFIG_MMC_RING_BUFFER=y
CONFIG_MMC_PARANOID_SD_INIT=y
CONFIG_MMC_CLKGATE=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
CONFIG_LEDS_QPNP_HAPTICS=y
CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_KRYO3XX_ARM64=y
+CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_QPNP=y
CONFIG_DMADEVICES=y
-CONFIG_QCOM_SPS_DMA=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_QCOM_GPI_DMA_DEBUG=y
CONFIG_UIO=y
CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
@@ -504,18 +502,32 @@
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ION=y
CONFIG_ION_MSM=y
-CONFIG_IPA=y
-CONFIG_RMNET_IPA=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
-CONFIG_MSM_RMNET_BAM=y
-CONFIG_MSM_MDSS_PLL=y
+CONFIG_MSM_11AD=m
+CONFIG_SEEMP_CORE=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_MSM_GCC_SDM845=y
+CONFIG_MSM_VIDEOCC_SDM845=y
+CONFIG_MSM_CAMCC_SDM845=y
+CONFIG_MSM_DISPCC_SDM845=y
+CONFIG_CLOCK_QPNP_DIV=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_QCOM_MDSS_PLL=y
CONFIG_REMOTE_SPINLOCK_MSM=y
-CONFIG_MAILBOX=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_LAZY_MAPPING=y
CONFIG_IOMMU_DEBUG=y
@@ -523,58 +535,80 @@
CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
-CONFIG_MSM_SPM=y
-CONFIG_MSM_L2_SPM=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_SDM670_LLCC=y
+CONFIG_QCOM_QCS605_LLCC=y
+CONFIG_QCOM_LLCC_PERFMON=m
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_ERP=y
+CONFIG_QCOM_EUD=y
CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_WDOG_IPI_ENABLE=y
+CONFIG_QPNP_PBS=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
-CONFIG_MSM_DEBUG_LAR_UNLOCK=y
-CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_MINIDUMP=y
CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
CONFIG_QCOM_SECURE_BUFFER=y
CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_MSM_SMEM=y
-CONFIG_MSM_SMD=y
-CONFIG_MSM_SMD_DEBUG=y
-CONFIG_MSM_TZ_SMMU=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
CONFIG_TRACER_PKT=y
+CONFIG_QTI_RPMH_API=y
CONFIG_MSM_SMP2P=y
-CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
-CONFIG_MSM_SYSMON_COMM=y
CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
+CONFIG_QCOM_COMMAND_DB=y
CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
CONFIG_MSM_EVENT_TIMER=y
-CONFIG_MSM_AVTIMER=y
CONFIG_MSM_PM=y
-CONFIG_QCOM_DCC=y
+CONFIG_MSM_QBT1000=y
+CONFIG_QCOM_DCC_V2=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
-# CONFIG_MSM_JTAGV8 is not set
-CONFIG_MSM_BAM_DMUX=y
-CONFIG_WCNSS_CORE=y
-CONFIG_WCNSS_CORE_PRONTO=y
-CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
-CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
+CONFIG_MSM_REMOTEQDSS=y
CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_QCOMCCI_HWMON=y
+CONFIG_QCOM_M4M_HWMON=y
CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
CONFIG_DEVFREQ_SIMPLE_DEV=y
CONFIG_QCOM_DEVFREQ_DEVBW=y
-CONFIG_SPDM_SCM=y
-CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON_USB_GPIO=y
CONFIG_IIO=y
CONFIG_QCOM_RRADC=y
CONFIG_PWM=y
-CONFIG_QCOM_SHOW_RESUME_IRQ=y
-CONFIG_QTI_MPM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_NVMEM=y
+# CONFIG_NVMEM_SYSFS is not set
+CONFIG_QCOM_QFPROM=y
CONFIG_SENSORS_SSC=y
CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
@@ -589,17 +623,21 @@
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_SDCARD_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_MODULE_LOAD_INFO=y
CONFIG_DEBUG_INFO=y
-CONFIG_FRAME_WARN=2048
CONFIG_PAGE_OWNER=y
CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
@@ -607,27 +645,19 @@
CONFIG_SLUB_DEBUG_PANIC_ON=y
CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
CONFIG_PAGE_POISONING=y
-CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
-CONFIG_DEBUG_OBJECTS=y
-CONFIG_DEBUG_OBJECTS_FREE=y
-CONFIG_DEBUG_OBJECTS_TIMERS=y
-CONFIG_DEBUG_OBJECTS_WORK=y
-CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
-CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
CONFIG_DEBUG_KMEMLEAK=y
CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_LOCKUP_DETECTOR=y
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_PANIC_ON_SCHED_BUG=y
CONFIG_PANIC_ON_RT_THROTTLING=y
CONFIG_SCHEDSTATS=y
CONFIG_SCHED_STACK_END_CHECK=y
-# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LIST=y
@@ -639,6 +669,7 @@
CONFIG_QCOM_RTB=y
CONFIG_QCOM_RTB_SEPARATE_CPUS=y
CONFIG_FUNCTION_TRACER=y
+CONFIG_PREEMPTIRQ_EVENTS=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
@@ -646,30 +677,30 @@
CONFIG_LKDTM=y
CONFIG_MEMTEST=y
CONFIG_PANIC_ON_DATA_CORRUPTION=y
-CONFIG_DEBUG_USER=y
-CONFIG_FORCE_PAGES=y
+CONFIG_ARM64_PTDUMP=y
CONFIG_PID_IN_CONTEXTIDR=y
-CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
CONFIG_CORESIGHT_SOURCE_ETM4X=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
CONFIG_CORESIGHT_QCOM_REPLICATOR=y
-CONFIG_CORESIGHT_DBGUI=y
CONFIG_CORESIGHT_STM=y
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
CONFIG_CORESIGHT_CTI=y
CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
-CONFIG_LSM_MMAP_MIN_ADDR=4096
CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
@@ -677,11 +708,14 @@
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
-CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
-CONFIG_ARM_CRYPTO=y
-CONFIG_CRYPTO_SHA1_ARM_NEON=y
-CONFIG_CRYPTO_SHA2_ARM_CE=y
-CONFIG_CRYPTO_AES_ARM_BS=y
-CONFIG_CRYPTO_AES_ARM_CE=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_XZ_DEC=y
CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index d233c67..2d39856 100755
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -55,7 +55,6 @@
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SDM670=y
CONFIG_PCI=y
-CONFIG_PCI_MSM=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_PREEMPT=y
@@ -498,7 +497,6 @@
CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
-CONFIG_MSM_11AD=m
CONFIG_SEEMP_CORE=y
CONFIG_QCOM_GENI_SE=y
CONFIG_MSM_GCC_SDM845=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index 25e3245..ccfcf65 100755
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -58,7 +58,6 @@
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SDM670=y
CONFIG_PCI=y
-CONFIG_PCI_MSM=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_PREEMPT=y
@@ -511,7 +510,6 @@
CONFIG_QPNP_COINCELL=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
-CONFIG_MSM_11AD=m
CONFIG_SEEMP_CORE=y
CONFIG_QCOM_GENI_SE=y
CONFIG_MSM_GCC_SDM845=y
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 6cd2304..92bcde0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -792,7 +792,6 @@
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select ZONE_DMA32 if 64BIT
- select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SIBYTE_LITTLESUR
bool "Sibyte BCM91250C2-LittleSur"
@@ -815,7 +814,6 @@
select SYS_HAS_CPU_SB1
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
- select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SIBYTE_BIGSUR
bool "Sibyte BCM91480B-BigSur"
@@ -829,7 +827,6 @@
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select ZONE_DMA32 if 64BIT
- select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SNI_RM
bool "SNI RM200/300/400"
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 060f23f..258158c 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -25,7 +25,17 @@
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+static inline int raw_smp_processor_id(void)
+{
+#if defined(__VDSO__)
+ extern int vdso_smp_processor_id(void)
+ __compiletime_error("VDSO should not call smp_processor_id()");
+ return vdso_smp_processor_id();
+#else
+ return current_thread_info()->cpu;
+#endif
+}
+#define raw_smp_processor_id raw_smp_processor_id
/* Map from cpu id to sequential logical cpu number. This will only
not be idempotent when cpus failed to come on-line. */
diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
index 3ef3fb6..b3d6bf2 100644
--- a/arch/mips/sibyte/common/Makefile
+++ b/arch/mips/sibyte/common/Makefile
@@ -1,5 +1,4 @@
obj-y := cfe.o
-obj-$(CONFIG_SWIOTLB) += dma.o
obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o
obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o
diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
deleted file mode 100644
index eb47a94..0000000
--- a/arch/mips/sibyte/common/dma.c
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * DMA support for Broadcom SiByte platforms.
- *
- * Copyright (c) 2018 Maciej W. Rozycki
- */
-
-#include <linux/swiotlb.h>
-#include <asm/bootinfo.h>
-
-void __init plat_swiotlb_setup(void)
-{
- swiotlb_init(1);
-}
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 0b845cc..247ca2e 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -6,7 +6,9 @@
$(filter -I%,$(KBUILD_CFLAGS)) \
$(filter -E%,$(KBUILD_CFLAGS)) \
$(filter -mmicromips,$(KBUILD_CFLAGS)) \
- $(filter -march=%,$(KBUILD_CFLAGS))
+ $(filter -march=%,$(KBUILD_CFLAGS)) \
+ $(filter -m%-float,$(KBUILD_CFLAGS)) \
+ -D__VDSO__
cflags-vdso := $(ccflags-vdso) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
-O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 44c33ee..2525f23 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -287,14 +287,6 @@
mmu_psize_defs[MMU_PAGE_64K].shift = 16;
mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
- if (mmu_psize_defs[MMU_PAGE_2M].shift) {
- /*
- * map vmemmap using 2M if available
- */
- mmu_vmemmap_psize = MMU_PAGE_2M;
- }
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
return;
}
@@ -337,7 +329,13 @@
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* vmemmap mapping */
- mmu_vmemmap_psize = mmu_virtual_psize;
+ if (mmu_psize_defs[MMU_PAGE_2M].shift) {
+ /*
+ * map vmemmap using 2M if available
+ */
+ mmu_vmemmap_psize = MMU_PAGE_2M;
+ } else
+ mmu_vmemmap_psize = mmu_virtual_psize;
#endif
/*
* initialize page table size
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index be4db07..95126d2 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1652,6 +1652,16 @@
case KVM_S390_MCHK:
irq->u.mchk.mcic = s390int->parm64;
break;
+ case KVM_S390_INT_PFAULT_INIT:
+ irq->u.ext.ext_params = s390int->parm;
+ irq->u.ext.ext_params2 = s390int->parm64;
+ break;
+ case KVM_S390_RESTART:
+ case KVM_S390_INT_CLOCK_COMP:
+ case KVM_S390_INT_CPU_TIMER:
+ break;
+ default:
+ return -EINVAL;
}
return 0;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 07f5719..ea20b60 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3105,7 +3105,7 @@
}
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
- struct kvm_s390_irq s390irq;
+ struct kvm_s390_irq s390irq = {};
r = -EFAULT;
if (copy_from_user(&s390int, argp, sizeof(s390int)))
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 896344b..9b15a1d 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -881,7 +881,7 @@
break;
case BPF_ALU64 | BPF_NEG: /* dst = -dst */
/* lcgr %dst,%dst */
- EMIT4(0xb9130000, dst_reg, dst_reg);
+ EMIT4(0xb9030000, dst_reg, dst_reg);
break;
/*
* BPF_FROM_BE/LE
@@ -1062,8 +1062,8 @@
/* llgf %w1,map.max_entries(%b2) */
EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
offsetof(struct bpf_array, map.max_entries));
- /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
- EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+ /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
+ EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
REG_W1, 0, 0xa);
/*
@@ -1089,8 +1089,10 @@
* goto out;
*/
- /* sllg %r1,%b3,3: %r1 = index * 8 */
- EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+ /* llgfr %r1,%b3: %r1 = (u32) index */
+ EMIT4(0xb9160000, REG_1, BPF_REG_3);
+ /* sllg %r1,%r1,3: %r1 *= 8 */
+ EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
/* lg %r1,prog(%b2,%r1) */
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
REG_1, offsetof(struct bpf_array, ptrs));
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index f20c83a..47be4f5 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -38,6 +38,7 @@
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
export REALMODE_CFLAGS
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index fd4484ae..112e3c4 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -671,10 +671,17 @@
throttle = perf_event_overflow(event, &data, &regs);
out:
- if (throttle)
+ if (throttle) {
perf_ibs_stop(event, 0);
- else
- perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
+ } else {
+ period >>= 4;
+
+ if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
+ (*config & IBS_OP_CNT_CTL))
+ period |= *config & IBS_OP_CUR_CNT_RAND;
+
+ perf_ibs_enable_event(perf_ibs, hwc, period);
+ }
perf_event_update_userpage(event);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index e98e238..55e362f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3075,6 +3075,11 @@
return left;
}
+static u64 nhm_limit_period(struct perf_event *event, u64 left)
+{
+ return max(left, 32ULL);
+}
+
PMU_FORMAT_ATTR(event, "config:0-7" );
PMU_FORMAT_ATTR(umask, "config:8-15" );
PMU_FORMAT_ATTR(edge, "config:18" );
@@ -3734,6 +3739,7 @@
x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.extra_regs = intel_nehalem_extra_regs;
+ x86_pmu.limit_period = nhm_limit_period;
x86_pmu.cpu_events = nhm_events_attrs;
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 0232b5a..588d8fb 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -71,6 +71,7 @@
BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
BOOT_PARAM_PRESERVE(hdr),
+ BOOT_PARAM_PRESERVE(e820_map),
BOOT_PARAM_PRESERVE(eddbuf),
};
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index f353061..81d5ea7 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -200,16 +200,20 @@
#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK 0x0F
-/* ibs fetch bits/masks */
+/* IBS fetch bits/masks */
#define IBS_FETCH_RAND_EN (1ULL<<57)
#define IBS_FETCH_VAL (1ULL<<49)
#define IBS_FETCH_ENABLE (1ULL<<48)
#define IBS_FETCH_CNT 0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
-/* ibs op bits/masks */
-/* lower 4 bits of the current count are ignored: */
-#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
+/*
+ * IBS op bits/masks
+ * The lower 7 bits of the current count are random bits
+ * preloaded by hardware and ignored in software
+ */
+#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
+#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 92639c9..d1ddcc0 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -444,8 +444,10 @@
({ \
int __gu_err; \
__inttype(*(ptr)) __gu_val; \
+ __typeof__(ptr) __gu_ptr = (ptr); \
+ __typeof__(size) __gu_size = (size); \
__uaccess_begin_nospec(); \
- __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+ __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
__uaccess_end(); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__builtin_expect(__gu_err, 0); \
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index d34629d..09dd95c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2346,7 +2346,13 @@
* dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
* gsi_top if ioapic_dynirq_base hasn't been initialized yet.
*/
- return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
+ if (!ioapic_initialized)
+ return gsi_top;
+ /*
+ * For DT enabled machines ioapic_dynirq_base is irrelevant and not
+ * updated. So simply return @from if ioapic_dynirq_base == 0.
+ */
+ return ioapic_dynirq_base ? : from;
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6e36152..ce9a0d7 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7639,6 +7639,7 @@
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gva_t gva = 0;
+ struct x86_exception e;
if (!nested_vmx_check_permission(vcpu) ||
!nested_vmx_check_vmcs12(vcpu))
@@ -7665,8 +7666,10 @@
vmx_instruction_info, true, &gva))
return 1;
/* _system ok, as nested_vmx_check_permission verified cpl=0 */
- kvm_write_guest_virt_system(vcpu, gva, &field_value,
- (is_long_mode(vcpu) ? 8 : 4), NULL);
+ if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
+ (is_long_mode(vcpu) ? 8 : 4),
+ NULL))
+ kvm_inject_page_fault(vcpu, &e);
}
nested_vmx_succeed(vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bbecbf2..aabfc14 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4620,6 +4620,13 @@
/* kvm_write_guest_virt_system can pull in tons of pages. */
vcpu->arch.l1tf_flush_l1d = true;
+ /*
+ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+ * is returned, but our callers are not ready for that and they blindly
+ * call kvm_inject_page_fault. Ensure that they at least do not leak
+ * uninitialized kernel stack memory into cr2 and error code.
+ */
+ memset(exception, 0, sizeof(*exception));
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
PFERR_WRITE_MASK, exception);
}
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 31c6010..7fa8401 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -199,7 +199,7 @@
make the card work).
config ATM_NICSTAR_USE_IDT77105
- bool "Use IDT77015 PHY driver (25Mbps)"
+ bool "Use IDT77105 PHY driver (25Mbps)"
depends on ATM_NICSTAR
help
Support for the PHYsical layer chip in ForeRunner LE25 cards. In
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 6930abe..ece4f70 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3784,7 +3784,7 @@
v.native_format = UDP->native_format;
mutex_unlock(&floppy_mutex);
- if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
+ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
return -EFAULT;
return 0;
}
@@ -3820,7 +3820,7 @@
v.bufblocks = UDRS->bufblocks;
mutex_unlock(&floppy_mutex);
- if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
+ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
return -EFAULT;
return 0;
Eintr:
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index a803e72..b3ec52c 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -509,7 +509,7 @@
static int bt_dt_parse_clk_info(struct device *dev,
struct bt_power_clk_data **clk_data)
{
- int ret = -EINVAL;
+ int ret = 0;
struct bt_power_clk_data *clk = NULL;
struct device_node *np = dev->of_node;
@@ -546,7 +546,7 @@
*clk_data = clk;
} else {
- BT_PWR_ERR("clocks is not provided in device tree");
+ BT_PWR_INFO("clocks is not provided in device tree");
}
err:
@@ -567,7 +567,7 @@
of_get_named_gpio(pdev->dev.of_node,
"qca,bt-reset-gpio", 0);
if (bt_power_pdata->bt_gpio_sys_rst < 0)
- BT_PWR_ERR("bt-reset-gpio not provided in device tree");
+ BT_PWR_INFO("bt-reset-gpio not provided in devicetree");
rc = bt_dt_parse_vreg_info(&pdev->dev,
&bt_power_pdata->bt_vdd_core,
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 7875105..d8cdd34 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -167,4 +167,25 @@
help
Platform configuration infrastructure for the ARM Ltd.
Versatile Express.
+
+config MHI_BUS
+ tristate "Modem Host Interface"
+ help
+ MHI Host Interface is a communication protocol to be used by the host
+ to control and communicate with modem over a high speed peripheral bus.
+ Enabling this module will allow host to communicate with external
+ devices that support MHI protocol.
+
+config MHI_DEBUG
+ bool "MHI debug support"
+ depends on MHI_BUS
+ help
+ Say yes here to enable debugging support in the MHI transport
+ and individual MHI client drivers. This option will impact
+ throughput as individual MHI packets and state transitions
+ will be logged.
+
+source drivers/bus/mhi/controllers/Kconfig
+source drivers/bus/mhi/devices/Kconfig
+
endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index c6cfa6b..6b1df6b 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -21,3 +21,4 @@
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
+obj-$(CONFIG_MHI_BUS) += mhi/
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
new file mode 100644
index 0000000..2382e04
--- /dev/null
+++ b/drivers/bus/mhi/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the MHI stack
+#
+
+# core layer
+obj-y += core/
+obj-y += controllers/
+obj-y += devices/
diff --git a/drivers/bus/mhi/controllers/Kconfig b/drivers/bus/mhi/controllers/Kconfig
new file mode 100644
index 0000000..e8a90b6
--- /dev/null
+++ b/drivers/bus/mhi/controllers/Kconfig
@@ -0,0 +1,10 @@
+menu "MHI controllers"
+
+config MHI_QCOM
+ tristate "MHI QCOM"
+ depends on MHI_BUS
+ help
+ If you say yes to this option, MHI bus support for QCOM modem chipsets
+ will be enabled.
+
+endmenu
diff --git a/drivers/bus/mhi/controllers/Makefile b/drivers/bus/mhi/controllers/Makefile
new file mode 100644
index 0000000..9cb38a5
--- /dev/null
+++ b/drivers/bus/mhi/controllers/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MHI_QCOM) += mhi_qcom.o mhi_arch_qcom.o
diff --git a/drivers/bus/mhi/controllers/mhi_arch_qcom.c b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
new file mode 100644
index 0000000..0da2863
--- /dev/null
+++ b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
@@ -0,0 +1,770 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/dma-iommu.h>
+#include <linux/async.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/esoc_client.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_pcie.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/mhi.h>
+#include "mhi_qcom.h"
+
+struct arch_info {
+ struct mhi_dev *mhi_dev;
+ struct esoc_desc *esoc_client;
+ struct esoc_client_hook esoc_ops;
+ struct msm_bus_scale_pdata *msm_bus_pdata;
+ u32 bus_client;
+ struct msm_pcie_register_event pcie_reg_event;
+ struct pci_saved_state *pcie_state;
+ struct dma_iommu_mapping *mapping;
+ async_cookie_t cookie;
+ void *boot_ipc_log;
+ void *tsync_ipc_log;
+ struct mhi_device *boot_dev;
+ struct notifier_block pm_notifier;
+ struct completion pm_completion;
+};
+
+/* ipc log markings */
+#define DLOG "Dev->Host: "
+#define HLOG "Host: "
+
+#define MHI_TSYNC_LOG_PAGES (10)
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define MHI_IPC_LOG_PAGES (100)
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_VERBOSE;
+
+#else
+
+#define MHI_IPC_LOG_PAGES (10)
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_ERROR;
+
+#endif
+
+static int mhi_arch_pm_notifier(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ struct arch_info *arch_info =
+ container_of(nb, struct arch_info, pm_notifier);
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ reinit_completion(&arch_info->pm_completion);
+ break;
+
+ case PM_POST_SUSPEND:
+ complete_all(&arch_info->pm_completion);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void mhi_arch_timesync_log(struct mhi_controller *mhi_cntrl,
+ u64 remote_time)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct arch_info *arch_info = mhi_dev->arch_info;
+
+ if (remote_time != U64_MAX)
+ ipc_log_string(arch_info->tsync_ipc_log, "%6u.%06lu 0x%llx",
+ REMOTE_TICKS_TO_SEC(remote_time),
+ REMOTE_TIME_REMAINDER_US(remote_time),
+ remote_time);
+}
+
+static int mhi_arch_set_bus_request(struct mhi_controller *mhi_cntrl, int index)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct arch_info *arch_info = mhi_dev->arch_info;
+
+ MHI_LOG("Setting bus request to index %d\n", index);
+
+ if (arch_info->bus_client)
+ return msm_bus_scale_client_update_request(
+ arch_info->bus_client,
+ index);
+
+ /* default return success */
+ return 0;
+}
+
+static void mhi_arch_pci_link_state_cb(struct msm_pcie_notify *notify)
+{
+ struct mhi_controller *mhi_cntrl = notify->data;
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct pci_dev *pci_dev = mhi_dev->pci_dev;
+
+ switch (notify->event) {
+ case MSM_PCIE_EVENT_WAKEUP:
+ MHI_LOG("Received MSM_PCIE_EVENT_WAKE signal\n");
+
+ /* bring link out of d3cold */
+ if (mhi_dev->powered_on) {
+ pm_runtime_get(&pci_dev->dev);
+ pm_runtime_put_noidle(&pci_dev->dev);
+ }
+ break;
+ case MSM_PCIE_EVENT_L1SS_TIMEOUT:
+ MHI_VERB("Received MSM_PCIE_EVENT_L1SS_TIMEOUT signal\n");
+
+ pm_runtime_mark_last_busy(&pci_dev->dev);
+ pm_request_autosuspend(&pci_dev->dev);
+ break;
+ default:
+ MHI_ERR("Unhandled event 0x%x\n", notify->event);
+ }
+}
+
+static int mhi_arch_esoc_ops_power_on(void *priv, unsigned int flags)
+{
+ struct mhi_controller *mhi_cntrl = priv;
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct pci_dev *pci_dev = mhi_dev->pci_dev;
+ int ret;
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ if (mhi_dev->powered_on) {
+ MHI_LOG("MHI still in active state\n");
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return 0;
+ }
+
+ MHI_LOG("Enter\n");
+
+ /* reset rpm state */
+ pm_runtime_set_active(&pci_dev->dev);
+ pm_runtime_enable(&pci_dev->dev);
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ pm_runtime_forbid(&pci_dev->dev);
+ ret = pm_runtime_get_sync(&pci_dev->dev);
+ if (ret < 0) {
+ MHI_ERR("Error with rpm resume, ret:%d\n", ret);
+ return ret;
+ }
+
+ /* re-start the link & recover default cfg states */
+ ret = msm_pcie_pm_control(MSM_PCIE_RESUME, pci_dev->bus->number,
+ pci_dev, NULL, 0);
+ if (ret) {
+ MHI_ERR("Failed to resume pcie bus ret %d\n", ret);
+ return ret;
+ }
+
+ return mhi_pci_probe(pci_dev, NULL);
+}
+
+static void mhi_arch_link_off(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct pci_dev *pci_dev = mhi_dev->pci_dev;
+
+ MHI_LOG("Entered\n");
+
+ pci_set_power_state(pci_dev, PCI_D3hot);
+
+ /* release the resources */
+ msm_pcie_pm_control(MSM_PCIE_SUSPEND, mhi_cntrl->bus, pci_dev, NULL, 0);
+ mhi_arch_set_bus_request(mhi_cntrl, 0);
+
+ MHI_LOG("Exited\n");
+}
+
+static void mhi_arch_esoc_ops_power_off(void *priv, unsigned int flags)
+{
+ struct mhi_controller *mhi_cntrl = priv;
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ bool mdm_state = (flags & ESOC_HOOK_MDM_CRASH);
+ struct arch_info *arch_info = mhi_dev->arch_info;
+ struct pci_dev *pci_dev = mhi_dev->pci_dev;
+
+ MHI_LOG("Enter: mdm_crashed:%d\n", mdm_state);
+
+ /*
+ * Abort system suspend if system is preparing to go to suspend
+ * by grabbing wake source.
+ * If system is suspended, wait for pm notifier callback to notify
+ * that resume has occurred with PM_POST_SUSPEND event.
+ */
+ pm_stay_awake(&mhi_cntrl->mhi_dev->dev);
+ wait_for_completion(&arch_info->pm_completion);
+
+ /* if link is in drv suspend, wake it up */
+ pm_runtime_get_sync(&pci_dev->dev);
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ if (!mhi_dev->powered_on) {
+ MHI_LOG("Not in active state\n");
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ pm_runtime_put_noidle(&pci_dev->dev);
+ return;
+ }
+ mhi_dev->powered_on = false;
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ pm_runtime_put_noidle(&pci_dev->dev);
+
+ MHI_LOG("Triggering shutdown process\n");
+ mhi_power_down(mhi_cntrl, !mdm_state);
+
+ /* turn the link off */
+ mhi_deinit_pci_dev(mhi_cntrl);
+ mhi_arch_link_off(mhi_cntrl);
+
+ /* wait for boot monitor to exit */
+ async_synchronize_cookie(arch_info->cookie + 1);
+
+ mhi_arch_iommu_deinit(mhi_cntrl);
+ mhi_arch_pcie_deinit(mhi_cntrl);
+
+ pm_relax(&mhi_cntrl->mhi_dev->dev);
+}
+
+static void mhi_arch_esoc_ops_mdm_error(void *priv)
+{
+ struct mhi_controller *mhi_cntrl = priv;
+
+ MHI_LOG("Enter: mdm asserted\n");
+
+ /* transition MHI state into error state */
+ mhi_control_error(mhi_cntrl);
+
+ MHI_LOG("Exit\n");
+}
+
+static void mhi_bl_dl_cb(struct mhi_device *mhi_device,
+ struct mhi_result *mhi_result)
+{
+ struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl;
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct arch_info *arch_info = mhi_dev->arch_info;
+ char *buf = mhi_result->buf_addr;
+
+ /* force a null at last character */
+ buf[mhi_result->bytes_xferd - 1] = 0;
+
+ ipc_log_string(arch_info->boot_ipc_log, "%s %s", DLOG, buf);
+}
+
+static void mhi_bl_dummy_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+}
+
+static void mhi_bl_remove(struct mhi_device *mhi_device)
+{
+ struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl;
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct arch_info *arch_info = mhi_dev->arch_info;
+
+ arch_info->boot_dev = NULL;
+ ipc_log_string(arch_info->boot_ipc_log,
+ HLOG "Received Remove notif.\n");
+}
+
+static void mhi_boot_monitor(void *data, async_cookie_t cookie)
+{
+ struct mhi_controller *mhi_cntrl = data;
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct arch_info *arch_info = mhi_dev->arch_info;
+ struct mhi_device *boot_dev;
+ /* 15 sec timeout for booting device */
+ const u32 timeout = msecs_to_jiffies(15000);
+
+ /* wait for device to enter boot stage */
+ wait_event_timeout(mhi_cntrl->state_event, mhi_cntrl->ee == MHI_EE_AMSS
+ || mhi_cntrl->ee == MHI_EE_DISABLE_TRANSITION,
+ timeout);
+
+ ipc_log_string(arch_info->boot_ipc_log, HLOG "Device current ee = %s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ /* if we successfully booted to amss disable boot log channel */
+ if (mhi_cntrl->ee == MHI_EE_AMSS) {
+ boot_dev = arch_info->boot_dev;
+ if (boot_dev)
+ mhi_unprepare_from_transfer(boot_dev);
+
+ pm_runtime_allow(&mhi_dev->pci_dev->dev);
+ }
+}
+
+int mhi_arch_power_up(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct arch_info *arch_info = mhi_dev->arch_info;
+
+ /* start a boot monitor */
+ arch_info->cookie = async_schedule(mhi_boot_monitor, mhi_cntrl);
+
+ return 0;
+}
+
+static int mhi_arch_pcie_scale_bw(struct mhi_controller *mhi_cntrl,
+ struct pci_dev *pci_dev,
+ struct mhi_link_info *link_info)
+{
+ int ret;
+
+ mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data);
+ ret = msm_pcie_set_link_bandwidth(pci_dev, link_info->target_link_speed,
+ link_info->target_link_width);
+ mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data);
+
+ if (ret)
+ return ret;
+
+ /* do a bus scale vote based on gen speeds */
+ mhi_arch_set_bus_request(mhi_cntrl, link_info->target_link_speed);
+
+ MHI_VERB("bw changed to speed:0x%x width:0x%x\n",
+ link_info->target_link_speed, link_info->target_link_width);
+
+ return 0;
+}
+
+static int mhi_arch_bw_scale(struct mhi_controller *mhi_cntrl,
+ struct mhi_link_info *link_info)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct pci_dev *pci_dev = mhi_dev->pci_dev;
+
+ return mhi_arch_pcie_scale_bw(mhi_cntrl, pci_dev, link_info);
+}
+
+static int mhi_bl_probe(struct mhi_device *mhi_device,
+ const struct mhi_device_id *id)
+{
+ char node_name[32];
+ struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl;
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct arch_info *arch_info = mhi_dev->arch_info;
+
+ snprintf(node_name, sizeof(node_name), "mhi_bl_%04x_%02u.%02u.%02u",
+ mhi_device->dev_id, mhi_device->domain, mhi_device->bus,
+ mhi_device->slot);
+
+ arch_info->boot_dev = mhi_device;
+ arch_info->boot_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES,
+ node_name, 0);
+ ipc_log_string(arch_info->boot_ipc_log, HLOG
+ "Entered SBL, Session ID:0x%x\n", mhi_cntrl->session_id);
+
+ return 0;
+}
+
+static const struct mhi_device_id mhi_bl_match_table[] = {
+ { .chan = "BL" },
+ {},
+};
+
+static struct mhi_driver mhi_bl_driver = {
+ .id_table = mhi_bl_match_table,
+ .remove = mhi_bl_remove,
+ .probe = mhi_bl_probe,
+ .ul_xfer_cb = mhi_bl_dummy_cb,
+ .dl_xfer_cb = mhi_bl_dl_cb,
+ .driver = {
+ .name = "MHI_BL",
+ .owner = THIS_MODULE,
+ },
+};
+
+int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct arch_info *arch_info = mhi_dev->arch_info;
+ struct mhi_link_info *cur_link_info;
+ char node[32];
+ int ret;
+ u16 linkstat;
+
+ if (!arch_info) {
+ struct msm_pcie_register_event *reg_event;
+
+ arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev,
+ sizeof(*arch_info), GFP_KERNEL);
+ if (!arch_info)
+ return -ENOMEM;
+
+ mhi_dev->arch_info = arch_info;
+ arch_info->mhi_dev = mhi_dev;
+
+ snprintf(node, sizeof(node), "mhi_%04x_%02u.%02u.%02u",
+ mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
+ mhi_cntrl->slot);
+ mhi_cntrl->log_buf = ipc_log_context_create(MHI_IPC_LOG_PAGES,
+ node, 0);
+ mhi_cntrl->log_lvl = mhi_ipc_log_lvl;
+
+ snprintf(node, sizeof(node), "mhi_tsync_%04x_%02u.%02u.%02u",
+ mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
+ mhi_cntrl->slot);
+ arch_info->tsync_ipc_log = ipc_log_context_create(
+ MHI_TSYNC_LOG_PAGES, node, 0);
+ if (arch_info->tsync_ipc_log)
+ mhi_cntrl->tsync_log = mhi_arch_timesync_log;
+
+ /* register for bus scale if defined */
+ arch_info->msm_bus_pdata = msm_bus_cl_get_pdata_from_dev(
+ &mhi_dev->pci_dev->dev);
+ if (arch_info->msm_bus_pdata) {
+ arch_info->bus_client =
+ msm_bus_scale_register_client(
+ arch_info->msm_bus_pdata);
+ if (!arch_info->bus_client)
+ return -EINVAL;
+ }
+
+ /* register with pcie rc for WAKE# events */
+ reg_event = &arch_info->pcie_reg_event;
+ reg_event->events =
+ MSM_PCIE_EVENT_WAKEUP | MSM_PCIE_EVENT_L1SS_TIMEOUT;
+
+ reg_event->user = mhi_dev->pci_dev;
+ reg_event->callback = mhi_arch_pci_link_state_cb;
+ reg_event->notify.data = mhi_cntrl;
+ ret = msm_pcie_register_event(reg_event);
+ if (ret)
+ MHI_LOG("Failed to reg. for link up notification\n");
+
+ init_completion(&arch_info->pm_completion);
+
+ /* register PM notifier to get post resume events */
+ arch_info->pm_notifier.notifier_call = mhi_arch_pm_notifier;
+ register_pm_notifier(&arch_info->pm_notifier);
+
+ /*
+ * Mark as completed at initial boot-up to allow ESOC power on
+ * callback to proceed if system has not gone to suspend
+ */
+ complete_all(&arch_info->pm_completion);
+
+ arch_info->esoc_client = devm_register_esoc_client(
+ &mhi_dev->pci_dev->dev, "mdm");
+ if (IS_ERR_OR_NULL(arch_info->esoc_client)) {
+ MHI_ERR("Failed to register esoc client\n");
+ } else {
+ /* register for power on/off hooks */
+ struct esoc_client_hook *esoc_ops =
+ &arch_info->esoc_ops;
+
+ esoc_ops->priv = mhi_cntrl;
+ esoc_ops->prio = ESOC_MHI_HOOK;
+ esoc_ops->esoc_link_power_on =
+ mhi_arch_esoc_ops_power_on;
+ esoc_ops->esoc_link_power_off =
+ mhi_arch_esoc_ops_power_off;
+ esoc_ops->esoc_link_mdm_crash =
+ mhi_arch_esoc_ops_mdm_error;
+
+ ret = esoc_register_client_hook(arch_info->esoc_client,
+ esoc_ops);
+ if (ret)
+ MHI_ERR("Failed to register esoc ops\n");
+ }
+
+ /*
+ * MHI host driver has full autonomy to manage power state.
+ * Disable all automatic power collapse features
+ */
+ msm_pcie_pm_control(MSM_PCIE_DISABLE_PC, mhi_cntrl->bus,
+ mhi_dev->pci_dev, NULL, 0);
+ mhi_dev->pci_dev->no_d3hot = true;
+
+ mhi_cntrl->bw_scale = mhi_arch_bw_scale;
+
+ mhi_driver_register(&mhi_bl_driver);
+ }
+
+ /* store the current bw info */
+ ret = pcie_capability_read_word(mhi_dev->pci_dev,
+ PCI_EXP_LNKSTA, &linkstat);
+ if (ret)
+ return ret;
+
+ cur_link_info = &mhi_cntrl->mhi_link_info;
+ cur_link_info->target_link_speed = linkstat & PCI_EXP_LNKSTA_CLS;
+ cur_link_info->target_link_width = (linkstat & PCI_EXP_LNKSTA_NLW) >>
+ PCI_EXP_LNKSTA_NLW_SHIFT;
+
+ return mhi_arch_set_bus_request(mhi_cntrl,
+ cur_link_info->target_link_speed);
+}
+
+void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
+{
+ mhi_arch_set_bus_request(mhi_cntrl, 0);
+}
+
+static struct dma_iommu_mapping *mhi_arch_create_iommu_mapping(
+ struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ dma_addr_t base;
+ size_t size;
+
+ /*
+ * If S1_BYPASS enabled then iommu space is not used, however framework
+ * still requires clients to create a mapping space before attaching. So
+ * set to smallest size required by iommu framework.
+ */
+ if (mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS) {
+ base = 0;
+ size = PAGE_SIZE;
+ } else {
+ base = mhi_dev->iova_start;
+ size = (mhi_dev->iova_stop - base) + 1;
+ }
+
+ MHI_LOG("Create iommu mapping of base:%pad size:%zu\n",
+ &base, size);
+ return arm_iommu_create_mapping(&pci_bus_type, base, size);
+}
+
+int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct arch_info *arch_info = mhi_dev->arch_info;
+ u32 smmu_config = mhi_dev->smmu_cfg;
+ struct dma_iommu_mapping *mapping = NULL;
+ int ret;
+
+ if (smmu_config) {
+ mapping = mhi_arch_create_iommu_mapping(mhi_cntrl);
+ if (IS_ERR(mapping)) {
+ MHI_ERR("Failed to create iommu mapping\n");
+ return PTR_ERR(mapping);
+ }
+ }
+
+ if (smmu_config & MHI_SMMU_S1_BYPASS) {
+ int s1_bypass = 1;
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+ if (ret) {
+ MHI_ERR("Failed to set attribute S1_BYPASS\n");
+ goto release_mapping;
+ }
+ }
+
+ if (smmu_config & MHI_SMMU_FAST) {
+ int fast_map = 1;
+
+ ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST,
+ &fast_map);
+ if (ret) {
+ MHI_ERR("Failed to set attribute FAST_MAP\n");
+ goto release_mapping;
+ }
+ }
+
+ if (smmu_config & MHI_SMMU_ATOMIC) {
+ int atomic = 1;
+
+ ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_ATOMIC,
+ &atomic);
+ if (ret) {
+ MHI_ERR("Failed to set attribute ATOMIC\n");
+ goto release_mapping;
+ }
+ }
+
+ if (smmu_config & MHI_SMMU_FORCE_COHERENT) {
+ int force_coherent = 1;
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
+ &force_coherent);
+ if (ret) {
+ MHI_ERR("Failed to set attribute FORCE_COHERENT\n");
+ goto release_mapping;
+ }
+ }
+
+ if (smmu_config) {
+ ret = arm_iommu_attach_device(&mhi_dev->pci_dev->dev, mapping);
+
+ if (ret) {
+ MHI_ERR("Error attach device, ret:%d\n", ret);
+ goto release_mapping;
+ }
+ arch_info->mapping = mapping;
+ }
+
+ mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
+
+ ret = dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ MHI_ERR("Error setting dma mask, ret:%d\n", ret);
+ goto release_device;
+ }
+
+ return 0;
+
+release_device:
+ arm_iommu_detach_device(mhi_cntrl->dev);
+
+release_mapping:
+ arm_iommu_release_mapping(mapping);
+
+ return ret;
+}
+
+void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct arch_info *arch_info = mhi_dev->arch_info;
+ struct dma_iommu_mapping *mapping = arch_info->mapping;
+
+ if (mapping) {
+ arm_iommu_detach_device(mhi_cntrl->dev);
+ arm_iommu_release_mapping(mapping);
+ }
+ arch_info->mapping = NULL;
+ mhi_cntrl->dev = NULL;
+}
+
+int mhi_arch_link_suspend(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct arch_info *arch_info = mhi_dev->arch_info;
+ struct pci_dev *pci_dev = mhi_dev->pci_dev;
+ int ret = 0;
+
+ MHI_LOG("Entered\n");
+
+ /* disable inactivity timer */
+ msm_pcie_l1ss_timeout_disable(pci_dev);
+
+ switch (mhi_dev->suspend_mode) {
+ case MHI_DEFAULT_SUSPEND:
+ pci_clear_master(pci_dev);
+ ret = pci_save_state(mhi_dev->pci_dev);
+ if (ret) {
+ MHI_ERR("Failed with pci_save_state, ret:%d\n", ret);
+ goto exit_suspend;
+ }
+
+ arch_info->pcie_state = pci_store_saved_state(pci_dev);
+ pci_disable_device(pci_dev);
+
+ pci_set_power_state(pci_dev, PCI_D3hot);
+
+ /* release the resources */
+ msm_pcie_pm_control(MSM_PCIE_SUSPEND, mhi_cntrl->bus, pci_dev,
+ NULL, 0);
+ mhi_arch_set_bus_request(mhi_cntrl, 0);
+ break;
+ case MHI_FAST_LINK_OFF:
+ case MHI_ACTIVE_STATE:
+ case MHI_FAST_LINK_ON:/* keeping link on do nothing */
+ break;
+ }
+
+exit_suspend:
+ if (ret)
+ msm_pcie_l1ss_timeout_enable(pci_dev);
+
+ MHI_LOG("Exited with ret:%d\n", ret);
+
+ return ret;
+}
+
+static int __mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct arch_info *arch_info = mhi_dev->arch_info;
+ struct pci_dev *pci_dev = mhi_dev->pci_dev;
+ struct mhi_link_info *cur_info = &mhi_cntrl->mhi_link_info;
+ int ret;
+
+ MHI_LOG("Entered\n");
+
+ /* request bus scale voting based on higher gen speed */
+ ret = mhi_arch_set_bus_request(mhi_cntrl,
+ cur_info->target_link_speed);
+ if (ret)
+ MHI_LOG("Could not set bus frequency, ret:%d\n", ret);
+
+ ret = msm_pcie_pm_control(MSM_PCIE_RESUME, mhi_cntrl->bus, pci_dev,
+ NULL, 0);
+ if (ret) {
+ MHI_ERR("Link training failed, ret:%d\n", ret);
+ return ret;
+ }
+
+ ret = pci_set_power_state(pci_dev, PCI_D0);
+ if (ret) {
+ MHI_ERR("Failed to set PCI_D0 state, ret:%d\n", ret);
+ return ret;
+ }
+
+ ret = pci_enable_device(pci_dev);
+ if (ret) {
+ MHI_ERR("Failed to enable device, ret:%d\n", ret);
+ return ret;
+ }
+
+ ret = pci_load_and_free_saved_state(pci_dev, &arch_info->pcie_state);
+ if (ret)
+ MHI_LOG("Failed to load saved cfg state\n");
+
+ pci_restore_state(pci_dev);
+ pci_set_master(pci_dev);
+
+ return 0;
+}
+
+int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct pci_dev *pci_dev = mhi_dev->pci_dev;
+ int ret = 0;
+
+ MHI_LOG("Entered\n");
+
+ switch (mhi_dev->suspend_mode) {
+ case MHI_DEFAULT_SUSPEND:
+ ret = __mhi_arch_link_resume(mhi_cntrl);
+ break;
+ case MHI_FAST_LINK_OFF:
+ case MHI_ACTIVE_STATE:
+ case MHI_FAST_LINK_ON:
+ break;
+ }
+
+ if (ret) {
+ MHI_ERR("Link training failed, ret:%d\n", ret);
+ return ret;
+ }
+
+ msm_pcie_l1ss_timeout_enable(pci_dev);
+
+ MHI_LOG("Exited\n");
+
+ return 0;
+}
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c
new file mode 100644
index 0000000..3df293c
--- /dev/null
+++ b/drivers/bus/mhi/controllers/mhi_qcom.c
@@ -0,0 +1,875 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/arch_timer.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/mhi.h>
+#include "mhi_qcom.h"
+
+struct firmware_info {
+ unsigned int dev_id;
+ const char *fw_image;
+ const char *edl_image;
+};
+
+static const struct firmware_info firmware_table[] = {
+ {.dev_id = 0x306, .fw_image = "sdx55m/sbl1.mbn"},
+ {.dev_id = 0x305, .fw_image = "sdx50m/sbl1.mbn"},
+ {.dev_id = 0x304, .fw_image = "sbl.mbn", .edl_image = "edl.mbn"},
+ /* default, set to debug.mbn */
+ {.fw_image = "debug.mbn"},
+};
+
+static int debug_mode;
+module_param_named(debug_mode, debug_mode, int, 0644);
+
+int mhi_debugfs_trigger_m0(void *data, u64 val)
+{
+ struct mhi_controller *mhi_cntrl = data;
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+ MHI_LOG("Trigger M3 Exit\n");
+ pm_runtime_get(&mhi_dev->pci_dev->dev);
+ pm_runtime_put(&mhi_dev->pci_dev->dev);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL,
+ mhi_debugfs_trigger_m0, "%llu\n");
+
+int mhi_debugfs_trigger_m3(void *data, u64 val)
+{
+ struct mhi_controller *mhi_cntrl = data;
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+ MHI_LOG("Trigger M3 Entry\n");
+ pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev);
+ pm_request_autosuspend(&mhi_dev->pci_dev->dev);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL,
+ mhi_debugfs_trigger_m3, "%llu\n");
+
+void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct pci_dev *pci_dev = mhi_dev->pci_dev;
+
+ pm_runtime_mark_last_busy(&pci_dev->dev);
+ pm_runtime_dont_use_autosuspend(&pci_dev->dev);
+ pm_runtime_disable(&pci_dev->dev);
+
+ /* reset counter for lpm state changes */
+ mhi_dev->lpm_disable_depth = 0;
+
+ pci_free_irq_vectors(pci_dev);
+ kfree(mhi_cntrl->irq);
+ mhi_cntrl->irq = NULL;
+ iounmap(mhi_cntrl->regs);
+ mhi_cntrl->regs = NULL;
+ pci_clear_master(pci_dev);
+ pci_release_region(pci_dev, mhi_dev->resn);
+ pci_disable_device(pci_dev);
+}
+
+static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct pci_dev *pci_dev = mhi_dev->pci_dev;
+ int ret;
+ resource_size_t len;
+ int i;
+
+ mhi_dev->resn = MHI_PCI_BAR_NUM;
+ ret = pci_assign_resource(pci_dev, mhi_dev->resn);
+ if (ret) {
+ MHI_ERR("Error assign pci resources, ret:%d\n", ret);
+ return ret;
+ }
+
+ ret = pci_enable_device(pci_dev);
+ if (ret) {
+ MHI_ERR("Error enabling device, ret:%d\n", ret);
+ goto error_enable_device;
+ }
+
+ ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi");
+ if (ret) {
+ MHI_ERR("Error pci_request_region, ret:%d\n", ret);
+ goto error_request_region;
+ }
+
+ pci_set_master(pci_dev);
+
+ mhi_cntrl->base_addr = pci_resource_start(pci_dev, mhi_dev->resn);
+ len = pci_resource_len(pci_dev, mhi_dev->resn);
+ mhi_cntrl->regs = ioremap_nocache(mhi_cntrl->base_addr, len);
+ if (!mhi_cntrl->regs) {
+ MHI_ERR("Error ioremap region\n");
+ goto error_ioremap;
+ }
+
+ ret = pci_alloc_irq_vectors(pci_dev, mhi_cntrl->msi_required,
+ mhi_cntrl->msi_required, PCI_IRQ_MSI);
+ if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) {
+ MHI_ERR("Failed to enable MSI, ret:%d\n", ret);
+ goto error_req_msi;
+ }
+
+ mhi_cntrl->msi_allocated = ret;
+ mhi_cntrl->irq = kmalloc_array(mhi_cntrl->msi_allocated,
+ sizeof(*mhi_cntrl->irq), GFP_KERNEL);
+ if (!mhi_cntrl->irq) {
+ ret = -ENOMEM;
+ goto error_alloc_msi_vec;
+ }
+
+ for (i = 0; i < mhi_cntrl->msi_allocated; i++) {
+ mhi_cntrl->irq[i] = pci_irq_vector(pci_dev, i);
+ if (mhi_cntrl->irq[i] < 0) {
+ ret = mhi_cntrl->irq[i];
+ goto error_get_irq_vec;
+ }
+ }
+
+ dev_set_drvdata(&pci_dev->dev, mhi_cntrl);
+
+ /* configure runtime pm */
+ pm_runtime_set_autosuspend_delay(&pci_dev->dev, MHI_RPM_SUSPEND_TMR_MS);
+ pm_runtime_use_autosuspend(&pci_dev->dev);
+ pm_suspend_ignore_children(&pci_dev->dev, true);
+
+ /*
+ * pci framework will increment usage count (twice) before
+ * calling local device driver probe function.
+ * 1st pci.c pci_pm_init() calls pm_runtime_forbid
+ * 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync
+ * Framework expect pci device driver to call
+ * pm_runtime_put_noidle to decrement usage count after
+ * successful probe and call pm_runtime_allow to enable
+ * runtime suspend.
+ */
+ pm_runtime_mark_last_busy(&pci_dev->dev);
+ pm_runtime_put_noidle(&pci_dev->dev);
+
+ return 0;
+
+error_get_irq_vec:
+ kfree(mhi_cntrl->irq);
+ mhi_cntrl->irq = NULL;
+
+error_alloc_msi_vec:
+ pci_free_irq_vectors(pci_dev);
+
+error_req_msi:
+ iounmap(mhi_cntrl->regs);
+
+error_ioremap:
+ pci_clear_master(pci_dev);
+
+error_request_region:
+ pci_disable_device(pci_dev);
+
+error_enable_device:
+ pci_release_region(pci_dev, mhi_dev->resn);
+
+ return ret;
+}
+
+static int mhi_runtime_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+ MHI_LOG("Enter\n");
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ if (!mhi_dev->powered_on) {
+ MHI_LOG("Not fully powered, return success\n");
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return 0;
+ }
+
+ ret = mhi_pm_suspend(mhi_cntrl);
+
+ if (ret) {
+ MHI_LOG("Abort due to ret:%d\n", ret);
+ mhi_dev->suspend_mode = MHI_ACTIVE_STATE;
+ goto exit_runtime_suspend;
+ }
+
+ mhi_dev->suspend_mode = MHI_DEFAULT_SUSPEND;
+
+ ret = mhi_arch_link_suspend(mhi_cntrl);
+
+ /* failed suspending link abort mhi suspend */
+ if (ret) {
+ MHI_LOG("Failed to suspend link, abort suspend\n");
+ mhi_pm_resume(mhi_cntrl);
+ mhi_dev->suspend_mode = MHI_ACTIVE_STATE;
+ }
+
+exit_runtime_suspend:
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ MHI_LOG("Exited with ret:%d\n", ret);
+
+ return ret;
+}
+
+static int mhi_runtime_idle(struct device *dev)
+{
+ struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+
+ MHI_LOG("Entered returning -EBUSY\n");
+
+ /*
+ * RPM framework during runtime resume always calls
+ * rpm_idle to see if device ready to suspend.
+ * If dev.power usage_count is 0, rpm fw will call
+ * rpm_idle cb to see if device is ready to suspend.
+ * if cb return 0, or cb not defined the framework will
+ * assume device driver is ready to suspend;
+ * therefore, fw will schedule runtime suspend.
+ * In MHI power management, MHI host shall go to
+ * runtime suspend only after entering MHI State M2, even if
+ * usage count is 0. Return -EBUSY to disable automatic suspend.
+ */
+ return -EBUSY;
+}
+
+static int mhi_runtime_resume(struct device *dev)
+{
+ int ret = 0;
+ struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+ MHI_LOG("Enter\n");
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ if (!mhi_dev->powered_on) {
+ MHI_LOG("Not fully powered, return success\n");
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return 0;
+ }
+
+ /* turn on link */
+ ret = mhi_arch_link_resume(mhi_cntrl);
+ if (ret)
+ goto rpm_resume_exit;
+
+
+ /* transition to M0 state */
+ if (mhi_dev->suspend_mode == MHI_DEFAULT_SUSPEND)
+ ret = mhi_pm_resume(mhi_cntrl);
+ else
+ ret = mhi_pm_fast_resume(mhi_cntrl, MHI_FAST_LINK_ON);
+
+ mhi_dev->suspend_mode = MHI_ACTIVE_STATE;
+
+rpm_resume_exit:
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ MHI_LOG("Exited with :%d\n", ret);
+
+ return ret;
+}
+
+static int mhi_system_resume(struct device *dev)
+{
+ return mhi_runtime_resume(dev);
+}
+
+int mhi_system_suspend(struct device *dev)
+{
+ struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ int ret;
+
+ MHI_LOG("Entered\n");
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ if (!mhi_dev->powered_on) {
+ MHI_LOG("Not fully powered, return success\n");
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return 0;
+ }
+
+ /*
+ * pci framework always makes a dummy vote to rpm
+ * framework to resume before calling system suspend
+ * hence usage count is minimum one
+ */
+ if (atomic_read(&dev->power.usage_count) > 1) {
+ /*
+ * clients have requested to keep link on, try
+ * fast suspend. No need to notify clients since
+ * we will not be turning off the pcie link
+ */
+ ret = mhi_pm_fast_suspend(mhi_cntrl, false);
+ mhi_dev->suspend_mode = MHI_FAST_LINK_ON;
+ } else {
+ /* try normal suspend */
+ mhi_dev->suspend_mode = MHI_DEFAULT_SUSPEND;
+ ret = mhi_pm_suspend(mhi_cntrl);
+
+ /*
+ * normal suspend failed because we're busy, try
+ * fast suspend before aborting system suspend.
+ * this could happen if a client has disabled
+ * device lpm but no active vote for PCIe from
+ * apps processor
+ */
+ if (ret == -EBUSY) {
+ ret = mhi_pm_fast_suspend(mhi_cntrl, true);
+ mhi_dev->suspend_mode = MHI_FAST_LINK_ON;
+ }
+ }
+
+ if (ret) {
+ MHI_LOG("Abort due to ret:%d\n", ret);
+ mhi_dev->suspend_mode = MHI_ACTIVE_STATE;
+ goto exit_system_suspend;
+ }
+
+ ret = mhi_arch_link_suspend(mhi_cntrl);
+
+ /* failed suspending link abort mhi suspend */
+ if (ret) {
+ MHI_LOG("Failed to suspend link, abort suspend\n");
+ if (mhi_dev->suspend_mode == MHI_DEFAULT_SUSPEND)
+ mhi_pm_resume(mhi_cntrl);
+ else
+ mhi_pm_fast_resume(mhi_cntrl, MHI_FAST_LINK_OFF);
+
+ mhi_dev->suspend_mode = MHI_ACTIVE_STATE;
+ }
+
+exit_system_suspend:
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ MHI_LOG("Exit with ret:%d\n", ret);
+
+ return ret;
+}
+
+static int mhi_force_suspend(struct mhi_controller *mhi_cntrl)
+{
+ int ret = -EIO;
+ const u32 delayms = 100;
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ int itr = DIV_ROUND_UP(mhi_cntrl->timeout_ms, delayms);
+
+ MHI_LOG("Entered\n");
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ for (; itr; itr--) {
+ /*
+ * This function gets called as soon as the device enters mission mode
+ * so most of the channels are still in disabled state. However,
+ * sbl channels are active and clients could be trying to close
+ * channels while we trying to suspend the link. So, we need to
+ * re-try if MHI is busy
+ */
+ ret = mhi_pm_suspend(mhi_cntrl);
+ if (!ret || ret != -EBUSY)
+ break;
+
+ MHI_LOG("MHI busy, sleeping and retry\n");
+ msleep(delayms);
+ }
+
+ if (ret)
+ goto exit_force_suspend;
+
+ mhi_dev->suspend_mode = MHI_DEFAULT_SUSPEND;
+ ret = mhi_arch_link_suspend(mhi_cntrl);
+
+exit_force_suspend:
+ MHI_LOG("Force suspend ret with %d\n", ret);
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return ret;
+}
+
+/*
+ * Check whether the PCIe link is alive: read the device id from config
+ * space and compare with the id cached at probe. A failed read or a
+ * mismatched id (reads return all 1s on a dead link) means link down.
+ * Returns 0 when the link is up, -EIO otherwise.
+ */
+static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv)
+{
+ struct mhi_dev *mhi_dev = priv;
+ u16 dev_id;
+ int ret;
+
+ /* try reading device id, if dev id don't match, link is down */
+ ret = pci_read_config_word(mhi_dev->pci_dev, PCI_DEVICE_ID, &dev_id);
+
+ return (ret || dev_id != mhi_cntrl->dev_id) ? -EIO : 0;
+}
+
+/*
+ * Disable PCIe L1 (ASPM) on the endpoint. Requests nest via
+ * lpm_disable_depth: only the first caller touches LNKCTL, later callers
+ * just bump the count. If L1 is not enabled in LNKCTL the depth is left
+ * untouched so a later mhi_lpm_enable() stays balanced.
+ *
+ * NOTE(review): LNKCTL is a 16-bit register; byte access works only
+ * because the ASPM control bits live in the low byte - consider the
+ * pcie_capability_{read,write}_word() accessors instead.
+ */
+static int mhi_lpm_disable(struct mhi_controller *mhi_cntrl, void *priv)
+{
+ struct mhi_dev *mhi_dev = priv;
+ struct pci_dev *pci_dev = mhi_dev->pci_dev;
+ int lnkctl = pci_dev->pcie_cap + PCI_EXP_LNKCTL;
+ u8 val;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&mhi_dev->lpm_lock, flags);
+
+ /* L1 is already disabled */
+ if (mhi_dev->lpm_disable_depth) {
+ mhi_dev->lpm_disable_depth++;
+ goto lpm_disable_exit;
+ }
+
+ ret = pci_read_config_byte(pci_dev, lnkctl, &val);
+ if (ret) {
+ MHI_ERR("Error reading LNKCTL, ret:%d\n", ret);
+ goto lpm_disable_exit;
+ }
+
+ /* L1 is not supported, do not increment lpm_disable_depth */
+ if (unlikely(!(val & PCI_EXP_LNKCTL_ASPM_L1)))
+ goto lpm_disable_exit;
+
+ val &= ~PCI_EXP_LNKCTL_ASPM_L1;
+ ret = pci_write_config_byte(pci_dev, lnkctl, val);
+ if (ret) {
+ MHI_ERR("Error writing LNKCTL to disable LPM, ret:%d\n", ret);
+ goto lpm_disable_exit;
+ }
+
+ mhi_dev->lpm_disable_depth++;
+
+lpm_disable_exit:
+ spin_unlock_irqrestore(&mhi_dev->lpm_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Re-enable PCIe L1 (ASPM), balancing mhi_lpm_disable(). Only the last
+ * outstanding disable request actually rewrites LNKCTL; intermediate
+ * callers just decrement the nesting count. On a config-space write
+ * failure the depth is left at 1 so the state still reads "disabled".
+ */
+static int mhi_lpm_enable(struct mhi_controller *mhi_cntrl, void *priv)
+{
+ struct mhi_dev *mhi_dev = priv;
+ struct pci_dev *pci_dev = mhi_dev->pci_dev;
+ int lnkctl = pci_dev->pcie_cap + PCI_EXP_LNKCTL;
+ u8 val;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&mhi_dev->lpm_lock, flags);
+
+ /*
+ * Exit if L1 is not supported or is already disabled or
+ * decrementing lpm_disable_depth still keeps it above 0
+ */
+ if (!mhi_dev->lpm_disable_depth)
+ goto lpm_enable_exit;
+
+ if (mhi_dev->lpm_disable_depth > 1) {
+ mhi_dev->lpm_disable_depth--;
+ goto lpm_enable_exit;
+ }
+
+ ret = pci_read_config_byte(pci_dev, lnkctl, &val);
+ if (ret) {
+ MHI_ERR("Error reading LNKCTL, ret:%d\n", ret);
+ goto lpm_enable_exit;
+ }
+
+ val |= PCI_EXP_LNKCTL_ASPM_L1;
+ ret = pci_write_config_byte(pci_dev, lnkctl, val);
+ if (ret) {
+ MHI_ERR("Error writing LNKCTL to enable LPM, ret:%d\n", ret);
+ goto lpm_enable_exit;
+ }
+
+ /* last disable request released: depth back to fully enabled */
+ mhi_dev->lpm_disable_depth = 0;
+
+lpm_enable_exit:
+ spin_unlock_irqrestore(&mhi_dev->lpm_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Power up the device: recover from a lingering SYS_ERR state with a
+ * soft reset if needed, run the arch power-up glue, then kick off the
+ * async MHI power-up sequence and create debugfs trigger files.
+ *
+ * NOTE(review): this can be re-invoked via the power_up sysfs node;
+ * debugfs_create_file() is then called again for existing "m0"/"m3"
+ * entries - confirm duplicate creation is acceptable here.
+ */
+static int mhi_qcom_power_up(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_dev_state dev_state = mhi_get_mhi_state(mhi_cntrl);
+ const u32 delayus = 10;
+ int itr = DIV_ROUND_UP(mhi_cntrl->timeout_ms * 1000, delayus);
+ int ret;
+
+ /*
+ * It's possible device did not go thru a cold reset before
+ * power up and still in error state. If device in error state,
+ * we need to trigger a soft reset before continue with power
+ * up
+ */
+ if (dev_state == MHI_STATE_SYS_ERR) {
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+ while (itr--) {
+ dev_state = mhi_get_mhi_state(mhi_cntrl);
+ if (dev_state != MHI_STATE_SYS_ERR)
+ break;
+ usleep_range(delayus, delayus << 1);
+ }
+ /* device still in error state, abort power up */
+ if (dev_state == MHI_STATE_SYS_ERR)
+ return -EIO;
+ }
+
+ /* when coming out of SSR, initial ee state is not valid */
+ mhi_cntrl->ee = 0;
+
+ ret = mhi_arch_power_up(mhi_cntrl);
+ if (ret)
+ return ret;
+
+ ret = mhi_async_power_up(mhi_cntrl);
+
+ /* power up create the dentry */
+ if (mhi_cntrl->dentry) {
+ debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl,
+ &debugfs_trigger_m0_fops);
+ debugfs_create_file("m3", 0444, mhi_cntrl->dentry, mhi_cntrl,
+ &debugfs_trigger_m3_fops);
+ }
+
+ return ret;
+}
+
+/* MHI core hook: take a runtime PM reference (async resume request) */
+static int mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv)
+{
+ struct mhi_dev *mhi_dev = priv;
+ struct device *dev = &mhi_dev->pci_dev->dev;
+
+ return pm_runtime_get(dev);
+}
+
+/*
+ * MHI core hook: drop the reference from mhi_runtime_get() without an
+ * immediate idle check; autosuspend is scheduled separately from the
+ * MHI_CB_IDLE callback in mhi_status_cb().
+ */
+static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv)
+{
+ struct mhi_dev *mhi_dev = priv;
+ struct device *dev = &mhi_dev->pci_dev->dev;
+
+ pm_runtime_put_noidle(dev);
+}
+
+/*
+ * MHI core status callback. Handles MHI_CB_IDLE (schedule runtime
+ * autosuspend) and MHI_CB_EE_MISSION_MODE (force a suspend/resume cycle
+ * so the device picks up mission-mode PCIe PHY settings); other reasons
+ * are only logged.
+ */
+static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
+ void *priv,
+ enum MHI_CB reason)
+{
+ struct mhi_dev *mhi_dev = priv;
+ struct device *dev = &mhi_dev->pci_dev->dev;
+ int ret;
+
+ switch (reason) {
+ case MHI_CB_IDLE:
+ MHI_LOG("Schedule runtime suspend\n");
+ pm_runtime_mark_last_busy(dev);
+ pm_request_autosuspend(dev);
+ break;
+ case MHI_CB_EE_MISSION_MODE:
+ /*
+ * we need to force a suspend so device can switch to
+ * mission mode pcie phy settings.
+ */
+ pm_runtime_get(dev);
+ ret = mhi_force_suspend(mhi_cntrl);
+ /* only resume if the forced suspend actually succeeded */
+ if (!ret)
+ mhi_runtime_resume(dev);
+ pm_runtime_put(dev);
+ break;
+ default:
+ MHI_ERR("Unhandled cb:0x%x\n", reason);
+ }
+}
+
+/* capture host SoC XO time in ticks (raw arch counter, no conversion) */
+static u64 mhi_time_get(struct mhi_controller *mhi_cntrl, void *priv)
+{
+ return arch_counter_get_cntvct();
+}
+
+/* sysfs "timeout_ms" (RW): show the controller's state/cmd timeout */
+static ssize_t timeout_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ /* buffer provided by sysfs has a minimum size of PAGE_SIZE */
+ return snprintf(buf, PAGE_SIZE, "%u\n", mhi_cntrl->timeout_ms);
+}
+
+/* sysfs "timeout_ms" (RW): set a new timeout; accepts any u32 value */
+static ssize_t timeout_ms_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ u32 timeout_ms;
+
+ if (kstrtou32(buf, 0, &timeout_ms) < 0)
+ return -EINVAL;
+
+ mhi_cntrl->timeout_ms = timeout_ms;
+
+ return count;
+}
+static DEVICE_ATTR_RW(timeout_ms);
+
+/* sysfs "power_up" (WO): any write triggers mhi_qcom_power_up() */
+static ssize_t power_up_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret;
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ ret = mhi_qcom_power_up(mhi_cntrl);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_WO(power_up);
+
+/* attributes published on the mhi device (see mhi_register_controller) */
+static struct attribute *mhi_qcom_attrs[] = {
+ &dev_attr_timeout_ms.attr,
+ &dev_attr_power_up.attr,
+ NULL
+};
+
+static const struct attribute_group mhi_qcom_group = {
+ .attrs = mhi_qcom_attrs,
+};
+
+/*
+ * Allocate and register an MHI controller for this pci device: pull
+ * smmu/iova configuration from devicetree, wire up the PM and low-power
+ * callbacks, pick a firmware image by device id and publish the sysfs
+ * group. Returns the controller or an ERR_PTR on failure.
+ *
+ * NOTE(review): all failure paths return ERR_PTR(-EINVAL), discarding
+ * the underlying error code, and a sysfs_create_group() failure frees a
+ * controller that was already registered via of_register_mhi_controller()
+ * without unregistering it first - confirm against the MHI core API.
+ */
+static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev)
+{
+ struct mhi_controller *mhi_cntrl;
+ struct mhi_dev *mhi_dev;
+ struct device_node *of_node = pci_dev->dev.of_node;
+ const struct firmware_info *firmware_info;
+ bool use_bb;
+ u64 addr_win[2];
+ int ret, i;
+
+ if (!of_node)
+ return ERR_PTR(-ENODEV);
+
+ mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev));
+ if (!mhi_cntrl)
+ return ERR_PTR(-ENOMEM);
+
+ mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+ /* record BDF so later probes can find this controller again */
+ mhi_cntrl->domain = pci_domain_nr(pci_dev->bus);
+ mhi_cntrl->dev_id = pci_dev->device;
+ mhi_cntrl->bus = pci_dev->bus->number;
+ mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn);
+
+ ret = of_property_read_u32(of_node, "qcom,smmu-cfg",
+ &mhi_dev->smmu_cfg);
+ if (ret)
+ goto error_register;
+
+ use_bb = of_property_read_bool(of_node, "mhi,use-bb");
+
+ /*
+ * if s1 translation enabled or using bounce buffer pull iova addr
+ * from dt
+ */
+ if (use_bb || (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH &&
+ !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS))) {
+ ret = of_property_count_elems_of_size(of_node, "qcom,addr-win",
+ sizeof(addr_win));
+ if (ret != 1)
+ goto error_register;
+ ret = of_property_read_u64_array(of_node, "qcom,addr-win",
+ addr_win, 2);
+ if (ret)
+ goto error_register;
+ } else {
+ /* no translation: the whole of DRAM is addressable */
+ addr_win[0] = memblock_start_of_DRAM();
+ addr_win[1] = memblock_end_of_DRAM();
+ }
+
+ mhi_dev->iova_start = addr_win[0];
+ mhi_dev->iova_stop = addr_win[1];
+
+ /*
+ * If S1 is enabled, set MHI_CTRL start address to 0 so we can use low
+ * level mapping api to map buffers outside of smmu domain
+ */
+ if (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH &&
+ !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS))
+ mhi_cntrl->iova_start = 0;
+ else
+ mhi_cntrl->iova_start = addr_win[0];
+
+ mhi_cntrl->iova_stop = mhi_dev->iova_stop;
+ mhi_cntrl->of_node = of_node;
+
+ mhi_dev->pci_dev = pci_dev;
+ spin_lock_init(&mhi_dev->lpm_lock);
+
+ /* setup power management apis */
+ mhi_cntrl->status_cb = mhi_status_cb;
+ mhi_cntrl->runtime_get = mhi_runtime_get;
+ mhi_cntrl->runtime_put = mhi_runtime_put;
+ mhi_cntrl->link_status = mhi_link_status;
+
+ mhi_cntrl->lpm_disable = mhi_lpm_disable;
+ mhi_cntrl->lpm_enable = mhi_lpm_enable;
+ mhi_cntrl->time_get = mhi_time_get;
+ /* 19.2 MHz XO on both ends */
+ mhi_cntrl->remote_timer_freq = 19200000;
+ mhi_cntrl->local_timer_freq = 19200000;
+
+ ret = of_register_mhi_controller(mhi_cntrl);
+ if (ret)
+ goto error_register;
+
+ /* fall back to the table's last entry when no dev_id matches */
+ for (i = 0; i < ARRAY_SIZE(firmware_table); i++) {
+ firmware_info = firmware_table + i;
+
+ /* debug mode always use default */
+ if (!debug_mode && mhi_cntrl->dev_id == firmware_info->dev_id)
+ break;
+ }
+
+ mhi_cntrl->fw_image = firmware_info->fw_image;
+ mhi_cntrl->edl_image = firmware_info->edl_image;
+
+ ret = sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj,
+ &mhi_qcom_group);
+ if (ret)
+ goto error_register;
+
+ return mhi_cntrl;
+
+error_register:
+ mhi_free_controller(mhi_cntrl);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/*
+ * PCI probe: reuse an already-registered controller for this BDF or
+ * register a new one, then bring up the arch glue, iommu and pci
+ * resources, and (outside debug mode) start the power-up sequence.
+ *
+ * NOTE(review): powered_on is set to true before any of the init steps
+ * have succeeded - confirm nothing reads it on the failure paths.
+ */
+int mhi_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *device_id)
+{
+ struct mhi_controller *mhi_cntrl;
+ u32 domain = pci_domain_nr(pci_dev->bus);
+ u32 bus = pci_dev->bus->number;
+ u32 dev_id = pci_dev->device;
+ u32 slot = PCI_SLOT(pci_dev->devfn);
+ struct mhi_dev *mhi_dev;
+ int ret;
+
+ /* see if we already registered */
+ mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id);
+ if (!mhi_cntrl)
+ mhi_cntrl = mhi_register_controller(pci_dev);
+
+ if (IS_ERR(mhi_cntrl))
+ return PTR_ERR(mhi_cntrl);
+
+ mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ mhi_dev->powered_on = true;
+
+ ret = mhi_arch_pcie_init(mhi_cntrl);
+ if (ret)
+ return ret;
+
+ ret = mhi_arch_iommu_init(mhi_cntrl);
+ if (ret)
+ goto error_iommu_init;
+
+ ret = mhi_init_pci_dev(mhi_cntrl);
+ if (ret)
+ goto error_init_pci;
+
+ /* start power up sequence */
+ if (!debug_mode) {
+ ret = mhi_qcom_power_up(mhi_cntrl);
+ if (ret)
+ goto error_power_up;
+ }
+
+ pm_runtime_mark_last_busy(&pci_dev->dev);
+
+ MHI_LOG("Return successful\n");
+
+ return 0;
+
+/* unwind in reverse order of the init steps above */
+error_power_up:
+ mhi_deinit_pci_dev(mhi_cntrl);
+
+error_init_pci:
+ mhi_arch_iommu_deinit(mhi_cntrl);
+
+error_iommu_init:
+ mhi_arch_pcie_deinit(mhi_cntrl);
+
+ return ret;
+}
+
+/* runtime PM and system sleep hooks implemented earlier in this file */
+static const struct dev_pm_ops pm_ops = {
+ SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
+ mhi_runtime_resume,
+ mhi_runtime_idle)
+ SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume)
+};
+
+/*
+ * PCI ids served by this driver. The table is const (it is never
+ * modified) and exported via MODULE_DEVICE_TABLE so userspace can
+ * autoload the module when a matching device enumerates; without the
+ * export no modalias is generated and autoload silently fails.
+ */
+static const struct pci_device_id mhi_pcie_device_id[] = {
+ {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0300)},
+ {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0301)},
+ {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0302)},
+ {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)},
+ {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)},
+ {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)},
+ {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)},
+ {PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)},
+ {0},
+};
+MODULE_DEVICE_TABLE(pci, mhi_pcie_device_id);
+
+static struct pci_driver mhi_pcie_driver = {
+ .name = "mhi",
+ .id_table = mhi_pcie_device_id,
+ .probe = mhi_pci_probe,
+ /*
+ * NOTE(review): no .remove callback is defined, so nothing tears
+ * the controller down on device removal or driver unbind - confirm
+ * this is intentional.
+ */
+ .driver = {
+ .pm = &pm_ops
+ }
+};
+
+module_pci_driver(mhi_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("MHI_CORE");
+MODULE_DESCRIPTION("MHI Host Driver");
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.h b/drivers/bus/mhi/controllers/mhi_qcom.h
new file mode 100644
index 0000000..9060802
--- /dev/null
+++ b/drivers/bus/mhi/controllers/mhi_qcom.h
@@ -0,0 +1,120 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MHI_QCOM_
+#define _MHI_QCOM_
+
+/* iova cfg bitmask */
+#define MHI_SMMU_ATTACH BIT(0)
+#define MHI_SMMU_S1_BYPASS BIT(1)
+#define MHI_SMMU_FAST BIT(2)
+#define MHI_SMMU_ATOMIC BIT(3)
+#define MHI_SMMU_FORCE_COHERENT BIT(4)
+
+#define MHI_PCIE_VENDOR_ID (0x17cb)
+#define MHI_PCIE_DEBUG_ID (0xffff)
+
+/* runtime suspend timer */
+#define MHI_RPM_SUSPEND_TMR_MS (250)
+#define MHI_PCI_BAR_NUM (0)
+
+/* timesync time calculations */
+#define REMOTE_TICKS_TO_US(x) (div_u64((x) * 100ULL, \
+ div_u64(mhi_cntrl->remote_timer_freq, 10000ULL)))
+#define REMOTE_TICKS_TO_SEC(x) (div_u64((x), \
+ mhi_cntrl->remote_timer_freq))
+#define REMOTE_TIME_REMAINDER_US(x) (REMOTE_TICKS_TO_US((x)) % \
+ (REMOTE_TICKS_TO_SEC((x)) * 1000000ULL))
+
+extern const char * const mhi_ee_str[MHI_EE_MAX];
+/*
+ * Map an execution environment to its printable name. The argument is
+ * fully parenthesized so expressions (e.g. "ee + 1") expand safely.
+ */
+#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
+ "INVALID_EE" : mhi_ee_str[(ee)])
+
+/* link/suspend state as tracked by this controller driver */
+enum mhi_suspend_mode {
+ MHI_ACTIVE_STATE,
+ MHI_DEFAULT_SUSPEND,
+ MHI_FAST_LINK_OFF,
+ MHI_FAST_LINK_ON,
+};
+
+/* any mode other than MHI_ACTIVE_STATE (value 0) counts as suspended */
+#define MHI_IS_SUSPENDED(mode) (mode)
+
+/* per-controller private data for the qcom glue driver */
+struct mhi_dev {
+ struct pci_dev *pci_dev;
+ bool drv_supported;
+ u32 smmu_cfg; /* MHI_SMMU_* bitmask from devicetree */
+ int resn; /* NOTE(review): unused in this chunk; purpose unclear */
+ void *arch_info; /* opaque arch glue state */
+ bool powered_on;
+ dma_addr_t iova_start; /* device-visible address window */
+ dma_addr_t iova_stop;
+ enum mhi_suspend_mode suspend_mode;
+
+ unsigned int lpm_disable_depth; /* nested PCIe L1-disable requests */
+ /* lock to toggle low power modes */
+ spinlock_t lpm_lock;
+};
+
+void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
+int mhi_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *device_id);
+
+#ifdef CONFIG_ARCH_QCOM
+
+/* arch-specific glue, implemented elsewhere for QCOM targets */
+int mhi_arch_power_up(struct mhi_controller *mhi_cntrl);
+int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl);
+void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl);
+int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl);
+void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl);
+int mhi_arch_link_suspend(struct mhi_controller *mhi_cntrl);
+int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl);
+
+#else
+
+/*
+ * Non-QCOM builds need no arch glue: iommu init just points the MHI
+ * core at the pci device and enables 64-bit DMA; every other hook is a
+ * succeeding no-op stub.
+ */
+static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+ mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
+
+ return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
+}
+
+static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
+{
+ return 0;
+}
+
+static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static inline int mhi_arch_link_suspend(struct mhi_controller *mhi_cntrl)
+{
+ return 0;
+}
+
+static inline int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
+{
+ return 0;
+}
+
+static inline int mhi_arch_power_up(struct mhi_controller *mhi_cntrl)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _MHI_QCOM_ */
diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile
new file mode 100644
index 0000000..a743fbf
--- /dev/null
+++ b/drivers/bus/mhi/core/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MHI_BUS) +=mhi_init.o mhi_main.o mhi_pm.o mhi_boot.o mhi_dtr.o
diff --git a/drivers/bus/mhi/core/mhi_boot.c b/drivers/bus/mhi/core/mhi_boot.c
new file mode 100644
index 0000000..58bbcfc
--- /dev/null
+++ b/drivers/bus/mhi/core/mhi_boot.c
@@ -0,0 +1,556 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/mhi.h>
+#include "mhi_internal.h"
+
+
+/*
+ * Program the ramdump (RDDM) collection path: fill the BHIe vector
+ * table from the first entries-1 buffers, then point the RXVEC
+ * registers at the final entry (the vector table itself) and ring the
+ * doorbell with a non-zero random sequence id.
+ */
+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info)
+{
+ struct mhi_buf *mhi_buf = img_info->mhi_buf;
+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+ void __iomem *base = mhi_cntrl->bhie;
+ u32 sequence_id;
+ int i = 0;
+
+ /* after the loop, mhi_buf points at the last entry: the vec table */
+ for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
+ MHI_VERB("Setting vector:%pad size:%zu\n",
+ &mhi_buf->dma_addr, mhi_buf->len);
+ bhi_vec->dma_addr = mhi_buf->dma_addr;
+ bhi_vec->size = mhi_buf->len;
+ }
+
+ MHI_LOG("BHIe programming for RDDM\n");
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+ upper_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+ lower_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+ sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+
+ /* a zero sequence id is reserved; force it to a valid value */
+ if (unlikely(!sequence_id))
+ sequence_id = 1;
+
+ mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+ BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
+ sequence_id);
+
+ MHI_LOG("address:%pad len:0x%zx sequence:%u\n",
+ &mhi_buf->dma_addr, mhi_buf->len, sequence_id);
+}
+
+/*
+ * Collect the ramdump while the kernel is panicking: push the device
+ * into RDDM (via SYS_ERR, or a SoC reset as a last resort), then poll
+ * the RXVEC status until the image transfer completes. Locking is
+ * skipped deliberately - all other cores are expected to be stopped.
+ */
+static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+ u32 rx_status;
+ enum mhi_ee ee;
+ const u32 delayus = 2000;
+ u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+ const u32 rddm_timeout_us = 200000;
+ int rddm_retry = rddm_timeout_us / delayus; /* time to enter rddm */
+ void __iomem *base = mhi_cntrl->bhie;
+
+ MHI_LOG("Entered with pm_state:%s dev_state:%s ee:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ /*
+ * This should only be executing during a kernel panic, we expect all
+ * other cores to shutdown while we're collecting rddm buffer. After
+ * returning from this function, we expect device to reset.
+ *
+ * Normally, we would read/write pm_state only after grabbing
+ * pm_lock, since we're in a panic, skipping it. Also there is no
+ * guarantee this state change would take effect since
+ * we're setting it w/o grabbing pm_lock, it's best effort
+ */
+ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+ /* update should take the effect immediately */
+ smp_wmb();
+
+ /*
+ * Make sure device is not already in RDDM.
+ * In case device asserts and a kernel panic follows, device will
+ * already be in RDDM. Do not trigger SYS ERR again and proceed with
+ * waiting for image download completion.
+ */
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (ee != MHI_EE_RDDM) {
+
+ MHI_LOG("Trigger device into RDDM mode using SYSERR\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+ MHI_LOG("Waiting for device to enter RDDM\n");
+ while (rddm_retry--) {
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (ee == MHI_EE_RDDM)
+ break;
+
+ udelay(delayus);
+ }
+
+ /*
+ * Test the execution environment, not the loop counter: the
+ * old "rddm_retry <= 0" check hit a device that entered RDDM
+ * on the very last retry (counter decremented to 0) with a
+ * destructive SoC reset anyway.
+ */
+ if (ee != MHI_EE_RDDM) {
+ /* Hardware reset; force device to enter rddm */
+ MHI_LOG(
+ "Did not enter RDDM, do a host req. reset\n");
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
+ MHI_SOC_RESET_REQ_OFFSET,
+ MHI_SOC_RESET_REQ);
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ }
+
+ MHI_LOG("Waiting for image download completion, current EE:%s\n",
+ TO_MHI_EXEC_STR(ee));
+ while (retry--) {
+ ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK,
+ BHIE_RXVECSTATUS_STATUS_SHFT,
+ &rx_status);
+ if (ret)
+ return -EIO;
+
+ if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
+ MHI_LOG("RDDM successfully collected\n");
+ return 0;
+ }
+
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
+
+ MHI_ERR("Did not complete RDDM transfer\n");
+ MHI_ERR("Current EE:%s\n", TO_MHI_EXEC_STR(ee));
+ MHI_ERR("RXVEC_STATUS:0x%x, ret:%d\n", rx_status, ret);
+
+ return -EIO;
+}
+
+/*
+ * Download the ramdump image from the device. In panic context, defer
+ * to the polling variant; otherwise sleep on state_event until the
+ * RXVEC status reports transfer completion or the timeout expires.
+ */
+int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
+{
+ void __iomem *base = mhi_cntrl->bhie;
+ u32 rx_status = 0;
+
+ if (in_panic)
+ return __mhi_download_rddm_in_panic(mhi_cntrl);
+
+ MHI_LOG("Waiting for image download completion\n");
+
+ /*
+ * rx_status is pre-initialized to 0: if mhi_read_reg_field() fails,
+ * the wait condition becomes true without rx_status ever being
+ * written, and the final comparison below must not read an
+ * uninitialized value.
+ */
+ wait_event_timeout(mhi_cntrl->state_event,
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK,
+ BHIE_RXVECSTATUS_STATUS_SHFT,
+ &rx_status) || rx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+}
+EXPORT_SYMBOL(mhi_download_rddm_img);
+EXPORT_SYMBOL(mhi_download_rddm_img);
+
+/*
+ * Transfer the full (AMSS) firmware image over BHIe. mhi_buf must be
+ * the vector-table entry describing the image segments. Blocks until
+ * the device reports completion, an error pm_state, or timeout.
+ */
+static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
+ const struct mhi_buf *mhi_buf)
+{
+ void __iomem *base = mhi_cntrl->bhie;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ u32 tx_status;
+
+ read_lock_bh(pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ read_unlock_bh(pm_lock);
+ return -EIO;
+ }
+
+ MHI_LOG("Starting BHIe Programming\n");
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
+ upper_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
+ lower_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
+
+ mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
+
+ /*
+ * Match mhi_rddm_prepare(): never ring the doorbell with a zero
+ * sequence id - bump to 1 if the masked random value came out zero.
+ */
+ if (unlikely(!mhi_cntrl->sequence_id))
+ mhi_cntrl->sequence_id = 1;
+
+ mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
+ BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
+ mhi_cntrl->sequence_id);
+ read_unlock_bh(pm_lock);
+
+ MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n",
+ upper_32_bits(mhi_buf->dma_addr),
+ lower_32_bits(mhi_buf->dma_addr),
+ mhi_buf->len, mhi_cntrl->sequence_id);
+ MHI_LOG("Waiting for image transfer completion\n");
+
+ /* waiting for image download completion */
+ wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_TXVECSTATUS_OFFS,
+ BHIE_TXVECSTATUS_STATUS_BMSK,
+ BHIE_TXVECSTATUS_STATUS_SHFT,
+ &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+}
+
+/*
+ * Push the SBL image to the device over the BHI protocol and wait for
+ * the transfer status. On a reported transfer error the BHI error
+ * registers are dumped for diagnostics.
+ */
+static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
+ dma_addr_t dma_addr,
+ size_t size)
+{
+ u32 tx_status, val;
+ int i, ret;
+ void __iomem *base = mhi_cntrl->bhi;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ struct {
+ char *name;
+ u32 offset;
+ } error_reg[] = {
+ { "ERROR_CODE", BHI_ERRCODE },
+ { "ERROR_DBG1", BHI_ERRDBG1 },
+ { "ERROR_DBG2", BHI_ERRDBG2 },
+ { "ERROR_DBG3", BHI_ERRDBG3 },
+ { NULL },
+ };
+
+ MHI_LOG("Starting BHI programming\n");
+
+ /* program start sbl download via bhi protocol */
+ read_lock_bh(pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ read_unlock_bh(pm_lock);
+ goto invalid_pm_state;
+ }
+
+ mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
+ upper_32_bits(dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
+ lower_32_bits(dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+ mhi_cntrl->session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
+
+ /* as with the BHIe sequence ids, avoid a zero session id */
+ if (unlikely(!mhi_cntrl->session_id))
+ mhi_cntrl->session_id = 1;
+
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, mhi_cntrl->session_id);
+ read_unlock_bh(pm_lock);
+
+ MHI_LOG("Waiting for image transfer completion\n");
+
+ /* waiting for image download completion */
+ wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
+ BHI_STATUS_MASK, BHI_STATUS_SHIFT,
+ &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ goto invalid_pm_state;
+
+ if (tx_status == BHI_STATUS_ERROR) {
+ MHI_ERR("Image transfer failed\n");
+ read_lock_bh(pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ for (i = 0; error_reg[i].name; i++) {
+ ret = mhi_read_reg(mhi_cntrl, base,
+ error_reg[i].offset, &val);
+ if (ret)
+ break;
+ MHI_ERR("reg:%s value:0x%x\n",
+ error_reg[i].name, val);
+ }
+ }
+ read_unlock_bh(pm_lock);
+ goto invalid_pm_state;
+ }
+
+ return (tx_status == BHI_STATUS_SUCCESS) ? 0 : -ETIMEDOUT;
+
+invalid_pm_state:
+
+ return -EIO;
+}
+
+/*
+ * Free a BHIe image table allocated by mhi_alloc_bhie_table(): every
+ * coherent segment (including the trailing vector-table entry), the
+ * entry array, and the image_info container itself.
+ */
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info)
+{
+ int i;
+ struct mhi_buf *mhi_buf = image_info->mhi_buf;
+
+ for (i = 0; i < image_info->entries; i++, mhi_buf++)
+ mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+ mhi_buf->dma_addr);
+
+ kfree(image_info->mhi_buf);
+ kfree(image_info);
+}
+
+/*
+ * Allocate a segmented BHIe image table: alloc_size bytes split into
+ * seg_len-sized coherent buffers, plus one extra entry holding the
+ * bhi_vec_entry vector table that describes the other segments.
+ * Returns 0 and stores the table in *image_info, or -ENOMEM after
+ * rolling back any partial allocation.
+ */
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info,
+ size_t alloc_size)
+{
+ size_t seg_size = mhi_cntrl->seg_len;
+ /* require an additional entry for the vec table */
+ int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
+ int i;
+ struct image_info *img_info;
+ struct mhi_buf *mhi_buf;
+
+ MHI_LOG("Allocating bytes:%zu seg_size:%zu total_seg:%u\n",
+ alloc_size, seg_size, segments);
+
+ img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+ if (!img_info)
+ return -ENOMEM;
+
+ /* allocate memory for entries */
+ img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
+ GFP_KERNEL);
+ if (!img_info->mhi_buf)
+ goto error_alloc_mhi_buf;
+
+ /* allocate and populate vector table */
+ mhi_buf = img_info->mhi_buf;
+ for (i = 0; i < segments; i++, mhi_buf++) {
+ size_t vec_size = seg_size;
+
+ /* last entry is for vector table */
+ if (i == segments - 1)
+ vec_size = sizeof(struct bhi_vec_entry) * i;
+
+ mhi_buf->len = vec_size;
+ mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
+ &mhi_buf->dma_addr, GFP_KERNEL);
+ if (!mhi_buf->buf)
+ goto error_alloc_segment;
+
+ MHI_LOG("Entry:%d Address:0x%llx size:%zu\n", i,
+ (u64)mhi_buf->dma_addr, mhi_buf->len);
+ }
+
+ img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
+ img_info->entries = segments;
+ *image_info = img_info;
+
+ MHI_LOG("Successfully allocated bhi vec table\n");
+
+ return 0;
+
+error_alloc_segment:
+ /* free the segments allocated so far, in reverse order */
+ for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
+ mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+ mhi_buf->dma_addr);
+
+error_alloc_mhi_buf:
+ kfree(img_info);
+
+ return -ENOMEM;
+}
+
+/*
+ * Scatter the flat firmware blob into the pre-allocated BHIe segment
+ * buffers and fill the matching vector-table entries with each
+ * segment's DMA address and the number of bytes actually copied.
+ */
+static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
+ const struct firmware *firmware,
+ struct image_info *img_info)
+{
+ size_t remainder = firmware->size;
+ size_t to_cpy;
+ const u8 *buf = firmware->data;
+ int i = 0;
+ struct mhi_buf *mhi_buf = img_info->mhi_buf;
+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+
+ while (remainder) {
+ /* table was sized for this firmware; overrun means a bug */
+ MHI_ASSERT(i >= img_info->entries, "malformed vector table");
+
+ to_cpy = min(remainder, mhi_buf->len);
+ memcpy(mhi_buf->buf, buf, to_cpy);
+ bhi_vec->dma_addr = mhi_buf->dma_addr;
+ bhi_vec->size = to_cpy;
+
+ MHI_VERB("Setting Vector:0x%llx size: %llu\n",
+ bhi_vec->dma_addr, bhi_vec->size);
+ buf += to_cpy;
+ remainder -= to_cpy;
+ i++;
+ bhi_vec++;
+ mhi_buf++;
+ }
+}
+
+/*
+ * Firmware download work item. Sequence: wait for the device to reach
+ * PBL, load the SBL (or EDL) image over BHI, then - for full boot chain
+ * (fbc) downloads - allocate the BHIe table, copy the firmware into it,
+ * drive the RESET->READY transition, wait for SBL, and finally push the
+ * AMSS image over BHIe. Errors are logged and the worker simply
+ * returns; firmware and table ownership is released on every path.
+ */
+void mhi_fw_load_worker(struct work_struct *work)
+{
+ int ret;
+ struct mhi_controller *mhi_cntrl;
+ const char *fw_name;
+ const struct firmware *firmware;
+ struct image_info *image_info;
+ void *buf;
+ dma_addr_t dma_addr;
+ size_t size;
+
+ mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
+
+ MHI_LOG("Waiting for device to enter PBL from EE:%s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_IN_PBL(mhi_cntrl->ee) ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ MHI_ERR("MHI is not in valid state\n");
+ return;
+ }
+
+ MHI_LOG("Device current EE:%s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ /* if device in pthru, do reset to ready state transition */
+ if (mhi_cntrl->ee == MHI_EE_PTHRU)
+ goto fw_load_ee_pthru;
+
+ /* in emergency download mode the EDL image replaces the firmware */
+ fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
+ mhi_cntrl->edl_image : mhi_cntrl->fw_image;
+
+ if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
+ !mhi_cntrl->seg_len))) {
+ MHI_ERR("No firmware image defined or !sbl_size || !seg_len\n");
+ return;
+ }
+
+ ret = request_firmware(&firmware, fw_name, mhi_cntrl->dev);
+ if (ret) {
+ MHI_ERR("Error loading firmware, ret:%d\n", ret);
+ return;
+ }
+
+ size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
+
+ /* the sbl size provided is maximum size, not necessarily image size */
+ if (size > firmware->size)
+ size = firmware->size;
+
+ buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
+ if (!buf) {
+ MHI_ERR("Could not allocate memory for image\n");
+ release_firmware(firmware);
+ return;
+ }
+
+ /* load sbl image */
+ memcpy(buf, firmware->data, size);
+ ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
+ mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);
+
+ /* firmware is still needed later only for a successful fbc download */
+ if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL)
+ release_firmware(firmware);
+
+ /* error or in edl, we're done */
+ if (ret || mhi_cntrl->ee == MHI_EE_EDL)
+ return;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_RESET;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /*
+ * if we're doing fbc, populate vector tables while
+ * device transitioning into MHI READY state
+ */
+ if (mhi_cntrl->fbc_download) {
+ ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
+ firmware->size);
+ if (ret) {
+ MHI_ERR("Error alloc size of %zu\n", firmware->size);
+ goto error_alloc_fw_table;
+ }
+
+ MHI_LOG("Copying firmware image into vector table\n");
+
+ /* load the firmware into BHIE vec table */
+ mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
+ }
+
+fw_load_ee_pthru:
+ /* transitioning into MHI RESET->READY state */
+ ret = mhi_ready_state_transition(mhi_cntrl);
+
+ MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
+
+ /* non-fbc paths released the firmware above; nothing more to do */
+ if (!mhi_cntrl->fbc_download)
+ return;
+
+ if (ret) {
+ MHI_ERR("Did not transition to READY state\n");
+ goto error_read;
+ }
+
+ /* wait for SBL event */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->ee == MHI_EE_SBL ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ MHI_ERR("MHI did not enter BHIE\n");
+ goto error_read;
+ }
+
+ /* start full firmware image download */
+ image_info = mhi_cntrl->fbc_image;
+ ret = mhi_fw_load_amss(mhi_cntrl,
+ /* last entry is vec table */
+ &image_info->mhi_buf[image_info->entries - 1]);
+
+ MHI_LOG("amss fw_load, ret:%d\n", ret);
+
+ release_firmware(firmware);
+
+ return;
+
+error_read:
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+
+error_alloc_fw_table:
+ release_firmware(firmware);
+}
diff --git a/drivers/bus/mhi/core/mhi_dtr.c b/drivers/bus/mhi/core/mhi_dtr.c
new file mode 100644
index 0000000..85e27b7
--- /dev/null
+++ b/drivers/bus/mhi/core/mhi_dtr.c
@@ -0,0 +1,236 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/wait.h>
+#include <linux/mhi.h>
+#include "mhi_internal.h"
+
+struct __packed dtr_ctrl_msg {
+ u32 preamble;
+ u32 msg_id;
+ u32 dest_id;
+ u32 size;
+ u32 msg;
+};
+
+#define CTRL_MAGIC (0x4C525443)
+#define CTRL_MSG_DTR BIT(0)
+#define CTRL_MSG_RTS BIT(1)
+#define CTRL_MSG_DCD BIT(0)
+#define CTRL_MSG_DSR BIT(1)
+#define CTRL_MSG_RI BIT(3)
+#define CTRL_HOST_STATE (0x10)
+#define CTRL_DEVICE_STATE (0x11)
+#define CTRL_GET_CHID(dtr) (dtr->dest_id & 0xFF)
+
+static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl,
+ struct mhi_device *mhi_dev,
+ u32 tiocm)
+{
+ struct dtr_ctrl_msg *dtr_msg = NULL;
+ struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
+ spinlock_t *res_lock = &mhi_dev->dev.devres_lock;
+ u32 cur_tiocm;
+ int ret = 0;
+
+ cur_tiocm = mhi_dev->tiocm & ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI);
+
+ tiocm &= (TIOCM_DTR | TIOCM_RTS);
+
+ /* state did not changed */
+ if (cur_tiocm == tiocm)
+ return 0;
+
+ mutex_lock(&dtr_chan->mutex);
+
+ dtr_msg = kzalloc(sizeof(*dtr_msg), GFP_KERNEL);
+ if (!dtr_msg) {
+ ret = -ENOMEM;
+ goto tiocm_exit;
+ }
+
+ dtr_msg->preamble = CTRL_MAGIC;
+ dtr_msg->msg_id = CTRL_HOST_STATE;
+ dtr_msg->dest_id = mhi_dev->ul_chan_id;
+ dtr_msg->size = sizeof(u32);
+ if (tiocm & TIOCM_DTR)
+ dtr_msg->msg |= CTRL_MSG_DTR;
+ if (tiocm & TIOCM_RTS)
+ dtr_msg->msg |= CTRL_MSG_RTS;
+
+ reinit_completion(&dtr_chan->completion);
+ ret = mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_TO_DEVICE, dtr_msg,
+ sizeof(*dtr_msg), MHI_EOT);
+ if (ret)
+ goto tiocm_exit;
+
+ ret = wait_for_completion_timeout(&dtr_chan->completion,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (!ret) {
+ MHI_ERR("Failed to receive transfer callback\n");
+ ret = -EIO;
+ goto tiocm_exit;
+ }
+
+ ret = 0;
+ spin_lock_irq(res_lock);
+ mhi_dev->tiocm &= ~(TIOCM_DTR | TIOCM_RTS);
+ mhi_dev->tiocm |= tiocm;
+ spin_unlock_irq(res_lock);
+
+tiocm_exit:
+ kfree(dtr_msg);
+ mutex_unlock(&dtr_chan->mutex);
+
+ return ret;
+}
+
+long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	int ret;
+
+	/* ioctl not supported by this controller */
+	if (!mhi_cntrl->dtr_dev)
+		return -EIO;
+
+	switch (cmd) {
+	case TIOCMGET:
+		return mhi_dev->tiocm;
+	case TIOCMSET:
+	{
+		u32 tiocm;
+
+		/* arg is a userspace pointer; annotate for sparse checking */
+		ret = get_user(tiocm, (u32 __user *)arg);
+		if (ret)
+			return ret;
+
+		return mhi_dtr_tiocmset(mhi_cntrl, mhi_dev, tiocm);
+	}
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(mhi_ioctl);
+
+static void mhi_dtr_dl_xfer_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct dtr_ctrl_msg *dtr_msg = mhi_result->buf_addr;
+ u32 chan;
+ spinlock_t *res_lock;
+
+ if (mhi_result->bytes_xferd != sizeof(*dtr_msg)) {
+ MHI_ERR("Unexpected length %zu received\n",
+ mhi_result->bytes_xferd);
+ return;
+ }
+
+ MHI_VERB("preamble:0x%x msg_id:%u dest_id:%u msg:0x%x\n",
+ dtr_msg->preamble, dtr_msg->msg_id, dtr_msg->dest_id,
+ dtr_msg->msg);
+
+ chan = CTRL_GET_CHID(dtr_msg);
+ if (chan >= mhi_cntrl->max_chan)
+ return;
+
+ mhi_dev = mhi_cntrl->mhi_chan[chan].mhi_dev;
+ if (!mhi_dev)
+ return;
+
+ res_lock = &mhi_dev->dev.devres_lock;
+ spin_lock_irq(res_lock);
+ mhi_dev->tiocm &= ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI);
+
+ if (dtr_msg->msg & CTRL_MSG_DCD)
+ mhi_dev->tiocm |= TIOCM_CD;
+
+ if (dtr_msg->msg & CTRL_MSG_DSR)
+ mhi_dev->tiocm |= TIOCM_DSR;
+
+ if (dtr_msg->msg & CTRL_MSG_RI)
+ mhi_dev->tiocm |= TIOCM_RI;
+ spin_unlock_irq(res_lock);
+
+ /* Notify the update */
+ mhi_notify(mhi_dev, MHI_CB_DTR_SIGNAL);
+}
+
+static void mhi_dtr_ul_xfer_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
+
+ MHI_VERB("Received with status:%d\n", mhi_result->transaction_status);
+ if (!mhi_result->transaction_status)
+ complete(&dtr_chan->completion);
+}
+
+static void mhi_dtr_remove(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_cntrl->dtr_dev = NULL;
+}
+
+static int mhi_dtr_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ int ret;
+
+ MHI_LOG("Enter for DTR control channel\n");
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (!ret)
+ mhi_cntrl->dtr_dev = mhi_dev;
+
+ MHI_LOG("Exit with ret:%d\n", ret);
+
+ return ret;
+}
+
+static const struct mhi_device_id mhi_dtr_table[] = {
+ { .chan = "IP_CTRL" },
+ {},
+};
+
+static struct mhi_driver mhi_dtr_driver = {
+ .id_table = mhi_dtr_table,
+ .remove = mhi_dtr_remove,
+ .probe = mhi_dtr_probe,
+ .ul_xfer_cb = mhi_dtr_ul_xfer_cb,
+ .dl_xfer_cb = mhi_dtr_dl_xfer_cb,
+ .driver = {
+ .name = "MHI_DTR",
+ .owner = THIS_MODULE,
+ }
+};
+
+int __init mhi_dtr_init(void)
+{
+ return mhi_driver_register(&mhi_dtr_driver);
+}
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
new file mode 100644
index 0000000..53a9e5c
--- /dev/null
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -0,0 +1,1798 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/mhi.h>
+#include "mhi_internal.h"
+
+static struct dentry *mhi_parent;
+
+const char * const mhi_ee_str[MHI_EE_MAX] = {
+ [MHI_EE_PBL] = "PBL",
+ [MHI_EE_SBL] = "SBL",
+ [MHI_EE_AMSS] = "AMSS",
+ [MHI_EE_RDDM] = "RDDM",
+ [MHI_EE_WFW] = "WFW",
+ [MHI_EE_PTHRU] = "PASS THRU",
+ [MHI_EE_EDL] = "EDL",
+ [MHI_EE_DISABLE_TRANSITION] = "DISABLE",
+ [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
+};
+
+const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX] = {
+ [MHI_ST_TRANSITION_PBL] = "PBL",
+ [MHI_ST_TRANSITION_READY] = "READY",
+ [MHI_ST_TRANSITION_SBL] = "SBL",
+ [MHI_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
+};
+
+const char * const mhi_state_str[MHI_STATE_MAX] = {
+ [MHI_STATE_RESET] = "RESET",
+ [MHI_STATE_READY] = "READY",
+ [MHI_STATE_M0] = "M0",
+ [MHI_STATE_M1] = "M1",
+ [MHI_STATE_M2] = "M2",
+ [MHI_STATE_M3] = "M3",
+ [MHI_STATE_M3_FAST] = "M3_FAST",
+ [MHI_STATE_BHI] = "BHI",
+ [MHI_STATE_SYS_ERR] = "SYS_ERR",
+};
+
+static const char * const mhi_pm_state_str[] = {
+ [MHI_PM_BIT_DISABLE] = "DISABLE",
+ [MHI_PM_BIT_POR] = "POR",
+ [MHI_PM_BIT_M0] = "M0",
+ [MHI_PM_BIT_M2] = "M2",
+ [MHI_PM_BIT_M3_ENTER] = "M?->M3",
+ [MHI_PM_BIT_M3] = "M3",
+ [MHI_PM_BIT_M3_EXIT] = "M3->M0",
+ [MHI_PM_BIT_FW_DL_ERR] = "FW DL Error",
+ [MHI_PM_BIT_SYS_ERR_DETECT] = "SYS_ERR Detect",
+ [MHI_PM_BIT_SYS_ERR_PROCESS] = "SYS_ERR Process",
+ [MHI_PM_BIT_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
+ [MHI_PM_BIT_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
+};
+
+struct mhi_bus mhi_bus;
+
+const char *to_mhi_pm_state_str(enum MHI_PM_STATE state)
+{
+ int index = find_last_bit((unsigned long *)&state, 32);
+
+ if (index >= ARRAY_SIZE(mhi_pm_state_str))
+ return "Invalid State";
+
+ return mhi_pm_state_str[index];
+}
+
+static ssize_t bus_vote_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ atomic_read(&mhi_dev->bus_vote));
+}
+
+static ssize_t bus_vote_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ int ret = -EINVAL;
+
+ if (sysfs_streq(buf, "get")) {
+ ret = mhi_device_get_sync(mhi_dev, MHI_VOTE_BUS);
+ } else if (sysfs_streq(buf, "put")) {
+ mhi_device_put(mhi_dev, MHI_VOTE_BUS);
+ ret = 0;
+ }
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(bus_vote);
+
+static ssize_t device_vote_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ atomic_read(&mhi_dev->dev_vote));
+}
+
+static ssize_t device_vote_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ int ret = -EINVAL;
+
+ if (sysfs_streq(buf, "get")) {
+ ret = mhi_device_get_sync(mhi_dev, MHI_VOTE_DEVICE);
+ } else if (sysfs_streq(buf, "put")) {
+ mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
+ ret = 0;
+ }
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(device_vote);
+
+static struct attribute *mhi_vote_attrs[] = {
+ &dev_attr_bus_vote.attr,
+ &dev_attr_device_vote.attr,
+ NULL,
+};
+
+static const struct attribute_group mhi_vote_group = {
+ .attrs = mhi_vote_attrs,
+};
+
+int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl)
+{
+ return sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj,
+ &mhi_vote_group);
+}
+
+void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+ sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_vote_group);
+
+ /* relinquish any pending votes for device */
+ while (atomic_read(&mhi_dev->dev_vote))
+ mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
+
+ /* remove pending votes for the bus */
+ while (atomic_read(&mhi_dev->bus_vote))
+ mhi_device_put(mhi_dev, MHI_VOTE_BUS);
+}
+
+/* MHI protocol require transfer ring to be aligned to ring length */
+static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring,
+ u64 len)
+{
+ ring->alloc_size = len + (len - 1);
+ ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
+ &ring->dma_handle, GFP_KERNEL);
+ if (!ring->pre_aligned)
+ return -ENOMEM;
+
+ ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
+ ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
+ return 0;
+}
+
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
+{
+ int i;
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (!mhi_event->request_irq)
+ continue;
+
+ free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event);
+ }
+
+ free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+}
+
+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+{
+ int i;
+ int ret;
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+
+ /* for BHI INTVEC msi */
+ ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handlr,
+ mhi_intvec_threaded_handlr,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "mhi", mhi_cntrl);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (!mhi_event->request_irq)
+ continue;
+
+ ret = request_irq(mhi_cntrl->irq[mhi_event->msi],
+ mhi_msi_handlr, IRQF_SHARED | IRQF_NO_SUSPEND,
+ "mhi", mhi_event);
+ if (ret) {
+ MHI_ERR("Error requesting irq:%d for ev:%d\n",
+ mhi_cntrl->irq[mhi_event->msi], i);
+ goto error_request;
+ }
+ }
+
+ return 0;
+
+error_request:
+ for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
+ if (!mhi_event->request_irq)
+ continue;
+
+ free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event);
+ }
+ free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+
+ return ret;
+}
+
+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+ int i;
+ struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_event *mhi_event;
+ struct mhi_ring *ring;
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
+ ring = &mhi_cmd->ring;
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ ring->base = NULL;
+ ring->iommu_base = 0;
+ }
+
+ mhi_free_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+ mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring = &mhi_event->ring;
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ ring->base = NULL;
+ ring->iommu_base = 0;
+ }
+
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
+ mhi_ctxt->er_ctxt_addr);
+
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
+ mhi_ctxt->chan_ctxt_addr);
+
+ kfree(mhi_ctxt);
+ mhi_cntrl->mhi_ctxt = NULL;
+}
+
+static int mhi_init_debugfs_mhi_states_open(struct inode *inode,
+ struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_mhi_states_show, inode->i_private);
+}
+
+static int mhi_init_debugfs_mhi_event_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_mhi_event_show, inode->i_private);
+}
+
+static int mhi_init_debugfs_mhi_chan_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_mhi_chan_show, inode->i_private);
+}
+
+static const struct file_operations debugfs_state_ops = {
+ .open = mhi_init_debugfs_mhi_states_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_ev_ops = {
+ .open = mhi_init_debugfs_mhi_event_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_chan_ops = {
+ .open = mhi_init_debugfs_mhi_chan_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_reset_fops, NULL,
+ mhi_debugfs_trigger_reset, "%llu\n");
+
+void mhi_init_debugfs(struct mhi_controller *mhi_cntrl)
+{
+	struct dentry *dentry;
+	char node[32];
+
+	if (!mhi_cntrl->parent)
+		return;
+
+	snprintf(node, sizeof(node), "%04x_%02u:%02u.%02u",
+		 mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
+		 mhi_cntrl->slot);
+
+	dentry = debugfs_create_dir(node, mhi_cntrl->parent);
+	if (IS_ERR_OR_NULL(dentry))
+		return;
+
+	debugfs_create_file("states", 0444, dentry, mhi_cntrl,
+			    &debugfs_state_ops);
+	debugfs_create_file("events", 0444, dentry, mhi_cntrl,
+			    &debugfs_ev_ops);
+	debugfs_create_file("chan", 0444, dentry, mhi_cntrl, &debugfs_chan_ops);
+	/* reset fops is write-only (NULL get op), so use a writable mode */
+	debugfs_create_file("reset", 0244, dentry, mhi_cntrl,
+			    &debugfs_trigger_reset_fops);
+	mhi_cntrl->dentry = dentry;
+}
+
+void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl)
+{
+ debugfs_remove_recursive(mhi_cntrl->dentry);
+ mhi_cntrl->dentry = NULL;
+}
+
+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_ctxt *mhi_ctxt;
+ struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_chan *mhi_chan;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd *mhi_cmd;
+ int ret = -ENOMEM, i;
+
+ atomic_set(&mhi_cntrl->dev_wake, 0);
+ atomic_set(&mhi_cntrl->alloc_size, 0);
+ atomic_set(&mhi_cntrl->pending_pkts, 0);
+
+ mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
+ if (!mhi_ctxt)
+ return -ENOMEM;
+
+ /* setup channel ctxt */
+ mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->chan_ctxt) * mhi_cntrl->max_chan,
+ &mhi_ctxt->chan_ctxt_addr, GFP_KERNEL);
+ if (!mhi_ctxt->chan_ctxt)
+ goto error_alloc_chan_ctxt;
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ chan_ctxt = mhi_ctxt->chan_ctxt;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
+ /* If it's offload channel skip this step */
+ if (mhi_chan->offload_ch)
+ continue;
+
+ chan_ctxt->chstate = MHI_CH_STATE_DISABLED;
+ chan_ctxt->brstmode = mhi_chan->db_cfg.brstmode;
+ chan_ctxt->pollcfg = mhi_chan->db_cfg.pollcfg;
+ chan_ctxt->chtype = mhi_chan->type;
+ chan_ctxt->erindex = mhi_chan->er_index;
+
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+ mhi_chan->tre_ring.db_addr = &chan_ctxt->wp;
+ }
+
+ /* setup event context */
+ mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->er_ctxt) * mhi_cntrl->total_ev_rings,
+ &mhi_ctxt->er_ctxt_addr, GFP_KERNEL);
+ if (!mhi_ctxt->er_ctxt)
+ goto error_alloc_er_ctxt;
+
+ er_ctxt = mhi_ctxt->er_ctxt;
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+ mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* it's a satellite ev, we do not touch it */
+ if (mhi_event->offload_ev)
+ continue;
+
+ er_ctxt->intmodc = 0;
+ er_ctxt->intmodt = mhi_event->intmod;
+ er_ctxt->ertype = MHI_ER_TYPE_VALID;
+ er_ctxt->msivec = mhi_event->msi;
+ mhi_event->db_cfg.db_mode = true;
+
+ ring->el_size = sizeof(struct mhi_tre);
+ ring->len = ring->el_size * ring->elements;
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
+ if (ret)
+ goto error_alloc_er;
+
+ ring->rp = ring->wp = ring->base;
+ er_ctxt->rbase = ring->iommu_base;
+ er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
+ er_ctxt->rlen = ring->len;
+ ring->ctxt_wp = &er_ctxt->wp;
+ }
+
+ /* setup cmd context */
+ mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+ &mhi_ctxt->cmd_ctxt_addr, GFP_KERNEL);
+ if (!mhi_ctxt->cmd_ctxt)
+ goto error_alloc_er;
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ cmd_ctxt = mhi_ctxt->cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ ring->el_size = sizeof(struct mhi_tre);
+ ring->elements = CMD_EL_PER_RING;
+ ring->len = ring->el_size * ring->elements;
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
+ if (ret)
+ goto error_alloc_cmd;
+
+ ring->rp = ring->wp = ring->base;
+ cmd_ctxt->rbase = ring->iommu_base;
+ cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
+ cmd_ctxt->rlen = ring->len;
+ ring->ctxt_wp = &cmd_ctxt->wp;
+ }
+
+ mhi_cntrl->mhi_ctxt = mhi_ctxt;
+
+ return 0;
+
+error_alloc_cmd:
+ for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ }
+ mhi_free_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+ mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
+ i = mhi_cntrl->total_ev_rings;
+ mhi_event = mhi_cntrl->mhi_event + i;
+
+error_alloc_er:
+ for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ }
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
+ mhi_ctxt->er_ctxt_addr);
+
+error_alloc_er_ctxt:
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
+ mhi_ctxt->chan_ctxt_addr);
+
+error_alloc_chan_ctxt:
+ kfree(mhi_ctxt);
+
+ return ret;
+}
+
+/* to be used only if a single event ring with the type is present */
+static int mhi_get_er_index(struct mhi_controller *mhi_cntrl,
+ enum mhi_er_data_type type)
+{
+ int i;
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+
+ /* find event ring for requested type */
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->data_type == type)
+ return mhi_event->er_index;
+ }
+
+ return -ENOENT;
+}
+
+int mhi_init_timesync(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_timesync *mhi_tsync;
+ u32 time_offset, db_offset;
+ int ret;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ ret = -EIO;
+ goto exit_timesync;
+ }
+
+ ret = mhi_get_capability_offset(mhi_cntrl, TIMESYNC_CAP_ID,
+ &time_offset);
+ if (ret) {
+ MHI_LOG("No timesync capability found\n");
+ goto exit_timesync;
+ }
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ if (!mhi_cntrl->time_get || !mhi_cntrl->lpm_disable ||
+ !mhi_cntrl->lpm_enable)
+ return -EINVAL;
+
+ /* register method supported */
+ mhi_tsync = kzalloc(sizeof(*mhi_tsync), GFP_KERNEL);
+ if (!mhi_tsync)
+ return -ENOMEM;
+
+ spin_lock_init(&mhi_tsync->lock);
+ mutex_init(&mhi_tsync->lpm_mutex);
+ INIT_LIST_HEAD(&mhi_tsync->head);
+ init_completion(&mhi_tsync->completion);
+
+ /* save time_offset for obtaining time */
+ MHI_LOG("TIME OFFS:0x%x\n", time_offset);
+ mhi_tsync->time_reg = mhi_cntrl->regs + time_offset
+ + TIMESYNC_TIME_LOW_OFFSET;
+
+ mhi_cntrl->mhi_tsync = mhi_tsync;
+
+ ret = mhi_create_timesync_sysfs(mhi_cntrl);
+ if (unlikely(ret)) {
+ /* kernel method still work */
+ MHI_ERR("Failed to create timesync sysfs nodes\n");
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ ret = -EIO;
+ goto exit_timesync;
+ }
+
+ /* get DB offset if supported, else return */
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
+ time_offset + TIMESYNC_DB_OFFSET, &db_offset);
+ if (ret || !db_offset) {
+ ret = 0;
+ goto exit_timesync;
+ }
+
+ MHI_LOG("TIMESYNC_DB OFFS:0x%x\n", db_offset);
+ mhi_tsync->db = mhi_cntrl->regs + db_offset;
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ /* get time-sync event ring configuration */
+ ret = mhi_get_er_index(mhi_cntrl, MHI_ER_TSYNC_ELEMENT_TYPE);
+ if (ret < 0) {
+ MHI_LOG("Could not find timesync event ring\n");
+ return ret;
+ }
+
+ mhi_tsync->er_index = ret;
+
+ ret = mhi_send_cmd(mhi_cntrl, NULL, MHI_CMD_TIMSYNC_CFG);
+ if (ret) {
+ MHI_ERR("Failed to send time sync cfg cmd\n");
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&mhi_tsync->completion,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || mhi_tsync->ccs != MHI_EV_CC_SUCCESS) {
+ MHI_ERR("Failed to get time cfg cmd completion\n");
+ return -EIO;
+ }
+
+ return 0;
+
+exit_timesync:
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return ret;
+}
+
+static int mhi_init_bw_scale(struct mhi_controller *mhi_cntrl)
+{
+	int ret, er_index;
+	u32 bw_cfg_offset;
+
+	/* controller doesn't support dynamic bw switch */
+	if (!mhi_cntrl->bw_scale)
+		return -ENODEV;
+
+	ret = mhi_get_capability_offset(mhi_cntrl, BW_SCALE_CAP_ID,
+					&bw_cfg_offset);
+	if (ret)
+		return ret;
+
+	/* No ER configured to support BW scale */
+	er_index = mhi_get_er_index(mhi_cntrl, MHI_ER_BW_SCALE_ELEMENT_TYPE);
+	if (er_index < 0)
+		return er_index;
+
+	bw_cfg_offset += BW_SCALE_CFG_OFFSET;
+
+	MHI_LOG("BW_CFG OFFSET:0x%x\n", bw_cfg_offset);
+
+	/* advertise host support */
+	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, bw_cfg_offset,
+		      MHI_BW_SCALE_SETUP(er_index));
+
+	return 0;
+}
+
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
+{
+ u32 val;
+ int i, ret;
+ struct mhi_chan *mhi_chan;
+ struct mhi_event *mhi_event;
+ void __iomem *base = mhi_cntrl->regs;
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 val;
+ } reg_info[] = {
+ {
+ CCABAP_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
+ },
+ {
+ CCABAP_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
+ },
+ {
+ ECABAP_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
+ },
+ {
+ ECABAP_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
+ },
+ {
+ CRCBAP_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
+ },
+ {
+ CRCBAP_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
+ },
+ {
+ MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
+ mhi_cntrl->total_ev_rings,
+ },
+ {
+ MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
+ mhi_cntrl->hw_ev_rings,
+ },
+ {
+ MHICTRLBASE_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHICTRLBASE_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHIDATABASE_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHIDATABASE_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHICTRLLIMIT_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHICTRLLIMIT_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHIDATALIMIT_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHIDATALIMIT_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_stop),
+ },
+ { 0, 0, 0 }
+ };
+
+ MHI_LOG("Initializing MMIO\n");
+
+ /* set up DB register for all the chan rings */
+ ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
+ CHDBOFF_CHDBOFF_SHIFT, &val);
+ if (ret)
+ return -EIO;
+
+ MHI_LOG("CHDBOFF:0x%x\n", val);
+
+ /* setup wake db */
+ mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
+ mhi_cntrl->wake_set = false;
+
+ /* setup bw scale db */
+ mhi_cntrl->bw_scale_db = base + val + (8 * MHI_BW_SCALE_CHAN_DB);
+
+ /* setup channel db addresses */
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
+ mhi_chan->tre_ring.db_addr = base + val;
+
+ /* setup event ring db addresses */
+ ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
+ ERDBOFF_ERDBOFF_SHIFT, &val);
+ if (ret)
+ return -EIO;
+
+ MHI_LOG("ERDBOFF:0x%x\n", val);
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_event->ring.db_addr = base + val;
+ }
+
+ /* set up DB register for primary CMD rings */
+ mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;
+
+ MHI_LOG("Programming all MMIO values.\n");
+ for (i = 0; reg_info[i].offset; i++)
+ mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
+ reg_info[i].mask, reg_info[i].shift,
+ reg_info[i].val);
+
+ /* setup bandwidth scaling features */
+ mhi_init_bw_scale(mhi_cntrl);
+
+ return 0;
+}
+
+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring;
+ struct mhi_ring *tre_ring;
+ struct mhi_chan_ctxt *chan_ctxt;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+
+ mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
+ tre_ring->pre_aligned, tre_ring->dma_handle);
+ vfree(buf_ring->base);
+
+ buf_ring->base = tre_ring->base = NULL;
+ chan_ctxt->rbase = 0;
+}
+
+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring;
+ struct mhi_ring *tre_ring;
+ struct mhi_chan_ctxt *chan_ctxt;
+ int ret;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ tre_ring->el_size = sizeof(struct mhi_tre);
+ tre_ring->len = tre_ring->el_size * tre_ring->elements;
+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
+ if (ret)
+ return -ENOMEM;
+
+ buf_ring->el_size = sizeof(struct mhi_buf_info);
+ buf_ring->len = buf_ring->el_size * buf_ring->elements;
+ buf_ring->base = vzalloc(buf_ring->len);
+
+ if (!buf_ring->base) {
+ mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
+ tre_ring->pre_aligned, tre_ring->dma_handle);
+ return -ENOMEM;
+ }
+
+ chan_ctxt->chstate = MHI_CH_STATE_ENABLED;
+ chan_ctxt->rbase = tre_ring->iommu_base;
+ chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
+ chan_ctxt->rlen = tre_ring->len;
+ tre_ring->ctxt_wp = &chan_ctxt->wp;
+
+ tre_ring->rp = tre_ring->wp = tre_ring->base;
+ buf_ring->rp = buf_ring->wp = buf_ring->base;
+ mhi_chan->db_cfg.db_mode = 1;
+
+ /* update to all cores */
+ smp_wmb();
+
+ return 0;
+}
+
+int mhi_device_configure(struct mhi_device *mhi_dev,
+ enum dma_data_direction dir,
+ struct mhi_buf *cfg_tbl,
+ int elements)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_chan_ctxt *ch_ctxt;
+ int er_index, chan;
+
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ mhi_chan = mhi_dev->ul_chan;
+ break;
+ case DMA_BIDIRECTIONAL:
+ case DMA_FROM_DEVICE:
+ case DMA_NONE:
+ mhi_chan = mhi_dev->dl_chan;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ er_index = mhi_chan->er_index;
+ chan = mhi_chan->chan;
+
+ for (; elements > 0; elements--, cfg_tbl++) {
+ /* update event context array */
+ if (!strcmp(cfg_tbl->name, "ECA")) {
+ er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[er_index];
+ if (sizeof(*er_ctxt) != cfg_tbl->len) {
+ MHI_ERR(
+ "Invalid ECA size, expected:%zu actual%zu\n",
+ sizeof(*er_ctxt), cfg_tbl->len);
+ return -EINVAL;
+ }
+ memcpy((void *)er_ctxt, cfg_tbl->buf, sizeof(*er_ctxt));
+ continue;
+ }
+
+ /* update channel context array */
+ if (!strcmp(cfg_tbl->name, "CCA")) {
+ ch_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[chan];
+ if (cfg_tbl->len != sizeof(*ch_ctxt)) {
+ MHI_ERR(
+ "Invalid CCA size, expected:%zu actual:%zu\n",
+ sizeof(*ch_ctxt), cfg_tbl->len);
+ return -EINVAL;
+ }
+ memcpy((void *)ch_ctxt, cfg_tbl->buf, sizeof(*ch_ctxt));
+ continue;
+ }
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
+ struct device_node *of_node)
+{
+ int i, ret, num = 0;
+ struct mhi_event *mhi_event;
+ struct device_node *child;
+
+ of_node = of_find_node_by_name(of_node, "mhi_events");
+ if (!of_node)
+ return -EINVAL;
+
+ for_each_available_child_of_node(of_node, child) {
+ if (!strcmp(child->name, "mhi_event"))
+ num++;
+ }
+
+ if (!num)
+ return -EINVAL;
+
+ mhi_cntrl->total_ev_rings = num;
+ mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_event)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mhi_cntrl->lp_ev_rings);
+
+ /* populate ev ring */
+ mhi_event = mhi_cntrl->mhi_event;
+ i = 0;
+ for_each_available_child_of_node(of_node, child) {
+ if (strcmp(child->name, "mhi_event"))
+ continue;
+
+ mhi_event->er_index = i++;
+ ret = of_property_read_u32(child, "mhi,num-elements",
+ (u32 *)&mhi_event->ring.elements);
+ if (ret)
+ goto error_ev_cfg;
+
+ ret = of_property_read_u32(child, "mhi,intmod",
+ &mhi_event->intmod);
+ if (ret)
+ goto error_ev_cfg;
+
+ ret = of_property_read_u32(child, "mhi,msi",
+ &mhi_event->msi);
+ if (ret)
+ goto error_ev_cfg;
+
+ ret = of_property_read_u32(child, "mhi,chan",
+ &mhi_event->chan);
+ if (!ret) {
+ if (mhi_event->chan >= mhi_cntrl->max_chan)
+ goto error_ev_cfg;
+ /* this event ring has a dedicated channel */
+ mhi_event->mhi_chan =
+ &mhi_cntrl->mhi_chan[mhi_event->chan];
+ }
+
+ ret = of_property_read_u32(child, "mhi,priority",
+ &mhi_event->priority);
+ if (ret)
+ goto error_ev_cfg;
+
+ ret = of_property_read_u32(child, "mhi,brstmode",
+ &mhi_event->db_cfg.brstmode);
+ if (ret || MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
+ goto error_ev_cfg;
+
+ mhi_event->db_cfg.process_db =
+ (mhi_event->db_cfg.brstmode == MHI_BRSTMODE_ENABLE) ?
+ mhi_db_brstmode : mhi_db_brstmode_disable;
+
+ ret = of_property_read_u32(child, "mhi,data-type",
+ &mhi_event->data_type);
+ if (ret)
+ mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE;
+
+ if (mhi_event->data_type > MHI_ER_DATA_TYPE_MAX)
+ goto error_ev_cfg;
+
+ switch (mhi_event->data_type) {
+ case MHI_ER_DATA_ELEMENT_TYPE:
+ mhi_event->process_event = mhi_process_data_event_ring;
+ break;
+ case MHI_ER_CTRL_ELEMENT_TYPE:
+ mhi_event->process_event = mhi_process_ctrl_ev_ring;
+ break;
+ case MHI_ER_TSYNC_ELEMENT_TYPE:
+ mhi_event->process_event = mhi_process_tsync_event_ring;
+ break;
+ case MHI_ER_BW_SCALE_ELEMENT_TYPE:
+ mhi_event->process_event = mhi_process_bw_scale_ev_ring;
+ break;
+ }
+
+ mhi_event->hw_ring = of_property_read_bool(child, "mhi,hw-ev");
+ if (mhi_event->hw_ring)
+ mhi_cntrl->hw_ev_rings++;
+ else
+ mhi_cntrl->sw_ev_rings++;
+ mhi_event->cl_manage = of_property_read_bool(child,
+ "mhi,client-manage");
+ mhi_event->offload_ev = of_property_read_bool(child,
+ "mhi,offload");
+
+ /*
+ * low priority events are handled in a separate worker thread
+ * to allow for sleeping functions to be called.
+ */
+ if (!mhi_event->offload_ev) {
+ if (IS_MHI_ER_PRIORITY_LOW(mhi_event))
+ list_add_tail(&mhi_event->node,
+ &mhi_cntrl->lp_ev_rings);
+ else
+ mhi_event->request_irq = true;
+ }
+
+ mhi_event++;
+ }
+
+ /* we need msi for each event ring + additional one for BHI */
+ mhi_cntrl->msi_required = mhi_cntrl->total_ev_rings + 1;
+
+ return 0;
+
+error_ev_cfg:
+
+ kfree(mhi_cntrl->mhi_event);
+ return -EINVAL;
+}
+/*
+ * Parse the "mhi_channels" subtree of the controller device tree node
+ * and populate mhi_cntrl->mhi_chan (allocated here with vzalloc()).
+ *
+ * Fixes over the previous revision:
+ *  - breaking out of for_each_available_child_of_node() leaves a
+ *    reference held on @child; it is now dropped on the error path
+ *  - mhi_cntrl->mhi_chan is reset to NULL after vfree() so later
+ *    cleanup paths cannot free it a second time
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
+			   struct device_node *of_node)
+{
+	int ret;
+	struct device_node *child;
+	u32 chan;
+
+	ret = of_property_read_u32(of_node, "mhi,max-channels",
+				   &mhi_cntrl->max_chan);
+	if (ret)
+		return ret;
+
+	/*
+	 * NOTE(review): of_find_node_by_name() drops a reference on the
+	 * node passed in and searches beyond the children of @of_node;
+	 * of_get_child_by_name() is likely the intended call — confirm.
+	 */
+	of_node = of_find_node_by_name(of_node, "mhi_channels");
+	if (!of_node)
+		return -EINVAL;
+
+	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
+				      sizeof(*mhi_cntrl->mhi_chan));
+	if (!mhi_cntrl->mhi_chan)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);
+
+	/* populate channel configurations */
+	for_each_available_child_of_node(of_node, child) {
+		struct mhi_chan *mhi_chan;
+
+		if (strcmp(child->name, "mhi_chan"))
+			continue;
+
+		/* "reg" is the physical channel number */
+		ret = of_property_read_u32(child, "reg", &chan);
+		if (ret || chan >= mhi_cntrl->max_chan)
+			goto error_chan_cfg;
+
+		mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+		ret = of_property_read_string(child, "label",
+					      &mhi_chan->name);
+		if (ret)
+			goto error_chan_cfg;
+
+		mhi_chan->chan = chan;
+
+		ret = of_property_read_u32(child, "mhi,num-elements",
+					   (u32 *)&mhi_chan->tre_ring.elements);
+		if (!ret && !mhi_chan->tre_ring.elements)
+			goto error_chan_cfg;
+
+		/*
+		 * For some channels, local ring len should be bigger than
+		 * transfer ring len due to internal logical channels in device.
+		 * So host can queue much more buffers than transfer ring len.
+		 * Example, RSC channels should have a larger local channel
+		 * than transfer ring length.
+		 */
+		ret = of_property_read_u32(child, "mhi,local-elements",
+					   (u32 *)&mhi_chan->buf_ring.elements);
+		if (ret)
+			mhi_chan->buf_ring.elements =
+				mhi_chan->tre_ring.elements;
+
+		ret = of_property_read_u32(child, "mhi,event-ring",
+					   &mhi_chan->er_index);
+		if (ret)
+			goto error_chan_cfg;
+
+		ret = of_property_read_u32(child, "mhi,chan-dir",
+					   &mhi_chan->dir);
+		if (ret)
+			goto error_chan_cfg;
+
+		/*
+		 * For most channels, chtype is identical to channel directions,
+		 * if not defined, assign ch direction to chtype
+		 */
+		ret = of_property_read_u32(child, "mhi,chan-type",
+					   &mhi_chan->type);
+		if (ret)
+			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;
+
+		ret = of_property_read_u32(child, "mhi,ee", &mhi_chan->ee_mask);
+		if (ret)
+			goto error_chan_cfg;
+
+		/* pollcfg is optional; leave default on absence */
+		of_property_read_u32(child, "mhi,pollcfg",
+				     &mhi_chan->db_cfg.pollcfg);
+
+		ret = of_property_read_u32(child, "mhi,data-type",
+					   &mhi_chan->xfer_type);
+		if (ret)
+			goto error_chan_cfg;
+
+		/* pick queue/gen callbacks matching the transfer method */
+		switch (mhi_chan->xfer_type) {
+		case MHI_XFER_BUFFER:
+			mhi_chan->gen_tre = mhi_gen_tre;
+			mhi_chan->queue_xfer = mhi_queue_buf;
+			break;
+		case MHI_XFER_SKB:
+			mhi_chan->queue_xfer = mhi_queue_skb;
+			break;
+		case MHI_XFER_SCLIST:
+			mhi_chan->gen_tre = mhi_gen_tre;
+			mhi_chan->queue_xfer = mhi_queue_sclist;
+			break;
+		case MHI_XFER_NOP:
+			mhi_chan->queue_xfer = mhi_queue_nop;
+			break;
+		case MHI_XFER_DMA:
+		case MHI_XFER_RSC_DMA:
+			mhi_chan->queue_xfer = mhi_queue_dma;
+			break;
+		default:
+			goto error_chan_cfg;
+		}
+
+		mhi_chan->lpm_notify = of_property_read_bool(child,
+							     "mhi,lpm-notify");
+		mhi_chan->offload_ch = of_property_read_bool(child,
+							     "mhi,offload-chan");
+		mhi_chan->db_cfg.reset_req = of_property_read_bool(child,
+							"mhi,db-mode-switch");
+		mhi_chan->pre_alloc = of_property_read_bool(child,
+							    "mhi,auto-queue");
+		mhi_chan->auto_start = of_property_read_bool(child,
+							     "mhi,auto-start");
+		mhi_chan->wake_capable = of_property_read_bool(child,
+							"mhi,wake-capable");
+
+		/* pre-allocated buffers only make sense for inbound buffers */
+		if (mhi_chan->pre_alloc &&
+		    (mhi_chan->dir != DMA_FROM_DEVICE ||
+		     mhi_chan->xfer_type != MHI_XFER_BUFFER))
+			goto error_chan_cfg;
+
+		/* bi-dir and directionless channels must be an offload chan */
+		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
+		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch)
+			goto error_chan_cfg;
+
+		/* if mhi host allocate the buffers then client cannot queue */
+		if (mhi_chan->pre_alloc)
+			mhi_chan->queue_xfer = mhi_queue_nop;
+
+		if (!mhi_chan->offload_ch) {
+			ret = of_property_read_u32(child, "mhi,doorbell-mode",
+						   &mhi_chan->db_cfg.brstmode);
+			if (ret ||
+			    MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode))
+				goto error_chan_cfg;
+
+			mhi_chan->db_cfg.process_db =
+				(mhi_chan->db_cfg.brstmode ==
+				 MHI_BRSTMODE_ENABLE) ?
+				mhi_db_brstmode : mhi_db_brstmode_disable;
+		}
+
+		mhi_chan->configured = true;
+
+		if (mhi_chan->lpm_notify)
+			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
+	}
+
+	return 0;
+
+error_chan_cfg:
+	/* iterator holds a reference on @child when exited early */
+	of_node_put(child);
+	vfree(mhi_cntrl->mhi_chan);
+	mhi_cntrl->mhi_chan = NULL;
+
+	return -EINVAL;
+}
+
+/*
+ * Parse the controller's device tree node: channel configuration,
+ * event ring configuration and top-level "mhi,*" properties.
+ *
+ * Fix: the previous revision released mhi_cntrl->mhi_chan with
+ * kfree(), but of_parse_ch_cfg() allocates it with vzalloc() — it
+ * must be released with vfree().
+ *
+ * Returns 0 on success, negative errno on failure (all allocations
+ * made by the parse helpers are released on failure).
+ */
+static int of_parse_dt(struct mhi_controller *mhi_cntrl,
+		       struct device_node *of_node)
+{
+	int ret;
+	enum mhi_ee i;
+	u32 *ee;
+	u32 bhie_offset;
+
+	/* parse MHI channel configuration */
+	ret = of_parse_ch_cfg(mhi_cntrl, of_node);
+	if (ret)
+		return ret;
+
+	/* parse MHI event configuration */
+	ret = of_parse_ev_cfg(mhi_cntrl, of_node);
+	if (ret)
+		goto error_ev_cfg;
+
+	/* optional properties fall back to compile-time defaults */
+	ret = of_property_read_u32(of_node, "mhi,timeout",
+				   &mhi_cntrl->timeout_ms);
+	if (ret)
+		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
+
+	mhi_cntrl->bounce_buf = of_property_read_bool(of_node, "mhi,use-bb");
+	ret = of_property_read_u32(of_node, "mhi,buffer-len",
+				   (u32 *)&mhi_cntrl->buffer_len);
+	if (ret)
+		mhi_cntrl->buffer_len = MHI_MAX_MTU;
+
+	/* by default host allowed to ring DB both M0 and M2 state */
+	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
+	if (of_property_read_bool(of_node, "mhi,m2-no-db-access"))
+		mhi_cntrl->db_access &= ~MHI_PM_M2;
+
+	/* parse the device ee table */
+	for (i = MHI_EE_PBL, ee = mhi_cntrl->ee_table; i < MHI_EE_MAX;
+	     i++, ee++) {
+		/* setup the default ee before checking for override */
+		*ee = i;
+		ret = of_property_match_string(of_node, "mhi,ee-names",
+					       mhi_ee_str[i]);
+		if (ret < 0)
+			continue;
+
+		of_property_read_u32_index(of_node, "mhi,ee", ret, ee);
+	}
+
+	ret = of_property_read_u32(of_node, "mhi,bhie-offset", &bhie_offset);
+	if (!ret)
+		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_offset;
+
+	return 0;
+
+error_ev_cfg:
+	/* mhi_chan was allocated with vzalloc() in of_parse_ch_cfg() */
+	vfree(mhi_cntrl->mhi_chan);
+	mhi_cntrl->mhi_chan = NULL;
+
+	return ret;
+}
+
+/*
+ * Register a device-tree-described MHI controller with the MHI bus.
+ *
+ * Validates the mandatory callbacks, parses the DT configuration,
+ * allocates command/event/channel bookkeeping, reads the SoC hardware
+ * version (when registers are mapped) and creates the controller
+ * device.
+ *
+ * Fixes over the previous revision:
+ *  - mhi_chan is vzalloc()'d and must be released with vfree(), not
+ *    kfree(), in the unwind path
+ *  - dropped the stray ';' after the function's closing brace
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
+{
+	int ret;
+	int i;
+	struct mhi_event *mhi_event;
+	struct mhi_chan *mhi_chan;
+	struct mhi_cmd *mhi_cmd;
+	struct mhi_device *mhi_dev;
+	u32 soc_info;
+
+	if (!mhi_cntrl->of_node)
+		return -EINVAL;
+
+	/* runtime pm and status/link callbacks are mandatory */
+	if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put)
+		return -EINVAL;
+
+	if (!mhi_cntrl->status_cb || !mhi_cntrl->link_status)
+		return -EINVAL;
+
+	ret = of_parse_dt(mhi_cntrl, mhi_cntrl->of_node);
+	if (ret)
+		return -EINVAL;
+
+	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
+				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
+	if (!mhi_cntrl->mhi_cmd) {
+		ret = -ENOMEM;
+		goto error_alloc_cmd;
+	}
+
+	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
+	mutex_init(&mhi_cntrl->pm_mutex);
+	rwlock_init(&mhi_cntrl->pm_lock);
+	spin_lock_init(&mhi_cntrl->transition_lock);
+	spin_lock_init(&mhi_cntrl->wlock);
+	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
+	INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
+	INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
+	INIT_WORK(&mhi_cntrl->low_priority_worker, mhi_low_priority_worker);
+	init_waitqueue_head(&mhi_cntrl->state_event);
+
+	mhi_cmd = mhi_cntrl->mhi_cmd;
+	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
+		spin_lock_init(&mhi_cmd->lock);
+
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		/* offload event rings are managed by the client */
+		if (mhi_event->offload_ev)
+			continue;
+
+		mhi_event->mhi_cntrl = mhi_cntrl;
+		spin_lock_init(&mhi_event->lock);
+
+		/* low priority rings run from a worker, not a tasklet */
+		if (IS_MHI_ER_PRIORITY_LOW(mhi_event))
+			continue;
+
+		if (mhi_event->data_type == MHI_ER_CTRL_ELEMENT_TYPE)
+			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
+				     (ulong)mhi_event);
+		else
+			tasklet_init(&mhi_event->task, mhi_ev_task,
+				     (ulong)mhi_event);
+	}
+
+	mhi_chan = mhi_cntrl->mhi_chan;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+		mutex_init(&mhi_chan->mutex);
+		init_completion(&mhi_chan->completion);
+		rwlock_init(&mhi_chan->lock);
+	}
+
+	/* select mapping helpers depending on bounce-buffer use */
+	if (mhi_cntrl->bounce_buf) {
+		mhi_cntrl->map_single = mhi_map_single_use_bb;
+		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
+	} else {
+		mhi_cntrl->map_single = mhi_map_single_no_bb;
+		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
+	}
+
+	/* read the device info if possible */
+	if (mhi_cntrl->regs) {
+		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
+				   SOC_HW_VERSION_OFFS, &soc_info);
+		if (ret)
+			goto error_alloc_dev;
+
+		mhi_cntrl->family_number =
+			(soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
+			SOC_HW_VERSION_FAM_NUM_SHFT;
+		mhi_cntrl->device_number =
+			(soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
+			SOC_HW_VERSION_DEV_NUM_SHFT;
+		mhi_cntrl->major_version =
+			(soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
+			SOC_HW_VERSION_MAJOR_VER_SHFT;
+		mhi_cntrl->minor_version =
+			(soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
+			SOC_HW_VERSION_MINOR_VER_SHFT;
+	}
+
+	/* register controller with mhi_bus */
+	mhi_dev = mhi_alloc_device(mhi_cntrl);
+	if (!mhi_dev) {
+		ret = -ENOMEM;
+		goto error_alloc_dev;
+	}
+
+	mhi_dev->dev_type = MHI_CONTROLLER_TYPE;
+	mhi_dev->mhi_cntrl = mhi_cntrl;
+	dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u", mhi_dev->dev_id,
+		     mhi_dev->domain, mhi_dev->bus, mhi_dev->slot);
+
+	/* init wake source */
+	device_init_wakeup(&mhi_dev->dev, true);
+
+	ret = device_add(&mhi_dev->dev);
+	if (ret)
+		goto error_add_dev;
+
+	mhi_cntrl->mhi_dev = mhi_dev;
+
+	mhi_cntrl->parent = mhi_parent;
+
+	mhi_cntrl->klog_lvl = MHI_MSG_LVL_ERROR;
+
+	/* adding it to this list only for debug purpose */
+	mutex_lock(&mhi_bus.lock);
+	list_add_tail(&mhi_cntrl->node, &mhi_bus.controller_list);
+	mutex_unlock(&mhi_bus.lock);
+
+	return 0;
+
+error_add_dev:
+	mhi_dealloc_device(mhi_cntrl, mhi_dev);
+
+error_alloc_dev:
+	kfree(mhi_cntrl->mhi_cmd);
+
+error_alloc_cmd:
+	/* mhi_chan comes from vzalloc(); mhi_event from kcalloc() */
+	vfree(mhi_cntrl->mhi_chan);
+	kfree(mhi_cntrl->mhi_event);
+
+	return ret;
+}
+EXPORT_SYMBOL(of_register_mhi_controller);
+
+/*
+ * Tear down a controller registered via of_register_mhi_controller().
+ *
+ * Fixes over the previous revision:
+ *  - unregister the controller device *before* freeing the command,
+ *    event and channel bookkeeping it may still reference
+ *  - mhi_chan is vzalloc()'d and must be released with vfree()
+ */
+void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+	device_del(&mhi_dev->dev);
+	put_device(&mhi_dev->dev);
+
+	kfree(mhi_cntrl->mhi_cmd);
+	kfree(mhi_cntrl->mhi_event);
+	/* allocated with vzalloc() in of_parse_ch_cfg() */
+	vfree(mhi_cntrl->mhi_chan);
+	kfree(mhi_cntrl->mhi_tsync);
+
+	/* remove from the debug controller list */
+	mutex_lock(&mhi_bus.lock);
+	list_del(&mhi_cntrl->node);
+	mutex_unlock(&mhi_bus.lock);
+}
+EXPORT_SYMBOL(mhi_unregister_mhi_controller);
+
+/* Stash a pointer to the caller's private data in the controller. */
+static inline void mhi_controller_set_devdata(struct mhi_controller *mhi_cntrl,
+					      void *priv)
+{
+	mhi_cntrl->priv_data = priv;
+}
+
+
+/*
+ * Allocate a zero-initialized mhi_controller with @size extra bytes of
+ * client private data appended directly after the structure.  When a
+ * private region is requested it is registered as the controller's
+ * devdata.  Returns NULL on allocation failure.
+ */
+struct mhi_controller *mhi_alloc_controller(size_t size)
+{
+	struct mhi_controller *mhi_cntrl = kzalloc(sizeof(*mhi_cntrl) + size,
+						   GFP_KERNEL);
+
+	/* private data, when requested, lives right past the controller */
+	if (mhi_cntrl && size)
+		mhi_controller_set_devdata(mhi_cntrl, mhi_cntrl + 1);
+
+	return mhi_cntrl;
+}
+EXPORT_SYMBOL(mhi_alloc_controller);
+
+/*
+ * One-time preparation before powering the device up: set up the device
+ * context and, when the controller declares an rddm (ram dump) size,
+ * allocate the rddm table and map the BHIE register block needed to
+ * collect it.  Serialized against power management via pm_mutex.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
+{
+	int ret;
+	u32 bhie_off;
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	ret = mhi_init_dev_ctxt(mhi_cntrl);
+	if (ret) {
+		MHI_ERR("Error with init dev_ctxt\n");
+		goto error_dev_ctxt;
+	}
+
+	/*
+	 * allocate rddm table if specified, this table is for debug purpose
+	 * so we'll ignore errors
+	 */
+	if (mhi_cntrl->rddm_size) {
+		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
+				     mhi_cntrl->rddm_size);
+
+		/*
+		 * This controller supports rddm, we need to manually clear
+		 * BHIE RX registers since por values are undefined.
+		 */
+		if (!mhi_cntrl->bhie) {
+			/* BHIE offset not given via DT; read it from mmio */
+			ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
+					   &bhie_off);
+			if (ret) {
+				MHI_ERR("Error getting bhie offset\n");
+				goto bhie_error;
+			}
+
+			mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
+		}
+
+		/* zero the RX vector window, RXVECADDR_LOW..RXVECSTATUS */
+		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS, 0,
+			  BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + 4);
+
+		if (mhi_cntrl->rddm_image)
+			mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
+	}
+
+	/* mark context setup done so the power-up path can rely on it */
+	mhi_cntrl->pre_init = true;
+
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	return 0;
+
+bhie_error:
+	if (mhi_cntrl->rddm_image) {
+		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+		mhi_cntrl->rddm_image = NULL;
+	}
+
+error_dev_ctxt:
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_prepare_for_power_up);
+
+/*
+ * Undo mhi_prepare_for_power_up(): release the firmware and rddm BHIE
+ * tables (if allocated) and tear down the device context.
+ */
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
+{
+	if (mhi_cntrl->fbc_image) {
+		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+		mhi_cntrl->fbc_image = NULL;
+	}
+
+	if (mhi_cntrl->rddm_image) {
+		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+		mhi_cntrl->rddm_image = NULL;
+	}
+
+	mhi_deinit_dev_ctxt(mhi_cntrl);
+	/* context is gone; a future power up must re-run the prepare step */
+	mhi_cntrl->pre_init = false;
+}
+EXPORT_SYMBOL(mhi_unprepare_after_power_down);
+
+/*
+ * Bus match callback: pair an MHI channel device with a client driver
+ * whose id_table lists the device's channel name.
+ *
+ * Fix: removed the extraneous ';' that followed the function's closing
+ * brace in the previous revision.
+ *
+ * Returns 1 on match (and records the matching id on the device),
+ * 0 otherwise.
+ */
+static int mhi_match(struct device *dev, struct device_driver *drv)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+	const struct mhi_device_id *id;
+
+	/* if controller type there is no client driver associated with it */
+	if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE)
+		return 0;
+
+	/* id_table is terminated by an entry with an empty channel name */
+	for (id = mhi_drv->id_table; id->chan[0]; id++)
+		if (!strcmp(mhi_dev->chan_name, id->chan)) {
+			mhi_dev->id = id;
+			return 1;
+		}
+
+	return 0;
+}
+
+/*
+ * Device release callback, invoked by the driver core when the last
+ * reference to the mhi_device is dropped.  Clear the back-pointers the
+ * channels keep to this device before freeing it so nothing is left
+ * dangling.
+ */
+static void mhi_release_device(struct device *dev)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_chan *ul = mhi_dev->ul_chan;
+	struct mhi_chan *dl = mhi_dev->dl_chan;
+
+	if (ul)
+		ul->mhi_dev = NULL;
+	if (dl)
+		dl->mhi_dev = NULL;
+
+	kfree(mhi_dev);
+}
+
+/* MHI bus type; client drivers bind to channel devices via mhi_match() */
+struct bus_type mhi_bus_type = {
+	.name = "mhi",
+	.dev_name = "mhi",
+	.match = mhi_match,
+};
+
+/*
+ * Bus probe callback: wire the client driver's transfer/status
+ * callbacks into the channels backing this mhi_device, then invoke the
+ * driver's probe.  The device is voted out of low power mode for the
+ * duration of probe.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int mhi_driver_probe(struct device *dev)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct device_driver *drv = dev->driver;
+	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+	struct mhi_event *mhi_event;
+	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
+	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
+	int ret;
+
+	/* bring device out of lpm */
+	ret = mhi_device_get_sync(mhi_dev, MHI_VOTE_DEVICE);
+	if (ret)
+		return ret;
+
+	ret = -EINVAL;
+	if (ul_chan) {
+		/* lpm notification requires status_cb */
+		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
+			goto exit_probe;
+
+		/* non-offload channels need an UL transfer callback */
+		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
+			goto exit_probe;
+
+		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+		mhi_dev->status_cb = mhi_drv->status_cb;
+		if (ul_chan->auto_start) {
+			ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
+			if (ret)
+				goto exit_probe;
+		}
+	}
+
+	if (dl_chan) {
+		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
+			goto exit_probe;
+
+		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
+			goto exit_probe;
+
+		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];
+
+		/*
+		 * if this channel's event ring is managed by the client, then
+		 * status_cb must be defined so we can send the async
+		 * cb whenever there are pending data
+		 */
+		if (mhi_event->cl_manage && !mhi_drv->status_cb)
+			goto exit_probe;
+
+		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+
+		/* ul & dl uses same status cb */
+		mhi_dev->status_cb = mhi_drv->status_cb;
+	}
+
+	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
+	if (ret)
+		goto exit_probe;
+
+	/* NOTE(review): auto-start result is ignored — confirm best-effort */
+	if (dl_chan && dl_chan->auto_start)
+		mhi_prepare_channel(mhi_cntrl, dl_chan);
+
+	mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
+
+	return ret;
+
+exit_probe:
+	mhi_unprepare_from_transfer(mhi_dev);
+
+	mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
+
+	return ret;
+}
+
+/*
+ * Bus remove callback: quiesce and reset both channels backing the
+ * device, call the client driver's remove, then de-init the channel
+ * contexts and drop any outstanding device/bus votes.
+ */
+static int mhi_driver_remove(struct device *dev)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan;
+	enum MHI_CH_STATE ch_state[] = {
+		MHI_CH_STATE_DISABLED,
+		MHI_CH_STATE_DISABLED
+	};
+	int dir;
+
+	/* control device has no work to do */
+	if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE)
+		return 0;
+
+	MHI_LOG("Removing device for chan:%s\n", mhi_dev->chan_name);
+
+	/* reset both channels */
+	for (dir = 0; dir < 2; dir++) {
+		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+		if (!mhi_chan)
+			continue;
+
+		/* wake all threads waiting for completion */
+		write_lock_irq(&mhi_chan->lock);
+		mhi_chan->ccs = MHI_EV_CC_INVALID;
+		complete_all(&mhi_chan->completion);
+		write_unlock_irq(&mhi_chan->lock);
+
+		/* move channel state to suspended, no more processing */
+		mutex_lock(&mhi_chan->mutex);
+		write_lock_irq(&mhi_chan->lock);
+		/* remember the previous state for the de-init pass below */
+		ch_state[dir] = mhi_chan->ch_state;
+		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
+		write_unlock_irq(&mhi_chan->lock);
+
+		/* reset the channel */
+		if (!mhi_chan->offload_ch)
+			mhi_reset_chan(mhi_cntrl, mhi_chan);
+
+		mutex_unlock(&mhi_chan->mutex);
+	}
+
+	/* destroy the device */
+	mhi_drv->remove(mhi_dev);
+
+	/* de_init channel if it was enabled */
+	for (dir = 0; dir < 2; dir++) {
+		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+		if (!mhi_chan)
+			continue;
+
+		mutex_lock(&mhi_chan->mutex);
+
+		if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
+		    !mhi_chan->offload_ch)
+			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+
+		mutex_unlock(&mhi_chan->mutex);
+	}
+
+
+	/* this device no longer provides the timesync service */
+	if (mhi_cntrl->tsync_dev == mhi_dev)
+		mhi_cntrl->tsync_dev = NULL;
+
+	/* relinquish any pending votes for device */
+	while (atomic_read(&mhi_dev->dev_vote))
+		mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
+
+	/* remove pending votes for the bus */
+	while (atomic_read(&mhi_dev->bus_vote))
+		mhi_device_put(mhi_dev, MHI_VOTE_BUS);
+
+	return 0;
+}
+
+/*
+ * Register a client driver on the MHI bus.  Both the probe and remove
+ * callbacks are mandatory.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int mhi_driver_register(struct mhi_driver *mhi_drv)
+{
+	struct device_driver *drv = &mhi_drv->driver;
+
+	/* refuse partially wired drivers up front */
+	if (!mhi_drv->probe || !mhi_drv->remove)
+		return -EINVAL;
+
+	drv->bus = &mhi_bus_type;
+	drv->probe = mhi_driver_probe;
+	drv->remove = mhi_driver_remove;
+
+	return driver_register(drv);
+}
+EXPORT_SYMBOL(mhi_driver_register);
+
+/* Unregister a driver previously added with mhi_driver_register(). */
+void mhi_driver_unregister(struct mhi_driver *mhi_drv)
+{
+	driver_unregister(&mhi_drv->driver);
+}
+EXPORT_SYMBOL(mhi_driver_unregister);
+
+/*
+ * Allocate and initialize (but do not register) an mhi_device bound to
+ * @mhi_cntrl.  The caller is responsible for dev_set_name() and
+ * device_add(); the device is freed by mhi_release_device() on the
+ * final put.  Returns NULL on allocation failure.
+ */
+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_device *mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
+	struct device *dev;
+
+	if (!mhi_dev)
+		return NULL;
+
+	dev = &mhi_dev->dev;
+	device_initialize(dev);
+	dev->bus = &mhi_bus_type;
+	dev->release = mhi_release_device;	/* frees mhi_dev on last put */
+	dev->parent = mhi_cntrl->dev;
+	mhi_dev->mhi_cntrl = mhi_cntrl;
+	/* inherit dev_id/domain/bus/slot identifiers from the controller */
+	mhi_dev->dev_id = mhi_cntrl->dev_id;
+	mhi_dev->domain = mhi_cntrl->domain;
+	mhi_dev->bus = mhi_cntrl->bus;
+	mhi_dev->slot = mhi_cntrl->slot;
+	mhi_dev->mtu = MHI_MAX_MTU;
+	atomic_set(&mhi_dev->dev_vote, 0);
+	atomic_set(&mhi_dev->bus_vote, 0);
+
+	return mhi_dev;
+}
+
+/*
+ * Bus initialization: set up the controller list, create the debugfs
+ * root directory and register the mhi bus type.  On success the DTR
+ * sub-module is initialized as well.
+ */
+static int __init mhi_init(void)
+{
+	int ret;
+
+	mutex_init(&mhi_bus.lock);
+	INIT_LIST_HEAD(&mhi_bus.controller_list);
+
+	/* parent directory */
+	mhi_parent = debugfs_create_dir(mhi_bus_type.name, NULL);
+
+	ret = bus_register(&mhi_bus_type);
+
+	/* NOTE(review): mhi_dtr_init() result is ignored — confirm best-effort */
+	if (!ret)
+		mhi_dtr_init();
+	return ret;
+}
+postcore_initcall(mhi_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("MHI_CORE");
+MODULE_DESCRIPTION("MHI Host Interface");
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
new file mode 100644
index 0000000..d0bc74b
--- /dev/null
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -0,0 +1,900 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MHI_INT_H
+#define _MHI_INT_H
+
+extern struct bus_type mhi_bus_type;
+
+/* MHI mmio register mapping */
+#define PCI_INVALID_READ(val) (val == U32_MAX)
+#define MHI_REG_SIZE (SZ_4K)
+
+#define MHIREGLEN (0x0)
+#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
+#define MHIREGLEN_MHIREGLEN_SHIFT (0)
+
+#define MHIVER (0x8)
+#define MHIVER_MHIVER_MASK (0xFFFFFFFF)
+#define MHIVER_MHIVER_SHIFT (0)
+
+#define MHICFG (0x10)
+#define MHICFG_NHWER_MASK (0xFF000000)
+#define MHICFG_NHWER_SHIFT (24)
+#define MHICFG_NER_MASK (0xFF0000)
+#define MHICFG_NER_SHIFT (16)
+#define MHICFG_NHWCH_MASK (0xFF00)
+#define MHICFG_NHWCH_SHIFT (8)
+#define MHICFG_NCH_MASK (0xFF)
+#define MHICFG_NCH_SHIFT (0)
+
+#define CHDBOFF (0x18)
+#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF)
+#define CHDBOFF_CHDBOFF_SHIFT (0)
+
+#define ERDBOFF (0x20)
+#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF)
+#define ERDBOFF_ERDBOFF_SHIFT (0)
+
+#define BHIOFF (0x28)
+#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF)
+#define BHIOFF_BHIOFF_SHIFT (0)
+
+#define BHIEOFF (0x2C)
+#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF)
+#define BHIEOFF_BHIEOFF_SHIFT (0)
+
+#define DEBUGOFF (0x30)
+#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF)
+#define DEBUGOFF_DEBUGOFF_SHIFT (0)
+
+#define MHICTRL (0x38)
+#define MHICTRL_MHISTATE_MASK (0x0000FF00)
+#define MHICTRL_MHISTATE_SHIFT (8)
+#define MHICTRL_RESET_MASK (0x2)
+#define MHICTRL_RESET_SHIFT (1)
+
+#define MHISTATUS (0x48)
+#define MHISTATUS_MHISTATE_MASK (0x0000FF00)
+#define MHISTATUS_MHISTATE_SHIFT (8)
+#define MHISTATUS_SYSERR_MASK (0x4)
+#define MHISTATUS_SYSERR_SHIFT (2)
+#define MHISTATUS_READY_MASK (0x1)
+#define MHISTATUS_READY_SHIFT (0)
+
+#define CCABAP_LOWER (0x58)
+#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF)
+#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0)
+
+#define CCABAP_HIGHER (0x5C)
+#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF)
+#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0)
+
+#define ECABAP_LOWER (0x60)
+#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF)
+#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0)
+
+#define ECABAP_HIGHER (0x64)
+#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF)
+#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0)
+
+#define CRCBAP_LOWER (0x68)
+#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF)
+#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0)
+
+#define CRCBAP_HIGHER (0x6C)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0)
+
+#define CRDB_LOWER (0x70)
+#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF)
+#define CRDB_LOWER_CRDB_LOWER_SHIFT (0)
+
+#define CRDB_HIGHER (0x74)
+#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF)
+#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0)
+
+#define MHICTRLBASE_LOWER (0x80)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0)
+
+#define MHICTRLBASE_HIGHER (0x84)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0)
+
+#define MHICTRLLIMIT_LOWER (0x88)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0)
+
+#define MHICTRLLIMIT_HIGHER (0x8C)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0)
+
+#define MHIDATABASE_LOWER (0x98)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0)
+
+#define MHIDATABASE_HIGHER (0x9C)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0)
+
+#define MHIDATALIMIT_LOWER (0xA0)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0)
+
+#define MHIDATALIMIT_HIGHER (0xA4)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0)
+
+/* Host request register */
+#define MHI_SOC_RESET_REQ_OFFSET (0xB0)
+#define MHI_SOC_RESET_REQ BIT(0)
+
+/* MHI misc capability registers */
+#define MISC_OFFSET (0x24)
+#define MISC_CAP_MASK (0xFFFFFFFF)
+#define MISC_CAP_SHIFT (0)
+
+#define CAP_CAPID_MASK (0xFF000000)
+#define CAP_CAPID_SHIFT (24)
+#define CAP_NEXT_CAP_MASK (0x00FFF000)
+#define CAP_NEXT_CAP_SHIFT (12)
+
+/* MHI Timesync offsets */
+#define TIMESYNC_CFG_OFFSET (0x00)
+#define TIMESYNC_CFG_CAPID_MASK (CAP_CAPID_MASK)
+#define TIMESYNC_CFG_CAPID_SHIFT (CAP_CAPID_SHIFT)
+#define TIMESYNC_CFG_NEXT_OFF_MASK (CAP_NEXT_CAP_MASK)
+#define TIMESYNC_CFG_NEXT_OFF_SHIFT (CAP_NEXT_CAP_SHIFT)
+#define TIMESYNC_CFG_NUMCMD_MASK (0xFF)
+#define TIMESYNC_CFG_NUMCMD_SHIFT (0)
+#define TIMESYNC_DB_OFFSET (0x4)
+#define TIMESYNC_TIME_LOW_OFFSET (0x8)
+#define TIMESYNC_TIME_HIGH_OFFSET (0xC)
+
+#define TIMESYNC_CAP_ID (2)
+
+/* MHI Bandwidth scaling offsets */
+#define BW_SCALE_CFG_OFFSET (0x04)
+#define BW_SCALE_CFG_CHAN_DB_ID_MASK (0xFE000000)
+#define BW_SCALE_CFG_CHAN_DB_ID_SHIFT (25)
+#define BW_SCALE_CFG_ENABLED_MASK (0x01000000)
+#define BW_SCALE_CFG_ENABLED_SHIFT (24)
+#define BW_SCALE_CFG_ER_ID_MASK (0x00F80000)
+#define BW_SCALE_CFG_ER_ID_SHIFT (19)
+
+#define BW_SCALE_CAP_ID (3)
+
+/* MHI BHI offsets */
+#define BHI_BHIVERSION_MINOR (0x00)
+#define BHI_BHIVERSION_MAJOR (0x04)
+#define BHI_IMGADDR_LOW (0x08)
+#define BHI_IMGADDR_HIGH (0x0C)
+#define BHI_IMGSIZE (0x10)
+#define BHI_RSVD1 (0x14)
+#define BHI_IMGTXDB (0x18)
+#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHI_TXDB_SEQNUM_SHFT (0)
+#define BHI_RSVD2 (0x1C)
+#define BHI_INTVEC (0x20)
+#define BHI_RSVD3 (0x24)
+#define BHI_EXECENV (0x28)
+#define BHI_STATUS (0x2C)
+#define BHI_ERRCODE (0x30)
+#define BHI_ERRDBG1 (0x34)
+#define BHI_ERRDBG2 (0x38)
+#define BHI_ERRDBG3 (0x3C)
+#define BHI_SERIALNU (0x40)
+#define BHI_SBLANTIROLLVER (0x44)
+#define BHI_NUMSEG (0x48)
+#define BHI_MSMHWID(n) (0x4C + (0x4 * n))
+#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n))
+#define BHI_RSVD5 (0xC4)
+#define BHI_STATUS_MASK (0xC0000000)
+#define BHI_STATUS_SHIFT (30)
+#define BHI_STATUS_ERROR (3)
+#define BHI_STATUS_SUCCESS (2)
+#define BHI_STATUS_RESET (0)
+
+/* MHI BHIE offsets */
+#define BHIE_MSMSOCID_OFFS (0x0000)
+#define BHIE_TXVECADDR_LOW_OFFS (0x002C)
+#define BHIE_TXVECADDR_HIGH_OFFS (0x0030)
+#define BHIE_TXVECSIZE_OFFS (0x0034)
+#define BHIE_TXVECDB_OFFS (0x003C)
+#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECDB_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_OFFS (0x0044)
+#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_TXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_TXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03)
+#define BHIE_RXVECADDR_LOW_OFFS (0x0060)
+#define BHIE_RXVECADDR_HIGH_OFFS (0x0064)
+#define BHIE_RXVECSIZE_OFFS (0x0068)
+#define BHIE_RXVECDB_OFFS (0x0070)
+#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECDB_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_OFFS (0x0078)
+#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_RXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_RXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03)
+
+#define SOC_HW_VERSION_OFFS (0x224)
+#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000)
+#define SOC_HW_VERSION_FAM_NUM_SHFT (28)
+#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000)
+#define SOC_HW_VERSION_DEV_NUM_SHFT (16)
+#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00)
+#define SOC_HW_VERSION_MAJOR_VER_SHFT (8)
+#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF)
+#define SOC_HW_VERSION_MINOR_VER_SHFT (0)
+
+/* timesync time calculations */
+#define LOCAL_TICKS_TO_US(x) (div_u64((x) * 100ULL, \
+ div_u64(mhi_cntrl->local_timer_freq, 10000ULL)))
+#define REMOTE_TICKS_TO_US(x) (div_u64((x) * 100ULL, \
+ div_u64(mhi_cntrl->remote_timer_freq, 10000ULL)))
+
+/*
+ * Event ring context shared with the device over the control region.
+ * Field layout is part of the hardware ABI: the 64-bit ring pointers
+ * are only 4-byte aligned in this layout, hence the per-member
+ * __packed __aligned(4) annotations.
+ */
+struct mhi_event_ctxt {
+	u32 reserved : 8;
+	u32 intmodc : 8;
+	u32 intmodt : 16;	/* presumably interrupt moderation — confirm */
+	u32 ertype;
+	u32 msivec;
+
+	u64 rbase __packed __aligned(4);	/* ring base address */
+	u64 rlen __packed __aligned(4);		/* ring length */
+	u64 rp __packed __aligned(4);		/* read pointer */
+	u64 wp __packed __aligned(4);		/* write pointer */
+};
+
+/*
+ * Channel context shared with the device.  Layout is hardware ABI; the
+ * 64-bit ring pointers are 4-byte aligned, hence the per-member
+ * __packed __aligned(4) annotations.
+ */
+struct mhi_chan_ctxt {
+	u32 chstate : 8;
+	u32 brstmode : 2;
+	u32 pollcfg : 6;
+	u32 reserved : 16;
+	u32 chtype;
+	u32 erindex;	/* index of the event ring serving this channel */
+
+	u64 rbase __packed __aligned(4);	/* ring base address */
+	u64 rlen __packed __aligned(4);		/* ring length */
+	u64 rp __packed __aligned(4);		/* read pointer */
+	u64 wp __packed __aligned(4);		/* write pointer */
+};
+
+struct mhi_cmd_ctxt {
+ u32 reserved0;
+ u32 reserved1;
+ u32 reserved2;
+
+ u64 rbase __packed __aligned(4);
+ u64 rlen __packed __aligned(4);
+ u64 rp __packed __aligned(4);
+ u64 wp __packed __aligned(4);
+};
+
+struct mhi_tre {
+ u64 ptr;
+ u32 dword[2];
+};
+
+struct bhi_vec_entry {
+ u64 dma_addr;
+ u64 size;
+};
+
+enum mhi_cmd_type {
+ MHI_CMD_TYPE_NOP = 1,
+ MHI_CMD_TYPE_RESET = 16,
+ MHI_CMD_TYPE_STOP = 17,
+ MHI_CMD_TYPE_START = 18,
+ MHI_CMD_TYPE_TSYNC = 24,
+};
+
+/* no operation command */
+#define MHI_TRE_CMD_NOOP_PTR (0)
+#define MHI_TRE_CMD_NOOP_DWORD0 (0)
+#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_TYPE_NOP << 16)
+
+/* channel reset command */
+#define MHI_TRE_CMD_RESET_PTR (0)
+#define MHI_TRE_CMD_RESET_DWORD0 (0)
+#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \
+ (MHI_CMD_TYPE_RESET << 16))
+
+/* channel stop command */
+#define MHI_TRE_CMD_STOP_PTR (0)
+#define MHI_TRE_CMD_STOP_DWORD0 (0)
+#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | (MHI_CMD_TYPE_STOP << 16))
+
+/* channel start command */
+#define MHI_TRE_CMD_START_PTR (0)
+#define MHI_TRE_CMD_START_DWORD0 (0)
+#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \
+ (MHI_CMD_TYPE_START << 16))
+
+/* time sync cfg command */
+#define MHI_TRE_CMD_TSYNC_CFG_PTR (0)
+#define MHI_TRE_CMD_TSYNC_CFG_DWORD0 (0)
+#define MHI_TRE_CMD_TSYNC_CFG_DWORD1(er) ((MHI_CMD_TYPE_TSYNC << 16) | \
+ (er << 24))
+
+#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
+
+/* event descriptor macros */
+#define MHI_TRE_EV_PTR(ptr) (ptr)
+#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len)
+#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16))
+#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr)
+#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF)
+#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
+#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_TSYNC_SEQ(tre) ((tre)->dword[0])
+#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr)
+#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr)
+#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF)
+#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF)
+#define MHI_TRE_GET_EV_BW_REQ_SEQ(tre) (((tre)->dword[0] >> 8) & 0xFF)
+
+/* transfer descriptor macros */
+#define MHI_TRE_DATA_PTR(ptr) (ptr)
+#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU)
+#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
+ | (ieot << 9) | (ieob << 8) | chain)
+
+/* rsc transfer descriptor macros */
+#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr)
+#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie)
+#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16)
+
+enum MHI_CMD {
+ MHI_CMD_RESET_CHAN,
+ MHI_CMD_START_CHAN,
+ MHI_CMD_TIMSYNC_CFG,
+};
+
+enum MHI_PKT_TYPE {
+ MHI_PKT_TYPE_INVALID = 0x0,
+ MHI_PKT_TYPE_NOOP_CMD = 0x1,
+ MHI_PKT_TYPE_TRANSFER = 0x2,
+ MHI_PKT_TYPE_COALESCING = 0x8,
+ MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
+ MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
+ MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
+ MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
+ MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
+ MHI_PKT_TYPE_TX_EVENT = 0x22,
+ MHI_PKT_TYPE_RSC_TX_EVENT = 0x28,
+ MHI_PKT_TYPE_EE_EVENT = 0x40,
+ MHI_PKT_TYPE_TSYNC_EVENT = 0x48,
+ MHI_PKT_TYPE_BW_REQ_EVENT = 0x50,
+ MHI_PKT_TYPE_STALE_EVENT, /* internal event */
+};
+
+/* MHI transfer completion events */
+enum MHI_EV_CCS {
+ MHI_EV_CC_INVALID = 0x0,
+ MHI_EV_CC_SUCCESS = 0x1,
+ MHI_EV_CC_EOT = 0x2,
+ MHI_EV_CC_OVERFLOW = 0x3,
+ MHI_EV_CC_EOB = 0x4,
+ MHI_EV_CC_OOB = 0x5,
+ MHI_EV_CC_DB_MODE = 0x6,
+ MHI_EV_CC_UNDEFINED_ERR = 0x10,
+ MHI_EV_CC_BAD_TRE = 0x11,
+};
+
+enum MHI_CH_STATE {
+ MHI_CH_STATE_DISABLED = 0x0,
+ MHI_CH_STATE_ENABLED = 0x1,
+ MHI_CH_STATE_RUNNING = 0x2,
+ MHI_CH_STATE_SUSPENDED = 0x3,
+ MHI_CH_STATE_STOP = 0x4,
+ MHI_CH_STATE_ERROR = 0x5,
+};
+
+enum MHI_BRSTMODE {
+ MHI_BRSTMODE_DISABLE = 0x2,
+ MHI_BRSTMODE_ENABLE = 0x3,
+};
+
+#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_BRSTMODE_DISABLE && \
+ mode != MHI_BRSTMODE_ENABLE)
+
+extern const char * const mhi_ee_str[MHI_EE_MAX];
+#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
+ "INVALID_EE" : mhi_ee_str[ee])
+
+#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
+ ee == MHI_EE_EDL)
+
+#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW)
+
+enum MHI_ST_TRANSITION {
+ MHI_ST_TRANSITION_PBL,
+ MHI_ST_TRANSITION_READY,
+ MHI_ST_TRANSITION_SBL,
+ MHI_ST_TRANSITION_MISSION_MODE,
+ MHI_ST_TRANSITION_MAX,
+};
+
+extern const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX];
+#define TO_MHI_STATE_TRANS_STR(state) (((state) >= MHI_ST_TRANSITION_MAX) ? \
+ "INVALID_STATE" : mhi_state_tran_str[state])
+
+extern const char * const mhi_state_str[MHI_STATE_MAX];
+#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \
+ !mhi_state_str[state]) ? \
+ "INVALID_STATE" : mhi_state_str[state])
+
+enum {
+ MHI_PM_BIT_DISABLE,
+ MHI_PM_BIT_POR,
+ MHI_PM_BIT_M0,
+ MHI_PM_BIT_M2,
+ MHI_PM_BIT_M3_ENTER,
+ MHI_PM_BIT_M3,
+ MHI_PM_BIT_M3_EXIT,
+ MHI_PM_BIT_FW_DL_ERR,
+ MHI_PM_BIT_SYS_ERR_DETECT,
+ MHI_PM_BIT_SYS_ERR_PROCESS,
+ MHI_PM_BIT_SHUTDOWN_PROCESS,
+ MHI_PM_BIT_LD_ERR_FATAL_DETECT,
+ MHI_PM_BIT_MAX
+};
+
+/* internal power states */
+enum MHI_PM_STATE {
+ MHI_PM_DISABLE = BIT(MHI_PM_BIT_DISABLE), /* MHI is not enabled */
+ MHI_PM_POR = BIT(MHI_PM_BIT_POR), /* reset state */
+ MHI_PM_M0 = BIT(MHI_PM_BIT_M0),
+ MHI_PM_M2 = BIT(MHI_PM_BIT_M2),
+ MHI_PM_M3_ENTER = BIT(MHI_PM_BIT_M3_ENTER),
+ MHI_PM_M3 = BIT(MHI_PM_BIT_M3),
+ MHI_PM_M3_EXIT = BIT(MHI_PM_BIT_M3_EXIT),
+ /* firmware download failure state */
+ MHI_PM_FW_DL_ERR = BIT(MHI_PM_BIT_FW_DL_ERR),
+ MHI_PM_SYS_ERR_DETECT = BIT(MHI_PM_BIT_SYS_ERR_DETECT),
+ MHI_PM_SYS_ERR_PROCESS = BIT(MHI_PM_BIT_SYS_ERR_PROCESS),
+ MHI_PM_SHUTDOWN_PROCESS = BIT(MHI_PM_BIT_SHUTDOWN_PROCESS),
+ /* link not accessible */
+ MHI_PM_LD_ERR_FATAL_DETECT = BIT(MHI_PM_BIT_LD_ERR_FATAL_DETECT),
+};
+
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
+ MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
+#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
+#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
+#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \
+ mhi_cntrl->db_access)
+#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
+ MHI_PM_M2 | MHI_PM_M3_EXIT))
+#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2)
+#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state)
+#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
+ MHI_PM_IN_ERROR_STATE(pm_state))
+#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
+ (MHI_PM_M3_ENTER | MHI_PM_M3))
+
+/* accepted buffer type for the channel */
+enum MHI_XFER_TYPE {
+ MHI_XFER_BUFFER,
+ MHI_XFER_SKB,
+ MHI_XFER_SCLIST,
+ MHI_XFER_NOP, /* CPU offload channel, host does not accept transfer */
+ MHI_XFER_DMA, /* receive dma address, already mapped by client */
+ MHI_XFER_RSC_DMA, /* RSC type, accept premapped buffer */
+};
+
+#define NR_OF_CMD_RINGS (1)
+#define CMD_EL_PER_RING (128)
+#define PRIMARY_CMD_RING (0)
+#define MHI_BW_SCALE_CHAN_DB (126)
+#define MHI_DEV_WAKE_DB (127)
+#define MHI_MAX_MTU (0xffff)
+
+#define MHI_BW_SCALE_SETUP(er_index) (((MHI_BW_SCALE_CHAN_DB << \
+ BW_SCALE_CFG_CHAN_DB_ID_SHIFT) & BW_SCALE_CFG_CHAN_DB_ID_MASK) | \
+ ((1 << BW_SCALE_CFG_ENABLED_SHIFT) & BW_SCALE_CFG_ENABLED_MASK) | \
+ (((er_index) << BW_SCALE_CFG_ER_ID_SHIFT) & BW_SCALE_CFG_ER_ID_MASK))
+
+#define MHI_BW_SCALE_RESULT(status, seq) ((status & 0xF) << 8 | (seq & 0xFF))
+#define MHI_BW_SCALE_NACK 0xF
+
+enum MHI_ER_TYPE {
+ MHI_ER_TYPE_INVALID = 0x0,
+ MHI_ER_TYPE_VALID = 0x1,
+};
+
+enum mhi_er_priority {
+ MHI_ER_PRIORITY_HIGH,
+ MHI_ER_PRIORITY_MEDIUM,
+ MHI_ER_PRIORITY_LOW,
+};
+
+#define IS_MHI_ER_PRIORITY_LOW(ev) (ev->priority >= MHI_ER_PRIORITY_LOW)
+#define IS_MHI_ER_PRIORITY_HIGH(ev) (ev->priority == MHI_ER_PRIORITY_HIGH)
+
+enum mhi_er_data_type {
+ MHI_ER_DATA_ELEMENT_TYPE,
+ MHI_ER_CTRL_ELEMENT_TYPE,
+ MHI_ER_TSYNC_ELEMENT_TYPE,
+ MHI_ER_BW_SCALE_ELEMENT_TYPE,
+ MHI_ER_DATA_TYPE_MAX = MHI_ER_BW_SCALE_ELEMENT_TYPE,
+};
+
+enum mhi_ch_ee_mask {
+ MHI_CH_EE_PBL = BIT(MHI_EE_PBL),
+ MHI_CH_EE_SBL = BIT(MHI_EE_SBL),
+ MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS),
+ MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM),
+ MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU),
+ MHI_CH_EE_WFW = BIT(MHI_EE_WFW),
+ MHI_CH_EE_EDL = BIT(MHI_EE_EDL),
+};
+
+enum mhi_ch_type {
+ MHI_CH_TYPE_INVALID = 0,
+ MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE,
+ MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE,
+ MHI_CH_TYPE_INBOUND_COALESCED = 3,
+};
+
+struct db_cfg {
+ bool reset_req;
+ bool db_mode;
+ u32 pollcfg;
+ enum MHI_BRSTMODE brstmode;
+ dma_addr_t db_val;
+ void (*process_db)(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg, void __iomem *io_addr,
+ dma_addr_t db_val);
+};
+
+struct mhi_pm_transitions {
+ enum MHI_PM_STATE from_state;
+ u32 to_states;
+};
+
+struct state_transition {
+ struct list_head node;
+ enum MHI_ST_TRANSITION state;
+};
+
+struct mhi_ctxt {
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ dma_addr_t er_ctxt_addr;
+ dma_addr_t chan_ctxt_addr;
+ dma_addr_t cmd_ctxt_addr;
+};
+
+struct mhi_ring {
+ dma_addr_t dma_handle;
+ dma_addr_t iommu_base;
+ u64 *ctxt_wp; /* point to ctxt wp */
+ void *pre_aligned;
+ void *base;
+ void *rp;
+ void *wp;
+ size_t el_size;
+ size_t len;
+ size_t elements;
+ size_t alloc_size;
+ void __iomem *db_addr;
+};
+
+struct mhi_cmd {
+ struct mhi_ring ring;
+ spinlock_t lock;
+};
+
+struct mhi_buf_info {
+ dma_addr_t p_addr;
+ void *v_addr;
+ void *bb_addr;
+ void *wp;
+ size_t len;
+ void *cb_buf;
+ bool used; /* indicate element is free to use */
+ bool pre_mapped; /* already pre-mapped by client */
+ enum dma_data_direction dir;
+};
+
+struct mhi_event {
+ struct list_head node;
+ u32 er_index;
+ u32 intmod;
+ u32 msi;
+ int chan; /* this event ring is dedicated to a channel */
+ enum mhi_er_priority priority;
+ enum mhi_er_data_type data_type;
+ struct mhi_ring ring;
+ struct db_cfg db_cfg;
+ bool hw_ring;
+ bool cl_manage;
+ bool offload_ev; /* managed by a device driver */
+ bool request_irq; /* has dedicated interrupt handler */
+ spinlock_t lock;
+ struct mhi_chan *mhi_chan; /* dedicated to channel */
+ struct tasklet_struct task;
+ int (*process_event)(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota);
+ struct mhi_controller *mhi_cntrl;
+};
+
+struct mhi_chan {
+ u32 chan;
+ const char *name;
+ /*
+ * important, when consuming increment tre_ring first, when releasing
+ * decrement buf_ring first. If tre_ring has space, buf_ring
+ * guranteed to have space so we do not need to check both rings.
+ */
+ struct mhi_ring buf_ring;
+ struct mhi_ring tre_ring;
+ u32 er_index;
+ u32 intmod;
+ enum mhi_ch_type type;
+ enum dma_data_direction dir;
+ struct db_cfg db_cfg;
+ u32 ee_mask;
+ enum MHI_XFER_TYPE xfer_type;
+ enum MHI_CH_STATE ch_state;
+ enum MHI_EV_CCS ccs;
+ bool lpm_notify;
+ bool configured;
+ bool offload_ch;
+ bool pre_alloc;
+ bool auto_start;
+ bool wake_capable; /* channel should wake up system */
+ /* functions that generate the transfer ring elements */
+ int (*gen_tre)(struct mhi_controller *, struct mhi_chan *, void *,
+ void *, size_t, enum MHI_FLAGS);
+ int (*queue_xfer)(struct mhi_device *, struct mhi_chan *, void *,
+ size_t, enum MHI_FLAGS);
+ /* xfer call back */
+ struct mhi_device *mhi_dev;
+ void (*xfer_cb)(struct mhi_device *, struct mhi_result *);
+ struct mutex mutex;
+ struct completion completion;
+ rwlock_t lock;
+ struct list_head node;
+};
+
+struct tsync_node {
+ struct list_head node;
+ u32 sequence;
+ u64 local_time;
+ u64 remote_time;
+ struct mhi_device *mhi_dev;
+ void (*cb_func)(struct mhi_device *mhi_dev, u32 sequence,
+ u64 local_time, u64 remote_time);
+};
+
+struct mhi_timesync {
+ u32 er_index;
+ void __iomem *db;
+ void __iomem *time_reg;
+ enum MHI_EV_CCS ccs;
+ struct completion completion;
+ spinlock_t lock; /* list protection */
+ struct mutex lpm_mutex; /* lpm protection */
+ struct list_head head;
+};
+
+struct mhi_bus {
+ struct list_head controller_list;
+ struct mutex lock;
+};
+
+/* default MHI timeout */
+#define MHI_TIMEOUT_MS (1000)
+extern struct mhi_bus mhi_bus;
+
+/* debug fs related functions */
+int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d);
+int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d);
+int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d);
+int mhi_debugfs_trigger_reset(void *data, u64 val);
+
+void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl);
+void mhi_init_debugfs(struct mhi_controller *mhi_cntrl);
+
+/* power management apis */
+enum MHI_PM_STATE __must_check mhi_tryset_pm_state(
+ struct mhi_controller *mhi_cntrl,
+ enum MHI_PM_STATE state);
+const char *to_mhi_pm_state_str(enum MHI_PM_STATE state);
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+enum mhi_ee mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+ enum MHI_ST_TRANSITION state);
+void mhi_pm_st_worker(struct work_struct *work);
+void mhi_fw_load_worker(struct work_struct *work);
+void mhi_pm_sys_err_worker(struct work_struct *work);
+void mhi_low_priority_worker(struct work_struct *work);
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
+void mhi_ctrl_ev_task(unsigned long data);
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
+void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason);
+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
+int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
+int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
+int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ enum MHI_CMD cmd);
+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
+
+/* queue transfer buffer */
+int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ void *buf, void *cb, size_t buf_len, enum MHI_FLAGS flags);
+int mhi_queue_buf(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
+ void *buf, size_t len, enum MHI_FLAGS mflags);
+int mhi_queue_skb(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
+ void *buf, size_t len, enum MHI_FLAGS mflags);
+int mhi_queue_sclist(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
+ void *buf, size_t len, enum MHI_FLAGS mflags);
+int mhi_queue_nop(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
+ void *buf, size_t len, enum MHI_FLAGS mflags);
+int mhi_queue_dma(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
+ void *buf, size_t len, enum MHI_FLAGS mflags);
+
+/* register access methods */
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
+ void __iomem *db_addr, dma_addr_t wp);
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_mode, void __iomem *db_addr,
+ dma_addr_t wp);
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 *out);
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 shift, u32 *out);
+void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 val);
+void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 mask, u32 shift, u32 val);
+void mhi_ring_er_db(struct mhi_event *mhi_event);
+void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
+ dma_addr_t wp);
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capability,
+ u32 *offset);
+void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr);
+int mhi_init_timesync(struct mhi_controller *mhi_cntrl);
+int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl);
+void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl);
+int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl);
+void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl);
+int mhi_early_notify_device(struct device *dev, void *data);
+
+/* timesync log support */
+static inline void mhi_timesync_log(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+
+ if (mhi_tsync && mhi_cntrl->tsync_log)
+ mhi_cntrl->tsync_log(mhi_cntrl,
+ readq_no_log(mhi_tsync->time_reg));
+}
+
+/* memory allocation methods */
+static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl,
+ size_t size,
+ dma_addr_t *dma_handle,
+ gfp_t gfp)
+{
+ void *buf = dma_zalloc_coherent(mhi_cntrl->dev, size, dma_handle, gfp);
+
+ if (buf)
+ atomic_add(size, &mhi_cntrl->alloc_size);
+
+ return buf;
+}
+static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl,
+ size_t size,
+ void *vaddr,
+ dma_addr_t dma_handle)
+{
+ atomic_sub(size, &mhi_cntrl->alloc_size);
+ dma_free_coherent(mhi_cntrl->dev, size, vaddr, dma_handle);
+}
+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);
+static inline void mhi_dealloc_device(struct mhi_controller *mhi_cntrl,
+ struct mhi_device *mhi_dev)
+{
+ kfree(mhi_dev);
+}
+int mhi_destroy_device(struct device *dev, void *data);
+void mhi_create_devices(struct mhi_controller *mhi_cntrl);
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info, size_t alloc_size);
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info);
+
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+
+/* initialization methods */
+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
+int mhi_dtr_init(void);
+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info);
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+
+/* isr handlers */
+irqreturn_t mhi_msi_handlr(int irq_number, void *dev);
+irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev);
+irqreturn_t mhi_intvec_handlr(int irq_number, void *dev);
+void mhi_ev_task(unsigned long data);
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define MHI_ASSERT(cond, msg) do { \
+ if (cond) \
+ panic(msg); \
+} while (0)
+
+#else
+
+#define MHI_ASSERT(cond, msg) do { \
+ if (cond) { \
+ MHI_ERR(msg); \
+ WARN_ON(cond); \
+ } \
+} while (0)
+
+#endif
+
+#endif /* _MHI_INT_H */
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
new file mode 100644
index 0000000..7404b8e
--- /dev/null
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -0,0 +1,2388 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/mhi.h>
+#include <linux/types.h>
+#include "mhi_internal.h"
+
+static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *base,
+ u32 offset,
+ u32 *out)
+{
+ u32 tmp = readl_relaxed(base + offset);
+
+ /* unexpected value, query the link status */
+ if (PCI_INVALID_READ(tmp) &&
+ mhi_cntrl->link_status(mhi_cntrl, mhi_cntrl->priv_data))
+ return -EIO;
+
+ *out = tmp;
+
+ return 0;
+}
+
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base,
+ u32 offset,
+ u32 mask,
+ u32 shift,
+ u32 *out)
+{
+ u32 tmp;
+ int ret;
+
+ ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+ if (ret)
+ return ret;
+
+ *out = (tmp & mask) >> shift;
+
+ return 0;
+}
+
+int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl,
+ u32 capability,
+ u32 *offset)
+{
+ u32 cur_cap, next_offset;
+ int ret;
+
+ /* get the 1st supported capability offset */
+ ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MISC_OFFSET,
+ MISC_CAP_MASK, MISC_CAP_SHIFT, offset);
+ if (ret)
+ return ret;
+ do {
+ ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset,
+ CAP_CAPID_MASK, CAP_CAPID_SHIFT,
+ &cur_cap);
+ if (ret)
+ return ret;
+
+ if (cur_cap == capability)
+ return 0;
+
+ ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset,
+ CAP_NEXT_CAP_MASK, CAP_NEXT_CAP_SHIFT,
+ &next_offset);
+ if (ret)
+ return ret;
+
+ *offset = next_offset;
+ if (*offset >= MHI_REG_SIZE)
+ return -ENXIO;
+ } while (next_offset);
+
+ return -ENXIO;
+}
+
+void mhi_write_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *base,
+ u32 offset,
+ u32 val)
+{
+ writel_relaxed(val, base + offset);
+}
+
+void mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base,
+ u32 offset,
+ u32 mask,
+ u32 shift,
+ u32 val)
+{
+ int ret;
+ u32 tmp;
+
+ ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+ if (ret)
+ return;
+
+ tmp &= ~mask;
+ tmp |= (val << shift);
+ mhi_write_reg(mhi_cntrl, base, offset, tmp);
+}
+
+void mhi_write_db(struct mhi_controller *mhi_cntrl,
+ void __iomem *db_addr,
+ dma_addr_t wp)
+{
+ mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(wp));
+ mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(wp));
+}
+
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg,
+ void __iomem *db_addr,
+ dma_addr_t wp)
+{
+ if (db_cfg->db_mode) {
+ db_cfg->db_val = wp;
+ mhi_write_db(mhi_cntrl, db_addr, wp);
+ db_cfg->db_mode = 0;
+ }
+}
+
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg,
+ void __iomem *db_addr,
+ dma_addr_t wp)
+{
+ db_cfg->db_val = wp;
+ mhi_write_db(mhi_cntrl, db_addr, wp);
+}
+
+void mhi_ring_er_db(struct mhi_event *mhi_event)
+{
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
+ ring->db_addr, *ring->ctxt_wp);
+}
+
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
+{
+ dma_addr_t db;
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ db = ring->iommu_base + (ring->wp - ring->base);
+ *ring->ctxt_wp = db;
+ mhi_write_db(mhi_cntrl, ring->db_addr, db);
+}
+
+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *ring = &mhi_chan->tre_ring;
+ dma_addr_t db;
+
+ db = ring->iommu_base + (ring->wp - ring->base);
+ *ring->ctxt_wp = db;
+ mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, ring->db_addr,
+ db);
+}
+
+static enum mhi_ee mhi_translate_dev_ee(struct mhi_controller *mhi_cntrl,
+ u32 dev_ee)
+{
+ enum mhi_ee i;
+
+ for (i = MHI_EE_PBL; i < MHI_EE_MAX; i++)
+ if (mhi_cntrl->ee_table[i] == dev_ee)
+ return i;
+
+ return MHI_EE_NOT_SUPPORTED;
+}
+
+enum mhi_ee mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
+{
+ u32 exec;
+ int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
+
+ return (ret) ? MHI_EE_MAX : mhi_translate_dev_ee(mhi_cntrl, exec);
+}
+
+enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
+{
+ u32 state;
+ int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, &state);
+ return ret ? MHI_STATE_MAX : state;
+}
+
+int mhi_queue_sclist(struct mhi_device *mhi_dev,
+ struct mhi_chan *mhi_chan,
+ void *buf,
+ size_t len,
+ enum MHI_FLAGS mflags)
+{
+ return -EINVAL;
+}
+
+int mhi_queue_nop(struct mhi_device *mhi_dev,
+ struct mhi_chan *mhi_chan,
+ void *buf,
+ size_t len,
+ enum MHI_FLAGS mflags)
+{
+ return -EINVAL;
+}
+
+static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ ring->wp += ring->el_size;
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+ /* smp update */
+ smp_wmb();
+}
+
+static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+ /* smp update */
+ smp_wmb();
+}
+
+static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ int nr_el;
+
+ if (ring->wp < ring->rp)
+ nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
+ else {
+ nr_el = (ring->rp - ring->base) / ring->el_size;
+ nr_el += ((ring->base + ring->len - ring->wp) /
+ ring->el_size) - 1;
+ }
+ return nr_el;
+}
+
+void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
+{
+ return (addr - ring->iommu_base) + ring->base;
+}
+
+dma_addr_t mhi_to_physical(struct mhi_ring *ring, void *addr)
+{
+ return (addr - ring->base) + ring->iommu_base;
+}
+
+static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ dma_addr_t ctxt_wp;
+
+ /* update the WP */
+ ring->wp += ring->el_size;
+ ctxt_wp = *ring->ctxt_wp + ring->el_size;
+
+ if (ring->wp >= (ring->base + ring->len)) {
+ ring->wp = ring->base;
+ ctxt_wp = ring->iommu_base;
+ }
+
+ *ring->ctxt_wp = ctxt_wp;
+
+ /* update the RP */
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+
+ /* visible to other cores */
+ smp_wmb();
+}
+
+static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ void *tmp = ring->wp + ring->el_size;
+
+ if (tmp >= (ring->base + ring->len))
+ tmp = ring->base;
+
+ return (tmp == ring->rp);
+}
+
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ buf_info->p_addr = dma_map_single(mhi_cntrl->dev, buf_info->v_addr,
+ buf_info->len, buf_info->dir);
+ if (dma_mapping_error(mhi_cntrl->dev, buf_info->p_addr))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
+ &buf_info->p_addr, GFP_ATOMIC);
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (buf_info->dir == DMA_TO_DEVICE)
+ memcpy(buf, buf_info->v_addr, buf_info->len);
+
+ buf_info->bb_addr = buf;
+
+ return 0;
+}
+
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ dma_unmap_single(mhi_cntrl->dev, buf_info->p_addr, buf_info->len,
+ buf_info->dir);
+}
+
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ if (buf_info->dir == DMA_FROM_DEVICE)
+ memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
+
+ mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
+ buf_info->p_addr);
+}
+
+int mhi_queue_skb(struct mhi_device *mhi_dev,
+ struct mhi_chan *mhi_chan,
+ void *buf,
+ size_t len,
+ enum MHI_FLAGS mflags)
+{
+ struct sk_buff *skb = buf;
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+ struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
+ struct mhi_buf_info *buf_info;
+ struct mhi_tre *mhi_tre;
+ int ret;
+
+ if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+ return -ENOMEM;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+ MHI_VERB("MHI is not in activate state, pm_state:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return -EIO;
+ }
+
+ /* we're in M3 or transitioning to M3 */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+ mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+ }
+
+ /* toggle wake to exit out of M2 */
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+
+ /* generate the tre */
+ buf_info = buf_ring->wp;
+ buf_info->v_addr = skb->data;
+ buf_info->cb_buf = skb;
+ buf_info->wp = tre_ring->wp;
+ buf_info->dir = mhi_chan->dir;
+ buf_info->len = len;
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+ if (ret)
+ goto map_error;
+
+ mhi_tre = tre_ring->wp;
+
+ mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
+ mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+
+ MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan,
+ (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr,
+ mhi_tre->dword[0], mhi_tre->dword[1]);
+
+ /* increment WP */
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_inc(&mhi_cntrl->pending_pkts);
+
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+ read_lock_bh(&mhi_chan->lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_bh(&mhi_chan->lock);
+ }
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+
+map_error:
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return ret;
+}
+
+int mhi_queue_dma(struct mhi_device *mhi_dev,
+ struct mhi_chan *mhi_chan,
+ void *buf,
+ size_t len,
+ enum MHI_FLAGS mflags)
+{
+ struct mhi_buf *mhi_buf = buf;
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+ struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
+ struct mhi_buf_info *buf_info;
+ struct mhi_tre *mhi_tre;
+
+ if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+ return -ENOMEM;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+ MHI_VERB("MHI is not in activate state, pm_state:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return -EIO;
+ }
+
+ /* we're in M3 or transitioning to M3 */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+ mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+ }
+
+ /* toggle wake to exit out of M2 */
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+
+ /* generate the tre */
+ buf_info = buf_ring->wp;
+ MHI_ASSERT(buf_info->used, "TRE Not Freed\n");
+ buf_info->p_addr = mhi_buf->dma_addr;
+ buf_info->pre_mapped = true;
+ buf_info->cb_buf = mhi_buf;
+ buf_info->wp = tre_ring->wp;
+ buf_info->dir = mhi_chan->dir;
+ buf_info->len = len;
+
+ mhi_tre = tre_ring->wp;
+
+ if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA) {
+ buf_info->used = true;
+ mhi_tre->ptr =
+ MHI_RSCTRE_DATA_PTR(buf_info->p_addr, buf_info->len);
+ mhi_tre->dword[0] =
+ MHI_RSCTRE_DATA_DWORD0(buf_ring->wp - buf_ring->base);
+ mhi_tre->dword[1] = MHI_RSCTRE_DATA_DWORD1;
+ } else {
+ mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
+ mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+ }
+
+ MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan,
+ (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr,
+ mhi_tre->dword[0], mhi_tre->dword[1]);
+
+ /* increment WP */
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_inc(&mhi_cntrl->pending_pkts);
+
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+ read_lock_bh(&mhi_chan->lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_bh(&mhi_chan->lock);
+ }
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+}
+
+/*
+ * mhi_gen_tre - Build one transfer ring element (TRE) describing @buf.
+ * @mhi_cntrl: controller the channel belongs to
+ * @mhi_chan: channel to queue the buffer on
+ * @buf: data buffer handed to the device
+ * @cb: cookie returned to the client in the completion result (cb_buf)
+ * @buf_len: length of @buf in bytes
+ * @flags: MHI_EOB/MHI_EOT/MHI_CHAIN control bits for the TRE
+ *
+ * Records the buffer in the buf_ring slot paired with the TRE, DMA-maps it
+ * through the controller's map_single() op, writes the TRE and advances the
+ * write pointer of both rings.  Does NOT ring the channel doorbell; callers
+ * such as mhi_queue_buf() do that separately.
+ *
+ * Returns 0 on success or the map_single() error code.
+ */
+int mhi_gen_tre(struct mhi_controller *mhi_cntrl,
+		struct mhi_chan *mhi_chan,
+		void *buf,
+		void *cb,
+		size_t buf_len,
+		enum MHI_FLAGS flags)
+{
+	struct mhi_ring *buf_ring, *tre_ring;
+	struct mhi_tre *mhi_tre;
+	struct mhi_buf_info *buf_info;
+	int eot, eob, chain, bei;
+	int ret;
+
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+
+	/* book-keeping entry travels in lock-step with the TRE at WP */
+	buf_info = buf_ring->wp;
+	buf_info->v_addr = buf;
+	buf_info->cb_buf = cb;
+	buf_info->wp = tre_ring->wp;
+	buf_info->dir = mhi_chan->dir;
+	buf_info->len = buf_len;
+
+	ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+	if (ret)
+		return ret;
+
+	/* translate caller flags into TRE dword[1] control bits */
+	eob = !!(flags & MHI_EOB);
+	eot = !!(flags & MHI_EOT);
+	chain = !!(flags & MHI_CHAIN);
+	/* bei follows the channel's interrupt moderation setting */
+	bei = !!(mhi_chan->intmod);
+
+	mhi_tre = tre_ring->wp;
+	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
+	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
+
+	MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan,
+		 (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr,
+		 mhi_tre->dword[0], mhi_tre->dword[1]);
+
+	/* increment WP */
+	mhi_add_ring_element(mhi_cntrl, tre_ring);
+	mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+	return 0;
+}
+
+/*
+ * mhi_queue_buf - Queue a client buffer on @mhi_chan and ring the doorbell.
+ * @mhi_dev: mhi device owning the channel
+ * @mhi_chan: channel to transfer on
+ * @buf: buffer to queue (also passed back as the completion cookie)
+ * @len: length of @buf in bytes
+ * @mflags: EOB/EOT/CHAIN transfer flags
+ *
+ * Generates a TRE via the channel's gen_tre() op, nudges the link out of
+ * suspend/M2 if needed, and rings the channel doorbell when doorbell access
+ * is currently valid.
+ *
+ * Returns 0 on success, -EIO if MHI is in an error state, -ENOMEM if the
+ * transfer ring is full, or the gen_tre() error code.
+ */
+int mhi_queue_buf(struct mhi_device *mhi_dev,
+		  struct mhi_chan *mhi_chan,
+		  void *buf,
+		  size_t len,
+		  enum MHI_FLAGS mflags)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_ring *tre_ring;
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * this check here only as a guard, it's always
+	 * possible mhi can enter error while executing rest of function,
+	 * which is not fatal so we do not need to hold pm_lock
+	 */
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+		MHI_VERB("MHI is not in active state, pm_state:%s\n",
+			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+		return -EIO;
+	}
+
+	tre_ring = &mhi_chan->tre_ring;
+	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+		return -ENOMEM;
+
+	/* buf doubles as the callback cookie (result.buf_addr) */
+	ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags);
+	if (unlikely(ret))
+		return ret;
+
+	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+
+	/* we're in M3 or transitioning to M3 */
+	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+		mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+	}
+
+	/* toggle wake to exit out of M2 */
+	mhi_cntrl->wake_toggle(mhi_cntrl);
+
+	if (mhi_chan->dir == DMA_TO_DEVICE)
+		atomic_inc(&mhi_cntrl->pending_pkts);
+
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+		/*
+		 * separate irq state for the channel lock; named ch_flags
+		 * so it does not shadow the pm_lock irq state above
+		 */
+		unsigned long ch_flags;
+
+		read_lock_irqsave(&mhi_chan->lock, ch_flags);
+		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+		read_unlock_irqrestore(&mhi_chan->lock, ch_flags);
+	}
+
+	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+	return 0;
+}
+
+/*
+ * mhi_destroy_device - Per-device callback that removes one MHI device.
+ * Skips devices that are not on the mhi bus and the controller device
+ * itself; virtual (channel) devices are unregistered and their reference
+ * dropped.  Always returns 0 so the caller's device iteration continues.
+ */
+int mhi_destroy_device(struct device *dev, void *data)
+{
+	struct mhi_device *mhi_dev;
+	struct mhi_controller *mhi_cntrl;
+
+	if (dev->bus != &mhi_bus_type)
+		return 0;
+
+	mhi_dev = to_mhi_device(dev);
+	mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	/* only destroying virtual devices thats attached to bus */
+	if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE)
+		return 0;
+
+	MHI_LOG("destroy device for chan:%s\n", mhi_dev->chan_name);
+
+	/* notify the client and remove the device from mhi bus */
+	device_del(dev);
+	put_device(dev);
+
+	return 0;
+}
+
+/*
+ * mhi_early_notify_device - Per-device callback delivering an early
+ * MHI_CB_FATAL_ERROR notification to devices that opted in via the
+ * early_notif flag.  Always returns 0 so iteration continues.
+ */
+int mhi_early_notify_device(struct device *dev, void *data)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	/* only devices flagged for early notification are signalled */
+	if (mhi_dev->early_notif) {
+		MHI_LOG("Early notification for dev:%s\n",
+			mhi_dev->chan_name);
+
+		mhi_notify(mhi_dev, MHI_CB_FATAL_ERROR);
+	}
+
+	return 0;
+}
+
+/*
+ * mhi_notify - Forward a status callback to the client driver bound to
+ * @mhi_dev, if any.  A device with no bound driver, or a driver without a
+ * status_cb handler, is silently ignored.
+ */
+void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason)
+{
+	struct device_driver *drv = mhi_dev->dev.driver;
+	struct mhi_driver *mhi_drv;
+
+	/* no client driver bound, nothing to notify */
+	if (!drv)
+		return;
+
+	mhi_drv = to_mhi_driver(drv);
+	if (mhi_drv->status_cb)
+		mhi_drv->status_cb(mhi_dev, cb_reason);
+}
+
+/*
+ * mhi_assign_of_node - Attach the matching "mhi_devices" DT child node to
+ * @mhi_dev based on its channel name (the "mhi,chan" property).
+ *
+ * The child node found by the loop is intentionally left with its reference
+ * held, since it is stored in mhi_dev->dev.of_node.  The parent node
+ * reference taken by of_find_node_by_name() must be dropped here, otherwise
+ * it leaks on every call.
+ */
+static void mhi_assign_of_node(struct mhi_controller *mhi_cntrl,
+			       struct mhi_device *mhi_dev)
+{
+	struct device_node *controller, *node;
+	const char *dt_name;
+	int ret;
+
+	controller = of_find_node_by_name(mhi_cntrl->of_node, "mhi_devices");
+	if (!controller)
+		return;
+
+	for_each_available_child_of_node(controller, node) {
+		ret = of_property_read_string(node, "mhi,chan", &dt_name);
+		if (ret)
+			continue;
+		if (!strcmp(mhi_dev->chan_name, dt_name)) {
+			/* keep the child ref; it lives in dev.of_node */
+			mhi_dev->dev.of_node = node;
+			break;
+		}
+	}
+
+	/* drop the reference taken by of_find_node_by_name() */
+	of_node_put(controller);
+}
+
+/*
+ * sysfs 'time' attribute: report a paired host/device time snapshot in raw
+ * tick units.  Returns a negative error code if the snapshot cannot be
+ * obtained.
+ */
+static ssize_t time_show(struct device *dev,
+			 struct device_attribute *attr,
+			 char *buf)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	u64 t_host, t_device;
+	int ret;
+
+	ret = mhi_get_remote_time_sync(mhi_dev, &t_host, &t_device);
+	if (ret) {
+		MHI_ERR("Failed to obtain time, ret:%d\n", ret);
+		return ret;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "local: %llu remote: %llu (ticks)\n",
+			 t_host, t_device);
+}
+static DEVICE_ATTR_RO(time);
+
+/*
+ * sysfs 'time_us' attribute: same snapshot as 'time' but converted to
+ * microseconds via the LOCAL/REMOTE tick conversion macros.
+ */
+static ssize_t time_us_show(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	u64 t_host, t_device;
+	int ret;
+
+	ret = mhi_get_remote_time_sync(mhi_dev, &t_host, &t_device);
+	if (ret) {
+		MHI_ERR("Failed to obtain time, ret:%d\n", ret);
+		return ret;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "local: %llu remote: %llu (us)\n",
+			 LOCAL_TICKS_TO_US(t_host),
+			 REMOTE_TICKS_TO_US(t_device));
+}
+static DEVICE_ATTR_RO(time_us);
+
+/* sysfs group exposing the time/time_us snapshot attributes */
+static struct attribute *mhi_tsync_attrs[] = {
+	&dev_attr_time.attr,
+	&dev_attr_time_us.attr,
+	NULL,
+};
+
+static const struct attribute_group mhi_tsync_group = {
+	.attrs = mhi_tsync_attrs,
+};
+
+/*
+ * mhi_destroy_timesync - Tear down time sync support: remove the sysfs
+ * group and free the tsync context.  Safe to call when time sync was never
+ * set up.
+ */
+void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl)
+{
+	if (!mhi_cntrl->mhi_tsync)
+		return;
+
+	sysfs_remove_group(&mhi_cntrl->mhi_dev->dev.kobj, &mhi_tsync_group);
+	kfree(mhi_cntrl->mhi_tsync);
+	mhi_cntrl->mhi_tsync = NULL;
+}
+
+/* expose the time/time_us attributes on the controller's mhi device */
+int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl)
+{
+	return sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj,
+				  &mhi_tsync_group);
+}
+
+/*
+ * mhi_create_time_sync_dev - Register the dedicated TIME_SYNC mhi device.
+ * Only done once the device has entered mission mode.  Failures are
+ * logged and the allocated device is released; the controller continues
+ * without a tsync device.
+ */
+static void mhi_create_time_sync_dev(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_device *mhi_dev;
+	int ret;
+
+	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+		return;
+
+	mhi_dev = mhi_alloc_device(mhi_cntrl);
+	if (!mhi_dev)
+		return;
+
+	mhi_dev->dev_type = MHI_TIMESYNC_TYPE;
+	mhi_dev->chan_name = "TIME_SYNC";
+	/* name format matches the channel devices created below */
+	dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u_%s", mhi_dev->dev_id,
+		     mhi_dev->domain, mhi_dev->bus, mhi_dev->slot,
+		     mhi_dev->chan_name);
+
+	/* add if there is a matching DT node */
+	mhi_assign_of_node(mhi_cntrl, mhi_dev);
+
+	ret = device_add(&mhi_dev->dev);
+	if (ret) {
+		MHI_ERR("Failed to register dev for chan:%s\n",
+			mhi_dev->chan_name);
+		mhi_dealloc_device(mhi_cntrl, mhi_dev);
+		return;
+	}
+
+	mhi_cntrl->tsync_dev = mhi_dev;
+}
+
+/* bind mhi channels into mhi devices */
+void mhi_create_devices(struct mhi_controller *mhi_cntrl)
+{
+	int i;
+	struct mhi_chan *mhi_chan;
+	struct mhi_device *mhi_dev;
+	int ret;
+
+	/*
+	 * we need to create time sync device before creating other
+	 * devices, because client may try to capture time during
+	 * client probe.
+	 */
+	mhi_create_time_sync_dev(mhi_cntrl);
+
+	mhi_chan = mhi_cntrl->mhi_chan;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+		/* skip unconfigured channels, channels already bound to a
+		 * device, and channels not valid in the current exec env
+		 */
+		if (!mhi_chan->configured || mhi_chan->mhi_dev ||
+		    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
+			continue;
+		mhi_dev = mhi_alloc_device(mhi_cntrl);
+		if (!mhi_dev)
+			return;
+
+		mhi_dev->dev_type = MHI_XFER_TYPE;
+		switch (mhi_chan->dir) {
+		case DMA_TO_DEVICE:
+			mhi_dev->ul_chan = mhi_chan;
+			mhi_dev->ul_chan_id = mhi_chan->chan;
+			mhi_dev->ul_xfer = mhi_chan->queue_xfer;
+			mhi_dev->ul_event_id = mhi_chan->er_index;
+			break;
+		case DMA_NONE:
+		case DMA_BIDIRECTIONAL:
+			mhi_dev->ul_chan_id = mhi_chan->chan;
+			mhi_dev->ul_event_id = mhi_chan->er_index;
+			/* fall through: bidir/offload also fill dl fields */
+		case DMA_FROM_DEVICE:
+			/* we use dl_chan for offload channels */
+			mhi_dev->dl_chan = mhi_chan;
+			mhi_dev->dl_chan_id = mhi_chan->chan;
+			mhi_dev->dl_xfer = mhi_chan->queue_xfer;
+			mhi_dev->dl_event_id = mhi_chan->er_index;
+		}
+
+		mhi_chan->mhi_dev = mhi_dev;
+
+		/*
+		 * check next channel if it matches: UL/DL channel pairs
+		 * share a name and are bound to the same mhi device
+		 */
+		if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
+			if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
+				i++;
+				mhi_chan++;
+				if (mhi_chan->dir == DMA_TO_DEVICE) {
+					mhi_dev->ul_chan = mhi_chan;
+					mhi_dev->ul_chan_id = mhi_chan->chan;
+					mhi_dev->ul_xfer = mhi_chan->queue_xfer;
+					mhi_dev->ul_event_id =
+						mhi_chan->er_index;
+				} else {
+					mhi_dev->dl_chan = mhi_chan;
+					mhi_dev->dl_chan_id = mhi_chan->chan;
+					mhi_dev->dl_xfer = mhi_chan->queue_xfer;
+					mhi_dev->dl_event_id =
+						mhi_chan->er_index;
+				}
+				mhi_chan->mhi_dev = mhi_dev;
+			}
+		}
+
+		mhi_dev->chan_name = mhi_chan->name;
+		dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u_%s",
+			     mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus,
+			     mhi_dev->slot, mhi_dev->chan_name);
+
+		/* add if there is a matching DT node */
+		mhi_assign_of_node(mhi_cntrl, mhi_dev);
+
+		/*
+		 * if set, these device should get a early notification during
+		 * early notification state
+		 */
+		mhi_dev->early_notif =
+			of_property_read_bool(mhi_dev->dev.of_node,
+					      "mhi,early-notify");
+		/* init wake source */
+		if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
+			device_init_wakeup(&mhi_dev->dev, true);
+
+		ret = device_add(&mhi_dev->dev);
+		if (ret) {
+			MHI_ERR("Failed to register dev for chan:%s\n",
+				mhi_dev->chan_name);
+			mhi_dealloc_device(mhi_cntrl, mhi_dev);
+		}
+	}
+}
+
+/*
+ * parse_xfer_event - Process one transfer completion event for @mhi_chan.
+ *
+ * For EOT/EOB/OVERFLOW completions, walks the transfer ring from the local
+ * read pointer up to the device read pointer implied by the event, unmaps
+ * and completes each buffer to the client, and recycles pre-allocated
+ * buffers.  For OOB/DB_MODE events, re-enables doorbell mode and re-rings
+ * the channel doorbell if work is pending.
+ *
+ * Locking: DB events (>= MHI_EV_CC_OOB) take the channel lock as a writer
+ * with IRQs disabled because the doorbell register is updated; all other
+ * events take it as a reader in bh context.
+ */
+static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+			    struct mhi_tre *event,
+			    struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *buf_ring, *tre_ring;
+	u32 ev_code;
+	struct mhi_result result;
+	unsigned long flags = 0;
+
+	ev_code = MHI_TRE_GET_EV_CODE(event);
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+
+	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+		-EOVERFLOW : 0;
+
+	/*
+	 * if it's a DB Event then we need to grab the lock
+	 * with preemption disable and as a write because we
+	 * have to update db register and another thread could
+	 * be doing same.
+	 */
+	if (ev_code >= MHI_EV_CC_OOB)
+		write_lock_irqsave(&mhi_chan->lock, flags);
+	else
+		read_lock_bh(&mhi_chan->lock);
+
+	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+		goto end_process_tx_event;
+
+	switch (ev_code) {
+	case MHI_EV_CC_OVERFLOW:
+	case MHI_EV_CC_EOB:
+	case MHI_EV_CC_EOT:
+	{
+		dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
+		struct mhi_tre *local_rp, *ev_tre;
+		void *dev_rp;
+		struct mhi_buf_info *buf_info;
+		u16 xfer_len;
+
+		/* Get the TRB this event points to */
+		ev_tre = mhi_to_virtual(tre_ring, ptr);
+
+		/* device rp after servicing the TREs */
+		dev_rp = ev_tre + 1;
+		if (dev_rp >= (tre_ring->base + tre_ring->len))
+			dev_rp = tre_ring->base;
+
+		result.dir = mhi_chan->dir;
+
+		/* local rp */
+		local_rp = tre_ring->rp;
+		while (local_rp != dev_rp) {
+			buf_info = buf_ring->rp;
+			/* if it's last tre get len from the event */
+			if (local_rp == ev_tre)
+				xfer_len = MHI_TRE_GET_EV_LEN(event);
+			else
+				xfer_len = buf_info->len;
+
+			/* unmap if it's not premapped by client */
+			if (likely(!buf_info->pre_mapped))
+				mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+
+			result.buf_addr = buf_info->cb_buf;
+			result.bytes_xferd = xfer_len;
+			mhi_del_ring_element(mhi_cntrl, buf_ring);
+			mhi_del_ring_element(mhi_cntrl, tre_ring);
+			local_rp = tre_ring->rp;
+
+			/* notify client */
+			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+			if (mhi_chan->dir == DMA_TO_DEVICE)
+				atomic_dec(&mhi_cntrl->pending_pkts);
+
+			/*
+			 * recycle the buffer if buffer is pre-allocated,
+			 * if there is error, not much we can do apart from
+			 * dropping the packet
+			 */
+			if (mhi_chan->pre_alloc) {
+				if (mhi_queue_buf(mhi_chan->mhi_dev, mhi_chan,
+						  buf_info->cb_buf,
+						  buf_info->len, MHI_EOT)) {
+					MHI_ERR(
+						"Error recycling buffer for chan:%d\n",
+						mhi_chan->chan);
+					kfree(buf_info->cb_buf);
+				}
+			}
+		}
+		break;
+	} /* CC_EOT */
+	case MHI_EV_CC_OOB:
+	case MHI_EV_CC_DB_MODE:
+	{
+		unsigned long flags;
+
+		MHI_VERB("DB_MODE/OOB Detected chan %d.\n", mhi_chan->chan);
+		mhi_chan->db_cfg.db_mode = 1;
+		read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+		if (tre_ring->wp != tre_ring->rp &&
+		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+		}
+		read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+		break;
+	}
+	case MHI_EV_CC_BAD_TRE:
+		MHI_ASSERT(1, "Received BAD TRE event for ring");
+		break;
+	default:
+		MHI_CRITICAL("Unknown TX completion.\n");
+
+		break;
+	} /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
+
+end_process_tx_event:
+	if (ev_code >= MHI_EV_CC_OOB)
+		write_unlock_irqrestore(&mhi_chan->lock, flags);
+	else
+		read_unlock_bh(&mhi_chan->lock);
+
+	return 0;
+}
+
+/*
+ * parse_rsc_event - Process one RSC (receive in single cookie) completion.
+ *
+ * The event carries a cookie that is the byte offset of the buf_info entry
+ * inside the buffer ring, so the completed descriptor is located directly
+ * rather than by walking the ring.
+ */
+static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
+			   struct mhi_tre *event,
+			   struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *buf_ring, *tre_ring;
+	struct mhi_buf_info *buf_info;
+	struct mhi_result result;
+	int ev_code;
+	u32 cookie; /* offset to local descriptor */
+	u16 xfer_len;
+
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+
+	ev_code = MHI_TRE_GET_EV_CODE(event);
+	cookie = MHI_TRE_GET_EV_COOKIE(event);
+	xfer_len = MHI_TRE_GET_EV_LEN(event);
+
+	/* received out of bound cookie */
+	MHI_ASSERT(cookie >= buf_ring->len, "Invalid Cookie\n");
+
+	buf_info = buf_ring->base + cookie;
+
+	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+		-EOVERFLOW : 0;
+	result.bytes_xferd = xfer_len;
+	result.buf_addr = buf_info->cb_buf;
+	result.dir = mhi_chan->dir;
+
+	read_lock_bh(&mhi_chan->lock);
+
+	/* drop the completion if the channel was disabled meanwhile */
+	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+		goto end_process_rsc_event;
+
+	MHI_ASSERT(!buf_info->used, "TRE already Freed\n");
+
+	/* notify the client */
+	mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+	/*
+	 * Note: We're arbitrarily incrementing RP even though, completion
+	 * packet we processed might not be the same one, reason we can do this
+	 * is because device guaranteed to cache descriptors in order it
+	 * receive, so even though completion event is different we can re-use
+	 * all descriptors in between.
+	 * Example:
+	 * Transfer Ring has descriptors: A, B, C, D
+	 * Last descriptor host queue is D (WP) and first descriptor
+	 * host queue is A (RP).
+	 * The completion event we just serviced is descriptor C.
+	 * Then we can safely queue descriptors to replace A, B, and C
+	 * even though host did not receive any completions.
+	 */
+	mhi_del_ring_element(mhi_cntrl, tre_ring);
+	buf_info->used = false;
+
+end_process_rsc_event:
+	read_unlock_bh(&mhi_chan->lock);
+
+	return 0;
+}
+
+/*
+ * mhi_process_cmd_completion - Complete the command at the primary command
+ * ring's read pointer with the completion code carried in @tre.  Time sync
+ * commands complete the tsync context; channel commands complete the
+ * channel waiter under the channel lock.
+ */
+static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
+				       struct mhi_tre *tre)
+{
+	dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
+	struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+	struct mhi_ring *mhi_ring = &cmd_ring->ring;
+	struct mhi_tre *cmd_pkt;
+	struct mhi_chan *mhi_chan;
+	struct mhi_timesync *mhi_tsync;
+	enum mhi_cmd_type type;
+	u32 chan;
+
+	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
+
+	/* out of order completion received */
+	MHI_ASSERT(cmd_pkt != mhi_ring->rp, "Out of order cmd completion");
+
+	type = MHI_TRE_GET_CMD_TYPE(cmd_pkt);
+
+	if (type == MHI_CMD_TYPE_TSYNC) {
+		mhi_tsync = mhi_cntrl->mhi_tsync;
+		mhi_tsync->ccs = MHI_TRE_GET_EV_CODE(tre);
+		complete(&mhi_tsync->completion);
+	} else {
+		chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
+		mhi_chan = &mhi_cntrl->mhi_chan[chan];
+		write_lock_bh(&mhi_chan->lock);
+		mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
+		complete(&mhi_chan->completion);
+		write_unlock_bh(&mhi_chan->lock);
+	}
+
+	/* retire the command ring element */
+	mhi_del_ring_element(mhi_cntrl, mhi_ring);
+}
+
+/*
+ * mhi_process_ctrl_ev_ring - Drain the control event ring.
+ *
+ * Handles device state change events (M0/M1/M3/SYS_ERR), command
+ * completions, and execution environment change events, advancing the
+ * local read pointer until it catches up with the device's.  Returns the
+ * number of events processed, or -EIO if event ring access is not allowed
+ * in the current pm state.  @event_quota is accepted for interface parity
+ * with the other ring processors but is not enforced here.
+ */
+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+			     struct mhi_event *mhi_event,
+			     u32 event_quota)
+{
+	struct mhi_tre *dev_rp, *local_rp;
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	struct mhi_event_ctxt *er_ctxt =
+		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	int count = 0;
+
+	/*
+	 * this is a quick check to avoid unnecessary event processing
+	 * in case we already in error state, but it's still possible
+	 * to transition to error state while processing events
+	 */
+	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
+		MHI_ERR("No EV access, PM_STATE:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		return -EIO;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	local_rp = ev_ring->rp;
+
+	while (dev_rp != local_rp) {
+		enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+		MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n",
+			 local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
+
+		switch (type) {
+		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
+		{
+			enum mhi_dev_state new_state;
+
+			new_state = MHI_TRE_GET_EV_STATE(local_rp);
+
+			MHI_LOG("MHI state change event to state:%s\n",
+				TO_MHI_STATE_STR(new_state));
+
+			switch (new_state) {
+			case MHI_STATE_M0:
+				mhi_pm_m0_transition(mhi_cntrl);
+				break;
+			case MHI_STATE_M1:
+				mhi_pm_m1_transition(mhi_cntrl);
+				break;
+			case MHI_STATE_M3:
+				mhi_pm_m3_transition(mhi_cntrl);
+				break;
+			case MHI_STATE_SYS_ERR:
+			{
+				enum MHI_PM_STATE new_state;
+
+				MHI_ERR("MHI system error detected\n");
+				write_lock_irq(&mhi_cntrl->pm_lock);
+				new_state = mhi_tryset_pm_state(mhi_cntrl,
+							MHI_PM_SYS_ERR_DETECT);
+				write_unlock_irq(&mhi_cntrl->pm_lock);
+				/* only first detection schedules recovery */
+				if (new_state == MHI_PM_SYS_ERR_DETECT)
+					schedule_work(
+						&mhi_cntrl->syserr_worker);
+				break;
+			}
+			default:
+				MHI_ERR("Unsupported STE:%s\n",
+					TO_MHI_STATE_STR(new_state));
+			}
+
+			break;
+		}
+		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
+			mhi_process_cmd_completion(mhi_cntrl, local_rp);
+			break;
+		case MHI_PKT_TYPE_EE_EVENT:
+		{
+			enum MHI_ST_TRANSITION st = MHI_ST_TRANSITION_MAX;
+			enum mhi_ee event = MHI_TRE_GET_EV_EXECENV(local_rp);
+
+			/* convert device ee to host ee */
+			event = mhi_translate_dev_ee(mhi_cntrl, event);
+
+			MHI_LOG("MHI EE received event:%s\n",
+				TO_MHI_EXEC_STR(event));
+			switch (event) {
+			case MHI_EE_SBL:
+				st = MHI_ST_TRANSITION_SBL;
+				break;
+			case MHI_EE_WFW:
+			case MHI_EE_AMSS:
+				st = MHI_ST_TRANSITION_MISSION_MODE;
+				break;
+			case MHI_EE_RDDM:
+				mhi_cntrl->status_cb(mhi_cntrl,
+						     mhi_cntrl->priv_data,
+						     MHI_CB_EE_RDDM);
+				write_lock_irq(&mhi_cntrl->pm_lock);
+				mhi_cntrl->ee = event;
+				write_unlock_irq(&mhi_cntrl->pm_lock);
+				wake_up_all(&mhi_cntrl->state_event);
+				break;
+			default:
+				MHI_ERR("Unhandled EE event:%s\n",
+					TO_MHI_EXEC_STR(event));
+			}
+			if (st != MHI_ST_TRANSITION_MAX)
+				mhi_queue_state_transition(mhi_cntrl, st);
+
+			break;
+		}
+		default:
+			MHI_ERR("Unhandled Event: 0x%x\n", type);
+			break;
+		}
+
+		/* retire the event and re-read the device read pointer */
+		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+		local_rp = ev_ring->rp;
+		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+		count++;
+	}
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_ring_er_db(mhi_event);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
+	return count;
+}
+
+/*
+ * mhi_process_data_event_ring - Drain a data event ring, dispatching TX and
+ * RSC TX completions to the per-channel parsers.  Processing stops when the
+ * ring is empty or @event_quota TX/RSC events have been handled (other
+ * event types do not consume quota).  Returns the number of ring elements
+ * consumed, or -EIO if event access is not allowed in the current pm state.
+ */
+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+				struct mhi_event *mhi_event,
+				u32 event_quota)
+{
+	struct mhi_tre *dev_rp, *local_rp;
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	struct mhi_event_ctxt *er_ctxt =
+		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	int count = 0;
+	u32 chan;
+	struct mhi_chan *mhi_chan;
+
+	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
+		MHI_ERR("No EV access, PM_STATE:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		return -EIO;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	local_rp = ev_ring->rp;
+
+	while (dev_rp != local_rp && event_quota > 0) {
+		enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+		MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n",
+			 local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
+
+		chan = MHI_TRE_GET_EV_CHID(local_rp);
+		mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+		if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+			parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+			event_quota--;
+		} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+			parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+			event_quota--;
+		}
+
+		/* retire the event and re-read the device read pointer */
+		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+		local_rp = ev_ring->rp;
+		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+		count++;
+	}
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_ring_er_db(mhi_event);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
+	return count;
+}
+
+/*
+ * mhi_process_tsync_event_ring - Drain the time sync event ring, matching
+ * each TSYNC event's sequence number against the pending tsync_node list
+ * and invoking the client callback for the node whose sequence matches.
+ * Returns the number of events processed, or -EIO if event ring access is
+ * not allowed in the current pm state.
+ *
+ * Fixes relative to the previous version:
+ *  - the -EIO path no longer drops mhi_cntrl->pm_lock, which this function
+ *    never acquires (the sibling ring processors return -EIO without any
+ *    unlock as well);
+ *  - bailing out on an empty tsync list now releases mhi_tsync->lock
+ *    before breaking, instead of leaking it with IRQs disabled.
+ */
+int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
+				 struct mhi_event *mhi_event,
+				 u32 event_quota)
+{
+	struct mhi_tre *dev_rp, *local_rp;
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	struct mhi_event_ctxt *er_ctxt =
+		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+	int count = 0;
+	u32 sequence;
+	u64 remote_time;
+
+	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
+		MHI_ERR("No EV access, PM_STATE:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		return -EIO;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	local_rp = ev_ring->rp;
+
+	while (dev_rp != local_rp) {
+		enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp);
+		struct tsync_node *tsync_node;
+
+		MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n",
+			 local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
+
+		MHI_ASSERT(type != MHI_PKT_TYPE_TSYNC_EVENT, "!TSYNC event");
+
+		sequence = MHI_TRE_GET_EV_TSYNC_SEQ(local_rp);
+		remote_time = MHI_TRE_GET_EV_TIME(local_rp);
+
+		do {
+			spin_lock_irq(&mhi_tsync->lock);
+			tsync_node = list_first_entry_or_null(&mhi_tsync->head,
+					      struct tsync_node, node);
+			MHI_ASSERT(!tsync_node, "Unexpected Event");
+
+			/* must not break out holding the tsync lock */
+			if (unlikely(!tsync_node)) {
+				spin_unlock_irq(&mhi_tsync->lock);
+				break;
+			}
+
+			list_del(&tsync_node->node);
+			spin_unlock_irq(&mhi_tsync->lock);
+
+			/*
+			 * device may not able to process all time sync commands
+			 * host issue and only process last command it receive
+			 */
+			if (tsync_node->sequence == sequence)
+				tsync_node->cb_func(tsync_node->mhi_dev,
+						    sequence,
+						    tsync_node->local_time,
+						    remote_time);
+			kfree(tsync_node);
+		} while (true);
+
+		/* retire the event and re-read the device read pointer */
+		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+		local_rp = ev_ring->rp;
+		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+		count++;
+	}
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_ring_er_db(mhi_event);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
+	return count;
+}
+
+/*
+ * mhi_process_bw_scale_ev_ring - Service a bandwidth scale request event.
+ *
+ * Only the most recent BW_REQ event matters, so the ring is fast-forwarded
+ * to the last element, the request is handed to the controller's bw_scale()
+ * op, and the result (ack or MHI_BW_SCALE_NACK) is written back to the
+ * bw_scale doorbell.  BW changes are not processed while suspended; the
+ * host handles them on resume (-EACCES).
+ */
+int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
+				 struct mhi_event *mhi_event,
+				 u32 event_quota)
+{
+	struct mhi_tre *dev_rp;
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	struct mhi_event_ctxt *er_ctxt =
+		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	struct mhi_link_info link_info, *cur_info = &mhi_cntrl->mhi_link_info;
+	int result, ret = 0;
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
+		MHI_LOG("No EV access, PM_STATE:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		ret = -EIO;
+		goto exit_bw_process;
+	}
+
+	/*
+	 * BW change is not process during suspend since we're suspending link,
+	 * host will process it during resume
+	 */
+	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+		ret = -EACCES;
+		goto exit_bw_process;
+	}
+
+	spin_lock_bh(&mhi_event->lock);
+	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+	/* nothing pending on the ring */
+	if (ev_ring->rp == dev_rp) {
+		spin_unlock_bh(&mhi_event->lock);
+		goto exit_bw_process;
+	}
+
+	/* if rp points to base, we need to wrap it around */
+	if (dev_rp == ev_ring->base)
+		dev_rp = ev_ring->base + ev_ring->len;
+	/* step back to the last element the device wrote */
+	dev_rp--;
+
+	MHI_ASSERT(MHI_TRE_GET_EV_TYPE(dev_rp) != MHI_PKT_TYPE_BW_REQ_EVENT,
+		   "!BW SCALE REQ event");
+
+	link_info.target_link_speed = MHI_TRE_GET_EV_LINKSPEED(dev_rp);
+	link_info.target_link_width = MHI_TRE_GET_EV_LINKWIDTH(dev_rp);
+	link_info.sequence_num = MHI_TRE_GET_EV_BW_REQ_SEQ(dev_rp);
+
+	MHI_VERB("Received BW_REQ with seq:%d link speed:0x%x width:0x%x\n",
+		 link_info.sequence_num,
+		 link_info.target_link_speed,
+		 link_info.target_link_width);
+
+	/* fast forward to currently processed element and recycle er */
+	ev_ring->rp = dev_rp;
+	ev_ring->wp = dev_rp - 1;
+	if (ev_ring->wp < ev_ring->base)
+		ev_ring->wp = ev_ring->base + ev_ring->len - ev_ring->el_size;
+	mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_ring_er_db(mhi_event);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	spin_unlock_bh(&mhi_event->lock);
+
+	ret = mhi_cntrl->bw_scale(mhi_cntrl, &link_info);
+	if (!ret)
+		*cur_info = link_info;
+
+	/* report NACK to the device if the controller rejected the change */
+	result = ret ? MHI_BW_SCALE_NACK : 0;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_write_reg(mhi_cntrl, mhi_cntrl->bw_scale_db, 0,
+			      MHI_BW_SCALE_RESULT(result,
+						  link_info.sequence_num));
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+exit_bw_process:
+	MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	return ret;
+}
+
+/*
+ * mhi_ev_task - Tasklet body for a data event ring: drain all pending
+ * events under the event ring lock.  @data is the struct mhi_event pointer
+ * passed at tasklet init.
+ */
+void mhi_ev_task(unsigned long data)
+{
+	struct mhi_event *mhi_event = (struct mhi_event *)data;
+	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+
+	MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index);
+
+	/* process all pending events */
+	spin_lock_bh(&mhi_event->lock);
+	mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+	spin_unlock_bh(&mhi_event->lock);
+}
+
+/*
+ * mhi_ctrl_ev_task - Tasklet body for the control event ring.  If register
+ * access is not currently valid, bounces runtime PM to trigger a resume so
+ * the pending event gets processed later.  If the ring turns out to be
+ * empty, polls the device state in case the MSI signalled SYS_ERR.
+ */
+void mhi_ctrl_ev_task(unsigned long data)
+{
+	struct mhi_event *mhi_event = (struct mhi_event *)data;
+	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+	enum mhi_dev_state state;
+	enum MHI_PM_STATE pm_state = 0;
+	int ret;
+
+	MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index);
+
+	/*
+	 * we can check pm_state w/o a lock here because there is no way
+	 * pm_state can change from reg access valid to no access while this
+	 * thread is being executed.
+	 */
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+		/*
+		 * we may have a pending event but not allowed to
+		 * process it since we probably in a suspended state,
+		 * trigger a resume.
+		 */
+		mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+		return;
+	}
+
+	/* process ctrl events */
+	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+
+	/*
+	 * we received a MSI but no events to process maybe device went to
+	 * SYS_ERR state, check the state
+	 */
+	if (!ret) {
+		write_lock_irq(&mhi_cntrl->pm_lock);
+		state = mhi_get_mhi_state(mhi_cntrl);
+		if (state == MHI_STATE_SYS_ERR) {
+			MHI_ERR("MHI system error detected\n");
+			pm_state = mhi_tryset_pm_state(mhi_cntrl,
+						       MHI_PM_SYS_ERR_DETECT);
+		}
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		if (pm_state == MHI_PM_SYS_ERR_DETECT)
+			schedule_work(&mhi_cntrl->syserr_worker);
+	}
+}
+
+/*
+ * mhi_msi_handlr - MSI handler for a single event ring.  Bails out early
+ * if the ring has nothing pending; for client-managed rings notifies the
+ * client instead of processing; otherwise schedules the ring's tasklet
+ * (high priority if the ring is marked as such).
+ */
+irqreturn_t mhi_msi_handlr(int irq_number, void *dev)
+{
+	struct mhi_event *mhi_event = dev;
+	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+	struct mhi_event_ctxt *er_ctxt =
+		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+	/* confirm ER has pending events to process before scheduling work */
+	if (ev_ring->rp == dev_rp)
+		return IRQ_HANDLED;
+
+	/* client managed event ring, notify pending data */
+	if (mhi_event->cl_manage) {
+		struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
+		struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
+
+		if (mhi_dev)
+			mhi_dev->status_cb(mhi_dev, MHI_CB_PENDING_DATA);
+
+		return IRQ_HANDLED;
+	}
+
+	if (IS_MHI_ER_PRIORITY_HIGH(mhi_event))
+		tasklet_hi_schedule(&mhi_event->task);
+	else
+		tasklet_schedule(&mhi_event->task);
+
+	return IRQ_HANDLED;
+}
+
+/* this is the threaded fn */
+irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
+{
+ struct mhi_controller *mhi_cntrl = dev;
+ enum mhi_dev_state state = MHI_STATE_MAX;
+ enum MHI_PM_STATE pm_state = 0;
+ enum mhi_ee ee = 0;
+
+ MHI_VERB("Enter\n");
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_cntrl->ee;
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ MHI_LOG("device ee:%s dev_state:%s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee),
+ TO_MHI_STATE_STR(state));
+ }
+
+ if (state == MHI_STATE_SYS_ERR) {
+ MHI_ERR("MHI system error detected\n");
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /* if device in rddm don't bother processing sys error */
+ if (mhi_cntrl->ee == MHI_EE_RDDM) {
+ if (mhi_cntrl->ee != ee) {
+ mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+ MHI_CB_EE_RDDM);
+ wake_up_all(&mhi_cntrl->state_event);
+ }
+ goto exit_intvec;
+ }
+
+ if (pm_state == MHI_PM_SYS_ERR_DETECT) {
+ wake_up_all(&mhi_cntrl->state_event);
+
+ /* for fatal errors, we let controller decide next step */
+ if (MHI_IN_PBL(ee))
+ mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+ MHI_CB_FATAL_ERROR);
+ else
+ schedule_work(&mhi_cntrl->syserr_worker);
+ }
+
+exit_intvec:
+ MHI_VERB("Exit\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * mhi_intvec_handlr - Hard-IRQ half of the BHI interrupt vector: wake any
+ * waiters on state_event, kick the low priority worker, and return
+ * IRQ_WAKE_THREAD so mhi_intvec_threaded_handlr() runs next.
+ */
+irqreturn_t mhi_intvec_handlr(int irq_number, void *dev)
+{
+
+	struct mhi_controller *mhi_cntrl = dev;
+
+	/* wake up any events waiting for state change */
+	MHI_VERB("Enter\n");
+	wake_up_all(&mhi_cntrl->state_event);
+	MHI_VERB("Exit\n");
+
+	schedule_work(&mhi_cntrl->low_priority_worker);
+
+	return IRQ_WAKE_THREAD;
+}
+
+/*
+ * mhi_send_cmd - Queue one command (reset/start channel, or time sync
+ * config) on the primary command ring and ring the command doorbell when
+ * register access permits.
+ *
+ * Returns 0 on success, -ENOMEM if the command ring is full, or -EINVAL
+ * for an unrecognized command (previously an unknown command would queue
+ * whatever stale bytes were at the ring write pointer).
+ */
+int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
+		 struct mhi_chan *mhi_chan,
+		 enum MHI_CMD cmd)
+{
+	struct mhi_tre *cmd_tre = NULL;
+	struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+	struct mhi_ring *ring = &mhi_cmd->ring;
+	int chan = 0;
+
+	MHI_VERB("Entered, MHI pm_state:%s dev_state:%s ee:%s\n",
+		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		 TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	/* mhi_chan is NULL for non-channel commands (e.g. tsync cfg) */
+	if (mhi_chan)
+		chan = mhi_chan->chan;
+
+	spin_lock_bh(&mhi_cmd->lock);
+	if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
+		spin_unlock_bh(&mhi_cmd->lock);
+		return -ENOMEM;
+	}
+
+	/* prepare the cmd tre */
+	cmd_tre = ring->wp;
+	switch (cmd) {
+	case MHI_CMD_RESET_CHAN:
+		cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
+		cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
+		cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
+		break;
+	case MHI_CMD_START_CHAN:
+		cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
+		cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
+		cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
+		break;
+	case MHI_CMD_TIMSYNC_CFG:
+		cmd_tre->ptr = MHI_TRE_CMD_TSYNC_CFG_PTR;
+		cmd_tre->dword[0] = MHI_TRE_CMD_TSYNC_CFG_DWORD0;
+		cmd_tre->dword[1] = MHI_TRE_CMD_TSYNC_CFG_DWORD1
+			(mhi_cntrl->mhi_tsync->er_index);
+		break;
+	default:
+		/* never queue an uninitialized TRE to hardware */
+		spin_unlock_bh(&mhi_cmd->lock);
+		return -EINVAL;
+	}
+
+	MHI_VERB("WP:0x%llx TRE: 0x%llx 0x%08x 0x%08x\n",
+		 (u64)mhi_to_physical(ring, cmd_tre), cmd_tre->ptr,
+		 cmd_tre->dword[0], cmd_tre->dword[1]);
+
+	/* queue to hardware */
+	mhi_add_ring_element(mhi_cntrl, ring);
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	spin_unlock_bh(&mhi_cmd->lock);
+
+	return 0;
+}
+
+/*
+ * mhi_prepare_channel - Move a channel from DISABLED to ENABLED.
+ *
+ * Validates the execution environment, initializes the channel context
+ * (unless the channel is client-offloaded), sends MHI_CMD_START_CHAN and
+ * waits for its completion, then pre-populates the transfer ring with
+ * kmalloc'd buffers for pre_alloc channels and rings the doorbell.
+ *
+ * Returns 0 on success; -ENOTCONN if the current ee does not permit the
+ * channel; -EIO on bad channel/pm state or command timeout/failure;
+ * -ENOMEM if pre-allocation fails (channel is unprepared again).
+ */
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+			struct mhi_chan *mhi_chan)
+{
+	int ret = 0;
+
+	MHI_LOG("Entered: preparing channel:%d\n", mhi_chan->chan);
+
+	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
+		MHI_ERR("Current EE:%s Required EE Mask:0x%x for chan:%s\n",
+			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
+			mhi_chan->name);
+		return -ENOTCONN;
+	}
+
+	mutex_lock(&mhi_chan->mutex);
+
+	/* only a channel in disabled state may be started */
+	if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
+		ret = -EIO;
+		MHI_LOG("channel:%d is not in disabled state, ch_state%d\n",
+			mhi_chan->chan, mhi_chan->ch_state);
+		goto error_init_chan;
+	}
+
+	/* client manages channel context for offload channels */
+	if (!mhi_chan->offload_ch) {
+		ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
+		if (ret) {
+			MHI_ERR("Error with init chan\n");
+			goto error_init_chan;
+		}
+	}
+
+	reinit_completion(&mhi_chan->completion);
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI host is not in active state\n");
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		ret = -EIO;
+		goto error_pm_state;
+	}
+
+	/* wake the link and bounce runtime PM so the command can go out */
+	mhi_cntrl->wake_toggle(mhi_cntrl);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+	mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+
+	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
+	if (ret) {
+		MHI_ERR("Failed to send start chan cmd\n");
+		goto error_pm_state;
+	}
+
+	ret = wait_for_completion_timeout(&mhi_chan->completion,
+				msecs_to_jiffies(mhi_cntrl->timeout_ms));
+	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
+		MHI_ERR("Failed to receive cmd completion for chan:%d\n",
+			mhi_chan->chan);
+		ret = -EIO;
+		goto error_pm_state;
+	}
+
+	write_lock_irq(&mhi_chan->lock);
+	mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
+	write_unlock_irq(&mhi_chan->lock);
+
+	/* pre allocate buffer for xfer ring */
+	if (mhi_chan->pre_alloc) {
+		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
+						       &mhi_chan->tre_ring);
+		size_t len = mhi_cntrl->buffer_len;
+
+		while (nr_el--) {
+			void *buf;
+
+			buf = kmalloc(len, GFP_KERNEL);
+			if (!buf) {
+				ret = -ENOMEM;
+				goto error_pre_alloc;
+			}
+
+			/* prepare transfer descriptors */
+			ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf,
+						len, MHI_EOT);
+			if (ret) {
+				MHI_ERR("Chan:%d error prepare buffer\n",
+					mhi_chan->chan);
+				kfree(buf);
+				goto error_pre_alloc;
+			}
+		}
+
+		read_lock_bh(&mhi_cntrl->pm_lock);
+		if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+			read_lock_irq(&mhi_chan->lock);
+			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+			read_unlock_irq(&mhi_chan->lock);
+		}
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+	}
+
+	mutex_unlock(&mhi_chan->mutex);
+
+	MHI_LOG("Chan:%d successfully moved to start state\n", mhi_chan->chan);
+
+	return 0;
+
+error_pm_state:
+	if (!mhi_chan->offload_ch)
+		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+error_init_chan:
+	mutex_unlock(&mhi_chan->mutex);
+
+	return ret;
+
+error_pre_alloc:
+	/* drop the mutex first; unprepare re-acquires it internally */
+	mutex_unlock(&mhi_chan->mutex);
+	__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+
+	return ret;
+}
+
+/*
+ * Walk the event ring from the local (software) read pointer up to the
+ * device's read pointer and relabel every TX completion event belonging
+ * to @chan as a STALE event, so subsequent event processing skips events
+ * for a channel that is being reset. Runs under the event ring lock with
+ * interrupts disabled.
+ */
+static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
+				  struct mhi_event *mhi_event,
+				  struct mhi_event_ctxt *er_ctxt,
+				  int chan)
+{
+	struct mhi_tre *dev_rp, *local_rp;
+	struct mhi_ring *ev_ring;
+	unsigned long flags;
+
+	MHI_LOG("Marking all events for chan:%d as stale\n", chan);
+
+	ev_ring = &mhi_event->ring;
+
+	/* mark all stale events related to channel as STALE event */
+	spin_lock_irqsave(&mhi_event->lock, flags);
+	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+	local_rp = ev_ring->rp;
+	while (dev_rp != local_rp) {
+		if (MHI_TRE_GET_EV_TYPE(local_rp) ==
+		    MHI_PKT_TYPE_TX_EVENT &&
+		    chan == MHI_TRE_GET_EV_CHID(local_rp))
+			local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
+					MHI_PKT_TYPE_STALE_EVENT);
+		local_rp++;
+		/* wrap back to the start of the ring buffer */
+		if (local_rp == (ev_ring->base + ev_ring->len))
+			local_rp = ev_ring->base;
+	}
+
+
+	MHI_LOG("Finished marking events as stale events\n");
+	spin_unlock_irqrestore(&mhi_event->lock, flags);
+}
+
+/*
+ * Release every buffer still queued on a regular (non-RSC) channel after
+ * a channel reset: unmap DMA, retire the ring elements, and either free
+ * the buffer (pre_alloc channels, where MHI core owns the memory) or
+ * hand it back to the client via xfer_cb with -ENOTCONN.
+ */
+static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
+				struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *buf_ring, *tre_ring;
+	struct mhi_result result;
+
+	/* reset any pending buffers */
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+	result.transaction_status = -ENOTCONN;
+	result.bytes_xferd = 0;
+	/* drain everything between read and write pointers */
+	while (tre_ring->rp != tre_ring->wp) {
+		struct mhi_buf_info *buf_info = buf_ring->rp;
+
+		/* each outbound element counts against pending_pkts */
+		if (mhi_chan->dir == DMA_TO_DEVICE)
+			atomic_dec(&mhi_cntrl->pending_pkts);
+
+		if (!buf_info->pre_mapped)
+			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+		mhi_del_ring_element(mhi_cntrl, buf_ring);
+		mhi_del_ring_element(mhi_cntrl, tre_ring);
+
+		if (mhi_chan->pre_alloc) {
+			kfree(buf_info->cb_buf);
+		} else {
+			result.buf_addr = buf_info->cb_buf;
+			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+		}
+	}
+}
+
+/*
+ * Release pending buffers on an RSC-DMA channel after reset. Unlike
+ * regular channels, RSC channels track buffer occupancy with the per-slot
+ * 'used' flag, so scan the whole buffer ring and return every in-use
+ * buffer to the client with -ENOTCONN.
+ */
+static void mhi_reset_rsc_chan(struct mhi_controller *mhi_cntrl,
+			       struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *buf_ring, *tre_ring;
+	struct mhi_result result;
+	struct mhi_buf_info *buf_info;
+
+	/* reset any pending buffers */
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+	result.transaction_status = -ENOTCONN;
+	result.bytes_xferd = 0;
+
+	buf_info = buf_ring->base;
+	for (; (void *)buf_info < buf_ring->base + buf_ring->len; buf_info++) {
+		if (!buf_info->used)
+			continue;
+
+		result.buf_addr = buf_info->cb_buf;
+		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+		buf_info->used = false;
+	}
+}
+
+/*
+ * mhi_reset_chan - reclaim all outstanding work for a channel.
+ * Marks that channel's pending completion events stale, then returns or
+ * frees all queued buffers via the channel-type-specific reset helper.
+ * Offload channels are skipped: the client owns their queueing.
+ */
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
+{
+
+	struct mhi_event *mhi_event;
+	struct mhi_event_ctxt *er_ctxt;
+	int chan = mhi_chan->chan;
+
+	/* nothing to reset, client don't queue buffers */
+	if (mhi_chan->offload_ch)
+		return;
+
+	/* hold pm_lock so pm state cannot change during cleanup */
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
+
+	mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
+
+	if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA)
+		mhi_reset_rsc_chan(mhi_cntrl, mhi_chan);
+	else
+		mhi_reset_data_chan(mhi_cntrl, mhi_chan);
+
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	MHI_LOG("Reset complete.\n");
+}
+
+/*
+ * Move a channel back to DISABLED state. Sends MHI_CMD_RESET_CHAN to the
+ * device when the pm state allows it; regardless of whether the device
+ * acknowledges, the host-side channel is always reset and its context
+ * freed (see the error_invalid_state fallthrough).
+ */
+static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
+				    struct mhi_chan *mhi_chan)
+{
+	int ret;
+
+	MHI_LOG("Entered: unprepare channel:%d\n", mhi_chan->chan);
+
+	/* no more processing events for this channel */
+	mutex_lock(&mhi_chan->mutex);
+	write_lock_irq(&mhi_chan->lock);
+	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
+		MHI_LOG("chan:%d is already disabled\n", mhi_chan->chan);
+		write_unlock_irq(&mhi_chan->lock);
+		mutex_unlock(&mhi_chan->mutex);
+		return;
+	}
+
+	/* flip state first so event handlers stop servicing the channel */
+	mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+	write_unlock_irq(&mhi_chan->lock);
+
+	reinit_completion(&mhi_chan->completion);
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		goto error_invalid_state;
+	}
+
+	mhi_cntrl->wake_toggle(mhi_cntrl);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+	mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
+	if (ret) {
+		MHI_ERR("Failed to send reset chan cmd\n");
+		goto error_invalid_state;
+	}
+
+	/* even if it fails we will still reset */
+	ret = wait_for_completion_timeout(&mhi_chan->completion,
+				msecs_to_jiffies(mhi_cntrl->timeout_ms));
+	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
+		MHI_ERR("Failed to receive cmd completion, still resetting\n");
+
+error_invalid_state:
+	/* host-side cleanup happens on every path, success or failure */
+	if (!mhi_chan->offload_ch) {
+		mhi_reset_chan(mhi_cntrl, mhi_chan);
+		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+	}
+	MHI_LOG("chan:%d successfully resetted\n", mhi_chan->chan);
+	mutex_unlock(&mhi_chan->mutex);
+}
+
+/* Debugfs show: one-line summary of controller pm/device state and counters */
+int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d)
+{
+	struct mhi_controller *cntrl = m->private;
+
+	/* output split over two calls; concatenated bytes are unchanged */
+	seq_printf(m, "pm_state:%s dev_state:%s EE:%s",
+		   to_mhi_pm_state_str(cntrl->pm_state),
+		   TO_MHI_STATE_STR(cntrl->dev_state),
+		   TO_MHI_EXEC_STR(cntrl->ee));
+	seq_printf(m,
+		   " M0:%u M2:%u M3:%u M3_Fast:%u wake:%d dev_wake:%u alloc_size:%u pending_pkts:%u\n",
+		   cntrl->M0, cntrl->M2, cntrl->M3,
+		   cntrl->M3_FAST, cntrl->wake_set,
+		   atomic_read(&cntrl->dev_wake),
+		   atomic_read(&cntrl->alloc_size),
+		   atomic_read(&cntrl->pending_pkts));
+	return 0;
+}
+
+/*
+ * Debugfs show: dump event ring context (intmod, base/len, rp/wp) and the
+ * local read pointer plus last doorbell value for every event ring.
+ * Offload rings are owned elsewhere, so only their index is printed.
+ */
+int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d)
+{
+	struct mhi_controller *mhi_cntrl = m->private;
+	struct mhi_event *mhi_event;
+	struct mhi_event_ctxt *er_ctxt;
+
+	int i;
+
+	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+		     mhi_event++) {
+		struct mhi_ring *ring = &mhi_event->ring;
+
+		if (mhi_event->offload_ev) {
+			seq_printf(m, "Index:%d offload event ring\n", i);
+		} else {
+			seq_printf(m,
+				   "Index:%d modc:%d modt:%d base:0x%0llx len:0x%llx",
+				   i, er_ctxt->intmodc, er_ctxt->intmodt,
+				   er_ctxt->rbase, er_ctxt->rlen);
+			seq_printf(m,
+				   " rp:0x%llx wp:0x%llx local_rp:0x%llx db:0x%llx\n",
+				   er_ctxt->rp, er_ctxt->wp,
+				   (u64)mhi_to_physical(ring, ring->rp),
+				   (u64)mhi_event->db_cfg.db_val);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Debugfs show: dump channel context (state, burst mode, poll config,
+ * type, event ring index) and ring pointers for every channel that has
+ * a client device bound; offload channels print only their name/number.
+ */
+int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d)
+{
+	struct mhi_controller *mhi_cntrl = m->private;
+	struct mhi_chan *mhi_chan;
+	struct mhi_chan_ctxt *chan_ctxt;
+	int i;
+
+	mhi_chan = mhi_cntrl->mhi_chan;
+	chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
+		struct mhi_ring *ring = &mhi_chan->tre_ring;
+
+		if (mhi_chan->offload_ch) {
+			seq_printf(m, "%s(%u) offload channel\n",
+				   mhi_chan->name, mhi_chan->chan);
+		} else if (mhi_chan->mhi_dev) {
+			/* label fixed: the value printed is chan_ctxt->pollcfg */
+			seq_printf(m,
+				   "%s(%u) state:0x%x brstmode:0x%x pollcfg:0x%x type:0x%x erindex:%u",
+				   mhi_chan->name, mhi_chan->chan,
+				   chan_ctxt->chstate, chan_ctxt->brstmode,
+				   chan_ctxt->pollcfg, chan_ctxt->chtype,
+				   chan_ctxt->erindex);
+			seq_printf(m,
+				   " base:0x%llx len:0x%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n",
+				   chan_ctxt->rbase, chan_ctxt->rlen,
+				   chan_ctxt->wp,
+				   (u64)mhi_to_physical(ring, ring->rp),
+				   (u64)mhi_to_physical(ring, ring->wp),
+				   (u64)mhi_chan->db_cfg.db_val);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * mhi_prepare_for_transfer - move both of a client device's channels
+ * (UL first, then DL) to the start state. On failure, unwinds only the
+ * channels already started so the device is left fully unprepared.
+ * Returns 0 on success or the first failing channel's errno.
+ */
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+{
+	int ret, dir;
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan;
+
+	for (dir = 0; dir < 2; dir++) {
+		/* dir 0 = uplink, dir 1 = downlink */
+		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
+
+		if (!mhi_chan)
+			continue;
+
+		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
+		if (ret) {
+			MHI_ERR("Error moving chan %s,%d to START state\n",
+				mhi_chan->name, mhi_chan->chan);
+			goto error_open_chan;
+		}
+	}
+
+	return 0;
+
+error_open_chan:
+	/* walk back over the directions prepared before the failure */
+	for (--dir; dir >= 0; dir--) {
+		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
+
+		if (!mhi_chan)
+			continue;
+
+		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_prepare_for_transfer);
+
+/*
+ * mhi_unprepare_from_transfer - reset both channels of a client device,
+ * downlink first and then uplink (reverse of the prepare order).
+ */
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *chan_list[] = { mhi_dev->dl_chan, mhi_dev->ul_chan };
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		if (!chan_list[i])
+			continue;
+
+		__mhi_unprepare_channel(mhi_cntrl, chan_list[i]);
+	}
+}
+EXPORT_SYMBOL(mhi_unprepare_from_transfer);
+
+/*
+ * mhi_get_no_free_descriptors - number of free TREs on the transfer ring
+ * for the given direction (DMA_TO_DEVICE selects the UL channel, anything
+ * else the DL channel).
+ */
+int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev,
+				enum dma_data_direction dir)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan;
+
+	if (dir == DMA_TO_DEVICE)
+		mhi_chan = mhi_dev->ul_chan;
+	else
+		mhi_chan = mhi_dev->dl_chan;
+
+	return get_nr_avail_ring_elements(mhi_cntrl, &mhi_chan->tre_ring);
+}
+EXPORT_SYMBOL(mhi_get_no_free_descriptors);
+
+/*
+ * bus_find_device() match callback: non-zero when @dev is a controller
+ * device whose domain/bus/slot/dev_id all equal the template in @tmp.
+ */
+static int __mhi_bdf_to_controller(struct device *dev, void *tmp)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_device *match = tmp;
+
+	if (mhi_dev->dev_type != MHI_CONTROLLER_TYPE)
+		return 0;
+	if (mhi_dev->domain != match->domain || mhi_dev->bus != match->bus)
+		return 0;
+	if (mhi_dev->slot != match->slot || mhi_dev->dev_id != match->dev_id)
+		return 0;
+
+	return 1;
+}
+
+/*
+ * mhi_bdf_to_controller - look up a controller by PCIe domain/bus/slot
+ * plus device id. Returns NULL when no matching device is registered
+ * on the MHI bus.
+ */
+struct mhi_controller *mhi_bdf_to_controller(u32 domain,
+					     u32 bus,
+					     u32 slot,
+					     u32 dev_id)
+{
+	struct device *dev;
+	struct mhi_device key = {
+		.domain = domain,
+		.bus = bus,
+		.slot = slot,
+		.dev_id = dev_id,
+	};
+
+	dev = bus_find_device(&mhi_bus_type, NULL, &key,
+			      __mhi_bdf_to_controller);
+	if (!dev)
+		return NULL;
+
+	return to_mhi_device(dev)->mhi_cntrl;
+}
+EXPORT_SYMBOL(mhi_bdf_to_controller);
+
+/*
+ * mhi_poll - process pending events on the DL channel's event ring,
+ * bounded by @budget. Returns the value of the ring's process_event
+ * handler (presumably the number of events processed or an error code
+ * — confirm against the handler implementations).
+ */
+int mhi_poll(struct mhi_device *mhi_dev,
+	     u32 budget)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
+	struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+	int ret;
+
+	/* serialize against the interrupt-driven event processing path */
+	spin_lock_bh(&mhi_event->lock);
+	ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
+	spin_unlock_bh(&mhi_event->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_poll);
+
+/*
+ * mhi_get_remote_time_sync - sample host and device time back-to-back.
+ * Forces the link to M0, disables link low-power modes, then reads the
+ * host clock and the device's time register with preemption and IRQs
+ * off so the two samples are as close together as possible.
+ * Returns 0 on success, -EIO if timesync is unsupported or MHI is in
+ * an error state.
+ */
+int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
+			     u64 *t_host,
+			     u64 *t_dev)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+	int ret;
+
+	/* not all devices support time feature */
+	if (!mhi_tsync)
+		return -EIO;
+
+	/* bring to M0 state */
+	ret = __mhi_device_get_sync(mhi_cntrl);
+	if (ret)
+		return ret;
+
+	mutex_lock(&mhi_tsync->lpm_mutex);
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+		MHI_ERR("MHI is not in active state, pm_state:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		ret = -EIO;
+		goto error_invalid_state;
+	}
+
+	/* disable link level low power modes */
+	ret = mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data);
+	if (ret)
+		goto error_invalid_state;
+
+	/*
+	 * time critical code to fetch device times,
+	 * delay between these two steps should be
+	 * deterministic as possible.
+	 */
+	preempt_disable();
+	local_irq_disable();
+
+	*t_host = mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data);
+	*t_dev = readq_relaxed_no_log(mhi_tsync->time_reg);
+
+	local_irq_enable();
+	preempt_enable();
+
+	mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data);
+
+	/*
+	 * NOTE: the success path intentionally falls through this label —
+	 * the wake vote and locks below are released on every path.
+	 */
+error_invalid_state:
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	mutex_unlock(&mhi_tsync->lpm_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_get_remote_time_sync);
+
+/**
+ * mhi_get_remote_time - Get external modem time relative to host time
+ * Trigger event to capture modem time, also capture host time so client
+ * can do a relative drift comparison.
+ * Recommended only tsync device calls this method and do not call this
+ * from atomic context
+ * @mhi_dev: Device associated with the channels
+ * @sequence: unique sequence id to track the event
+ * @cb_func: callback invoked when the device reports its time
+ *
+ * Returns 0 on success; -EIO if timesync is unsupported or MHI is in an
+ * error state; -ENOMEM on allocation failure.
+ */
+int mhi_get_remote_time(struct mhi_device *mhi_dev,
+			u32 sequence,
+			void (*cb_func)(struct mhi_device *mhi_dev,
+					u32 sequence,
+					u64 local_time,
+					u64 remote_time))
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+	struct tsync_node *tsync_node;
+	int ret;
+
+	/* not all devices support time feature */
+	if (!mhi_tsync)
+		return -EIO;
+
+	/* tsync db can only be rung in M0 state */
+	ret = __mhi_device_get_sync(mhi_cntrl);
+	if (ret)
+		return ret;
+
+	/*
+	 * technically we can use GFP_KERNEL, but wants to avoid
+	 * # of times scheduling out
+	 */
+	tsync_node = kzalloc(sizeof(*tsync_node), GFP_ATOMIC);
+	if (!tsync_node) {
+		ret = -ENOMEM;
+		goto error_no_mem;
+	}
+
+	tsync_node->sequence = sequence;
+	tsync_node->cb_func = cb_func;
+	tsync_node->mhi_dev = mhi_dev;
+
+	/* disable link level low power modes */
+	mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data);
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+		MHI_ERR("MHI is not in active state, pm_state:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		ret = -EIO;
+		goto error_invalid_state;
+	}
+
+	/* node is consumed by the completion path once the db is rung */
+	spin_lock_irq(&mhi_tsync->lock);
+	list_add_tail(&tsync_node->node, &mhi_tsync->head);
+	spin_unlock_irq(&mhi_tsync->lock);
+
+	/*
+	 * time critical code, delay between these two steps should be
+	 * deterministic as possible.
+	 */
+	preempt_disable();
+	local_irq_disable();
+
+	tsync_node->local_time =
+		mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data);
+	writel_relaxed_no_log(tsync_node->sequence, mhi_tsync->db);
+	/* write must go thru immediately */
+	wmb();
+
+	local_irq_enable();
+	preempt_enable();
+
+	ret = 0;
+
+	/* success path also falls through; kfree only fires on failure */
+error_invalid_state:
+	if (ret)
+		kfree(tsync_node);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data);
+
+error_no_mem:
+	/* drop the wake vote taken by __mhi_device_get_sync() */
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_get_remote_time);
+
+/*
+ * mhi_debug_reg_dump - log host/device MHI state plus a fixed table of
+ * MHI, BHI and BHIE registers for post-mortem debugging. Read failures
+ * are logged alongside the (possibly stale) value rather than skipped.
+ */
+void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl)
+{
+	enum mhi_dev_state state;
+	enum mhi_ee ee;
+	int i, ret;
+	u32 val;
+	void __iomem *mhi_base = mhi_cntrl->regs;
+	void __iomem *bhi_base = mhi_cntrl->bhi;
+	void __iomem *bhie_base = mhi_cntrl->bhie;
+	void __iomem *wake_db = mhi_cntrl->wake_db;
+	/* NULL-named sentinel terminates the table scan below */
+	struct {
+		const char *name;
+		int offset;
+		void *base;
+	} debug_reg[] = {
+		{ "MHI_CNTRL", MHICTRL, mhi_base},
+		{ "MHI_STATUS", MHISTATUS, mhi_base},
+		{ "MHI_WAKE_DB", 0, wake_db},
+		{ "BHI_EXECENV", BHI_EXECENV, bhi_base},
+		{ "BHI_STATUS", BHI_STATUS, bhi_base},
+		{ "BHI_ERRCODE", BHI_ERRCODE, bhi_base},
+		{ "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base},
+		{ "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base},
+		{ "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base},
+		{ "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base},
+		{ "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base},
+		{ "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base},
+		{ "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base},
+		{ NULL },
+	};
+
+	MHI_LOG("host pm_state:%s dev_state:%s ee:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	state = mhi_get_mhi_state(mhi_cntrl);
+	ee = mhi_get_exec_env(mhi_cntrl);
+
+	MHI_LOG("device ee:%s dev_state:%s\n", TO_MHI_EXEC_STR(ee),
+		TO_MHI_STATE_STR(state));
+
+	for (i = 0; debug_reg[i].name; i++) {
+		ret = mhi_read_reg(mhi_cntrl, debug_reg[i].base,
+				   debug_reg[i].offset, &val);
+		MHI_LOG("reg:%s val:0x%x, ret:%d\n", debug_reg[i].name, val,
+			ret);
+	}
+}
+EXPORT_SYMBOL(mhi_debug_reg_dump);
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
new file mode 100644
index 0000000..3fe6545
--- /dev/null
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -0,0 +1,1522 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/mhi.h>
+#include "mhi_internal.h"
+
+/*
+ * Not all MHI state transitions are sync transitions. Linkdown, SSR, and
+ * shutdown can happen anytime asynchronously. This function will move to
+ * a new state only if the transition is allowed.
+ *
+ * Priority increases as we go down; for example, while in any state of L0,
+ * a state from L1, L2, or L3 can be set. A notable exception to this rule
+ * is the DISABLE state: from DISABLE we can only transition to POR. Also,
+ * for example, while in an L2 state one cannot jump back to L1 or L0
+ * states.
+ * Valid transitions:
+ * L0: DISABLE <--> POR
+ *     POR <--> POR
+ *     POR -> M0 -> M2 --> M0
+ *     POR -> FW_DL_ERR
+ *     FW_DL_ERR <--> FW_DL_ERR
+ *     M0 <--> M0
+ *     M0 -> FW_DL_ERR
+ *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L2: SHUTDOWN_PROCESS -> DISABLE
+ * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
+ *     LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
+ */
+/*
+ * Indexed by find_last_bit() of the current pm state; each entry lists
+ * the bitmask of states reachable from 'from_state'.
+ */
+static struct mhi_pm_transitions const mhi_state_transitions[] = {
+	/* L0 States */
+	{
+		MHI_PM_DISABLE,
+		MHI_PM_POR
+	},
+	{
+		MHI_PM_POR,
+		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
+		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+	},
+	{
+		MHI_PM_M0,
+		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
+		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+	},
+	{
+		MHI_PM_M2,
+		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	{
+		MHI_PM_M3_ENTER,
+		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	{
+		MHI_PM_M3,
+		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	{
+		MHI_PM_M3_EXIT,
+		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	{
+		MHI_PM_FW_DL_ERR,
+		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	/* L1 States */
+	{
+		MHI_PM_SYS_ERR_DETECT,
+		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	{
+		MHI_PM_SYS_ERR_PROCESS,
+		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	/* L2 States */
+	{
+		MHI_PM_SHUTDOWN_PROCESS,
+		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	/* L3 States */
+	{
+		MHI_PM_LD_ERR_FATAL_DETECT,
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
+	},
+};
+
+/*
+ * mhi_tryset_pm_state - attempt a pm state transition per the
+ * mhi_state_transitions table. Returns the resulting pm state: the new
+ * state if allowed, otherwise the unchanged current state (hence
+ * __must_check). Callers hold pm_lock as a writer (see e.g.
+ * mhi_ready_state_transition / mhi_pm_m0_transition).
+ */
+enum MHI_PM_STATE __must_check mhi_tryset_pm_state(
+					struct mhi_controller *mhi_cntrl,
+					enum MHI_PM_STATE state)
+{
+	unsigned long cur_state = mhi_cntrl->pm_state;
+	/* pm states are one-hot bits, so the highest set bit is the state */
+	int index = find_last_bit(&cur_state, 32);
+
+	if (unlikely(index >= ARRAY_SIZE(mhi_state_transitions))) {
+		MHI_CRITICAL("cur_state:%s is not a valid pm_state\n",
+			     to_mhi_pm_state_str(cur_state));
+		return cur_state;
+	}
+
+	if (unlikely(mhi_state_transitions[index].from_state != cur_state)) {
+		MHI_ERR("index:%u cur_state:%s != actual_state: %s\n",
+			index, to_mhi_pm_state_str(cur_state),
+			to_mhi_pm_state_str
+			(mhi_state_transitions[index].from_state));
+		return cur_state;
+	}
+
+	if (unlikely(!(mhi_state_transitions[index].to_states & state))) {
+		MHI_LOG(
+			"Not allowing pm state transition from:%s to:%s state\n",
+			to_mhi_pm_state_str(cur_state),
+			to_mhi_pm_state_str(state));
+		return cur_state;
+	}
+
+	MHI_VERB("Transition to pm state from:%s to:%s\n",
+		 to_mhi_pm_state_str(cur_state), to_mhi_pm_state_str(state));
+
+	/* snapshot timesync while registers are accessible on both sides */
+	if (MHI_REG_ACCESS_VALID(cur_state) && MHI_REG_ACCESS_VALID(state))
+		mhi_timesync_log(mhi_cntrl);
+
+	mhi_cntrl->pm_state = state;
+	return mhi_cntrl->pm_state;
+}
+
+/*
+ * mhi_set_mhi_state - program the requested device state into MHICTRL.
+ * RESET is requested through its own MHICTRL bit rather than the
+ * MHISTATE field, so select the field accordingly.
+ */
+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
+		       enum mhi_dev_state state)
+{
+	u32 mask = MHICTRL_MHISTATE_MASK;
+	u32 shift = MHICTRL_MHISTATE_SHIFT;
+	u32 val = state;
+
+	if (state == MHI_STATE_RESET) {
+		mask = MHICTRL_RESET_MASK;
+		shift = MHICTRL_RESET_SHIFT;
+		val = 1;
+	}
+
+	mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, mask, shift,
+			    val);
+}
+
+/*
+ * nop for backward compatibility: devices allowed to ring db registers
+ * in M2 state do not need a wake toggle before issuing commands.
+ */
+static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
+{
+}
+
+/* briefly assert then release device wake to nudge the device out of M2 */
+static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
+{
+	mhi_cntrl->wake_get(mhi_cntrl, false);
+	mhi_cntrl->wake_put(mhi_cntrl, true);
+}
+
+/* set device wake */
+void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
+{
+ unsigned long flags;
+
+ /* if set, regardless of count set the bit if not set */
+ if (unlikely(force)) {
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ atomic_inc(&mhi_cntrl->dev_wake);
+ if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
+ !mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+ mhi_cntrl->wake_set = true;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+ } else {
+ /* if resources requested already, then increment and exit */
+ if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
+ return;
+
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
+ MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
+ !mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+ mhi_cntrl->wake_set = true;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+ }
+}
+
+/* clear device wake */
+void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, bool override)
+{
+ unsigned long flags;
+
+ MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake) == 0, "dev_wake == 0");
+
+ /* resources not dropping to 0, decrement and exit */
+ if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
+ return;
+
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
+ MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
+ mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
+ mhi_cntrl->wake_set = false;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+}
+
+/*
+ * mhi_ready_state_transition - drive the device from RESET to M0.
+ * Waits for the device to clear MHICTRL.RESET and set MHISTATUS.READY,
+ * moves pm state to POR, programs the MMIO registers, primes all
+ * software event rings, and finally requests M0.
+ * Returns 0 on success, -EIO/-ETIMEDOUT on failure.
+ */
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
+{
+	void __iomem *base = mhi_cntrl->regs;
+	u32 reset = 1, ready = 0;
+	struct mhi_event *mhi_event;
+	enum MHI_PM_STATE cur_state;
+	int ret, i;
+
+	MHI_LOG("Waiting to enter READY state\n");
+
+	/* wait for RESET to be cleared and READY bit to be set */
+	wait_event_timeout(mhi_cntrl->state_event,
+			   MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
+			   mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
+					      MHICTRL_RESET_MASK,
+					      MHICTRL_RESET_SHIFT, &reset) ||
+			   mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
+					      MHISTATUS_READY_MASK,
+					      MHISTATUS_READY_SHIFT, &ready) ||
+			   (!reset && ready),
+			   msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	/* device enter into error state */
+	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state))
+		return -EIO;
+
+	/* device did not transition to ready state */
+	if (reset || !ready)
+		return -ETIMEDOUT;
+
+	MHI_LOG("Device in READY State\n");
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
+	mhi_cntrl->dev_state = MHI_STATE_READY;
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	if (cur_state != MHI_PM_POR) {
+		MHI_ERR("Error moving to state %s from %s\n",
+			to_mhi_pm_state_str(MHI_PM_POR),
+			to_mhi_pm_state_str(cur_state));
+		return -EIO;
+	}
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+		goto error_mmio;
+
+	ret = mhi_init_mmio(mhi_cntrl);
+	if (ret) {
+		MHI_ERR("Error programming mmio registers\n");
+		goto error_mmio;
+	}
+
+	/* add elements to all sw event rings */
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		struct mhi_ring *ring = &mhi_event->ring;
+
+		/* hw rings are primed later, in mission mode */
+		if (mhi_event->offload_ev || mhi_event->hw_ring)
+			continue;
+
+		/* wp at the last element marks the whole ring available */
+		ring->wp = ring->base + ring->len - ring->el_size;
+		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
+		/* needs to update to all cores */
+		smp_wmb();
+
+		/* ring the db for event rings */
+		spin_lock_irq(&mhi_event->lock);
+		mhi_ring_er_db(mhi_event);
+		spin_unlock_irq(&mhi_event->lock);
+	}
+
+	/* set device into M0 state */
+	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return 0;
+
+error_mmio:
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return -EIO;
+}
+
+/*
+ * mhi_pm_m0_transition - handle the device entering M0. Updates pm
+ * state, then re-rings every event ring, the primary command ring (if
+ * non-empty, mission mode only), and every non-empty channel ring so
+ * work queued while the device was unavailable resumes.
+ * Returns 0 on success, -EIO if the pm transition is rejected.
+ */
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
+{
+	enum MHI_PM_STATE cur_state;
+	struct mhi_chan *mhi_chan;
+	int i;
+
+	MHI_LOG("Entered With State:%s PM_STATE:%s\n",
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	mhi_cntrl->dev_state = MHI_STATE_M0;
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+	if (unlikely(cur_state != MHI_PM_M0)) {
+		MHI_ERR("Failed to transition to state %s from %s\n",
+			to_mhi_pm_state_str(MHI_PM_M0),
+			to_mhi_pm_state_str(cur_state));
+		return -EIO;
+	}
+	/* M0 entry counter, reported via debugfs */
+	mhi_cntrl->M0++;
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	/* hold a wake vote while the doorbells are replayed */
+	mhi_cntrl->wake_get(mhi_cntrl, true);
+
+	/* ring all event rings and CMD ring only if we're in mission mode */
+	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
+		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+		struct mhi_cmd *mhi_cmd =
+			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+
+		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+			if (mhi_event->offload_ev)
+				continue;
+
+			spin_lock_irq(&mhi_event->lock);
+			mhi_ring_er_db(mhi_event);
+			spin_unlock_irq(&mhi_event->lock);
+		}
+
+		/* only ring primary cmd ring */
+		spin_lock_irq(&mhi_cmd->lock);
+		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
+			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+		spin_unlock_irq(&mhi_cmd->lock);
+	}
+
+	/* ring channel db registers */
+	mhi_chan = mhi_cntrl->mhi_chan;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+		write_lock_irq(&mhi_chan->lock);
+		if (mhi_chan->db_cfg.reset_req)
+			mhi_chan->db_cfg.db_mode = true;
+
+		/* only ring DB if ring is not empty */
+		if (tre_ring->base && tre_ring->wp != tre_ring->rp)
+			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+		write_unlock_irq(&mhi_chan->lock);
+	}
+
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	wake_up_all(&mhi_cntrl->state_event);
+	MHI_VERB("Exited\n");
+
+	return 0;
+}
+
+/*
+ * mhi_pm_m1_transition - handle the device reporting M1 by moving it
+ * into M2 (the host responds to M1 with an M2 request). If a transfer
+ * is pending or a wake vote is held, M2 is exited immediately by
+ * toggling device wake; otherwise the controller is notified it may
+ * idle. A rejected pm transition (e.g. racing with M3) is a no-op.
+ */
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
+{
+	enum MHI_PM_STATE state;
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	/* if it fails, means we transition to M3 */
+	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
+	if (state == MHI_PM_M2) {
+		MHI_VERB("Entered M2 State\n");
+		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
+		mhi_cntrl->dev_state = MHI_STATE_M2;
+		/* M2 entry counter, reported via debugfs */
+		mhi_cntrl->M2++;
+
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		wake_up_all(&mhi_cntrl->state_event);
+
+		/* transfer pending, exit M2 immediately */
+		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
+			     atomic_read(&mhi_cntrl->dev_wake))) {
+			MHI_VERB(
+				"Exiting M2 Immediately, pending_pkts:%d dev_wake:%d\n",
+				atomic_read(&mhi_cntrl->pending_pkts),
+				atomic_read(&mhi_cntrl->dev_wake));
+			read_lock_bh(&mhi_cntrl->pm_lock);
+			mhi_cntrl->wake_get(mhi_cntrl, true);
+			mhi_cntrl->wake_put(mhi_cntrl, true);
+			read_unlock_bh(&mhi_cntrl->pm_lock);
+		} else {
+			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+					     MHI_CB_IDLE);
+		}
+	} else {
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+	}
+}
+
+/*
+ * mhi_pm_m3_transition - record the device's entry into M3 (suspend).
+ * Wakes any waiters blocked on state_event. Returns 0 on success or
+ * -EIO if the pm state machine rejects the transition.
+ */
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
+{
+	enum MHI_PM_STATE state;
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	mhi_cntrl->dev_state = MHI_STATE_M3;
+	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+	if (state != MHI_PM_M3) {
+		MHI_ERR("Failed to transition to state %s from %s\n",
+			to_mhi_pm_state_str(MHI_PM_M3),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		return -EIO;
+	}
+	wake_up_all(&mhi_cntrl->state_event);
+	/* M3 entry counter, reported via debugfs */
+	mhi_cntrl->M3++;
+
+	MHI_LOG("Entered mhi_state:%s pm_state:%s\n",
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		to_mhi_pm_state_str(mhi_cntrl->pm_state));
+	return 0;
+}
+
+/*
+ * mhi_pm_mission_mode_transition - handle the device entering mission
+ * mode (AMSS). Notifies the controller, forces M0, primes the hardware
+ * event rings, initializes timesync, and creates client devices plus
+ * the userspace vote sysfs nodes.
+ * Returns 0 on success or a negative errno.
+ */
+static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
+{
+	int i, ret;
+	struct mhi_event *mhi_event;
+
+	MHI_LOG("Processing Mission Mode Transition\n");
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+		return -EIO;
+
+	wake_up_all(&mhi_cntrl->state_event);
+
+	mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+			     MHI_CB_EE_MISSION_MODE);
+
+	/* force MHI to be in M0 state before continuing */
+	ret = __mhi_device_get_sync(mhi_cntrl);
+	if (ret)
+		return ret;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+
+	/* add elements to all HW event rings */
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		ret = -EIO;
+		goto error_mission_mode;
+	}
+
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		struct mhi_ring *ring = &mhi_event->ring;
+
+		/* sw rings were already primed in the READY transition */
+		if (mhi_event->offload_ev || !mhi_event->hw_ring)
+			continue;
+
+		ring->wp = ring->base + ring->len - ring->el_size;
+		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
+		/* all ring updates must get updated immediately */
+		smp_wmb();
+
+		spin_lock_irq(&mhi_event->lock);
+		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
+			mhi_ring_er_db(mhi_event);
+		spin_unlock_irq(&mhi_event->lock);
+
+	}
+
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	/* setup support for time sync */
+	mhi_init_timesync(mhi_cntrl);
+
+	MHI_LOG("Adding new devices\n");
+
+	/* add supported devices */
+	mhi_create_devices(mhi_cntrl);
+
+	/* setup sysfs nodes for userspace votes */
+	mhi_create_vote_sysfs(mhi_cntrl);
+
+	/* re-acquire pm_lock: the label below releases it on every path */
+	read_lock_bh(&mhi_cntrl->pm_lock);
+
+error_mission_mode:
+	/* drop the wake vote taken by __mhi_device_get_sync() */
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	MHI_LOG("Exit with ret:%d\n", ret);
+
+	return ret;
+}
+
+/* handles both sys_err and shutdown transitions */
+static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
+ enum MHI_PM_STATE transition_state)
+{
+ enum MHI_PM_STATE cur_state, prev_state;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_event_ctxt *er_ctxt;
+ int ret, i;
+
+ MHI_LOG("Enter with from pm_state:%s MHI_STATE:%s to pm_state:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(transition_state));
+
+ /* We must notify MHI control driver so it can clean up first */
+ if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
+ /*
+ * if controller support rddm, we do not process
+ * sys error state, instead we will jump directly
+ * to rddm state
+ */
+ if (mhi_cntrl->rddm_image) {
+ MHI_LOG(
+ "Controller Support RDDM, skipping SYS_ERR_PROCESS\n");
+ return;
+ }
+ mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+ MHI_CB_SYS_ERROR);
+ }
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ prev_state = mhi_cntrl->pm_state;
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
+ if (cur_state == transition_state) {
+ mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
+ mhi_cntrl->dev_state = MHI_STATE_RESET;
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /* wake up any threads waiting for state transitions */
+ wake_up_all(&mhi_cntrl->state_event);
+
+ /* not handling sys_err, could be middle of shut down */
+ if (cur_state != transition_state) {
+ MHI_LOG("Failed to transition to state:0x%x from:0x%x\n",
+ transition_state, cur_state);
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return;
+ }
+
+ /* trigger MHI RESET so device will not access host ddr */
+ if (MHI_REG_ACCESS_VALID(prev_state)) {
+ u32 in_reset = -1;
+ unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
+
+ MHI_LOG("Trigger device into MHI_RESET\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+
+ /* wait for reset to be cleared */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_read_reg_field(mhi_cntrl,
+ mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT, &in_reset)
+ || !in_reset, timeout);
+ if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) {
+ MHI_CRITICAL("Device failed to exit RESET state\n");
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return;
+ }
+
+ /*
+	 * device clears INTVEC as part of RESET processing,
+ * re-program it
+ */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ }
+
+ MHI_LOG("Waiting for all pending event ring processing to complete\n");
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (!mhi_event->request_irq)
+ continue;
+ tasklet_kill(&mhi_event->task);
+ }
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ MHI_LOG("Reset all active channels and remove mhi devices\n");
+ device_for_each_child(mhi_cntrl->dev, NULL, mhi_destroy_device);
+
+ MHI_LOG("Finish resetting channels\n");
+
+ /* remove support for userspace votes */
+ mhi_destroy_vote_sysfs(mhi_cntrl);
+
+ MHI_LOG("Waiting for all pending threads to complete\n");
+ wake_up_all(&mhi_cntrl->state_event);
+ flush_work(&mhi_cntrl->st_worker);
+ flush_work(&mhi_cntrl->fw_worker);
+ flush_work(&mhi_cntrl->low_priority_worker);
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake), "dev_wake != 0");
+ MHI_ASSERT(atomic_read(&mhi_cntrl->pending_pkts), "pending_pkts != 0");
+
+ /* reset the ev rings and cmd rings */
+ MHI_LOG("Resetting EV CTXT and CMD CTXT\n");
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ cmd_ctxt->rp = cmd_ctxt->rbase;
+ cmd_ctxt->wp = cmd_ctxt->rbase;
+ }
+
+ mhi_event = mhi_cntrl->mhi_event;
+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+ mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* do not touch offload er */
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ er_ctxt->rp = er_ctxt->rbase;
+ er_ctxt->wp = er_ctxt->rbase;
+ }
+
+ /* remove support for time sync */
+ mhi_destroy_timesync(mhi_cntrl);
+
+ if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
+ mhi_ready_state_transition(mhi_cntrl);
+ } else {
+ /* move to disable state */
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (unlikely(cur_state != MHI_PM_DISABLE))
+ MHI_ERR("Error moving from pm state:%s to state:%s\n",
+ to_mhi_pm_state_str(cur_state),
+ to_mhi_pm_state_str(MHI_PM_DISABLE));
+ }
+
+ MHI_LOG("Exit with pm_state:%s mhi_state:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+}
+
+int mhi_debugfs_trigger_reset(void *data, u64 val)
+{
+ struct mhi_controller *mhi_cntrl = data;
+ enum MHI_PM_STATE cur_state;
+ int ret;
+
+ MHI_LOG("Trigger MHI Reset\n");
+
+ /* exit lpm first */
+ mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+ mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_DETECT);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ if (cur_state == MHI_PM_SYS_ERR_DETECT)
+ schedule_work(&mhi_cntrl->syserr_worker);
+
+ return 0;
+}
+
+/* queue a new state-transition work item and schedule the worker */
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+ enum MHI_ST_TRANSITION state)
+{
+ struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
+ unsigned long flags;
+
+ if (!item)
+ return -ENOMEM;
+
+ item->state = state;
+ spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
+ list_add_tail(&item->node, &mhi_cntrl->transition_list);
+ spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
+
+ schedule_work(&mhi_cntrl->st_worker);
+
+ return 0;
+}
+
+static void mhi_low_priority_events_pending(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event;
+
+ list_for_each_entry(mhi_event, &mhi_cntrl->lp_ev_rings, node) {
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+
+ spin_lock_bh(&mhi_event->lock);
+ if (ev_ring->rp != mhi_to_virtual(ev_ring, er_ctxt->rp)) {
+ schedule_work(&mhi_cntrl->low_priority_worker);
+ spin_unlock_bh(&mhi_event->lock);
+ break;
+ }
+ spin_unlock_bh(&mhi_event->lock);
+ }
+}
+
+void mhi_low_priority_worker(struct work_struct *work)
+{
+ struct mhi_controller *mhi_cntrl = container_of(work,
+ struct mhi_controller,
+ low_priority_worker);
+ struct mhi_event *mhi_event;
+
+ MHI_VERB("Enter with pm_state:%s MHI_STATE:%s ee:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ /* check low priority event rings and process events */
+ list_for_each_entry(mhi_event, &mhi_cntrl->lp_ev_rings, node) {
+ if (MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+ mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+ }
+}
+
+void mhi_pm_sys_err_worker(struct work_struct *work)
+{
+ struct mhi_controller *mhi_cntrl = container_of(work,
+ struct mhi_controller,
+ syserr_worker);
+
+ MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+}
+
+void mhi_pm_st_worker(struct work_struct *work)
+{
+ struct state_transition *itr, *tmp;
+ LIST_HEAD(head);
+ struct mhi_controller *mhi_cntrl = container_of(work,
+ struct mhi_controller,
+ st_worker);
+ spin_lock_irq(&mhi_cntrl->transition_lock);
+ list_splice_tail_init(&mhi_cntrl->transition_list, &head);
+ spin_unlock_irq(&mhi_cntrl->transition_lock);
+
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ MHI_LOG("Transition to state:%s\n",
+ TO_MHI_STATE_TRANS_STR(itr->state));
+
+ switch (itr->state) {
+ case MHI_ST_TRANSITION_PBL:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_IN_PBL(mhi_cntrl->ee))
+ wake_up_all(&mhi_cntrl->state_event);
+ break;
+ case MHI_ST_TRANSITION_SBL:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->ee = MHI_EE_SBL;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+ mhi_create_devices(mhi_cntrl);
+ break;
+ case MHI_ST_TRANSITION_MISSION_MODE:
+ mhi_pm_mission_mode_transition(mhi_cntrl);
+ break;
+ case MHI_ST_TRANSITION_READY:
+ mhi_ready_state_transition(mhi_cntrl);
+ break;
+ default:
+ break;
+ }
+ kfree(itr);
+ }
+}
+
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+ u32 val;
+ enum mhi_ee current_ee;
+ enum MHI_ST_TRANSITION next_state;
+
+ MHI_LOG("Requested to power on\n");
+
+ if (mhi_cntrl->msi_allocated < mhi_cntrl->total_ev_rings)
+ return -EINVAL;
+
+ /* set to default wake if any one is not set */
+ if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
+ !mhi_cntrl->wake_toggle) {
+ mhi_cntrl->wake_get = mhi_assert_dev_wake;
+ mhi_cntrl->wake_put = mhi_deassert_dev_wake;
+ mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
+ mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
+ }
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ mhi_cntrl->pm_state = MHI_PM_DISABLE;
+
+ if (!mhi_cntrl->pre_init) {
+ /* setup device context */
+ ret = mhi_init_dev_ctxt(mhi_cntrl);
+ if (ret) {
+ MHI_ERR("Error setting dev_context\n");
+ goto error_dev_ctxt;
+ }
+ }
+
+ ret = mhi_init_irq_setup(mhi_cntrl);
+ if (ret) {
+ MHI_ERR("Error setting up irq\n");
+ goto error_setup_irq;
+ }
+
+ /* setup bhi offset & intvec */
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
+ if (ret) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ MHI_ERR("Error getting bhi offset\n");
+ goto error_bhi_offset;
+ }
+
+ mhi_cntrl->bhi = mhi_cntrl->regs + val;
+
+ /* setup bhie offset if not set */
+ if (mhi_cntrl->fbc_download && !mhi_cntrl->bhie) {
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
+ if (ret) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ MHI_ERR("Error getting bhie offset\n");
+ goto error_bhi_offset;
+ }
+
+ mhi_cntrl->bhie = mhi_cntrl->regs + val;
+ }
+
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ mhi_cntrl->pm_state = MHI_PM_POR;
+ mhi_cntrl->ee = MHI_EE_MAX;
+ current_ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /* confirm device is in valid exec env */
+ if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
+ MHI_ERR("Not a valid ee for power on\n");
+ ret = -EIO;
+ goto error_bhi_offset;
+ }
+
+ /* transition to next state */
+ next_state = MHI_IN_PBL(current_ee) ?
+ MHI_ST_TRANSITION_PBL : MHI_ST_TRANSITION_READY;
+
+ if (next_state == MHI_ST_TRANSITION_PBL)
+ schedule_work(&mhi_cntrl->fw_worker);
+
+ mhi_queue_state_transition(mhi_cntrl, next_state);
+
+ mhi_init_debugfs(mhi_cntrl);
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ MHI_LOG("Power on setup success\n");
+
+ return 0;
+
+error_bhi_offset:
+ mhi_deinit_free_irq(mhi_cntrl);
+
+error_setup_irq:
+ if (!mhi_cntrl->pre_init)
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+
+error_dev_ctxt:
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(mhi_async_power_up);
+
+/* Transition MHI into error state and notify critical clients */
+void mhi_control_error(struct mhi_controller *mhi_cntrl)
+{
+ enum MHI_PM_STATE cur_state;
+
+ MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_LD_ERR_FATAL_DETECT);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT) {
+ MHI_ERR("Failed to transition to state:%s from:%s\n",
+ to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
+ to_mhi_pm_state_str(cur_state));
+ goto exit_control_error;
+ }
+
+ /* notify waiters to bail out early since MHI has entered ERROR state */
+ wake_up_all(&mhi_cntrl->state_event);
+
+ /* start notifying all clients who request early notification */
+ device_for_each_child(mhi_cntrl->dev, NULL, mhi_early_notify_device);
+
+exit_control_error:
+ MHI_LOG("Exit with pm_state:%s MHI_STATE:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+}
+EXPORT_SYMBOL(mhi_control_error);
+
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
+{
+ enum MHI_PM_STATE cur_state;
+
+ /* if it's not graceful shutdown, force MHI to a linkdown state */
+ if (!graceful) {
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_LD_ERR_FATAL_DETECT);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT)
+ MHI_ERR("Failed to move to state:%s from:%s\n",
+ to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ }
+ mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+
+ mhi_deinit_debugfs(mhi_cntrl);
+
+ mhi_deinit_free_irq(mhi_cntrl);
+
+ if (!mhi_cntrl->pre_init) {
+ /* free all allocated resources */
+ if (mhi_cntrl->fbc_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+ }
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+ }
+}
+EXPORT_SYMBOL(mhi_power_down);
+
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
+{
+ int ret = mhi_async_power_up(mhi_cntrl);
+
+ if (ret)
+ return ret;
+
+ wait_event_timeout(mhi_cntrl->state_event,
+ MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -EIO;
+}
+EXPORT_SYMBOL(mhi_sync_power_up);
+
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+ enum MHI_PM_STATE new_state;
+ struct mhi_chan *itr, *tmp;
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return -EINVAL;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* do a quick check to see if any pending votes to keep us busy */
+ if (atomic_read(&mhi_cntrl->dev_wake) ||
+ atomic_read(&mhi_cntrl->pending_pkts) ||
+ atomic_read(&mhi_dev->bus_vote)) {
+ MHI_VERB("Busy, aborting M3\n");
+ return -EBUSY;
+ }
+
+ /* exit MHI out of M2 state */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ mhi_cntrl->dev_state == MHI_STATE_M1 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ MHI_ERR(
+ "Did not enter M0||M1 state, cur_state:%s pm_state:%s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ ret = -EIO;
+ goto error_m0_entry;
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+
+ /*
+ * Check the votes once more to see if we should abort
+	 * suspend. We're asserting wake so count would be at least 1
+ */
+ if (atomic_read(&mhi_cntrl->dev_wake) > 1 ||
+ atomic_read(&mhi_cntrl->pending_pkts) ||
+ atomic_read(&mhi_dev->bus_vote)) {
+ MHI_VERB("Busy, aborting M3\n");
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ ret = -EBUSY;
+ goto error_m0_entry;
+ }
+
+ /* anytime after this, we will resume thru runtime pm framework */
+ MHI_LOG("Allowing M3 transition\n");
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
+ if (new_state != MHI_PM_M3_ENTER) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_ENTER),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+ ret = -EIO;
+ goto error_m0_entry;
+ }
+
+ /* set dev to M3 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ MHI_LOG("Wait for M3 completion\n");
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M3 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ MHI_ERR("Did not enter M3 state, cur_state:%s pm_state:%s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* notify any clients we enter lpm */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
+ mutex_unlock(&itr->mutex);
+ }
+
+ return 0;
+
+error_m0_entry:
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(mhi_pm_suspend);
+
+/**
+ * mhi_pm_fast_suspend - Faster suspend path where we transition host to
+ * inactive state w/o suspending device. Useful for cases where we want apps to
+ * go into power collapse but keep the physical link in active state.
+ */
+int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client)
+{
+ int ret;
+ enum MHI_PM_STATE new_state;
+ struct mhi_chan *itr, *tmp;
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return -EINVAL;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* do a quick check to see if any pending votes to keep us busy */
+ if (atomic_read(&mhi_cntrl->pending_pkts)) {
+ MHI_VERB("Busy, aborting M3\n");
+ return -EBUSY;
+ }
+
+ /* disable ctrl event processing */
+ tasklet_disable(&mhi_cntrl->mhi_event->task);
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+
+ /*
+ * Check the votes once more to see if we should abort
+ * suspend.
+ */
+ if (atomic_read(&mhi_cntrl->pending_pkts)) {
+ MHI_VERB("Busy, aborting M3\n");
+ ret = -EBUSY;
+ goto error_suspend;
+ }
+
+ /* anytime after this, we will resume thru runtime pm framework */
+ MHI_LOG("Allowing Fast M3 transition\n");
+
+ /* save the current states */
+ mhi_cntrl->saved_pm_state = mhi_cntrl->pm_state;
+ mhi_cntrl->saved_dev_state = mhi_cntrl->dev_state;
+
+ /* If we're in M2, we need to switch back to M0 first */
+ if (mhi_cntrl->pm_state == MHI_PM_M2) {
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
+ if (new_state != MHI_PM_M0) {
+ MHI_ERR("Error set pm_state to:%s from pm_state:%s\n",
+ to_mhi_pm_state_str(MHI_PM_M0),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ ret = -EIO;
+ goto error_suspend;
+ }
+ }
+
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
+ if (new_state != MHI_PM_M3_ENTER) {
+ MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_ENTER),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ ret = -EIO;
+ goto error_suspend;
+ }
+
+ /* set dev to M3_FAST and host to M3 */
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
+ if (new_state != MHI_PM_M3) {
+ MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n",
+ to_mhi_pm_state_str(MHI_PM_M3),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ ret = -EIO;
+ goto error_suspend;
+ }
+
+ mhi_cntrl->dev_state = MHI_STATE_M3_FAST;
+ mhi_cntrl->M3_FAST++;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /* now safe to check ctrl event ring */
+ tasklet_enable(&mhi_cntrl->mhi_event->task);
+ mhi_msi_handlr(0, mhi_cntrl->mhi_event);
+
+ if (!notify_client)
+ return 0;
+
+ /* notify any clients we enter lpm */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
+ mutex_unlock(&itr->mutex);
+ }
+
+ return 0;
+
+error_suspend:
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /* check ctrl event ring for pending work */
+ tasklet_enable(&mhi_cntrl->mhi_event->task);
+ mhi_msi_handlr(0, mhi_cntrl->mhi_event);
+
+ return ret;
+}
+EXPORT_SYMBOL(mhi_pm_fast_suspend);
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+ enum MHI_PM_STATE cur_state;
+ int ret;
+ struct mhi_chan *itr, *tmp;
+
+ MHI_LOG("Entered with pm_state:%s dev_state:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return 0;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ MHI_ASSERT(mhi_cntrl->pm_state != MHI_PM_M3, "mhi_pm_state != M3");
+
+	/* notify any clients we're about to exit lpm */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
+ mutex_unlock(&itr->mutex);
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
+ if (cur_state != MHI_PM_M3_EXIT) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_EXIT),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* set dev to M0 and wait for completion */
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+ /*
+	 * It's possible the device is already in an error state and we didn't
+ * process it due to low power mode, force a check
+ */
+ mhi_intvec_threaded_handlr(0, mhi_cntrl);
+ return -EIO;
+ }
+
+ /*
+ * If MHI on host is in suspending/suspended state, we do not process
+ * any low priority requests, for example, bandwidth scaling events
+ * from the device. Check for low priority event rings and handle the
+ * pending events upon resume.
+ */
+ mhi_low_priority_events_pending(mhi_cntrl);
+
+ return 0;
+}
+
+int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
+{
+ struct mhi_chan *itr, *tmp;
+ struct mhi_event *mhi_event;
+ int i;
+
+ MHI_LOG("Entered with pm_state:%s dev_state:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return 0;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ MHI_ASSERT(mhi_cntrl->pm_state != MHI_PM_M3, "mhi_pm_state != M3");
+
+ /* notify any clients we're about to exit lpm */
+ if (notify_client) {
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans,
+ node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
+ mutex_unlock(&itr->mutex);
+ }
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ /* restore the states */
+ mhi_cntrl->pm_state = mhi_cntrl->saved_pm_state;
+ mhi_cntrl->dev_state = mhi_cntrl->saved_dev_state;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ switch (mhi_cntrl->pm_state) {
+ case MHI_PM_M0:
+ mhi_pm_m0_transition(mhi_cntrl);
+ case MHI_PM_M2:
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ /*
+ * we're doing a double check of pm_state because by the time we
+	 * grab the pm_lock, the device may have already initiated an M0 on
+ * its own. If that's the case we should not be toggling device
+ * wake.
+ */
+ if (mhi_cntrl->pm_state == MHI_PM_M2) {
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ mhi_cntrl->wake_put(mhi_cntrl, true);
+ }
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ }
+
+ /*
+	 * In the fast suspend/resume case the device is not aware of the host transition
+	 * to suspend state. So, the device could be triggering an interrupt while
+ * host not accepting MSI. We have to manually check each event ring
+ * upon resume.
+ */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (!mhi_event->request_irq)
+ continue;
+
+ mhi_msi_handlr(0, mhi_event);
+ }
+
+ /* schedules worker if any low priority events need to be handled */
+ mhi_low_priority_events_pending(mhi_cntrl);
+
+ MHI_LOG("Exit with pm_state:%s dev_state:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_pm_resume);
+
+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
+ mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+ mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+ }
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->pm_state == MHI_PM_M0 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void mhi_device_get(struct mhi_device *mhi_dev, int vote)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ if (vote & MHI_VOTE_DEVICE) {
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ atomic_inc(&mhi_dev->dev_vote);
+ }
+
+ if (vote & MHI_VOTE_BUS) {
+ mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+ atomic_inc(&mhi_dev->bus_vote);
+ }
+}
+EXPORT_SYMBOL(mhi_device_get);
+
+int mhi_device_get_sync(struct mhi_device *mhi_dev, int vote)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ int ret;
+
+ /*
+	 * regardless of any vote we will bring the device out of lpm and assert
+ * device wake
+ */
+ ret = __mhi_device_get_sync(mhi_cntrl);
+ if (ret)
+ return ret;
+
+ if (vote & MHI_VOTE_DEVICE) {
+ atomic_inc(&mhi_dev->dev_vote);
+ } else {
+		/* client did not request a device vote so de-assert dev_wake */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ }
+
+ if (vote & MHI_VOTE_BUS) {
+ mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+ atomic_inc(&mhi_dev->bus_vote);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_device_get_sync);
+
+void mhi_device_put(struct mhi_device *mhi_dev, int vote)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ if (vote & MHI_VOTE_DEVICE) {
+ atomic_dec(&mhi_dev->dev_vote);
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+ mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+ }
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ }
+
+ if (vote & MHI_VOTE_BUS) {
+ atomic_dec(&mhi_dev->bus_vote);
+ mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+
+ /*
+ * if counts reach 0, clients release all votes
+		 * send idle cb to attempt suspend
+ */
+ if (!atomic_read(&mhi_dev->bus_vote))
+ mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+ MHI_CB_IDLE);
+ }
+}
+EXPORT_SYMBOL(mhi_device_put);
+
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+
+ MHI_LOG("Enter with pm_state:%s ee:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ /* device already in rddm */
+ if (mhi_cntrl->ee == MHI_EE_RDDM)
+ return 0;
+
+ MHI_LOG("Triggering SYS_ERR to force rddm state\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+ /* wait for rddm event */
+ MHI_LOG("Waiting for device to enter RDDM state\n");
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->ee == MHI_EE_RDDM,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ ret = ret ? 0 : -EIO;
+
+ MHI_LOG("Exiting with pm_state:%s ee:%s ret:%d\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(mhi_force_rddm_mode);
diff --git a/drivers/bus/mhi/devices/Kconfig b/drivers/bus/mhi/devices/Kconfig
new file mode 100644
index 0000000..385137f
--- /dev/null
+++ b/drivers/bus/mhi/devices/Kconfig
@@ -0,0 +1,21 @@
+menu "MHI device support"
+
+config MHI_NETDEV
+ tristate "MHI NETDEV"
+ depends on MHI_BUS
+ help
+ MHI based net device driver for transferring
+ IP traffic between host and modem. By enabling
+ this driver, clients can transfer data using
+ standard network interface.
+
+config MHI_UCI
+ tristate "MHI UCI"
+ depends on MHI_BUS
+ help
+ MHI based uci driver is for transferring data
+ between host and modem using standard file operations
+ from user space. Open, read, write, ioctl, and close
+ operations are supported by this driver.
+
+endmenu
diff --git a/drivers/bus/mhi/devices/Makefile b/drivers/bus/mhi/devices/Makefile
new file mode 100644
index 0000000..300eed1
--- /dev/null
+++ b/drivers/bus/mhi/devices/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MHI_NETDEV) +=mhi_netdev.o
+obj-$(CONFIG_MHI_UCI) +=mhi_uci.o
diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c
new file mode 100644
index 0000000..7e1cffb
--- /dev/null
+++ b/drivers/bus/mhi/devices/mhi_netdev.c
@@ -0,0 +1,1137 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/msm_rmnet.h>
+#include <linux/if_arp.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/ipc_logging.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/of_device.h>
+#include <linux/rtnetlink.h>
+#include <linux/kthread.h>
+#include <linux/mhi.h>
+
+#define MHI_NETDEV_DRIVER_NAME "mhi_netdev"
+#define WATCHDOG_TIMEOUT (30 * HZ)
+#define IPC_LOG_PAGES (100)
+#define MAX_NETBUF_SIZE (128)
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define IPC_LOG_LVL (MHI_MSG_LVL_VERBOSE)
+
+/* debug build: a failed assertion is fatal */
+#define MHI_ASSERT(cond, msg) do { \
+ if (cond) \
+ panic(msg); \
+} while (0)
+
+/* NOTE: MSG_VERB/MSG_LOG/MSG_ERR require a local variable named
+ * "mhi_netdev" to be in scope at every call site.
+ */
+#define MSG_VERB(fmt, ...) do { \
+ if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_VERBOSE) \
+ pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\
+ if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \
+ MHI_MSG_LVL_VERBOSE)) \
+ ipc_log_string(mhi_netdev->ipc_log, "[D][%s] " fmt, \
+ __func__, ##__VA_ARGS__); \
+} while (0)
+
+#else
+
+#define IPC_LOG_LVL (MHI_MSG_LVL_ERROR)
+
+/* non-debug build: log and warn instead of panicking.
+ * NOTE(review): "cond" is evaluated twice here (if + WARN_ON); callers
+ * must not pass expressions with side effects.
+ */
+#define MHI_ASSERT(cond, msg) do { \
+ if (cond) { \
+ MSG_ERR(msg); \
+ WARN_ON(cond); \
+ } \
+} while (0)
+
+#define MSG_VERB(fmt, ...)
+
+#endif
+
+#define MSG_LOG(fmt, ...) do { \
+ if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_INFO) \
+ pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\
+ if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \
+ MHI_MSG_LVL_INFO)) \
+ ipc_log_string(mhi_netdev->ipc_log, "[I][%s] " fmt, \
+ __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define MSG_ERR(fmt, ...) do { \
+ if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_ERROR) \
+ pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
+ if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \
+ MHI_MSG_LVL_ERROR)) \
+ ipc_log_string(mhi_netdev->ipc_log, "[E][%s] " fmt, \
+ __func__, ##__VA_ARGS__); \
+} while (0)
+
+/* head/tail of the skb frag_list chain built up during one NAPI poll */
+struct mhi_net_chain {
+ struct sk_buff *head, *tail; /* chained skb */
+};
+
+/* per-interface driver state; shared (cloned) with an RSC child device */
+struct mhi_netdev {
+ int alias; /* of_alias id, used in names */
+ struct mhi_device *mhi_dev;
+ struct mhi_netdev *rsc_dev; /* rsc linked node */
+ struct mhi_netdev *rsc_parent;
+ bool is_rsc_dev;
+ int wake; /* LPM vote count (0 or 1) */
+
+ u32 mru; /* rx buffer size, from DT */
+ u32 order; /* page order so PAGE_SIZE << order >= mru */
+ const char *interface_name;
+ struct napi_struct *napi;
+ struct net_device *ndev;
+
+ struct list_head *recycle_pool; /* pre-mapped reusable rx buffers */
+ int pool_size;
+ bool chain_skb;
+ struct mhi_net_chain *chain;
+
+ struct task_struct *alloc_task; /* background buffer allocator */
+ wait_queue_head_t alloc_event;
+ int bg_pool_limit; /* minimum pool size */
+ int bg_pool_size; /* current size of the pool */
+ struct list_head *bg_pool;
+ spinlock_t bg_lock; /* lock to access list */
+
+
+ struct dentry *dentry;
+ enum MHI_DEBUG_LEVEL msg_lvl;
+ enum MHI_DEBUG_LEVEL ipc_log_lvl;
+ void *ipc_log;
+
+ /* debug stats */
+ u32 abuffers, kbuffers, rbuffers;
+};
+
+/* netdev_priv() payload: back-pointer to the driver state */
+struct mhi_netdev_priv {
+ struct mhi_netdev *mhi_netdev;
+};
+
+/* Try not to make this structure bigger than 128 bytes, since this take space
+ * in payload packet.
+ * Example: If MRU = 16K, effective MRU = 16K - sizeof(mhi_netbuf)
+ */
+struct mhi_netbuf {
+ struct mhi_buf mhi_buf; /* this must be first element */
+ bool recycle; /* true if buffer belongs to recycle_pool */
+ void (*unmap)(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir);
+};
+
+static struct mhi_driver mhi_netdev_driver;
+static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev);
+
+/* Derive the L3 protocol from the first payload byte (IP version
+ * nibble): 4 -> IPv4, 6 -> IPv6, anything else is treated as QMAP.
+ */
+static __be16 mhi_netdev_ip_type_trans(u8 data)
+{
+ __be16 protocol = 0;
+
+ /* determine L3 protocol */
+ switch (data & 0xf0) {
+ case 0x40:
+ protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ /* default is QMAP */
+ protocol = htons(ETH_P_MAP);
+ break;
+ }
+ return protocol;
+}
+
+/* Allocate a 2^order page rx buffer with a struct mhi_netbuf embedded at
+ * the end of the page(s); usable payload is everything before it.
+ *
+ * If @dev is non-NULL the buffer is also DMA-mapped FROM_DEVICE; pass
+ * NULL to defer mapping (used by the background allocator thread).
+ *
+ * Return: the netbuf, or NULL on allocation/mapping failure.
+ */
+static struct mhi_netbuf *mhi_netdev_alloc(struct device *dev,
+ gfp_t gfp,
+ unsigned int order)
+{
+ struct page *page;
+ struct mhi_netbuf *netbuf;
+ struct mhi_buf *mhi_buf;
+ void *vaddr;
+
+ page = __dev_alloc_pages(gfp, order);
+ if (!page)
+ return NULL;
+
+ vaddr = page_address(page);
+
+ /* we going to use the end of page to store cached data */
+ netbuf = vaddr + (PAGE_SIZE << order) - sizeof(*netbuf);
+ netbuf->recycle = false;
+ /* mhi_buf is the first member of mhi_netbuf, so the cast is valid */
+ mhi_buf = (struct mhi_buf *)netbuf;
+ mhi_buf->page = page;
+ mhi_buf->buf = vaddr;
+ mhi_buf->len = (void *)netbuf - vaddr;
+
+ if (!dev)
+ return netbuf;
+
+ mhi_buf->dma_addr = dma_map_page(dev, page, 0, mhi_buf->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, mhi_buf->dma_addr)) {
+ __free_pages(mhi_buf->page, order);
+ return NULL;
+ }
+
+ return netbuf;
+}
+
+/* netbuf->unmap callback for non-recycled buffers: full DMA unmap
+ * (recycled buffers use dma_sync_single_for_cpu instead).
+ */
+static void mhi_netdev_unmap_page(struct device *dev,
+ dma_addr_t dma_addr,
+ size_t len,
+ enum dma_data_direction dir)
+{
+ dma_unmap_page(dev, dma_addr, len, dir);
+}
+
+/* Last-resort refill: atomically allocate, map and queue @nr_tre one-shot
+ * (non-recycled) rx buffers. Called from NAPI context, hence GFP_ATOMIC.
+ *
+ * Return: 0 on success, negative errno on first failure (earlier buffers
+ * remain queued).
+ */
+static int mhi_netdev_tmp_alloc(struct mhi_netdev *mhi_netdev,
+ struct mhi_device *mhi_dev,
+ int nr_tre)
+{
+ struct device *dev = mhi_dev->dev.parent;
+ const u32 order = mhi_netdev->order;
+ int i, ret;
+
+ for (i = 0; i < nr_tre; i++) {
+ struct mhi_buf *mhi_buf;
+ struct mhi_netbuf *netbuf = mhi_netdev_alloc(dev, GFP_ATOMIC,
+ order);
+ if (!netbuf)
+ return -ENOMEM;
+
+ mhi_buf = (struct mhi_buf *)netbuf;
+ netbuf->unmap = mhi_netdev_unmap_page;
+
+ ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
+ mhi_buf->len, MHI_EOT);
+ if (unlikely(ret)) {
+ MSG_ERR("Failed to queue transfer, ret:%d\n", ret);
+ /* undo map + allocation of the failed buffer */
+ mhi_netdev_unmap_page(dev, mhi_buf->dma_addr,
+ mhi_buf->len, DMA_FROM_DEVICE);
+ __free_pages(mhi_buf->page, order);
+ return ret;
+ }
+ mhi_netdev->abuffers++;
+ }
+
+ return 0;
+}
+
+/* Refill up to @nr_tre rx descriptors from the background-allocated pool.
+ * Buffers in bg_pool are unmapped; map each one here before queueing.
+ * Buffers that could not be queued are returned to the pool, and the
+ * allocator thread is woken to replenish.
+ *
+ * Return: number of buffers actually queued.
+ */
+static int mhi_netdev_queue_bg_pool(struct mhi_netdev *mhi_netdev,
+ struct mhi_device *mhi_dev,
+ int nr_tre)
+{
+ struct device *dev = mhi_dev->dev.parent;
+ int i, ret;
+ LIST_HEAD(head);
+
+ /* take the whole pool private so we can walk it unlocked */
+ spin_lock_bh(&mhi_netdev->bg_lock);
+ list_splice_init(mhi_netdev->bg_pool, &head);
+ spin_unlock_bh(&mhi_netdev->bg_lock);
+
+ for (i = 0; i < nr_tre; i++) {
+ struct mhi_buf *mhi_buf =
+ list_first_entry_or_null(&head, struct mhi_buf, node);
+ struct mhi_netbuf *netbuf = (struct mhi_netbuf *)mhi_buf;
+
+ if (!mhi_buf)
+ break;
+
+ mhi_buf->dma_addr = dma_map_page(dev, mhi_buf->page, 0,
+ mhi_buf->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, mhi_buf->dma_addr))
+ break;
+
+ netbuf->unmap = mhi_netdev_unmap_page;
+ ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
+ mhi_buf->len, MHI_EOT);
+ if (unlikely(ret)) {
+ MSG_ERR("Failed to queue transfer, ret:%d\n", ret);
+ mhi_netdev_unmap_page(dev, mhi_buf->dma_addr,
+ mhi_buf->len, DMA_FROM_DEVICE);
+ break;
+ }
+ /* only successfully queued buffers leave the list, so on
+ * break "i" equals the number of buffers consumed
+ */
+ list_del(&mhi_buf->node);
+ mhi_netdev->kbuffers++;
+ }
+
+ /* add remaining buffers back to main pool */
+ spin_lock_bh(&mhi_netdev->bg_lock);
+ list_splice(&head, mhi_netdev->bg_pool);
+ mhi_netdev->bg_pool_size -= i;
+ spin_unlock_bh(&mhi_netdev->bg_lock);
+
+
+ /* wake up the bg thread to allocate more buffers */
+ wake_up_interruptible(&mhi_netdev->alloc_event);
+
+ return i;
+}
+
+/* Refill all free rx descriptors for @mhi_dev, trying sources in order
+ * of cost: (1) recycle pool (pre-mapped, just sync), (2) background
+ * pool (pre-allocated, map here), (3) fresh atomic allocations.
+ */
+static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev,
+ struct mhi_device *mhi_dev)
+{
+ struct device *dev = mhi_dev->dev.parent;
+ struct mhi_netbuf *netbuf;
+ struct mhi_buf *mhi_buf;
+ struct list_head *pool = mhi_netdev->recycle_pool;
+ int nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+ int i, ret;
+ const int max_peek = 4;
+
+ MSG_VERB("Enter free_desc:%d\n", nr_tre);
+
+ if (!nr_tre)
+ return;
+
+ /* try going thru reclaim pool first */
+ for (i = 0; i < nr_tre; i++) {
+ /* peek for the next buffer, we going to peek several times,
+ * and we going to give up if buffers are not yet free
+ */
+ int peek = 0;
+
+ netbuf = NULL;
+ list_for_each_entry(mhi_buf, pool, node) {
+ /* page == 1 idle, buffer is free to reclaim */
+ if (page_ref_count(mhi_buf->page) == 1) {
+ netbuf = (struct mhi_netbuf *)mhi_buf;
+ break;
+ }
+
+ if (peek++ >= max_peek)
+ break;
+ }
+
+ /* could not find a free buffer */
+ if (!netbuf)
+ break;
+
+ /* increment reference count so when network stack is done
+ * with buffer, the buffer won't be freed
+ */
+ page_ref_inc(mhi_buf->page);
+ list_del(&mhi_buf->node);
+ /* recycled buffers stay mapped; hand ownership back to HW */
+ dma_sync_single_for_device(dev, mhi_buf->dma_addr, mhi_buf->len,
+ DMA_FROM_DEVICE);
+ ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
+ mhi_buf->len, MHI_EOT);
+ if (unlikely(ret)) {
+ MSG_ERR("Failed to queue buffer, ret:%d\n", ret);
+ netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len,
+ DMA_FROM_DEVICE);
+ page_ref_dec(mhi_buf->page);
+ list_add(&mhi_buf->node, pool);
+ return;
+ }
+ mhi_netdev->rbuffers++;
+ }
+
+ /* recycling did not work, buffers are still busy use bg pool */
+ if (i < nr_tre)
+ i += mhi_netdev_queue_bg_pool(mhi_netdev, mhi_dev, nr_tre - i);
+
+ /* recycling did not work, buffers are still busy allocate temp pkts */
+ if (i < nr_tre)
+ mhi_netdev_tmp_alloc(mhi_netdev, mhi_dev, nr_tre - i);
+}
+
+/* Allocate the recycle pool: pool_size pre-mapped rx buffers marked
+ * recycle=true whose unmap callback only syncs (keeps the mapping).
+ *
+ * Return: 0 on success, -ENOMEM on failure (partial pool freed).
+ */
+static int mhi_netdev_alloc_pool(struct mhi_netdev *mhi_netdev)
+{
+ int i;
+ struct mhi_netbuf *netbuf;
+ struct mhi_buf *mhi_buf, *tmp;
+ const u32 order = mhi_netdev->order;
+ struct device *dev = mhi_netdev->mhi_dev->dev.parent;
+ struct list_head *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+
+ if (!pool)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(pool);
+
+ for (i = 0; i < mhi_netdev->pool_size; i++) {
+ /* allocate paged data */
+ netbuf = mhi_netdev_alloc(dev, GFP_KERNEL, order);
+ if (!netbuf)
+ goto error_alloc_page;
+
+ /* recycled buffers keep their DMA mapping for their lifetime;
+ * completion only needs a CPU-ownership sync
+ */
+ netbuf->unmap = dma_sync_single_for_cpu;
+ netbuf->recycle = true;
+ mhi_buf = (struct mhi_buf *)netbuf;
+ list_add(&mhi_buf->node, pool);
+ }
+
+ mhi_netdev->recycle_pool = pool;
+
+ return 0;
+
+error_alloc_page:
+ list_for_each_entry_safe(mhi_buf, tmp, pool, node) {
+ list_del(&mhi_buf->node);
+ dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len,
+ DMA_FROM_DEVICE);
+ __free_pages(mhi_buf->page, order);
+ }
+
+ kfree(pool);
+
+ return -ENOMEM;
+}
+
+/* Tear down both buffer pools. Recycle-pool buffers are still DMA
+ * mapped and must be unmapped; bg-pool buffers are never mapped while
+ * sitting in the pool, so only the pages are freed.
+ */
+static void mhi_netdev_free_pool(struct mhi_netdev *mhi_netdev)
+{
+ struct device *dev = mhi_netdev->mhi_dev->dev.parent;
+ struct mhi_buf *mhi_buf, *tmp;
+
+ list_for_each_entry_safe(mhi_buf, tmp, mhi_netdev->recycle_pool, node) {
+ list_del(&mhi_buf->node);
+ dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len,
+ DMA_FROM_DEVICE);
+ __free_pages(mhi_buf->page, mhi_netdev->order);
+ }
+
+ kfree(mhi_netdev->recycle_pool);
+
+ /* free the bg pool */
+ list_for_each_entry_safe(mhi_buf, tmp, mhi_netdev->bg_pool, node) {
+ list_del(&mhi_buf->node);
+ __free_pages(mhi_buf->page, mhi_netdev->order);
+ mhi_netdev->bg_pool_size--;
+ }
+}
+
+/* Background kthread: keeps bg_pool_size above bg_pool_limit by bulk
+ * allocating unmapped buffers (NAPI_POLL_WEIGHT per batch), then kicks
+ * NAPI so the new buffers get queued. Sleeps until the pool runs low
+ * or the thread is stopped.
+ */
+static int mhi_netdev_alloc_thread(void *data)
+{
+ struct mhi_netdev *mhi_netdev = data;
+ struct mhi_netbuf *netbuf;
+ struct mhi_buf *mhi_buf, *tmp_buf;
+ const u32 order = mhi_netdev->order;
+ LIST_HEAD(head);
+
+ while (!kthread_should_stop()) {
+ while (mhi_netdev->bg_pool_size <= mhi_netdev->bg_pool_limit) {
+ int buffers = 0, i;
+
+ /* do a bulk allocation */
+ for (i = 0; i < NAPI_POLL_WEIGHT; i++) {
+ if (kthread_should_stop())
+ goto exit_alloc;
+
+ /* dev == NULL: allocate without DMA mapping,
+ * mapping happens at queue time
+ */
+ netbuf = mhi_netdev_alloc(NULL, GFP_KERNEL,
+ order);
+ if (!netbuf)
+ continue;
+
+ mhi_buf = (struct mhi_buf *)netbuf;
+ list_add(&mhi_buf->node, &head);
+ buffers++;
+ }
+
+ /* add the list to main pool */
+ spin_lock_bh(&mhi_netdev->bg_lock);
+ list_splice_init(&head, mhi_netdev->bg_pool);
+ mhi_netdev->bg_pool_size += buffers;
+ spin_unlock_bh(&mhi_netdev->bg_lock);
+ }
+
+ /* replenish the ring */
+ napi_schedule(mhi_netdev->napi);
+
+ /* wait for buffers to run low or thread to stop */
+ wait_event_interruptible(mhi_netdev->alloc_event,
+ kthread_should_stop() ||
+ mhi_netdev->bg_pool_size <= mhi_netdev->bg_pool_limit);
+ }
+
+exit_alloc:
+ /* free any batch that was built but never handed to the pool */
+ list_for_each_entry_safe(mhi_buf, tmp_buf, &head, node) {
+ list_del(&mhi_buf->node);
+ __free_pages(mhi_buf->page, order);
+ }
+
+ return 0;
+}
+
+/* NAPI poll: drain up to @budget completions via mhi_poll(), push any
+ * built-up skb chain to the stack, then refill rx descriptors for this
+ * device (and the linked RSC device, if any).
+ *
+ * Return: number of packets processed; completes NAPI when < budget.
+ */
+static int mhi_netdev_poll(struct napi_struct *napi, int budget)
+{
+ struct net_device *dev = napi->dev;
+ struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
+ struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
+ struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+ struct mhi_netdev *rsc_dev = mhi_netdev->rsc_dev;
+ struct mhi_net_chain *chain = mhi_netdev->chain;
+ int rx_work = 0;
+
+ MSG_VERB("Entered\n");
+
+ rx_work = mhi_poll(mhi_dev, budget);
+
+ /* chained skb, push it to stack */
+ if (chain && chain->head) {
+ netif_receive_skb(chain->head);
+ chain->head = NULL;
+ }
+
+ if (rx_work < 0) {
+ MSG_ERR("Error polling ret:%d\n", rx_work);
+ napi_complete(napi);
+ return 0;
+ }
+
+ /* queue new buffers */
+ mhi_netdev_queue(mhi_netdev, mhi_dev);
+
+ if (rsc_dev)
+ mhi_netdev_queue(mhi_netdev, rsc_dev->mhi_dev);
+
+ /* complete work if # of packet processed less than allocated budget */
+ if (rx_work < budget)
+ napi_complete(napi);
+
+ MSG_VERB("polled %d pkts\n", rx_work);
+
+ return rx_work;
+}
+
+/* .ndo_open: start the tx queue only if the device has an UL channel;
+ * rx-only (e.g. RSC) interfaces keep the queue stopped.
+ */
+static int mhi_netdev_open(struct net_device *dev)
+{
+ struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
+ struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
+ struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+
+ MSG_LOG("Opened net dev interface\n");
+
+ /* tx queue may not necessarily be stopped already
+ * so stop the queue if tx path is not enabled
+ */
+ if (!mhi_dev->ul_chan)
+ netif_stop_queue(dev);
+ else
+ netif_start_queue(dev);
+
+ return 0;
+
+}
+
+/* .ndo_change_mtu: accept any MTU in [0, mhi_dev->mtu]. */
+static int mhi_netdev_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
+ struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
+ struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+
+ if (new_mtu < 0 || mhi_dev->mtu < new_mtu)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/* .ndo_start_xmit: queue the skb on the UL channel. On any queueing
+ * failure the tx queue is stopped and NETDEV_TX_BUSY is returned so
+ * the stack requeues the skb; the queue is restarted from the UL
+ * completion callback. The skb is freed in mhi_netdev_xfer_ul_cb().
+ */
+static int mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
+ struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
+ struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+ int res = 0;
+
+ MSG_VERB("Entered\n");
+
+ res = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, skb, skb->len,
+ MHI_EOT);
+ if (res) {
+ MSG_VERB("Failed to queue with reason:%d\n", res);
+ netif_stop_queue(dev);
+ res = NETDEV_TX_BUSY;
+ }
+
+ MSG_VERB("Exited\n");
+
+ return res;
+}
+
+/* Handle RMNET_IOCTL_EXTENDED sub-commands (feature query, driver name,
+ * and LPM sleep-state voting via mhi_device_get/put).
+ *
+ * Fixes vs. original:
+ * - copy_from_user()/copy_to_user() return the number of bytes NOT
+ *   copied, not an errno; returning that positive count from an ioctl
+ *   handler is wrong -- convert to -EFAULT.
+ * - the final "rc = copy_to_user(...)" overwrote the -EINVAL set for
+ *   unknown sub-commands, so they reported success; rc from the switch
+ *   is now preserved.
+ *
+ * Return: 0 on success, -EFAULT on user copy failure, -EINVAL on
+ * unknown sub-command.
+ */
+static int mhi_netdev_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
+{
+ struct rmnet_ioctl_extended_s ext_cmd;
+ int rc = 0;
+ struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
+ struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
+ struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+
+ if (copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ return -EFAULT;
+
+ switch (ext_cmd.extended_ioctl) {
+ case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
+ ext_cmd.u.data = 0;
+ break;
+ case RMNET_IOCTL_GET_DRIVER_NAME:
+ strlcpy(ext_cmd.u.if_name, mhi_netdev->interface_name,
+ sizeof(ext_cmd.u.if_name));
+ break;
+ case RMNET_IOCTL_SET_SLEEP_STATE:
+ /* "wake" acts as a single-vote latch so repeated requests
+ * do not stack multiple device votes
+ */
+ if (ext_cmd.u.data && mhi_netdev->wake) {
+ /* Request to enable LPM */
+ MSG_VERB("Enable MHI LPM");
+ mhi_netdev->wake--;
+ mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
+ } else if (!ext_cmd.u.data && !mhi_netdev->wake) {
+ /* Request to disable LPM */
+ MSG_VERB("Disable MHI LPM");
+ mhi_netdev->wake++;
+ mhi_device_get(mhi_dev, MHI_VOTE_DEVICE);
+ }
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ return -EFAULT;
+
+ return rc;
+}
+
+/* .ndo_do_ioctl: rmnet-compatible ioctl surface. The interface is
+ * raw-IP only, so LLP/opmode queries always report RMNET_MODE_LLP_IP
+ * and QoS enable is rejected. Unknown commands deliberately return 0
+ * to stay permissive with legacy rmnet userspace.
+ */
+static int mhi_netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ int rc = 0;
+ struct rmnet_ioctl_data_s ioctl_data;
+
+ switch (cmd) {
+ case RMNET_IOCTL_SET_LLP_IP: /* set RAWIP protocol */
+ break;
+ case RMNET_IOCTL_GET_LLP: /* get link protocol state */
+ ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+ sizeof(struct rmnet_ioctl_data_s)))
+ rc = -EFAULT;
+ break;
+ case RMNET_IOCTL_GET_OPMODE: /* get operation mode */
+ ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+ sizeof(struct rmnet_ioctl_data_s)))
+ rc = -EFAULT;
+ break;
+ case RMNET_IOCTL_SET_QOS_ENABLE:
+ rc = -EINVAL;
+ break;
+ case RMNET_IOCTL_SET_QOS_DISABLE:
+ rc = 0;
+ break;
+ case RMNET_IOCTL_OPEN:
+ case RMNET_IOCTL_CLOSE:
+ /* we just ignore them and return success */
+ rc = 0;
+ break;
+ case RMNET_IOCTL_EXTENDED:
+ rc = mhi_netdev_ioctl_extended(dev, ifr);
+ break;
+ default:
+ /* don't fail any IOCTL right now */
+ rc = 0;
+ break;
+ }
+
+ return rc;
+}
+
+/* net_device_ops for the raw-IP interface; no MAC address handling */
+static const struct net_device_ops mhi_netdev_ops_ip = {
+ .ndo_open = mhi_netdev_open,
+ .ndo_start_xmit = mhi_netdev_xmit,
+ .ndo_do_ioctl = mhi_netdev_ioctl,
+ .ndo_change_mtu = mhi_netdev_change_mtu,
+ .ndo_set_mac_address = 0,
+ .ndo_validate_addr = 0,
+};
+
+/* alloc_netdev() setup callback: start from ether_setup() then strip it
+ * down to a raw-IP, headerless, non-broadcast interface.
+ */
+static void mhi_netdev_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &mhi_netdev_ops_ip;
+ ether_setup(dev);
+
+ /* set this after calling ether_setup */
+ dev->header_ops = 0; /* No header */
+ dev->type = ARPHRD_RAWIP;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ dev->watchdog_timeo = WATCHDOG_TIMEOUT;
+}
+
+/* Allocate, name and register the net_device plus its NAPI context.
+ * Interface name comes from DT "mhi,interface-name" (falls back to the
+ * driver name); the alias encodes dev_id/domain/bus/slot/alias for
+ * uniqueness. Call only after grabbing mhi_netdev.mutex.
+ *
+ * Return: 0 on success, negative errno on failure (netdev freed).
+ */
+static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev)
+{
+ int ret = 0;
+ char ifalias[IFALIASZ];
+ char ifname[IFNAMSIZ];
+ struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+ struct device_node *of_node = mhi_dev->dev.of_node;
+ struct mhi_netdev_priv *mhi_netdev_priv;
+
+ mhi_netdev->alias = of_alias_get_id(of_node, "mhi_netdev");
+ if (mhi_netdev->alias < 0)
+ return -ENODEV;
+
+ ret = of_property_read_string(of_node, "mhi,interface-name",
+ &mhi_netdev->interface_name);
+ if (ret)
+ mhi_netdev->interface_name = mhi_netdev_driver.driver.name;
+
+ snprintf(ifalias, sizeof(ifalias), "%s_%04x_%02u.%02u.%02u_%u",
+ mhi_netdev->interface_name, mhi_dev->dev_id, mhi_dev->domain,
+ mhi_dev->bus, mhi_dev->slot, mhi_netdev->alias);
+
+ snprintf(ifname, sizeof(ifname), "%s%%d", mhi_netdev->interface_name);
+
+ /* rtnl lock protects alloc + alias assignment as one unit */
+ rtnl_lock();
+ mhi_netdev->ndev = alloc_netdev(sizeof(*mhi_netdev_priv),
+ ifname, NET_NAME_PREDICTABLE,
+ mhi_netdev_setup);
+ if (!mhi_netdev->ndev) {
+ rtnl_unlock();
+ return -ENOMEM;
+ }
+
+ mhi_netdev->ndev->mtu = mhi_dev->mtu;
+ SET_NETDEV_DEV(mhi_netdev->ndev, &mhi_dev->dev);
+ dev_set_alias(mhi_netdev->ndev, ifalias, strlen(ifalias));
+ mhi_netdev_priv = netdev_priv(mhi_netdev->ndev);
+ mhi_netdev_priv->mhi_netdev = mhi_netdev;
+ rtnl_unlock();
+
+ mhi_netdev->napi = devm_kzalloc(&mhi_dev->dev,
+ sizeof(*mhi_netdev->napi), GFP_KERNEL);
+ if (!mhi_netdev->napi) {
+ ret = -ENOMEM;
+ goto napi_alloc_fail;
+ }
+
+ netif_napi_add(mhi_netdev->ndev, mhi_netdev->napi,
+ mhi_netdev_poll, NAPI_POLL_WEIGHT);
+ ret = register_netdev(mhi_netdev->ndev);
+ if (ret) {
+ MSG_ERR("Network device registration failed\n");
+ goto net_dev_reg_fail;
+ }
+
+ napi_enable(mhi_netdev->napi);
+
+ MSG_LOG("Exited.\n");
+
+ return 0;
+
+net_dev_reg_fail:
+ netif_napi_del(mhi_netdev->napi);
+
+napi_alloc_fail:
+ free_netdev(mhi_netdev->ndev);
+ mhi_netdev->ndev = NULL;
+
+ return ret;
+}
+
+/* UL (tx) completion: account the packet, free the skb queued in
+ * mhi_netdev_xmit(), and restart the tx queue if xmit had stopped it.
+ */
+static void mhi_netdev_xfer_ul_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+ struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev);
+ struct sk_buff *skb = mhi_result->buf_addr;
+ struct net_device *ndev = mhi_netdev->ndev;
+
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ dev_kfree_skb(skb);
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+}
+
+/* Wrap a completed rx page in a zero-copy skb (page becomes a frag)
+ * and push it to the stack. Used only when skb chaining is disabled.
+ * On skb allocation failure the page is dropped.
+ */
+static void mhi_netdev_push_skb(struct mhi_netdev *mhi_netdev,
+ struct mhi_buf *mhi_buf,
+ struct mhi_result *mhi_result)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (!skb) {
+ __free_pages(mhi_buf->page, mhi_netdev->order);
+ return;
+ }
+
+ skb_add_rx_frag(skb, 0, mhi_buf->page, 0,
+ mhi_result->bytes_xferd, mhi_netdev->mru);
+ skb->dev = mhi_netdev->ndev;
+ skb->protocol = mhi_netdev_ip_type_trans(*(u8 *)mhi_buf->buf);
+ netif_receive_skb(skb);
+}
+
+/* DL (rx) completion: unmap/sync the buffer, return recycle buffers to
+ * the pool, then either push a standalone skb or append to the skb
+ * chain flushed by mhi_netdev_poll().
+ *
+ * NOTE(review): a recycle buffer is put back on the pool before the
+ * -ENOTCONN check, so __free_pages() below can run on a page that is
+ * still listed in the pool; presumably the extra page reference taken
+ * in mhi_netdev_queue() keeps the page alive -- verify refcounting.
+ */
+static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+ struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev);
+ struct mhi_netbuf *netbuf = mhi_result->buf_addr;
+ struct mhi_buf *mhi_buf = &netbuf->mhi_buf;
+ struct sk_buff *skb;
+ struct net_device *ndev = mhi_netdev->ndev;
+ struct device *dev = mhi_dev->dev.parent;
+ struct mhi_net_chain *chain = mhi_netdev->chain;
+
+ /* sync for recycle buffers, full unmap for one-shot buffers */
+ netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len, DMA_FROM_DEVICE);
+ if (likely(netbuf->recycle))
+ list_add_tail(&mhi_buf->node, mhi_netdev->recycle_pool);
+
+ /* modem is down, drop the buffer */
+ if (mhi_result->transaction_status == -ENOTCONN) {
+ __free_pages(mhi_buf->page, mhi_netdev->order);
+ return;
+ }
+
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += mhi_result->bytes_xferd;
+
+ if (unlikely(!chain)) {
+ mhi_netdev_push_skb(mhi_netdev, mhi_buf, mhi_result);
+ return;
+ }
+
+ /* we support chaining */
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (likely(skb)) {
+ skb_add_rx_frag(skb, 0, mhi_buf->page, 0,
+ mhi_result->bytes_xferd, mhi_netdev->mru);
+ /* this is first on list */
+ if (!chain->head) {
+ skb->dev = ndev;
+ skb->protocol =
+ mhi_netdev_ip_type_trans(*(u8 *)mhi_buf->buf);
+ chain->head = skb;
+ } else {
+ skb_shinfo(chain->tail)->frag_list = skb;
+ }
+
+ chain->tail = skb;
+ } else {
+ __free_pages(mhi_buf->page, mhi_netdev->order);
+ }
+}
+
+/* Core status callback: only MHI_CB_PENDING_DATA is of interest; it
+ * schedules NAPI to drain the channel.
+ */
+static void mhi_netdev_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb)
+{
+ struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev);
+
+ if (mhi_cb != MHI_CB_PENDING_DATA)
+ return;
+
+ napi_schedule(mhi_netdev->napi);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+struct dentry *dentry;
+
+/* debugfs "stats": one-line dump of pool sizes and buffer counters */
+static int mhi_netdev_debugfs_stats_show(struct seq_file *m, void *d)
+{
+ struct mhi_netdev *mhi_netdev = m->private;
+
+ seq_printf(m,
+ "mru:%u order:%u pool_size:%d, bg_pool_size:%d bg_pool_limit:%d abuf:%u kbuf:%u rbuf:%u\n",
+ mhi_netdev->mru, mhi_netdev->order, mhi_netdev->pool_size,
+ mhi_netdev->bg_pool_size, mhi_netdev->bg_pool_limit,
+ mhi_netdev->abuffers, mhi_netdev->kbuffers,
+ mhi_netdev->rbuffers);
+
+ return 0;
+}
+
+/* debugfs open: bind the seq_file show to the per-device mhi_netdev */
+static int mhi_netdev_debugfs_stats_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_netdev_debugfs_stats_show, inode->i_private);
+}
+
+/* read-only seq_file fops for the "stats" debugfs node */
+static const struct file_operations debugfs_stats = {
+ .open = mhi_netdev_debugfs_stats_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+/* debugfs "chain" write handler: any write disables skb chaining on
+ * this device (and its RSC sibling). The written value is ignored;
+ * chaining cannot be re-enabled at runtime.
+ */
+static int mhi_netdev_debugfs_chain(void *data, u64 val)
+{
+ struct mhi_netdev *mhi_netdev = data;
+ struct mhi_netdev *rsc_dev = mhi_netdev->rsc_dev;
+
+ mhi_netdev->chain = NULL;
+
+ if (rsc_dev)
+ rsc_dev->chain = NULL;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(debugfs_chain, NULL,
+ mhi_netdev_debugfs_chain, "%llu\n");
+
+/* Create the per-device debugfs directory ("stats" and "chain" nodes)
+ * under the driver root; silently does nothing if the root is missing.
+ */
+static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
+{
+ char node_name[32];
+ struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+
+ /* Both tx & rx client handle contain same device info */
+ snprintf(node_name, sizeof(node_name), "%s_%04x_%02u.%02u.%02u_%u",
+ mhi_netdev->interface_name, mhi_dev->dev_id, mhi_dev->domain,
+ mhi_dev->bus, mhi_dev->slot, mhi_netdev->alias);
+
+ if (IS_ERR_OR_NULL(dentry))
+ return;
+
+ mhi_netdev->dentry = debugfs_create_dir(node_name, dentry);
+ if (IS_ERR_OR_NULL(mhi_netdev->dentry))
+ return;
+
+ debugfs_create_file_unsafe("stats", 0444, mhi_netdev->dentry,
+ mhi_netdev, &debugfs_stats);
+ debugfs_create_file_unsafe("chain", 0444, mhi_netdev->dentry,
+ mhi_netdev, &debugfs_chain);
+}
+
+/* Create the driver-level debugfs root directory (module init time) */
+static void mhi_netdev_create_debugfs_dir(void)
+{
+ dentry = debugfs_create_dir(MHI_NETDEV_DRIVER_NAME, 0);
+}
+
+#else
+
+/* Stubs for CONFIG_DEBUG_FS=n builds.
+ *
+ * Fix: the stub previously took "struct mhi_netdev_private *", a type
+ * that does not exist anywhere in this file -- the forward declaration
+ * and all callers use "struct mhi_netdev *" -- so the build broke
+ * whenever debugfs was disabled.
+ */
+static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
+{
+}
+
+static void mhi_netdev_create_debugfs_dir(void)
+{
+}
+
+#endif
+
+/* Driver remove: an RSC child only frees the (shared) pools; the parent
+ * tears down the allocator thread, NAPI, the net_device and debugfs.
+ *
+ * NOTE(review): the parent path never calls mhi_netdev_free_pool(), and
+ * the child frees pools the parent may still reference -- presumably
+ * remove ordering guarantees the child goes last; verify.
+ */
+static void mhi_netdev_remove(struct mhi_device *mhi_dev)
+{
+ struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev);
+
+ MSG_LOG("Remove notification received\n");
+
+ /* rsc parent takes cares of the cleanup */
+ if (mhi_netdev->is_rsc_dev) {
+ mhi_netdev_free_pool(mhi_netdev);
+ return;
+ }
+
+ kthread_stop(mhi_netdev->alloc_task);
+ netif_stop_queue(mhi_netdev->ndev);
+ napi_disable(mhi_netdev->napi);
+ unregister_netdev(mhi_netdev->ndev);
+ netif_napi_del(mhi_netdev->napi);
+ free_netdev(mhi_netdev->ndev);
+
+ if (!IS_ERR_OR_NULL(mhi_netdev->dentry))
+ debugfs_remove_recursive(mhi_netdev->dentry);
+}
+
+/* driver_find_device() predicate: match a device by its DT node */
+static int mhi_netdev_match(struct device *dev, void *data)
+{
+ /* if phandle dt == device dt, we found a match */
+ return (dev->of_node == data);
+}
+
+/* Copy shared state (netdev, NAPI, logging, pools, chain) from an RSC
+ * parent into a child device so both feed the same network interface.
+ */
+static void mhi_netdev_clone_dev(struct mhi_netdev *mhi_netdev,
+ struct mhi_netdev *parent)
+{
+ mhi_netdev->ndev = parent->ndev;
+ mhi_netdev->napi = parent->napi;
+ mhi_netdev->ipc_log = parent->ipc_log;
+ mhi_netdev->msg_lvl = parent->msg_lvl;
+ mhi_netdev->ipc_log_lvl = parent->ipc_log_lvl;
+ mhi_netdev->is_rsc_dev = true;
+ mhi_netdev->chain = parent->chain;
+ mhi_netdev->rsc_parent = parent;
+ mhi_netdev->recycle_pool = parent->recycle_pool;
+ mhi_netdev->bg_pool = parent->bg_pool;
+}
+
+/* Driver probe. Two paths:
+ *  - RSC child ("mhi,rsc-parent" phandle present): clone the parent's
+ *    netdev/pools instead of creating new ones.
+ *  - standalone/parent: create netdev + NAPI, size and allocate the
+ *    recycle pool (~2x ring length, doubled again if a child will
+ *    share), start the background allocator thread, set up ipc log
+ *    and debugfs.
+ * Finally kick NAPI once so the freshly built pools get queued.
+ *
+ * NOTE(review): several error paths leak non-devm allocations (the
+ * recycle pool if bg_pool kmalloc fails, bg_pool if kthread_run fails)
+ * and leave channels prepared -- confirm against the remove path.
+ */
+static int mhi_netdev_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ int ret;
+ struct mhi_netdev *mhi_netdev, *p_netdev = NULL;
+ struct device_node *of_node = mhi_dev->dev.of_node;
+ int nr_tre;
+ char node_name[32];
+ struct device_node *phandle;
+ bool no_chain;
+
+ if (!of_node)
+ return -ENODEV;
+
+ mhi_netdev = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_netdev),
+ GFP_KERNEL);
+ if (!mhi_netdev)
+ return -ENOMEM;
+
+ /* move mhi channels to start state */
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret) {
+ MSG_ERR("Failed to start channels ret %d\n", ret);
+ return ret;
+ }
+
+ mhi_netdev->mhi_dev = mhi_dev;
+ mhi_device_set_devdata(mhi_dev, mhi_netdev);
+
+ ret = of_property_read_u32(of_node, "mhi,mru", &mhi_netdev->mru);
+ if (ret)
+ return -ENODEV;
+
+ /* MRU must be multiplication of page size */
+ mhi_netdev->order = __ilog2_u32(mhi_netdev->mru / PAGE_SIZE);
+ if ((PAGE_SIZE << mhi_netdev->order) < mhi_netdev->mru)
+ return -EINVAL;
+
+ /* check if this device shared by a parent device */
+ phandle = of_parse_phandle(of_node, "mhi,rsc-parent", 0);
+ if (phandle) {
+ struct device *dev;
+ struct mhi_device *pdev;
+ /* find the parent device */
+ dev = driver_find_device(mhi_dev->dev.driver, NULL, phandle,
+ mhi_netdev_match);
+ if (!dev)
+ return -ENODEV;
+
+ /* this device is shared with parent device. so we won't be
+ * creating a new network interface. Clone parent
+ * information to child node
+ */
+ pdev = to_mhi_device(dev);
+ p_netdev = mhi_device_get_devdata(pdev);
+ mhi_netdev_clone_dev(mhi_netdev, p_netdev);
+ /* drop the reference taken by driver_find_device() */
+ put_device(dev);
+ } else {
+ mhi_netdev->msg_lvl = MHI_MSG_LVL_ERROR;
+ no_chain = of_property_read_bool(of_node,
+ "mhi,disable-chain-skb");
+ if (!no_chain) {
+ mhi_netdev->chain = devm_kzalloc(&mhi_dev->dev,
+ sizeof(*mhi_netdev->chain),
+ GFP_KERNEL);
+ if (!mhi_netdev->chain)
+ return -ENOMEM;
+ }
+
+ ret = mhi_netdev_enable_iface(mhi_netdev);
+ if (ret)
+ return ret;
+
+ /* setup pool size ~2x ring length*/
+ nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+ mhi_netdev->pool_size = 1 << __ilog2_u32(nr_tre);
+ if (nr_tre > mhi_netdev->pool_size)
+ mhi_netdev->pool_size <<= 1;
+ mhi_netdev->pool_size <<= 1;
+
+ /* if we expect child device to share then double the pool */
+ if (of_parse_phandle(of_node, "mhi,rsc-child", 0))
+ mhi_netdev->pool_size <<= 1;
+
+ /* allocate memory pool */
+ ret = mhi_netdev_alloc_pool(mhi_netdev);
+ if (ret)
+ return -ENOMEM;
+
+ /* create a background task to allocate memory */
+ mhi_netdev->bg_pool = kmalloc(sizeof(*mhi_netdev->bg_pool),
+ GFP_KERNEL);
+ if (!mhi_netdev->bg_pool)
+ return -ENOMEM;
+
+ init_waitqueue_head(&mhi_netdev->alloc_event);
+ INIT_LIST_HEAD(mhi_netdev->bg_pool);
+ spin_lock_init(&mhi_netdev->bg_lock);
+ mhi_netdev->bg_pool_limit = mhi_netdev->pool_size / 4;
+ mhi_netdev->alloc_task = kthread_run(mhi_netdev_alloc_thread,
+ mhi_netdev,
+ mhi_netdev->ndev->name);
+ if (IS_ERR(mhi_netdev->alloc_task))
+ return PTR_ERR(mhi_netdev->alloc_task);
+
+ /* create ipc log buffer */
+ snprintf(node_name, sizeof(node_name),
+ "%s_%04x_%02u.%02u.%02u_%u",
+ mhi_netdev->interface_name, mhi_dev->dev_id,
+ mhi_dev->domain, mhi_dev->bus, mhi_dev->slot,
+ mhi_netdev->alias);
+ mhi_netdev->ipc_log = ipc_log_context_create(IPC_LOG_PAGES,
+ node_name, 0);
+ mhi_netdev->ipc_log_lvl = IPC_LOG_LVL;
+
+ mhi_netdev_create_debugfs(mhi_netdev);
+ }
+
+ /* link child node with parent node if it's children dev */
+ if (p_netdev)
+ p_netdev->rsc_dev = mhi_netdev;
+
+ /* now we have a pool of buffers allocated, queue to hardware
+ * by triggering a napi_poll
+ */
+ napi_schedule(mhi_netdev->napi);
+
+ return 0;
+}
+
+/* bind to the hardware-accelerated IP channels (and the RSC variant) */
+static const struct mhi_device_id mhi_netdev_match_table[] = {
+ { .chan = "IP_HW0" },
+ { .chan = "IP_HW0_RSC" },
+ {},
+};
+
+static struct mhi_driver mhi_netdev_driver = {
+ .id_table = mhi_netdev_match_table,
+ .probe = mhi_netdev_probe,
+ .remove = mhi_netdev_remove,
+ .ul_xfer_cb = mhi_netdev_xfer_ul_cb,
+ .dl_xfer_cb = mhi_netdev_xfer_dl_cb,
+ .status_cb = mhi_netdev_status_cb,
+ .driver = {
+ .name = "mhi_netdev",
+ .owner = THIS_MODULE,
+ }
+};
+
+/* Module init: enforce the mhi_netbuf size budget (it lives inside the
+ * rx payload page), create the debugfs root, register the driver.
+ */
+static int __init mhi_netdev_init(void)
+{
+ BUILD_BUG_ON(sizeof(struct mhi_netbuf) > MAX_NETBUF_SIZE);
+ mhi_netdev_create_debugfs_dir();
+
+ return mhi_driver_register(&mhi_netdev_driver);
+}
+module_init(mhi_netdev_init);
+
+MODULE_DESCRIPTION("MHI NETDEV Network Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/mhi/devices/mhi_uci.c b/drivers/bus/mhi/devices/mhi_uci.c
new file mode 100644
index 0000000..1a8535a
--- /dev/null
+++ b/drivers/bus/mhi/devices/mhi_uci.c
@@ -0,0 +1,741 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/mhi.h>
+
+#define DEVICE_NAME "mhi"
+#define MHI_UCI_DRIVER_NAME "mhi_uci"
+
+struct uci_chan {
+ wait_queue_head_t wq;
+ spinlock_t lock;
+ struct list_head pending; /* user space waiting to read */
+ struct uci_buf *cur_buf; /* current buffer user space reading */
+ size_t rx_size;
+};
+
+struct uci_buf {
+ void *data;
+ size_t len;
+ struct list_head node;
+};
+
+struct uci_dev {
+ struct list_head node;
+ dev_t devt;
+ struct device *dev;
+ struct mhi_device *mhi_dev;
+ const char *chan;
+ struct mutex mutex; /* sync open and close */
+ struct uci_chan ul_chan;
+ struct uci_chan dl_chan;
+ size_t mtu;
+ int ref_count;
+ bool enabled;
+ u32 tiocm;
+ void *ipc_log;
+};
+
+struct mhi_uci_drv {
+ struct list_head head;
+ struct mutex lock;
+ struct class *class;
+ int major;
+ dev_t dev_t;
+};
+
+static enum MHI_DEBUG_LEVEL msg_lvl = MHI_MSG_LVL_ERROR;
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define IPC_LOG_LVL (MHI_MSG_LVL_VERBOSE)
+#define MHI_UCI_IPC_LOG_PAGES (25)
+
+#else
+
+#define IPC_LOG_LVL (MHI_MSG_LVL_ERROR)
+#define MHI_UCI_IPC_LOG_PAGES (1)
+
+#endif
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define MSG_VERB(fmt, ...) do { \
+ if (msg_lvl <= MHI_MSG_LVL_VERBOSE) \
+ pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \
+ if (uci_dev->ipc_log && (IPC_LOG_LVL <= MHI_MSG_LVL_VERBOSE)) \
+ ipc_log_string(uci_dev->ipc_log, "[D][%s] " fmt, \
+ __func__, ##__VA_ARGS__); \
+ } while (0)
+
+#else
+
+#define MSG_VERB(fmt, ...)
+
+#endif
+
+#define MSG_LOG(fmt, ...) do { \
+ if (msg_lvl <= MHI_MSG_LVL_INFO) \
+ pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \
+ if (uci_dev->ipc_log && (IPC_LOG_LVL <= MHI_MSG_LVL_INFO)) \
+ ipc_log_string(uci_dev->ipc_log, "[I][%s] " fmt, \
+ __func__, ##__VA_ARGS__); \
+ } while (0)
+
+#define MSG_ERR(fmt, ...) do { \
+ if (msg_lvl <= MHI_MSG_LVL_ERROR) \
+ pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
+ if (uci_dev->ipc_log && (IPC_LOG_LVL <= MHI_MSG_LVL_ERROR)) \
+ ipc_log_string(uci_dev->ipc_log, "[E][%s] " fmt, \
+ __func__, ##__VA_ARGS__); \
+ } while (0)
+
+#define MAX_UCI_DEVICES (64)
+
+static DECLARE_BITMAP(uci_minors, MAX_UCI_DEVICES);
+static struct mhi_uci_drv mhi_uci_drv;
+
+static int mhi_queue_inbound(struct uci_dev *uci_dev)
+{
+ struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+ int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+ size_t mtu = uci_dev->mtu;
+ void *buf;
+ struct uci_buf *uci_buf;
+ int ret = -EIO, i;
+
+ for (i = 0; i < nr_trbs; i++) {
+ buf = kmalloc(mtu + sizeof(*uci_buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ uci_buf = buf + mtu;
+ uci_buf->data = buf;
+
+ MSG_VERB("Allocated buf %d of %d size %zx\n", i, nr_trbs, mtu);
+
+ ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu,
+ MHI_EOT);
+ if (ret) {
+ kfree(buf);
+ MSG_ERR("Failed to queue buffer %d\n", i);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static long mhi_uci_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct uci_dev *uci_dev = file->private_data;
+ struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+ struct uci_chan *uci_chan = &uci_dev->dl_chan;
+ long ret = -ERESTARTSYS;
+
+ mutex_lock(&uci_dev->mutex);
+
+ if (cmd == TIOCMGET) {
+ spin_lock_bh(&uci_chan->lock);
+ ret = uci_dev->tiocm;
+ uci_dev->tiocm = 0;
+ spin_unlock_bh(&uci_chan->lock);
+ } else if (uci_dev->enabled) {
+ ret = mhi_ioctl(mhi_dev, cmd, arg);
+ }
+
+ mutex_unlock(&uci_dev->mutex);
+
+ return ret;
+}
+
+static int mhi_uci_release(struct inode *inode, struct file *file)
+{
+ struct uci_dev *uci_dev = file->private_data;
+
+ mutex_lock(&uci_dev->mutex);
+ uci_dev->ref_count--;
+ if (!uci_dev->ref_count) {
+ struct uci_buf *itr, *tmp;
+ struct uci_chan *uci_chan;
+
+ MSG_LOG("Last client left, closing node\n");
+
+ if (uci_dev->enabled)
+ mhi_unprepare_from_transfer(uci_dev->mhi_dev);
+
+ /* clean inbound channel */
+ uci_chan = &uci_dev->dl_chan;
+ list_for_each_entry_safe(itr, tmp, &uci_chan->pending, node) {
+ list_del(&itr->node);
+ kfree(itr->data);
+ }
+ if (uci_chan->cur_buf)
+ kfree(uci_chan->cur_buf->data);
+
+ uci_chan->cur_buf = NULL;
+
+ if (!uci_dev->enabled) {
+ MSG_LOG("Node is deleted, freeing dev node\n");
+ mutex_unlock(&uci_dev->mutex);
+ mutex_destroy(&uci_dev->mutex);
+ clear_bit(MINOR(uci_dev->devt), uci_minors);
+ kfree(uci_dev);
+ return 0;
+ }
+ }
+
+ MSG_LOG("exit: ref_count:%d\n", uci_dev->ref_count);
+
+ mutex_unlock(&uci_dev->mutex);
+
+ return 0;
+}
+
+static unsigned int mhi_uci_poll(struct file *file, poll_table *wait)
+{
+ struct uci_dev *uci_dev = file->private_data;
+ struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+ struct uci_chan *uci_chan;
+ unsigned int mask = 0;
+
+ poll_wait(file, &uci_dev->dl_chan.wq, wait);
+ poll_wait(file, &uci_dev->ul_chan.wq, wait);
+
+ uci_chan = &uci_dev->dl_chan;
+ spin_lock_bh(&uci_chan->lock);
+ if (!uci_dev->enabled) {
+ mask = POLLERR;
+ } else {
+ if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) {
+ MSG_VERB("Client can read from node\n");
+ mask |= POLLIN | POLLRDNORM;
+ }
+
+ if (uci_dev->tiocm) {
+ MSG_VERB("Line status changed\n");
+ mask |= POLLPRI;
+ }
+ }
+ spin_unlock_bh(&uci_chan->lock);
+
+ uci_chan = &uci_dev->ul_chan;
+ spin_lock_bh(&uci_chan->lock);
+ if (!uci_dev->enabled) {
+ mask |= POLLERR;
+ } else if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 0) {
+ MSG_VERB("Client can write to node\n");
+ mask |= POLLOUT | POLLWRNORM;
+ }
+ spin_unlock_bh(&uci_chan->lock);
+
+ MSG_LOG("Client attempted to poll, returning mask 0x%x\n", mask);
+
+ return mask;
+}
+
+static ssize_t mhi_uci_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *offp)
+{
+ struct uci_dev *uci_dev = file->private_data;
+ struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+ struct uci_chan *uci_chan = &uci_dev->ul_chan;
+ size_t bytes_xfered = 0;
+ int ret, nr_avail;
+
+ if (!buf || !count)
+ return -EINVAL;
+
+ /* confirm channel is active */
+ spin_lock_bh(&uci_chan->lock);
+ if (!uci_dev->enabled) {
+ spin_unlock_bh(&uci_chan->lock);
+ return -ERESTARTSYS;
+ }
+
+ MSG_VERB("Enter: to xfer:%zx bytes\n", count);
+
+ while (count) {
+ size_t xfer_size;
+ void *kbuf;
+ enum MHI_FLAGS flags;
+
+ spin_unlock_bh(&uci_chan->lock);
+
+ /* wait for free descriptors */
+ ret = wait_event_interruptible(uci_chan->wq,
+ (!uci_dev->enabled) ||
+ (nr_avail = mhi_get_no_free_descriptors(mhi_dev,
+ DMA_TO_DEVICE)) > 0);
+
+ if (ret == -ERESTARTSYS || !uci_dev->enabled) {
+ MSG_LOG("Exit signal caught for node or not enabled\n");
+ return -ERESTARTSYS;
+ }
+
+ xfer_size = min_t(size_t, count, uci_dev->mtu);
+ kbuf = kmalloc(xfer_size, GFP_KERNEL);
+ if (!kbuf) {
+ MSG_ERR("Failed to allocate memory %zx\n", xfer_size);
+ return -ENOMEM;
+ }
+
+ ret = copy_from_user(kbuf, buf, xfer_size);
+ if (unlikely(ret)) {
+ kfree(kbuf);
+			return -EFAULT;
+ }
+
+ spin_lock_bh(&uci_chan->lock);
+
+ /* if ring is full after this force EOT */
+ if (nr_avail > 1 && (count - xfer_size))
+ flags = MHI_CHAIN;
+ else
+ flags = MHI_EOT;
+
+ if (uci_dev->enabled)
+ ret = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, kbuf,
+ xfer_size, flags);
+ else
+ ret = -ERESTARTSYS;
+
+ if (ret) {
+ kfree(kbuf);
+ goto sys_interrupt;
+ }
+
+ bytes_xfered += xfer_size;
+ count -= xfer_size;
+ buf += xfer_size;
+ }
+
+ spin_unlock_bh(&uci_chan->lock);
+ MSG_VERB("Exit: Number of bytes xferred:%zx\n", bytes_xfered);
+
+ return bytes_xfered;
+
+sys_interrupt:
+ spin_unlock_bh(&uci_chan->lock);
+
+ return ret;
+}
+
+static ssize_t mhi_uci_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct uci_dev *uci_dev = file->private_data;
+ struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+ struct uci_chan *uci_chan = &uci_dev->dl_chan;
+ struct uci_buf *uci_buf;
+ char *ptr;
+ size_t to_copy;
+ int ret = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ MSG_VERB("Client provided buf len:%zx\n", count);
+
+ /* confirm channel is active */
+ spin_lock_bh(&uci_chan->lock);
+ if (!uci_dev->enabled) {
+ spin_unlock_bh(&uci_chan->lock);
+ return -ERESTARTSYS;
+ }
+
+ /* No data available to read, wait */
+ if (!uci_chan->cur_buf && list_empty(&uci_chan->pending)) {
+ MSG_VERB("No data available to read waiting\n");
+
+ spin_unlock_bh(&uci_chan->lock);
+ ret = wait_event_interruptible(uci_chan->wq,
+ (!uci_dev->enabled ||
+ !list_empty(&uci_chan->pending)));
+ if (ret == -ERESTARTSYS) {
+ MSG_LOG("Exit signal caught for node\n");
+ return -ERESTARTSYS;
+ }
+
+ spin_lock_bh(&uci_chan->lock);
+ if (!uci_dev->enabled) {
+ MSG_LOG("node is disabled\n");
+ ret = -ERESTARTSYS;
+ goto read_error;
+ }
+ }
+
+ /* new read, get the next descriptor from the list */
+ if (!uci_chan->cur_buf) {
+ uci_buf = list_first_entry_or_null(&uci_chan->pending,
+ struct uci_buf, node);
+ if (unlikely(!uci_buf)) {
+ ret = -EIO;
+ goto read_error;
+ }
+
+ list_del(&uci_buf->node);
+ uci_chan->cur_buf = uci_buf;
+ uci_chan->rx_size = uci_buf->len;
+ MSG_VERB("Got pkt of size:%zu\n", uci_chan->rx_size);
+ }
+
+ uci_buf = uci_chan->cur_buf;
+ spin_unlock_bh(&uci_chan->lock);
+
+ /* Copy the buffer to user space */
+ to_copy = min_t(size_t, count, uci_chan->rx_size);
+ ptr = uci_buf->data + (uci_buf->len - uci_chan->rx_size);
+ ret = copy_to_user(buf, ptr, to_copy);
+ if (ret)
+		return -EFAULT;
+
+ MSG_VERB("Copied %zx of %zx bytes\n", to_copy, uci_chan->rx_size);
+ uci_chan->rx_size -= to_copy;
+
+ /* we finished with this buffer, queue it back to hardware */
+ if (!uci_chan->rx_size) {
+ spin_lock_bh(&uci_chan->lock);
+ uci_chan->cur_buf = NULL;
+
+ if (uci_dev->enabled)
+ ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
+ uci_buf->data, uci_dev->mtu,
+ MHI_EOT);
+ else
+ ret = -ERESTARTSYS;
+
+ if (ret) {
+ MSG_ERR("Failed to recycle element\n");
+ kfree(uci_buf->data);
+ goto read_error;
+ }
+
+ spin_unlock_bh(&uci_chan->lock);
+ }
+
+ MSG_VERB("Returning %zx bytes\n", to_copy);
+
+ return to_copy;
+
+read_error:
+ spin_unlock_bh(&uci_chan->lock);
+
+ return ret;
+}
+
+static int mhi_uci_open(struct inode *inode, struct file *filp)
+{
+ struct uci_dev *uci_dev = NULL, *tmp_dev;
+ int ret = -EIO;
+ struct uci_buf *buf_itr, *tmp;
+ struct uci_chan *dl_chan;
+
+ mutex_lock(&mhi_uci_drv.lock);
+ list_for_each_entry(tmp_dev, &mhi_uci_drv.head, node) {
+ if (tmp_dev->devt == inode->i_rdev) {
+ uci_dev = tmp_dev;
+ break;
+ }
+ }
+
+ /* could not find a minor node */
+ if (!uci_dev)
+ goto error_exit;
+
+ mutex_lock(&uci_dev->mutex);
+ if (!uci_dev->enabled) {
+ MSG_ERR("Node exist, but not in active state!\n");
+ goto error_open_chan;
+ }
+
+ uci_dev->ref_count++;
+
+ MSG_LOG("Node open, ref counts %u\n", uci_dev->ref_count);
+
+ if (uci_dev->ref_count == 1) {
+ MSG_LOG("Starting channel\n");
+ ret = mhi_prepare_for_transfer(uci_dev->mhi_dev);
+ if (ret) {
+ MSG_ERR("Error starting transfer channels\n");
+ uci_dev->ref_count--;
+ goto error_open_chan;
+ }
+
+ ret = mhi_queue_inbound(uci_dev);
+ if (ret)
+ goto error_rx_queue;
+ }
+
+ filp->private_data = uci_dev;
+ mutex_unlock(&uci_dev->mutex);
+ mutex_unlock(&mhi_uci_drv.lock);
+
+ return 0;
+
+ error_rx_queue:
+ dl_chan = &uci_dev->dl_chan;
+ mhi_unprepare_from_transfer(uci_dev->mhi_dev);
+ list_for_each_entry_safe(buf_itr, tmp, &dl_chan->pending, node) {
+ list_del(&buf_itr->node);
+ kfree(buf_itr->data);
+ }
+
+ error_open_chan:
+ mutex_unlock(&uci_dev->mutex);
+
+error_exit:
+ mutex_unlock(&mhi_uci_drv.lock);
+
+ return ret;
+}
+
+static const struct file_operations mhidev_fops = {
+ .open = mhi_uci_open,
+ .release = mhi_uci_release,
+ .read = mhi_uci_read,
+ .write = mhi_uci_write,
+ .poll = mhi_uci_poll,
+ .unlocked_ioctl = mhi_uci_ioctl,
+};
+
+static void mhi_uci_remove(struct mhi_device *mhi_dev)
+{
+ struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
+
+ MSG_LOG("Enter\n");
+
+
+ mutex_lock(&mhi_uci_drv.lock);
+ mutex_lock(&uci_dev->mutex);
+
+ /* disable the node */
+ spin_lock_irq(&uci_dev->dl_chan.lock);
+ spin_lock_irq(&uci_dev->ul_chan.lock);
+ uci_dev->enabled = false;
+ spin_unlock_irq(&uci_dev->ul_chan.lock);
+ spin_unlock_irq(&uci_dev->dl_chan.lock);
+ wake_up(&uci_dev->dl_chan.wq);
+ wake_up(&uci_dev->ul_chan.wq);
+
+ /* delete the node to prevent new opens */
+ device_destroy(mhi_uci_drv.class, uci_dev->devt);
+ uci_dev->dev = NULL;
+ list_del(&uci_dev->node);
+
+ /* safe to free memory only if all file nodes are closed */
+ if (!uci_dev->ref_count) {
+ mutex_unlock(&uci_dev->mutex);
+ mutex_destroy(&uci_dev->mutex);
+ clear_bit(MINOR(uci_dev->devt), uci_minors);
+ kfree(uci_dev);
+ mutex_unlock(&mhi_uci_drv.lock);
+ return;
+ }
+
+ MSG_LOG("Exit\n");
+ mutex_unlock(&uci_dev->mutex);
+ mutex_unlock(&mhi_uci_drv.lock);
+
+}
+
+static int mhi_uci_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ struct uci_dev *uci_dev;
+ int minor;
+ char node_name[32];
+ int dir;
+
+ uci_dev = kzalloc(sizeof(*uci_dev), GFP_KERNEL);
+ if (!uci_dev)
+ return -ENOMEM;
+
+ mutex_init(&uci_dev->mutex);
+ uci_dev->mhi_dev = mhi_dev;
+
+ minor = find_first_zero_bit(uci_minors, MAX_UCI_DEVICES);
+ if (minor >= MAX_UCI_DEVICES) {
+ kfree(uci_dev);
+ return -ENOSPC;
+ }
+
+ mutex_lock(&uci_dev->mutex);
+ mutex_lock(&mhi_uci_drv.lock);
+
+ uci_dev->devt = MKDEV(mhi_uci_drv.major, minor);
+ uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
+ uci_dev->devt, uci_dev,
+ DEVICE_NAME "_%04x_%02u.%02u.%02u%s%d",
+ mhi_dev->dev_id, mhi_dev->domain,
+ mhi_dev->bus, mhi_dev->slot, "_pipe_",
+ mhi_dev->ul_chan_id);
+ set_bit(minor, uci_minors);
+
+ /* create debugging buffer */
+ snprintf(node_name, sizeof(node_name), "mhi_uci_%04x_%02u.%02u.%02u_%d",
+ mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot,
+ mhi_dev->ul_chan_id);
+ uci_dev->ipc_log = ipc_log_context_create(MHI_UCI_IPC_LOG_PAGES,
+ node_name, 0);
+
+ for (dir = 0; dir < 2; dir++) {
+ struct uci_chan *uci_chan = (dir) ?
+ &uci_dev->ul_chan : &uci_dev->dl_chan;
+ spin_lock_init(&uci_chan->lock);
+ init_waitqueue_head(&uci_chan->wq);
+ INIT_LIST_HEAD(&uci_chan->pending);
+	}
+
+ uci_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu);
+ mhi_device_set_devdata(mhi_dev, uci_dev);
+ uci_dev->enabled = true;
+
+ list_add(&uci_dev->node, &mhi_uci_drv.head);
+ mutex_unlock(&mhi_uci_drv.lock);
+ mutex_unlock(&uci_dev->mutex);
+
+ MSG_LOG("channel:%s successfully probed\n", mhi_dev->chan_name);
+
+ return 0;
+}
+
+static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+ struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
+ struct uci_chan *uci_chan = &uci_dev->ul_chan;
+
+ MSG_VERB("status:%d xfer_len:%zu\n", mhi_result->transaction_status,
+ mhi_result->bytes_xferd);
+
+ kfree(mhi_result->buf_addr);
+ if (!mhi_result->transaction_status)
+ wake_up(&uci_chan->wq);
+}
+
+static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+ struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
+ struct uci_chan *uci_chan = &uci_dev->dl_chan;
+ unsigned long flags;
+ struct uci_buf *buf;
+
+ MSG_VERB("status:%d receive_len:%zu\n", mhi_result->transaction_status,
+ mhi_result->bytes_xferd);
+
+ if (mhi_result->transaction_status == -ENOTCONN) {
+ kfree(mhi_result->buf_addr);
+ return;
+ }
+
+ spin_lock_irqsave(&uci_chan->lock, flags);
+ buf = mhi_result->buf_addr + uci_dev->mtu;
+ buf->data = mhi_result->buf_addr;
+ buf->len = mhi_result->bytes_xferd;
+ list_add_tail(&buf->node, &uci_chan->pending);
+ spin_unlock_irqrestore(&uci_chan->lock, flags);
+
+ if (mhi_dev->dev.power.wakeup)
+ __pm_wakeup_event(mhi_dev->dev.power.wakeup, 0);
+
+ wake_up(&uci_chan->wq);
+}
+
+static void mhi_status_cb(struct mhi_device *mhi_dev, enum MHI_CB reason)
+{
+ struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
+ struct uci_chan *uci_chan = &uci_dev->dl_chan;
+ unsigned long flags;
+
+ if (reason == MHI_CB_DTR_SIGNAL) {
+ spin_lock_irqsave(&uci_chan->lock, flags);
+ uci_dev->tiocm = mhi_dev->tiocm;
+ spin_unlock_irqrestore(&uci_chan->lock, flags);
+ wake_up(&uci_chan->wq);
+ }
+}
+
+/* .driver_data stores max mtu */
+static const struct mhi_device_id mhi_uci_match_table[] = {
+ { .chan = "LOOPBACK", .driver_data = 0x1000 },
+ { .chan = "SAHARA", .driver_data = 0x8000 },
+ { .chan = "EFS", .driver_data = 0x1000 },
+ { .chan = "QMI0", .driver_data = 0x1000 },
+ { .chan = "QMI1", .driver_data = 0x1000 },
+ { .chan = "TF", .driver_data = 0x1000 },
+ { .chan = "DUN", .driver_data = 0x1000 },
+ {},
+};
+
+static struct mhi_driver mhi_uci_driver = {
+ .id_table = mhi_uci_match_table,
+ .remove = mhi_uci_remove,
+ .probe = mhi_uci_probe,
+ .ul_xfer_cb = mhi_ul_xfer_cb,
+ .dl_xfer_cb = mhi_dl_xfer_cb,
+ .status_cb = mhi_status_cb,
+ .driver = {
+ .name = MHI_UCI_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int mhi_uci_init(void)
+{
+ int ret;
+
+ ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops);
+ if (ret < 0)
+ return ret;
+
+ mhi_uci_drv.major = ret;
+ mhi_uci_drv.class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME);
+ if (IS_ERR(mhi_uci_drv.class))
+ return -ENODEV;
+
+ mutex_init(&mhi_uci_drv.lock);
+ INIT_LIST_HEAD(&mhi_uci_drv.head);
+
+ ret = mhi_driver_register(&mhi_uci_driver);
+ if (ret)
+ class_destroy(mhi_uci_drv.class);
+
+ return ret;
+}
+
+module_init(mhi_uci_init);
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("MHI_UCI");
+MODULE_DESCRIPTION("MHI UCI Driver");
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
index 3a1c92e..646f247 100644
--- a/drivers/char/diag/diag_usb.c
+++ b/drivers/char/diag/diag_usb.c
@@ -636,8 +636,8 @@
INIT_WORK(&(ch->read_done_work), usb_read_done_work_fn);
INIT_WORK(&(ch->connect_work), usb_connect_work_fn);
INIT_WORK(&(ch->disconnect_work), usb_disconnect_work_fn);
- strlcpy(wq_name, "DIAG_USB_", DIAG_USB_STRING_SZ);
- strlcat(wq_name, ch->name, sizeof(ch->name));
+ strlcpy(wq_name, "DIAG_USB_", sizeof(wq_name));
+ strlcat(wq_name, ch->name, sizeof(wq_name));
ch->usb_wq = create_singlethread_workqueue(wq_name);
if (!ch->usb_wq)
goto err;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index ec63680..687b34c 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -4292,7 +4292,7 @@
pr_debug("diagchar initializing ..\n");
driver->num = 1;
driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
- strlcpy(driver->name, "diag", 4);
+ strlcpy(driver->name, "diag", 5);
/* Get major number from kernel and initialize */
ret = alloc_chrdev_region(&dev, driver->minor_start,
driver->num, driver->name);
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
index 5de7897..3e0bdac 100644
--- a/drivers/char/diag/diagfwd_bridge.c
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -176,8 +176,8 @@
if (!ch->dci_read_buf)
return -ENOMEM;
ch->dci_read_len = 0;
- strlcpy(wq_name, "diag_dci_", 10);
- strlcat(wq_name, ch->name, sizeof(ch->name));
+ strlcpy(wq_name, "diag_dci_", sizeof(wq_name));
+ strlcat(wq_name, ch->name, sizeof(wq_name));
INIT_WORK(&(ch->dci_read_work), bridge_dci_read_work_fn);
ch->dci_wq = create_singlethread_workqueue(wq_name);
if (!ch->dci_wq) {
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 7f27aff..76d091a 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -845,6 +845,7 @@
uint8_t pd_val, uint8_t peripheral)
{
struct diag_id_tbl_t *new_item = NULL;
+ int process_len = 0;
if (!process_name || diag_id == 0) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -857,7 +858,8 @@
if (!new_item)
return -ENOMEM;
kmemleak_not_leak(new_item);
- new_item->process_name = kzalloc(strlen(process_name) + 1, GFP_KERNEL);
+ process_len = strlen(process_name);
+ new_item->process_name = kzalloc(process_len + 1, GFP_KERNEL);
if (!new_item->process_name) {
kfree(new_item);
new_item = NULL;
@@ -867,7 +869,7 @@
new_item->diag_id = diag_id;
new_item->pd_val = pd_val;
new_item->peripheral = peripheral;
- strlcpy(new_item->process_name, process_name, strlen(process_name) + 1);
+ strlcpy(new_item->process_name, process_name, process_len + 1);
INIT_LIST_HEAD(&new_item->link);
mutex_lock(&driver->diag_id_mutex);
list_add_tail(&new_item->link, &driver->diag_id_list);
@@ -969,7 +971,7 @@
ctrl_pkt.pkt_id = DIAG_CTRL_MSG_DIAGID;
ctrl_pkt.version = 1;
strlcpy((char *)&ctrl_pkt.process_name, process_name,
- strlen(process_name) + 1);
+ sizeof(ctrl_pkt.process_name));
ctrl_pkt.len = sizeof(ctrl_pkt.diag_id) + sizeof(ctrl_pkt.version) +
strlen(process_name) + 1;
err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, ctrl_pkt.len +
diff --git a/drivers/char/diag/diagfwd_hsic.c b/drivers/char/diag/diagfwd_hsic.c
index 81afcae..f54d29f 100644
--- a/drivers/char/diag/diagfwd_hsic.c
+++ b/drivers/char/diag/diagfwd_hsic.c
@@ -407,8 +407,8 @@
INIT_WORK(&(ch->read_work), hsic_read_work_fn);
INIT_WORK(&(ch->open_work), hsic_open_work_fn);
INIT_WORK(&(ch->close_work), hsic_close_work_fn);
- strlcpy(wq_name, "DIAG_HSIC_", DIAG_HSIC_STRING_SZ);
- strlcat(wq_name, ch->name, sizeof(ch->name));
+ strlcpy(wq_name, "DIAG_HSIC_", sizeof(wq_name));
+ strlcat(wq_name, ch->name, sizeof(wq_name));
ch->hsic_wq = create_singlethread_workqueue(wq_name);
if (!ch->hsic_wq)
goto fail;
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
index 6f41868..59ed809 100644
--- a/drivers/char/diag/diagfwd_mhi.c
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -695,8 +695,8 @@
INIT_WORK(&(mhi_info->read_done_work), mhi_read_done_work_fn);
INIT_WORK(&(mhi_info->open_work), mhi_open_work_fn);
INIT_WORK(&(mhi_info->close_work), mhi_close_work_fn);
- strlcpy(wq_name, "diag_mhi_", DIAG_MHI_STRING_SZ);
- strlcat(wq_name, mhi_info->name, sizeof(mhi_info->name));
+ strlcpy(wq_name, "diag_mhi_", sizeof(wq_name));
+ strlcat(wq_name, mhi_info->name, sizeof(wq_name));
diagmem_init(driver, mhi_info->mempool);
mhi_info->mempool_init = 1;
mhi_info->mhi_wq = create_singlethread_workqueue(wq_name);
diff --git a/drivers/char/diag/diagfwd_smux.c b/drivers/char/diag/diagfwd_smux.c
index 33f91d1..9646133 100644
--- a/drivers/char/diag/diagfwd_smux.c
+++ b/drivers/char/diag/diagfwd_smux.c
@@ -286,8 +286,8 @@
for (i = 0; i < NUM_SMUX_DEV; i++) {
ch = &diag_smux[i];
- strlcpy(wq_name, "DIAG_SMUX_", 11);
- strlcat(wq_name, ch->name, sizeof(ch->name));
+ strlcpy(wq_name, "DIAG_SMUX_", sizeof(wq_name));
+ strlcat(wq_name, ch->name, sizeof(wq_name));
ch->smux_wq = create_singlethread_workqueue(wq_name);
if (!ch->smux_wq) {
err = -ENOMEM;
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index 3451aba..664e881 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -769,8 +769,8 @@
info->data_ready = 0;
atomic_set(&info->flow_cnt, 0);
spin_lock_init(&info->lock);
- strlcpy(wq_name, "DIAG_SOCKET_", 10);
- strlcat(wq_name, info->name, sizeof(info->name));
+ strlcpy(wq_name, "DIAG_SOCKET_", sizeof(wq_name));
+	strlcat(wq_name, info->name, sizeof(wq_name));
init_waitqueue_head(&info->read_wait_q);
info->wq = create_singlethread_workqueue(wq_name);
if (!info->wq) {
diff --git a/drivers/clk/msm/clock-cpu-8939.c b/drivers/clk/msm/clock-cpu-8939.c
index 5b8b8f3..1bb7e5d 100644
--- a/drivers/clk/msm/clock-cpu-8939.c
+++ b/drivers/clk/msm/clock-cpu-8939.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016, 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1081,7 +1081,7 @@
/* Wait for update to take effect */
for (count = 500; count > 0; count--) {
- if (!(readl_relaxed(base)) & BIT(0))
+ if (!(readl_relaxed(base) & BIT(0)))
break;
udelay(1);
}
diff --git a/drivers/clk/msm/clock-cpu-sdm632.c b/drivers/clk/msm/clock-cpu-sdm632.c
index 58a2520..dbba36f 100644
--- a/drivers/clk/msm/clock-cpu-sdm632.c
+++ b/drivers/clk/msm/clock-cpu-sdm632.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1104,7 +1104,7 @@
/* Wait for update to take effect */
for (count = 500; count > 0; count--) {
- if (!(readl_relaxed(base)) & BIT(0))
+ if (!(readl_relaxed(base) & BIT(0)))
break;
udelay(1);
}
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index f53d1ee..48ea05d1 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -76,7 +76,7 @@
struct osm_entry {
u16 virtual_corner;
u16 open_loop_volt;
- long frequency;
+ unsigned long frequency;
u32 lval;
u16 ccount;
};
@@ -143,7 +143,8 @@
return (req <= new && new < best) || (best < req && best < new);
}
-static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
+static int clk_osm_search_table(struct osm_entry *table, int entries,
+ unsigned long rate)
{
int index;
@@ -173,21 +174,21 @@
return c->rate;
}
-static long clk_osm_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_osm_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
unsigned long rrate = 0;
-
- if (!hw)
- return -EINVAL;
+ unsigned long rate = req->rate;
/*
* If the rate passed in is 0, return the first frequency in the
* FMAX table.
*/
- if (!rate)
- return hw->init->rate_max[0];
+ if (!rate) {
+ req->rate = hw->init->rate_max[0];
+ return 0;
+ }
for (i = 0; i < hw->init->num_rate_max; i++) {
if (is_better_rate(rate, rrate, hw->init->rate_max[i])) {
@@ -197,10 +198,12 @@
}
}
- pr_debug("%s: rate %lu, rrate %ld, Rate max %ld\n", __func__, rate,
- rrate, hw->init->rate_max[i]);
+ req->rate = rrate;
- return rrate;
+ pr_debug("clk:%s rate %lu, rrate %lu, Rate max %lu index %u\n",
+ hw->init->name, rate, rrate, hw->init->rate_max[i], i);
+
+ return 0;
}
static int clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -282,40 +285,40 @@
return parent->osm_table[index].frequency;
}
-static long clk_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_cpu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+ unsigned long rate = req->rate;
- if (!parent_hw)
- return -EINVAL;
+ req->rate = clk_hw_round_rate(parent_hw, rate);
- *parent_rate = rate;
- return clk_hw_round_rate(parent_hw, rate);
+ return 0;
}
static int l3_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_osm *cpuclk = to_clk_osm(hw);
+ struct clk_rate_request req;
int index = 0;
- unsigned long r_rate;
int count = 40;
u32 curr_lval;
if (!cpuclk)
return -EINVAL;
- r_rate = clk_osm_round_rate(hw, rate, NULL);
+ req.rate = rate;
+ clk_osm_determine_rate(hw, &req);
- if (rate != r_rate) {
+ if (rate != req.rate) {
pr_err("invalid requested rate=%ld\n", rate);
return -EINVAL;
}
/* Convert rate to table index */
index = clk_osm_search_table(cpuclk->osm_table,
- cpuclk->num_entries, r_rate);
+ cpuclk->num_entries, req.rate);
if (index < 0) {
pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
return -EINVAL;
@@ -370,7 +373,7 @@
}
static const struct clk_ops clk_ops_l3_osm = {
- .round_rate = clk_osm_round_rate,
+ .determine_rate = clk_osm_determine_rate,
.list_rate = clk_osm_list_rate,
.recalc_rate = l3_clk_recalc_rate,
.set_rate = l3_clk_set_rate,
@@ -379,23 +382,23 @@
static const struct clk_ops clk_ops_pwrcl_core = {
.set_rate = clk_pwrcl_set_rate,
- .round_rate = clk_cpu_round_rate,
+ .determine_rate = clk_cpu_determine_rate,
.recalc_rate = clk_cpu_recalc_rate,
.debug_init = clk_debug_measure_add,
};
static const struct clk_ops clk_ops_perfcl_core = {
.set_rate = clk_cpu_set_rate,
- .round_rate = clk_cpu_round_rate,
+ .determine_rate = clk_cpu_determine_rate,
.recalc_rate = clk_cpu_recalc_rate,
.debug_init = clk_debug_measure_add,
};
static const struct clk_ops clk_ops_cpu_osm = {
.set_rate = clk_osm_set_rate,
- .round_rate = clk_osm_round_rate,
.recalc_rate = clk_osm_recalc_rate,
.list_rate = clk_osm_list_rate,
+ .determine_rate = clk_osm_determine_rate,
.debug_init = clk_debug_measure_add,
};
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index fe7d9ed..b0a18bc 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -59,10 +59,8 @@
u32 delay_num = 0;
/* See the comment for rockchip_mmc_set_phase below */
- if (!rate) {
- pr_err("%s: invalid clk rate\n", __func__);
+ if (!rate)
return -EINVAL;
- }
raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
diff --git a/drivers/crypto/msm/Makefile b/drivers/crypto/msm/Makefile
index b712fc1..19635d7 100644
--- a/drivers/crypto/msm/Makefile
+++ b/drivers/crypto/msm/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += qce50.o
-obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev.o
-obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev_smmu.o
+qcedevice-objs := qcedev_smmu.o qcedev.o
+obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedevice.o
obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o
obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o
obj-$(CONFIG_CRYPTO_DEV_QCOM_ICE) += ice.o
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index e98bb06..1ef5382 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -1,7 +1,7 @@
/*
* QTI Crypto Engine driver.
*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -6105,6 +6105,7 @@
dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
pce_dev->coh_vmem, pce_dev->coh_pmem);
err_iobase:
+ arm_iommu_detach_device(pce_dev->pdev);
if (pce_dev->enable_s1_smmu)
qce_iommu_release_iomapping(pce_dev);
@@ -6138,6 +6139,7 @@
pce_dev->coh_vmem, pce_dev->coh_pmem);
kfree(pce_dev->dummyreq_in_buf);
kfree(pce_dev->iovec_vmem);
+ arm_iommu_detach_device(pce_dev->pdev);
if (pce_dev->enable_s1_smmu)
qce_iommu_release_iomapping(pce_dev);
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index d58144f..db1d3d8 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1,7 +1,7 @@
/*
* QTI CE device driver.
*
- * Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -60,14 +60,14 @@
static DEFINE_MUTEX(qcedev_sent_bw_req);
static DEFINE_MUTEX(hash_access_lock);
-MODULE_DEVICE_TABLE(of, qcedev_match);
-
static const struct of_device_id qcedev_match[] = {
{ .compatible = "qcom,qcedev"},
{ .compatible = "qcom,qcedev,context-bank"},
{}
};
+MODULE_DEVICE_TABLE(of, qcedev_match);
+
static int qcedev_control_clocks(struct qcedev_control *podev, bool enable)
{
unsigned int control_flag;
@@ -1668,107 +1668,131 @@
int err = 0;
struct qcedev_handle *handle;
struct qcedev_control *podev;
- struct qcedev_async_req qcedev_areq;
+ struct qcedev_async_req *qcedev_areq;
struct qcedev_stat *pstat;
+ qcedev_areq = kzalloc(sizeof(struct qcedev_async_req), GFP_KERNEL);
+ if (!qcedev_areq)
+ return -ENOMEM;
+
handle = file->private_data;
podev = handle->cntl;
- qcedev_areq.handle = handle;
+ qcedev_areq->handle = handle;
if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
pr_err("%s: invalid handle %pK\n",
__func__, podev);
- return -ENOENT;
+ err = -ENOENT;
+ goto exit_free_qcedev_areq;
}
/* Verify user arguments. */
- if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
- return -ENOTTY;
+ if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC) {
+ err = -ENOTTY;
+ goto exit_free_qcedev_areq;
+ }
- init_completion(&qcedev_areq.complete);
+ init_completion(&qcedev_areq->complete);
pstat = &_qcedev_stat;
switch (cmd) {
case QCEDEV_IOCTL_ENC_REQ:
case QCEDEV_IOCTL_DEC_REQ:
- if (copy_from_user(&qcedev_areq.cipher_op_req,
+ if (copy_from_user(&qcedev_areq->cipher_op_req,
(void __user *)arg,
- sizeof(struct qcedev_cipher_op_req)))
- return -EFAULT;
- qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;
+ sizeof(struct qcedev_cipher_op_req))) {
+ err = -EFAULT;
+ goto exit_free_qcedev_areq;
+ }
+ qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_CIPHER;
- if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
- podev))
- return -EINVAL;
+ if (qcedev_check_cipher_params(&qcedev_areq->cipher_op_req,
+ podev)) {
+ err = -EINVAL;
+ goto exit_free_qcedev_areq;
+ }
- err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
+ err = qcedev_vbuf_ablk_cipher(qcedev_areq, handle);
if (err)
- return err;
+ goto exit_free_qcedev_areq;
if (copy_to_user((void __user *)arg,
- &qcedev_areq.cipher_op_req,
- sizeof(struct qcedev_cipher_op_req)))
- return -EFAULT;
+ &qcedev_areq->cipher_op_req,
+ sizeof(struct qcedev_cipher_op_req))) {
+ err = -EFAULT;
+ goto exit_free_qcedev_areq;
+ }
break;
case QCEDEV_IOCTL_SHA_INIT_REQ:
{
struct scatterlist sg_src;
- if (copy_from_user(&qcedev_areq.sha_op_req,
+ if (copy_from_user(&qcedev_areq->sha_op_req,
(void __user *)arg,
- sizeof(struct qcedev_sha_op_req)))
- return -EFAULT;
- mutex_lock(&hash_access_lock);
- if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
- mutex_unlock(&hash_access_lock);
- return -EINVAL;
+ sizeof(struct qcedev_sha_op_req))) {
+ err = -EFAULT;
+ goto exit_free_qcedev_areq;
}
- qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
- err = qcedev_hash_init(&qcedev_areq, handle, &sg_src);
+ mutex_lock(&hash_access_lock);
+ if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
+ mutex_unlock(&hash_access_lock);
+ err = -EINVAL;
+ goto exit_free_qcedev_areq;
+ }
+ qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
+ err = qcedev_hash_init(qcedev_areq, handle, &sg_src);
if (err) {
mutex_unlock(&hash_access_lock);
- return err;
+ goto exit_free_qcedev_areq;
}
mutex_unlock(&hash_access_lock);
- if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
- sizeof(struct qcedev_sha_op_req)))
- return -EFAULT;
+ if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
+ sizeof(struct qcedev_sha_op_req))) {
+ err = -EFAULT;
+ goto exit_free_qcedev_areq;
}
handle->sha_ctxt.init_done = true;
+ }
break;
case QCEDEV_IOCTL_GET_CMAC_REQ:
- if (!podev->ce_support.cmac)
- return -ENOTTY;
+ if (!podev->ce_support.cmac) {
+ err = -ENOTTY;
+ goto exit_free_qcedev_areq;
+ }
case QCEDEV_IOCTL_SHA_UPDATE_REQ:
{
struct scatterlist sg_src;
- if (copy_from_user(&qcedev_areq.sha_op_req,
+ if (copy_from_user(&qcedev_areq->sha_op_req,
(void __user *)arg,
- sizeof(struct qcedev_sha_op_req)))
- return -EFAULT;
- mutex_lock(&hash_access_lock);
- if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
- mutex_unlock(&hash_access_lock);
- return -EINVAL;
+ sizeof(struct qcedev_sha_op_req))) {
+ err = -EFAULT;
+ goto exit_free_qcedev_areq;
}
- qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+ mutex_lock(&hash_access_lock);
+ if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
+ mutex_unlock(&hash_access_lock);
+ err = -EINVAL;
+ goto exit_free_qcedev_areq;
+ }
+ qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
- if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
- err = qcedev_hash_cmac(&qcedev_areq, handle, &sg_src);
+ if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
+ err = qcedev_hash_cmac(qcedev_areq, handle, &sg_src);
if (err) {
mutex_unlock(&hash_access_lock);
- return err;
+ goto exit_free_qcedev_areq;
}
} else {
if (handle->sha_ctxt.init_done == false) {
pr_err("%s Init was not called\n", __func__);
mutex_unlock(&hash_access_lock);
- return -EINVAL;
+ err = -EINVAL;
+ goto exit_free_qcedev_areq;
}
- err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
+ err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
if (err) {
mutex_unlock(&hash_access_lock);
- return err;
+ goto exit_free_qcedev_areq;
}
}
@@ -1776,15 +1800,17 @@
pr_err("Invalid sha_ctxt.diglen %d\n",
handle->sha_ctxt.diglen);
mutex_unlock(&hash_access_lock);
- return -EINVAL;
+ err = -EINVAL;
+ goto exit_free_qcedev_areq;
}
- memcpy(&qcedev_areq.sha_op_req.digest[0],
+ memcpy(&qcedev_areq->sha_op_req.digest[0],
&handle->sha_ctxt.digest[0],
handle->sha_ctxt.diglen);
mutex_unlock(&hash_access_lock);
- if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+ if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
sizeof(struct qcedev_sha_op_req)))
- return -EFAULT;
+ err = -EFAULT;
+ goto exit_free_qcedev_areq;
}
break;
@@ -1792,37 +1818,44 @@
if (handle->sha_ctxt.init_done == false) {
pr_err("%s Init was not called\n", __func__);
- return -EINVAL;
+ err = -EINVAL;
+ goto exit_free_qcedev_areq;
}
- if (copy_from_user(&qcedev_areq.sha_op_req,
+ if (copy_from_user(&qcedev_areq->sha_op_req,
(void __user *)arg,
- sizeof(struct qcedev_sha_op_req)))
- return -EFAULT;
- mutex_lock(&hash_access_lock);
- if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
- mutex_unlock(&hash_access_lock);
- return -EINVAL;
+ sizeof(struct qcedev_sha_op_req))) {
+ err = -EFAULT;
+ goto exit_free_qcedev_areq;
}
- qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
- err = qcedev_hash_final(&qcedev_areq, handle);
+ mutex_lock(&hash_access_lock);
+ if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
+ mutex_unlock(&hash_access_lock);
+ err = -EINVAL;
+ goto exit_free_qcedev_areq;
+ }
+ qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
+ err = qcedev_hash_final(qcedev_areq, handle);
if (err) {
mutex_unlock(&hash_access_lock);
- return err;
+ goto exit_free_qcedev_areq;
}
if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
pr_err("Invalid sha_ctxt.diglen %d\n",
handle->sha_ctxt.diglen);
mutex_unlock(&hash_access_lock);
- return -EINVAL;
+ err = -EINVAL;
+ goto exit_free_qcedev_areq;
}
- qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
- memcpy(&qcedev_areq.sha_op_req.digest[0],
+ qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
+ memcpy(&qcedev_areq->sha_op_req.digest[0],
&handle->sha_ctxt.digest[0],
handle->sha_ctxt.diglen);
mutex_unlock(&hash_access_lock);
- if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
- sizeof(struct qcedev_sha_op_req)))
- return -EFAULT;
+ if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
+ sizeof(struct qcedev_sha_op_req))) {
+ err = -EFAULT;
+ goto exit_free_qcedev_areq;
+ }
handle->sha_ctxt.init_done = false;
break;
@@ -1830,41 +1863,46 @@
{
struct scatterlist sg_src;
- if (copy_from_user(&qcedev_areq.sha_op_req,
+ if (copy_from_user(&qcedev_areq->sha_op_req,
(void __user *)arg,
- sizeof(struct qcedev_sha_op_req)))
- return -EFAULT;
+ sizeof(struct qcedev_sha_op_req))) {
+ err = -EFAULT;
+ goto exit_free_qcedev_areq;
+ }
mutex_lock(&hash_access_lock);
- if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
+ if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
mutex_unlock(&hash_access_lock);
- return -EINVAL;
+ err = -EINVAL;
+ goto exit_free_qcedev_areq;
}
- qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
- qcedev_hash_init(&qcedev_areq, handle, &sg_src);
- err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
+ qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
+ qcedev_hash_init(qcedev_areq, handle, &sg_src);
+ err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
if (err) {
mutex_unlock(&hash_access_lock);
- return err;
+ goto exit_free_qcedev_areq;
}
- err = qcedev_hash_final(&qcedev_areq, handle);
+ err = qcedev_hash_final(qcedev_areq, handle);
if (err) {
mutex_unlock(&hash_access_lock);
- return err;
+ goto exit_free_qcedev_areq;
}
if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
pr_err("Invalid sha_ctxt.diglen %d\n",
handle->sha_ctxt.diglen);
mutex_unlock(&hash_access_lock);
- return -EINVAL;
+ err = -EINVAL;
+ goto exit_free_qcedev_areq;
}
- qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
- memcpy(&qcedev_areq.sha_op_req.digest[0],
+ qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
+ memcpy(&qcedev_areq->sha_op_req.digest[0],
&handle->sha_ctxt.digest[0],
handle->sha_ctxt.diglen);
mutex_unlock(&hash_access_lock);
- if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+ if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
sizeof(struct qcedev_sha_op_req)))
- return -EFAULT;
+ err = -EFAULT;
+ goto exit_free_qcedev_areq;
}
break;
@@ -1875,8 +1913,10 @@
int i = 0;
if (copy_from_user(&map_buf,
- (void __user *)arg, sizeof(map_buf)))
- return -EFAULT;
+ (void __user *)arg, sizeof(map_buf))) {
+ err = -EFAULT;
+ goto exit_free_qcedev_areq;
+ }
for (i = 0; i < map_buf.num_fds; i++) {
err = qcedev_check_and_map_buffer(handle,
@@ -1888,7 +1928,7 @@
pr_err(
"%s: err: failed to map fd(%d) - %d\n",
__func__, map_buf.fd[i], err);
- return err;
+ goto exit_free_qcedev_areq;
}
map_buf.buf_vaddr[i] = vaddr;
pr_info("%s: info: vaddr = %llx\n",
@@ -1896,8 +1936,10 @@
}
if (copy_to_user((void __user *)arg, &map_buf,
- sizeof(map_buf)))
- return -EFAULT;
+ sizeof(map_buf))) {
+ err = -EFAULT;
+ goto exit_free_qcedev_areq;
+ }
break;
}
@@ -1907,8 +1949,10 @@
int i = 0;
if (copy_from_user(&unmap_buf,
- (void __user *)arg, sizeof(unmap_buf)))
- return -EFAULT;
+ (void __user *)arg, sizeof(unmap_buf))) {
+ err = -EFAULT;
+ goto exit_free_qcedev_areq;
+ }
for (i = 0; i < unmap_buf.num_fds; i++) {
err = qcedev_check_and_unmap_buffer(handle,
@@ -1918,16 +1962,19 @@
"%s: err: failed to unmap fd(%d) - %d\n",
__func__,
unmap_buf.fd[i], err);
- return err;
+ goto exit_free_qcedev_areq;
}
}
break;
}
default:
- return -ENOTTY;
+ err = -ENOTTY;
+ goto exit_free_qcedev_areq;
}
+exit_free_qcedev_areq:
+ kfree(qcedev_areq);
return err;
}
@@ -2030,11 +2077,14 @@
podev->mem_client = NULL;
misc_deregister(&podev->miscdevice);
+ if (msm_bus_scale_client_update_request(podev->bus_scale_handle, 1))
+ pr_err("%s Unable to set high bandwidth\n", __func__);
exit_qce_close:
if (handle)
qce_close(handle);
exit_scale_busbandwidth:
- msm_bus_scale_client_update_request(podev->bus_scale_handle, 0);
+ if (msm_bus_scale_client_update_request(podev->bus_scale_handle, 0))
+ pr_err("%s Unable to set low bandwidth\n", __func__);
exit_unregister_bus_scale:
if (podev->platform_support.bus_scale_table != NULL)
msm_bus_scale_unregister_client(podev->bus_scale_handle);
@@ -2064,9 +2114,15 @@
podev = platform_get_drvdata(pdev);
if (!podev)
return 0;
+ if (msm_bus_scale_client_update_request(podev->bus_scale_handle, 1))
+ pr_err("%s Unable to set high bandwidth\n", __func__);
+
if (podev->qce)
qce_close(podev->qce);
+ if (msm_bus_scale_client_update_request(podev->bus_scale_handle, 0))
+ pr_err("%s Unable to set low bandwidth\n", __func__);
+
if (podev->platform_support.bus_scale_table != NULL)
msm_bus_scale_unregister_client(podev->bus_scale_handle);
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index fd42575..415621e 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -1,7 +1,7 @@
/*
* QTI Crypto driver
*
- * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1354,10 +1354,6 @@
cancel_work_sync(&pengine->bw_allocate_ws);
del_timer_sync(&pengine->bw_reaper_timer);
- if (pengine->bus_scale_handle != 0)
- msm_bus_scale_unregister_client(pengine->bus_scale_handle);
- pengine->bus_scale_handle = 0;
-
kzfree(pengine->preq_pool);
if (cp->total_units)
@@ -1388,8 +1384,18 @@
mutex_lock(&cp->engine_lock);
_qcrypto_remove_engine(pengine);
mutex_unlock(&cp->engine_lock);
+
+ if (msm_bus_scale_client_update_request(pengine->bus_scale_handle, 1))
+ pr_err("%s Unable to set high bandwidth\n", __func__);
+
if (pengine->qce)
qce_close(pengine->qce);
+
+ if (msm_bus_scale_client_update_request(pengine->bus_scale_handle, 0))
+ pr_err("%s Unable to set low bandwidth\n", __func__);
+ msm_bus_scale_unregister_client(pengine->bus_scale_handle);
+ pengine->bus_scale_handle = 0;
+
kzfree(pengine);
return 0;
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index ea8595d..30f8bbe 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -943,11 +943,13 @@
static void ipsec_esp_unmap(struct device *dev,
struct talitos_edesc *edesc,
- struct aead_request *areq)
+ struct aead_request *areq, bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
+ unsigned int authsize = crypto_aead_authsize(aead);
+ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
@@ -956,7 +958,7 @@
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
- talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
areq->assoclen);
if (edesc->dma_len)
@@ -967,7 +969,7 @@
unsigned int dst_nents = edesc->dst_nents ? : 1;
sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
- areq->assoclen + areq->cryptlen - ivsize);
+ areq->assoclen + cryptlen - ivsize);
}
}
@@ -988,7 +990,7 @@
edesc = container_of(desc, struct talitos_edesc, desc);
- ipsec_esp_unmap(dev, edesc, areq);
+ ipsec_esp_unmap(dev, edesc, areq, true);
/* copy the generated ICV to dst */
if (edesc->icv_ool) {
@@ -1020,7 +1022,7 @@
edesc = container_of(desc, struct talitos_edesc, desc);
- ipsec_esp_unmap(dev, edesc, req);
+ ipsec_esp_unmap(dev, edesc, req, false);
if (!err) {
char icvdata[SHA512_DIGEST_SIZE];
@@ -1066,7 +1068,7 @@
edesc = container_of(desc, struct talitos_edesc, desc);
- ipsec_esp_unmap(dev, edesc, req);
+ ipsec_esp_unmap(dev, edesc, req, false);
/* check ICV auth status */
if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
@@ -1173,6 +1175,7 @@
* fill in and submit ipsec_esp descriptor
*/
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ bool encrypt,
void (*callback)(struct device *dev,
struct talitos_desc *desc,
void *context, int error))
@@ -1182,7 +1185,7 @@
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
- unsigned int cryptlen = areq->cryptlen;
+ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
unsigned int ivsize = crypto_aead_ivsize(aead);
int tbl_off = 0;
int sg_count, ret;
@@ -1324,7 +1327,7 @@
ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
- ipsec_esp_unmap(dev, edesc, areq);
+ ipsec_esp_unmap(dev, edesc, areq, encrypt);
kfree(edesc);
}
return ret;
@@ -1433,9 +1436,10 @@
unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
unsigned int ivsize = crypto_aead_ivsize(authenc);
+ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
- iv, areq->assoclen, areq->cryptlen,
+ iv, areq->assoclen, cryptlen,
authsize, ivsize, icv_stashing,
areq->base.flags, encrypt);
}
@@ -1454,7 +1458,7 @@
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
- return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
+ return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
@@ -1466,14 +1470,13 @@
struct talitos_edesc *edesc;
void *icvdata;
- req->cryptlen -= authsize;
-
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, req->iv, 1, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
+ if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
+ (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
((!edesc->src_nents && !edesc->dst_nents) ||
priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
@@ -1485,7 +1488,8 @@
/* reset integrity check result bits */
edesc->desc.hdr_lo = 0;
- return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
+ return ipsec_esp(edesc, req, false,
+ ipsec_esp_decrypt_hwauth_done);
}
/* Have to check the ICV with software */
@@ -1501,7 +1505,7 @@
sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
req->assoclen + req->cryptlen - authsize);
- return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
+ return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
@@ -1528,6 +1532,18 @@
return 0;
}
+static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
+ keylen == AES_KEYSIZE_256)
+ return ablkcipher_setkey(cipher, key, keylen);
+
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+ return -EINVAL;
+}
+
static void common_nonsnoop_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct ablkcipher_request *areq)
@@ -1656,6 +1672,14 @@
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct talitos_edesc *edesc;
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+ if (!areq->nbytes)
+ return 0;
+
+ if (areq->nbytes % blocksize)
+ return -EINVAL;
/* allocate extended descriptor */
edesc = ablkcipher_edesc_alloc(areq, true);
@@ -1673,6 +1697,14 @@
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct talitos_edesc *edesc;
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+ if (!areq->nbytes)
+ return 0;
+
+ if (areq->nbytes % blocksize)
+ return -EINVAL;
/* allocate extended descriptor */
edesc = ablkcipher_edesc_alloc(areq, false);
@@ -2621,6 +2653,7 @@
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
+ .setkey = ablkcipher_aes_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2631,13 +2664,13 @@
.alg.crypto = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablkcipher_aes_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 6b16ce3..9f901f1 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1429,8 +1429,10 @@
rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
IRQF_SHARED, "omap-dma-engine", od);
- if (rc)
+ if (rc) {
+ omap_dma_free(od);
return rc;
+ }
}
if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 8c3c588..a7e1f6e 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -395,8 +395,10 @@
ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
nelm * 2);
- if (ret)
+ if (ret) {
+ kfree(rsv_events);
return ret;
+ }
for (i = 0; i < nelm; i++) {
ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3b0d77b..6008a30 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -426,12 +426,23 @@
struct linehandle_state *lh;
struct file *file;
int fd, i, count = 0, ret;
+ u32 lflags;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
return -EFAULT;
if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
return -EINVAL;
+ lflags = handlereq.flags;
+
+ /*
+ * Do not allow both INPUT & OUTPUT flags to be set as they are
+ * contradictory.
+ */
+ if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
+ (lflags & GPIOHANDLE_REQUEST_OUTPUT))
+ return -EINVAL;
+
lh = kzalloc(sizeof(*lh), GFP_KERNEL);
if (!lh)
return -ENOMEM;
@@ -452,7 +463,6 @@
/* Request each GPIO */
for (i = 0; i < handlereq.lines; i++) {
u32 offset = handlereq.lineoffsets[i];
- u32 lflags = handlereq.flags;
struct gpio_desc *desc;
if (offset >= gdev->ngpio) {
@@ -787,7 +797,9 @@
}
/* This is just wrong: we don't look for events on output lines */
- if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+ if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
+ (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
+ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
ret = -EINVAL;
goto out_free_label;
}
@@ -801,10 +813,6 @@
if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
ret = gpiod_direction_input(desc);
if (ret)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2f936a7..78591ea 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -83,6 +83,16 @@
default case is N. Details and instructions how to build your own
EDID data are given in Documentation/EDID/HOWTO.txt.
+config DRM_DP_CEC
+ bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
+ select CEC_CORE
+ help
+ Choose this option if you want to enable HDMI CEC support for
+ DisplayPort/USB-C to HDMI adapters.
+
+ Note: not all adapters support this feature, and even for those
+ that do support this they often do not hook up the CEC pin.
+
config DRM_TTM
tristate
depends on DRM
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 25c7204..4a7766c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -33,6 +33,7 @@
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
+drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
new file mode 100644
index 0000000..ddb1c5a
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DisplayPort CEC-Tunneling-over-AUX support
+ *
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <drm/drm_dp_helper.h>
+#include <media/cec.h>
+
+/*
+ * Unfortunately it turns out that we have a chicken-and-egg situation
+ * here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters
+ * have a converter chip that supports CEC-Tunneling-over-AUX (usually the
+ * Parade PS176), but they do not wire up the CEC pin, thus making CEC
+ * useless.
+ *
+ * Sadly there is no way for this driver to know this. What happens is
+ * that a /dev/cecX device is created that is isolated and unable to see
+ * any of the other CEC devices. Quite literally the CEC wire is cut
+ * (or in this case, never connected in the first place).
+ *
+ * The reason so few adapters support this is that this tunneling protocol
+ * was never supported by any OS. So there was no easy way of testing it,
+ * and no incentive to correctly wire up the CEC pin.
+ *
+ * Hopefully by creating this driver it will be easier for vendors to
+ * finally fix their adapters and test the CEC functionality.
+ *
+ * I keep a list of known working adapters here:
+ *
+ * https://hverkuil.home.xs4all.nl/cec-status.txt
+ *
+ * Please mail me (hverkuil@xs4all.nl) if you find an adapter that works
+ * and is not yet listed there.
+ *
+ * Note that the current implementation does not support CEC over an MST hub.
+ * As far as I can see there is no mechanism defined in the DisplayPort
+ * standard to transport CEC interrupts over an MST device. It might be
+ * possible to do this through polling, but I have not been able to get that
+ * to work.
+ */
+
+/**
+ * DOC: dp cec helpers
+ *
+ * These functions take care of supporting the CEC-Tunneling-over-AUX
+ * feature of DisplayPort-to-HDMI adapters.
+ */
+
+/*
+ * When the EDID is unset because the HPD went low, then the CEC DPCD registers
+ * typically can no longer be read (true for a DP-to-HDMI adapter since it is
+ * powered by the HPD). However, some displays toggle the HPD off and on for a
+ * short period for one reason or another, and that would cause the CEC adapter
+ * to be removed and added again, even though nothing else changed.
+ *
+ * This module parameter sets a delay in seconds before the CEC adapter is
+ * actually unregistered. Only if the HPD does not return within that time will
+ * the CEC adapter be unregistered.
+ *
+ * If it is set to a value >= NEVER_UNREG_DELAY, then the CEC adapter will never
+ * be unregistered for as long as the connector remains registered.
+ *
+ * If it is set to 0, then the CEC adapter will be unregistered immediately as
+ * soon as the HPD disappears.
+ *
+ * The default is one second to prevent short HPD glitches from unregistering
+ * the CEC adapter.
+ *
+ * Note that for integrated HDMI branch devices that support CEC the DPCD
+ * registers remain available even if the HPD goes low since it is not powered
+ * by the HPD. In that case the CEC adapter will never be unregistered during
+ * the life time of the connector. At least, this is the theory since I do not
+ * have hardware with an integrated HDMI branch device that supports CEC.
+ */
+#define NEVER_UNREG_DELAY 1000
+static unsigned int drm_dp_cec_unregister_delay = 1;
+module_param(drm_dp_cec_unregister_delay, uint, 0600);
+MODULE_PARM_DESC(drm_dp_cec_unregister_delay,
+ "CEC unregister delay in seconds, 0: no delay, >= 1000: never unregister");
+
+static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
+ ssize_t err = 0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ return (enable && err < 0) ? err : 0;
+}
+
+static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ /* Bit 15 (logical address 15) should always be set */
+ u16 la_mask = 1 << CEC_LOG_ADDR_BROADCAST;
+ u8 mask[2];
+ ssize_t err;
+
+ if (addr != CEC_LOG_ADDR_INVALID)
+ la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
+ mask[0] = la_mask & 0xff;
+ mask[1] = la_mask >> 8;
+ err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
+ return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
+}
+
+static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ unsigned int retries = min(5, attempts - 1);
+ ssize_t err;
+
+ err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
+ msg->msg, msg->len);
+ if (err < 0)
+ return err;
+
+ err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
+ (msg->len - 1) | (retries << 4) |
+ DP_CEC_TX_MESSAGE_SEND);
+ return err < 0 ? err : 0;
+}
+
+static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
+ bool enable)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ ssize_t err;
+ u8 val;
+
+ if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
+ if (err >= 0) {
+ if (enable)
+ val |= DP_CEC_SNOOPING_ENABLE;
+ else
+ val &= ~DP_CEC_SNOOPING_ENABLE;
+ err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ }
+ return (enable && err < 0) ? err : 0;
+}
+
+static void drm_dp_cec_adap_status(struct cec_adapter *adap,
+ struct seq_file *file)
+{
+ struct drm_dp_aux *aux = cec_get_drvdata(adap);
+ struct drm_dp_desc desc;
+ struct drm_dp_dpcd_ident *id = &desc.ident;
+
+ if (drm_dp_read_desc(aux, &desc, true))
+ return;
+ seq_printf(file, "OUI: %*pdH\n",
+ (int)sizeof(id->oui), id->oui);
+ seq_printf(file, "ID: %*pE\n",
+ (int)strnlen(id->device_id, sizeof(id->device_id)),
+ id->device_id);
+ seq_printf(file, "HW Rev: %d.%d\n", id->hw_rev >> 4, id->hw_rev & 0xf);
+ /*
+ * Show this both in decimal and hex: at least one vendor
+ * always reports this in hex.
+ */
+ seq_printf(file, "FW/SW Rev: %d.%d (0x%02x.0x%02x)\n",
+ id->sw_major_rev, id->sw_minor_rev,
+ id->sw_major_rev, id->sw_minor_rev);
+}
+
+static const struct cec_adap_ops drm_dp_cec_adap_ops = {
+ .adap_enable = drm_dp_cec_adap_enable,
+ .adap_log_addr = drm_dp_cec_adap_log_addr,
+ .adap_transmit = drm_dp_cec_adap_transmit,
+ .adap_monitor_all_enable = drm_dp_cec_adap_monitor_all_enable,
+ .adap_status = drm_dp_cec_adap_status,
+};
+
+static int drm_dp_cec_received(struct drm_dp_aux *aux)
+{
+ struct cec_adapter *adap = aux->cec.adap;
+ struct cec_msg msg;
+ u8 rx_msg_info;
+ ssize_t err;
+
+ err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
+ if (err < 0)
+ return err;
+
+ if (!(rx_msg_info & DP_CEC_RX_MESSAGE_ENDED))
+ return 0;
+
+ msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
+ err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
+ if (err < 0)
+ return err;
+
+ cec_received_msg(adap, &msg);
+ return 0;
+}
+
+static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
+{
+ struct cec_adapter *adap = aux->cec.adap;
+ u8 flags;
+
+ if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
+ return;
+
+ if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
+ drm_dp_cec_received(aux);
+
+ if (flags & DP_CEC_TX_MESSAGE_SENT)
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK);
+ else if (flags & DP_CEC_TX_LINE_ERROR)
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR |
+ CEC_TX_STATUS_MAX_RETRIES);
+ else if (flags &
+ (DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
+ CEC_TX_STATUS_MAX_RETRIES);
+ drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
+}
+
+/**
+ * drm_dp_cec_irq() - handle CEC interrupt, if any
+ * @aux: DisplayPort AUX channel
+ *
+ * Should be called when handling an IRQ_HPD request. If CEC-tunneling-over-AUX
+ * is present, then it will check for a CEC_IRQ and handle it accordingly.
+ */
+void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+ u8 cec_irq;
+ int ret;
+
+ mutex_lock(&aux->cec.lock);
+ if (!aux->cec.adap)
+ goto unlock;
+
+ ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
+ &cec_irq);
+ if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
+ goto unlock;
+
+ drm_dp_cec_handle_irq(aux);
+ drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
+unlock:
+ mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_irq);
+
+static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
+{
+ u8 cap = 0;
+
+ if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
+ !(cap & DP_CEC_TUNNELING_CAPABLE))
+ return false;
+ if (cec_cap)
+ *cec_cap = cap;
+ return true;
+}
+
+/*
+ * Called if the HPD was low for more than drm_dp_cec_unregister_delay
+ * seconds. This unregisters the CEC adapter.
+ */
+static void drm_dp_cec_unregister_work(struct work_struct *work)
+{
+ struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux,
+ cec.unregister_work.work);
+
+ mutex_lock(&aux->cec.lock);
+ cec_unregister_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+ mutex_unlock(&aux->cec.lock);
+}
+
+/*
+ * A new EDID is set. If there is no CEC adapter, then create one. If
+ * there was a CEC adapter, then check if the CEC adapter properties
+ * were unchanged and just update the CEC physical address. Otherwise
+ * unregister the old CEC adapter and create a new one.
+ */
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
+{
+ u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD;
+ unsigned int num_las = 1;
+ u8 cap;
+
+#ifndef CONFIG_MEDIA_CEC_RC
+ /*
+ * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
+ * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
+ *
+ * Do this here as well to ensure the tests against cec_caps are
+ * correct.
+ */
+ cec_caps &= ~CEC_CAP_RC;
+#endif
+ cancel_delayed_work_sync(&aux->cec.unregister_work);
+
+ mutex_lock(&aux->cec.lock);
+ if (!drm_dp_cec_cap(aux, &cap)) {
+ /* CEC is not supported, unregister any existing adapter */
+ cec_unregister_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+ goto unlock;
+ }
+
+ if (cap & DP_CEC_SNOOPING_CAPABLE)
+ cec_caps |= CEC_CAP_MONITOR_ALL;
+ if (cap & DP_CEC_MULTIPLE_LA_CAPABLE)
+ num_las = CEC_MAX_LOG_ADDRS;
+
+ if (aux->cec.adap) {
+ if (aux->cec.adap->capabilities == cec_caps &&
+ aux->cec.adap->available_log_addrs == num_las) {
+ /* Unchanged, so just set the phys addr */
+ cec_s_phys_addr_from_edid(aux->cec.adap, edid);
+ goto unlock;
+ }
+ /*
+ * The capabilities changed, so unregister the old
+ * adapter first.
+ */
+ cec_unregister_adapter(aux->cec.adap);
+ }
+
+ /* Create a new adapter */
+ aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
+ aux, aux->cec.name, cec_caps,
+ num_las);
+ if (IS_ERR(aux->cec.adap)) {
+ aux->cec.adap = NULL;
+ goto unlock;
+ }
+ if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) {
+ cec_delete_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+ } else {
+ /*
+ * Update the phys addr for the new CEC adapter. When called
+ * from drm_dp_cec_register_connector() edid == NULL, so in
+ * that case the phys addr is just invalidated.
+ */
+ cec_s_phys_addr_from_edid(aux->cec.adap, edid);
+ }
+unlock:
+ mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_set_edid);
+
+/*
+ * The EDID disappeared (likely because of the HPD going down).
+ */
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+ cancel_delayed_work_sync(&aux->cec.unregister_work);
+
+ mutex_lock(&aux->cec.lock);
+ if (!aux->cec.adap)
+ goto unlock;
+
+ cec_phys_addr_invalidate(aux->cec.adap);
+ /*
+ * We're done if we want to keep the CEC device
+ * (drm_dp_cec_unregister_delay is >= NEVER_UNREG_DELAY) or if the
+ * DPCD still indicates the CEC capability (expected for an integrated
+ * HDMI branch device).
+ */
+ if (drm_dp_cec_unregister_delay < NEVER_UNREG_DELAY &&
+ !drm_dp_cec_cap(aux, NULL)) {
+ /*
+ * Unregister the CEC adapter after drm_dp_cec_unregister_delay
+ * seconds. This is to debounce short HPD off-and-on cycles from
+ * displays.
+ */
+ schedule_delayed_work(&aux->cec.unregister_work,
+ drm_dp_cec_unregister_delay * HZ);
+ }
+unlock:
+ mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_unset_edid);
+
+/**
+ * drm_dp_cec_register_connector() - register a new connector
+ * @aux: DisplayPort AUX channel
+ * @name: name of the CEC device
+ * @parent: parent device
+ *
+ * A new connector was registered with associated CEC adapter name and
+ * CEC adapter parent device. After registering the name and parent
+ * drm_dp_cec_set_edid() is called to check if the connector supports
+ * CEC and to register a CEC adapter if that is the case.
+ */
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
+ struct device *parent)
+{
+ WARN_ON(aux->cec.adap);
+ aux->cec.name = name;
+ aux->cec.parent = parent;
+ INIT_DELAYED_WORK(&aux->cec.unregister_work,
+ drm_dp_cec_unregister_work);
+
+ drm_dp_cec_set_edid(aux, NULL);
+}
+EXPORT_SYMBOL(drm_dp_cec_register_connector);
+
+/**
+ * drm_dp_cec_unregister_connector() - unregister the CEC adapter, if any
+ * @aux: DisplayPort AUX channel
+ */
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+ if (!aux->cec.adap)
+ return;
+ cancel_delayed_work_sync(&aux->cec.unregister_work);
+ cec_unregister_adapter(aux->cec.adap);
+ aux->cec.adap = NULL;
+}
+EXPORT_SYMBOL(drm_dp_cec_unregister_connector);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 876a2d9..dde0623 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -990,6 +990,7 @@
void drm_dp_aux_init(struct drm_dp_aux *aux)
{
mutex_init(&aux->hw_mutex);
+ mutex_init(&aux->cec.lock);
aux->ddc.algo = &drm_dp_i2c_algo;
aux->ddc.algo_data = aux;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 48dfc16..2865876 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -423,12 +423,15 @@
comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
if (!comp) {
ret = -ENOMEM;
+ of_node_put(node);
goto err_node;
}
ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
- if (ret)
+ if (ret) {
+ of_node_put(node);
goto err_node;
+ }
private->ddp_comp[comp_id] = comp;
}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 5899fcf..ace0fac 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -124,7 +124,7 @@
if (len > count)
len = count;
- /* TODO: make sure that this does not exceed 4K */
+ len = min_t(size_t, len, SZ_4K);
if (copy_to_user(buff, buf, len)) {
kfree(buf);
return -EFAULT;
@@ -182,7 +182,7 @@
if (len > count)
len = count;
- /* TODO: make sure that this does not exceed 4K */
+ len = min_t(size_t, len, SZ_4K);
if (copy_to_user(buff, buf, len)) {
kfree(buf);
return -EFAULT;
diff --git a/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h b/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h
index 9605ab2..7b4963d 100644
--- a/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h
+++ b/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h
@@ -18,7 +18,7 @@
#include <linux/slab.h>
#define ST_ASM330LHH_REVISION "2.0.1"
-#define ST_ASM330LHH_PATCH "1"
+#define ST_ASM330LHH_PATCH "2"
#define ST_ASM330LHH_VERSION "v" \
ST_ASM330LHH_REVISION \
@@ -220,7 +220,10 @@
u8 enable_mask;
s64 ts_offset;
+ u32 hw_val;
+ u32 hw_val_old;
s64 hw_ts;
+ s64 hw_ts_high;
s64 delta_ts;
s64 ts;
s64 tsample;
diff --git a/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c
index 734d735..c224427 100644
--- a/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c
+++ b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c
@@ -64,6 +64,8 @@
hw->ts_offset = hw->ts;
hw->hw_ts_old = 0ull;
hw->tsample = 0ull;
+ hw->hw_ts_high = 0ull;
+ hw->hw_val_old = 0ull;
return hw->tf->write(hw->dev, ST_ASM330LHH_REG_TS2_ADDR, sizeof(data),
&data);
@@ -257,14 +259,12 @@
struct iio_dev *iio_dev;
__le16 fifo_status;
u16 fifo_depth;
- u32 val;
int ts_processed = 0;
s64 hw_ts = 0ull, delta_hw_ts, cpu_timestamp;
ts_irq = hw->ts - hw->delta_ts;
- do
- {
+ do {
err = hw->tf->read(hw->dev, ST_ASM330LHH_REG_FIFO_DIFFL_ADDR,
sizeof(fifo_status), (u8 *)&fifo_status);
if (err < 0)
@@ -289,8 +289,14 @@
tag = buf[i] >> 3;
if (tag == ST_ASM330LHH_TS_TAG) {
- val = get_unaligned_le32(ptr);
- hw->hw_ts = val * ST_ASM330LHH_TS_DELTA_NS;
+ hw->hw_val = get_unaligned_le32(ptr);
+
+ /* check for timer rollover */
+ if (hw->hw_val < hw->hw_val_old)
+ hw->hw_ts_high++;
+ hw->hw_ts =
+ (hw->hw_val + (hw->hw_ts_high << 32))
+ * ST_ASM330LHH_TS_DELTA_NS;
ts_delta_hw_ts = hw->hw_ts - hw->hw_ts_old;
hw_ts += ts_delta_hw_ts;
ts_delta_offs =
@@ -301,6 +307,7 @@
ts_irq += (hw->hw_ts + ts_delta_offs);
hw->hw_ts_old = hw->hw_ts;
+ hw->hw_val_old = hw->hw_val;
ts_processed++;
if (!hw->tsample)
diff --git a/drivers/input/sensors/smi130/smi130_acc.c b/drivers/input/sensors/smi130/smi130_acc.c
index a63e08e..a6416e4 100644
--- a/drivers/input/sensors/smi130/smi130_acc.c
+++ b/drivers/input/sensors/smi130/smi130_acc.c
@@ -133,7 +133,7 @@
#include <linux/delay.h>
#include <asm/irq.h>
#include <linux/math64.h>
-
+#include <linux/cpu.h>
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif
@@ -159,7 +159,7 @@
#define SENSOR_NAME "smi130_acc"
#define SMI130_ACC_USE_BASIC_I2C_FUNC 1
-
+#define SMI130_HRTIMER 1
#define MSC_TIME 6
#define ABSMIN -512
#define ABSMAX 512
@@ -1573,7 +1573,6 @@
struct mutex enable_mutex;
struct mutex mode_mutex;
struct delayed_work work;
- struct work_struct irq_work;
#ifdef CONFIG_HAS_EARLYSUSPEND
struct early_suspend early_suspend;
#endif
@@ -1611,8 +1610,57 @@
struct input_dev *accbuf_dev;
int report_evt_cnt;
#endif
+#ifdef SMI130_HRTIMER
+ struct hrtimer smi130_hrtimer;
+#endif
};
+#ifdef SMI130_HRTIMER
+static void smi130_set_cpu_idle_state(bool value)
+{
+ cpu_idle_poll_ctrl(value);
+}
+static enum hrtimer_restart smi130_timer_function(struct hrtimer *timer)
+{
+ smi130_set_cpu_idle_state(true);
+
+ return HRTIMER_NORESTART;
+}
+static void smi130_hrtimer_reset(struct smi130_acc_data *data)
+{
+ hrtimer_cancel(&data->smi130_hrtimer);
+ /* forward the hrtimer to expire 1 ms before the expected irq arrival */
+ hrtimer_forward(&data->smi130_hrtimer, ktime_get(),
+ ns_to_ktime(data->time_odr - 1000000));
+ hrtimer_restart(&data->smi130_hrtimer);
+}
+static void smi130_hrtimer_init(struct smi130_acc_data *data)
+{
+ hrtimer_init(&data->smi130_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ data->smi130_hrtimer.function = smi130_timer_function;
+}
+static void smi130_hrtimer_cleanup(struct smi130_acc_data *data)
+{
+ hrtimer_cancel(&data->smi130_hrtimer);
+}
+#else
+static void smi130_set_cpu_idle_state(bool value)
+{
+}
+static void smi130_hrtimer_reset(struct smi130_acc_data *data)
+{
+
+}
+static void smi130_hrtimer_init(struct smi130_acc_data *data)
+{
+
+}
+static void smi130_hrtimer_remove(struct smi130_acc_data *data)
+{
+
+}
+#endif
+
#ifdef CONFIG_HAS_EARLYSUSPEND
static void smi130_acc_early_suspend(struct early_suspend *h);
static void smi130_acc_late_resume(struct early_suspend *h);
@@ -3871,7 +3919,7 @@
signed char sensor_type, short *a_x)
{
int comres = 0;
- unsigned char data[2];
+ unsigned char data[2] = {0};
switch (sensor_type) {
case 0:
@@ -3940,7 +3988,7 @@
signed char sensor_type, short *a_y)
{
int comres = 0;
- unsigned char data[2];
+ unsigned char data[2] = {0};
switch (sensor_type) {
case 0:
@@ -3998,7 +4046,7 @@
signed char sensor_type, short *a_z)
{
int comres = 0;
- unsigned char data[2];
+ unsigned char data[2] = {0};
switch (sensor_type) {
case 0:
@@ -5087,7 +5135,7 @@
signed char sensor_type, struct smi130_accacc *acc)
{
int comres = 0;
- unsigned char data[6];
+ unsigned char data[6] = {0};
struct smi130_acc_data *client_data = i2c_get_clientdata(client);
#ifndef SMI_ACC2X2_SENSOR_IDENTIFICATION_ENABLE
int bitwidth;
@@ -5381,7 +5429,7 @@
struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
size_t count = 0;
- u8 reg[0x40];
+ u8 reg[0x40] = {0};
int i;
for (i = 0; i < 0x40; i++) {
@@ -6962,7 +7010,7 @@
if (!client_data->smi_acc_cachepool) {
PERR("smi_acc_cachepool cache create failed\n");
err = -ENOMEM;
- goto clean_exit1;
+ return 0;
}
for (i = 0; i < SMI_ACC_MAXSAMPLE; i++) {
client_data->smi130_acc_samplist[i] =
@@ -6970,7 +7018,7 @@
GFP_KERNEL);
if (!client_data->smi130_acc_samplist[i]) {
err = -ENOMEM;
- goto clean_exit2;
+ goto clean_exit1;
}
}
@@ -6978,7 +7026,7 @@
if (!client_data->accbuf_dev) {
err = -ENOMEM;
PERR("input device allocation failed\n");
- goto clean_exit3;
+ goto clean_exit1;
}
client_data->accbuf_dev->name = "smi130_accbuf";
client_data->accbuf_dev->id.bustype = BUS_I2C;
@@ -6999,25 +7047,26 @@
if (err) {
PERR("unable to register input device %s\n",
client_data->accbuf_dev->name);
- goto clean_exit3;
+ goto clean_exit2;
}
client_data->acc_buffer_smi130_samples = true;
client_data->acc_enable = false;
+ smi130_set_cpu_idle_state(true);
+
smi130_acc_set_mode(client, SMI_ACC2X2_MODE_NORMAL, 1);
smi130_acc_set_bandwidth(client, SMI_ACC2X2_BW_62_50HZ);
smi130_acc_set_range(client, SMI_ACC2X2_RANGE_2G);
return 1;
-clean_exit3:
- input_free_device(client_data->accbuf_dev);
clean_exit2:
+ input_free_device(client_data->accbuf_dev);
+clean_exit1:
for (i = 0; i < SMI_ACC_MAXSAMPLE; i++)
kmem_cache_free(client_data->smi_acc_cachepool,
client_data->smi130_acc_samplist[i]);
-clean_exit1:
kmem_cache_destroy(client_data->smi_acc_cachepool);
return 0;
}
@@ -7044,10 +7093,9 @@
}
#endif
-static void smi130_acc_irq_work_func(struct work_struct *work)
+static irqreturn_t smi130_acc_irq_work_func(int irq, void *handle)
{
- struct smi130_acc_data *smi130_acc = container_of((struct work_struct *)work,
- struct smi130_acc_data, irq_work);
+ struct smi130_acc_data *smi130_acc = handle;
#ifdef CONFIG_DOUBLE_TAP
struct i2c_client *client = smi130_acc->smi130_acc_client;
#endif
@@ -7092,8 +7140,10 @@
mutex_unlock(&smi130_acc->value_mutex);
}
store_acc_boot_sample(smi130_acc, acc.x, acc.y, acc.z, ts);
-#endif
+ smi130_set_cpu_idle_state(false);
+ return IRQ_HANDLED;
+#endif
smi130_acc_get_interruptstatus1(smi130_acc->smi130_acc_client, &status);
PDEBUG("smi130_acc_irq_work_func, status = 0x%x\n", status);
@@ -7246,10 +7296,9 @@
if (data->smi130_acc_client == NULL)
return IRQ_HANDLED;
data->timestamp = smi130_acc_get_alarm_timestamp();
+ smi130_hrtimer_reset(data);
- schedule_work(&data->irq_work);
-
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
#endif /* defined(SMI_ACC2X2_ENABLE_INT1)||defined(SMI_ACC2X2_ENABLE_INT2) */
@@ -7363,7 +7412,6 @@
if (err)
PERR("could not request irq\n");
- INIT_WORK(&data->irq_work, smi130_acc_irq_work_func);
#endif
#ifndef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
@@ -7558,9 +7606,11 @@
return -EINVAL;
data->IRQ = client->irq;
PDEBUG("data->IRQ = %d", data->IRQ);
- err = request_irq(data->IRQ, smi130_acc_irq_handler, IRQF_TRIGGER_RISING,
+ err = request_threaded_irq(data->IRQ, smi130_acc_irq_handler,
+ smi130_acc_irq_work_func, IRQF_TRIGGER_RISING,
"smi130_acc", data);
+ smi130_hrtimer_init(data);
err = smi130_acc_early_buff_init(client, data);
if (!err)
goto exit;
@@ -7676,6 +7726,7 @@
if (NULL == data)
return 0;
+ smi130_hrtimer_cleanup(data);
smi130_acc_input_cleanup(data);
smi130_acc_set_enable(&client->dev, 0);
#ifdef CONFIG_HAS_EARLYSUSPEND
diff --git a/drivers/input/sensors/smi130/smi130_driver.c b/drivers/input/sensors/smi130/smi130_driver.c
index 42a0a57..3991ada 100644
--- a/drivers/input/sensors/smi130/smi130_driver.c
+++ b/drivers/input/sensors/smi130/smi130_driver.c
@@ -2879,7 +2879,7 @@
struct smi_client_data *client_data = input_get_drvdata(input);
ssize_t ret;
- u8 reg_data[128], i;
+ u8 reg_data[128] = {0}, i;
int pos;
if (client_data == NULL) {
@@ -2913,7 +2913,8 @@
struct input_dev *input = to_input_dev(dev);
struct smi_client_data *client_data = input_get_drvdata(input);
ssize_t ret;
- u8 reg_data[32];
+ u8 reg_data[32] = {0};
+
int i, j, status, digit;
if (client_data == NULL) {
diff --git a/drivers/input/sensors/smi130/smi130_gyro_driver.c b/drivers/input/sensors/smi130/smi130_gyro_driver.c
index fd9e87d..e497a0a8 100644
--- a/drivers/input/sensors/smi130/smi130_gyro_driver.c
+++ b/drivers/input/sensors/smi130/smi130_gyro_driver.c
@@ -290,7 +290,6 @@
ktime_t work_delay_kt;
uint8_t gpio_pin;
int16_t IRQ;
- struct work_struct irq_work;
#ifdef CONFIG_ENABLE_SMI_ACC_GYRO_BUFFERING
bool read_gyro_boot_sample;
int gyro_bufsample_cnt;
@@ -422,7 +421,7 @@
static void smi_gyro_dump_reg(struct i2c_client *client)
{
int i;
- u8 dbg_buf[64];
+ u8 dbg_buf[64] = {0};
u8 dbg_buf_str[64 * 3 + 1] = "";
for (i = 0; i < BYTES_PER_LINE; i++) {
@@ -1788,7 +1787,7 @@
if (!client_data->smi_gyro_cachepool) {
PERR("smi_gyro_cachepool cache create failed\n");
err = -ENOMEM;
- goto clean_exit1;
+ return 0;
}
for (i = 0; i < SMI_GYRO_MAXSAMPLE; i++) {
@@ -1797,7 +1796,7 @@
GFP_KERNEL);
if (!client_data->smi130_gyro_samplist[i]) {
err = -ENOMEM;
- goto clean_exit2;
+ goto clean_exit1;
}
}
@@ -1806,7 +1805,7 @@
if (!client_data->gyrobuf_dev) {
err = -ENOMEM;
PERR("input device allocation failed\n");
- goto clean_exit3;
+ goto clean_exit1;
}
client_data->gyrobuf_dev->name = "smi130_gyrobuf";
client_data->gyrobuf_dev->id.bustype = BUS_I2C;
@@ -1827,7 +1826,7 @@
if (err) {
PERR("unable to register input device %s\n",
client_data->gyrobuf_dev->name);
- goto clean_exit3;
+ goto clean_exit2;
}
client_data->gyro_buffer_smi130_samples = true;
@@ -1852,13 +1851,12 @@
return 1;
-clean_exit3:
- input_free_device(client_data->gyrobuf_dev);
clean_exit2:
+ input_free_device(client_data->gyrobuf_dev);
+clean_exit1:
for (i = 0; i < SMI_GYRO_MAXSAMPLE; i++)
kmem_cache_free(client_data->smi_gyro_cachepool,
client_data->smi130_gyro_samplist[i]);
-clean_exit1:
kmem_cache_destroy(client_data->smi_gyro_cachepool);
return 0;
}
@@ -1895,10 +1893,9 @@
#if defined(SMI130_GYRO_ENABLE_INT1) || defined(SMI130_GYRO_ENABLE_INT2)
-static void smi130_gyro_irq_work_func(struct work_struct *work)
+static irqreturn_t smi130_gyro_irq_work_func(int irq, void *handle)
{
- struct smi_gyro_client_data *client_data = container_of(work,
- struct smi_gyro_client_data, irq_work);
+ struct smi_gyro_client_data *client_data = handle;
struct smi130_gyro_data_t gyro_data;
struct timespec ts;
ts = ns_to_timespec(client_data->timestamp);
@@ -1919,14 +1916,14 @@
input_sync(client_data->input);
store_gyro_boot_sample(client_data, gyro_data.datax,
gyro_data.datay, gyro_data.dataz, ts);
+ return IRQ_HANDLED;
}
static irqreturn_t smi_gyro_irq_handler(int irq, void *handle)
{
struct smi_gyro_client_data *client_data = handle;
client_data->timestamp= smi130_gyro_get_alarm_timestamp();
- schedule_work(&client_data->irq_work);
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
#endif
static int smi_gyro_probe(struct i2c_client *client, const struct i2c_device_id *id)
@@ -2098,13 +2095,12 @@
PDEBUG("request failed\n");
}
client_data->IRQ = gpio_to_irq(client_data->gpio_pin);
- err = request_irq(client_data->IRQ, smi_gyro_irq_handler,
- IRQF_TRIGGER_RISING,
- SENSOR_NAME, client_data);
+ err = request_threaded_irq(client_data->IRQ,
+ smi_gyro_irq_handler, smi130_gyro_irq_work_func,
+ IRQF_TRIGGER_RISING, SENSOR_NAME, client_data);
if (err < 0)
PDEBUG("request handle failed\n");
}
- INIT_WORK(&client_data->irq_work, smi130_gyro_irq_work_func);
#endif
err = smi130_gyro_early_buff_init(client_data);
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index c1233d0..dd7880d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1321,18 +1321,21 @@
* another level increases the size of the address space by 9 bits to a size up
* to 64 bits.
*/
-static bool increase_address_space(struct protection_domain *domain,
+static void increase_address_space(struct protection_domain *domain,
gfp_t gfp)
{
+ unsigned long flags;
u64 *pte;
- if (domain->mode == PAGE_MODE_6_LEVEL)
+ spin_lock_irqsave(&domain->lock, flags);
+
+ if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
/* address space already 64 bit large */
- return false;
+ goto out;
pte = (void *)get_zeroed_page(gfp);
if (!pte)
- return false;
+ goto out;
*pte = PM_LEVEL_PDE(domain->mode,
virt_to_phys(domain->pt_root));
@@ -1340,7 +1343,10 @@
domain->mode += 1;
domain->updated = true;
- return true;
+out:
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return;
}
static u64 *alloc_pte(struct protection_domain *domain,
diff --git a/drivers/iommu/msm_dma_iommu_mapping.c b/drivers/iommu/msm_dma_iommu_mapping.c
index 180edf3..05ed311 100644
--- a/drivers/iommu/msm_dma_iommu_mapping.c
+++ b/drivers/iommu/msm_dma_iommu_mapping.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -41,7 +41,7 @@
struct list_head lnode;
struct rb_node node;
struct device *dev;
- struct scatterlist sgl;
+ struct scatterlist *sgl;
unsigned int nents;
enum dma_data_direction dir;
struct msm_iommu_meta *meta;
@@ -148,6 +148,22 @@
static void msm_iommu_meta_put(struct msm_iommu_meta *meta);
+static struct scatterlist *clone_sgl(struct scatterlist *sg, int nents)
+{
+ struct scatterlist *next, *s;
+ int i;
+ struct sg_table table;
+
+ if (sg_alloc_table(&table, nents, GFP_KERNEL))
+ return NULL;
+ next = table.sgl;
+ for_each_sg(sg, s, nents, i) {
+ *next = *s;
+ next = sg_next(next);
+ }
+ return table.sgl;
+}
+
static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_buf *dma_buf,
@@ -191,20 +207,25 @@
}
ret = dma_map_sg_attrs(dev, sg, nents, dir, attrs);
- if (ret != nents) {
+ if (!ret) {
kfree(iommu_map);
goto out_unlock;
}
+ iommu_map->sgl = clone_sgl(sg, nents);
+ if (!iommu_map->sgl) {
+ kfree(iommu_map);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ iommu_map->nents = nents;
+ iommu_map->dev = dev;
+
kref_init(&iommu_map->ref);
if (late_unmap)
kref_get(&iommu_map->ref);
iommu_map->meta = iommu_meta;
- iommu_map->sgl.dma_address = sg->dma_address;
- iommu_map->sgl.dma_length = sg->dma_length;
- iommu_map->dev = dev;
iommu_map->dir = dir;
- iommu_map->nents = nents;
iommu_map->map_attrs = attrs;
iommu_map->buf_start_addr = sg_phys(sg);
msm_iommu_add(iommu_meta, iommu_map);
@@ -214,8 +235,8 @@
dir == iommu_map->dir &&
attrs == iommu_map->map_attrs &&
sg_phys(sg) == iommu_map->buf_start_addr) {
- sg->dma_address = iommu_map->sgl.dma_address;
- sg->dma_length = iommu_map->sgl.dma_length;
+ sg->dma_address = iommu_map->sgl->dma_address;
+ sg->dma_length = iommu_map->sgl->dma_length;
kref_get(&iommu_map->ref);
if (is_device_dma_coherent(dev))
@@ -315,9 +336,14 @@
{
struct msm_iommu_map *map = container_of(kref, struct msm_iommu_map,
ref);
+ struct sg_table table;
+ table.nents = table.orig_nents = map->nents;
+ table.sgl = map->sgl;
list_del(&map->lnode);
- dma_unmap_sg(map->dev, &map->sgl, map->nents, map->dir);
+
+ dma_unmap_sg(map->dev, map->sgl, map->nents, map->dir);
+ sg_free_table(&table);
kfree(map);
}
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 6a2df32..691ad06 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -687,6 +687,9 @@
if (!cdev->ap.applid)
return -ENODEV;
+ if (count < CAPIMSG_BASELEN)
+ return -EINVAL;
+
skb = alloc_skb(count, GFP_USER);
if (!skb)
return -ENOMEM;
@@ -697,7 +700,8 @@
}
mlen = CAPIMSG_LEN(skb->data);
if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
- if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
+ if (count < CAPI_DATA_B3_REQ_LEN ||
+ (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
kfree_skb(skb);
return -EINVAL;
}
@@ -710,6 +714,10 @@
CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
+ if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
mutex_lock(&cdev->lock);
capincci_free(cdev, CAPIMSG_NCCI(skb->data));
mutex_unlock(&cdev->lock);
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c b/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c
index 2eb00eb..834b535 100644
--- a/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c
+++ b/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -208,7 +208,7 @@
{
int rc = 0;
- if (!pdev || !&pdev->dev || !clk_info || !clk_ptr || !num_clk)
+ if (!pdev || (&pdev->dev == NULL) || !clk_info || !clk_ptr || !num_clk)
return -EINVAL;
rc = msm_camera_get_clk_info_internal(&pdev->dev,
@@ -513,7 +513,7 @@
{
int rc = 0;
- if (!pdev || !&pdev->dev || !clk_info || !clk_ptr)
+ if (!pdev || (&pdev->dev == NULL) || !clk_info || !clk_ptr)
return -EINVAL;
rc = msm_camera_put_clk_info_internal(&pdev->dev,
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index bcd9274..80b250d 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -636,7 +636,7 @@
{
struct msm_command_ack *cmd_ack = d1;
- if (!(&cmd_ack->command_q))
+ if (&cmd_ack->command_q == NULL)
return 0;
msm_queue_drain(&cmd_ack->command_q, struct msm_command, list);
@@ -646,7 +646,7 @@
static void msm_remove_session_cmd_ack_q(struct msm_session *session)
{
- if ((!session) || !(&session->command_ack_q))
+ if ((!session) || (&session->command_ack_q == NULL))
return;
mutex_lock(&session->lock);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile b/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile
index 6a28da5..69be506 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile
@@ -3,3 +3,4 @@
ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
obj-$(CONFIG_MSMB_CAMERA) += msm_flash.o
+obj-$(CONFIG_MSMB_CAMERA) += qm215_gpio_flash.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
index 656b1ca..5f2cce1 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
#include <linux/of_gpio.h>
#include <linux/leds-qpnp-flash.h>
#include "msm_flash.h"
+#include "msm_camera_io_util.h"
#include "msm_camera_dt_util.h"
#include "msm_cci.h"
@@ -388,7 +389,8 @@
{
int32_t rc = 0;
- if (!(&flash_ctrl->power_info) || !(&flash_ctrl->flash_i2c_client)) {
+ if ((&flash_ctrl->power_info == NULL) ||
+ (&flash_ctrl->flash_i2c_client == NULL)) {
pr_err("%s:%d failed: %pK %pK\n",
__func__, __LINE__, &flash_ctrl->power_info,
&flash_ctrl->flash_i2c_client);
@@ -514,6 +516,8 @@
__func__, __LINE__,
flash_data->cfg.flash_init_info->flash_driver_type);
}
+ if (flash_ctrl->platform_flash_init)
+ flash_ctrl->platform_flash_init(flash_ctrl, flash_data);
if (flash_ctrl->func_tbl->camera_flash_init) {
rc = flash_ctrl->func_tbl->camera_flash_init(
@@ -600,6 +604,8 @@
__func__, __LINE__, flash_ctrl->flash_state);
if (flash_ctrl->switch_trigger == NULL) {
+ if (flash_ctrl->platform_flash_init)
+ return ret;
pr_err("%s:%d Invalid argument\n",
__func__, __LINE__);
return -EINVAL;
@@ -1286,6 +1292,8 @@
kfree(flash_ctrl);
return -EINVAL;
}
+ if (flash_ctrl->flash_driver_type == FLASH_DRIVER_GPIO)
+ platform_set_drvdata(pdev, flash_ctrl);
flash_ctrl->flash_state = MSM_CAMERA_FLASH_RELEASE;
flash_ctrl->power_info.dev = &flash_ctrl->pdev->dev;
@@ -1334,6 +1342,11 @@
return rc;
}
+int32_t camera_flash_platform_probe(struct platform_device *pdev)
+{
+ return msm_flash_platform_probe(pdev);
+}
+
MODULE_DEVICE_TABLE(of, msm_flash_dt_match);
static struct platform_driver msm_flash_platform_driver = {
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h
index eda6ce4..67a8c2c 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2016, 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2016, 2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -102,6 +102,8 @@
/* flash state */
enum msm_camera_flash_state_t flash_state;
+ int32_t (*platform_flash_init)(struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data);
};
int msm_flash_i2c_probe(struct i2c_client *client,
@@ -124,4 +126,6 @@
int msm_flash_led_off(struct msm_flash_ctrl_t *fctrl);
int msm_flash_led_low(struct msm_flash_ctrl_t *fctrl);
int msm_flash_led_high(struct msm_flash_ctrl_t *fctrl);
+int32_t camera_flash_platform_probe(struct platform_device *pdev);
+
#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/qm215_gpio_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/qm215_gpio_flash.c
new file mode 100644
index 0000000..11a7db5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/qm215_gpio_flash.c
@@ -0,0 +1,264 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/export.h>
+#include "msm_camera_io_util.h"
+#include "msm_flash.h"
+
+#define FLASH_NAME "qcom,gpio-flash"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static const struct of_device_id msm_gpio_flash_dt_match[] = {
+ {.compatible = "qcom,qm215-gpio-flash", .data = NULL},
+ {}
+};
+static struct msm_flash_table qm215_gpio_flash_table;
+MODULE_DEVICE_TABLE(of, msm_flash_dt_match);
+
+static int32_t qm215_flash_low(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ struct msm_camera_power_ctrl_t *power_info = NULL;
+ struct msm_camera_gpio_num_info *gpio_num_info = NULL;
+
+ if (!flash_ctrl) {
+ pr_err("device data NULL\n");
+ return -EINVAL;
+ }
+
+ CDBG("Enter");
+ power_info = &flash_ctrl->power_info;
+ gpio_num_info = power_info->gpio_conf->gpio_num_info;
+
+ if (flash_ctrl->flash_driver_type == FLASH_DRIVER_GPIO &&
+ gpio_num_info->valid[SENSOR_GPIO_FL_NOW] &&
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN]) {
+
+ CDBG("flash op low gpio num %d(state:%d) %d(state: %d)\n",
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW],
+ GPIO_OUT_HIGH,
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN],
+ GPIO_OUT_HIGH);
+ gpio_set_value_cansleep(
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW],
+ GPIO_OUT_HIGH);
+ gpio_set_value_cansleep(
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN],
+ GPIO_OUT_HIGH);
+ }
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t qm215_flash_high(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ struct msm_camera_power_ctrl_t *power_info = NULL;
+ struct msm_camera_gpio_num_info *gpio_num_info = NULL;
+
+ if (!flash_ctrl) {
+ pr_err("device data NULL\n");
+ return -EINVAL;
+ }
+
+ CDBG("Enter\n");
+ power_info = &flash_ctrl->power_info;
+ gpio_num_info = power_info->gpio_conf->gpio_num_info;
+
+ if (flash_ctrl->flash_driver_type == FLASH_DRIVER_GPIO &&
+ gpio_num_info->valid[SENSOR_GPIO_FL_NOW] &&
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN]) {
+
+ CDBG("flash op high gpio num %d(state:%d) %d(state:%d)\n",
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW],
+ GPIO_OUT_LOW,
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN],
+ GPIO_OUT_HIGH);
+ gpio_set_value_cansleep(
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW],
+ GPIO_OUT_LOW);
+ gpio_set_value_cansleep(
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN],
+ GPIO_OUT_HIGH);
+ }
+ CDBG("Exit\n");
+
+ return 0;
+}
+
+static int32_t qm215_flash_release(
+ struct msm_flash_ctrl_t *flash_ctrl)
+{
+ int32_t rc = 0;
+
+ if (!flash_ctrl) {
+ pr_err("device data NULL\n");
+ return -EINVAL;
+ }
+
+ CDBG("Enter\n");
+ rc = flash_ctrl->func_tbl->camera_flash_off(flash_ctrl, NULL);
+ if (rc < 0) {
+ pr_err("%s:%d camera_flash_init failed rc = %d",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+ flash_ctrl->flash_state = MSM_CAMERA_FLASH_RELEASE;
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t qm215_flash_off(struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ struct msm_camera_power_ctrl_t *power_info = NULL;
+ struct msm_camera_gpio_num_info *gpio_num_info = NULL;
+
+ if (!flash_ctrl) {
+ pr_err("device data NULL\n");
+ return -EINVAL;
+ }
+
+ CDBG("Enter\n");
+ power_info = &flash_ctrl->power_info;
+ gpio_num_info = power_info->gpio_conf->gpio_num_info;
+
+ if (flash_ctrl->flash_driver_type == FLASH_DRIVER_GPIO &&
+ gpio_num_info->valid[SENSOR_GPIO_FL_NOW] &&
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN]) {
+
+ CDBG("flash off gpio num %d(state:%d) %d(state: %d)\n",
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW],
+ GPIO_OUT_LOW,
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN],
+ GPIO_OUT_LOW);
+ gpio_set_value_cansleep(
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW],
+ GPIO_OUT_LOW);
+ gpio_set_value_cansleep(
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN],
+ GPIO_OUT_LOW);
+ }
+
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t qm215_flash_gpio_init(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter");
+ rc = flash_ctrl->func_tbl->camera_flash_off(flash_ctrl, flash_data);
+
+ CDBG("Exit");
+ return rc;
+}
+
+
+static int32_t qm215_platform_flash_init(struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ if (!flash_ctrl) {
+ pr_err("devices data NULL\n");
+ return -EINVAL;
+ }
+
+ if (flash_ctrl->flash_driver_type == FLASH_DRIVER_GPIO)
+ flash_ctrl->func_tbl = &qm215_gpio_flash_table.func_tbl;
+
+ return 0;
+}
+static int32_t qm215_flash_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct msm_flash_ctrl_t *flash_ctrl = NULL;
+ struct msm_camera_power_ctrl_t *power_info = NULL;
+ struct msm_camera_gpio_conf *gpio_conf = NULL;
+
+ if (!pdev->dev.of_node) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+ CDBG("enter probe\n");
+ rc = camera_flash_platform_probe(pdev);
+ if (rc >= 0) {
+ flash_ctrl =
+ (struct msm_flash_ctrl_t *) platform_get_drvdata(pdev);
+ CDBG("device data %pK\n", flash_ctrl);
+ if (!flash_ctrl) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+ power_info = &flash_ctrl->power_info;
+ gpio_conf = power_info->gpio_conf;
+ rc = msm_camera_request_gpio_table(gpio_conf->cam_gpio_req_tbl,
+ gpio_conf->cam_gpio_req_tbl_size, 1);
+ if (rc < 0) {
+ pr_err("%s: request gpio failed\n", __func__);
+ return rc;
+ }
+ flash_ctrl->platform_flash_init = qm215_platform_flash_init;
+ }
+ return rc;
+}
+
+static struct platform_driver msm_gpio_flash_platform_driver = {
+ .probe = qm215_flash_platform_probe,
+ .driver = {
+ .name = "qcom,camera-gpio-flash",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_gpio_flash_dt_match,
+ },
+};
+
+static int __init qm215_gpio_flash_init_module(void)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+ rc = platform_driver_register(&msm_gpio_flash_platform_driver);
+ if (rc)
+ pr_err("platform probe for flash failed");
+
+ return rc;
+}
+
+static void __exit qm215_gpio_flash_exit_module(void)
+{
+ platform_driver_unregister(&msm_gpio_flash_platform_driver);
+}
+
+static struct msm_flash_table qm215_gpio_flash_table = {
+ .flash_driver_type = FLASH_DRIVER_GPIO,
+ .func_tbl = {
+ .camera_flash_init = qm215_flash_gpio_init,
+ .camera_flash_release = qm215_flash_release,
+ .camera_flash_off = qm215_flash_off,
+ .camera_flash_low = qm215_flash_low,
+ .camera_flash_high = qm215_flash_high,
+ .camera_flash_query_current = NULL,
+ },
+};
+
+module_init(qm215_gpio_flash_init_module);
+module_exit(qm215_gpio_flash_exit_module);
+MODULE_DESCRIPTION("MSM GPIO FLASH");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
index 9e9ef92..a33436e 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1350,7 +1350,7 @@
CDBG("%s: %d GPIO offset: %d, seq_val: %d\n", __func__, __LINE__,
gpio_offset, seq_val);
- if ((gconf->gpio_num_info->valid[gpio_offset] == 1)) {
+ if (gconf->gpio_num_info->valid[gpio_offset] == 1) {
gpio_set_value_cansleep(
gconf->gpio_num_info->gpio_num
[gpio_offset], val);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index eeca9f3..a6b90dd 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -3369,6 +3369,8 @@
if (hw_data->downscale_caps)
SPRINT("downscale_ratios=%s\n", hw_data->downscale_caps);
+ SPRINT("max_line_width=%d\n", sde_rotator_get_maxlinewidth(mgr));
+
#undef SPRINT
return cnt;
}
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index d8d7985..4b51846 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1850,34 +1850,6 @@
return rc;
}
-static int __get_q_size(struct venus_hfi_device *dev, unsigned int q_index)
-{
- struct hfi_queue_header *queue;
- struct vidc_iface_q_info *q_info;
- u32 write_ptr, read_ptr;
-
- if (q_index >= VIDC_IFACEQ_NUMQ) {
- dprintk(VIDC_ERR, "Invalid q index: %d\n", q_index);
- return -ENOENT;
- }
-
- q_info = &dev->iface_queues[q_index];
- if (!q_info) {
- dprintk(VIDC_ERR, "cannot read shared Q's\n");
- return -ENOENT;
- }
-
- queue = (struct hfi_queue_header *)q_info->q_hdr;
- if (!queue) {
- dprintk(VIDC_ERR, "queue not present\n");
- return -ENOENT;
- }
-
- write_ptr = (u32)queue->qhdr_write_idx;
- read_ptr = (u32)queue->qhdr_read_idx;
- return read_ptr - write_ptr;
-}
-
static void __core_clear_interrupt(struct venus_hfi_device *device)
{
u32 intr_status = 0;
@@ -3206,8 +3178,7 @@
*session_id = session->session_id;
}
- if (packet_count >= max_packets &&
- __get_q_size(device, VIDC_IFACEQ_MSGQ_IDX)) {
+ if (packet_count >= max_packets) {
dprintk(VIDC_WARN,
"Too many packets in message queue to handle at once, deferring read\n");
break;
diff --git a/drivers/media/platform/msm/vidc_3x/hfi_response_handler.c b/drivers/media/platform/msm/vidc_3x/hfi_response_handler.c
index 654d9fa..74d995f 100644
--- a/drivers/media/platform/msm/vidc_3x/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc_3x/hfi_response_handler.c
@@ -1382,13 +1382,7 @@
cmd_done.status = VIDC_ERR_NONE;
cmd_done.data.property.buf_req = buff_req;
cmd_done.size = sizeof(buff_req);
-
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_PROPERTY_INFO,
- .response.cmd = cmd_done,
- };
-
- return 0;
+ break;
case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
hfi_process_sess_get_prop_profile_level(pkt, &profile_level);
cmd_done.device_id = device_id;
@@ -1400,12 +1394,7 @@
.level = profile_level.level,
};
cmd_done.size = sizeof(struct hal_profile_level);
-
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_PROPERTY_INFO,
- .response.cmd = cmd_done,
- };
- return 0;
+ break;
case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
hfi_process_sess_get_prop_dec_entropy(pkt, &entropy);
cmd_done.device_id = device_id;
@@ -1413,18 +1402,19 @@
cmd_done.status = VIDC_ERR_NONE;
cmd_done.data.property.h264_entropy = entropy;
cmd_done.size = sizeof(enum hal_h264_entropy);
-
- *info = (struct msm_vidc_cb_info) {
- .response_type = HAL_SESSION_PROPERTY_INFO,
- .response.cmd = cmd_done,
- };
- return 0;
+ break;
default:
dprintk(VIDC_DBG,
"hal_process_session_prop_info: unknown_prop_id: %x\n",
pkt->rg_property_data[0]);
return -ENOTSUPP;
}
+ *info = (struct msm_vidc_cb_info) {
+ .response_type = HAL_SESSION_PROPERTY_INFO,
+ .response.cmd = cmd_done,
+ };
+ return 0;
+
}
static int hfi_process_session_init_done(u32 device_id,
@@ -1835,12 +1825,8 @@
cmd_done.size = sizeof(struct msm_vidc_cb_cmd_done);
cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
cmd_done.status = hfi_map_err_status(pkt->error_type);
- if (pkt->rg_buffer_info) {
- cmd_done.data.buffer_info.buffer_addr = *pkt->rg_buffer_info;
- cmd_done.size = sizeof(struct hal_buffer_info);
- } else {
- dprintk(VIDC_ERR, "invalid payload in rel_buff_done\n");
- }
+ cmd_done.data.buffer_info.buffer_addr = *pkt->rg_buffer_info;
+ cmd_done.size = sizeof(struct hal_buffer_info);
*info = (struct msm_vidc_cb_info) {
.response_type = HAL_SESSION_RELEASE_BUFFER_DONE,
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc.c b/drivers/media/platform/msm/vidc_3x/msm_vidc.c
index 07804d2..c38784c 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc.c
@@ -1381,16 +1381,16 @@
return 0;
}
+static void close_helper(struct kref *kref)
+{
+ struct msm_vidc_inst *inst = container_of(kref,
+ struct msm_vidc_inst, kref);
+
+ msm_vidc_destroy(inst);
+}
+
int msm_vidc_close(void *instance)
{
- void close_helper(struct kref *kref)
- {
- struct msm_vidc_inst *inst = container_of(kref,
- struct msm_vidc_inst, kref);
-
- msm_vidc_destroy(inst);
- }
-
struct msm_vidc_inst *inst = instance;
struct buffer_info *bi, *dummy;
int rc = 0;
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
index d7e2154..c0efc3b 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c
@@ -727,15 +727,15 @@
}
+static void put_inst_helper(struct kref *kref)
+{
+ struct msm_vidc_inst *inst = container_of(kref,
+ struct msm_vidc_inst, kref);
+ msm_vidc_destroy(inst);
+}
+
void put_inst(struct msm_vidc_inst *inst)
{
- void put_inst_helper(struct kref *kref)
- {
- struct msm_vidc_inst *inst = container_of(kref,
- struct msm_vidc_inst, kref);
- msm_vidc_destroy(inst);
- }
-
if (!inst)
return;
diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc_3x/msm_vidc_res_parse.c
index f2c93cd..1f685ee 100644
--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_res_parse.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -59,13 +59,13 @@
return 0;
}
+static bool is_compatible(char *compat)
+{
+ return !!of_find_compatible_node(NULL, NULL, compat);
+}
+
static inline enum imem_type read_imem_type(struct platform_device *pdev)
{
- bool is_compatible(char *compat)
- {
- return !!of_find_compatible_node(NULL, NULL, compat);
- }
-
return is_compatible("qcom,msm-ocmem") ? IMEM_OCMEM :
is_compatible("qcom,msm-vmem") ? IMEM_VMEM :
IMEM_NONE;
@@ -624,20 +624,20 @@
return rc;
}
+/* A comparator to compare loads (needed later on) */
+static int cmp(const void *a, const void *b)
+{
+ /* want to sort in reverse so flip the comparison */
+ return ((struct load_freq_table *)b)->load -
+ ((struct load_freq_table *)a)->load;
+}
+
static int msm_vidc_load_freq_table(struct msm_vidc_platform_resources *res)
{
int rc = 0;
int num_elements = 0;
struct platform_device *pdev = res->pdev;
- /* A comparator to compare loads (needed later on) */
- int cmp(const void *a, const void *b)
- {
- /* want to sort in reverse so flip the comparison */
- return ((struct load_freq_table *)b)->load -
- ((struct load_freq_table *)a)->load;
- }
-
if (!of_find_property(pdev->dev.of_node, "qcom,load-freq-tbl", NULL)) {
/* qcom,load-freq-tbl is an optional property. It likely won't
* be present on cores that we can't clock scale on.
diff --git a/drivers/media/platform/msm/vidc_3x/venus_hfi.c b/drivers/media/platform/msm/vidc_3x/venus_hfi.c
index 2d0e353..550e060 100644
--- a/drivers/media/platform/msm/vidc_3x/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc_3x/venus_hfi.c
@@ -2289,34 +2289,6 @@
return rc;
}
-static int __get_q_size(struct venus_hfi_device *dev, unsigned int q_index)
-{
- struct hfi_queue_header *queue;
- struct vidc_iface_q_info *q_info;
- u32 write_ptr, read_ptr;
-
- if (q_index >= VIDC_IFACEQ_NUMQ) {
- dprintk(VIDC_ERR, "Invalid q index: %d\n", q_index);
- return -ENOENT;
- }
-
- q_info = &dev->iface_queues[q_index];
- if (!q_info) {
- dprintk(VIDC_ERR, "cannot read shared Q's\n");
- return -ENOENT;
- }
-
- queue = (struct hfi_queue_header *)q_info->q_hdr;
- if (!queue) {
- dprintk(VIDC_ERR, "queue not present\n");
- return -ENOENT;
- }
-
- write_ptr = (u32)queue->qhdr_write_idx;
- read_ptr = (u32)queue->qhdr_read_idx;
- return read_ptr - write_ptr;
-}
-
static void __core_clear_interrupt(struct venus_hfi_device *device)
{
u32 intr_status = 0;
@@ -3605,8 +3577,7 @@
*session_id = session->session_id;
}
- if (packet_count >= max_packets &&
- __get_q_size(device, VIDC_IFACEQ_MSGQ_IDX)) {
+ if (packet_count >= max_packets) {
dprintk(VIDC_WARN,
"Too many packets in message queue to handle at once, deferring read\n");
break;
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 4706628..10bccce 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -612,10 +612,9 @@
static int technisat_usb2_get_ir(struct dvb_usb_device *d)
{
struct technisat_usb2_state *state = d->priv;
- u8 *buf = state->buf;
- u8 *b;
- int ret;
struct ir_raw_event ev;
+ u8 *buf = state->buf;
+ int i, ret;
buf[0] = GET_IR_DATA_VENDOR_REQUEST;
buf[1] = 0x08;
@@ -651,26 +650,25 @@
return 0; /* no key pressed */
/* decoding */
- b = buf+1;
#if 0
deb_rc("RC: %d ", ret);
- debug_dump(b, ret, deb_rc);
+ debug_dump(buf + 1, ret, deb_rc);
#endif
ev.pulse = 0;
- while (1) {
- ev.pulse = !ev.pulse;
- ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000;
- ir_raw_event_store(d->rc_dev, &ev);
-
- b++;
- if (*b == 0xff) {
+ for (i = 1; i < ARRAY_SIZE(state->buf); i++) {
+ if (buf[i] == 0xff) {
ev.pulse = 0;
ev.duration = 888888*2;
ir_raw_event_store(d->rc_dev, &ev);
break;
}
+
+ ev.pulse = !ev.pulse;
+ ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR *
+ FIRMWARE_CLOCK_TICK) / 1000;
+ ir_raw_event_store(d->rc_dev, &ev);
}
ir_raw_event_handle(d->rc_dev);
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index ee88ae8..185c807 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -111,6 +111,7 @@
printk(KERN_ERR "tm6000: error %s\n", __func__);
kfree(urb->transfer_buffer);
usb_free_urb(urb);
+ dev->dvb->bulk_urb = NULL;
}
}
}
@@ -141,6 +142,7 @@
dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
if (dvb->bulk_urb->transfer_buffer == NULL) {
usb_free_urb(dvb->bulk_urb);
+ dvb->bulk_urb = NULL;
printk(KERN_ERR "tm6000: couldn't allocate transfer buffer!\n");
return -ENOMEM;
}
@@ -168,6 +170,7 @@
kfree(dvb->bulk_urb->transfer_buffer);
usb_free_urb(dvb->bulk_urb);
+ dvb->bulk_urb = NULL;
return ret;
}
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 8618eba..2e50546 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -30,6 +30,13 @@
get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \
})
+#define convert_in_user(srcptr, dstptr) \
+({ \
+ typeof(*srcptr) val; \
+ \
+ get_user(val, srcptr) || put_user(val, dstptr); \
+})
+
static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long ret = -ENOIOCTLCMD;
@@ -513,6 +520,15 @@
&up->timestamp.tv_usec))
return -EFAULT;
+ if (type == V4L2_BUF_TYPE_PRIVATE) {
+ compat_long_t tmp;
+
+ if (get_user(tmp, &up->m.userptr) ||
+ put_user((unsigned long) compat_ptr(tmp),
+ &kp->m.userptr))
+ return -EFAULT;
+ }
+
if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
u32 num_planes = length;
@@ -611,6 +627,10 @@
put_user(length, &up->length))
return -EFAULT;
+ if (type == V4L2_BUF_TYPE_PRIVATE)
+ if (convert_in_user(&kp->m.userptr, &up->m.userptr))
+ return -EFAULT;
+
if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
u32 num_planes = length;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index de8aef0..9b98d97 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1329,6 +1329,8 @@
descr = "Y/CbCr 4:2:0 TP10 UBWC"; break;
case V4L2_PIX_FMT_NV12_P010_UBWC:
descr = "Y/CbCr 4:2:0 P010 UBWC"; break;
+ case V4L2_PIX_FMT_NV12_512:
+ descr = "Y/CbCr 4:2:0 (512 align)"; break;
default:
/* Compressed formats */
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 67d8e33..e238cb4 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1814,6 +1814,16 @@
if (ptr_svc->svc.listener_id == lstnr) {
ptr_svc->listener_in_use = true;
ptr_svc->rcv_req_flag = 1;
+ rc = msm_ion_do_cache_op(qseecom.ion_clnt,
+ ptr_svc->ihandle,
+ ptr_svc->sb_virt,
+ ptr_svc->sb_length,
+ ION_IOC_INV_CACHES);
+ if (rc) {
+ pr_err("cache opp failed %d\n", rc);
+ status = QSEOS_RESULT_FAILURE;
+ goto err_resp;
+ }
wake_up_interruptible(&ptr_svc->rcv_req_wq);
break;
}
@@ -2142,6 +2152,16 @@
if (ptr_svc->svc.listener_id == lstnr) {
ptr_svc->listener_in_use = true;
ptr_svc->rcv_req_flag = 1;
+ rc = msm_ion_do_cache_op(qseecom.ion_clnt,
+ ptr_svc->ihandle,
+ ptr_svc->sb_virt,
+ ptr_svc->sb_length,
+ ION_IOC_INV_CACHES);
+ if (rc) {
+ pr_err("cache opp failed %d\n", rc);
+ status = QSEOS_RESULT_FAILURE;
+ goto err_resp;
+ }
wake_up_interruptible(&ptr_svc->rcv_req_wq);
break;
}
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index 5223a21..ca95ae0 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -810,19 +810,21 @@
return ret & NAND_STATUS_FAIL ? -EIO : 0;
}
-static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
+static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
+ u32 sectors)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
struct mtk_ecc_stats stats;
+ u32 reg_size = mtk_nand->fdm.reg_size;
int rc, i;
rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
if (rc) {
memset(buf, 0xff, sectors * chip->ecc.size);
for (i = 0; i < sectors; i++)
- memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
+ memset(oob_ptr(chip, start + i), 0xff, reg_size);
return 0;
}
@@ -842,7 +844,7 @@
u32 spare = mtk_nand->spare_per_sector;
u32 column, sectors, start, end, reg;
dma_addr_t addr;
- int bitflips;
+ int bitflips = 0;
size_t len;
u8 *buf;
int rc;
@@ -910,14 +912,11 @@
if (rc < 0) {
dev_err(nfc->dev, "subpage done timeout\n");
bitflips = -EIO;
- } else {
- bitflips = 0;
- if (!raw) {
- rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
- bitflips = rc < 0 ? -ETIMEDOUT :
- mtk_nfc_update_ecc_stats(mtd, buf, sectors);
- mtk_nfc_read_fdm(chip, start, sectors);
- }
+ } else if (!raw) {
+ rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
+ bitflips = rc < 0 ? -ETIMEDOUT :
+ mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
+ mtk_nfc_read_fdm(chip, start, sectors);
}
dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 93ceea4..ab0120c 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -834,9 +834,21 @@
int err = 0;
struct ubi_ainf_peb *aeb, *tmp_aeb;
- if (!list_empty(&ai->free)) {
- aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
+ list_for_each_entry_safe(aeb, tmp_aeb, &ai->free, u.list) {
list_del(&aeb->u.list);
+ if (aeb->ec == UBI_UNKNOWN) {
+ ubi_err(ubi, "PEB %d in freelist has unknown EC",
+ aeb->pnum);
+ aeb->ec = ai->mean_ec;
+ }
+ err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
+ if (err) {
+ ubi_err(ubi, "Erase failed for PEB %d in freelist",
+ aeb->pnum);
+ list_add(&aeb->u.list, &ai->erase);
+ continue;
+ }
+ aeb->ec += 1;
dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
return aeb;
}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 541c179..8d28b22 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -125,6 +125,9 @@
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf);
+static ssize_t dev_attribute_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
@@ -151,6 +154,13 @@
__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_ro_mode =
__ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_mtd_trigger_scrub =
+ __ATTR(scrub_all, 0644,
+ dev_attribute_show, dev_attribute_store);
+static struct device_attribute dev_mtd_max_scrub_sqnum =
+ __ATTR(scrub_max_sqnum, 0444, dev_attribute_show, NULL);
+static struct device_attribute dev_mtd_min_scrub_sqnum =
+ __ATTR(scrub_min_sqnum, 0444, dev_attribute_show, NULL);
/**
* ubi_volume_notify - send a volume change notification.
@@ -343,6 +353,17 @@
return ubi_num;
}
+static unsigned long long get_max_sqnum(struct ubi_device *ubi)
+{
+ unsigned long long max_sqnum;
+
+ spin_lock(&ubi->ltree_lock);
+ max_sqnum = ubi->global_sqnum - 1;
+ spin_unlock(&ubi->ltree_lock);
+
+ return max_sqnum;
+}
+
/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -389,6 +410,15 @@
ret = sprintf(buf, "%d\n", ubi->mtd->index);
else if (attr == &dev_ro_mode)
ret = sprintf(buf, "%d\n", ubi->ro_mode);
+ else if (attr == &dev_mtd_trigger_scrub)
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ atomic_read(&ubi->scrub_work_count));
+ else if (attr == &dev_mtd_max_scrub_sqnum)
+ ret = snprintf(buf, PAGE_SIZE, "%llu\n",
+ get_max_sqnum(ubi));
+ else if (attr == &dev_mtd_min_scrub_sqnum)
+ ret = snprintf(buf, PAGE_SIZE, "%llu\n",
+ ubi_wl_scrub_get_min_sqnum(ubi));
else
ret = -EINVAL;
@@ -409,10 +439,45 @@
&dev_bgt_enabled.attr,
&dev_mtd_num.attr,
&dev_ro_mode.attr,
+ &dev_mtd_trigger_scrub.attr,
+ &dev_mtd_max_scrub_sqnum.attr,
+ &dev_mtd_min_scrub_sqnum.attr,
NULL
};
ATTRIBUTE_GROUPS(ubi_dev);
+static ssize_t dev_attribute_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret = count;
+ struct ubi_device *ubi;
+ unsigned long long scrub_sqnum;
+
+ ubi = container_of(dev, struct ubi_device, dev);
+ ubi = ubi_get_device(ubi->ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ if (attr == &dev_mtd_trigger_scrub) {
+ if (kstrtoull(buf, 10, &scrub_sqnum)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!ubi->lookuptbl) {
+ pr_err("lookuptbl is null");
+ goto out;
+ }
+ ret = ubi_wl_scrub_all(ubi, scrub_sqnum);
+ if (ret == 0)
+ ret = count;
+ }
+
+out:
+ ubi_put_device(ubi);
+ return ret;
+}
+
static void dev_release(struct device *dev)
{
struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index b6fb8f9..f036165 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1105,6 +1105,16 @@
dbg_io("write VID header to PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+ /*
+ * Re-erase the PEB before using it. This should minimize any issues
+ * from decay of charge in this block.
+ */
+ if (ubi->wl_is_inited) {
+ err = ubi_wl_re_erase_peb(ubi, pnum);
+ if (err)
+ return err;
+ }
+
err = self_check_peb_ec_hdr(ubi, pnum);
if (err)
return err;
@@ -1123,6 +1133,8 @@
err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
+ if (!err && ubi->wl_is_inited)
+ ubi_wl_update_peb_sqnum(ubi, pnum, vid_hdr);
return err;
}
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 697dbcb..13efd64 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -183,6 +183,8 @@
* @u.list: link in the protection queue
* @ec: erase counter
* @pnum: physical eraseblock number
+ * @tagged_scrub_all: if the entry is tagged for scrub all
+ * @sqnum: The sequence number of the vol header.
*
* This data structure is used in the WL sub-system. Each physical eraseblock
* has a corresponding &struct wl_entry object which may be kept in different
@@ -195,6 +197,8 @@
} u;
int ec;
int pnum;
+ unsigned int tagged_scrub_all:1;
+ unsigned long long sqnum;
};
/**
@@ -624,6 +628,9 @@
struct task_struct *bgt_thread;
int thread_enabled;
char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
+ bool scrub_in_progress;
+ atomic_t scrub_work_count;
+ int wl_is_inited;
/* I/O sub-system's stuff */
long long flash_size;
@@ -919,6 +926,12 @@
int ubi_is_erase_work(struct ubi_work *wrk);
void ubi_refill_pools(struct ubi_device *ubi);
int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
+ssize_t ubi_wl_scrub_all(struct ubi_device *ubi,
+ unsigned long long scrub_sqnum);
+void ubi_wl_update_peb_sqnum(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr);
+unsigned long long ubi_wl_scrub_get_min_sqnum(struct ubi_device *ubi);
+int ubi_wl_re_erase_peb(struct ubi_device *ubi, int pnum);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index a8f74d9..5c0332e 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -102,6 +102,7 @@
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
+#include <linux/delay.h>
#include "ubi.h"
#include "wl.h"
@@ -487,6 +488,42 @@
return err;
}
+int ubi_wl_re_erase_peb(struct ubi_device *ubi, int pnum)
+{
+ int err;
+ struct ubi_wl_entry *e;
+ struct ubi_ec_hdr *ec_hdr;
+
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+ spin_unlock(&ubi->wl_lock);
+
+ dbg_wl("Re-erase PEB %d, EC %u", e->pnum, e->ec);
+
+ err = self_check_ec(ubi, e->pnum, e->ec);
+ if (err)
+ return -EINVAL;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ err = ubi_io_sync_erase(ubi, e->pnum, 0);
+ if (err < 0)
+ goto out_free;
+
+ dbg_wl("re-erased PEB %d, EC %u", e->pnum, e->ec);
+
+ ec_hdr->ec = cpu_to_be64(e->ec);
+
+ err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
+ if (err)
+ goto out_free;
+out_free:
+ kfree(ec_hdr);
+ return err;
+}
+
/**
* serve_prot_queue - check if it is time to stop protecting PEBs.
* @ubi: UBI device description object
@@ -862,9 +899,20 @@
}
/* The PEB has been successfully moved */
- if (scrubbing)
- ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
- e1->pnum, vol_id, lnum, e2->pnum);
+ if (scrubbing) {
+ spin_lock(&ubi->wl_lock);
+ if (e1->tagged_scrub_all) {
+ WARN_ON(atomic_read(&ubi->scrub_work_count) <= 0);
+ atomic_dec(&ubi->scrub_work_count);
+ e1->tagged_scrub_all = 0;
+ e2->tagged_scrub_all = 0;
+ } else {
+ ubi_msg(ubi,
+ "scrubbed PEB %d (LEB %d:%d),data moved to PEB %d",
+ e1->pnum, vol_id, lnum, e2->pnum);
+ }
+ spin_unlock(&ubi->wl_lock);
+ }
ubi_free_vid_buf(vidb);
spin_lock(&ubi->wl_lock);
@@ -1226,6 +1274,7 @@
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
+ e->sqnum = UBI_UNKNOWN;
if (e == ubi->move_from) {
/*
* User is putting the physical eraseblock which was selected to
@@ -1262,6 +1311,20 @@
} else if (in_wl_tree(e, &ubi->scrub)) {
self_check_in_wl_tree(ubi, e, &ubi->scrub);
rb_erase(&e->u.rb, &ubi->scrub);
+
+ /*
+		 * Since this PEB has been put, we don't need to worry
+ * about it anymore
+ */
+ if (e->tagged_scrub_all) {
+ int wrk_count;
+
+ wrk_count = atomic_read(&ubi->scrub_work_count);
+ WARN_ON(wrk_count <= 0);
+
+ atomic_dec(&ubi->scrub_work_count);
+ e->tagged_scrub_all = 0;
+ }
} else if (in_wl_tree(e, &ubi->erroneous)) {
self_check_in_wl_tree(ubi, e, &ubi->erroneous);
rb_erase(&e->u.rb, &ubi->erroneous);
@@ -1294,6 +1357,197 @@
}
/**
+ * ubi_wl_scrub_get_min_sqnum - Return the minimum sqnum of the used/pq/scrub.
+ * @ubi: UBI device description object
+ *
+ * This function returns the minimum sqnum of the PEBs that are currently in
+ * use, i.e. those on the protection queue and in the used and scrub RB-trees.
+ *
+ * Return the min sqnum if there are any used PEB's otherwise return ~(0)
+ *
+ */
+unsigned long long ubi_wl_scrub_get_min_sqnum(struct ubi_device *ubi)
+{
+	int i;
+	struct ubi_wl_entry *e;
+	struct rb_node *node;
+	unsigned long long min_sqnum = ~0;
+
+	spin_lock(&ubi->wl_lock);
+
+	/*
+	 * Go through the pq list. Nothing is deleted while iterating, so the
+	 * plain (non-_safe) iterator is sufficient.
+	 */
+	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
+		list_for_each_entry(e, &ubi->pq[i], u.list) {
+			if (e->sqnum < min_sqnum)
+				min_sqnum = e->sqnum;
+		}
+	}
+
+	/* Go through used PEB tree */
+	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
+		e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		self_check_in_wl_tree(ubi, e, &ubi->used);
+		if (e->sqnum < min_sqnum)
+			min_sqnum = e->sqnum;
+	}
+	/* Go through scrub PEB tree */
+	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
+		e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		self_check_in_wl_tree(ubi, e, &ubi->scrub);
+		if (e->sqnum < min_sqnum)
+			min_sqnum = e->sqnum;
+	}
+	spin_unlock(&ubi->wl_lock);
+	return min_sqnum;
+}
+
+/**
+ * ubi_wl_update_peb_sqnum - Update the vol hdr sqnum of the PEB.
+ * @ubi: UBI device description object
+ * @pnum: The PEB number.
+ * @vid_hdr: The vol hdr being written to the PEB.
+ *
+ * Records the on-flash sequence number for @pnum in its in-RAM wear-levelling
+ * entry and clears the "scrub all" tag, since the PEB is being rewritten and
+ * no longer needs the pending full-device scrub treatment.
+ */
+void ubi_wl_update_peb_sqnum(struct ubi_device *ubi, int pnum,
+			      struct ubi_vid_hdr *vid_hdr)
+{
+	struct ubi_wl_entry *e;
+
+	spin_lock(&ubi->wl_lock);
+	/* NOTE(review): assumes lookuptbl[pnum] is non-NULL here — confirm */
+	e = ubi->lookuptbl[pnum];
+	e->sqnum = be64_to_cpu(vid_hdr->sqnum);
+	e->tagged_scrub_all = 0;
+	spin_unlock(&ubi->wl_lock);
+}
+
+/* Tell whether scrubbing must be refused: RO mode or background thread off. */
+static int is_ubi_readonly(struct ubi_device *ubi)
+{
+	int ro;
+
+	spin_lock(&ubi->wl_lock);
+	ro = (ubi->ro_mode || !ubi->thread_enabled ||
+	      ubi_dbg_is_bgt_disabled(ubi)) ? 1 : 0;
+	spin_unlock(&ubi->wl_lock);
+
+	return ro;
+}
+
+/**
+ * ubi_wl_scrub_all - schedule all in-use PEBs for scrubbing
+ * @ubi: UBI device description object
+ * @scrub_sqnum: The max seqnum of the PEB to scrub from the used/pq lists
+ *
+ * This function schedules all device PEBs for scrubbing if the sqnum of the
+ * vol hdr is less than the sqnum in the trigger, then waits for the
+ * background thread to drain the resulting scrub work.
+ *
+ * Return 0 in case of success, (negative) error code otherwise
+ *
+ */
+ssize_t ubi_wl_scrub_all(struct ubi_device *ubi, unsigned long long scrub_sqnum)
+{
+	struct rb_node *node;
+	struct ubi_wl_entry *e, *tmp;
+	int scrub_count = 0;
+	int total_scrub_count = 0;
+	int err, i;
+
+	if (!ubi->lookuptbl) {
+		ubi_err(ubi, "lookuptbl is null");
+		return -ENOENT;
+	}
+
+	if (is_ubi_readonly(ubi)) {
+		ubi_err(ubi, "Cannot *Initiate* scrub:background thread disabled or readonly!");
+		return -EROFS;
+	}
+
+	/* Wait for all currently running work to be done! */
+	down_write(&ubi->work_sem);
+	spin_lock(&ubi->wl_lock);
+	ubi_msg(ubi, "Scrub triggered sqnum = %llu!", scrub_sqnum);
+
+	if (ubi->scrub_in_progress) {
+		ubi_err(ubi, "Scrub already in progress, ignoring the trigger");
+		spin_unlock(&ubi->wl_lock);
+		up_write(&ubi->work_sem); /* Allow new work to start. */
+		return -EBUSY;
+	}
+	ubi->scrub_in_progress = true;
+
+	/* Go through scrub PEB tree and count pending */
+	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
+		e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		self_check_in_wl_tree(ubi, e, &ubi->scrub);
+		e->tagged_scrub_all = 1;
+		total_scrub_count++;
+	}
+
+	/* Move all used pebs to scrub tree */
+	node = rb_first(&ubi->used);
+	while (node != NULL) {
+		e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		self_check_in_wl_tree(ubi, e, &ubi->used);
+		/* Advance before rb_erase() invalidates the current node */
+		node = rb_next(node);
+
+		if (e->sqnum > scrub_sqnum)
+			continue;
+		rb_erase(&e->u.rb, &ubi->used);
+		wl_tree_add(e, &ubi->scrub);
+		e->tagged_scrub_all = 1;
+		scrub_count++;
+		total_scrub_count++;
+	}
+
+	/* Move all protected pebs to scrub tree */
+	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
+		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
+
+			if (e->sqnum > scrub_sqnum)
+				continue;
+
+			list_del(&e->u.list);
+			wl_tree_add(e, &ubi->scrub);
+			e->tagged_scrub_all = 1;
+			scrub_count++;
+			total_scrub_count++;
+		}
+	}
+
+	atomic_set(&ubi->scrub_work_count, total_scrub_count);
+	spin_unlock(&ubi->wl_lock);
+	up_write(&ubi->work_sem); /* Allow new work to start. */
+
+	/*
+	 * Technically scrubbing is the same as wear-leveling, so it is done
+	 * by the WL worker.
+	 */
+	err = ensure_wear_leveling(ubi, 0);
+	if (err) {
+		ubi_err(ubi, "Failed to start the WL worker err =%d", err);
+		goto out_clear;
+	}
+	ubi_msg(ubi, "Scheduled %d PEB's for scrubbing!", scrub_count);
+	ubi_msg(ubi, "Total PEB's for scrub = %d", total_scrub_count);
+
+	/* Wait for scrub to finish */
+	while (atomic_read(&ubi->scrub_work_count) > 0) {
+		/* Poll every second to check if the scrub work is done */
+		msleep(1000);
+
+		if (is_ubi_readonly(ubi)) {
+			ubi_err(ubi, "Cannot *Complete* scrub:background thread disabled or readonly!");
+			err = -EROFS;
+			goto out_clear;
+		}
+		wake_up_process(ubi->bgt_thread);
+	}
+
+	err = 0;
+	ubi_msg(ubi, "Done scrubbing %d PEB's!", scrub_count);
+
+out_clear:
+	/*
+	 * Always drop the in-progress flag, including on the error paths
+	 * above; leaving it set would make every subsequent scrub trigger
+	 * fail with -EBUSY forever.
+	 */
+	spin_lock(&ubi->wl_lock);
+	ubi->scrub_in_progress = false;
+	spin_unlock(&ubi->wl_lock);
+	return err;
+}
+
+/**
* ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
* @ubi: UBI device description object
* @pnum: the physical eraseblock to schedule
@@ -1622,6 +1876,8 @@
e->pnum = aeb->pnum;
e->ec = aeb->ec;
+ e->tagged_scrub_all = 0;
+ e->sqnum = aeb->sqnum;
ubi_assert(e->ec >= 0);
wl_tree_add(e, &ubi->free);
@@ -1644,6 +1900,8 @@
e->pnum = aeb->pnum;
e->ec = aeb->ec;
+ e->tagged_scrub_all = 0;
+ e->sqnum = aeb->sqnum;
ubi->lookuptbl[e->pnum] = e;
if (!aeb->scrub) {
@@ -1724,6 +1982,7 @@
if (err)
goto out_free;
+ ubi->wl_is_inited = 1;
return 0;
out_free:
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 59dbecd..49f6929 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4946,6 +4946,13 @@
DMI_MATCH(DMI_BOARD_NAME, "P6T"),
},
},
+ {
+ .ident = "ASUS P6X",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P6X"),
+ },
+ },
{}
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index a769196..708117f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -958,7 +958,7 @@
&drv_version);
if (rc) {
DP_NOTICE(cdev, "Failed sending drv version command\n");
- return rc;
+ goto err4;
}
}
@@ -966,6 +966,8 @@
return 0;
+err4:
+ qed_ll2_dealloc_if(cdev);
err3:
qed_hw_stop(cdev);
err2:
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index c2bd537..3527962 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -792,15 +792,16 @@
printk(KERN_ERR "Sgiseeq: Cannot register net device, "
"aborting.\n");
err = -ENODEV;
- goto err_out_free_page;
+ goto err_out_free_attrs;
}
printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
return 0;
-err_out_free_page:
- free_page((unsigned long) sp->srings);
+err_out_free_attrs:
+ dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
+ sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
err_out_free_dev:
free_netdev(dev);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e33a45e..134c59c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -627,7 +627,8 @@
module_put(THIS_MODULE);
}
-static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
+static int tun_attach(struct tun_struct *tun, struct file *file,
+ bool skip_filter, bool publish_tun)
{
struct tun_file *tfile = file->private_data;
struct net_device *dev = tun->dev;
@@ -669,7 +670,8 @@
tfile->queue_index = tun->numqueues;
tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
- rcu_assign_pointer(tfile->tun, tun);
+ if (publish_tun)
+ rcu_assign_pointer(tfile->tun, tun);
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
tun->numqueues++;
@@ -1755,7 +1757,7 @@
if (err < 0)
return err;
- err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
+ err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, true);
if (err < 0)
return err;
@@ -1843,13 +1845,17 @@
NETIF_F_HW_VLAN_STAG_TX);
INIT_LIST_HEAD(&tun->disabled);
- err = tun_attach(tun, file, false);
+ err = tun_attach(tun, file, false, false);
if (err < 0)
goto err_free_flow;
err = register_netdevice(tun->dev);
if (err < 0)
goto err_detach;
+		/* free_netdev() won't check refcnt; to avoid a race
+		 * with dev_put() we need to publish tun after registration.
+		 */
+ rcu_assign_pointer(tfile->tun, tun);
}
netif_carrier_on(tun->dev);
@@ -1993,7 +1999,7 @@
ret = security_tun_dev_attach_queue(tun->security);
if (ret < 0)
goto unlock;
- ret = tun_attach(tun, file, false);
+ ret = tun_attach(tun, file, false, true);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 99424c8..8f03cc5 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -212,9 +212,16 @@
goto bad_desc;
}
skip:
- if ( rndis &&
- header.usb_cdc_acm_descriptor &&
- header.usb_cdc_acm_descriptor->bmCapabilities) {
+ /* Communcation class functions with bmCapabilities are not
+ * RNDIS. But some Wireless class RNDIS functions use
+ * bmCapabilities for their own purpose. The failsafe is
+ * therefore applied only to Communication class RNDIS
+ * functions. The rndis test is redundant, but a cheap
+ * optimization.
+ */
+ if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
+ header.usb_cdc_acm_descriptor &&
+ header.usb_cdc_acm_descriptor->bmCapabilities) {
dev_dbg(&intf->dev,
"ACM capabilities %02x, not really RNDIS?\n",
header.usb_cdc_acm_descriptor->bmCapabilities);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 57b47b6..b9ba8bb 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -868,12 +868,13 @@
ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
if (ret == 0) {
- if (sig == OTP_INDICATOR_1)
- offset = offset;
- else if (sig == OTP_INDICATOR_2)
- offset += 0x100;
- else
- ret = -EINVAL;
+ if (sig != OTP_INDICATOR_1) {
+ if (sig == OTP_INDICATOR_2)
+ offset += 0x100;
+ else
+ ret = -EINVAL;
+ }
+
if (!ret)
ret = lan78xx_read_raw_otp(dev, offset, length, data);
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 02e2956..15dc70c 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -689,8 +689,11 @@
ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
value, index, tmp, size, 500);
+ if (ret < 0)
+ memset(data, 0xff, size);
+ else
+ memcpy(data, tmp, size);
- memcpy(data, tmp, size);
kfree(tmp);
return ret;
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 5e66b6e..22b7fec 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -1003,14 +1003,16 @@
}
if (pci_priv->pci_link_state == PCI_LINK_UP) {
- ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND);
- if (ret) {
- if (driver_ops && driver_ops->resume)
- driver_ops->resume(pci_dev);
- ret = -EAGAIN;
- goto out;
+ if (pci_priv->device_id != QCA6174_DEVICE_ID) {
+ ret = cnss_pci_set_mhi_state(pci_priv,
+ CNSS_MHI_SUSPEND);
+ if (ret) {
+ if (driver_ops && driver_ops->resume)
+ driver_ops->resume(pci_dev);
+ ret = -EAGAIN;
+ goto out;
+ }
}
-
cnss_set_pci_config_space(pci_priv,
SAVE_PCI_CONFIG_SPACE);
pci_disable_device(pci_dev);
@@ -1103,16 +1105,15 @@
if (!pci_priv)
goto out;
- ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
- if (ret)
- goto out;
- pci_priv->pci_link_state = PCI_LINK_UP;
-
driver_ops = pci_priv->driver_ops;
if (driver_ops && driver_ops->resume_noirq &&
- !pci_priv->pci_link_down_ind)
+ !pci_priv->pci_link_down_ind) {
+ ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
+ if (ret)
+ goto out;
+ pci_priv->pci_link_state = PCI_LINK_UP;
ret = driver_ops->resume_noirq(pci_dev);
-
+ }
out:
return ret;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 0f977dc..c67e08f 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -240,6 +240,9 @@
}
vs_ie = (struct ieee_types_header *)vendor_ie;
+ if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
+ IEEE_MAX_IE_SIZE)
+ return -EINVAL;
memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
vs_ie, vs_ie->len + 2);
le16_add_cpu(&ie->ie_length, vs_ie->len + 2);
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index a7e9f54..f2ef146 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -287,6 +287,8 @@
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
if (rate_ie) {
+ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
+ return;
memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
rate_len = rate_ie->len;
}
@@ -294,8 +296,11 @@
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
params->beacon.tail,
params->beacon.tail_len);
- if (rate_ie)
+ if (rate_ie) {
+ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
+ return;
memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
+ }
return;
}
@@ -413,6 +418,8 @@
params->beacon.tail_len);
if (vendor_ie) {
wmm_ie = (struct ieee_types_header *)vendor_ie;
+ if (*(vendor_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
+ return;
memcpy(&bss_cfg->wmm_info, wmm_ie + 1,
sizeof(bss_cfg->wmm_info));
priv->wmm_enabled = 1;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 14ceeaa..c31c564 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -907,7 +907,7 @@
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
- queue->rx.rsp_cons = ++cons;
+ queue->rx.rsp_cons = ++cons + skb_queue_len(list);
kfree_skb(nskb);
return ~0U;
}
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index e7a3946..bae7f5b 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -746,10 +746,18 @@
nci_get_version_rsp[3];
nqx_dev->nqx_info.info.rom_version =
nci_get_version_rsp[4];
- nqx_dev->nqx_info.info.fw_minor =
- nci_get_version_rsp[10];
- nqx_dev->nqx_info.info.fw_major =
- nci_get_version_rsp[11];
+ if ((nci_get_version_rsp[3] == NFCC_SN100_A)
+ || (nci_get_version_rsp[3] == NFCC_SN100_B)) {
+ nqx_dev->nqx_info.info.fw_minor =
+ nci_get_version_rsp[6];
+ nqx_dev->nqx_info.info.fw_major =
+ nci_get_version_rsp[7];
+ } else {
+ nqx_dev->nqx_info.info.fw_minor =
+ nci_get_version_rsp[10];
+ nqx_dev->nqx_info.info.fw_major =
+ nci_get_version_rsp[11];
+ }
}
goto err_nfcc_reset_failed;
}
@@ -1229,7 +1237,7 @@
gpio_free(platform_data->clkreq_gpio);
err_ese_gpio:
/* optional gpio, not sure was configured in probe */
- if (nqx_dev->ese_gpio > 0)
+ if (gpio_is_valid(platform_data->ese_gpio))
gpio_free(platform_data->ese_gpio);
err_firm_gpio:
gpio_free(platform_data->firm_gpio);
diff --git a/drivers/nvmem/nvmem-sysfs.c b/drivers/nvmem/nvmem-sysfs.c
old mode 100644
new mode 100755
index cb1fbaf..5628820b
--- a/drivers/nvmem/nvmem-sysfs.c
+++ b/drivers/nvmem/nvmem-sysfs.c
@@ -198,10 +198,17 @@
if (!config->base_dev)
return -EINVAL;
- if (nvmem->read_only)
- nvmem->eeprom = bin_attr_ro_root_nvmem;
- else
- nvmem->eeprom = bin_attr_rw_root_nvmem;
+ if (nvmem->read_only) {
+ if (config->root_only)
+ nvmem->eeprom = bin_attr_ro_root_nvmem;
+ else
+ nvmem->eeprom = bin_attr_ro_nvmem;
+ } else {
+ if (config->root_only)
+ nvmem->eeprom = bin_attr_rw_root_nvmem;
+ else
+ nvmem->eeprom = bin_attr_rw_nvmem;
+ }
nvmem->eeprom.attr.name = "eeprom";
nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index b8a8a92..c0da01f 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -1332,6 +1332,7 @@
static void __iomem *loopback_lbar_vir;
int ret, i;
u32 base_sel_size = 0;
+ u32 wr_ofst = 0;
switch (testcase) {
case MSM_PCIE_OUTPUT_PCIE_INFO:
@@ -1563,22 +1564,24 @@
break;
}
+ wr_ofst = wr_offset;
+
PCIE_DBG_FS(dev,
"base: %s: 0x%pK\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
dev->res[base_sel - 1].name,
dev->res[base_sel - 1].base,
- wr_offset, wr_mask, wr_value);
+ wr_ofst, wr_mask, wr_value);
base_sel_size = resource_size(dev->res[base_sel - 1].resource);
- if (wr_offset > base_sel_size - 4 ||
- msm_pcie_check_align(dev, wr_offset))
+ if (wr_ofst > base_sel_size - 4 ||
+ msm_pcie_check_align(dev, wr_ofst))
PCIE_DBG_FS(dev,
"PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
- dev->rc_idx, wr_offset, base_sel_size - 4);
+ dev->rc_idx, wr_ofst, base_sel_size - 4);
else
msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
- wr_offset, wr_mask, wr_value);
+ wr_ofst, wr_mask, wr_value);
break;
case MSM_PCIE_DUMP_PCIE_REGISTER_SPACE:
@@ -5545,7 +5548,7 @@
{
bool child_l0s_enable = 0, child_l1_enable = 0, child_l1ss_enable = 0;
- if (!pdev->subordinate || !(&pdev->subordinate->devices)) {
+ if (!pdev->subordinate || list_empty(&pdev->subordinate->devices)) {
PCIE_DBG(dev,
"PCIe: RC%d: no device connected to root complex\n",
dev->rc_idx);
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index d18929f..fb1cd33 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -496,9 +496,25 @@
struct device *dev = ufs_qcom_phy->dev;
int ret = 0;
- if (!vreg || !vreg->enabled || vreg->is_always_on)
+ if (!vreg || !vreg->enabled)
goto out;
+ if (vreg->is_always_on) {
+ /* voting 0 uA load will keep regulator in LPM mode */
+ ret = regulator_set_load(vreg->reg, 0);
+ if (ret >= 0) {
+ /*
+ * regulator_set_load() returns new regulator
+ * mode upon success
+ */
+ ret = 0;
+ } else {
+ dev_err(dev, "%s: %s set optimum mode(uA_load=0) failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+ goto out;
+ }
+
ret = regulator_disable(vreg->reg);
if (!ret) {
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index a9040a6..fce4132 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -691,7 +691,7 @@
/* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
req->source_pipe_index =
ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
- req->install_status = QMI_RESULT_SUCCESS_V01;
+ req->install_status = IPA_QMI_RESULT_SUCCESS_V01;
req->filter_index_list_len = num_q6_rule;
mutex_lock(&ipa_qmi_lock);
for (i = 0; i < num_q6_rule; i++) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 9f98ceb..2564cd5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2061,8 +2061,11 @@
case IPA_IOC_GET_PHERIPHERAL_EP_INFO:
IPADBG("Got IPA_IOC_GET_EP_INFO\n");
- if (ipa3_ctx->ipa_config_is_auto == false)
- return -ENOTTY;
+ if (ipa3_ctx->ipa_config_is_auto == false) {
+ IPADBG("not an auto config: returning error\n");
+ retval = -ENOTTY;
+ break;
+ }
if (copy_from_user(&ep_info, (const void __user *)arg,
sizeof(struct ipa_ioc_get_ep_info))) {
IPAERR_RL("copy_from_user fails\n");
@@ -2786,7 +2789,7 @@
/* Set the exception path to AP */
for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
ep_idx = ipa3_get_ep_mapping(client_idx);
- if (ep_idx == -1)
+ if (ep_idx == -1 || (ep_idx >= IPA3_MAX_NUM_PIPES))
continue;
/* disable statuses for all modem controlled prod pipes */
@@ -5140,10 +5143,6 @@
IPADBG("user input string %s\n", dbg_buff);
- /* Prevent consequent calls from trying to load the FW again. */
- if (ipa3_is_ready())
- return count;
-
/* Check MHI configuration on MDM devices */
if (!ipa3_is_msm_device()) {
@@ -5183,6 +5182,10 @@
}
}
+ /* Prevent consequent calls from trying to load the FW again. */
+ if (ipa3_is_ready())
+ return count;
+
/* Prevent multiple calls from trying to load the FW again. */
if (ipa3_ctx->fw_loaded) {
IPAERR("not load FW again\n");
@@ -5380,6 +5383,7 @@
ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
ipa3_ctx->ipa_config_is_auto = resource_p->ipa_config_is_auto;
+ ipa3_ctx->use_xbl_boot = resource_p->use_xbl_boot;
ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
@@ -5952,6 +5956,7 @@
ipa_drv_res->modem_cfg_emb_pipe_flt = false;
ipa_drv_res->ipa_wdi2 = false;
ipa_drv_res->ipa_config_is_auto = false;
+ ipa_drv_res->use_xbl_boot = false;
ipa_drv_res->ipa_mhi_dynamic_config = false;
ipa_drv_res->use_64_bit_dma_mask = false;
ipa_drv_res->use_bw_vote = false;
@@ -6043,6 +6048,12 @@
ipa_drv_res->ipa_config_is_auto
? "True" : "False");
+ ipa_drv_res->use_xbl_boot =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,use-xbl-boot");
+ IPADBG("Is xbl loading used ? (%s)\n",
+ ipa_drv_res->use_xbl_boot ? "Yes":"No");
+
ipa_drv_res->use_64_bit_dma_mask =
of_property_read_bool(pdev->dev.of_node,
"qcom,use-64-bit-dma-mask");
@@ -6782,6 +6793,32 @@
cb->dev = dev;
smmu_info.present[IPA_SMMU_CB_UC] = true;
+ if (ipa3_ctx->use_xbl_boot) {
+ /* Ensure uC probe is the last. */
+ if (!smmu_info.present[IPA_SMMU_CB_AP] ||
+ !smmu_info.present[IPA_SMMU_CB_WLAN]) {
+ IPAERR("AP or WLAN CB probe not done. Defer");
+ return -EPROBE_DEFER;
+ }
+
+ pr_info("Using XBL boot load for IPA FW\n");
+ ipa3_ctx->fw_loaded = true;
+
+ result = ipa3_attach_to_smmu();
+ if (result) {
+ IPAERR("IPA attach to smmu failed %d\n",
+ result);
+ return result;
+ }
+
+ result = ipa3_post_init(&ipa3_res, ipa3_ctx->cdev.dev);
+ if (result) {
+ IPAERR("IPA post init failed %d\n", result);
+ return result;
+ }
+ }
+
+
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index b05ad08..50dc080 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -84,6 +84,7 @@
__stringify(IPA_HDR_L2_NONE),
__stringify(IPA_HDR_L2_ETHERNET_II),
__stringify(IPA_HDR_L2_802_3),
+ __stringify(IPA_HDR_L2_802_1Q),
};
const char *ipa3_hdr_proc_type_name[] = {
@@ -94,6 +95,7 @@
__stringify(IPA_HDR_PROC_802_3_TO_802_3),
__stringify(IPA_HDR_PROC_L2TP_HEADER_ADD),
__stringify(IPA_HDR_PROC_L2TP_HEADER_REMOVE),
+ __stringify(IPA_HDR_PROC_ETHII_TO_ETHII_EX),
};
static struct dentry *dent;
@@ -515,13 +517,15 @@
pr_err("frg ");
if ((attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) ||
- (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3)) {
+ (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) ||
+ (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_1Q)) {
pr_err("src_mac_addr:%pM ", attrib->src_mac_addr);
}
if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) ||
(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) ||
- (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP)) {
+ (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) ||
+ (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_1Q)) {
pr_err("dst_mac_addr:%pM ", attrib->dst_mac_addr);
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 4eb8edb..e416456 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -96,7 +96,8 @@
entry->hdr->phys_base,
hdr_base_addr,
entry->hdr->offset_entry,
- entry->l2tp_params);
+ &entry->l2tp_params,
+ &entry->generic_params);
if (ret)
return ret;
}
@@ -117,6 +118,7 @@
struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
{
u32 hdr_base_addr;
+ gfp_t flag = GFP_KERNEL;
mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;
@@ -125,9 +127,14 @@
IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);
+alloc:
mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
- &mem->phys_base, GFP_KERNEL);
+ &mem->phys_base, flag);
if (!mem->base) {
+ if (flag == GFP_KERNEL) {
+ flag = GFP_ATOMIC;
+ goto alloc;
+ }
IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
return -ENOMEM;
}
@@ -360,24 +367,28 @@
entry->type = proc_ctx->type;
entry->hdr = hdr_entry;
entry->l2tp_params = proc_ctx->l2tp_params;
+ entry->generic_params = proc_ctx->generic_params;
if (add_ref_hdr)
hdr_entry->ref_cnt++;
entry->cookie = IPA_PROC_HDR_COOKIE;
entry->ipacm_installed = user_only;
needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);
-
- if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
- bin = IPA_HDR_PROC_CTX_BIN0;
- } else if (needed_len <=
- ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
- bin = IPA_HDR_PROC_CTX_BIN1;
- } else {
+ if ((needed_len < 0) ||
+ ((needed_len > ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0])
+ &&
+ (needed_len >
+ ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]))) {
IPAERR_RL("unexpected needed len %d\n", needed_len);
WARN_ON_RATELIMIT_IPA(1);
goto bad_len;
}
+ if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0])
+ bin = IPA_HDR_PROC_CTX_BIN0;
+ else
+ bin = IPA_HDR_PROC_CTX_BIN1;
+
mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
IPA_MEM_PART(apps_hdr_proc_ctx_size) :
IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
@@ -867,7 +878,7 @@
for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i],
true, user_only)) {
- IPAERR_RL("failed to add hdr pric ctx %d\n", i);
+ IPAERR_RL("failed to add hdr proc ctx %d\n", i);
proc_ctxs->proc_ctx[i].status = -1;
} else {
proc_ctxs->proc_ctx[i].status = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 1b1f047..c876938 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -555,6 +555,7 @@
* @link: entry's link in global header table entries list
* @type: header processing context type
* @l2tp_params: L2TP parameters
+ * @generic_params: generic proc_ctx params
* @offset_entry: entry's offset
* @hdr: the header
* @cookie: cookie used for validity check
@@ -568,6 +569,7 @@
u32 cookie;
enum ipa_hdr_proc_type type;
struct ipa_l2tp_hdr_proc_ctx_params l2tp_params;
+ struct ipa_eth_II_to_eth_II_ex_procparams generic_params;
struct ipa3_hdr_proc_ctx_offset_entry *offset_entry;
struct ipa3_hdr_entry *hdr;
u32 ref_cnt;
@@ -1447,6 +1449,7 @@
* @logbuf_low: ipc log buffer for low priority messages
* @ipa_wdi2: using wdi-2.0
* @ipa_config_is_auto: is this AUTO use case
+ * @use_xbl_boot: use xbl loading for IPA FW
* @use_64_bit_dma_mask: using 64bits dma mask
* @ipa_bus_hdl: msm driver handle for the data path bus
* @ctrl: holds the core specific operations based on
@@ -1549,6 +1552,7 @@
bool modem_cfg_emb_pipe_flt;
bool ipa_wdi2;
bool ipa_config_is_auto;
+ bool use_xbl_boot;
bool use_64_bit_dma_mask;
/* featurize if memory footprint becomes a concern */
struct ipa3_stats stats;
@@ -1631,6 +1635,7 @@
bool modem_cfg_emb_pipe_flt;
bool ipa_wdi2;
bool ipa_config_is_auto;
+ bool use_xbl_boot;
bool use_64_bit_dma_mask;
bool use_bw_vote;
u32 wan_rx_ring_size;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 4d06df6..5df2b90 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -46,7 +46,7 @@
#define IPA_V4_0_CLK_RATE_NOMINAL (220 * 1000 * 1000UL)
#define IPA_V4_0_CLK_RATE_TURBO (250 * 1000 * 1000UL)
-#define IPA_V3_0_MAX_HOLB_TMR_VAL (4294967296 - 1)
+#define IPA_MAX_HOLB_TMR_VAL (4294967296 - 1)
#define IPA_V3_0_BW_THRESHOLD_TURBO_MBPS (1000)
#define IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS (600)
@@ -4336,6 +4336,7 @@
ctrl->ipa_init_sram = _ipa_init_sram_v3;
ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
+ ctrl->max_holb_tmr_val = IPA_MAX_HOLB_TMR_VAL;
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v4_0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 3569760..1f245e7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1204,13 +1204,15 @@
* @hdr_base_addr: base address in table
* @offset_entry: offset from hdr_base_addr in table
* @l2tp_params: l2tp parameters
+ * @generic_params: generic proc_ctx params
*/
static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
void *const base, u32 offset,
u32 hdr_len, bool is_hdr_proc_ctx,
dma_addr_t phys_base, u32 hdr_base_addr,
struct ipa_hdr_offset_entry *offset_entry,
- struct ipa_l2tp_hdr_proc_ctx_params l2tp_params){
+ struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
+ struct ipa_eth_II_to_eth_II_ex_procparams *generic_params){
if (type == IPA_HDR_PROC_NONE) {
struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx;
@@ -1243,11 +1245,11 @@
ctx->l2tp_params.tlv.value =
IPA_HDR_UCP_L2TP_HEADER_ADD;
ctx->l2tp_params.l2tp_params.eth_hdr_retained =
- l2tp_params.hdr_add_param.eth_hdr_retained;
+ l2tp_params->hdr_add_param.eth_hdr_retained;
ctx->l2tp_params.l2tp_params.input_ip_version =
- l2tp_params.hdr_add_param.input_ip_version;
+ l2tp_params->hdr_add_param.input_ip_version;
ctx->l2tp_params.l2tp_params.output_ip_version =
- l2tp_params.hdr_add_param.output_ip_version;
+ l2tp_params->hdr_add_param.output_ip_version;
IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value);
ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
@@ -1270,15 +1272,15 @@
ctx->l2tp_params.tlv.value =
IPA_HDR_UCP_L2TP_HEADER_REMOVE;
ctx->l2tp_params.l2tp_params.hdr_len_remove =
- l2tp_params.hdr_remove_param.hdr_len_remove;
+ l2tp_params->hdr_remove_param.hdr_len_remove;
ctx->l2tp_params.l2tp_params.eth_hdr_retained =
- l2tp_params.hdr_remove_param.eth_hdr_retained;
+ l2tp_params->hdr_remove_param.eth_hdr_retained;
ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size_valid =
- l2tp_params.hdr_remove_param.hdr_ofst_pkt_size_valid;
+ l2tp_params->hdr_remove_param.hdr_ofst_pkt_size_valid;
ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size =
- l2tp_params.hdr_remove_param.hdr_ofst_pkt_size;
+ l2tp_params->hdr_remove_param.hdr_ofst_pkt_size;
ctx->l2tp_params.l2tp_params.hdr_endianness =
- l2tp_params.hdr_remove_param.hdr_endianness;
+ l2tp_params->hdr_remove_param.hdr_endianness;
IPAHAL_DBG("hdr ofst valid: %d, hdr ofst pkt size: %d\n",
ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size_valid,
ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size);
@@ -1289,6 +1291,33 @@
ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
ctx->end.length = 0;
ctx->end.value = 0;
+ } else if (type == IPA_HDR_PROC_ETHII_TO_ETHII_EX) {
+ struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex *ctx;
+
+ ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex *)
+ (base + offset);
+
+ ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+ ctx->hdr_add.tlv.length = 1;
+ ctx->hdr_add.tlv.value = hdr_len;
+ ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+ hdr_base_addr + offset_entry->offset;
+ IPAHAL_DBG("header address 0x%x\n",
+ ctx->hdr_add.hdr_addr);
+
+ ctx->hdr_add_ex.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+ ctx->hdr_add_ex.tlv.length = 1;
+ ctx->hdr_add_ex.tlv.value = IPA_HDR_UCP_ETHII_TO_ETHII_EX;
+
+ ctx->hdr_add_ex.params.input_ethhdr_negative_offset =
+ generic_params->input_ethhdr_negative_offset;
+ ctx->hdr_add_ex.params.output_ethhdr_negative_offset =
+ generic_params->output_ethhdr_negative_offset;
+ ctx->hdr_add_ex.params.reserved = 0;
+
+ ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+ ctx->end.length = 0;
+ ctx->end.value = 0;
} else {
struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx;
@@ -1339,9 +1368,35 @@
*/
static int ipahal_get_proc_ctx_needed_len_v3(enum ipa_hdr_proc_type type)
{
- return (type == IPA_HDR_PROC_NONE) ?
- sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq) :
- sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq);
+ int ret;
+
+ switch (type) {
+ case IPA_HDR_PROC_NONE:
+ ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq);
+ break;
+ case IPA_HDR_PROC_ETHII_TO_ETHII:
+ case IPA_HDR_PROC_ETHII_TO_802_3:
+ case IPA_HDR_PROC_802_3_TO_ETHII:
+ case IPA_HDR_PROC_802_3_TO_802_3:
+ ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq);
+ break;
+ case IPA_HDR_PROC_L2TP_HEADER_ADD:
+ ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq);
+ break;
+ case IPA_HDR_PROC_L2TP_HEADER_REMOVE:
+ ret =
+ sizeof(struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq);
+ break;
+ case IPA_HDR_PROC_ETHII_TO_ETHII_EX:
+ ret = sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex);
+ break;
+ default:
+ /* invalid value to make sure failure */
+ IPAHAL_ERR_RL("invalid ipa_hdr_proc_type %d\n", type);
+ ret = -1;
+ }
+
+ return ret;
}
/*
@@ -1358,7 +1413,9 @@
bool is_hdr_proc_ctx, dma_addr_t phys_base,
u32 hdr_base_addr,
struct ipa_hdr_offset_entry *offset_entry,
- struct ipa_l2tp_hdr_proc_ctx_params l2tp_params);
+ struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
+ struct ipa_eth_II_to_eth_II_ex_procparams
+ *generic_params);
int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type);
};
@@ -1424,12 +1481,14 @@
* @hdr_base_addr: base address in table
* @offset_entry: offset from hdr_base_addr in table
* @l2tp_params: l2tp parameters
+ * @generic_params: generic proc_ctx params
*/
int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
void *const base, u32 offset, u32 hdr_len,
bool is_hdr_proc_ctx, dma_addr_t phys_base,
u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry,
- struct ipa_l2tp_hdr_proc_ctx_params l2tp_params)
+ struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
+ struct ipa_eth_II_to_eth_II_ex_procparams *generic_params)
{
IPAHAL_DBG(
"type %d, base %p, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %d, offset_entry %p\n"
@@ -1450,7 +1509,8 @@
return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
hdr_len, is_hdr_proc_ctx, phys_base,
- hdr_base_addr, offset_entry, l2tp_params);
+ hdr_base_addr, offset_entry, l2tp_params,
+ generic_params);
}
/*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index 0c2697c..f8720b0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, 2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -635,13 +635,15 @@
* @hdr_base_addr: base address in table
* @offset_entry: offset from hdr_base_addr in table
* @l2tp_params: l2tp parameters
+ * @generic_params: generic proc_ctx params
*/
int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
void *base, u32 offset, u32 hdr_len,
bool is_hdr_proc_ctx, dma_addr_t phys_base,
u32 hdr_base_addr,
struct ipa_hdr_offset_entry *offset_entry,
- struct ipa_l2tp_hdr_proc_ctx_params l2tp_params);
+ struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params,
+ struct ipa_eth_II_to_eth_II_ex_procparams *generic_params);
/*
* ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index c881180..06bd062 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -20,6 +20,11 @@
#include "ipahal_i.h"
#include "../../ipa_common_i.h"
+#define IPA_MAC_FLT_BITS (IPA_FLT_MAC_DST_ADDR_ETHER_II | \
+ IPA_FLT_MAC_SRC_ADDR_ETHER_II | IPA_FLT_MAC_DST_ADDR_802_3 | \
+ IPA_FLT_MAC_SRC_ADDR_802_3 | IPA_FLT_MAC_DST_ADDR_802_1Q | \
+ IPA_FLT_MAC_SRC_ADDR_802_1Q)
+
/*
* struct ipahal_fltrt_obj - Flt/Rt H/W information for specific IPA version
* @support_hash: Is hashable tables supported
@@ -597,6 +602,112 @@
*rest = ipa_write_8(mac_addr[i], *rest);
}
+static inline void ipa_fltrt_get_mac_data(const struct ipa_rule_attrib *attrib,
+ uint32_t attrib_mask, u8 *offset, const uint8_t **mac_addr,
+ const uint8_t **mac_addr_mask)
+{
+ if (attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+ *offset = -14;
+ *mac_addr = attrib->dst_mac_addr;
+ *mac_addr_mask = attrib->dst_mac_addr_mask;
+ return;
+ }
+
+ if (attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+ *offset = -8;
+ *mac_addr = attrib->src_mac_addr;
+ *mac_addr_mask = attrib->src_mac_addr_mask;
+ return;
+ }
+
+ if (attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+ *offset = -22;
+ *mac_addr = attrib->dst_mac_addr;
+ *mac_addr_mask = attrib->dst_mac_addr_mask;
+ return;
+ }
+
+ if (attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+ *offset = -16;
+ *mac_addr = attrib->src_mac_addr;
+ *mac_addr_mask = attrib->src_mac_addr_mask;
+ return;
+ }
+
+ if (attrib_mask & IPA_FLT_MAC_DST_ADDR_802_1Q) {
+ *offset = -18;
+ *mac_addr = attrib->dst_mac_addr;
+ *mac_addr_mask = attrib->dst_mac_addr_mask;
+ return;
+ }
+
+ if (attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_1Q) {
+ *offset = -10;
+ *mac_addr = attrib->src_mac_addr;
+ *mac_addr_mask = attrib->src_mac_addr_mask;
+ return;
+ }
+}
+
+static int ipa_fltrt_generate_mac_hw_rule_bdy(u16 *en_rule,
+ const struct ipa_rule_attrib *attrib,
+ u8 *ofst_meq128, u8 **extra, u8 **rest)
+{
+ u8 offset = 0;
+ const uint8_t *mac_addr = NULL;
+ const uint8_t *mac_addr_mask = NULL;
+ int i;
+ uint32_t attrib_mask;
+
+ for (i = 0; i < hweight_long(IPA_MAC_FLT_BITS); i++) {
+ switch (i) {
+ case 0:
+ attrib_mask = IPA_FLT_MAC_DST_ADDR_ETHER_II;
+ break;
+ case 1:
+ attrib_mask = IPA_FLT_MAC_SRC_ADDR_ETHER_II;
+ break;
+ case 2:
+ attrib_mask = IPA_FLT_MAC_DST_ADDR_802_3;
+ break;
+ case 3:
+ attrib_mask = IPA_FLT_MAC_SRC_ADDR_802_3;
+ break;
+ case 4:
+ attrib_mask = IPA_FLT_MAC_DST_ADDR_802_1Q;
+ break;
+ case 5:
+ attrib_mask = IPA_FLT_MAC_SRC_ADDR_802_1Q;
+ break;
+ default:
+ return -EPERM;
+ }
+
+ attrib_mask &= attrib->attrib_mask;
+ if (!attrib_mask)
+ continue;
+
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, *ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[*ofst_meq128]);
+
+ ipa_fltrt_get_mac_data(attrib, attrib_mask, &offset,
+ &mac_addr, &mac_addr_mask);
+
+ ipa_fltrt_generate_mac_addr_hw_rule(extra, rest, offset,
+ mac_addr_mask,
+ mac_addr);
+
+ (*ofst_meq128)++;
+ }
+
+ return 0;
+}
+
static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule,
const struct ipa_rule_attrib *attrib,
u8 **extra_wrds, u8 **rest_wrds)
@@ -619,80 +730,10 @@
extra = ipa_write_8(attrib->u.v4.protocol, extra);
}
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ if (attrib->attrib_mask & IPA_MAC_FLT_BITS) {
+ if (ipa_fltrt_generate_mac_hw_rule_bdy(en_rule, attrib,
+ &ofst_meq128, &extra, &rest))
goto err;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -14 => offset of dst mac addr in Ethernet II hdr */
- ipa_fltrt_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -14,
- attrib->dst_mac_addr_mask,
- attrib->dst_mac_addr);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -8 => offset of src mac addr in Ethernet II hdr */
- ipa_fltrt_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -8,
- attrib->src_mac_addr_mask,
- attrib->src_mac_addr);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -22 => offset of dst mac addr in 802.3 hdr */
- ipa_fltrt_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -22,
- attrib->dst_mac_addr_mask,
- attrib->dst_mac_addr);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -16 => offset of src mac addr in 802.3 hdr */
- ipa_fltrt_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -16,
- attrib->src_mac_addr_mask,
- attrib->src_mac_addr);
-
- ofst_meq128++;
}
if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
@@ -1032,80 +1073,10 @@
ofst_meq128++;
}
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ if (attrib->attrib_mask & IPA_MAC_FLT_BITS) {
+ if (ipa_fltrt_generate_mac_hw_rule_bdy(en_rule, attrib,
+ &ofst_meq128, &extra, &rest))
goto err;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -14 => offset of dst mac addr in Ethernet II hdr */
- ipa_fltrt_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -14,
- attrib->dst_mac_addr_mask,
- attrib->dst_mac_addr);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -8 => offset of src mac addr in Ethernet II hdr */
- ipa_fltrt_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -8,
- attrib->src_mac_addr_mask,
- attrib->src_mac_addr);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -22 => offset of dst mac addr in 802.3 hdr */
- ipa_fltrt_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -22,
- attrib->dst_mac_addr_mask,
- attrib->dst_mac_addr);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -16 => offset of src mac addr in 802.3 hdr */
- ipa_fltrt_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -16,
- attrib->src_mac_addr_mask,
- attrib->src_mac_addr);
-
- ofst_meq128++;
}
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
@@ -1747,6 +1718,65 @@
mac_addr[i];
}
+static int ipa_flt_generate_mac_eq(
+ const struct ipa_rule_attrib *attrib, u16 *en_rule, u8 *ofst_meq128,
+ struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+ u8 offset = 0;
+ const uint8_t *mac_addr = NULL;
+ const uint8_t *mac_addr_mask = NULL;
+ int i;
+ uint32_t attrib_mask;
+
+ for (i = 0; i < hweight_long(IPA_MAC_FLT_BITS); i++) {
+ switch (i) {
+ case 0:
+ attrib_mask = IPA_FLT_MAC_DST_ADDR_ETHER_II;
+ break;
+ case 1:
+ attrib_mask = IPA_FLT_MAC_SRC_ADDR_ETHER_II;
+ break;
+ case 2:
+ attrib_mask = IPA_FLT_MAC_DST_ADDR_802_3;
+ break;
+ case 3:
+ attrib_mask = IPA_FLT_MAC_SRC_ADDR_802_3;
+ break;
+ case 4:
+ attrib_mask = IPA_FLT_MAC_DST_ADDR_802_1Q;
+ break;
+ case 5:
+ attrib_mask = IPA_FLT_MAC_SRC_ADDR_802_1Q;
+ break;
+ default:
+ return -EPERM;
+ }
+
+ attrib_mask &= attrib->attrib_mask;
+ if (!attrib_mask)
+ continue;
+
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, *ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[*ofst_meq128]);
+
+ ipa_fltrt_get_mac_data(attrib, attrib_mask, &offset,
+ &mac_addr, &mac_addr_mask);
+
+ ipa_flt_generate_mac_addr_eq(eq_atrb, offset,
+ mac_addr_mask, mac_addr,
+ *ofst_meq128);
+
+ (*ofst_meq128)++;
+ }
+
+ return 0;
+}
+
static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
const struct ipa_rule_attrib *attrib,
struct ipa_ipfltri_rule_eq *eq_atrb)
@@ -1770,68 +1800,10 @@
eq_atrb->protocol_eq = attrib->u.v4.protocol;
}
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ if (attrib->attrib_mask & IPA_MAC_FLT_BITS) {
+ if (ipa_flt_generate_mac_eq(attrib, en_rule,
+ &ofst_meq128, eq_atrb))
return -EPERM;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -14 => offset of dst mac addr in Ethernet II hdr */
- ipa_flt_generate_mac_addr_eq(eq_atrb, -14,
- attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -8 => offset of src mac addr in Ethernet II hdr */
- ipa_flt_generate_mac_addr_eq(eq_atrb, -8,
- attrib->src_mac_addr_mask, attrib->src_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -22 => offset of dst mac addr in 802.3 hdr */
- ipa_flt_generate_mac_addr_eq(eq_atrb, -22,
- attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -16 => offset of src mac addr in 802.3 hdr */
- ipa_flt_generate_mac_addr_eq(eq_atrb, -16,
- attrib->src_mac_addr_mask, attrib->src_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
}
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
@@ -2200,68 +2172,10 @@
ofst_meq128++;
}
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR_RL("ran out of meq128 eq\n");
+ if (attrib->attrib_mask & IPA_MAC_FLT_BITS) {
+ if (ipa_flt_generate_mac_eq(attrib, en_rule,
+ &ofst_meq128, eq_atrb))
return -EPERM;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -14 => offset of dst mac addr in Ethernet II hdr */
- ipa_flt_generate_mac_addr_eq(eq_atrb, -14,
- attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR_RL("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -8 => offset of src mac addr in Ethernet II hdr */
- ipa_flt_generate_mac_addr_eq(eq_atrb, -8,
- attrib->src_mac_addr_mask, attrib->src_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR_RL("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -22 => offset of dst mac addr in 802.3 hdr */
- ipa_flt_generate_mac_addr_eq(eq_atrb, -22,
- attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
- if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR_RL("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
- ipa3_0_ofst_meq128[ofst_meq128]);
-
- /* -16 => offset of src mac addr in 802.3 hdr */
- ipa_flt_generate_mac_addr_eq(eq_atrb, -16,
- attrib->src_mac_addr_mask, attrib->src_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
}
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
@@ -3417,6 +3331,7 @@
struct ipahal_fltrt_alloc_imgs_params *params)
{
struct ipahal_fltrt_obj *obj;
+ gfp_t flag = GFP_KERNEL;
obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
@@ -3449,10 +3364,15 @@
IPAHAL_DBG_LOW("nhash lcl tbl bdy total h/w size = %u\n",
params->nhash_bdy.size);
+alloc1:
params->nhash_bdy.base = dma_alloc_coherent(
ipahal_ctx->ipa_pdev, params->nhash_bdy.size,
- ¶ms->nhash_bdy.phys_base, GFP_KERNEL);
+ ¶ms->nhash_bdy.phys_base, flag);
if (!params->nhash_bdy.base) {
+ if (flag == GFP_KERNEL) {
+ flag = GFP_ATOMIC;
+ goto alloc1;
+ }
IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
params->nhash_bdy.size);
return -ENOMEM;
@@ -3480,10 +3400,15 @@
IPAHAL_DBG_LOW("hash lcl tbl bdy total h/w size = %u\n",
params->hash_bdy.size);
+alloc2:
params->hash_bdy.base = dma_alloc_coherent(
ipahal_ctx->ipa_pdev, params->hash_bdy.size,
- ¶ms->hash_bdy.phys_base, GFP_KERNEL);
+ ¶ms->hash_bdy.phys_base, flag);
if (!params->hash_bdy.base) {
+ if (flag == GFP_KERNEL) {
+ flag = GFP_ATOMIC;
+ goto alloc2;
+ }
IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
params->hash_bdy.size);
goto hash_bdy_fail;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index 816bc58..d7e1541 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -612,12 +612,15 @@
/* Headers and processing context H/W structures and definitions */
/* uCP command numbers */
-#define IPA_HDR_UCP_802_3_TO_802_3 6
-#define IPA_HDR_UCP_802_3_TO_ETHII 7
-#define IPA_HDR_UCP_ETHII_TO_802_3 8
-#define IPA_HDR_UCP_ETHII_TO_ETHII 9
-#define IPA_HDR_UCP_L2TP_HEADER_ADD 10
-#define IPA_HDR_UCP_L2TP_HEADER_REMOVE 11
+#define IPA_HDR_UCP_802_3_TO_802_3 6
+#define IPA_HDR_UCP_802_3_TO_ETHII 7
+#define IPA_HDR_UCP_ETHII_TO_802_3 8
+#define IPA_HDR_UCP_ETHII_TO_ETHII 9
+#define IPA_HDR_UCP_L2TP_HEADER_ADD 10
+#define IPA_HDR_UCP_L2TP_HEADER_REMOVE 11
+#define IPA_HDR_UCP_L2TP_UDP_HEADER_ADD 12
+#define IPA_HDR_UCP_L2TP_UDP_HEADER_REMOVE 13
+#define IPA_HDR_UCP_ETHII_TO_ETHII_EX 14
/* Processing context TLV type */
#define IPA_PROC_CTX_TLV_TYPE_END 0
@@ -730,6 +733,30 @@
struct ipa_hw_hdr_proc_ctx_tlv end;
};
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_ex -
+ * HW structure of IPA processing context - add generic header
+ * @tlv: IPA processing context TLV
+ * @params: generic eth2 to eth2 parameters
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_ex {
+ struct ipa_hw_hdr_proc_ctx_tlv tlv;
+ struct ipa_eth_II_to_eth_II_ex_procparams params;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @params: params for header generic header add
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq_ex {
+ struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+ struct ipa_hw_hdr_proc_ctx_add_hdr_ex hdr_add_ex;
+ struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
/* IPA HW DPS/HPS image memory sizes */
#define IPA_HW_DPS_IMG_MEM_SIZE_V3_0 128
#define IPA_HW_HPS_IMG_MEM_SIZE_V3_0 320
diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c
index 22ab30f..d045d59 100644
--- a/drivers/platform/msm/mhi_dev/mhi_uci.c
+++ b/drivers/platform/msm/mhi_dev/mhi_uci.c
@@ -996,6 +996,15 @@
mhi_parse_state(buf, &nbytes, info);
add_uevent_var(env, "MHI_CHANNEL_STATE_12=%s", buf);
+ rc = mhi_ctrl_state_info(MHI_CLIENT_DCI_OUT, &info);
+ if (rc) {
+ pr_err("Failed to obtain channel 20 state\n");
+ return -EINVAL;
+ }
+ nbytes = 0;
+ mhi_parse_state(buf, &nbytes, info);
+ add_uevent_var(env, "MHI_CHANNEL_STATE_20=%s", buf);
+
return 0;
}
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index c49e254..28792c2 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -57,6 +57,7 @@
static void __iomem *msm_ps_hold;
static phys_addr_t tcsr_boot_misc_detect;
static void scm_disable_sdi(void);
+static bool force_warm_reboot;
#ifdef CONFIG_QCOM_DLOAD_MODE
/* Runtime could be only changed value once.
@@ -87,8 +88,6 @@
static struct kobject dload_kobj;
static void *dload_type_addr;
-static bool force_warm_reboot;
-
static int dload_set(const char *val, const struct kernel_param *kp);
/* interface for exporting attributes */
struct reset_attribute {
diff --git a/drivers/power/supply/qcom/qg-battery-profile.c b/drivers/power/supply/qcom/qg-battery-profile.c
index 00a4533..8eaa5b5 100644
--- a/drivers/power/supply/qcom/qg-battery-profile.c
+++ b/drivers/power/supply/qcom/qg-battery-profile.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -398,7 +398,7 @@
{
u8 table_index = charging ? TABLE_SOC_OCV1 : TABLE_SOC_OCV2;
- if (!the_battery || !the_battery->profile)
+ if (!the_battery || !the_battery->profile_node)
return -ENODEV;
*soc = interpolate_soc(&the_battery->profile[table_index],
@@ -414,7 +414,7 @@
u8 table_index = charging ? TABLE_FCC1 : TABLE_FCC2;
u32 fcc_mah;
- if (!the_battery || !the_battery->profile)
+ if (!the_battery || !the_battery->profile_node)
return -ENODEV;
fcc_mah = interpolate_single_row_lut(
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 5ec38eb..391813e 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -2789,6 +2789,10 @@
if (rc < 0)
return rc;
}
+ } else if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB &&
+ usb_current == -ETIMEDOUT) {
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+ true, USBIN_100MA);
} else {
rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
true, usb_current);
diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c
index 744e35a..0ea743e 100644
--- a/drivers/regulator/qpnp-lcdb-regulator.c
+++ b/drivers/regulator/qpnp-lcdb-regulator.c
@@ -2022,7 +2022,7 @@
if (lcdb->bst.ps != -EINVAL) {
rc = qpnp_lcdb_masked_write(lcdb, lcdb->base +
LCDB_PS_CTL_REG, EN_PS_BIT,
- &lcdb->bst.ps ? EN_PS_BIT : 0);
+ lcdb->bst.ps ? EN_PS_BIT : 0);
if (rc < 0) {
pr_err("Failed to disable BST PS rc=%d", rc);
return rc;
diff --git a/drivers/regulator/rpm-smd-regulator.c b/drivers/regulator/rpm-smd-regulator.c
index b38db82..cded6e4 100644
--- a/drivers/regulator/rpm-smd-regulator.c
+++ b/drivers/regulator/rpm-smd-regulator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2015, 2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -933,10 +933,6 @@
if (load_mA > params[RPM_REGULATOR_PARAM_CURRENT].max)
load_mA = params[RPM_REGULATOR_PARAM_CURRENT].max;
- rpm_vreg_lock(reg->rpm_vreg);
- RPM_VREG_SET_PARAM(reg, CURRENT, load_mA);
- rpm_vreg_unlock(reg->rpm_vreg);
-
return (load_uA >= reg->rpm_vreg->hpm_min_load)
? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
}
diff --git a/drivers/soc/qcom/glink_debugfs.c b/drivers/soc/qcom/glink_debugfs.c
index b831880..fe464d2 100644
--- a/drivers/soc/qcom/glink_debugfs.c
+++ b/drivers/soc/qcom/glink_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, 2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -568,9 +568,9 @@
dbgfs_dent_s->parent = parent;
dbgfs_dent_s->self = curr_dent;
strlcpy(dbgfs_dent_s->self_name,
- curr, strlen(curr) + 1);
+ curr, sizeof(dbgfs_dent_s->self_name));
strlcpy(dbgfs_dent_s->par_name, par_dir,
- strlen(par_dir) + 1);
+ sizeof(dbgfs_dent_s->par_name));
INIT_WORK(&dbgfs_dent_s->rm_work,
glink_dfs_dent_rm_worker);
mutex_lock(&dent_list_lock_lha0);
diff --git a/drivers/soc/qcom/ipc_router_mhi_dev_xprt.c b/drivers/soc/qcom/ipc_router_mhi_dev_xprt.c
index d0b650d..7cf2644 100644
--- a/drivers/soc/qcom/ipc_router_mhi_dev_xprt.c
+++ b/drivers/soc/qcom/ipc_router_mhi_dev_xprt.c
@@ -29,7 +29,7 @@
#define MODULE_NAME "ipc_router_mhi_dev_xprt"
#define XPRT_NAME_LEN 32
#define IPC_ROUTER_MHI_XPRT_MAX_PKT_SIZE 8192
-#define MHI_IPCR_ASYNC_TIMEOUT msecs_to_jiffies(100)
+#define MHI_IPCR_ASYNC_TIMEOUT msecs_to_jiffies(1000)
#define MAX_IPCR_WR_REQ 128
/**
diff --git a/drivers/soc/qcom/msm-spm.c b/drivers/soc/qcom/msm-spm.c
index 03b1c56..7c74f79 100644
--- a/drivers/soc/qcom/msm-spm.c
+++ b/drivers/soc/qcom/msm-spm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -151,7 +151,7 @@
static void msm_spm_drv_flush_shadow(struct msm_spm_driver_data *dev,
unsigned int reg_index)
{
- if (!dev || !dev->reg_shadow)
+ if (!dev || (dev->reg_shadow == NULL))
return;
__raw_writel(dev->reg_shadow[reg_index],
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
index df29233..c607049 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, 2014-2017, The Linux Foundation. All rights
+/* Copyright (c) 2010-2012, 2014-2017, 2019 The Linux Foundation. All rights
* reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -577,7 +577,6 @@
list_for_each_entry(cldata, &cl_list, list) {
if (strnstr(chid, cldata->pdata->name, cnt)) {
found = 1;
- cldata = cldata;
strsep(&chid, " ");
if (chid) {
ret = kstrtoul(chid, 10, &index);
diff --git a/drivers/soc/qcom/rpm_master_stat.c b/drivers/soc/qcom/rpm_master_stat.c
index bf4f5ec..efbe033 100644
--- a/drivers/soc/qcom/rpm_master_stat.c
+++ b/drivers/soc/qcom/rpm_master_stat.c
@@ -416,7 +416,7 @@
if (!pdata->masters[i])
goto err;
strlcpy(pdata->masters[i], master_name,
- strlen(master_name) + 1);
+ strlen(pdata->masters[i]) + 1);
}
return pdata;
err:
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index ad1026c..64918e8 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -66,6 +66,7 @@
HW_PLATFORM_RCM = 21,
HW_PLATFORM_STP = 23,
HW_PLATFORM_SBC = 24,
+ HW_PLATFORM_ADP = 25,
HW_PLATFORM_HDK = 31,
HW_PLATFORM_INVALID
};
@@ -87,6 +88,7 @@
[HW_PLATFORM_DTV] = "DTV",
[HW_PLATFORM_STP] = "STP",
[HW_PLATFORM_SBC] = "SBC",
+ [HW_PLATFORM_ADP] = "ADP",
[HW_PLATFORM_HDK] = "HDK",
};
diff --git a/drivers/soc/qcom/wcnss/wcnss_vreg.c b/drivers/soc/qcom/wcnss/wcnss_vreg.c
index d4d9a75..476ff60 100644
--- a/drivers/soc/qcom/wcnss/wcnss_vreg.c
+++ b/drivers/soc/qcom/wcnss/wcnss_vreg.c
@@ -585,11 +585,6 @@
wcnss_log(ERR, "vreg %s disable failed (%d)\n",
regulators[i].name, rc);
}
- /* Free the regulator source */
- if (regulators[i].state & VREG_GET_REGULATOR_MASK)
- regulator_put(regulators[i].regulator);
-
- regulators[i].state = VREG_NULL_CONFIG;
}
}
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 9e02658..3973822 100755
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -126,19 +126,6 @@
int tail;
struct lmk_event *events;
struct lmk_event *event;
- int res;
- char taskname[MAX_TASKNAME];
-
- res = get_cmdline(selected, taskname, MAX_TASKNAME - 1);
-
- /* No valid process name means this is definitely not associated with a
- * userspace activity.
- */
-
- if (res <= 0 || res >= MAX_TASKNAME)
- return;
-
- taskname[res] = '\0';
spin_lock(&lmk_event_lock);
@@ -154,7 +141,7 @@
events = (struct lmk_event *) event_buffer.buf;
event = &events[head];
- memcpy(event->taskname, taskname, res + 1);
+ strncpy(event->taskname, selected->comm, MAX_TASKNAME);
event->pid = selected->pid;
event->uid = from_kuid_munged(current_user_ns(), task_uid(selected));
@@ -737,6 +724,7 @@
}
task_lock(selected);
+ get_task_struct(selected);
send_sig(SIGKILL, selected, 0);
if (selected->mm) {
task_set_lmk_waiting(selected);
@@ -781,13 +769,15 @@
lowmem_deathpending_timeout = jiffies + HZ;
rem += selected_tasksize;
+
rcu_read_unlock();
- get_task_struct(selected);
/* give the system time to free up the memory */
msleep_interruptible(20);
trace_almk_shrink(selected_tasksize, ret,
other_free, other_file,
selected_oom_score_adj);
+
+ get_task_struct(selected);
} else {
trace_almk_shrink(1, ret, other_free, other_file, 0);
rcu_read_unlock();
diff --git a/drivers/thermal/qpnp-temp-alarm.c b/drivers/thermal/qpnp-temp-alarm.c
index e8a8907..e9ae4b6 100644
--- a/drivers/thermal/qpnp-temp-alarm.c
+++ b/drivers/thermal/qpnp-temp-alarm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -403,12 +403,6 @@
int rc = 0;
u8 raw_type[2], type, subtype;
- if (!pdev || !(&pdev->dev) || !pdev->dev.of_node) {
- dev_err(&pdev->dev, "%s: device tree node not found\n",
- __func__);
- return -EINVAL;
- }
-
node = pdev->dev.of_node;
chip = kzalloc(sizeof(struct qpnp_tm_chip), GFP_KERNEL);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index ef688aa..5782422 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1279,7 +1279,6 @@
atmel_port->hd_start_rx = false;
atmel_start_rx(port);
- return;
}
atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
diff --git a/drivers/tty/serial/msm_smd_tty.c b/drivers/tty/serial/msm_smd_tty.c
index 84ee1dd..c9f8af1 100644
--- a/drivers/tty/serial/msm_smd_tty.c
+++ b/drivers/tty/serial/msm_smd_tty.c
@@ -1,5 +1,5 @@
/* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2015, 2017, 2019, The Linux Foundation. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -370,7 +370,7 @@
int n;
for (n = 0; n < MAX_SMD_TTYS; ++n) {
- if (!smd_tty[n].dev_name)
+ if (smd_tty[n].dev_name == NULL)
continue;
if (pdev->id == smd_tty[n].edge &&
@@ -504,7 +504,7 @@
struct smd_tty_info *info;
const char *peripheral = NULL;
- if (n >= MAX_SMD_TTYS || !smd_tty[n].ch_name)
+ if (n >= MAX_SMD_TTYS || smd_tty[n].ch_name == NULL)
return -ENODEV;
info = smd_tty + n;
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 747560f..2e34239 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -240,7 +240,7 @@
if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE |
SPRD_LSR_FE | SPRD_LSR_OE))
- if (handle_lsr_errors(port, &lsr, &flag))
+ if (handle_lsr_errors(port, &flag, &lsr))
continue;
if (uart_handle_sysrq_char(port, ch))
continue;
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 2476d66..5e782e5 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -920,7 +920,7 @@
struct usb_bos_descriptor *bos;
struct usb_dev_cap_header *cap;
struct usb_ssp_cap_descriptor *ssp_cap;
- unsigned char *buffer;
+ unsigned char *buffer, *buffer0;
int length, total_len, num, i, ssac;
__u8 cap_type;
int ret;
@@ -965,10 +965,12 @@
ret = -ENOMSG;
goto err;
}
+
+ buffer0 = buffer;
total_len -= length;
+ buffer += length;
for (i = 0; i < num; i++) {
- buffer += length;
cap = (struct usb_dev_cap_header *)buffer;
if (total_len < sizeof(*cap) || total_len < cap->bLength) {
@@ -982,8 +984,6 @@
break;
}
- total_len -= length;
-
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
dev_warn(ddev, "descriptor type invalid, skip\n");
continue;
@@ -1027,7 +1027,11 @@
default:
break;
}
+
+ total_len -= length;
+ buffer += length;
}
+ dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0);
return 0;
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 2e300a3..be3172e 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -400,7 +400,9 @@
hw_cwrite(CAP_USBINTR, ~0,
USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS);
+ udc->transceiver->flags |= PHY_SOFT_CONNECT;
} else {
+ udc->transceiver->flags &= ~PHY_SOFT_CONNECT;
hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
hw_cwrite(CAP_USBINTR, ~0, 0);
/* Clear BIT(31) to disable AHB2AHB Bypass functionality */
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 51413dc..1acfd3a 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -2429,9 +2429,12 @@
{
struct f_gsi *gsi = func_to_gsi(f);
struct f_gsi *gsi_rmnet_v2x = __gsi[USB_PROT_RMNET_V2X_IPA];
+ struct f_gsi *gsi_ecm = __gsi[USB_PROT_ECM_IPA];
struct usb_composite_dev *cdev = f->config->cdev;
struct net_device *net;
int ret;
+ int in_intr_num = 0;
+ int out_intr_num = 0;
log_event_dbg("intf=%u, alt=%u", intf, alt);
@@ -2504,21 +2507,43 @@
}
/*
- * Configure EPs for GSI. Note that when both RmNet LTE
- * (or ECM) and RmNet V2X instances are enabled in a
- * composition, configure HW accelerated EPs for V2X
- * instance and normal EPs for LTE (or ECM).
+ * Configure EPs for GSI. Note that:
+ * 1. In general, configure HW accelerated EPs for all
+ * instances.
+ * 2. If both RmNet LTE and RmNet V2X instances are
+ * enabled in a composition, configure HW accelerated
+ * EPs for V2X and normal EPs for LTE.
+ * 3. If RmNet V2X, ECM and ADPL instances are enabled
+ * in a composition, configure HW accelerated EPs in
+ * both directions for V2X and IN direction for ECM.
+ * Configure normal EPs for ECM OUT and ADPL.
*/
+ switch (gsi->prot_id) {
+ case USB_PROT_RMNET_IPA:
+ if (!gsi_rmnet_v2x->function.fs_descriptors) {
+ in_intr_num = 2;
+ out_intr_num = 1;
+ }
+ break;
+ case USB_PROT_ECM_IPA:
+ in_intr_num = 2;
+ if (!gsi_rmnet_v2x->function.fs_descriptors)
+ out_intr_num = 1;
+ break;
+ case USB_PROT_DIAG_IPA:
+ if (!(gsi_ecm->function.fs_descriptors &&
+ gsi_rmnet_v2x->function.fs_descriptors))
+ in_intr_num = 3;
+ break;
+ default:
+ in_intr_num = 2;
+ out_intr_num = 1;
+ }
+
+ /* gsi_configure_ep required only for GSI-IPA EPs */
if (gsi->d_port.in_ep &&
gsi->prot_id <= USB_PROT_RMNET_V2X_IPA) {
- if (gsi->prot_id == USB_PROT_DIAG_IPA)
- gsi->d_port.in_ep->ep_intr_num = 3;
- else if ((gsi->prot_id == USB_PROT_RMNET_IPA ||
- gsi->prot_id == USB_PROT_ECM_IPA) &&
- gsi_rmnet_v2x->function.fs_descriptors)
- gsi->d_port.in_ep->ep_intr_num = 0;
- else
- gsi->d_port.in_ep->ep_intr_num = 2;
+ gsi->d_port.in_ep->ep_intr_num = in_intr_num;
usb_gsi_ep_op(gsi->d_port.in_ep,
&gsi->d_port.in_request,
GSI_EP_OP_CONFIG);
@@ -2526,12 +2551,7 @@
if (gsi->d_port.out_ep &&
gsi->prot_id <= USB_PROT_RMNET_V2X_IPA) {
- if ((gsi->prot_id == USB_PROT_RMNET_IPA ||
- gsi->prot_id == USB_PROT_ECM_IPA) &&
- gsi_rmnet_v2x->function.fs_descriptors)
- gsi->d_port.out_ep->ep_intr_num = 0;
- else
- gsi->d_port.out_ep->ep_intr_num = 1;
+ gsi->d_port.out_ep->ep_intr_num = out_intr_num;
usb_gsi_ep_op(gsi->d_port.out_ep,
&gsi->d_port.out_request,
GSI_EP_OP_CONFIG);
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
index dddf89e..535cfae 100644
--- a/drivers/usb/gadget/function/f_qc_rndis.c
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -6,7 +6,7 @@
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz (mina86@mina86.com)
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -331,18 +331,8 @@
.bInterval = RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
};
-static struct usb_ss_ep_comp_descriptor ss_intr_comp_desc = {
- .bLength = sizeof(ss_intr_comp_desc),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
-
- /* the following 3 values can be tweaked if necessary */
- /* .bMaxBurst = 0, */
- /* .bmAttributes = 0, */
- .wBytesPerInterval = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
-};
-
static struct usb_ss_ep_comp_descriptor rndis_qc_ss_intr_comp_desc = {
- .bLength = sizeof(ss_intr_comp_desc),
+ .bLength = sizeof(rndis_qc_ss_intr_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* the following 3 values can be tweaked if necessary */
@@ -351,15 +341,6 @@
.wBytesPerInterval = cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
};
-static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
- .bLength = sizeof(ss_bulk_comp_desc),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
-
- /* the following 2 values can be tweaked if necessary */
- /* .bMaxBurst = 0, */
- /* .bmAttributes = 0, */
-};
-
static struct usb_endpoint_descriptor rndis_qc_ss_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -379,7 +360,7 @@
};
static struct usb_ss_ep_comp_descriptor rndis_qc_ss_bulk_comp_desc = {
- .bLength = sizeof(ss_bulk_comp_desc),
+ .bLength = sizeof(rndis_qc_ss_bulk_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* the following 2 values can be tweaked if necessary */
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index a74a0aa..c9366c5 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -218,6 +218,7 @@
};
#define SDP_CHECK_DELAY_MS 10000 /* in ms */
+#define SDP_CHECK_BOOT_DELAY_MS 30000 /* in ms */
#define MSM_USB_BASE (motg->regs)
#define MSM_USB_PHY_CSR_BASE (motg->phy_csr_regs)
@@ -1919,12 +1920,13 @@
motg->chg_type);
psy_type = get_psy_type(motg);
- if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
- if (!mA)
+ if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT ||
+ (psy_type == POWER_SUPPLY_TYPE_USB &&
+ motg->enable_sdp_check_timer)) {
+ if (!mA) {
pval.intval = -ETIMEDOUT;
- else
- pval.intval = 1000 * mA;
- goto set_prop;
+ goto set_prop;
+ }
}
if (motg->cur_power == mA)
@@ -2775,7 +2777,7 @@
struct msm_otg *motg = container_of(w, struct msm_otg, sdp_check.work);
/* Cable disconnected or device enumerated as SDP */
- if (!motg->vbus_state || motg->phy.otg->gadget->state >=
+ if (!motg->vbus_state || motg->phy.otg->gadget->state >
USB_STATE_DEFAULT)
return;
@@ -2856,13 +2858,18 @@
break;
}
- if (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_FLOAT)
- queue_delayed_work(motg->otg_wq,
- &motg->sdp_check,
- msecs_to_jiffies(SDP_CHECK_DELAY_MS));
-
pm_runtime_get_sync(otg->usb_phy->dev);
msm_otg_start_peripheral(otg, 1);
+ if (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_FLOAT ||
+ (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB &&
+ motg->enable_sdp_check_timer)) {
+ queue_delayed_work(motg->otg_wq,
+ &motg->sdp_check,
+ msecs_to_jiffies(
+ (phy->flags & PHY_SOFT_CONNECT) ?
+ SDP_CHECK_DELAY_MS :
+ SDP_CHECK_BOOT_DELAY_MS));
+ }
otg->state = OTG_STATE_B_PERIPHERAL;
} else {
pr_debug("Cable disconnected\n");
@@ -4116,6 +4123,9 @@
of_property_read_u32(pdev->dev.of_node, "qcom,pm-qos-latency",
&motg->pm_qos_latency);
+ motg->enable_sdp_check_timer = of_property_read_bool(pdev->dev.of_node,
+ "qcom,enumeration-check-for-sdp");
+
pdata = msm_otg_dt_to_pdata(pdev);
if (!pdata) {
ret = -ENOMEM;
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 3e9b7eb..3a5dd02 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -2,7 +2,7 @@
* Core MDSS framebuffer driver.
*
* Copyright (C) 2007 Google Incorporated
- * Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -3579,6 +3579,9 @@
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+ if (!mfd)
+ return -EPERM;
+
if (!mfd->op_enable)
return -EPERM;
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index e72a315..5767ca1 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -600,6 +600,7 @@
struct mutex flush_lock;
struct mutex *shared_lock;
struct mutex rsrc_lock;
+ struct mutex vsync_handler_lock;
spinlock_t spin_lock;
struct mdss_panel_data *panel_data;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 029634b..75c2b6f 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2452,6 +2452,7 @@
mutex_init(&ctl->offlock);
mutex_init(&ctl->flush_lock);
mutex_init(&ctl->rsrc_lock);
+ mutex_init(&ctl->vsync_handler_lock);
spin_lock_init(&ctl->spin_lock);
BLOCKING_INIT_NOTIFIER_HEAD(&ctl->notifier_head);
pr_debug("alloc ctl_num=%d\n", ctl->num);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index 1c70637..9e27bb1 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -1982,6 +1982,49 @@
pr_debug("%pS->%s ctl:%d\n",
__builtin_return_address(0), __func__, ctl->num);
+ mutex_lock(&ctl->vsync_handler_lock);
+
+ MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), 0x88888);
+ sctl = mdss_mdp_get_split_ctl(ctl);
+ if (sctl)
+ sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+ spin_lock_irqsave(&ctx->clk_lock, flags);
+ if (handle->enabled) {
+ handle->enabled = false;
+ list_del_init(&handle->list);
+ disable_vsync_irq = !handle->cmd_post_flush;
+ }
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
+
+ if (disable_vsync_irq) {
+ /* disable rd_ptr interrupt and clocks */
+ mdss_mdp_setup_vsync(ctx, false);
+ complete(&ctx->stop_comp);
+ }
+
+ mutex_unlock(&ctl->vsync_handler_lock);
+
+ return 0;
+}
+
+static int mdss_mdp_cmd_remove_vsync_handler_nolock(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_vsync_handler *handle)
+{
+ struct mdss_mdp_ctl *sctl;
+ struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+ unsigned long flags;
+ bool disable_vsync_irq = false;
+
+ ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+ if (!ctx) {
+ pr_err("%s: invalid ctx\n", __func__);
+ return -ENODEV;
+ }
+
+ pr_debug("%pS->%s ctl:%d\n",
+ __builtin_return_address(0), __func__, ctl->num);
+
MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), 0x88888);
sctl = mdss_mdp_get_split_ctl(ctl);
if (sctl)
@@ -3216,8 +3259,12 @@
return -ENODEV;
}
- list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list)
- mdss_mdp_cmd_remove_vsync_handler(ctl, handle);
+ mutex_lock(&ctl->vsync_handler_lock);
+ list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list) {
+ mdss_mdp_cmd_remove_vsync_handler_nolock(ctl, handle);
+ }
+ mutex_unlock(&ctl->vsync_handler_lock);
+
if (mdss_mdp_is_lineptr_supported(ctl))
mdss_mdp_cmd_lineptr_ctrl(ctl, false);
MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), XLOG_FUNC_ENTRY);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index 8f4d437..c2e3b18 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3133,7 +3133,7 @@
}
if (!ad->bl_mfd || !ad->bl_mfd->panel_info ||
- !ad->bl_att_lut) {
+ ad->bl_att_lut == NULL) {
pr_err("Invalid ad info: bl_mfd = 0x%pK, ad->bl_mfd->panel_info = 0x%pK, bl_att_lut = 0x%pK\n",
ad->bl_mfd,
(!ad->bl_mfd) ? NULL : ad->bl_mfd->panel_info,
@@ -6671,7 +6671,7 @@
ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
break;
/* Dither enable/disable */
- } else if ((ptr == base + MDSS_MDP_REG_DSPP_DITHER_DEPTH)) {
+ } else if (ptr == base + MDSS_MDP_REG_DSPP_DITHER_DEPTH) {
ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
break;
/* Six zone and mem color */
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 02bb7b5..65e1eaa 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4846,7 +4846,7 @@
err = btrfs_log_inode(trans, root, other_inode,
LOG_OTHER_INODE,
0, LLONG_MAX, ctx);
- iput(other_inode);
+ btrfs_add_delayed_iput(other_inode);
if (err)
goto out_unlock;
else
@@ -5264,7 +5264,7 @@
}
if (btrfs_inode_in_log(di_inode, trans->transid)) {
- iput(di_inode);
+ btrfs_add_delayed_iput(di_inode);
break;
}
@@ -5276,7 +5276,7 @@
if (!ret &&
btrfs_must_commit_transaction(trans, di_inode))
ret = 1;
- iput(di_inode);
+ btrfs_add_delayed_iput(di_inode);
if (ret)
goto next_dir_inode;
if (ctx->log_new_dentries) {
@@ -5422,7 +5422,7 @@
if (!ret && ctx && ctx->log_new_dentries)
ret = log_new_dir_dentries(trans, root,
dir_inode, ctx);
- iput(dir_inode);
+ btrfs_add_delayed_iput(dir_inode);
if (ret)
goto out;
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 4b350ac..d4f8bb3 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2447,6 +2447,7 @@
cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
{
int rc = 0;
+ int is_domain = 0;
const char *delim, *payload;
char *desc;
ssize_t len;
@@ -2494,6 +2495,7 @@
rc = PTR_ERR(key);
goto out_err;
}
+ is_domain = 1;
}
down_read(&key->sem);
@@ -2551,6 +2553,26 @@
goto out_key_put;
}
+ /*
+ * If we have a domain key then we must set the domainName in the
+ * volume info for the request.
+ */
+ if (is_domain && ses->domainName) {
+ vol->domainname = kstrndup(ses->domainName,
+ strlen(ses->domainName),
+ GFP_KERNEL);
+ if (!vol->domainname) {
+ cifs_dbg(FYI, "Unable to allocate %zd bytes for "
+ "domain\n", len);
+ rc = -ENOMEM;
+ kfree(vol->username);
+ vol->username = NULL;
+ kzfree(vol->password);
+ vol->password = NULL;
+ goto out_key_put;
+ }
+ }
+
out_key_put:
up_read(&key->sem);
key_put(key);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
old mode 100755
new mode 100644
index 1781989..a710c73
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -530,6 +530,10 @@
struct bio *bio = *fio->bio;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
+ struct inode *inode;
+ bool bio_encrypted;
+ int bi_crypt_skip;
+ u64 dun;
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
@@ -538,16 +542,29 @@
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);
+ inode = fio->page->mapping->host;
+ dun = PG_DUN(inode, fio->page);
+ bi_crypt_skip = fio->encrypted_page ? 1 : 0;
+ bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
+ fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
+
if (bio && (*fio->last_block + 1 != fio->new_blkaddr ||
!__same_bdev(fio->sbi, fio->new_blkaddr, bio))) {
__submit_bio(fio->sbi, bio, fio->type);
bio = NULL;
}
+ /* ICE support */
+ if (bio && !fscrypt_mergeable_bio(bio, dun,
+ bio_encrypted, bi_crypt_skip))
+ __submit_bio(fio->sbi, bio, fio->type);
alloc_new:
if (!bio) {
bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
BIO_MAX_PAGES, false, fio->type, fio->temp);
bio_set_op_attrs(bio, fio->op, fio->op_flags);
+ if (bio_encrypted)
+ fscrypt_set_ice_dun(inode, bio, dun);
+ fscrypt_set_ice_skip(bio, bi_crypt_skip);
}
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 8a0c301..7138383 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -73,13 +73,13 @@
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
switch (err) {
- case -EPERM:
- case -EACCES:
- case -EDQUOT:
- case -ENOSPC:
- case -EROFS:
- goto out_put_ctx;
default:
+ goto out_put_ctx;
+ case -ENOENT:
+ case -ESTALE:
+ case -EISDIR:
+ case -ENOTDIR:
+ case -ELOOP:
goto out_drop;
}
}
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index fad4d51..b6e2512 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -562,7 +562,7 @@
}
hdr->res.fattr = &hdr->fattr;
- hdr->res.count = count;
+ hdr->res.count = 0;
hdr->res.eof = 0;
hdr->res.verf = &hdr->verf;
nfs_fattr_init(&hdr->fattr);
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index b7bca83..06e7222 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -588,7 +588,8 @@
/* Emulate the eof flag, which isn't normally needed in NFSv2
* as it is guaranteed to always return the file attributes
*/
- if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
+ if ((hdr->res.count == 0 && hdr->args.count > 0) ||
+ hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
hdr->res.eof = 1;
}
return 0;
@@ -609,8 +610,10 @@
static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
- if (task->tk_status >= 0)
+ if (task->tk_status >= 0) {
+ hdr->res.count = hdr->args.count;
nfs_writeback_update_inode(hdr);
+ }
return 0;
}
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 57764da..7c52993 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -482,7 +482,7 @@
struct dentry *hardlink)
{
int err;
- const struct cred *old_cred;
+ const struct cred *old_cred, *hold_cred = NULL;
struct cred *override_cred;
err = ovl_copy_up(dentry->d_parent);
@@ -505,7 +505,7 @@
goto out_revert_creds;
}
}
- put_cred(override_creds(override_cred));
+ hold_cred = override_creds(override_cred);
put_cred(override_cred);
if (!ovl_dentry_is_opaque(dentry))
@@ -516,7 +516,9 @@
link, hardlink);
}
out_revert_creds:
- ovl_revert_creds(old_cred);
+ ovl_revert_creds(old_cred ?: hold_cred);
+ if (old_cred && hold_cred)
+ put_cred(hold_cred);
if (!err) {
struct inode *realinode = d_inode(ovl_dentry_upper(dentry));
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 1daeacb..0c51f91 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -794,6 +794,25 @@
size_t size;
};
+struct cec_adapter;
+struct edid;
+
+/**
+ * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
+ * @lock: mutex protecting this struct
+ * @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
+ * @name: name of the CEC adapter
+ * @parent: parent device of the CEC adapter
+ * @unregister_work: unregister the CEC adapter
+ */
+struct drm_dp_aux_cec {
+ struct mutex lock;
+ struct cec_adapter *adap;
+ const char *name;
+ struct device *parent;
+ struct delayed_work unregister_work;
+};
+
/**
* struct drm_dp_aux - DisplayPort AUX channel
* @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
@@ -846,6 +865,10 @@
* @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
*/
unsigned i2c_defer_count;
+ /**
+ * @cec: struct containing fields used for CEC-Tunneling-over-AUX.
+ */
+ struct drm_dp_aux_cec cec;
};
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
@@ -914,4 +937,37 @@
int drm_dp_aux_register(struct drm_dp_aux *aux);
void drm_dp_aux_unregister(struct drm_dp_aux *aux);
+#ifdef CONFIG_DRM_DP_CEC
+void drm_dp_cec_irq(struct drm_dp_aux *aux);
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
+ struct device *parent);
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
+#else
+static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ const char *name,
+ struct device *parent)
+{
+}
+
+static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
+ const struct edid *edid)
+{
+}
+
+static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+}
+
+#endif
+
#endif /* _DRM_DP_HELPER_H_ */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index f0641d3..9a6402a 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -109,14 +109,8 @@
#define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT)
#define bio_set_op_attrs(bio, op, op_flags) do { \
- if (__builtin_constant_p(op)) \
- BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS)); \
- else \
- WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS)); \
- if (__builtin_constant_p(op_flags)) \
- BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \
- else \
- WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \
+ WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS)); \
+ WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \
(bio)->bi_opf = bio_flags(bio); \
(bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT); \
(bio)->bi_opf |= (op_flags); \
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index e04e291..90e8394 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -148,10 +148,10 @@
* a new RANGE of SSIDs to the msg_mask_tbl.
*/
#define MSG_MASK_TBL_CNT 26
-#define APPS_EVENT_LAST_ID 0xCB4
+#define APPS_EVENT_LAST_ID 0xCB7
#define MSG_SSID_0 0
-#define MSG_SSID_0_LAST 130
+#define MSG_SSID_0_LAST 131
#define MSG_SSID_1 500
#define MSG_SSID_1_LAST 506
#define MSG_SSID_2 1000
@@ -354,13 +354,15 @@
MSG_LVL_MED,
MSG_LVL_HIGH,
MSG_LVL_LOW,
- MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
MSG_LVL_HIGH,
MSG_LVL_LOW,
MSG_LVL_MED,
MSG_LVL_MED,
MSG_LVL_HIGH,
- MSG_LVL_HIGH
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR
};
static const uint32_t msg_bld_masks_1[] = {
@@ -922,7 +924,7 @@
/* LOG CODES */
static const uint32_t log_code_last_tbl[] = {
0x0, /* EQUIP ID 0 */
- 0x1CB2, /* EQUIP ID 1 */
+ 0x1CC0, /* EQUIP ID 1 */
0x0, /* EQUIP ID 2 */
0x0, /* EQUIP ID 3 */
0x4910, /* EQUIP ID 4 */
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
new file mode 100644
index 0000000..e083d57
--- /dev/null
+++ b/include/linux/mhi.h
@@ -0,0 +1,812 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MHI_H_
+#define _MHI_H_
+
+struct mhi_chan;
+struct mhi_event;
+struct mhi_ctxt;
+struct mhi_cmd;
+struct image_info;
+struct bhi_vec_entry;
+struct mhi_timesync;
+struct mhi_buf_info;
+
+/**
+ * enum MHI_CB - MHI callback
+ * @MHI_CB_IDLE: MHI entered idle state
+ * @MHI_CB_PENDING_DATA: New data available for client to process
+ * @MHI_CB_DTR_SIGNAL: DTR signaling update
+ * @MHI_CB_LPM_ENTER: MHI host entered low power mode
+ * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
+ * @MHI_CB_EE_RDDM: MHI device entered RDDM execution environment
+ * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode ee
+ * @MHI_CB_SYS_ERROR: MHI device enter error state (may recover)
+ * @MHI_CB_FATAL_ERROR: MHI device entered fatal error
+ */
+enum MHI_CB {
+ MHI_CB_IDLE,
+ MHI_CB_PENDING_DATA,
+ MHI_CB_DTR_SIGNAL,
+ MHI_CB_LPM_ENTER,
+ MHI_CB_LPM_EXIT,
+ MHI_CB_EE_RDDM,
+ MHI_CB_EE_MISSION_MODE,
+ MHI_CB_SYS_ERROR,
+ MHI_CB_FATAL_ERROR,
+};
+
+/**
+ * enum MHI_DEBUG_LEVEL - various debugging levels
+ */
+enum MHI_DEBUG_LEVEL {
+ MHI_MSG_LVL_VERBOSE,
+ MHI_MSG_LVL_INFO,
+ MHI_MSG_LVL_ERROR,
+ MHI_MSG_LVL_CRITICAL,
+ MHI_MSG_LVL_MASK_ALL,
+};
+
+/**
+ * enum MHI_FLAGS - Transfer flags
+ * @MHI_EOB: End of buffer for bulk transfer
+ * @MHI_EOT: End of transfer
+ * @MHI_CHAIN: Linked transfer
+ */
+enum MHI_FLAGS {
+ MHI_EOB,
+ MHI_EOT,
+ MHI_CHAIN,
+};
+
+/**
+ * enum mhi_device_type - Device types
+ * @MHI_XFER_TYPE: Handles data transfer
+ * @MHI_TIMESYNC_TYPE: Use for timesync feature
+ * @MHI_CONTROLLER_TYPE: Control device
+ */
+enum mhi_device_type {
+ MHI_XFER_TYPE,
+ MHI_TIMESYNC_TYPE,
+ MHI_CONTROLLER_TYPE,
+};
+
+/**
+ * enum mhi_ee - device current execution environment
+ * @MHI_EE_PBL - device in PBL
+ * @MHI_EE_SBL - device in SBL
+ * @MHI_EE_AMSS - device in mission mode (firmware fully loaded)
+ * @MHI_EE_RDDM - device in ram dump collection mode
+ * @MHI_EE_WFW - device in WLAN firmware mode
+ * @MHI_EE_PTHRU - device in PBL but configured in pass thru mode
+ * @MHI_EE_EDL - device in emergency download mode
+ */
+enum mhi_ee {
+ MHI_EE_PBL,
+ MHI_EE_SBL,
+ MHI_EE_AMSS,
+ MHI_EE_RDDM,
+ MHI_EE_WFW,
+ MHI_EE_PTHRU,
+ MHI_EE_EDL,
+ MHI_EE_MAX_SUPPORTED = MHI_EE_EDL,
+ MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
+ MHI_EE_NOT_SUPPORTED,
+ MHI_EE_MAX,
+};
+
+/**
+ * enum mhi_dev_state - device current MHI state
+ */
+enum mhi_dev_state {
+ MHI_STATE_RESET = 0x0,
+ MHI_STATE_READY = 0x1,
+ MHI_STATE_M0 = 0x2,
+ MHI_STATE_M1 = 0x3,
+ MHI_STATE_M2 = 0x4,
+ MHI_STATE_M3 = 0x5,
+ MHI_STATE_M3_FAST = 0x6,
+ MHI_STATE_BHI = 0x7,
+ MHI_STATE_SYS_ERR = 0xFF,
+ MHI_STATE_MAX,
+};
+
+/**
+ * struct mhi_link_info - bw requirement
+ * target_link_speed - as defined by TLS bits in LinkControl reg
+ * target_link_width - as defined by NLW bits in LinkStatus reg
+ * sequence_num - used by device to track bw requests sent to host
+ */
+struct mhi_link_info {
+ unsigned int target_link_speed;
+ unsigned int target_link_width;
+ int sequence_num;
+};
+
+#define MHI_VOTE_BUS BIT(0) /* do not disable the bus */
+#define MHI_VOTE_DEVICE BIT(1) /* prevent mhi device from entering lpm */
+
+/**
+ * struct image_info - firmware and rddm table
+ * @mhi_buf - Contain device firmware and rddm table
+ * @entries - # of entries in table
+ */
+struct image_info {
+ struct mhi_buf *mhi_buf;
+ struct bhi_vec_entry *bhi_vec;
+ u32 entries;
+};
+
+/**
+ * struct mhi_controller - Master controller structure for external modem
+ * @dev: Device associated with this controller
+ * @of_node: DT that has MHI configuration information
+ * @regs: Points to base of MHI MMIO register space
+ * @bhi: Points to base of MHI BHI register space
+ * @bhie: Points to base of MHI BHIe register space
+ * @wake_db: MHI WAKE doorbell register address
+ * @dev_id: PCIe device id of the external device
+ * @domain: PCIe domain the device connected to
+ * @bus: PCIe bus the device assigned to
+ * @slot: PCIe slot for the modem
+ * @iova_start: IOMMU starting address for data
+ * @iova_stop: IOMMU stop address for data
+ * @fw_image: Firmware image name for normal booting
+ * @edl_image: Firmware image name for emergency download mode
+ * @fbc_download: MHI host needs to do complete image transfer
+ * @rddm_size: RAM dump size that host should allocate for debugging purpose
+ * @sbl_size: SBL image size
+ * @seg_len: BHIe vector size
+ * @fbc_image: Points to firmware image buffer
+ * @rddm_image: Points to RAM dump buffer
+ * @max_chan: Maximum number of channels controller support
+ * @mhi_chan: Points to channel configuration table
+ * @lpm_chans: List of channels that require LPM notifications
+ * @total_ev_rings: Total # of event rings allocated
+ * @hw_ev_rings: Number of hardware event rings
+ * @sw_ev_rings: Number of software event rings
+ * @msi_required: Number of msi required to operate
+ * @msi_allocated: Number of msi allocated by bus master
+ * @irq: base irq # to request
+ * @mhi_event: MHI event ring configurations table
+ * @mhi_cmd: MHI command ring configurations table
+ * @mhi_ctxt: MHI device context, shared memory between host and device
+ * @timeout_ms: Timeout in ms for state transitions
+ * @pm_state: Power management state
+ * @ee: MHI device execution environment
+ * @dev_state: MHI STATE
+ * @mhi_link_info: requested link bandwidth by device
+ * @status_cb: CB function to notify various power states to bus master
+ * @link_status: Query link status in case of abnormal value read from device
+ * @runtime_get: Async runtime resume function
+ * @runtime_put: Release votes
+ * @time_get: Return host time in us
+ * @lpm_disable: Request controller to disable link level low power modes
+ * @lpm_enable: Controller may enable link level low power modes again
+ * @priv_data: Points to bus master's private data
+ */
+struct mhi_controller {
+ struct list_head node;
+ struct mhi_device *mhi_dev;
+
+ /* device node for iommu ops */
+ struct device *dev;
+ struct device_node *of_node;
+
+ /* mmio base */
+ phys_addr_t base_addr;
+ void __iomem *regs;
+ void __iomem *bhi;
+ void __iomem *bhie;
+ void __iomem *wake_db;
+ void __iomem *bw_scale_db;
+
+ /* device topology */
+ u32 dev_id;
+ u32 domain;
+ u32 bus;
+ u32 slot;
+ u32 family_number;
+ u32 device_number;
+ u32 major_version;
+ u32 minor_version;
+
+ /* addressing window */
+ dma_addr_t iova_start;
+ dma_addr_t iova_stop;
+
+ /* fw images */
+ const char *fw_image;
+ const char *edl_image;
+
+ /* mhi host manages downloading entire fbc images */
+ bool fbc_download;
+ size_t rddm_size;
+ size_t sbl_size;
+ size_t seg_len;
+ u32 session_id;
+ u32 sequence_id;
+ struct image_info *fbc_image;
+ struct image_info *rddm_image;
+
+ /* physical channel config data */
+ u32 max_chan;
+ struct mhi_chan *mhi_chan;
+ struct list_head lpm_chans; /* these chan require lpm notification */
+
+ /* physical event config data */
+ u32 total_ev_rings;
+ u32 hw_ev_rings;
+ u32 sw_ev_rings;
+ u32 msi_required;
+ u32 msi_allocated;
+ int *irq; /* interrupt table */
+ struct mhi_event *mhi_event;
+ struct list_head lp_ev_rings; /* low priority event rings */
+
+ /* cmd rings */
+ struct mhi_cmd *mhi_cmd;
+
+ /* mhi context (shared with device) */
+ struct mhi_ctxt *mhi_ctxt;
+
+ u32 timeout_ms;
+
+ /* caller should grab pm_mutex for suspend/resume operations */
+ struct mutex pm_mutex;
+ bool pre_init;
+ rwlock_t pm_lock;
+ u32 pm_state;
+ u32 saved_pm_state; /* saved state during fast suspend */
+ u32 db_access; /* db access only on these states */
+ enum mhi_ee ee;
+ u32 ee_table[MHI_EE_MAX]; /* ee conversion from dev to host */
+ enum mhi_dev_state dev_state;
+ enum mhi_dev_state saved_dev_state;
+ bool wake_set;
+ atomic_t dev_wake;
+ atomic_t alloc_size;
+ atomic_t pending_pkts;
+ struct list_head transition_list;
+ spinlock_t transition_lock;
+ spinlock_t wlock;
+
+ /* target bandwidth info */
+ struct mhi_link_info mhi_link_info;
+
+ /* debug counters */
+ u32 M0, M2, M3, M3_FAST;
+
+ /* worker for different state transitions */
+ struct work_struct st_worker;
+ struct work_struct fw_worker;
+ struct work_struct syserr_worker;
+ struct work_struct low_priority_worker;
+ wait_queue_head_t state_event;
+
+ /* shadow functions */
+ void (*status_cb)(struct mhi_controller *, void *, enum MHI_CB);
+ int (*link_status)(struct mhi_controller *, void *);
+ void (*wake_get)(struct mhi_controller *, bool);
+ void (*wake_put)(struct mhi_controller *, bool);
+ void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
+ int (*runtime_get)(struct mhi_controller *, void *);
+ void (*runtime_put)(struct mhi_controller *, void *);
+ u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv);
+ int (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv);
+ int (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv);
+ int (*map_single)(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf);
+ void (*unmap_single)(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf);
+ void (*tsync_log)(struct mhi_controller *mhi_cntrl, u64 remote_time);
+ int (*bw_scale)(struct mhi_controller *mhi_cntrl,
+ struct mhi_link_info *link_info);
+
+ /* channel to control DTR messaging */
+ struct mhi_device *dtr_dev;
+
+ /* bounce buffer settings */
+ bool bounce_buf;
+ size_t buffer_len;
+
+ /* supports time sync feature */
+ struct mhi_timesync *mhi_tsync;
+ struct mhi_device *tsync_dev;
+ u64 local_timer_freq;
+ u64 remote_timer_freq;
+
+ /* kernel log level */
+ enum MHI_DEBUG_LEVEL klog_lvl;
+
+ /* private log level controller driver to set */
+ enum MHI_DEBUG_LEVEL log_lvl;
+
+ /* controller specific data */
+ void *priv_data;
+ void *log_buf;
+ struct dentry *dentry;
+ struct dentry *parent;
+};
+
+/**
+ * struct mhi_device - mhi device structure associated bind to channel
+ * @dev: Device associated with the channels
+ * @mtu: Maximum # of bytes controller support
+ * @ul_chan_id: MHI channel id for UL transfer
+ * @dl_chan_id: MHI channel id for DL transfer
+ * @tiocm: Device current terminal settings
+ * @early_notif: This device needs an early notification in case of error
+ * with external modem.
+ * @dev_vote: Keep external device in active state
+ * @bus_vote: Keep physical bus (pci, spi) in active state
+ * @priv: Driver private data
+ */
+struct mhi_device {
+ struct device dev;
+ u32 dev_id;
+ u32 domain;
+ u32 bus;
+ u32 slot;
+ size_t mtu;
+ int ul_chan_id;
+ int dl_chan_id;
+ int ul_event_id;
+ int dl_event_id;
+ u32 tiocm;
+ bool early_notif;
+ const struct mhi_device_id *id;
+ const char *chan_name;
+ struct mhi_controller *mhi_cntrl;
+ struct mhi_chan *ul_chan;
+ struct mhi_chan *dl_chan;
+ atomic_t dev_vote;
+ atomic_t bus_vote;
+ enum mhi_device_type dev_type;
+ void *priv_data;
+ int (*ul_xfer)(struct mhi_device *, struct mhi_chan *, void *,
+ size_t, enum MHI_FLAGS);
+ int (*dl_xfer)(struct mhi_device *, struct mhi_chan *, void *,
+ size_t, enum MHI_FLAGS);
+ void (*status_cb)(struct mhi_device *, enum MHI_CB);
+};
+
+/**
+ * struct mhi_result - Completed buffer information
+ * @buf_addr: Address of data buffer
+ * @dir: Channel direction
+ * @bytes_xferd: # of bytes transferred
+ * @transaction_status: Status of last transfer
+ */
+struct mhi_result {
+ void *buf_addr;
+ enum dma_data_direction dir;
+ size_t bytes_xferd;
+ int transaction_status;
+};
+
+/**
+ * struct mhi_buf - Describes the buffer
+ * @page: buffer as a page
+ * @buf: cpu address for the buffer
+ * @phys_addr: physical address of the buffer
+ * @dma_addr: iommu address for the buffer
+ * @skb: skb of ip packet
+ * @len: # of bytes
+ * @name: Buffer label, for offload channel configurations name must be:
+ * ECA - Event context array data
+ * CCA - Channel context array data
+ */
+struct mhi_buf {
+ struct list_head node;
+ struct page *page;
+ void *buf;
+ phys_addr_t phys_addr;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ size_t len;
+ const char *name; /* ECA, CCA */
+};
+
+/**
+ * struct mhi_driver - mhi driver information
+ * @id_table: NULL terminated channel ID names
+ * @ul_xfer_cb: UL data transfer callback
+ * @dl_xfer_cb: DL data transfer callback
+ * @status_cb: Asynchronous status callback
+ */
+struct mhi_driver {
+ const struct mhi_device_id *id_table;
+ int (*probe)(struct mhi_device *, const struct mhi_device_id *id);
+ void (*remove)(struct mhi_device *);
+ void (*ul_xfer_cb)(struct mhi_device *, struct mhi_result *);
+ void (*dl_xfer_cb)(struct mhi_device *, struct mhi_result *);
+ void (*status_cb)(struct mhi_device *, enum MHI_CB mhi_cb);
+ struct device_driver driver;
+};
+
+#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
+#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
+
+static inline void mhi_device_set_devdata(struct mhi_device *mhi_dev,
+ void *priv)
+{
+ mhi_dev->priv_data = priv;
+}
+
+static inline void *mhi_device_get_devdata(struct mhi_device *mhi_dev)
+{
+ return mhi_dev->priv_data;
+}
+
+/**
+ * mhi_queue_transfer - Queue a buffer to hardware
+ * All transfers are asynchronous transfers
+ * @mhi_dev: Device associated with the channels
+ * @dir: Data direction
+ * @buf: Data buffer (skb for hardware channels)
+ * @len: Size in bytes
+ * @mflags: Interrupt flags for the device
+ */
+static inline int mhi_queue_transfer(struct mhi_device *mhi_dev,
+ enum dma_data_direction dir,
+ void *buf,
+ size_t len,
+ enum MHI_FLAGS mflags)
+{
+ if (dir == DMA_TO_DEVICE)
+ return mhi_dev->ul_xfer(mhi_dev, mhi_dev->ul_chan, buf, len,
+ mflags);
+ else
+ return mhi_dev->dl_xfer(mhi_dev, mhi_dev->dl_chan, buf, len,
+ mflags);
+}
+
+static inline void *mhi_controller_get_devdata(struct mhi_controller *mhi_cntrl)
+{
+ return mhi_cntrl->priv_data;
+}
+
+static inline void mhi_free_controller(struct mhi_controller *mhi_cntrl)
+{
+ kfree(mhi_cntrl);
+}
+
+/**
+ * mhi_driver_register - Register driver with MHI framework
+ * @mhi_drv: mhi_driver structure
+ */
+int mhi_driver_register(struct mhi_driver *mhi_drv);
+
+/**
+ * mhi_driver_unregister - Unregister a driver for mhi_devices
+ * @mhi_drv: mhi_driver structure
+ */
+void mhi_driver_unregister(struct mhi_driver *mhi_drv);
+
+/**
+ * mhi_device_configure - configure ECA or CCA context
+ * For offload channels that client manage, call this
+ * function to configure channel context or event context
+ * array associated with the channel
+ * @mhi_div: Device associated with the channels
+ * @dir: Direction of the channel
+ * @mhi_buf: Configuration data
+ * @elements: # of configuration elements
+ */
+int mhi_device_configure(struct mhi_device *mhi_div,
+ enum dma_data_direction dir,
+ struct mhi_buf *mhi_buf,
+ int elements);
+
+/**
+ * mhi_device_get - disable low power modes
+ * Only disables lpm, does not immediately exit low power mode
+ * if controller already in a low power mode
+ * @mhi_dev: Device associated with the channels
+ * @vote: requested vote (bus, device or both)
+ */
+void mhi_device_get(struct mhi_device *mhi_dev, int vote);
+
+/**
+ * mhi_device_get_sync - disable low power modes
+ * Synchronously disable device & or bus low power, exit low power mode if
+ * controller already in a low power state
+ * @mhi_dev: Device associated with the channels
+ * @vote: requested vote (bus, device or both)
+ */
+int mhi_device_get_sync(struct mhi_device *mhi_dev, int vote);
+
+/**
+ * mhi_device_put - re-enable low power modes
+ * @mhi_dev: Device associated with the channels
+ * @vote: vote to remove
+ */
+void mhi_device_put(struct mhi_device *mhi_dev, int vote);
+
+/**
+ * mhi_prepare_for_transfer - setup channel for data transfer
+ * Moves both UL and DL channel from RESET to START state
+ * @mhi_dev: Device associated with the channels
+ */
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_unprepare_from_transfer -unprepare the channels
+ * Moves both UL and DL channels to RESET state
+ * @mhi_dev: Device associated with the channels
+ */
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_get_no_free_descriptors - Get transfer ring length
+ * Get # of TD available to queue buffers
+ * @mhi_dev: Device associated with the channels
+ * @dir: Direction of the channel
+ */
+int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev,
+ enum dma_data_direction dir);
+
+/**
+ * mhi_poll - poll for any available data to consume
+ * This is only applicable for DL direction
+ * @mhi_dev: Device associated with the channels
+ * @budget: In descriptors to service before returning
+ */
+int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
+
+/**
+ * mhi_ioctl - user space IOCTL support for MHI channels
+ * Native support for setting TIOCM
+ * @mhi_dev: Device associated with the channels
+ * @cmd: IOCTL cmd
+ * @arg: Optional parameter, ioctl cmd specific
+ */
+long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg);
+
+/**
+ * mhi_alloc_controller - Allocate mhi_controller structure
+ * Allocate controller structure and additional data for controller
+ * private data. You may get the private data pointer by calling
+ * mhi_controller_get_devdata
+ * @size: # of additional bytes to allocate
+ */
+struct mhi_controller *mhi_alloc_controller(size_t size);
+
+/**
+ * of_register_mhi_controller - Register MHI controller
+ * Registers MHI controller with MHI bus framework. DT must be supported
+ * @mhi_cntrl: MHI controller to register
+ */
+int of_register_mhi_controller(struct mhi_controller *mhi_cntrl);
+
+void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_bdf_to_controller - Look up a registered controller
+ * Search for controller based on device identification
+ * @domain: RC domain of the device
+ * @bus: Bus device connected to
+ * @slot: Slot device assigned to
+ * @dev_id: Device Identification
+ */
+struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot,
+ u32 dev_id);
+
+/**
+ * mhi_prepare_for_power_up - Do pre-initialization before power up
+ * This is optional, call this before power up if controller does not
+ * want bus framework to automatically free any allocated memory during shutdown
+ * process.
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_async_power_up - Starts MHI power up sequence
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_power_down - Start MHI power down sequence
+ * @mhi_cntrl: MHI controller
+ * @graceful: link is still accessible, do a graceful shutdown process otherwise
+ * we will shutdown host w/o putting device into RESET state
+ */
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
+
+/**
+ * mhi_unprepare_after_power_down - free any allocated memory for power up
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_suspend - Move MHI into a suspended state
+ * Transition to MHI state M3 state from M0||M1||M2 state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_fast_suspend - Move host into suspend state while keeping
+ * the device in active state.
+ * @mhi_cntrl: MHI controller
+ * @notify_client: if true, clients will get a notification about lpm transition
+ */
+int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client);
+
+/**
+ * mhi_pm_resume - Resume MHI from suspended state
+ * Transition to MHI state M0 state from M3 state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_fast_resume - Move host into resume state from fast suspend state
+ * @mhi_cntrl: MHI controller
+ * @notify_client: if true, clients will get a notification about lpm transition
+ */
+int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client);
+
+/**
+ * mhi_download_rddm_img - Download ramdump image from device for
+ * debugging purpose.
+ * @mhi_cntrl: MHI controller
+ * @in_panic: If we are trying to capture image while in kernel panic
+ */
+int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
+
+/**
+ * mhi_force_rddm_mode - Force external device into rddm mode
+ * to collect device ramdump. This is useful if host driver assert
+ * and we need to see device state as well.
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_get_remote_time_sync - Get external soc time relative to local soc time
+ * using MMIO method.
+ * @mhi_dev: Device associated with the channels
+ * @t_host: Pointer to output local soc time
+ * @t_dev: Pointer to output remote soc time
+ */
+int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
+ u64 *t_host,
+ u64 *t_dev);
+
+/**
+ * mhi_get_mhi_state - Return MHI state of device
+ * @mhi_cntrl: MHI controller
+ */
+enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_set_mhi_state - Set device state
+ * @mhi_cntrl: MHI controller
+ * @state: state to set
+ */
+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
+ enum mhi_dev_state state);
+
+
+/**
+ * mhi_is_active - helper function to determine if MHI in active state
+ * @mhi_dev: client device
+ */
+static inline bool mhi_is_active(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
+ mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
+}
+
+/**
+ * mhi_control_error - MHI controller went into unrecoverable error state.
+ * Will transition MHI into Linkdown state. Do not call from atomic
+ * context.
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_control_error(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_debug_reg_dump - dump MHI registers for debug purpose
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl);
+
+#ifndef CONFIG_ARCH_QCOM
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define MHI_VERB(fmt, ...) do { \
+ if (mhi_cntrl->klog_lvl <= MHI_MSG_VERBOSE) \
+ pr_debug("[D][%s] " fmt, __func__, ##__VA_ARGS__);\
+} while (0)
+
+#else
+
+#define MHI_VERB(fmt, ...)
+
+#endif
+
+#define MHI_LOG(fmt, ...) do { \
+ if (mhi_cntrl->klog_lvl <= MHI_MSG_INFO) \
+ pr_info("[I][%s] " fmt, __func__, ##__VA_ARGS__);\
+} while (0)
+
+#define MHI_ERR(fmt, ...) do { \
+ if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
+ pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define MHI_CRITICAL(fmt, ...) do { \
+ if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
+ pr_alert("[C][%s] " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+
+#else /* ARCH QCOM */
+
+#include <linux/ipc_logging.h>
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define MHI_VERB(fmt, ...) do { \
+ if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
+ pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\
+ if (mhi_cntrl->log_buf && \
+ (mhi_cntrl->log_lvl <= MHI_MSG_LVL_VERBOSE)) \
+ ipc_log_string(mhi_cntrl->log_buf, "[D][%s] " fmt, \
+ __func__, ##__VA_ARGS__); \
+} while (0)
+
+#else
+
+#define MHI_VERB(fmt, ...)
+
+#endif
+
+#define MHI_LOG(fmt, ...) do { \
+ if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
+ pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\
+ if (mhi_cntrl->log_buf && \
+ (mhi_cntrl->log_lvl <= MHI_MSG_LVL_INFO)) \
+ ipc_log_string(mhi_cntrl->log_buf, "[I][%s] " fmt, \
+ __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define MHI_ERR(fmt, ...) do { \
+ if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
+ pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
+ if (mhi_cntrl->log_buf && \
+ (mhi_cntrl->log_lvl <= MHI_MSG_LVL_ERROR)) \
+ ipc_log_string(mhi_cntrl->log_buf, "[E][%s] " fmt, \
+ __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define MHI_CRITICAL(fmt, ...) do { \
+ if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
+ pr_err("[C][%s] " fmt, __func__, ##__VA_ARGS__); \
+ if (mhi_cntrl->log_buf && \
+ (mhi_cntrl->log_lvl <= MHI_MSG_LVL_CRITICAL)) \
+ ipc_log_string(mhi_cntrl->log_buf, "[C][%s] " fmt, \
+ __func__, ##__VA_ARGS__); \
+} while (0)
+
+#endif
+
+#endif /* _MHI_H_ */
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 579fd0a..85a43db 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -690,5 +690,17 @@
const char obj_type[16];
};
+#define MHI_NAME_SIZE 32
+
+/**
+ * struct mhi_device_id - MHI device identification
+ * @chan: MHI channel name
+ * @driver_data: driver data;
+ */
+
+struct mhi_device_id {
+ const char chan[MHI_NAME_SIZE];
+ kernel_ulong_t driver_data;
+};
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 0e7f7c5..f010249 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -2,7 +2,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Author: Brian Swetland <swetland@google.com>
- * Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2019, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -319,6 +319,7 @@
struct work_struct notify_charger_work;
struct work_struct extcon_register_work;
struct notifier_block psy_nb;
+ bool enable_sdp_check_timer;
};
struct ci13xxx_platform_data {
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 4caac13..c724747 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -23,6 +23,7 @@
#define PHY_HSFS_MODE BIT(8)
#define PHY_LS_MODE BIT(9)
#define PHY_USB_DP_CONCURRENT_MODE BIT(10)
+#define PHY_SOFT_CONNECT BIT(11)
enum usb_phy_interface {
USBPHY_INTERFACE_MODE_UNKNOWN,
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f656728..dc9c526 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -67,6 +67,12 @@
/* Indicate backport support for processing user cell base hint */
#define CFG80211_USER_HINT_CELL_BASE_SELF_MANAGED 1
+/* Indicate backport support for external authentication in AP mode */
+#define CFG80211_EXTERNAL_AUTH_AP_SUPPORT 1
+
+/* Indicate backport support for DH IE creation/update*/
+#define CFG80211_EXTERNAL_DH_UPDATE_SUPPORT 1
+
/**
* DOC: Introduction
*
@@ -739,6 +745,17 @@
};
/**
+ * enum cfg80211_ap_settings_flags - AP settings flags
+ *
+ * Used by cfg80211_ap_settings
+ *
+ * @AP_SETTINGS_EXTERNAL_AUTH_SUPPORT: AP supports external authentication
+ */
+enum cfg80211_ap_settings_flags {
+ AP_SETTINGS_EXTERNAL_AUTH_SUPPORT = BIT(0),
+};
+
+/**
* struct cfg80211_ap_settings - AP configuration
*
* Used to configure an AP interface.
@@ -763,6 +780,7 @@
* @pbss: If set, start as a PCP instead of AP. Relevant for DMG
* networks.
* @beacon_rate: bitrate to be used for beacons
+ * @flags: flags, as defined in enum cfg80211_ap_settings_flags
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -783,6 +801,7 @@
const struct cfg80211_acl_data *acl;
bool pbss;
struct cfg80211_bitrate_mask beacon_rate;
+ u32 flags;
};
/**
@@ -2582,6 +2601,7 @@
* use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space cannot give you
* the real status code for failures. Used only for the authentication
* response command interface (user space to driver).
+ * @pmkid: The identifier to refer a PMKSA.
*/
struct cfg80211_external_auth_params {
enum nl80211_external_auth_action action;
@@ -2589,6 +2609,33 @@
struct cfg80211_ssid ssid;
unsigned int key_mgmt_suite;
u16 status;
+ const u8 *pmkid;
+};
+
+/**
+ * struct cfg80211_update_owe_info - OWE Information
+ *
+ * This structure provides information needed for the drivers to offload OWE
+ * (Opportunistic Wireless Encryption) processing to the user space.
+ *
+ * Commonly used across update_owe_info request and event interfaces.
+ *
+ * @peer: MAC address of the peer device for which the OWE processing
+ * has to be done.
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful OWE info
+ * processing, use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space
+ * cannot give you the real status code for failures. Used only for
+ * OWE update request command interface (user space to driver).
+ * @ie: IEs obtained from the peer or constructed by the user space. These are
+ * the IEs of the remote peer in the event from the host driver and
+ * the constructed IEs by the user space in the request interface.
+ * @ie_len: Length of IEs in octets.
+ */
+struct cfg80211_update_owe_info {
+ u8 peer[ETH_ALEN] __aligned(2);
+ u16 status;
+ const u8 *ie;
+ size_t ie_len;
};
/**
@@ -2903,6 +2950,10 @@
*
* @external_auth: indicates result of offloaded authentication processing from
* user space
+ *
+ * @update_owe_info: Provide updated OWE info to driver. Driver implementing SME
+ * but offloading OWE processing to the user space will get the updated
+ * DH IE through this interface.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -3189,6 +3240,8 @@
const bool enabled);
int (*external_auth)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_external_auth_params *params);
+ int (*update_owe_info)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_update_owe_info *owe_info);
};
/*
@@ -6185,4 +6238,14 @@
#define wiphy_WARN(wiphy, format, args...) \
WARN(1, "wiphy: %s\n" format, wiphy_name(wiphy), ##args);
+/**
+ * cfg80211_update_owe_info_event - Notify the peer's OWE info to user space
+ * @netdev: network device
+ * @owe_info: peer's owe info
+ * @gfp: allocation flags
+ */
+void cfg80211_update_owe_info_event(struct net_device *netdev,
+ struct cfg80211_update_owe_info *owe_info,
+ gfp_t gfp);
+
#endif /* __NET_CFG80211_H */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 71fd438..d225789 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -494,9 +494,7 @@
int snd_soc_platform_write(struct snd_soc_platform *platform,
unsigned int reg, unsigned int val);
int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num);
-#ifdef CONFIG_SND_SOC_COMPRESS
int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num);
-#endif
struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
const char *dai_link, int stream);
diff --git a/include/uapi/linux/isdn/capicmd.h b/include/uapi/linux/isdn/capicmd.h
index b58635f..ae1e1fb 100644
--- a/include/uapi/linux/isdn/capicmd.h
+++ b/include/uapi/linux/isdn/capicmd.h
@@ -15,6 +15,7 @@
#define CAPI_MSG_BASELEN 8
#define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2)
#define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2)
+#define CAPI_DISCONNECT_B3_RESP_LEN (CAPI_MSG_BASELEN+4)
/*----- CAPI commands -----*/
#define CAPI_ALERT 0x01
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 6d09d21..864943f 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -110,6 +110,7 @@
#define IPA_IOCTL_GSB_DISCONNECT 62
#define IPA_IOCTL_GET_PHERIPHERAL_EP_INFO 63
#define IPA_IOCTL_GET_NAT_IN_SRAM_INFO 64
+#define IPA_IOCTL_APP_CLOCK_VOTE 65
/**
* max size of the header to be inserted
@@ -189,6 +190,8 @@
#define IPA_FLT_L2TP_INNER_IPV4_DST_ADDR (1ul << 26)
#define IPA_FLT_IS_PURE_ACK (1ul << 27)
#define IPA_FLT_VLAN_ID (1ul << 28)
+#define IPA_FLT_MAC_SRC_ADDR_802_1Q (1ul << 29)
+#define IPA_FLT_MAC_DST_ADDR_802_1Q (1ul << 30)
/**
* maximal number of NAT PDNs in the PDN config table
@@ -608,6 +611,11 @@
#define IPA_GSB_EVENT_MAX IPA_GSB_EVENT_MAX
};
+enum ipa_peripheral_event {
+ IPA_PERIPHERAL_CONNECT = ECM_CONNECT,
+ IPA_PERIPHERAL_DISCONNECT = ECM_DISCONNECT
+};
+
#define WIGIG_CLIENT_CONNECT (IPA_GSB_EVENT_MAX)
#define WIGIG_FST_SWITCH (WIGIG_CLIENT_CONNECT + 1)
#define WIGIG_EVENT_MAX (WIGIG_FST_SWITCH + 1)
@@ -954,6 +962,8 @@
* IPA_HDR_PROC_ETHII_TO_802_3: Process Ethernet II to 802_3
* IPA_HDR_PROC_802_3_TO_ETHII: Process 802_3 to Ethernet II
* IPA_HDR_PROC_802_3_TO_802_3: Process 802_3 to 802_3
+ * IPA_HDR_PROC_ETHII_TO_ETHII_EX: Process Ethernet II to Ethernet II with
+ * generic lengths of src and dst headers
*/
enum ipa_hdr_proc_type {
IPA_HDR_PROC_NONE,
@@ -962,9 +972,10 @@
IPA_HDR_PROC_802_3_TO_ETHII,
IPA_HDR_PROC_802_3_TO_802_3,
IPA_HDR_PROC_L2TP_HEADER_ADD,
- IPA_HDR_PROC_L2TP_HEADER_REMOVE
+ IPA_HDR_PROC_L2TP_HEADER_REMOVE,
+ IPA_HDR_PROC_ETHII_TO_ETHII_EX
};
-#define IPA_HDR_PROC_MAX (IPA_HDR_PROC_L2TP_HEADER_REMOVE + 1)
+#define IPA_HDR_PROC_MAX (IPA_HDR_PROC_ETHII_TO_ETHII_EX + 1)
/**
* struct ipa_rt_rule - attributes of a routing rule
@@ -1081,6 +1092,20 @@
enum ipa_client_type dst_pipe;
};
+/**
+ * struct ipa_eth_II_to_eth_II_ex_procparams -
+ * @input_ethhdr_negative_offset: Specifies where the ethernet hdr offset is
+ * (in bytes) from the start of the input IP hdr
+ * @output_ethhdr_negative_offset: Specifies where the ethernet hdr offset is
+ * (in bytes) from the end of the template hdr
+ * @reserved: for future use
+ */
+struct ipa_eth_II_to_eth_II_ex_procparams {
+ uint32_t input_ethhdr_negative_offset : 8;
+ uint32_t output_ethhdr_negative_offset : 8;
+ uint32_t reserved : 16;
+};
+
#define L2TP_USER_SPACE_SPECIFY_DST_PIPE
/**
@@ -1089,6 +1114,7 @@
* @type: processing context type
* @hdr_hdl: in parameter, handle to header
* @l2tp_params: l2tp parameters
+ * @generic_params: generic proc_ctx params
* @proc_ctx_hdl: out parameter, handle to proc_ctx, valid when status is 0
* @status: out parameter, status of header add operation,
* 0 for success,
@@ -1100,6 +1126,7 @@
uint32_t proc_ctx_hdl;
int status;
struct ipa_l2tp_hdr_proc_ctx_params l2tp_params;
+ struct ipa_eth_II_to_eth_II_ex_procparams generic_params;
};
#define IPA_L2TP_HDR_PROC_SUPPORT
@@ -2283,6 +2310,10 @@
IPA_IOCTL_GET_NAT_IN_SRAM_INFO, \
struct ipa_nat_in_sram_info)
+#define IPA_IOC_APP_CLOCK_VOTE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_APP_CLOCK_VOTE, \
+ uint32_t)
+
/*
* unique magic number of the Tethering bridge ioctls
*/
@@ -2388,6 +2419,18 @@
uint32_t best_nat_in_sram_size_rqst;
};
+/**
+ * enum ipa_app_clock_vote_type
+ *
+ * The types of votes that can be accepted by the
+ * IPA_IOC_APP_CLOCK_VOTE ioctl
+ */
+enum ipa_app_clock_vote_type {
+ IPA_APP_CLK_DEVOTE = 0,
+ IPA_APP_CLK_VOTE = 1,
+ IPA_APP_CLK_RESET_VOTE = 2,
+};
+
#define TETH_BRIDGE_IOC_SET_BRIDGE_MODE _IOW(TETH_BRIDGE_IOC_MAGIC, \
TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE, \
struct teth_ioc_set_bridge_mode *)
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 8143af6..31817b8 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -981,6 +981,48 @@
* user space through the connect result as the user space would have
* initiated the connection through the connect request.
*
+ * @NL80211_CMD_STA_OPMODE_CHANGED: An event that notify station's
+ * ht opmode or vht opmode changes using any of %NL80211_ATTR_SMPS_MODE,
+ * %NL80211_ATTR_CHANNEL_WIDTH,%NL80211_ATTR_NSS attributes with its
+ * address(specified in %NL80211_ATTR_MAC).
+ *
+ * @NL80211_CMD_GET_FTM_RESPONDER_STATS: Retrieve FTM responder statistics, in
+ * the %NL80211_ATTR_FTM_RESPONDER_STATS attribute.
+ *
+ * @NL80211_CMD_PEER_MEASUREMENT_START: start a (set of) peer measurement(s)
+ * with the given parameters, which are encapsulated in the nested
+ * %NL80211_ATTR_PEER_MEASUREMENTS attribute. Optionally, MAC address
+ * randomization may be enabled and configured by specifying the
+ * %NL80211_ATTR_MAC and %NL80211_ATTR_MAC_MASK attributes.
+ * If a timeout is requested, use the %NL80211_ATTR_TIMEOUT attribute.
+ * A u64 cookie for further %NL80211_ATTR_COOKIE use is returned in
+ * the netlink extended ack message.
+ *
+ * To cancel a measurement, close the socket that requested it.
+ *
+ * Measurement results are reported to the socket that requested the
+ * measurement using @NL80211_CMD_PEER_MEASUREMENT_RESULT when they
+ * become available, so applications must ensure a large enough socket
+ * buffer size.
+ *
+ * Depending on driver support it may or may not be possible to start
+ * multiple concurrent measurements.
+ * @NL80211_CMD_PEER_MEASUREMENT_RESULT: This command number is used for the
+ * result notification from the driver to the requesting socket.
+ * @NL80211_CMD_PEER_MEASUREMENT_COMPLETE: Notification only, indicating that
+ * the measurement completed, using the measurement cookie
+ * (%NL80211_ATTR_COOKIE).
+ *
+ * @NL80211_CMD_NOTIFY_RADAR: Notify the kernel that a radar signal was
+ * detected and reported by a neighboring device on the channel
+ * indicated by %NL80211_ATTR_WIPHY_FREQ and other attributes
+ * determining the width and type.
+ *
+ * @NL80211_CMD_UPDATE_OWE_INFO: This interface allows the host driver to
+ * offload OWE processing to user space. This intends to support
+ * OWE AKM by the host drivers that implement SME but rely
+ * on the user space for the cryptographic/DH IE processing in AP mode.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1189,6 +1231,20 @@
NL80211_CMD_EXTERNAL_AUTH,
+ NL80211_CMD_STA_OPMODE_CHANGED,
+
+ NL80211_CMD_CONTROL_PORT_FRAME,
+
+ NL80211_CMD_GET_FTM_RESPONDER_STATS,
+
+ NL80211_CMD_PEER_MEASUREMENT_START,
+ NL80211_CMD_PEER_MEASUREMENT_RESULT,
+ NL80211_CMD_PEER_MEASUREMENT_COMPLETE,
+
+ NL80211_CMD_NOTIFY_RADAR,
+
+ NL80211_CMD_UPDATE_OWE_INFO,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -2130,10 +2186,10 @@
* &enum nl80211_external_auth_action value). This is used with the
* &NL80211_CMD_EXTERNAL_AUTH request event.
* @NL80211_ATTR_EXTERNAL_AUTH_SUPPORT: Flag attribute indicating that the user
- * space supports external authentication. This attribute shall be used
- * only with %NL80211_CMD_CONNECT request. The driver may offload
- * authentication processing to user space if this capability is indicated
- * in NL80211_CMD_CONNECT requests from the user space.
+ * space supports external authentication. This attribute shall be used
+ * with %NL80211_CMD_CONNECT and %NL80211_CMD_START_AP request. The driver
+ * may offload authentication processing to user space if this capability
+ * is indicated in the respective requests from the user space.
*
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -4923,6 +4979,38 @@
* @NL80211_EXT_FEATURE_LOW_SPAN_SCAN: Driver supports low span scan.
* @NL80211_EXT_FEATURE_LOW_POWER_SCAN: Driver supports low power scan.
* @NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN: Driver supports high accuracy scan.
+ * @NL80211_EXT_FEATURE_DFS_OFFLOAD: HW/driver will offload DFS actions.
+ * Device or driver will do all DFS-related actions by itself,
+ * informing user-space about CAC progress, radar detection event,
+ * channel change triggered by radar detection event.
+ * No need to start CAC from user-space, no need to react to
+ * "radar detected" event.
+ * @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211: Driver supports sending and
+ * receiving control port frames over nl80211 instead of the netdevice.
+ * @NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT: The driver supports data ack
+ * RSSI if the firmware supports it; this flag indicates ack RSSI
+ * support to nl80211.
+ * @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate
+ * TXQs.
+ * @NL80211_EXT_FEATURE_SCAN_RANDOM_SN: Driver/device supports randomizing the
+ * SN in probe request frames if requested by %NL80211_SCAN_FLAG_RANDOM_SN.
+ * @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data
+ * except for supported rates from the probe request content if requested
+ * by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag.
+ * @NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER: Driver supports enabling fine
+ * timing measurement responder role.
+ *
+ * @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0: Driver/device confirm that they are
+ * able to rekey an in-use key correctly. Userspace must not rekey PTK
+ * keys if this flag is not set. Ignoring this can leak clear text packets
+ * and/or freeze the connection.
+ *
+ * @NL80211_EXT_FEATURE_AIRTIME_FAIRNESS: Driver supports getting airtime
+ * fairness for transmitted packets and has enabled airtime fairness
+ * scheduling.
+ *
+ * @NL80211_EXT_FEATURE_AP_PMKSA_CACHING: Driver/device supports PMKSA caching
+ * (set/del PMKSA operations) in AP mode.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4953,6 +5041,16 @@
NL80211_EXT_FEATURE_LOW_SPAN_SCAN,
NL80211_EXT_FEATURE_LOW_POWER_SCAN,
NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN,
+ NL80211_EXT_FEATURE_DFS_OFFLOAD,
+ NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211,
+ NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT,
+ NL80211_EXT_FEATURE_TXQS,
+ NL80211_EXT_FEATURE_SCAN_RANDOM_SN,
+ NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT,
+ NL80211_EXT_FEATURE_CAN_REPLACE_PTK0,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS,
+ NL80211_EXT_FEATURE_AP_PMKSA_CACHING,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -5173,9 +5271,14 @@
* Used by cfg80211_rx_mgmt()
*
* @NL80211_RXMGMT_FLAG_ANSWERED: frame was answered by device/driver.
+ * @NL80211_RXMGMT_FLAG_EXTERNAL_AUTH: Host driver intends to offload
+ * the authentication. Exclusively defined for host drivers that
+ * advertise the SME functionality but would like the userspace
+ * to handle certain authentication algorithms (e.g. SAE).
*/
enum nl80211_rxmgmt_flags {
NL80211_RXMGMT_FLAG_ANSWERED = 1 << 0,
+ NL80211_RXMGMT_FLAG_EXTERNAL_AUTH = 1 << 1,
};
/*
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 839cfde..824eee7 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -161,6 +161,9 @@
|| (type) == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT \
|| (type) == V4L2_BUF_TYPE_SDR_OUTPUT)
+#define V4L2_TYPE_IS_PRIVATE(type) \
+ ((type) == V4L2_BUF_TYPE_PRIVATE)
+
enum v4l2_tuner_type {
V4L2_TUNER_RADIO = 1,
V4L2_TUNER_ANALOG_TV = 2,
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index b86886b..867fb0e 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -37,6 +37,8 @@
irq = find_first_bit(irqs_resend, nr_irqs);
clear_bit(irq, irqs_resend);
desc = irq_to_desc(irq);
+ if (!desc)
+ continue;
local_irq_disable();
desc->handle_irq(desc);
local_irq_enable();
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
old mode 100644
new mode 100755
index ba9dce4..26ea5c8
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1938,6 +1938,7 @@
.update_curr = update_curr_dl,
#ifdef CONFIG_SCHED_WALT
.fixup_walt_sched_stats = fixup_walt_sched_stats_common,
+ .fixup_cumulative_runnable_avg = walt_fixup_cumulative_runnable_avg,
#endif
};
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
old mode 100644
new mode 100755
index 3027a58..2889302
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4279,6 +4279,23 @@
return 0;
}
+#ifdef CONFIG_SCHED_WALT
+static inline void walt_propagate_cumulative_runnable_avg(u64 *accumulated,
+ u64 value, bool add)
+{
+ if (add)
+ *accumulated += value;
+ else
+ *accumulated -= value;
+}
+#else
+/*
+ * Provide a nop definition since cumulative_runnable_avg is not
+ * available in rq or cfs_rq when WALT is not enabled.
+ */
+#define walt_propagate_cumulative_runnable_avg(...)
+#endif
+
static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
@@ -4305,6 +4322,9 @@
dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
qcfs_rq->h_nr_running -= task_delta;
walt_dec_throttled_cfs_rq_stats(&qcfs_rq->walt_stats, cfs_rq);
+ walt_propagate_cumulative_runnable_avg(
+ &qcfs_rq->cumulative_runnable_avg,
+ cfs_rq->cumulative_runnable_avg, false);
if (qcfs_rq->load.weight)
dequeue = 0;
@@ -4313,6 +4333,9 @@
if (!se) {
sub_nr_running(rq, task_delta);
walt_dec_throttled_cfs_rq_stats(&rq->walt_stats, cfs_rq);
+ walt_propagate_cumulative_runnable_avg(
+ &rq->cumulative_runnable_avg,
+ cfs_rq->cumulative_runnable_avg, false);
}
cfs_rq->throttled = 1;
@@ -4376,6 +4399,9 @@
enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
cfs_rq->h_nr_running += task_delta;
walt_inc_throttled_cfs_rq_stats(&cfs_rq->walt_stats, tcfs_rq);
+ walt_propagate_cumulative_runnable_avg(
+ &cfs_rq->cumulative_runnable_avg,
+ tcfs_rq->cumulative_runnable_avg, true);
if (cfs_rq_throttled(cfs_rq))
break;
@@ -4384,6 +4410,9 @@
if (!se) {
add_nr_running(rq, task_delta);
walt_inc_throttled_cfs_rq_stats(&rq->walt_stats, tcfs_rq);
+ walt_propagate_cumulative_runnable_avg(
+ &rq->cumulative_runnable_avg,
+ tcfs_rq->cumulative_runnable_avg, true);
}
/* determine whether we need to wake up potentially idle cpu */
@@ -4827,6 +4856,30 @@
}
}
+#ifdef CONFIG_SCHED_WALT
+static void walt_fixup_cumulative_runnable_avg_fair(struct rq *rq,
+ struct task_struct *p,
+ u64 new_task_load)
+{
+ struct cfs_rq *cfs_rq;
+ struct sched_entity *se = &p->se;
+ s64 task_load_delta = (s64)new_task_load - p->ravg.demand;
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+
+ cfs_rq->cumulative_runnable_avg += task_load_delta;
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+ }
+
+ /* Fix up rq only if we didn't find any throttled cfs_rq */
+ if (!se)
+ walt_fixup_cumulative_runnable_avg(rq, p, new_task_load);
+}
+
+#endif /* CONFIG_SCHED_WALT */
+
#else /* CONFIG_CFS_BANDWIDTH */
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
{
@@ -4869,6 +4922,9 @@
static inline void update_runtime_enabled(struct rq *rq) {}
static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+#define walt_fixup_cumulative_runnable_avg_fair \
+ walt_fixup_cumulative_runnable_avg
+
#endif /* CONFIG_CFS_BANDWIDTH */
/**************************************************
@@ -11725,6 +11781,8 @@
#endif
#ifdef CONFIG_SCHED_WALT
.fixup_walt_sched_stats = walt_fixup_sched_stats_fair,
+ .fixup_cumulative_runnable_avg =
+ walt_fixup_cumulative_runnable_avg_fair,
#endif
};
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index ced7587..5fdbc23 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1051,7 +1051,7 @@
if (!rcu_access_pointer(group->poll_kworker)) {
struct sched_param param = {
- .sched_priority = MAX_RT_PRIO - 1,
+ .sched_priority = 1,
};
struct kthread_worker *kworker;
@@ -1061,7 +1061,7 @@
mutex_unlock(&group->trigger_lock);
return ERR_CAST(kworker);
}
- sched_setscheduler(kworker->task, SCHED_FIFO, ¶m);
+ sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, ¶m);
kthread_init_delayed_work(&group->poll_work,
psi_poll_work);
rcu_assign_pointer(group->poll_kworker, kworker);
@@ -1131,7 +1131,15 @@
* deadlock while waiting for psi_poll_work to acquire trigger_lock
*/
if (kworker_to_destroy) {
+ /*
+ * After the RCU grace period has expired, the worker
+ * can no longer be found through group->poll_kworker.
+ * But it might have been already scheduled before
+ * that - deschedule it cleanly before destroying it.
+ */
kthread_cancel_delayed_work_sync(&group->poll_work);
+ atomic_set(&group->poll_scheduled, 0);
+
kthread_destroy_worker(kworker_to_destroy);
}
kfree(t);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
old mode 100644
new mode 100755
index 1d704a4..4e56c08
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2649,6 +2649,7 @@
.update_curr = update_curr_rt,
#ifdef CONFIG_SCHED_WALT
.fixup_walt_sched_stats = fixup_walt_sched_stats_common,
+ .fixup_cumulative_runnable_avg = walt_fixup_cumulative_runnable_avg,
#endif
};
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
old mode 100644
new mode 100755
index e8faa55..430140e
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1566,6 +1566,9 @@
#ifdef CONFIG_SCHED_WALT
void (*fixup_walt_sched_stats)(struct rq *rq, struct task_struct *p,
u32 new_task_load, u32 new_pred_demand);
+ void (*fixup_cumulative_runnable_avg)(struct rq *rq,
+ struct task_struct *task,
+ u64 new_task_load);
#endif
};
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
old mode 100644
new mode 100755
index a6ce92c..8e7b7c1
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -139,5 +139,6 @@
.update_curr = update_curr_stop,
#ifdef CONFIG_SCHED_WALT
.fixup_walt_sched_stats = fixup_walt_sched_stats_common,
+ .fixup_cumulative_runnable_avg = walt_fixup_cumulative_runnable_avg,
#endif
};
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
old mode 100644
new mode 100755
index c34311b..0050ca5
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -53,6 +53,21 @@
static struct irq_work walt_cpufreq_irq_work;
static struct irq_work walt_migration_irq_work;
+void
+walt_fixup_cumulative_runnable_avg(struct rq *rq,
+ struct task_struct *p, u64 new_task_load)
+{
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+ struct walt_sched_stats *stats = &rq->walt_stats;
+
+ stats->cumulative_runnable_avg += task_load_delta;
+ if ((s64)stats->cumulative_runnable_avg < 0)
+ panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
+ task_load_delta, task_load(p));
+
+ walt_fixup_cum_window_demand(rq, task_load_delta);
+}
+
u64 sched_ktime_clock(void)
{
if (unlikely(sched_ktime_suspended))
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
old mode 100644
new mode 100755
index 7d505c3..9671e56
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -155,6 +155,11 @@
extern void set_window_start(struct rq *rq);
void account_irqtime(int cpu, struct task_struct *curr, u64 delta,
u64 wallclock);
+void walt_fixup_cumulative_runnable_avg(struct rq *rq, struct task_struct *p,
+ u64 new_task_load);
+
+
+
extern bool do_pl_notif(struct rq *rq);
#define SCHED_HIGH_IRQ_TIMEOUT 3
@@ -304,6 +309,9 @@
static inline void walt_rotate_work_init(void) { }
static inline void walt_rotation_checkpoint(int nr_big) { }
static inline void walt_update_last_enqueue(struct task_struct *p) { }
+static inline void walt_fixup_cumulative_runnable_avg(struct rq *rq,
+ struct task_struct *p,
+ u64 new_task_load) { }
static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
int event, u64 wallclock, u64 irqtime) { }
diff --git a/mm/cma.c b/mm/cma.c
index 7b3e3d5..e8292ee 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -49,11 +49,13 @@
{
return PFN_PHYS(cma->base_pfn);
}
+EXPORT_SYMBOL(cma_get_base);
unsigned long cma_get_size(const struct cma *cma)
{
return cma->count << PAGE_SHIFT;
}
+EXPORT_SYMBOL(cma_get_size);
const char *cma_get_name(const struct cma *cma)
{
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 1aeeadc..f435435 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -618,17 +618,23 @@
* batadv_v_ogm_aggr_packet - checks if there is another OGM aggregated
* @buff_pos: current position in the skb
* @packet_len: total length of the skb
- * @tvlv_len: tvlv length of the previously considered OGM
+ * @ogm2_packet: potential OGM2 in buffer
*
* Return: true if there is enough space for another OGM, false otherwise.
*/
-static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
- __be16 tvlv_len)
+static bool
+batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
+ const struct batadv_ogm2_packet *ogm2_packet)
{
int next_buff_pos = 0;
- next_buff_pos += buff_pos + BATADV_OGM2_HLEN;
- next_buff_pos += ntohs(tvlv_len);
+ /* check if there is enough space for the header */
+ next_buff_pos += buff_pos + sizeof(*ogm2_packet);
+ if (next_buff_pos > packet_len)
+ return false;
+
+ /* check if there is enough space for the optional TVLV */
+ next_buff_pos += ntohs(ogm2_packet->tvlv_len);
return (next_buff_pos <= packet_len) &&
(next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -775,7 +781,7 @@
ogm_packet = (struct batadv_ogm2_packet *)skb->data;
while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
- ogm_packet->tvlv_len)) {
+ ogm_packet)) {
batadv_v_ogm_process(skb, ogm_offset, if_incoming);
ogm_offset += BATADV_OGM2_HLEN;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 6406010e..7007683 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -372,7 +372,7 @@
struct nlmsghdr *nlh;
struct nlattr *nest;
- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
if (!nlh)
return -EMSGSIZE;
diff --git a/net/core/dev.c b/net/core/dev.c
index 7bce57b..14d7db7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7485,6 +7485,8 @@
ret = notifier_to_errno(ret);
if (ret) {
rollback_registered(dev);
+ rcu_barrier();
+
dev->reg_state = NETREG_UNREGISTERED;
}
/*
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 57743e5..3d91d43 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3100,6 +3100,25 @@
int pos;
int dummy;
+ if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
+ (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
+ /* gso_size is untrusted, and we have a frag_list with a linear
+ * non head_frag head.
+ *
+ * (we assume checking the first list_skb member suffices;
+ * i.e if either of the list_skb members have non head_frag
+ * head, then the first one has too).
+ *
+ * If head_skb's headlen does not fit requested gso_size, it
+ * means that the frag_list members do NOT terminate on exact
+ * gso_size boundaries. Hence we cannot perform skb_frag_t page
+ * sharing. Therefore we must fallback to copying the frag_list
+ * skbs; we do so by disabling SG.
+ */
+ if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
+ features &= ~NETIF_F_SG;
+ }
+
__skb_push(head_skb, doffset);
proto = skb_network_protocol(head_skb, &dummy);
if (unlikely(!proto))
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 435450f..82ed997 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -248,7 +248,7 @@
static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
{
- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+ tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
}
static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 2a965d4..45c1a5b 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -240,7 +240,7 @@
return ping_proc_register(net, &ping_v6_seq_afinfo);
}
-static void __net_init ping_v6_proc_exit_net(struct net *net)
+static void __net_exit ping_v6_proc_exit_net(struct net *net)
{
return ping_proc_unregister(net, &ping_v6_seq_afinfo);
}
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index e3ed200..562b545 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -323,7 +323,7 @@
i++;
}
- pr_debug("Skipped up to `%c'!\n", skip);
+ pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
*numoff = i;
*numlen = getnum(data + i, dlen - i, cmd, term, numoff);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b8031aa..da3a7c9 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -704,7 +704,11 @@
void qdisc_destroy(struct Qdisc *qdisc)
{
- const struct Qdisc_ops *ops = qdisc->ops;
+ const struct Qdisc_ops *ops;
+
+ if (!qdisc)
+ return;
+ ops = qdisc->ops;
if (qdisc->flags & TCQ_F_BUILTIN ||
!atomic_dec_and_test(&qdisc->refcnt))
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index f4b2d69..fe32239 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -543,7 +543,7 @@
new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
- if (non_hh_quantum > INT_MAX)
+ if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
return -EINVAL;
sch_tree_lock(sch);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d6af93a..833283c 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1336,7 +1336,7 @@
return status;
}
-static void __net_init sctp_ctrlsock_exit(struct net *net)
+static void __net_exit sctp_ctrlsock_exit(struct net *net)
{
/* Free the control endpoint. */
inet_ctl_sock_destroy(net->sctp.ctl_sock);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index b1ead17..8b4cf78 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -509,7 +509,7 @@
if (net->sctp.pf_enable &&
(transport->state == SCTP_ACTIVE) &&
(transport->error_count < transport->pathmaxrxt) &&
- (transport->error_count > asoc->pf_retrans)) {
+ (transport->error_count > transport->pf_retrans)) {
sctp_assoc_control_transport(asoc, transport,
SCTP_TRANSPORT_PF,
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 23f8899..7ebcaff 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -224,7 +224,8 @@
publ->key);
}
- kfree_rcu(p, rcu);
+ if (p)
+ kfree_rcu(p, rcu);
}
/**
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 3b7d778..71ef07a 100755
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3989,6 +3989,9 @@
return PTR_ERR(params.acl);
}
+ if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])
+ params.flags |= AP_SETTINGS_EXTERNAL_AUTH_SUPPORT;
+
wdev_lock(wdev);
err = rdev_start_ap(rdev, dev, ¶ms);
if (!err) {
@@ -9067,7 +9070,10 @@
}
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
+ !(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP &&
+ wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_AP_PMKSA_CACHING)))
return -EOPNOTSUPP;
switch (info->genlhdr->cmd) {
@@ -12052,7 +12058,9 @@
if (!rdev->ops->external_auth)
return -EOPNOTSUPP;
- if (!info->attrs[NL80211_ATTR_SSID])
+ if (!info->attrs[NL80211_ATTR_SSID] &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EINVAL;
if (!info->attrs[NL80211_ATTR_BSSID])
@@ -12063,21 +12071,52 @@
memset(¶ms, 0, sizeof(params));
- params.ssid.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
- if (params.ssid.ssid_len == 0 ||
- params.ssid.ssid_len > IEEE80211_MAX_SSID_LEN)
- return -EINVAL;
- memcpy(params.ssid.ssid, nla_data(info->attrs[NL80211_ATTR_SSID]),
- params.ssid.ssid_len);
+ if (info->attrs[NL80211_ATTR_SSID]) {
+ params.ssid.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
+ if (params.ssid.ssid_len == 0 ||
+ params.ssid.ssid_len > IEEE80211_MAX_SSID_LEN)
+ return -EINVAL;
+ memcpy(params.ssid.ssid,
+ nla_data(info->attrs[NL80211_ATTR_SSID]),
+ params.ssid.ssid_len);
+ }
memcpy(params.bssid, nla_data(info->attrs[NL80211_ATTR_BSSID]),
ETH_ALEN);
params.status = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
+ if (info->attrs[NL80211_ATTR_PMKID])
+ params.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]);
+
return rdev_external_auth(rdev, dev, ¶ms);
}
+static int nl80211_update_owe_info(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct cfg80211_update_owe_info owe_info;
+ struct net_device *dev = info->user_ptr[1];
+
+ if (!rdev->ops->update_owe_info)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_STATUS_CODE] ||
+ !info->attrs[NL80211_ATTR_MAC])
+ return -EINVAL;
+
+ memset(&owe_info, 0, sizeof(owe_info));
+ owe_info.status = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
+ nla_memcpy(owe_info.peer, info->attrs[NL80211_ATTR_MAC], ETH_ALEN);
+
+ if (info->attrs[NL80211_ATTR_IE]) {
+ owe_info.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+ owe_info.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ }
+
+ return rdev_update_owe_info(rdev, dev, &owe_info);
+}
+
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -12975,6 +13014,13 @@
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_UPDATE_OWE_INFO,
+ .doit = nl80211_update_owe_info,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
/* notification functions */
@@ -15015,6 +15061,46 @@
}
EXPORT_SYMBOL(cfg80211_external_auth_request);
+void cfg80211_update_owe_info_event(struct net_device *netdev,
+ struct cfg80211_update_owe_info *owe_info,
+ gfp_t gfp)
+{
+ struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+
+ trace_cfg80211_update_owe_info_event(wiphy, netdev, owe_info);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_UPDATE_OWE_INFO);
+ if (!hdr)
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, owe_info->peer))
+ goto nla_put_failure;
+
+ if (!owe_info->ie_len ||
+ nla_put(msg, NL80211_ATTR_IE, owe_info->ie_len, owe_info->ie))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
+ NL80211_MCGRP_MLME, gfp);
+ return;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_update_owe_info_event);
+
/* initialisation/exit functions */
int nl80211_init(void)
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 091806d..a0ff6b1 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -1168,4 +1168,17 @@
return ret;
}
+static inline int rdev_update_owe_info(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_update_owe_info *oweinfo)
+{
+ int ret = -EOPNOTSUPP;
+
+ trace_rdev_update_owe_info(&rdev->wiphy, dev, oweinfo);
+ if (rdev->ops->update_owe_info)
+ ret = rdev->ops->update_owe_info(&rdev->wiphy, dev, oweinfo);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 80ea75a..ef56df5 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -3090,6 +3090,44 @@
WIPHY_PR_ARG, NETDEV_PR_ARG,
BOOL_TO_STR(__entry->enabled))
);
+
+TRACE_EVENT(rdev_update_owe_info,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_update_owe_info *owe_info),
+ TP_ARGS(wiphy, netdev, owe_info),
+ TP_STRUCT__entry(WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(peer)
+ __field(u16, status)
+ __dynamic_array(u8, ie, owe_info->ie_len)),
+ TP_fast_assign(WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(peer, owe_info->peer);
+ __entry->status = owe_info->status;
+ memcpy(__get_dynamic_array(ie),
+ owe_info->ie, owe_info->ie_len);),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT
+ " status %d", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
+ __entry->status)
+);
+
+TRACE_EVENT(cfg80211_update_owe_info_event,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_update_owe_info *owe_info),
+ TP_ARGS(wiphy, netdev, owe_info),
+ TP_STRUCT__entry(WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(peer)
+ __dynamic_array(u8, ie, owe_info->ie_len)),
+ TP_fast_assign(WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(peer, owe_info->peer);
+ memcpy(__get_dynamic_array(ie), owe_info->ie,
+ owe_info->ie_len);),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT,
+ WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
+);
+
#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index f60baeb..b474450 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -71,6 +71,9 @@
{
struct request_key_auth *rka = key->payload.data[0];
+ if (!rka)
+ return;
+
seq_puts(m, "key:");
seq_puts(m, key->description);
if (key_is_positive(key))
@@ -88,6 +91,9 @@
size_t datalen;
long ret;
+ if (!rka)
+ return -EKEYREVOKED;
+
datalen = rka->callout_len;
ret = datalen;
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 32151d8..65fd60f 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -3,7 +3,6 @@
menuconfig SND_PCI
bool "PCI sound devices"
depends on PCI
- default y
help
Support for sound devices connected via the PCI bus.
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 182d92e..3cf139d 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -7,6 +7,9 @@
select SND_PCM
select AC97_BUS if SND_SOC_AC97_BUS
select SND_JACK
+ select SND_TIMER
+ select SND_HWDEP
+ select SND_SOC_COMPRESS
select REGMAP_I2C if I2C
select REGMAP_SPI if SPI_MASTER
---help---
@@ -30,7 +33,7 @@
select SND_DMAENGINE_PCM
config SND_SOC_COMPRESS
- bool
+ tristate
select SND_COMPRESS_OFFLOAD
config SND_SOC_TOPOLOGY
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index f32cfa4..6838f2e 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -11,7 +11,6 @@
config SND_USB_AUDIO
tristate "USB Audio/MIDI driver"
- select SND_HWDEP
select SND_RAWMIDI
select SND_PCM
select BITREVERSE
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index b4c5d96..7c2c8e7 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -3593,7 +3593,7 @@
void allocate_output_buffer()
{
- output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
+ output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
outp = output_buffer;
if (outp == NULL)
err(-1, "calloc output buffer");
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 571c1ce..5c1efb8 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -39,7 +39,7 @@
return 1;
}
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
{
struct kvm_coalesced_mmio_ring *ring;
unsigned avail;
@@ -51,7 +51,7 @@
* there is always one unused entry in the buffer
*/
ring = dev->kvm->coalesced_mmio_ring;
- avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+ avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
if (avail == 0) {
/* full */
return 0;
@@ -66,24 +66,27 @@
{
struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+ __u32 insert;
if (!coalesced_mmio_in_range(dev, addr, len))
return -EOPNOTSUPP;
spin_lock(&dev->kvm->ring_lock);
- if (!coalesced_mmio_has_room(dev)) {
+ insert = READ_ONCE(ring->last);
+ if (!coalesced_mmio_has_room(dev, insert) ||
+ insert >= KVM_COALESCED_MMIO_MAX) {
spin_unlock(&dev->kvm->ring_lock);
return -EOPNOTSUPP;
}
/* copy data in first free entry of the ring */
- ring->coalesced_mmio[ring->last].phys_addr = addr;
- ring->coalesced_mmio[ring->last].len = len;
- memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+ ring->coalesced_mmio[insert].phys_addr = addr;
+ ring->coalesced_mmio[insert].len = len;
+ memcpy(ring->coalesced_mmio[insert].data, val, len);
smp_wmb();
- ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+ ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
spin_unlock(&dev->kvm->ring_lock);
return 0;
}