Merge "wil6210: add debugfs blobs for UCODE code and data" into msm-4.9
diff --git a/AndroidKernel.mk b/AndroidKernel.mk
new file mode 100644
index 0000000..d28f7ba
--- /dev/null
+++ b/AndroidKernel.mk
@@ -0,0 +1,168 @@
+# Android makefile to build the kernel as a part of the Android build
+PERL = perl
+
+KERNEL_TARGET := $(strip $(INSTALLED_KERNEL_TARGET))
+ifeq ($(KERNEL_TARGET),)
+INSTALLED_KERNEL_TARGET := $(PRODUCT_OUT)/kernel
+endif
+
+TARGET_KERNEL_ARCH := $(strip $(TARGET_KERNEL_ARCH))
+ifeq ($(TARGET_KERNEL_ARCH),)
+KERNEL_ARCH := arm
+else
+KERNEL_ARCH := $(TARGET_KERNEL_ARCH)
+endif
+
+TARGET_KERNEL_HEADER_ARCH := $(strip $(TARGET_KERNEL_HEADER_ARCH))
+ifeq ($(TARGET_KERNEL_HEADER_ARCH),)
+KERNEL_HEADER_ARCH := $(KERNEL_ARCH)
+else
+$(warning Forcing kernel header generation only for '$(TARGET_KERNEL_HEADER_ARCH)')
+KERNEL_HEADER_ARCH := $(TARGET_KERNEL_HEADER_ARCH)
+endif
+
+KERNEL_HEADER_DEFCONFIG := $(strip $(KERNEL_HEADER_DEFCONFIG))
+ifeq ($(KERNEL_HEADER_DEFCONFIG),)
+KERNEL_HEADER_DEFCONFIG := $(KERNEL_DEFCONFIG)
+endif
+
+# Force 32-bit binder IPC for a 64-bit kernel with 32-bit userspace
+ifeq ($(KERNEL_ARCH),arm64)
+ifeq ($(TARGET_ARCH),arm)
+KERNEL_CONFIG_OVERRIDE := CONFIG_ANDROID_BINDER_IPC_32BIT=y
+endif
+endif
+
+TARGET_KERNEL_CROSS_COMPILE_PREFIX := $(strip $(TARGET_KERNEL_CROSS_COMPILE_PREFIX))
+ifeq ($(TARGET_KERNEL_CROSS_COMPILE_PREFIX),)
+KERNEL_CROSS_COMPILE := arm-eabi-
+else
+KERNEL_CROSS_COMPILE := $(TARGET_KERNEL_CROSS_COMPILE_PREFIX)
+endif
+
+ifeq ($(TARGET_PREBUILT_KERNEL),)
+
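+# Probe whether the cross toolchain's gcc accepts -mno-android; if the probe
+# compiles cleanly, build the kernel with KCFLAGS=-mno-android so the
+# Android-specific compiler defaults are disabled.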
+KERNEL_GCC_NOANDROID_CHK := $(shell (echo "int main() {return 0;}" | $(KERNEL_CROSS_COMPILE)gcc -E -mno-android - > /dev/null 2>&1 ; echo $$?))
+ifeq ($(strip $(KERNEL_GCC_NOANDROID_CHK)),0)
+KERNEL_CFLAGS := KCFLAGS=-mno-android
+endif
+
+mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST)))
+current_dir := $(notdir $(patsubst %/,%,$(dir $(mkfile_path))))
+TARGET_KERNEL := msm-$(TARGET_KERNEL_VERSION)
+ifeq ($(TARGET_KERNEL),$(current_dir))
+ # New style, kernel/msm-version
+ BUILD_ROOT_LOC := ../../
+ TARGET_KERNEL_SOURCE := kernel/$(TARGET_KERNEL)
+ KERNEL_OUT := $(TARGET_OUT_INTERMEDIATES)/kernel/$(TARGET_KERNEL)
+ KERNEL_SYMLINK := $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ
+ KERNEL_USR := $(KERNEL_SYMLINK)/usr
+else
+ # Legacy style, kernel source directly under kernel
+ KERNEL_LEGACY_DIR := true
+ BUILD_ROOT_LOC := ../
+ TARGET_KERNEL_SOURCE := kernel
+ KERNEL_OUT := $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ
+endif
+
+KERNEL_CONFIG := $(KERNEL_OUT)/.config
+
+ifeq ($(KERNEL_DEFCONFIG)$(wildcard $(KERNEL_CONFIG)),)
+$(error Kernel configuration not defined, cannot build kernel)
+else
+
+ifeq ($(TARGET_USES_UNCOMPRESSED_KERNEL),true)
+$(info Using uncompressed kernel)
+TARGET_PREBUILT_INT_KERNEL := $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/Image
+else
+ifeq ($(KERNEL_ARCH),arm64)
+TARGET_PREBUILT_INT_KERNEL := $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/Image.gz
+else
+TARGET_PREBUILT_INT_KERNEL := $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/zImage
+endif
+endif
+
+ifeq ($(TARGET_KERNEL_APPEND_DTB), true)
+$(info Using appended DTB)
+TARGET_PREBUILT_INT_KERNEL := $(TARGET_PREBUILT_INT_KERNEL)-dtb
+endif
+
+KERNEL_HEADERS_INSTALL := $(KERNEL_OUT)/usr
+KERNEL_MODULES_INSTALL := system
+KERNEL_MODULES_OUT := $(TARGET_OUT)/lib/modules
+
+TARGET_PREBUILT_KERNEL := $(TARGET_PREBUILT_INT_KERNEL)
+
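+# mv-modules flattens the installed module tree: it locates modules.dep, moves
+# every .ko found under its kernel/ subtree up into $(KERNEL_MODULES_OUT), and
+# clean-module-folder then deletes the emptied version-named directory.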
+define mv-modules
+mdpath=`find $(KERNEL_MODULES_OUT) -type f -name modules.dep`;\
+if [ "$$mdpath" != "" ];then\
+mpath=`dirname $$mdpath`;\
+ko=`find $$mpath/kernel -type f -name "*.ko"`;\
+for i in $$ko; do mv $$i $(KERNEL_MODULES_OUT)/; done;\
+fi
+endef
+
+define clean-module-folder
+mdpath=`find $(KERNEL_MODULES_OUT) -type f -name modules.dep`;\
+if [ "$$mdpath" != "" ];then\
+mpath=`dirname $$mdpath`; rm -rf $$mpath;\
+fi
+endef
+
+ifneq ($(KERNEL_LEGACY_DIR),true)
+$(KERNEL_USR): $(KERNEL_HEADERS_INSTALL)
+ rm -rf $(KERNEL_SYMLINK)
+ ln -s kernel/$(TARGET_KERNEL) $(KERNEL_SYMLINK)
+
+$(TARGET_PREBUILT_INT_KERNEL): $(KERNEL_USR)
+endif
+
+$(KERNEL_OUT):
+ mkdir -p $(KERNEL_OUT)
+
+$(KERNEL_CONFIG): $(KERNEL_OUT)
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_DEFCONFIG)
+ $(hide) if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \
+ echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \
+ echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) oldconfig; fi
+
+$(TARGET_PREBUILT_INT_KERNEL): $(KERNEL_OUT) $(KERNEL_HEADERS_INSTALL)
+ $(hide) echo "Building kernel..."
+ $(hide) rm -rf $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/dts
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS)
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_CFLAGS) modules
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) INSTALL_MOD_PATH=$(BUILD_ROOT_LOC)../$(KERNEL_MODULES_INSTALL) INSTALL_MOD_STRIP=1 ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) modules_install
+ $(mv-modules)
+ $(clean-module-folder)
+
+$(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT)
+ $(hide) if [ ! -z "$(KERNEL_HEADER_DEFCONFIG)" ]; then \
+ rm -f $(BUILD_ROOT_LOC)$(KERNEL_CONFIG); \
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_HEADER_DEFCONFIG); \
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) headers_install;\
+ if [ -d "$(KERNEL_HEADERS_INSTALL)/include/bringup_headers" ]; then \
+ cp -Rf $(KERNEL_HEADERS_INSTALL)/include/bringup_headers/* $(KERNEL_HEADERS_INSTALL)/include/ ;\
+ fi ;\
+ fi
+ $(hide) if [ "$(KERNEL_HEADER_DEFCONFIG)" != "$(KERNEL_DEFCONFIG)" ]; then \
+ echo "Used a different defconfig for header generation"; \
+ rm -f $(BUILD_ROOT_LOC)$(KERNEL_CONFIG); \
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(KERNEL_DEFCONFIG); fi
+ $(hide) if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \
+ echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \
+ echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) oldconfig; fi
+
+kerneltags: $(KERNEL_OUT) $(KERNEL_CONFIG)
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) tags
+
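+# kernelconfig runs menuconfig against the out-of-tree build, then copies the
+# minimized savedefconfig result back over the source defconfig.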
+kernelconfig: $(KERNEL_OUT) $(KERNEL_CONFIG)
+ env KCONFIG_NOTIMESTAMP=true \
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) menuconfig
+ env KCONFIG_NOTIMESTAMP=true \
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) savedefconfig
+ cp $(KERNEL_OUT)/defconfig $(TARGET_KERNEL_SOURCE)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_DEFCONFIG)
+
+endif
+endif
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 2e5c1ee..53e4295 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -84,6 +84,11 @@
* coresight-name: unique descriptive name of the component.
+* Additional required properties for coresight-dummy devices:
+	* qcom,dummy-source: Configure the device as a source.
+
+	* qcom,dummy-sink: Configure the device as a sink.
+
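+  Example (illustrative sketch only; the node label and coresight-name are
+  placeholders, while the "qcom,coresight-dummy" compatible matches the dummy
+  sink node used elsewhere in this tree):
+
+	dummy_sink: dummy_sink {
+		compatible = "qcom,coresight-dummy";
+		coresight-name = "coresight-dummy-sink";
+
+		qcom,dummy-sink;
+	};
+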
* Optional properties for all components:
* reg-names: names corresponding to each reg property value.
diff --git a/Documentation/devicetree/bindings/display/msm/sde-rsc.txt b/Documentation/devicetree/bindings/display/msm/sde-rsc.txt
new file mode 100644
index 0000000..7e54fdd
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/msm/sde-rsc.txt
@@ -0,0 +1,72 @@
+Qualcomm Technologies, Inc. SDE RSC
+
+Snapdragon Display Engine implements a display rsc to drive the
+display core into different modes for power saving.
+
+Required properties
+- compatible: Must be "qcom,sde-rsc"
+- reg: Offset and length of the register set for
+ the device.
+- reg-names: Names to refer to register sets related
+ to this device
+
+Optional properties:
+- clocks: List of phandles for clock device nodes
+ needed by the device.
+- clock-names: List of clock names needed by the device.
+- vdd-supply: phandle for vdd regulator device node.
+- qcom,sde-rsc-version: U32 property represents the rsc version. It helps to
+ select correct sequence for sde rsc based on version.
+- qcom,sde-dram-channels: U32 property represents the number of channels in the
+ Bus memory controller.
+- qcom,sde-num-nrt-paths: U32 property represents the number of non-realtime
+				paths in each Bus Scaling Usecase. This value depends on the
+				number of AXI ports that are dedicated to the non-realtime
+				VBIF for a particular chipset.
+ These paths must be defined after rt-paths in
+ "qcom,msm-bus,vectors-KBps" vector request.
+
+Bus Scaling Subnodes:
+- qcom,sde-data-bus: Property to provide Bus scaling for data bus access for
+ sde blocks.
+
+Bus Scaling Data:
+- qcom,msm-bus,name: String property describing client name.
+- qcom,msm-bus,active-only: Boolean context flag for requests in active or
+				dual (active & sleep) context.
+- qcom,msm-bus,num-cases: This is the number of Bus Scaling use cases
+ defined in the vectors property.
+- qcom,msm-bus,num-paths: This represents the number of paths in each
+ Bus Scaling Usecase.
+- qcom,msm-bus,vectors-KBps: * A series of 4 cell properties, with a format
+ of (src, dst, ab, ib) which is defined at
+ Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+ * Current values of src & dst are defined at
+ include/linux/msm-bus-board.h
+Example:
+ sde_rscc {
+ cell-index = <0>;
+ compatible = "qcom,sde-rsc";
+ reg = <0xaf20000 0x1c44>,
+ <0xaf30000 0x3fd4>;
+ reg-names = "drv", "wrapper";
+ clocks = <&clock_mmss clk_mdss_ahb_clk>,
+ <&clock_mmss clk_mdss_axi_clk>;
+ clock-names = "iface_clk", "bus_clk";
+ vdd-supply = <&gdsc_mdss>;
+
+ qcom,sde-rsc-version = <1>;
+ qcom,sde-dram-channels = <2>;
+ qcom,sde-num-nrt-paths = <1>;
+
+ qcom,sde-data-bus {
+ qcom,msm-bus,name = "sde_rsc";
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>, <23 512 0 0>,
+ <22 512 0 6400000>, <23 512 0 6400000>,
+ <22 512 0 6400000>, <23 512 0 6400000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index c7f43bc..62efecc 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -144,6 +144,13 @@
0xff = default value.
- qcom,mdss-dsi-border-color: Defines the border color value if border is present.
0 = default value.
+- qcom,mdss-dsi-panel-jitter:	An integer value that defines the panel jitter timing for rsc
+				backoff time. The jitter configuration causes an early
+				wakeup if the panel needs to adjust before vsync.
+				Default jitter value is 5%. Max allowed value is 25%.
+- qcom,mdss-dsi-panel-prefill-lines: An integer value that defines the panel prefill lines required to
+				calculate the backoff time of rsc.
+				Default value is 16 lines. Max allowed value is vtotal.
- qcom,mdss-dsi-pan-enable-dynamic-fps: Boolean used to enable change in frame rate dynamically.
- qcom,mdss-dsi-pan-fps-update: A string that specifies when to change the frame rate.
"dfps_suspend_resume_mode"= FPS change request is
@@ -634,6 +641,8 @@
<40 120 128>,
<128 240 64>;
qcom,mdss-dsi-panel-orientation = "180"
+ qcom,mdss-dsi-panel-jitter = <0x8>;
+ qcom,mdss-dsi-panel-prefill-lines = <0x10>;
qcom,mdss-dsi-force-clock-lane-hs;
qcom,compression-mode = "dsc";
qcom,adjust-timer-wakeup-ms = <1>;
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
new file mode 100644
index 0000000..f6b7552
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
@@ -0,0 +1,22 @@
+GENI-based Qualcomm Technologies Inc Universal Peripheral version 3 (QUPv3)
+ I2C controller
+
+Required properties:
+ - compatible: Should be:
+	* "qcom,i2c-geni".
+ - reg: Should contain QUP register address and length.
+ - interrupts: Should contain I2C interrupt.
+ - #address-cells: Should be <1> Address cells for i2c device address
+ - #size-cells: Should be <0> as i2c addresses have no size component
+
+Child nodes should conform to the i2c bus binding.
+
+Example:
+
+i2c@a94000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0xa94000 0x4000>;
+ interrupts = <GIC_SPI 358 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+};
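+
+A slave device is added as a child node (illustrative sketch only; the EEPROM
+part and address below are placeholders, not taken from any board):
+
+i2c@a94000 {
+	compatible = "qcom,i2c-geni";
+	reg = <0xa94000 0x4000>;
+	interrupts = <GIC_SPI 358 0>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	eeprom@50 {
+		compatible = "atmel,24c32";
+		reg = <0x50>;
+	};
+};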
diff --git a/Documentation/devicetree/bindings/qdsp/msm-cdsp-loader.txt b/Documentation/devicetree/bindings/qdsp/msm-cdsp-loader.txt
new file mode 100644
index 0000000..155514f
--- /dev/null
+++ b/Documentation/devicetree/bindings/qdsp/msm-cdsp-loader.txt
@@ -0,0 +1,16 @@
+Qualcomm Technologies, Inc. CDSP Loader Driver
+
+The msm-cdsp-loader driver implements the mechanism that allows loading CDSP firmware images.
+
+Required properties:
+
+ - compatible: This must be "qcom,cdsp-loader".
+ - qcom,proc-img-to-load: CDSP firmware name, must be "cdsp".
+
+Example:
+	The following example is for sdm660.
+
+ qcom,msm-cdsp-loader {
+ compatible = "qcom,cdsp-loader";
+ qcom,proc-img-to-load = "cdsp";
+ };
diff --git a/Documentation/media/uapi/v4l/pixfmt-007.rst b/Documentation/media/uapi/v4l/pixfmt-007.rst
index 44bb5a7..95a23a2 100644
--- a/Documentation/media/uapi/v4l/pixfmt-007.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-007.rst
@@ -211,7 +211,13 @@
The :ref:`srgb` standard defines the colorspace used by most webcams
and computer graphics. The default transfer function is
``V4L2_XFER_FUNC_SRGB``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full range.
+``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited range.
+
+Note that the :ref:`sycc` standard specifies full range quantization,
+however all current capture hardware supported by the kernel converts
+R'G'B' to limited range Y'CbCr. So choosing full range as the default
+would break how applications interpret the quantization range.
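+
+(For reference, with 8-bit data limited range coding maps Y' into [16, 235]
+and Cb/Cr into [16, 240], while full range uses the entire [0, 255] interval.)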
+
The chromaticities of the primary colors and the white reference are:
@@ -276,7 +282,7 @@
Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
[-0.5…0.5]. This transform is identical to one defined in SMPTE
-170M/BT.601. The Y'CbCr quantization is full range.
+170M/BT.601. The Y'CbCr quantization is limited range.
.. _col-adobergb:
@@ -288,10 +294,15 @@
graphics that use the AdobeRGB colorspace. This is also known as the
:ref:`oprgb` standard. The default transfer function is
``V4L2_XFER_FUNC_ADOBERGB``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full
-range. The chromaticities of the primary colors and the white reference
-are:
+``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited
+range.
+
+Note that the :ref:`oprgb` standard specifies full range quantization,
+however all current capture hardware supported by the kernel converts
+R'G'B' to limited range Y'CbCr. So choosing full range as the default
+would break how applications interpret the quantization range.
+
+The chromaticities of the primary colors and the white reference are:
.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
@@ -344,7 +355,7 @@
Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
[-0.5…0.5]. This transform is identical to one defined in SMPTE
-170M/BT.601. The Y'CbCr quantization is full range.
+170M/BT.601. The Y'CbCr quantization is limited range.
.. _col-bt2020:
diff --git a/Makefile b/Makefile
index 18d0eaa..30807c0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 9
+SUBLEVEL = 12
EXTRAVERSION =
NAME = Roaring Lionus
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 91ebe38..5f69c3b 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -243,7 +243,7 @@
 	/* clear any remnants of delay slot */
if (delay_mode(regs)) {
- regs->ret = regs->bta ~1U;
+ regs->ret = regs->bta & ~1U;
regs->status32 &= ~STATUS_DE_MASK;
} else {
regs->ret += state.instr_len;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 00be82f..63ea69d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -548,6 +548,31 @@
help
Support for Intel/Marvell's PXA2xx/PXA3xx processor line.
+config ARCH_QCOM
+ bool "Qualcomm MSM (non-multiplatform)"
+ select ARCH_REQUIRE_GPIOLIB
+ select CPU_V7
+ select AUTO_ZRELADDR
+ select HAVE_SMP
+ select CLKDEV_LOOKUP
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_ALLOCATOR
+ select ARM_PATCH_PHYS_VIRT
+ select ARM_HAS_SG_CHAIN
+ select ARCH_HAS_OPP
+ select SOC_BUS
+ select MULTI_IRQ_HANDLER
+ select PM_OPP
+ select SPARSE_IRQ
+ select USE_OF
+ select PINCTRL
+ help
+ Support for Qualcomm MSM/QSD based systems. This runs on the
+ apps processor of the MSM/QSD and depends on a shared memory
+ interface to the modem processor which runs the baseband
+ stack and controls some vital subsystems
+ (clock and power control, etc).
+
config ARCH_RPC
bool "RiscPC"
depends on MMU
@@ -1478,7 +1503,7 @@
config ARCH_NR_GPIO
int
default 1024 if ARCH_BRCMSTB || ARCH_SHMOBILE || ARCH_TEGRA || \
- ARCH_ZYNQ
+ ARCH_ZYNQ || ARCH_QCOM
default 512 if ARCH_EXYNOS || ARCH_KEYSTONE || SOC_OMAP5 || \
SOC_DRA7XX || ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210
default 416 if ARCH_SUNXI
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 17dcd94..771896f 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1712,7 +1712,7 @@
config UNCOMPRESS_INCLUDE
string
default "debug/uncompress.h" if ARCH_MULTIPLATFORM || ARCH_MSM || \
- PLAT_SAMSUNG || ARM_SINGLE_ARMV7M
+ ARCH_QCOM || PLAT_SAMSUNG || ARM_SINGLE_ARMV7M
default "mach/uncompress.h"
config EARLY_PRINTK
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index b53a7b4..f56516c 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -309,6 +309,8 @@
KBUILD_DTBS := dtbs
endif
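+# Additional dts subdirectories (under arch/arm/boot/dts) whose dtbs are
+# built alongside the top-level ones.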
+DTSSUBDIR := qcom
+
all: $(KBUILD_IMAGE) $(KBUILD_DTBS)
boot := arch/arm/boot
@@ -337,13 +339,9 @@
%.dtb: | scripts
$(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $(boot)/dts/$@
-PHONY += dtbs dtbs_install
-
-dtbs: prepare scripts
- $(Q)$(MAKE) $(build)=$(boot)/dts
-
-dtbs_install:
- $(Q)$(MAKE) $(dtbinst)=$(boot)/dts
+dtbs: scripts
+ $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) dtbs
+ $(foreach DIR, $(DTSSUBDIR), $(Q)$(MAKE) $(build)=$(boot)/dts/$(DIR) MACHINE=$(MACHINE) dtbs)
PHONY += vdso_install
vdso_install:
@@ -352,7 +350,7 @@
endif
zImage-dtb: vmlinux scripts dtbs
- $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+ $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) DTSSUBDIR=$(DTSSUBDIR) $(boot)/$@
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index da75630..4175dfe 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -13,10 +13,11 @@
OBJCOPYFLAGS :=-O binary -R .comment -S
-ifneq ($(MACHINE),)
-include $(MACHINE)/Makefile.boot
-endif
include $(srctree)/arch/arm/boot/dts/Makefile
+ifneq ($(DTSSUBDIR),)
+DTSSUBDIR_INCS=$(foreach DIR, $(DTSSUBDIR), $(addsuffix /Makefile, $(addprefix $(srctree)/arch/arm/boot/dts/, $(DIR))))
+include $(DTSSUBDIR_INCS)
+endif
# Note: the following conditions must always be true:
# ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 54f95d3..4266ea6 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -970,5 +970,10 @@
targets += dtbs dtbs_install
targets += $(DTB_LIST)
+ifeq ($(CONFIG_ARM64),y)
always := $(DTB_LIST)
+else
+dtbs: $(addprefix $(obj)/, $(DTB_LIST))
+ $(Q)rm -f $(obj)/../*.dtb
+endif
clean-files := *.dtb
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
index 1ade195..7aa120f 100644
--- a/arch/arm/boot/dts/imx6dl.dtsi
+++ b/arch/arm/boot/dts/imx6dl.dtsi
@@ -137,7 +137,7 @@
&gpio4 {
gpio-ranges = <&iomuxc 5 136 1>, <&iomuxc 6 145 1>, <&iomuxc 7 150 1>,
<&iomuxc 8 146 1>, <&iomuxc 9 151 1>, <&iomuxc 10 147 1>,
- <&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
+ <&iomuxc 11 152 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
<&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16 39 7>,
<&iomuxc 23 56 1>, <&iomuxc 24 61 7>, <&iomuxc 31 46 1>;
};
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
new file mode 100644
index 0000000..14422e5
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -0,0 +1,16 @@
+
+
+
+ifeq ($(CONFIG_ARM64),y)
+always := $(dtb-y)
+subdir-y := $(dts-dirs)
+else
+targets += dtbs
+targets += $(addprefix ../, $(dtb-y))
+
+$(obj)/../%.dtb: $(src)/%.dts FORCE
+ $(call if_changed_dep,dtc)
+
+dtbs: $(addprefix $(obj)/../,$(dtb-y))
+endif
+clean-files := *.dtb
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index bdd283b..58c6398 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -94,6 +94,21 @@
* DMA Cache Coherency
* ===================
*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -115,6 +130,8 @@
void (*dma_map_area)(const void *, size_t, int);
void (*dma_unmap_area)(const void *, size_t, int);
+ void (*dma_inv_range)(const void *, const void *);
+ void (*dma_clean_range)(const void *, const void *);
void (*dma_flush_range)(const void *, const void *);
};
@@ -140,6 +157,8 @@
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
+#define dmac_inv_range cpu_cache.dma_inv_range
+#define dmac_clean_range cpu_cache.dma_clean_range
#define dmac_flush_range cpu_cache.dma_flush_range
#else
@@ -159,6 +178,8 @@
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
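+/*
+ * Illustrative only, not a new API: the conventional pattern for the calls
+ * declared below when driving streaming DMA by hand is
+ *
+ *	dmac_clean_range(buf, buf + len);	// CPU wrote, device will read
+ *	... run the DMA transfer ...
+ *	dmac_inv_range(buf, buf + len);		// device wrote, CPU will read
+ */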
+extern void dmac_inv_range(const void *, const void *);
+extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);
#endif
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index 01c3d92..d14f310 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -155,6 +155,8 @@
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
+#define dmac_inv_range __glue(_CACHE, _dma_inv_range)
+#define dmac_clean_range __glue(_CACHE, _dma_clean_range)
#endif
#endif
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 021692c..66003a8 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -29,6 +29,7 @@
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
+#include <linux/msm_rtb.h>
#include <xen/xen.h>
/*
@@ -62,23 +63,21 @@
* the bus. Rather than special-case the machine, just let the compiler
* generate the access for CPUs prior to ARMv6.
*/
-#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
-#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
+#define __raw_readw_no_log(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
+#define __raw_writew_no_log(v, a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
#else
/*
* When running under a hypervisor, we want to avoid I/O accesses with
* writeback addressing modes as these incur a significant performance
* overhead (the address generation must be emulated in software).
*/
-#define __raw_writew __raw_writew
-static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr)
{
asm volatile("strh %1, %0"
: : "Q" (*(volatile u16 __force *)addr), "r" (val));
}
-#define __raw_readw __raw_readw
-static inline u16 __raw_readw(const volatile void __iomem *addr)
+static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
{
u16 val;
asm volatile("ldrh %0, %1"
@@ -88,22 +87,30 @@
}
#endif
-#define __raw_writeb __raw_writeb
-static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr)
{
asm volatile("strb %1, %0"
: : "Qo" (*(volatile u8 __force *)addr), "r" (val));
}
-#define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr)
{
asm volatile("str %1, %0"
: : "Qo" (*(volatile u32 __force *)addr), "r" (val));
}
-#define __raw_readb __raw_readb
-static inline u8 __raw_readb(const volatile void __iomem *addr)
+static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
+{
+ register u64 v asm ("r2");
+
+ v = val;
+
+ asm volatile("strd %1, %0"
+ : "+Qo" (*(volatile u64 __force *)addr)
+ : "r" (v));
+}
+
+static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
{
u8 val;
asm volatile("ldrb %0, %1"
@@ -112,8 +119,7 @@
return val;
}
-#define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
{
u32 val;
asm volatile("ldr %0, %1"
@@ -122,6 +128,58 @@
return val;
}
+static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
+{
+ register u64 val asm ("r2");
+
+ asm volatile("ldrd %1, %0"
+ : "+Qo" (*(volatile u64 __force *)addr),
+ "=r" (val));
+ return val;
+}
+
+/*
+ * There may be cases when clients don't want to support or can't support the
+ * logging. The appropriate functions can be used but clients should carefully
+ * consider why they can't support the logging.
+ */
+
+#define __raw_write_logged(v, a, _t) ({ \
+ int _ret; \
+ volatile void __iomem *_a = (a); \
+ void *_addr = (void __force *)(_a); \
+ _ret = uncached_logk(LOGK_WRITEL, _addr); \
+ ETB_WAYPOINT; \
+ __raw_write##_t##_no_log((v), _a); \
+ if (_ret) \
+ LOG_BARRIER; \
+ })
+
+
+#define __raw_writeb(v, a) __raw_write_logged((v), (a), b)
+#define __raw_writew(v, a) __raw_write_logged((v), (a), w)
+#define __raw_writel(v, a) __raw_write_logged((v), (a), l)
+#define __raw_writeq(v, a) __raw_write_logged((v), (a), q)
+
+#define __raw_read_logged(a, _l, _t) ({ \
+ unsigned _t __a; \
+ const volatile void __iomem *_a = (a); \
+ void *_addr = (void __force *)(_a); \
+ int _ret; \
+ _ret = uncached_logk(LOGK_READL, _addr); \
+ ETB_WAYPOINT; \
+ __a = __raw_read##_l##_no_log(_a);\
+ if (_ret) \
+ LOG_BARRIER; \
+ __a; \
+ })
+
+
+#define __raw_readb(a) __raw_read_logged((a), b, char)
+#define __raw_readw(a) __raw_read_logged((a), w, short)
+#define __raw_readl(a) __raw_read_logged((a), l, int)
+#define __raw_readq(a) __raw_read_logged((a), q, long long)
+
/*
* Architecture ioremap implementation.
*/
@@ -291,18 +349,32 @@
__raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
__raw_readl(c)); __r; })
+#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64) \
+ __raw_readq(c)); __r; })
+#define readb_relaxed_no_log(c) ({ u8 __r = __raw_readb_no_log(c); __r; })
+#define readl_relaxed_no_log(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+ __raw_readl_no_log(c)); __r; })
+#define readq_relaxed_no_log(c) ({ u64 __r = le64_to_cpu((__force __le64) \
+ __raw_readq_no_log(c)); __r; })
-#define writeb_relaxed(v,c) __raw_writeb(v,c)
-#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
-#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
+
+#define writeb_relaxed(v, c) __raw_writeb(v, c)
+#define writew_relaxed(v, c) __raw_writew((__force u16) cpu_to_le16(v), c)
+#define writel_relaxed(v, c) __raw_writel((__force u32) cpu_to_le32(v), c)
+#define writeq_relaxed(v, c) __raw_writeq((__force u64) cpu_to_le64(v), c)
+#define writeb_relaxed_no_log(v, c) ((void)__raw_writeb_no_log((v), (c)))
+#define writel_relaxed_no_log(v, c) __raw_writel_no_log((__force u32) cpu_to_le32(v), c)
+#define writeq_relaxed_no_log(v, c) __raw_writeq_no_log((__force u64) cpu_to_le64(v), c)
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; })
#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
+#define writeq(v, c) ({ __iowmb(); writeq_relaxed(v, c); })
#define readsb(p,d,l) __raw_readsb(p,d,l)
#define readsw(p,d,l) __raw_readsw(p,d,l)
@@ -410,6 +482,23 @@
void iounmap(volatile void __iomem *iomem_cookie);
#define iounmap iounmap
+/*
+ * io{read,write}{8,16,32,64} macros
+ */
+#ifndef ioread8
+#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
+#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
+#define ioread64(p)	({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(p)); __iormb(); __v; })
+
+#define ioread64be(p)	({ u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; })
+
+#define iowrite8(v, p) ({ __iowmb(); __raw_writeb(v, p); })
+#define iowrite16(v, p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); })
+#define iowrite32(v, p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); })
+#define iowrite64(v, p) ({ __iowmb(); __raw_writeq((__force __u64)cpu_to_le64(v), p); })
+
+#define iowrite64be(v, p) ({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); })
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
#define arch_memremap_wb arch_memremap_wb
@@ -431,6 +520,7 @@
#define ioport_unmap ioport_unmap
extern void ioport_unmap(void __iomem *addr);
#endif
+#endif
struct pci_dev;
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index ce131ed..ae738a6 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -600,7 +600,7 @@
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct pt_regs newregs;
+ struct pt_regs newregs = *task_pt_regs(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&newregs,
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 792340f..b27bef5 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -58,6 +58,144 @@
per_cpu(cpu_scale, cpu) = capacity;
}
+static int __init get_cpu_for_node(struct device_node *node)
+{
+ struct device_node *cpu_node;
+ int cpu;
+
+ cpu_node = of_parse_phandle(node, "cpu", 0);
+ if (!cpu_node)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+ of_node_put(cpu_node);
+ return cpu;
+ }
+ }
+
+ pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+
+ of_node_put(cpu_node);
+ return -EINVAL;
+}
+
+static int __init parse_core(struct device_node *core, int cluster_id,
+ int core_id)
+{
+ char name[10];
+ bool leaf = true;
+ int i = 0;
+ int cpu;
+ struct device_node *t;
+
+ do {
+ snprintf(name, sizeof(name), "thread%d", i);
+ t = of_get_child_by_name(core, name);
+ if (t) {
+ leaf = false;
+ cpu = get_cpu_for_node(t);
+ if (cpu >= 0) {
+ cpu_topology[cpu].socket_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ cpu_topology[cpu].thread_id = i;
+ } else {
+ pr_err("%s: Can't get CPU for thread\n",
+ t->full_name);
+ of_node_put(t);
+ return -EINVAL;
+ }
+ of_node_put(t);
+ }
+ i++;
+ } while (t);
+
+ cpu = get_cpu_for_node(core);
+ if (cpu >= 0) {
+ if (!leaf) {
+ pr_err("%s: Core has both threads and CPU\n",
+ core->full_name);
+ return -EINVAL;
+ }
+
+ cpu_topology[cpu].socket_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ } else if (leaf) {
+ pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init parse_cluster(struct device_node *cluster, int depth)
+{
+ static int cluster_id __initdata;
+ char name[10];
+ bool leaf = true;
+ bool has_cores = false;
+ struct device_node *c;
+ int core_id = 0;
+ int i, ret;
+
+ /*
+ * First check for child clusters; we currently ignore any
+ * information about the nesting of clusters and present the
+ * scheduler with a flat list of them.
+ */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "cluster%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ leaf = false;
+ ret = parse_cluster(c, depth + 1);
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ /* Now check for cores */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "core%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ has_cores = true;
+
+ if (depth == 0) {
+ pr_err("%s: cpu-map children should be clusters\n",
+ c->full_name);
+ of_node_put(c);
+ return -EINVAL;
+ }
+
+ if (leaf) {
+ ret = parse_core(c, cluster_id, core_id++);
+ } else {
+ pr_err("%s: Non-leaf cluster with core %s\n",
+ cluster->full_name, name);
+ ret = -EINVAL;
+ }
+
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ if (leaf && !has_cores)
+ pr_warn("%s: empty cluster\n", cluster->full_name);
+
+ if (leaf)
+ cluster_id++;
+
+ return 0;
+}
+
#ifdef CONFIG_OF
struct cpu_efficiency {
const char *compatible;
@@ -93,14 +231,40 @@
* 'average' CPU is of middle capacity. Also see the comments near
* table_efficiency[] and update_cpu_capacity().
*/
-static void __init parse_dt_topology(void)
+static int __init parse_dt_topology(void)
{
const struct cpu_efficiency *cpu_eff;
- struct device_node *cn = NULL;
+ struct device_node *cn = NULL, *map;
unsigned long min_capacity = ULONG_MAX;
unsigned long max_capacity = 0;
unsigned long capacity = 0;
- int cpu = 0;
+ int cpu = 0, ret = 0;
+
+ cn = of_find_node_by_path("/cpus");
+ if (!cn) {
+ pr_err("No CPU information found in DT\n");
+ return 0;
+ }
+
+ /*
+ * When topology is provided cpu-map is essentially a root
+ * cluster with restricted subnodes.
+ */
+ map = of_get_child_by_name(cn, "cpu-map");
+ if (!map)
+ goto out;
+
+ ret = parse_cluster(map, 0);
+ if (ret != 0)
+ goto out_map;
+
+ /*
+ * Check that all cores are in the topology; the SMP code will
+ * only mark cores described in the DT as possible.
+ */
+ for_each_possible_cpu(cpu)
+ if (cpu_topology[cpu].socket_id == -1)
+ ret = -EINVAL;
__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
GFP_NOWAIT);
@@ -156,7 +320,11 @@
else
middle_capacity = ((max_capacity / 3)
>> (SCHED_CAPACITY_SHIFT-1)) + 1;
-
+out_map:
+ of_node_put(map);
+out:
+ of_node_put(cn);
+ return ret;
}
static const struct sched_group_energy * const cpu_core_energy(int cpu);
@@ -182,7 +350,7 @@
}
#else
-static inline void parse_dt_topology(void) {}
+static inline int parse_dt_topology(void) { return 0; }
static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif
@@ -242,9 +410,8 @@
struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
unsigned int mpidr;
- /* If the cpu topology has been already set, just return */
if (cpuid_topo->core_id != -1)
- return;
+ goto topology_populated;
mpidr = read_cpuid_mpidr();
@@ -277,14 +444,14 @@
cpuid_topo->socket_id = -1;
}
- update_siblings_masks(cpuid);
-
- update_cpu_capacity(cpuid);
-
- pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
+ pr_info("CPU%u: thread %d, cpu %d, cluster %d, mpidr %x\n",
cpuid, cpu_topology[cpuid].thread_id,
cpu_topology[cpuid].core_id,
cpu_topology[cpuid].socket_id, mpidr);
+
+topology_populated:
+ update_siblings_masks(cpuid);
+ update_cpu_capacity(cpuid);
}
/*
@@ -442,7 +609,17 @@
}
smp_wmb();
- parse_dt_topology();
+ if (parse_dt_topology()) {
+ struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
+
+ cpu_topo->thread_id = -1;
+ cpu_topo->core_id = -1;
+ cpu_topo->socket_id = -1;
+ cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_clear(&cpu_topo->thread_sibling);
+
+ set_capacity_scale(cpu, SCHED_CAPACITY_SCALE);
+ }
for_each_possible_cpu(cpu)
update_siblings_masks(cpu);
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 8ecfd15..df73914 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -67,7 +67,7 @@
ENDPROC(__get_user_4)
ENTRY(__get_user_8)
- check_uaccess r0, 8, r1, r2, __get_user_bad
+ check_uaccess r0, 8, r1, r2, __get_user_bad8
#ifdef CONFIG_THUMB2_KERNEL
5: TUSER(ldr) r2, [r0]
6: TUSER(ldr) r3, [r0, #4]
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index 46ed10a..6e6ebcb 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -1,30 +1,46 @@
-menuconfig ARCH_QCOM
- bool "Qualcomm Support"
- depends on ARCH_MULTI_V7
- select ARCH_SUPPORTS_BIG_ENDIAN
- select ARM_GIC
- select ARM_AMBA
- select PINCTRL
- select QCOM_SCM if SMP
- help
- Support for Qualcomm's devicetree based systems.
-
if ARCH_QCOM
+menu "QCOM SoC Type"
config ARCH_MSM8X60
bool "Enable support for MSM8X60"
+ select ARCH_SUPPORTS_BIG_ENDIAN
+ select ARM_GIC
+ select ARM_AMBA
+ select QCOM_SCM if SMP
select CLKSRC_QCOM
+ select CLKSRC_OF
+ select COMMON_CLK
config ARCH_MSM8960
bool "Enable support for MSM8960"
select CLKSRC_QCOM
+ select ARCH_SUPPORTS_BIG_ENDIAN
+ select ARM_GIC
+ select ARM_AMBA
+ select QCOM_SCM if SMP
+ select CLKSRC_OF
+ select COMMON_CLK
+
config ARCH_MSM8974
bool "Enable support for MSM8974"
select HAVE_ARM_ARCH_TIMER
+ select ARCH_SUPPORTS_BIG_ENDIAN
+ select ARM_GIC
+ select ARM_AMBA
+ select QCOM_SCM if SMP
+ select CLKSRC_OF
+ select COMMON_CLK
config ARCH_MDM9615
bool "Enable support for MDM9615"
select CLKSRC_QCOM
+ select ARCH_SUPPORTS_BIG_ENDIAN
+ select ARM_GIC
+ select ARM_AMBA
+ select QCOM_SCM if SMP
+ select CLKSRC_OF
+ select COMMON_CLK
+endmenu
endif
diff --git a/arch/arm/mach-qcom/Makefile b/arch/arm/mach-qcom/Makefile
index 12878e9..e7ffa04 100644
--- a/arch/arm/mach-qcom/Makefile
+++ b/arch/arm/mach-qcom/Makefile
@@ -1 +1,2 @@
+obj-$(CONFIG_USE_OF) += board-dt.o
obj-$(CONFIG_SMP) += platsmp.o
diff --git a/arch/arm/mach-qcom/board-dt.c b/arch/arm/mach-qcom/board-dt.c
new file mode 100644
index 0000000..866cb74
--- /dev/null
+++ b/arch/arm/mach-qcom/board-dt.c
@@ -0,0 +1,28 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+
+#include "board-dt.h"
+
+void __init board_dt_populate(struct of_dev_auxdata *adata)
+{
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+
+ /* Explicitly parent the /soc devices to the root node to preserve
+ * the kernel ABI (sysfs structure, etc) until userspace is updated
+ */
+ of_platform_populate(of_find_node_by_path("/soc"),
+ of_default_bus_match_table, adata, NULL);
+}
diff --git a/arch/arm/mach-qcom/board-dt.h b/arch/arm/mach-qcom/board-dt.h
new file mode 100644
index 0000000..0f36e04
--- /dev/null
+++ b/arch/arm/mach-qcom/board-dt.h
@@ -0,0 +1,15 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of_platform.h>
+
+void __init board_dt_populate(struct of_dev_auxdata *adata);
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index a134d8a..2ddf364 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -349,7 +349,7 @@
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v7_dma_inv_range:
+ENTRY(v7_dma_inv_range)
dcache_line_size r2, r3
sub r3, r2, #1
tst r0, r3
@@ -377,7 +377,7 @@
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v7_dma_clean_range:
+ENTRY(v7_dma_clean_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 217ddb2..aec74bf 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -610,9 +610,9 @@
void __init early_abt_enable(void)
{
- fsr_info[22].fn = early_abort_handler;
+ fsr_info[FSR_FS_AEA].fn = early_abort_handler;
local_abt_enable();
- fsr_info[22].fn = do_bad;
+ fsr_info[FSR_FS_AEA].fn = do_bad;
}
#ifndef CONFIG_ARM_LPAE
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index 67532f2..afc1f84 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -11,11 +11,15 @@
#define FSR_FS5_0 (0x3f)
#ifdef CONFIG_ARM_LPAE
+#define FSR_FS_AEA 17
+
static inline int fsr_fs(unsigned int fsr)
{
return fsr & FSR_FS5_0;
}
#else
+#define FSR_FS_AEA 22
+
static inline int fsr_fs(unsigned int fsr)
{
return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 0d40c28..60bd916 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -325,6 +325,8 @@
.long \name\()_flush_kern_dcache_area
.long \name\()_dma_map_area
.long \name\()_dma_unmap_area
+ .long \name\()_dma_inv_range
+ .long \name\()_dma_clean_range
.long \name\()_dma_flush_range
.size \name\()_cache_fns, . - \name\()_cache_fns
.endm
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 054b491..70e8b7d 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -30,6 +30,9 @@
EXPORT_SYMBOL(__cpuc_flush_user_range);
EXPORT_SYMBOL(__cpuc_coherent_kern_range);
EXPORT_SYMBOL(__cpuc_flush_dcache_area);
+EXPORT_SYMBOL(dmac_inv_range);
+EXPORT_SYMBOL(dmac_clean_range);
+EXPORT_SYMBOL(dmac_flush_range);
#else
EXPORT_SYMBOL(cpu_cache);
#endif
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0b50576..6293973 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -838,7 +838,7 @@
endif
config ARM64_SW_TTBR0_PAN
- bool "Emulate Priviledged Access Never using TTBR0_EL1 switching"
+ bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
help
Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index d36c0ff..e7ff343 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -13,11 +13,19 @@
&soc {
replicator_qdss: replicator@6046000 {
- compatible = "arm,coresight-replicator";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b909>;
+
+ reg = <0x6046000 0x1000>;
+ reg-names = "replicator-base";
coresight-name = "coresight-replicator";
- ports{
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -40,7 +48,215 @@
};
};
- tmc_etr:tmc@6048000 {
+ replicator_swao: replicator@6b0a000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b909>;
+
+ reg = <0x6b0a000 0x1000>;
+ reg-names = "replicator-base";
+
+ coresight-name = "coresight-replicator-swao";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ replicator_swao_in_tmc_etf_swao: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tmc_etf_swao_out_replicator>;
+ };
+ };
+
+ /* Always have EUD before funnel leading to ETR. If both
+			 * sinks are active we need to give preference to EUD
+ * over ETR
+ */
+ port@1 {
+ reg = <1>;
+ replicator_swao_out_eud: endpoint {
+ remote-endpoint =
+ <&eud_in_replicator_swao>;
+ };
+ };
+
+ port@2 {
+ reg = <0>;
+ replicator_swao_out_funnel_in2: endpoint {
+ remote-endpoint =
+ <&funnel_in2_in_replicator_swao>;
+ };
+ };
+
+ };
+ };
+
+ tmc_etf_swao: tmc@6b09000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b961>;
+
+ reg = <0x6b09000 0x1000>;
+ reg-names = "tmc-base";
+
+ coresight-name = "coresight-tmc-etf-swao";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ tmc_etf_swao_out_replicator: endpoint {
+				remote-endpoint =
+ <&replicator_swao_in_tmc_etf_swao>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ tmc_etf_swao_in_funnel_swao: endpoint {
+ slave-mode;
+				remote-endpoint =
+ <&funnel_swao_out_tmc_etf_swao>;
+ };
+ };
+ };
+
+ };
+
+	funnel_swao: funnel@6b08000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6b08000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-swao";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_swao_out_tmc_etf_swao: endpoint {
+ remote-endpoint =
+ <&tmc_etf_swao_in_funnel_swao>;
+ };
+ };
+
+ port@1 {
+ reg = <7>;
+ funnel_swao_in_tpda_swao: endpoint {
+ slave-mode;
+				remote-endpoint =
+ <&tpda_swao_out_funnel_swao>;
+ };
+ };
+ };
+ };
+
+ tpda_swao: tpda@6b01000 {
+ compatible = "qcom,coresight-tpda";
+ reg = <0x6b01000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda-swao";
+
+ qcom,tpda-atid = <71>;
+ qcom,dsb-elem-size = <1 32>;
+ qcom,cmb-elem-size = <0 64>;
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ tpda_swao_out_funnel_swao: endpoint {
+ remote-endpoint =
+ <&funnel_swao_in_tpda_swao>;
+ };
+
+ };
+
+ port@1 {
+ reg = <0>;
+ tpda_swao_in_tpdm_swao0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_swao0_out_tpda_swao>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ tpda_swao_in_tpdm_swao1: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_swao1_out_tpda_swao>;
+ };
+
+ };
+ };
+ };
+
+ tpdm_swao0: tpdm@6b02000 {
+ compatible = "qcom,coresight-tpdm";
+
+ reg = <0x6b02000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-swao-0";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port {
+ tpdm_swao0_out_tpda_swao: endpoint {
+ remote-endpoint = <&tpda_swao_in_tpdm_swao0>;
+ };
+ };
+ };
+
+ tpdm_swao1: tpdm@6b03000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x6b03000 0x1000>;
+ reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-swao-1";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port {
+ tpdm_swao1_out_tpda_swao: endpoint {
+ remote-endpoint = <&tpda_swao_in_tpdm_swao1>;
+ };
+ };
+ };
+
+ tmc_etr: tmc@6048000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x0003b961>;
@@ -65,7 +281,7 @@
};
};
- tmc_etf:tmc@6047000 {
+ tmc_etf: tmc@6047000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x0003b961>;
@@ -104,6 +320,51 @@
};
+ funnel_merg: funnel@6045000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6045000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-merg";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_merg_out_tmc_etf: endpoint {
+ remote-endpoint =
+ <&tmc_etf_in_funnel_merg>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ funnel_merg_in_funnel_in0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_in0_out_funnel_merg>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ funnel_merg_in_funnel_in2: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_in2_out_funnel_merg>;
+ };
+ };
+ };
+ };
+
stm: stm@6002000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x0003b962>;
@@ -152,6 +413,24 @@
};
port@1 {
+ reg = <3>;
+ funnel_in0_in_funnel_spss: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_spss_out_funnel_in0>;
+ };
+ };
+
+ port@2 {
+ reg = <6>;
+ funnel_in0_in_funnel_qatb: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_qatb_out_funnel_in0>;
+ };
+ };
+
+ port@3 {
reg = <7>;
funnel_in0_in_stm: endpoint {
slave-mode;
@@ -161,14 +440,14 @@
};
};
- funnel_merg:funnel@6045000 {
+	funnel_in2: funnel@6043000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x0003b908>;
- reg = <0x6045000 0x1000>;
+ reg = <0x6043000 0x1000>;
reg-names = "funnel-base";
- coresight-name = "coresight-funnel-merg";
+ coresight-name = "coresight-funnel-in2";
clocks = <&clock_gcc RPMH_QDSS_CLK>,
<&clock_gcc RPMH_QDSS_A_CLK>;
@@ -180,18 +459,325 @@
port@0 {
reg = <0>;
- funnel_merg_out_tmc_etf: endpoint {
+ funnel_in2_out_funnel_merg: endpoint {
remote-endpoint =
- <&tmc_etf_in_funnel_merg>;
+ <&funnel_merg_in_funnel_in2>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ funnel_in2_in_replicator_swao: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&replicator_swao_out_funnel_in2>;
+ };
+
+ };
+
+ port@2 {
+ reg = <5>;
+ funnel_in2_in_funnel_apss_merg: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_apss_merg_out_funnel_in2>;
+ };
+ };
+
+ };
+ };
+
+ tpda: tpda@6004000 {
+ compatible = "qcom,coresight-tpda";
+ reg = <0x6004000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda";
+
+ qcom,tpda-atid = <65>;
+ qcom,bc-elem-size = <13 32>;
+ qcom,tc-elem-size = <7 32>,
+ <13 32>;
+ qcom,dsb-elem-size = <13 32>;
+ qcom,cmb-elem-size = <7 32>,
+ <8 32>,
+ <13 64>;
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_out_funnel_qatb: endpoint {
+ remote-endpoint =
+ <&funnel_qatb_in_tpda>;
+ };
+
+ };
+
+ port@1 {
+ reg = <7>;
+ tpda_in_tpdm_vsense: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_vsense_out_tpda>;
+ };
+ };
+
+ port@2 {
+ reg = <8>;
+ tpda_in_tpdm_dcc: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_dcc_out_tpda>;
+ };
+ };
+
+ port@3 {
+ reg = <13>;
+ tpda_in_tpdm_pimem: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_pimem_out_tpda>;
+ };
+ };
+ };
+ };
+
+ tpdm_pimem: tpdm@6850000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x6850000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-pimem";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port {
+ tpdm_pimem_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_pimem>;
+ };
+ };
+ };
+
+
+ tpdm_dcc: tpdm@6870000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x6870000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-dcc";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ port {
+ tpdm_dcc_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_dcc>;
+ };
+ };
+ };
+
+ tpdm_vsense: tpdm@6840000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x6840000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-vsense";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+		port {
+ tpdm_vsense_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_vsense>;
+ };
+ };
+ };
+
+ tpda_olc: tpda@7832000 {
+ compatible = "qcom,coresight-tpda";
+ reg = <0x7832000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda-olc";
+
+ qcom,tpda-atid = <69>;
+ qcom,cmb-elem-size = <0 64>;
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_olc_out_funnel_apss_merg: endpoint {
+ remote-endpoint =
+ <&funnel_apss_merg_in_tpda_olc>;
+ };
+ };
+ port@1 {
+ reg = <0>;
+ tpda_olc_in_tpdm_olc: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_olc_out_tpda_olc>;
+ };
+ };
+ };
+ };
+
+ tpdm_olc: tpdm@7830000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x7830000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-olc";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+		port {
+ tpdm_olc_out_tpda_olc: endpoint {
+ remote-endpoint = <&tpda_olc_in_tpdm_olc>;
+ };
+ };
+ };
+
+ tpda_spss: tpda@6882000 {
+ compatible = "qcom,coresight-tpda";
+ reg = <0x6882000 0x1000>;
+ reg-names = "tpda-base";
+
+ coresight-name = "coresight-tpda-spss";
+
+ qcom,tpda-atid = <70>;
+ qcom,dsb-elem-size = <0 32>;
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ tpda_spss_out_funnel_spss: endpoint {
+ remote-endpoint =
+ <&funnel_spss_in_tpda_spss>;
+ };
+ };
+ port@1 {
+ reg = <0>;
+ tpda_spss_in_tpdm_spss: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_spss_out_tpda_spss>;
+ };
+ };
+ };
+ };
+
+ tpdm_spss: tpdm@6880000 {
+ compatible = "qcom,coresight-tpdm";
+ reg = <0x6880000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-spss";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ qcom,msr-fix-req;
+
+		port {
+ tpdm_spss_out_tpda_spss: endpoint {
+ remote-endpoint = <&tpda_spss_in_tpdm_spss>;
+ };
+ };
+ };
+
+ funnel_spss: funnel@6883000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6883000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-spss";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_spss_out_funnel_in0: endpoint {
+ remote-endpoint =
+ <&funnel_in0_in_funnel_spss>;
};
};
port@1 {
reg = <0>;
- funnel_merg_in_funnel_in0: endpoint {
+ funnel_spss_in_tpda_spss: endpoint {
slave-mode;
remote-endpoint =
- <&funnel_in0_out_funnel_merg>;
+ <&tpda_spss_out_funnel_spss>;
+ };
+ };
+ };
+ };
+
+ funnel_qatb: funnel@6005000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6005000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-qatb";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_qatb_out_funnel_in0: endpoint {
+ remote-endpoint =
+ <&funnel_in0_in_funnel_qatb>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ funnel_qatb_in_tpda: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpda_out_funnel_qatb>;
};
};
};
@@ -389,9 +975,9 @@
clock-names = "core_clk", "core_a_clk";
};
- cti_cpu0: cti@7420000 {
+ cti_cpu0: cti@7020000 {
compatible = "arm,coresight-cti";
- reg = <0x7420000 0x1000>;
+ reg = <0x7020000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu0";
@@ -402,9 +988,9 @@
clock-names = "core_clk", "core_a_clk";
};
- cti_cpu1: cti@7520000 {
+ cti_cpu1: cti@7120000 {
compatible = "arm,coresight-cti";
- reg = <0x7520000 0x1000>;
+ reg = <0x7120000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu1";
@@ -415,9 +1001,9 @@
clock-names = "core_clk", "core_a_clk";
};
- cti_cpu2: cti@7620000 {
+ cti_cpu2: cti@7220000 {
compatible = "arm,coresight-cti";
- reg = <0x7620000 0x1000>;
+ reg = <0x7220000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu2";
@@ -428,9 +1014,9 @@
clock-names = "core_clk", "core_a_clk";
};
- cti_cpu3: cti@7720000 {
+ cti_cpu3: cti@7320000 {
compatible = "arm,coresight-cti";
- reg = <0x7720000 0x1000>;
+ reg = <0x7320000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu3";
@@ -441,9 +1027,9 @@
clock-names = "core_clk", "core_a_clk";
};
- cti_cpu4: cti@7020000 {
+ cti_cpu4: cti@7420000 {
compatible = "arm,coresight-cti";
- reg = <0x7020000 0x1000>;
+ reg = <0x7420000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu4";
@@ -454,9 +1040,9 @@
clock-names = "core_clk", "core_a_clk";
};
- cti_cpu5: cti@7120000 {
+ cti_cpu5: cti@7520000 {
compatible = "arm,coresight-cti";
- reg = <0x7120000 0x1000>;
+ reg = <0x7520000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu5";
@@ -467,9 +1053,9 @@
clock-names = "core_clk", "core_a_clk";
};
- cti_cpu6: cti@7220000 {
+ cti_cpu6: cti@7620000 {
compatible = "arm,coresight-cti";
- reg = <0x7220000 0x1000>;
+ reg = <0x7620000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu6";
@@ -480,9 +1066,9 @@
clock-names = "core_clk", "core_a_clk";
};
- cti_cpu7: cti@7320000 {
+ cti_cpu7: cti@7720000 {
compatible = "arm,coresight-cti";
- reg = <0x7320000 0x1000>;
+ reg = <0x7720000 0x1000>;
reg-names = "cti-base";
coresight-name = "coresight-cti-cpu7";
@@ -492,4 +1078,91 @@
<&clock_gcc RPMH_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
+
+ dummy_eud: dummy_sink {
+ compatible = "qcom,coresight-dummy";
+
+ coresight-name = "coresight-eud";
+
+ qcom,dummy-sink;
+ port {
+ eud_in_replicator_swao: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&replicator_swao_out_eud>;
+ };
+ };
+ };
+
+ funnel_apss_merg: funnel@7810000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x7810000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-apss-merg";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_apss_merg_out_funnel_in2: endpoint {
+ remote-endpoint =
+ <&funnel_in2_in_funnel_apss_merg>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ funnel_apss_merg_in_funnel_apss: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_apss_out_funnel_apss_merg>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ funnel_apss_merg_in_tpda_olc: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpda_olc_out_funnel_apss_merg>;
+ };
+ };
+ };
+ };
+
+ funnel_apss: funnel@7800000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x7800000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-apss";
+
+ clocks = <&clock_gcc RPMH_QDSS_CLK>,
+ <&clock_gcc RPMH_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "core_a_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_apss_out_funnel_apss_merg: endpoint {
+ remote-endpoint =
+ <&funnel_apss_merg_in_funnel_apss>;
+ };
+ };
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 342eec7..34743ca 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -36,6 +36,9 @@
#interrupt-cells = <1>;
iommus = <&apps_smmu 0x880>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
/* hw blocks */
qcom,sde-off = <0x1000>;
qcom,sde-len = <0x45C>;
@@ -162,4 +165,30 @@
<1 590 0 300000>;
};
};
+
+ sde_rscc: qcom,sde_rscc@af20000 {
+ status = "disabled";
+ cell-index = <0>;
+ compatible = "qcom,sde-rsc";
+ reg = <0xaf20000 0x1c44>,
+ <0xaf30000 0x3fd4>;
+ reg-names = "drv", "wrapper";
+ qcom,sde-rsc-version = <1>;
+
+ vdd-supply = <&mdss_core_gdsc>;
+ qcom,sde-dram-channels = <2>;
+
+ /* data and reg bus scale settings */
+ qcom,sde-data-bus {
+ qcom,msm-bus,name = "disp_rsc";
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,vectors-KBps =
+ <20003 20512 0 0>, <20004 20512 0 0>,
+ <20003 20512 0 6400000>, <20004 20512 0 6400000>,
+ <20003 20512 0 6400000>, <20004 20512 0 6400000>;
+ };
+
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index f3b529c..8974ef8 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -710,7 +710,7 @@
<0x1f65000 0x008>,
<0x1f64000 0x008>,
<0x4180000 0x020>,
- <0x00179000 0x004>;
+ <0xc2b0000 0x004>;
reg-names = "qdsp6_base", "halt_q6", "halt_modem",
"halt_nc", "rmb_base", "restart_reg";
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index e029f82..283103f 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -53,6 +53,7 @@
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
# CONFIG_EFI is not set
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -61,6 +62,7 @@
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_IDLE=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
@@ -382,6 +384,8 @@
CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_ICNSS=y
CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
CONFIG_EXTCON=y
CONFIG_IIO=y
CONFIG_QCOM_RRADC=y
@@ -414,7 +418,10 @@
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
CONFIG_CORESIGHT_QCOM_REPLICATOR=y
CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_DUMMY=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 6cca05a..9ca8e0a 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -57,6 +57,7 @@
CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
@@ -65,6 +66,7 @@
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
CONFIG_PM_DEBUG=y
+CONFIG_CPU_IDLE=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
@@ -346,6 +348,9 @@
CONFIG_MMC_SDHCI_MSM=y
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_KRYO3XX_ARM64=y
+CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_CE=y
+CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y
CONFIG_EDAC_QCOM_LLCC=y
CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE=y
CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y
@@ -394,6 +399,8 @@
CONFIG_ICNSS=y
CONFIG_ICNSS_DEBUG=y
CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
CONFIG_EXTCON=y
CONFIG_IIO=y
CONFIG_QCOM_RRADC=y
@@ -434,6 +441,8 @@
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_SCHEDSTATS=y
@@ -463,7 +472,10 @@
CONFIG_CORESIGHT_SOURCE_ETM4X=y
CONFIG_CORESIGHT_QCOM_REPLICATOR=y
CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_DUMMY=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 71dfa3b..85c4a89 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -21,10 +21,7 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
-#include <asm/alternative.h>
-#include <asm/cpufeature.h>
#include <asm/errno.h>
-#include <asm/sysreg.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
do { \
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 8740297..1473fc2 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 7
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index b77197d..96fbe7a 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -56,5 +56,8 @@
return (low <= sp && sp <= high);
}
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+
#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 52a0e43..0363fe8 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -208,9 +208,11 @@
* Update the saved TTBR0_EL1 of the scheduled-in task as the previous
* value may have not been initialised yet (activate_mm caller) or the
* ASID has changed since the last run (following the context switch
- * of another thread of the same process).
+ * of another thread of the same process). Avoid setting the reserved
+ * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
*/
- update_saved_ttbr0(tsk, next);
+ if (next != &init_mm)
+ update_saved_ttbr0(tsk, next);
}
#define deactivate_mm(tsk,mm) do { } while (0)
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 8b2703e..d1472eb 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -51,6 +51,9 @@
u64 ttbr0; /* saved TTBR0_EL1 */
#endif
struct task_struct *task; /* main task structure */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ u64 ttbr0; /* saved TTBR0_EL1 */
+#endif
int preempt_count; /* 0 => preemptable, <0 => bug */
int cpu; /* cpu */
};
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 9e06272..73fee2c 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -18,6 +18,10 @@
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H
+#include <asm/alternative.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/sysreg.h>
+
#ifndef __ASSEMBLY__
/*
@@ -28,11 +32,9 @@
#include <linux/string.h>
#include <linux/thread_info.h>
-#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/kernel-pgtable.h>
#include <asm/ptrace.h>
-#include <asm/sysreg.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>
@@ -126,7 +128,7 @@
* User access enabling/disabling.
*/
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-static inline void uaccess_ttbr0_disable(void)
+static inline void __uaccess_ttbr0_disable(void)
{
unsigned long ttbr;
@@ -136,7 +138,7 @@
isb();
}
-static inline void uaccess_ttbr0_enable(void)
+static inline void __uaccess_ttbr0_enable(void)
{
unsigned long flags;
@@ -150,30 +152,44 @@
isb();
local_irq_restore(flags);
}
-#else
-static inline void uaccess_ttbr0_disable(void)
+
+static inline bool uaccess_ttbr0_disable(void)
{
+ if (!system_uses_ttbr0_pan())
+ return false;
+ __uaccess_ttbr0_disable();
+ return true;
}
-static inline void uaccess_ttbr0_enable(void)
+static inline bool uaccess_ttbr0_enable(void)
{
+ if (!system_uses_ttbr0_pan())
+ return false;
+ __uaccess_ttbr0_enable();
+ return true;
+}
+#else
+static inline bool uaccess_ttbr0_disable(void)
+{
+ return false;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+ return false;
}
#endif
#define __uaccess_disable(alt) \
do { \
- if (system_uses_ttbr0_pan()) \
- uaccess_ttbr0_disable(); \
- else \
+ if (!uaccess_ttbr0_disable()) \
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \
CONFIG_ARM64_PAN)); \
} while (0)
#define __uaccess_enable(alt) \
do { \
- if (system_uses_ttbr0_pan()) \
- uaccess_ttbr0_enable(); \
- else \
+ if (!uaccess_ttbr0_enable()) \
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \
CONFIG_ARM64_PAN)); \
} while (0)
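
The bool return introduced above lets C callers collapse the old if/else into a single fallback expression: try the TTBR0 switch first, and only when SW PAN emulation is compiled out or inactive fall back to the hardware PSTATE.PAN alternative. A minimal sketch of the resulting caller shape, condensed from the __uaccess_enable()/__uaccess_disable() macros above (not a new API):

	/*
	 * Sketch: uaccess_ttbr0_enable() returns true when SW PAN emulation
	 * performed the TTBR0 switch; otherwise the hardware PAN alternative
	 * is patched in at the asm() site.
	 */
	static inline void sketch_uaccess_enable(void)
	{
		if (!uaccess_ttbr0_enable())
			asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0),
					ARM64_ALT_PAN_NOT_UAO,
					CONFIG_ARM64_PAN));
	}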
@@ -411,69 +427,62 @@
#else /* __ASSEMBLY__ */
-#include <asm/alternative.h>
#include <asm/assembler.h>
-#include <asm/kernel-pgtable.h>
/*
* User access enabling/disabling macros.
*/
- .macro uaccess_ttbr0_disable, tmp1
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ .macro __uaccess_ttbr0_disable, tmp1
mrs \tmp1, ttbr1_el1 // swapper_pg_dir
add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
isb
.endm
- .macro uaccess_ttbr0_enable, tmp1
+ .macro __uaccess_ttbr0_enable, tmp1
get_thread_info \tmp1
- ldr \tmp1, [\tmp1, #TI_TTBR0] // load saved TTBR0_EL1
+ ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
isb
.endm
+ .macro uaccess_ttbr0_disable, tmp1
+alternative_if_not ARM64_HAS_PAN
+ __uaccess_ttbr0_disable \tmp1
+alternative_else_nop_endif
+ .endm
+
+ .macro uaccess_ttbr0_enable, tmp1, tmp2
+alternative_if_not ARM64_HAS_PAN
+ save_and_disable_irq \tmp2 // avoid preemption
+ __uaccess_ttbr0_enable \tmp1
+ restore_irq \tmp2
+alternative_else_nop_endif
+ .endm
+#else
+ .macro uaccess_ttbr0_disable, tmp1
+ .endm
+
+ .macro uaccess_ttbr0_enable, tmp1, tmp2
+ .endm
+#endif
+
/*
* These macros are no-ops when UAO is present.
*/
.macro uaccess_disable_not_uao, tmp1
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-alternative_if_not ARM64_HAS_PAN
uaccess_ttbr0_disable \tmp1
-alternative_else
- nop
- nop
- nop
- nop
-alternative_endif
-#endif
-alternative_if_not ARM64_ALT_PAN_NOT_UAO
- nop
-alternative_else
+alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(1)
-alternative_endif
+alternative_else_nop_endif
.endm
.macro uaccess_enable_not_uao, tmp1, tmp2
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-alternative_if_not ARM64_HAS_PAN
- save_and_disable_irq \tmp2 // avoid preemption
- uaccess_ttbr0_enable \tmp1
- restore_irq \tmp2
-alternative_else
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-alternative_endif
-#endif
-alternative_if_not ARM64_ALT_PAN_NOT_UAO
- nop
-alternative_else
+ uaccess_ttbr0_enable \tmp1, \tmp2
+alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(0)
-alternative_endif
+alternative_else_nop_endif
.endm
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index ee469be..c731ca0 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -16,6 +16,7 @@
#ifndef _UAPI__ASM_SIGCONTEXT_H
#define _UAPI__ASM_SIGCONTEXT_H
+#ifdef CONFIG_64BIT
#include <linux/types.h>
/*
@@ -61,4 +62,35 @@
__u64 esr;
};
+#else /* CONFIG_64BIT */
+
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked. Note: only add new entries
+ * to the end of the structure.
+ */
+struct sigcontext {
+ unsigned long trap_no;
+ unsigned long error_code;
+ unsigned long oldmask;
+ unsigned long arm_r0;
+ unsigned long arm_r1;
+ unsigned long arm_r2;
+ unsigned long arm_r3;
+ unsigned long arm_r4;
+ unsigned long arm_r5;
+ unsigned long arm_r6;
+ unsigned long arm_r7;
+ unsigned long arm_r8;
+ unsigned long arm_r9;
+ unsigned long arm_r10;
+ unsigned long arm_fp;
+ unsigned long arm_ip;
+ unsigned long arm_sp;
+ unsigned long arm_lr;
+ unsigned long arm_pc;
+ unsigned long arm_cpsr;
+ unsigned long fault_address;
+};
+#endif /* CONFIG_64BIT */
#endif /* _UAPI__ASM_SIGCONTEXT_H */
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index fbdb8bb..bdb35b9 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -14,7 +14,6 @@
#include <linux/slab.h>
#include <linux/sysctl.h>
-#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/opcodes.h>
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index e555321..d42e61c 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -44,6 +44,9 @@
#endif
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ DEFINE(TSK_TI_TTBR0, offsetof(struct thread_info, ttbr0));
+#endif
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
BLANK();
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 324b288..1f0cea7 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -121,11 +121,9 @@
* feature as all TTBR0_EL1 accesses are disabled, not just those to
* user mappings.
*/
-alternative_if_not ARM64_HAS_PAN
- nop
-alternative_else
+alternative_if ARM64_HAS_PAN
b 1f // skip TTBR0 PAN
-alternative_endif
+alternative_else_nop_endif
.if \el != 0
mrs x21, ttbr0_el1
@@ -135,7 +133,7 @@
and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
.endif
- uaccess_ttbr0_disable x21
+ __uaccess_ttbr0_disable x21
1:
#endif
@@ -184,17 +182,15 @@
* Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
* PAN bit checking.
*/
-alternative_if_not ARM64_HAS_PAN
- nop
-alternative_else
+alternative_if ARM64_HAS_PAN
b 2f // skip TTBR0 PAN
-alternative_endif
+alternative_else_nop_endif
.if \el != 0
- tbnz x22, #_PSR_PAN_BIT, 1f // Skip re-enabling TTBR0 access if previously disabled
+ tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
.endif
- uaccess_ttbr0_enable x0
+ __uaccess_ttbr0_enable x0
.if \el == 0
/*
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 6a4348b1..0f62709 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -73,7 +73,8 @@
IPI_CPU_STOP,
IPI_TIMER,
IPI_IRQ_WORK,
- IPI_WAKEUP
+ IPI_WAKEUP,
+ IPI_CPU_BACKTRACE,
};
#ifdef CONFIG_ARM64_VHE
@@ -760,6 +761,7 @@
S(IPI_TIMER, "Timer broadcast interrupts"),
S(IPI_IRQ_WORK, "IRQ work interrupts"),
S(IPI_WAKEUP, "CPU wake-up interrupts"),
+ S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
@@ -846,6 +848,72 @@
cpu_relax();
}
+static cpumask_t backtrace_mask;
+static DEFINE_RAW_SPINLOCK(backtrace_lock);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+static void smp_send_all_cpu_backtrace(void)
+{
+ unsigned int this_cpu = smp_processor_id();
+ int i;
+
+ if (test_and_set_bit(0, &backtrace_flag))
+ /*
+ * If there is already a trigger_all_cpu_backtrace() in progress
+ * (backtrace_flag == 1), don't print a second set of CPU dumps.
+ */
+ return;
+
+ cpumask_copy(&backtrace_mask, cpu_online_mask);
+ cpumask_clear_cpu(this_cpu, &backtrace_mask);
+
+ pr_info("Backtrace for cpu %d (current):\n", this_cpu);
+ dump_stack();
+
+ pr_info("\nsending IPI to all other CPUs:\n");
+ if (!cpumask_empty(&backtrace_mask))
+ smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
+
+ /* Wait for up to 10 seconds for all other CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpumask_empty(&backtrace_mask))
+ break;
+ mdelay(1);
+ }
+
+ clear_bit(0, &backtrace_flag);
+ smp_mb__after_atomic();
+}
+
+/*
+ * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
+ */
+static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
+{
+ if (cpumask_test_cpu(cpu, &backtrace_mask)) {
+ raw_spin_lock(&backtrace_lock);
+ pr_warn("IPI backtrace for cpu %d\n", cpu);
+ show_regs(regs);
+ raw_spin_unlock(&backtrace_lock);
+ cpumask_clear_cpu(cpu, &backtrace_mask);
+ }
+}
+
+#ifdef CONFIG_SMP
+void arch_trigger_all_cpu_backtrace(void)
+{
+ smp_send_all_cpu_backtrace();
+}
+#else
+void arch_trigger_all_cpu_backtrace(void)
+{
+ dump_stack();
+}
+#endif
+
+
/*
* Main handler for inter-processor interrupts
*/
@@ -900,6 +968,10 @@
break;
#endif
+ case IPI_CPU_BACKTRACE:
+ ipi_cpu_backtrace(cpu, regs);
+ break;
+
default:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
break;
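
Taken together, the hook in asm/irq.h, the new IPI number, and the handler above give debug code a way to dump every online CPU's stack. A minimal usage sketch; the stall-detection call site is illustrative and not part of this patch:

	#include <linux/printk.h>
	#include <asm/irq.h>		/* arch_trigger_all_cpu_backtrace() */

	/* Illustrative: the current CPU dumps its own stack, then IPIs all
	 * other online CPUs, whose output is serialized by backtrace_lock
	 * in the handler above.
	 */
	static void sketch_report_stall(void)
	{
		pr_warn("stall detected, collecting all-CPU backtraces\n");
		arch_trigger_all_cpu_backtrace();
	}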
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index d5c4242..d84c7d0 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -317,7 +317,7 @@
if (!user_mode(regs))
bug_type = report_bug(regs->pc, regs);
- if (bug_type != BUG_TRAP_TYPE_NONE)
+ if (bug_type != BUG_TRAP_TYPE_NONE && !strlen(str))
str = "Oops - BUG";
ret = __die(str, err, thread, regs);
@@ -470,9 +470,10 @@
}
#define __user_cache_maint(insn, address, res) \
- if (untagged_addr(address) >= user_addr_max()) \
+ if (untagged_addr(address) >= user_addr_max()) { \
res = -EFAULT; \
- else \
+ } else { \
+ uaccess_ttbr0_enable(); \
asm volatile ( \
"1: " insn ", %1\n" \
" mov %w0, #0\n" \
@@ -484,7 +485,9 @@
" .popsection\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r" (res) \
- : "r" (address), "i" (-EFAULT) )
+ : "r" (address), "i" (-EFAULT)); \
+ uaccess_ttbr0_disable(); \
+ }
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 08b5f18..d7150e3 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -17,9 +17,6 @@
*/
#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
#include <asm/uaccess.h>
.text
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 5f8f812..cfe1339 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -16,10 +16,7 @@
#include <linux/linkage.h>
-#include <asm/assembler.h>
#include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
#include <asm/uaccess.h>
/*
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 9b04ff3..718b1c4 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -18,10 +18,7 @@
#include <linux/linkage.h>
-#include <asm/assembler.h>
#include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
#include <asm/uaccess.h>
/*
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 8077e4f..e99e31c 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -16,10 +16,7 @@
#include <linux/linkage.h>
-#include <asm/assembler.h>
#include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
#include <asm/uaccess.h>
/*
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index db00fc9..97de0eb 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -23,6 +23,7 @@
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
+#include <asm/uaccess.h>
/*
* __flush_dcache_all()
@@ -121,6 +122,7 @@
* - end - virtual end address of region
*/
ENTRY(__flush_cache_user_range)
+ uaccess_ttbr0_enable x2, x3
dcache_line_size x2, x3
sub x3, x2, #1
bic x4, x0, x3
@@ -142,10 +144,12 @@
dsb ish
isb
mov x0, #0
+1:
+ uaccess_ttbr0_disable x1
ret
9:
mov x0, #-EFAULT
- ret
+ b 1b
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index a23b2e8..b41aff2 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -92,7 +92,6 @@
mov x2, x3
mov x3, x4
mov x4, x5
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Privcmd calls are issued by the userspace. The kernel needs to
* enable access to TTBR0_EL1 as the hypervisor would issue stage 1
@@ -101,15 +100,12 @@
* need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
* is enabled (it implies that hardware UAO and PAN disabled).
*/
- uaccess_enable_not_uao x6, x7
-#endif
+ uaccess_ttbr0_enable x6, x7
hvc XEN_IMM
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Disable userspace access from kernel once the hyp call completed.
*/
- uaccess_disable_not_uao x6
-#endif
+ uaccess_ttbr0_disable x6
ret
ENDPROC(privcmd_call);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 9e1499f..13f5fad 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -641,9 +641,10 @@
#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
-#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */
+#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */
#define SRR1_WAKESYSERR 0x00300000 /* System error */
#define SRR1_WAKEEE 0x00200000 /* External interrupt */
+#define SRR1_WAKEHVI 0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
#define SRR1_WAKEMT 0x00280000 /* mtctrl */
#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index f0b2385..e0b9e57 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -44,6 +44,7 @@
#ifdef CONFIG_PPC_POWERNV
extern int icp_opal_init(void);
+extern void icp_opal_flush_interrupt(void);
#else
static inline int icp_opal_init(void) { return -ENODEV; }
#endif
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 16ada1e..d5ce34d 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -424,7 +424,8 @@
void __init mmu_early_init_devtree(void)
{
/* Disable radix mode based on kernel command line. */
- if (disable_radix)
+ /* We don't yet have the machinery to do radix as a guest. */
+ if (disable_radix || !(mfmsr() & MSR_HV))
cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
if (early_radix_enabled())
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 3493cf4..71697ff 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -50,9 +50,7 @@
for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
__tlbiel_pid(pid, set, ric);
}
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
- return;
+ asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -85,8 +83,6 @@
asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("ptesync": : :"memory");
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}
static inline void _tlbie_va(unsigned long va, unsigned long pid,
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index c789258..eec0e8d 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -155,8 +155,10 @@
wmask = SRR1_WAKEMASK_P8;
idle_states = pnv_get_supported_cpuidle_states();
+
/* We don't want to take decrementer interrupts while we are offline,
- * so clear LPCR:PECE1. We keep PECE2 enabled.
+ * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
+ * enabled so as to let IPIs in.
*/
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
@@ -206,8 +208,12 @@
* contains 0.
*/
if (((srr1 & wmask) == SRR1_WAKEEE) ||
+ ((srr1 & wmask) == SRR1_WAKEHVI) ||
(local_paca->irq_happened & PACA_IRQ_EE)) {
- icp_native_flush_interrupt();
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ icp_opal_flush_interrupt();
+ else
+ icp_native_flush_interrupt();
} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -221,6 +227,8 @@
if (srr1 && !generic_check_cpu_restart(cpu))
DBG("CPU%d Unexpected exit while offline !\n", cpu);
}
+
+ /* Re-enable decrementer interrupts */
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
DBG("CPU%d coming online...\n", cpu);
}
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 60c5765..c96c0cb 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -132,6 +132,35 @@
return smp_ipi_demux();
}
+/*
+ * Called when an interrupt is received on an off-line CPU to
+ * clear the interrupt, so that the CPU can go back to nap mode.
+ */
+void icp_opal_flush_interrupt(void)
+{
+ unsigned int xirr;
+ unsigned int vec;
+
+ do {
+ xirr = icp_opal_get_xirr();
+ vec = xirr & 0x00ffffff;
+ if (vec == XICS_IRQ_SPURIOUS)
+ break;
+ if (vec == XICS_IPI) {
+ /* Clear pending IPI */
+ int cpu = smp_processor_id();
+ kvmppc_set_host_ipi(cpu, 0);
+ opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
+ } else {
+ pr_err("XICS: hw interrupt 0x%x to offline cpu, "
+ "disabling\n", vec);
+ xics_mask_unknown_vec(vec);
+ }
+
+ /* EOI the interrupt */
+ } while (opal_int_eoi(xirr) > 0);
+}
+
#endif /* CONFIG_SMP */
static const struct icp_ops icp_opal_ops = {
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 984a7bf..83db0ea 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -104,6 +104,7 @@
__u8 x86_phys_bits;
/* CPUID returned core id bits: */
__u8 x86_coreid_bits;
+ __u8 cu_id;
/* Max extended CPUID function supported: */
__u32 extended_cpuid_level;
/* Maximum supported CPUID level, -1=no CPUID: */
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 7249f15..d1e2556 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1876,7 +1876,6 @@
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ack_level,
.irq_set_affinity = ioapic_set_affinity,
- .irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -1888,7 +1887,6 @@
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ir_ack_level,
.irq_set_affinity = ioapic_set_affinity,
- .irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 1d31672..2b4cf04 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -309,8 +309,22 @@
/* get information required for multi-node processors */
if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+ u32 eax, ebx, ecx, edx;
- node_id = cpuid_ecx(0x8000001e) & 7;
+ cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+
+ node_id = ecx & 0xff;
+ smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+
+ if (c->x86 == 0x15)
+ c->cu_id = ebx & 0xff;
+
+ if (c->x86 >= 0x17) {
+ c->cpu_core_id = ebx & 0xff;
+
+ if (smp_num_siblings > 1)
+ c->x86_max_cores /= smp_num_siblings;
+ }
/*
* We may have multiple LLCs if L3 caches exist, so check if we
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 023c7bf..4eece91 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1015,6 +1015,7 @@
c->x86_model_id[0] = '\0'; /* Unset */
c->x86_max_cores = 1;
c->x86_coreid_bits = 0;
+ c->cu_id = 0xff;
#ifdef CONFIG_X86_64
c->x86_clflush_size = 64;
c->x86_phys_bits = 36;
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index ebb4e95..96d80df 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -236,7 +236,8 @@
* it will #GP. Make sure it is replaced after the memset().
*/
if (static_cpu_has(X86_FEATURE_XSAVES))
- state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
+ state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
+ xfeatures_mask;
if (static_cpu_has(X86_FEATURE_FXSR))
fpstate_init_fxstate(&state->fxsave);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index e9bbe02..36171bc 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -423,9 +423,15 @@
int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
if (c->phys_proc_id == o->phys_proc_id &&
- per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
- c->cpu_core_id == o->cpu_core_id)
- return topology_sane(c, o, "smt");
+ per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
+ if (c->cpu_core_id == o->cpu_core_id)
+ return topology_sane(c, o, "smt");
+
+ if ((c->cu_id != 0xff) &&
+ (o->cu_id != 0xff) &&
+ (c->cu_id == o->cu_id))
+ return topology_sane(c, o, "smt");
+ }
} else if (c->phys_proc_id == o->phys_proc_id &&
c->cpu_core_id == o->cpu_core_id) {
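
For reference, the values consulted above come from CPUID leaf 0x8000001e: ECX[7:0] is the node ID, EBX[7:0] the compute-unit ID on family 0x15 (the core ID on family 0x17+), and EBX[15:8] plus one the thread count per unit, as the cpuid() call in the amd.c hunk shows. A small sketch of the decode; the helper name is illustrative:

	#include <asm/processor.h>	/* cpuid() */

	/* Illustrative decode of the TOPOEXT leaf used in the hunks above. */
	static unsigned int sketch_threads_per_unit(void)
	{
		unsigned int eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		return ((ebx >> 8) & 0xff) + 1;	/* threads per unit/core */
	}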
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index ea9c49a..8aa6bea 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -15,6 +15,7 @@
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/init.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/pgtable.h>
@@ -406,6 +407,7 @@
} else
note_page(m, &st, __pgprot(0), 1);
+ cond_resched();
start++;
}
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index e9c0993..e8817e2 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -671,9 +671,9 @@
unlock:
list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
af_alg_free_sg(&rsgl->sgl);
+ list_del(&rsgl->list);
if (rsgl != &ctx->first_rsgl)
sock_kfree_s(sk, rsgl, sizeof(*rsgl));
- list_del(&rsgl->list);
}
INIT_LIST_HEAD(&ctx->list);
aead_wmem_wakeup(sk);
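
The reordering above follows the unlink-before-free rule: list_for_each_entry_safe() only protects the iterator, not the node's own list_head, so calling list_del() after the node is freed is a use-after-free. A generic sketch of the safe shape; the struct name and kfree() (standing in for sock_kfree_s()) are illustrative:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct rsgl_like {
		struct list_head list;
		/* ... payload ... */
	};

	/* Unlink first, then conditionally free: one entry is embedded in its
	 * parent object (as ctx->first_rsgl is above) and must not be freed.
	 */
	static void sketch_free_list(struct list_head *head,
				     struct rsgl_like *embedded)
	{
		struct rsgl_like *cur, *tmp;

		list_for_each_entry_safe(cur, tmp, head, list) {
			list_del(&cur->list);
			if (cur != embedded)
				kfree(cur);
		}
		INIT_LIST_HEAD(head);
	}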
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 312c4b4..6eb6733 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2704,6 +2704,7 @@
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
struct device *dev = acpi_desc->dev;
struct acpi_nfit_flush_work flush;
+ int rc;
/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
device_lock(dev);
@@ -2716,7 +2717,10 @@
INIT_WORK_ONSTACK(&flush.work, flush_probe);
COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
queue_work(nfit_wq, &flush.work);
- return wait_for_completion_interruptible(&flush.cmp);
+
+ rc = wait_for_completion_interruptible(&flush.cmp);
+ cancel_work_sync(&flush.work);
+ return rc;
}
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
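
The added cancel_work_sync() closes a use-after-return window: flush.work lives on the stack, and wait_for_completion_interruptible() can come back early on a signal while the work item is still queued or running. A generic sketch of the corrected pattern; the names are illustrative:

	#include <linux/completion.h>
	#include <linux/workqueue.h>

	struct onstack_flush {
		struct work_struct work;
		struct completion cmp;
	};

	static void sketch_flush_fn(struct work_struct *work)
	{
		struct onstack_flush *f =
			container_of(work, struct onstack_flush, work);

		complete(&f->cmp);
	}

	/* If the wait can be interrupted, the work must be cancelled (or
	 * flushed) before the on-stack item goes out of scope; otherwise the
	 * workqueue later writes into a dead stack frame.
	 */
	static int sketch_flush(struct workqueue_struct *wq)
	{
		struct onstack_flush flush;
		int rc;

		INIT_WORK_ONSTACK(&flush.work, sketch_flush_fn);
		init_completion(&flush.cmp);
		queue_work(wq, &flush.work);
		rc = wait_for_completion_interruptible(&flush.cmp);
		cancel_work_sync(&flush.work);
		destroy_work_on_stack(&flush.work);
		return rc;
	}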
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 3fd76d9..a13a45e 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -378,6 +378,7 @@
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
const struct freq_tbl *f;
+ int ret;
f = qcom_find_freq(rcg->freq_tbl, rate);
if (!f)
@@ -392,7 +393,13 @@
return 0;
}
- return clk_rcg2_configure(rcg, f);
+ ret = clk_rcg2_configure(rcg, f);
+ if (ret)
+ return ret;
+
+ /* Update current frequency with the requested frequency. */
+ rcg->current_freq = rate;
+ return ret;
}
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -410,7 +417,7 @@
static int clk_rcg2_enable(struct clk_hw *hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- unsigned long rate = clk_get_rate(hw->clk);
+ unsigned long rate;
const struct freq_tbl *f;
if (!rcg->enable_safe_config)
@@ -424,6 +431,7 @@
* is always on while APPS is online. Therefore, the RCG can safely be
* switched.
*/
+ rate = rcg->current_freq;
f = qcom_find_freq(rcg->freq_tbl, rate);
if (!f)
return -EINVAL;
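
The two hunks above pair up: clk_rcg2_set_rate() now records the rate it programmed, and clk_rcg2_enable() consumes that cached value instead of calling clk_get_rate(), which can return a stale framework-cached rate during the safe-switch sequence. A condensed sketch of the pattern, with struct and field names taken from the hunks and error handling trimmed:

	static int sketch_set_rate(struct clk_rcg2 *rcg, unsigned long rate)
	{
		const struct freq_tbl *f = qcom_find_freq(rcg->freq_tbl, rate);
		int ret;

		if (!f)
			return -EINVAL;
		ret = clk_rcg2_configure(rcg, f);
		if (!ret)
			rcg->current_freq = rate;	/* for enable() */
		return ret;
	}

	static int sketch_enable(struct clk_rcg2 *rcg)
	{
		/* Use the driver-cached rate rather than clk_get_rate(). */
		const struct freq_tbl *f = qcom_find_freq(rcg->freq_tbl,
							  rcg->current_freq);

		return f ? clk_rcg2_configure(rcg, f) : -EINVAL;
	}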
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 92e0ffa..a1a1501 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -50,7 +50,6 @@
P_CORE_BI_PLL_TEST_SE,
P_GPLL0_OUT_EVEN,
P_GPLL0_OUT_MAIN,
- P_GPLL1_OUT_MAIN,
P_GPLL4_OUT_MAIN,
P_SLEEP_CLK,
};
@@ -122,7 +121,7 @@
static const struct parent_map gcc_parent_map_5[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
- { P_GPLL1_OUT_MAIN, 4 },
+ { P_GPLL4_OUT_MAIN, 5 },
{ P_GPLL0_OUT_EVEN, 6 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
@@ -130,7 +129,7 @@
static const char * const gcc_parent_names_5[] = {
"bi_tcxo",
"gpll0",
- "gpll1",
+ "gpll4",
"gpll0_out_even",
"core_bi_pll_test_se",
};
@@ -138,28 +137,12 @@
static const struct parent_map gcc_parent_map_6[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
- { P_GPLL4_OUT_MAIN, 5 },
- { P_GPLL0_OUT_EVEN, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_6[] = {
- "bi_tcxo",
- "gpll0",
- "gpll4",
- "gpll0_out_even",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_7[] = {
- { P_BI_TCXO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
{ P_AUD_REF_CLK, 2 },
{ P_GPLL0_OUT_EVEN, 6 },
{ P_CORE_BI_PLL_TEST_SE, 7 },
};
-static const char * const gcc_parent_names_7[] = {
+static const char * const gcc_parent_names_6[] = {
"bi_tcxo",
"gpll0",
"aud_ref_clk",
@@ -264,7 +247,6 @@
static const struct freq_tbl ftbl_gcc_cpuss_rbcpr_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
- F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
{ }
};
@@ -280,9 +262,8 @@
.num_parents = 3,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP2(
- MIN, 19200000,
- NOMINAL, 50000000),
+ VDD_CX_FMAX_MAP1(
+ MIN, 19200000),
},
};
@@ -471,57 +452,17 @@
},
};
-static const struct freq_tbl ftbl_gcc_qupv3_wrap0_core_2x_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
- F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
- F(236888889, P_GPLL1_OUT_MAIN, 4.5, 0, 0),
- { }
-};
-
-static struct clk_rcg2 gcc_qupv3_wrap0_core_2x_clk_src = {
- .cmd_rcgr = 0x17018,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = gcc_parent_map_5,
- .freq_tbl = ftbl_gcc_qupv3_wrap0_core_2x_clk_src,
- .enable_safe_config = true,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_core_2x_clk_src",
- .parent_names = gcc_parent_names_5,
- .num_parents = 5,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP6(
- MIN, 19200000,
- LOWER, 50000000,
- LOW, 100000000,
- LOW_L1, 150000000,
- NOMINAL, 200000000,
- HIGH, 236888889),
- },
-};
-
static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
- F(3686400, P_GPLL0_OUT_MAIN, 1, 96, 15625),
- F(14745600, P_GPLL0_OUT_MAIN, 1, 384, 15625),
- F(16000000, P_GPLL0_OUT_MAIN, 5, 2, 15),
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
F(19200000, P_BI_TCXO, 1, 0, 0),
- F(24000000, P_GPLL0_OUT_MAIN, 5, 1, 5),
- F(32000000, P_GPLL0_OUT_MAIN, 1, 4, 75),
- F(40000000, P_GPLL0_OUT_MAIN, 15, 0, 0),
- F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 375),
- F(48000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
- F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
- F(51200000, P_GPLL0_OUT_MAIN, 1, 32, 375),
- F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 75),
- F(58982400, P_GPLL0_OUT_MAIN, 1, 1536, 15625),
- F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
- F(63157895, P_GPLL0_OUT_MAIN, 9.5, 0, 0),
- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
{ }
};
@@ -538,11 +479,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -559,11 +499,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -580,11 +519,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -601,11 +539,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -622,11 +559,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -643,11 +579,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -664,11 +599,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -685,11 +619,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -706,11 +639,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -727,11 +659,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -748,11 +679,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -769,11 +699,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -790,11 +719,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -811,11 +739,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -832,11 +759,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -853,11 +779,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 19200000,
LOWER, 75000000,
- LOW, 100000000,
- NOMINAL, 150000000),
+ LOW, 100000000),
},
};
@@ -872,12 +797,12 @@
.cmd_rcgr = 0x1400c,
.mnd_width = 8,
.hid_width = 5,
- .parent_map = gcc_parent_map_6,
+ .parent_map = gcc_parent_map_5,
.freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
.enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk_src",
- .parent_names = gcc_parent_names_6,
+ .parent_names = gcc_parent_names_5,
.num_parents = 5,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
@@ -903,7 +828,7 @@
.hid_width = 5,
.parent_map = gcc_parent_map_3,
.freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
- .enable_safe_config = true,
+ .enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_apps_clk_src",
.parent_names = gcc_parent_names_3,
@@ -927,11 +852,11 @@
.cmd_rcgr = 0x36010,
.mnd_width = 8,
.hid_width = 5,
- .parent_map = gcc_parent_map_7,
+ .parent_map = gcc_parent_map_6,
.freq_tbl = ftbl_gcc_tsif_ref_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_tsif_ref_clk_src",
- .parent_names = gcc_parent_names_7,
+ .parent_names = gcc_parent_names_6,
.num_parents = 5,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
@@ -940,21 +865,12 @@
},
};
-static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
- F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
- F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
- F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
- { }
-};
-
static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
.cmd_rcgr = 0x7501c,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_parent_map_0,
- .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
.enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_card_axi_clk_src",
@@ -962,11 +878,10 @@
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
- VDD_CX_FMAX_MAP4(
+ VDD_CX_FMAX_MAP3(
MIN, 50000000,
LOW, 100000000,
- NOMINAL, 200000000,
- HIGH, 240000000),
+ NOMINAL, 200000000),
},
};
@@ -998,17 +913,12 @@
},
};
-static const struct freq_tbl ftbl_gcc_ufs_card_phy_aux_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- { }
-};
-
static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = {
.cmd_rcgr = 0x75090,
.mnd_width = 0,
.hid_width = 5,
.parent_map = gcc_parent_map_4,
- .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_card_phy_aux_clk_src",
.parent_names = gcc_parent_names_4,
@@ -1047,12 +957,21 @@
},
};
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
.cmd_rcgr = 0x7701c,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_parent_map_0,
- .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
.enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_axi_clk_src",
@@ -1227,7 +1146,7 @@
.mnd_width = 0,
.hid_width = 5,
.parent_map = gcc_parent_map_2,
- .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_prim_phy_aux_clk_src",
.parent_names = gcc_parent_names_2,
@@ -1244,7 +1163,7 @@
.mnd_width = 0,
.hid_width = 5,
.parent_map = gcc_parent_map_2,
- .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
.enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_sec_phy_aux_clk_src",
@@ -2195,11 +2114,6 @@
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_core_2x_clk",
- .parent_names = (const char *[]){
- "gcc_qupv3_wrap0_core_2x_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2213,11 +2127,6 @@
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap0_core_clk",
- .parent_names = (const char *[]){
- "gcc_qupv3_wrap0_core_2x_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2375,11 +2284,6 @@
.enable_mask = BIT(18),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_core_2x_clk",
- .parent_names = (const char *[]){
- "gcc_qupv3_wrap0_core_2x_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2393,11 +2297,6 @@
.enable_mask = BIT(19),
.hw.init = &(struct clk_init_data){
.name = "gcc_qupv3_wrap1_core_clk",
- .parent_names = (const char *[]){
- "gcc_qupv3_wrap0_core_2x_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -3382,8 +3281,6 @@
[GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
[GCC_QMIP_VIDEO_AHB_CLK] = &gcc_qmip_video_ahb_clk.clkr,
[GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
- [GCC_QUPV3_WRAP0_CORE_2X_CLK_SRC] =
- &gcc_qupv3_wrap0_core_2x_clk_src.clkr,
[GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
[GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
[GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index e5a2016..82e62c5 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -824,6 +824,7 @@
arch_timer_banner(arch_timers_present);
arch_counter_register(arch_timers_present);
+ clocksource_select_force();
return arch_timer_arch_init();
}
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 0173b8b..884e557 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -120,14 +120,15 @@
loading your cpufreq low-level hardware driver, using the
'interactive' governor for latency-sensitive workloads.
-config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
- bool "interactive"
- select CPU_FREQ_GOV_INTERACTIVE
+config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
+ bool "schedutil"
+ depends on SMP
+ select CPU_FREQ_GOV_SCHEDUTIL
+ select CPU_FREQ_GOV_PERFORMANCE
help
- Use the CPUFreq governor 'interactive' as default. This allows
- you to get a full dynamic cpu frequency capable system by simply
- loading your cpufreq low-level hardware driver, using the
- 'interactive' governor for latency-sensitive workloads.
+ Use the 'schedutil' CPUFreq governor by default. If unsure,
+ have a look at the help section of that governor. The fallback
+ governor will be 'performance'.
endchoice
@@ -243,6 +244,23 @@
If in doubt, say N.
+config CPU_FREQ_GOV_SCHEDUTIL
+ bool "'schedutil' cpufreq policy governor"
+ depends on CPU_FREQ && SMP
+ select CPU_FREQ_GOV_ATTR_SET
+ select IRQ_WORK
+ help
+ This governor makes decisions based on the utilization data provided
+ by the scheduler. It sets the CPU frequency to be proportional to
+ the utilization/capacity ratio coming from the scheduler. If the
+ utilization is frequency-invariant, the new frequency is also
+ proportional to the maximum available frequency. If that is not the
+ case, it is proportional to the current frequency of the CPU. The
+ frequency tipping point is at utilization/capacity equal to 80% in
+ both cases.
+
+ If in doubt, say N.
+
comment "CPU frequency scaling drivers"
config CPUFREQ_DT
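
The 80% tipping point in the help text above falls out of the governor's scaling formula. A worked sketch of the arithmetic, matching the shape of the upstream get_next_freq() calculation:

	/* schedutil picks next_freq = 1.25 * ref_freq * util / max, so at
	 * util/max == 0.8 the result is exactly ref_freq -- the tipping
	 * point. ref_freq is the maximum frequency when utilization is
	 * frequency-invariant, else the current frequency.
	 */
	static unsigned long sketch_next_freq(unsigned long ref_freq,
					      unsigned long util,
					      unsigned long max)
	{
		return (ref_freq + (ref_freq >> 2)) * util / max;
	}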
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4737520..80fa656 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -820,6 +820,25 @@
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}
+#define MSR_IA32_POWER_CTL_BIT_EE 19
+
+/* Disable energy efficiency optimization */
+static void intel_pstate_disable_ee(int cpu)
+{
+ u64 power_ctl;
+ int ret;
+
+ ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
+ if (ret)
+ return;
+
+ if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
+ pr_info("Disabling energy efficiency optimization\n");
+ power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
+ wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
+ }
+}
+
static int atom_get_min_pstate(void)
{
u64 value;
@@ -1420,6 +1439,11 @@
{}
};
+static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
+ ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params),
+ {}
+};
+
static int intel_pstate_init_cpu(unsigned int cpunum)
{
struct cpudata *cpu;
@@ -1435,6 +1459,12 @@
cpu->cpu = cpunum;
if (hwp_active) {
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
+ if (id)
+ intel_pstate_disable_ee(cpunum);
+
intel_pstate_hwp_enable(cpu);
pid_params.sample_rate_ms = 50;
pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index faf3cb3..a388bf2 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -955,7 +955,7 @@
static void ccp5_config(struct ccp_device *ccp)
{
/* Public side */
- iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+ iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
}
static void ccp5other_config(struct ccp_device *ccp)
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index da5f4a6..340aef1 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -238,6 +238,7 @@
struct ccp_device *ccp;
spinlock_t lock;
+ struct list_head created;
struct list_head pending;
struct list_head active;
struct list_head complete;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 6553912..e5d9278 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -63,6 +63,7 @@
ccp_free_desc_resources(chan->ccp, &chan->complete);
ccp_free_desc_resources(chan->ccp, &chan->active);
ccp_free_desc_resources(chan->ccp, &chan->pending);
+ ccp_free_desc_resources(chan->ccp, &chan->created);
spin_unlock_irqrestore(&chan->lock, flags);
}
@@ -273,6 +274,7 @@
spin_lock_irqsave(&chan->lock, flags);
cookie = dma_cookie_assign(tx_desc);
+ list_del(&desc->entry);
list_add_tail(&desc->entry, &chan->pending);
spin_unlock_irqrestore(&chan->lock, flags);
@@ -426,7 +428,7 @@
spin_lock_irqsave(&chan->lock, sflags);
- list_add_tail(&desc->entry, &chan->pending);
+ list_add_tail(&desc->entry, &chan->created);
spin_unlock_irqrestore(&chan->lock, sflags);
@@ -610,6 +612,7 @@
/*TODO: Purge the complete list? */
ccp_free_desc_resources(chan->ccp, &chan->active);
ccp_free_desc_resources(chan->ccp, &chan->pending);
+ ccp_free_desc_resources(chan->ccp, &chan->created);
spin_unlock_irqrestore(&chan->lock, flags);
@@ -679,6 +682,7 @@
chan->ccp = ccp;
spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->created);
INIT_LIST_HEAD(&chan->pending);
INIT_LIST_HEAD(&chan->active);
INIT_LIST_HEAD(&chan->complete);
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index fb5f9bb..6aece3f 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -51,6 +51,7 @@
int assign_chcr_device(struct chcr_dev **dev)
{
struct uld_ctx *u_ctx;
+ int ret = -ENXIO;
/*
* Which device to use if multiple devices are available TODO
@@ -58,15 +59,14 @@
* must go to the same device to maintain the ordering.
*/
mutex_lock(&dev_mutex); /* TODO ? */
- u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
- if (!u_ctx) {
- mutex_unlock(&dev_mutex);
- return -ENXIO;
+ list_for_each_entry(u_ctx, &uld_ctx_list, entry)
+ if (u_ctx && u_ctx->dev) {
+ *dev = u_ctx->dev;
+ ret = 0;
+ break;
}
-
- *dev = u_ctx->dev;
mutex_unlock(&dev_mutex);
- return 0;
+ return ret;
}
static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -203,10 +203,8 @@
static int __init chcr_crypto_init(void)
{
- if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
+ if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
pr_err("ULD register fail: No chcr crypto support in cxgb4");
- return -1;
- }
return 0;
}
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index bc5cbc1..5b2d78a 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -233,7 +233,7 @@
&hw_data->accel_capabilities_mask);
/* Find and map all the device's BARS */
- i = 0;
+ i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
ADF_PCI_MAX_BARS * 2) {
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index e882253..33f0a62 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -69,6 +69,7 @@
#define ADF_ERRSOU5 (0x3A000 + 0xD8)
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 1e480f1..8c4fd25 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -456,7 +456,7 @@
unsigned int csr_val;
int times = 30;
- if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID)
+ if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
return 0;
csr_val = ADF_CSR_RD(csr_addr, 0);
@@ -716,7 +716,7 @@
(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
LOCAL_TO_XFER_REG_OFFSET);
handle->pci_dev = pci_info->pci_dev;
- if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) {
+ if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
sram_bar =
&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
handle->hal_sram_addr_v = sram_bar->virt_addr;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index e6862a7..4e19bde 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1759,16 +1759,16 @@
if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
/*
- * TEST_ONLY and PAGE_FLIP_EVENT are mutually exclusive,
- * if they weren't, this code should be called on success
- * for TEST_ONLY too.
+ * Free the allocated event. drm_atomic_helper_setup_commit
+ * can allocate an event too, so only free it if it's ours
+ * to prevent a double free in drm_atomic_state_clear.
*/
-
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!crtc_state->event)
- continue;
-
- drm_event_cancel_free(dev, &crtc_state->event->base);
+ struct drm_pending_vblank_event *event = crtc_state->event;
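+ /* an event with a fence or file_priv attached was allocated by
+ * this ioctl and is ours to cancel and free */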
+ if (event && (event->base.fence || event->base.file_priv)) {
+ drm_event_cancel_free(dev, &event->base);
+ crtc_state->event = NULL;
+ }
}
}
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index aa64448..f59771d 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1817,7 +1817,7 @@
mgr->payloads[i].vcpi = req_payload.vcpi;
} else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0;
- drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
+ drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
req_payload.payload_state = mgr->payloads[i].payload_state;
mgr->payloads[i].start_slot = 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a218c2e..0c400f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1215,14 +1215,14 @@
if (exec[i].offset !=
gen8_canonical_addr(exec[i].offset & PAGE_MASK))
return -EINVAL;
-
- /* From drm_mm perspective address space is continuous,
- * so from this point we're always using non-canonical
- * form internally.
- */
- exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
}
+ /* From drm_mm perspective address space is continuous,
+ * so from this point we're always using non-canonical
+ * form internally.
+ */
+ exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
+
if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8079e5b..b9be8a6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4280,10 +4280,10 @@
drm_crtc_vblank_put(&intel_crtc->base);
wake_up_all(&dev_priv->pending_flip_queue);
- queue_work(dev_priv->wq, &work->unpin_work);
-
trace_i915_flip_complete(intel_crtc->plane,
work->pending_flip_obj);
+
+ queue_work(dev_priv->wq, &work->unpin_work);
}
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 1c59ca5..cae27c5 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1723,7 +1723,8 @@
return NULL;
if ((encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_EDP) &&
+ encoder->type == INTEL_OUTPUT_EDP ||
+ encoder->type == INTEL_OUTPUT_DP_MST) &&
!bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
return NULL;
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index af327f1..b67efc8 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -51,6 +51,8 @@
sde_io_util.o \
sde/sde_hw_reg_dma_v1_color_proc.o \
sde/sde_hw_color_proc_v4.o \
+ sde_rsc.o \
+ sde_rsc_hw.o \
# use drm gpu driver only if qcom_kgsl driver not available
ifneq ($(CONFIG_QCOM_KGSL),y)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index f54852d..1525cb2 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -2660,13 +2660,19 @@
{
struct dsi_display *display;
struct dsi_panel_phy_props phy_props;
+ struct dsi_mode_info *timing;
int i, rc;
if (!info || !disp) {
pr_err("invalid params\n");
return -EINVAL;
}
+
display = disp;
+ if (!display->panel) {
+ pr_err("invalid display panel\n");
+ return -EINVAL;
+ }
mutex_lock(&display->display_lock);
rc = dsi_panel_get_phy_props(display->panel, &phy_props);
@@ -2677,12 +2683,18 @@
}
info->intf_type = DRM_MODE_CONNECTOR_DSI;
+ timing = &display->panel->mode.timing;
info->num_of_h_tiles = display->ctrl_count;
for (i = 0; i < info->num_of_h_tiles; i++)
info->h_tile_instance[i] = display->ctrl[i].ctrl->index;
info->is_connected = true;
+ info->is_primary = true;
+ info->frame_rate = timing->refresh_rate;
+ info->vtotal = DSI_V_TOTAL(timing);
+ info->prefill_lines = display->panel->panel_prefill_lines;
+ info->jitter = display->panel->panel_jitter;
info->width_mm = phy_props.panel_width_mm;
info->height_mm = phy_props.panel_height_mm;
info->max_width = 1920;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index fa10b55..23f0577 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -24,6 +24,10 @@
#define DEFAULT_MDP_TRANSFER_TIME 14000
+#define DEFAULT_PANEL_JITTER 5
+#define MAX_PANEL_JITTER 25
+#define DEFAULT_PANEL_PREFILL_LINES 16
+
static int dsi_panel_vreg_get(struct dsi_panel *panel)
{
int rc = 0;
@@ -1361,6 +1365,37 @@
return rc;
}
+static int dsi_panel_parse_jitter_config(struct dsi_panel *panel,
+ struct device_node *of_node)
+{
+ int rc;
+
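+ /* panel jitter is a percentage of the frame time; out-of-range
+ * values (above MAX_PANEL_JITTER) fall back to the default */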
+ rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-jitter",
+ &panel->panel_jitter);
+ if (rc) {
+ pr_debug("panel jitter is not defined rc=%d\n", rc);
+ panel->panel_jitter = DEFAULT_PANEL_JITTER;
+ } else if (panel->panel_jitter > MAX_PANEL_JITTER) {
+ pr_debug("invalid jitter config=%d setting to:%d\n",
+ panel->panel_jitter, DEFAULT_PANEL_JITTER);
+ panel->panel_jitter = DEFAULT_PANEL_JITTER;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-prefill-lines",
+ &panel->panel_prefill_lines);
+ if (rc) {
+ pr_debug("panel prefill lines are not defined rc=%d\n", rc);
+ panel->panel_prefill_lines = DEFAULT_PANEL_PREFILL_LINES;
+ } else if (panel->panel_prefill_lines >=
+ DSI_V_TOTAL(&panel->mode.timing)) {
+ pr_debug("invalid prefill lines config=%d setting to:%d\n",
+ panel->panel_prefill_lines, DEFAULT_PANEL_PREFILL_LINES);
+ panel->panel_prefill_lines = DEFAULT_PANEL_PREFILL_LINES;
+ }
+
+ return 0;
+}
+
static int dsi_panel_parse_power_cfg(struct device *parent,
struct dsi_panel *panel,
struct device_node *of_node)
@@ -1643,6 +1678,10 @@
if (rc)
pr_err("failed to parse backlight config, rc=%d\n", rc);
+ rc = dsi_panel_parse_jitter_config(panel, of_node);
+ if (rc)
+ pr_err("failed to parse panel jitter config, rc=%d\n", rc);
+
panel->panel_of_node = of_node;
drm_panel_init(&panel->drm_panel);
mutex_init(&panel->panel_lock);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 7b60193..386e8a9 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -176,6 +176,8 @@
bool ulps_enabled;
bool allow_phy_power_off;
+ u32 panel_jitter;
+ u32 panel_prefill_lines;
bool panel_initialized;
};
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 585e206..ca4d213 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -205,6 +205,11 @@
* this is max width supported by controller
* @max_height: Max height of display. In case of hot pluggable display
* this is max height supported by controller
+ * @is_primary: Set to true if display is primary display
+ * @frame_rate: Display frame rate
+ * @prefill_lines: prefill lines based on porches.
+ * @vtotal: display vertical total
+ * @jitter: display jitter configuration
* @compression: Compression supported by the display
*/
struct msm_display_info {
@@ -222,6 +227,12 @@
uint32_t max_width;
uint32_t max_height;
+ bool is_primary;
+ uint32_t frame_rate;
+ uint32_t prefill_lines;
+ uint32_t vtotal;
+ uint32_t jitter;
+
enum msm_display_compression compression;
};
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 6892646..f7fcd01 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -531,9 +531,10 @@
hw_dspp->ops.setup_cont(hw_dspp, &hw_cfg);
break;
case SDE_CP_CRTC_DSPP_MEMCOLOR:
- if (!hw_dspp || !hw_dspp->ops.setup_pa_memcolor)
+ if (!hw_dspp || !hw_dspp->ops.setup_pa_memcolor) {
ret = -EINVAL;
continue;
+ }
hw_dspp->ops.setup_pa_memcolor(hw_dspp, &hw_cfg);
break;
case SDE_CP_CRTC_DSPP_SIXZONE:
@@ -638,16 +639,18 @@
if (!ctl)
continue;
if (set_dspp_flush && ctl->ops.get_bitmask_dspp
- && sde_crtc->mixers[i].hw_dspp)
+ && sde_crtc->mixers[i].hw_dspp) {
ctl->ops.get_bitmask_dspp(ctl,
&flush_mask,
sde_crtc->mixers[i].hw_dspp->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
+ }
if (set_lm_flush && ctl->ops.get_bitmask_mixer
- && sde_crtc->mixers[i].hw_lm)
+ && sde_crtc->mixers[i].hw_lm) {
flush_mask = ctl->ops.get_bitmask_mixer(ctl,
sde_crtc->mixers[i].hw_lm->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
+ }
}
}
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index 0ba644d..7a68f91 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,16 +22,10 @@
#include "msm_prop.h"
#include "sde_kms.h"
-#include "sde_fence.h"
-#include "sde_formats.h"
-#include "sde_hw_sspp.h"
#include "sde_trace.h"
#include "sde_crtc.h"
-#include "sde_plane.h"
-#include "sde_encoder.h"
-#include "sde_wb.h"
+#include "sde_rsc.h"
#include "sde_core_perf.h"
-#include "sde_trace.h"
static struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
{
@@ -59,20 +53,23 @@
static bool _sde_core_video_mode_intf_connected(struct drm_crtc *crtc)
{
struct drm_crtc *tmp_crtc;
+ bool intf_connected = false;
if (!crtc)
- return 0;
+ goto end;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if ((sde_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
SDE_DEBUG("video interface connected crtc:%d\n",
tmp_crtc->base.id);
- return true;
+ intf_connected = true;
+ goto end;
}
}
- return false;
+end:
+ return intf_connected;
}
int sde_core_perf_crtc_check(struct drm_crtc *crtc,
@@ -80,6 +77,7 @@
{
u32 bw, threshold;
u64 bw_sum_of_intfs = 0;
+ enum sde_crtc_client_type curr_client_type;
bool is_video_mode;
struct sde_crtc_state *sde_cstate;
struct drm_crtc *tmp_crtc;
@@ -97,16 +95,18 @@
}
/* we only need bandwidth check on real-time clients (interfaces) */
- if (sde_crtc_is_wb(crtc))
+ if (sde_crtc_get_client_type(crtc) == NRT_CLIENT)
return 0;
sde_cstate = to_sde_crtc_state(state);
bw_sum_of_intfs = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+ curr_client_type = sde_crtc_get_client_type(crtc);
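+ /* sum bandwidth only across other powered-on crtcs that share this
+ * crtc's client type; each client type votes its quota separately */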
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
- sde_crtc_is_rt(tmp_crtc) && tmp_crtc != crtc) {
+ (sde_crtc_get_client_type(tmp_crtc) == curr_client_type) &&
+ (tmp_crtc != crtc)) {
struct sde_crtc_state *tmp_cstate =
to_sde_crtc_state(tmp_crtc->state);
@@ -131,7 +131,7 @@
return -E2BIG;
} else if (bw > threshold) {
sde_cstate->cur_perf.bw_ctl = 0;
- SDE_DEBUG("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
+ SDE_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
return -E2BIG;
}
@@ -158,23 +158,23 @@
perf->max_per_pipe_ib, perf->bw_ctl);
}
-static u64 _sde_core_perf_crtc_calc_client_vote(struct sde_kms *kms,
- struct drm_crtc *crtc, struct sde_core_perf_params *perf,
- bool nrt_client, u32 core_clk)
+static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
+ struct drm_crtc *crtc)
{
- u64 bw_sum_of_intfs = 0;
+ u64 bw_sum_of_intfs = 0, bus_ab_quota, bus_ib_quota;
+ struct sde_core_perf_params perf = {0};
+ enum sde_crtc_client_type curr_client_type
+ = sde_crtc_get_client_type(crtc);
struct drm_crtc *tmp_crtc;
+ struct sde_crtc_state *sde_cstate;
+ struct msm_drm_private *priv = kms->dev->dev_private;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (_sde_core_perf_crtc_is_power_on(crtc) &&
- /* RealTime clients */
- ((!nrt_client) ||
- /* Non-RealTime clients */
- (nrt_client && sde_crtc_is_nrt(tmp_crtc)))) {
- struct sde_crtc_state *sde_cstate =
- to_sde_crtc_state(tmp_crtc->state);
+ if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
+ (curr_client_type == sde_crtc_get_client_type(tmp_crtc))) {
+ sde_cstate = to_sde_crtc_state(tmp_crtc->state);
- perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+ perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
sde_cstate->cur_perf.max_per_pipe_ib);
bw_sum_of_intfs += sde_cstate->cur_perf.bw_ctl;
@@ -185,57 +185,38 @@
}
}
- return bw_sum_of_intfs;
-}
+ bus_ab_quota = max(bw_sum_of_intfs, kms->perf.perf_tune.min_bus_vote);
+ bus_ib_quota = perf.max_per_pipe_ib;
-static void _sde_core_perf_crtc_update_client_vote(struct sde_kms *kms,
- struct sde_core_perf_params *params, bool nrt_client, u64 bw_vote)
-{
- struct msm_drm_private *priv = kms->dev->dev_private;
- u64 bus_ab_quota, bus_ib_quota;
+ switch (curr_client_type) {
+ case NRT_CLIENT:
+ sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
+ SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+ bus_ab_quota, bus_ib_quota);
+ SDE_DEBUG("client:%s ab=%llu ib=%llu\n", "nrt",
+ bus_ab_quota, bus_ib_quota);
+ break;
- bus_ab_quota = max(bw_vote, kms->perf.perf_tune.min_bus_vote);
- bus_ib_quota = params->max_per_pipe_ib;
-
- SDE_ATRACE_INT("bus_quota", bus_ib_quota);
- sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
- nrt_client ? SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT :
+ case RT_CLIENT:
+ sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
- bus_ab_quota, bus_ib_quota);
- SDE_DEBUG("client:%s ab=%llu ib=%llu\n", nrt_client ? "nrt" : "rt",
- bus_ab_quota, bus_ib_quota);
-}
+ bus_ab_quota, bus_ib_quota);
+ SDE_DEBUG("client:%s ab=%llu ib=%llu\n", "rt",
+ bus_ab_quota, bus_ib_quota);
+ break;
-static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
- struct drm_crtc *crtc, u32 core_clk)
-{
- u64 bw_sum_of_rt_intfs = 0, bw_sum_of_nrt_intfs = 0;
- struct sde_core_perf_params params = {0};
+ case RT_RSC_CLIENT:
+ sde_cstate = to_sde_crtc_state(crtc->state);
+ sde_rsc_client_vote(sde_cstate->rsc_client, bus_ab_quota,
+ bus_ib_quota);
+ SDE_DEBUG("client:%s ab=%llu ib=%llu\n", "rt_rsc",
+ bus_ab_quota, bus_ib_quota);
+ break;
- SDE_ATRACE_BEGIN(__func__);
-
- /*
- * non-real time client
- */
- if (sde_crtc_is_nrt(crtc)) {
- bw_sum_of_nrt_intfs = _sde_core_perf_crtc_calc_client_vote(
- kms, crtc, &params, true, core_clk);
- _sde_core_perf_crtc_update_client_vote(kms, &params, true,
- bw_sum_of_nrt_intfs);
+ default:
+ SDE_ERROR("invalid client type:%d\n", curr_client_type);
+ break;
}
-
- /*
- * real time client
- */
- if (!sde_crtc_is_nrt(crtc) ||
- sde_crtc_is_wb(crtc)) {
- bw_sum_of_rt_intfs = _sde_core_perf_crtc_calc_client_vote(kms,
- crtc, &params, false, core_clk);
- _sde_core_perf_crtc_update_client_vote(kms, &params, false,
- bw_sum_of_rt_intfs);
- }
-
- SDE_ATRACE_END(__func__);
}
/**
@@ -265,9 +246,9 @@
sde_cstate = to_sde_crtc_state(crtc->state);
- /* only do this for command panel or writeback */
+ /* only do this for command mode rt client (non-rsc client) */
if ((sde_crtc_get_intf_mode(crtc) != INTF_MODE_CMD) &&
- (sde_crtc_get_intf_mode(crtc) != INTF_MODE_WB_LINE))
+ (sde_crtc_get_client_type(crtc) != RT_RSC_CLIENT))
return;
/*
@@ -288,22 +269,15 @@
sde_cstate->cur_perf.bw_ctl = 0;
sde_cstate->new_perf.bw_ctl = 0;
SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
- _sde_core_perf_crtc_update_bus(kms, crtc, 0);
+ _sde_core_perf_crtc_update_bus(kms, crtc);
}
}
-static int _sde_core_select_clk_lvl(struct sde_kms *kms,
- u32 clk_rate)
-{
- return clk_round_rate(kms->perf.core_clk, clk_rate);
-}
-
static u32 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms)
{
u32 clk_rate = 0;
struct drm_crtc *crtc;
struct sde_crtc_state *sde_cstate;
- int ncrtc = 0;
drm_for_each_crtc(crtc, kms->dev) {
if (_sde_core_perf_crtc_is_power_on(crtc)) {
@@ -312,11 +286,9 @@
clk_rate);
clk_rate = clk_round_rate(kms->perf.core_clk, clk_rate);
}
- ncrtc++;
}
- clk_rate = _sde_core_select_clk_lvl(kms, clk_rate);
- SDE_DEBUG("clk:%u ncrtc:%d\n", clk_rate, ncrtc);
+ SDE_DEBUG("clk:%u\n", clk_rate);
return clk_rate;
}
@@ -351,8 +323,6 @@
SDE_DEBUG("crtc:%d stop_req:%d core_clk:%u\n",
crtc->base.id, stop_req, kms->perf.core_clk_rate);
- SDE_ATRACE_BEGIN(__func__);
-
old = &sde_cstate->cur_perf;
new = &sde_cstate->new_perf;
@@ -392,38 +362,28 @@
update_clk = 1;
}
- /*
- * Calculate mdp clock before bandwidth calculation. If traffic shaper
- * is enabled and clock increased, the bandwidth calculation can
- * use the new clock for the rotator bw calculation.
- */
- if (update_clk)
- clk_rate = _sde_core_perf_get_core_clk_rate(kms);
-
if (update_bus)
- _sde_core_perf_crtc_update_bus(kms, crtc, clk_rate);
+ _sde_core_perf_crtc_update_bus(kms, crtc);
/*
* Update the clock after bandwidth vote to ensure
* bandwidth is available before clock rate is increased.
*/
if (update_clk) {
- SDE_ATRACE_INT(kms->perf.clk_name, clk_rate);
+ clk_rate = _sde_core_perf_get_core_clk_rate(kms);
+
SDE_EVT32(kms->dev, stop_req, clk_rate);
ret = sde_power_clk_set_rate(&priv->phandle,
kms->perf.clk_name, clk_rate);
if (ret) {
SDE_ERROR("failed to set %s clock rate %u\n",
kms->perf.clk_name, clk_rate);
- goto end;
+ return;
}
kms->perf.core_clk_rate = clk_rate;
SDE_DEBUG("update clk rate = %d HZ\n", clk_rate);
}
-
-end:
- SDE_ATRACE_END(__func__);
}
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index ba68652..821f93f 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -417,7 +417,6 @@
SDE_EVT32(DRMID(crtc));
/* identify connectors attached to this crtc */
- cstate->is_rt = false;
cstate->num_connectors = 0;
drm_for_each_connector(conn, crtc->dev)
@@ -425,9 +424,6 @@
cstate->num_connectors < MAX_CONNECTORS) {
cstate->connectors[cstate->num_connectors++] = conn;
sde_connector_prepare_fence(conn);
-
- if (conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
- cstate->is_rt = true;
}
if (cstate->num_connectors > 0 && cstate->connectors[0]->encoder)
@@ -440,15 +436,6 @@
sde_fence_prepare(&sde_crtc->output_fence);
}
-bool sde_crtc_is_rt(struct drm_crtc *crtc)
-{
- if (!crtc || !crtc->state) {
- SDE_ERROR("invalid crtc or state\n");
- return true;
- }
- return to_sde_crtc_state(crtc->state)->is_rt;
-}
-
/* if file!=NULL, this is preclose potential cancel-flip path */
static void _sde_crtc_complete_flip(struct drm_crtc *crtc,
struct drm_file *file)
@@ -542,8 +529,6 @@
crtc->base.id,
ktime_to_ns(fevent->ts));
SDE_EVT32(DRMID(crtc), fevent->event, 1);
- sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
- sde_kms->core_client, false);
sde_core_perf_crtc_release_bw(crtc);
} else {
SDE_EVT32(DRMID(crtc), fevent->event, 2);
@@ -882,10 +867,12 @@
static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
+ struct drm_encoder *encoder;
struct sde_crtc *sde_crtc;
struct drm_device *dev;
struct drm_plane *plane;
unsigned long flags;
+ struct sde_crtc_state *cstate;
if (!crtc) {
SDE_ERROR("invalid crtc\n");
@@ -901,7 +888,7 @@
SDE_DEBUG("crtc%d\n", crtc->base.id);
sde_crtc = to_sde_crtc(crtc);
-
+ cstate = to_sde_crtc_state(crtc->state);
dev = crtc->dev;
if (sde_crtc->event) {
@@ -923,6 +910,17 @@
/* wait for acquire fences before anything else is done */
_sde_crtc_wait_for_fences(crtc);
+ if (!cstate->rsc_update) {
+ drm_for_each_encoder(encoder, dev) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ cstate->rsc_client =
+ sde_encoder_update_rsc_client(encoder, true);
+ }
+ cstate->rsc_update = true;
+ }
+
/* update performance setting before crtc kickoff */
sde_core_perf_crtc_update(crtc, 1, false);
@@ -1003,8 +1001,6 @@
/* acquire bandwidth and other resources */
SDE_DEBUG("crtc%d first commit\n", crtc->base.id);
SDE_EVT32(DRMID(crtc), 1);
- sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
- sde_kms->core_client, true);
} else {
SDE_DEBUG("crtc%d commit\n", crtc->base.id);
SDE_EVT32(DRMID(crtc), 2);
@@ -1095,6 +1091,7 @@
{
struct msm_drm_private *priv;
struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *cstate;
struct drm_encoder *encoder;
struct sde_kms *sde_kms;
@@ -1103,6 +1100,7 @@
return;
}
sde_crtc = to_sde_crtc(crtc);
+ cstate = to_sde_crtc_state(crtc->state);
sde_kms = _sde_crtc_get_kms(crtc);
priv = sde_kms->dev->dev_private;
@@ -1129,8 +1127,6 @@
SDE_ERROR("crtc%d invalid frame pending\n",
crtc->base.id);
SDE_EVT32(DRMID(crtc));
- sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
- sde_kms->core_client, false);
sde_core_perf_crtc_release_bw(crtc);
atomic_set(&sde_crtc->frame_pending, 0);
}
@@ -1141,6 +1137,9 @@
if (encoder->crtc != crtc)
continue;
sde_encoder_register_frame_event_callback(encoder, NULL, NULL);
+ sde_encoder_update_rsc_client(encoder, false);
+ cstate->rsc_client = NULL;
+ cstate->rsc_update = false;
}
memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
@@ -1875,7 +1874,7 @@
struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
- seq_printf(s, "is_rt: %d\n", cstate->is_rt);
+ seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc));
seq_printf(s, "intf_mode: %d\n", cstate->intf_mode);
seq_printf(s, "bw_ctl: %llu\n", cstate->cur_perf.bw_ctl);
seq_printf(s, "core_clk_rate: %u\n", cstate->cur_perf.core_clk_rate);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 91fdaed..c4546b9 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -31,6 +31,20 @@
#define SDE_CRTC_FRAME_EVENT_SIZE 2
/**
+ * enum sde_crtc_client_type: crtc client type
+ * @RT_CLIENT: RealTime client like video/cmd mode display
+ * voting through apps rsc
+ * @NRT_CLIENT: Non-RealTime client like WB display
+ * voting through apps rsc
+ * @RT_RSC_CLIENT: Realtime display RSC voting client
+ */
+enum sde_crtc_client_type {
+ RT_CLIENT,
+ NRT_CLIENT,
+ RT_RSC_CLIENT,
+};
+
+/**
* struct sde_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context
* @hw_ctl: CTL Path HW driver context
@@ -136,8 +150,8 @@
* @base: Base drm crtc state structure
* @connectors : Currently associated drm connectors
* @num_connectors: Number of associated drm connectors
- * @is_rt : Whether or not the current commit contains RT connectors
* @intf_mode : Interface mode of the primary connector
+ * @rsc_client : sde rsc client when mode is valid
+ * @rsc_update : true once the rsc client has been updated for this commit
* @property_values: Current crtc property values
* @input_fence_timeout_ns : Cached input fence timeout, in ns
* @property_blobs: Reference pointers for blob properties
@@ -151,8 +165,9 @@
struct drm_connector *connectors[MAX_CONNECTORS];
int num_connectors;
- bool is_rt;
enum sde_intf_mode intf_mode;
+ struct sde_rsc_client *rsc_client;
+ bool rsc_update;
uint64_t property_values[CRTC_PROP_COUNT];
uint64_t input_fence_timeout_ns;
@@ -255,13 +270,6 @@
void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
/**
- * sde_crtc_is_rt - query whether real time connectors are present on the crtc
- * @crtc: Pointer to drm crtc structure
- * Returns: True if a connector is present with real time constraints
- */
-bool sde_crtc_is_rt(struct drm_crtc *crtc);
-
-/**
* sde_crtc_get_intf_mode - get interface mode of the given crtc
* @crtc: Pointer to crtc
*/
@@ -274,24 +282,20 @@
}
/**
- * sde_core_perf_crtc_is_wb - check if writeback is primary output of this crtc
+ * sde_crtc_get_client_type - check the crtc client type - rt, nrt or rt_rsc
* @crtc: Pointer to crtc
*/
-static inline bool sde_crtc_is_wb(struct drm_crtc *crtc)
+static inline enum sde_crtc_client_type sde_crtc_get_client_type(
+ struct drm_crtc *crtc)
{
struct sde_crtc_state *cstate =
crtc ? to_sde_crtc_state(crtc->state) : NULL;
- return cstate ? (cstate->intf_mode == INTF_MODE_WB_LINE) : false;
-}
+ if (!cstate)
+ return NRT_CLIENT;
-/**
- * sde_crtc_is_nrt - check if primary output of this crtc is non-realtime client
- * @crtc: Pointer to crtc
- */
-static inline bool sde_crtc_is_nrt(struct drm_crtc *crtc)
-{
- return sde_crtc_is_wb(crtc);
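+ /* an rsc client marks the crtc as a realtime client voting through
+ * the display rsc; writeback is non-realtime; everything else is a
+ * realtime client voting through the apps rsc */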
+ return cstate->rsc_client ? RT_RSC_CLIENT :
+ (cstate->intf_mode == INTF_MODE_WB_LINE ? NRT_CLIENT : RT_CLIENT);
}
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 282fd88..059471d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -32,6 +32,9 @@
#include "sde_formats.h"
#include "sde_encoder_phys.h"
#include "sde_color_processing.h"
+#include "sde_rsc.h"
+
+#include "sde_power_handle.h"
#define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
@@ -111,6 +114,10 @@
atomic_t frame_done_timeout;
struct timer_list frame_done_timer;
+
+ struct sde_rsc_client *rsc_client;
+ struct msm_display_info disp_info;
+ bool rsc_state_update;
};
#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -157,6 +164,8 @@
SDE_DEBUG_ENC(sde_enc, "\n");
mutex_lock(&sde_enc->enc_lock);
+ sde_rsc_client_destroy(sde_enc->rsc_client);
+
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
@@ -390,6 +399,7 @@
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
sde_enc->cur_master = NULL;
+
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
@@ -559,6 +569,49 @@
}
}
+struct sde_rsc_client *sde_encoder_update_rsc_client(
+ struct drm_encoder *drm_enc, bool enable)
+{
+ struct sde_encoder_virt *sde_enc;
+ enum sde_rsc_state rsc_state;
+ struct sde_rsc_cmd_config rsc_config;
+ int ret;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return NULL;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ if (!sde_enc->disp_info.is_primary)
+ return NULL;
+
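+ /* command mode panels vote for the rsc command state and video mode
+ * panels for the video state; disabling always votes for idle */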
+ rsc_state = enable ?
+ (sde_enc->disp_info.capabilities & MSM_DISPLAY_CAP_CMD_MODE ?
+ SDE_RSC_CMD_STATE : SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
+
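+ /* the timing configuration is sent only with the first non-idle
+ * vote; later votes pass a NULL config to reuse the cached one */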
+ if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_update) {
+ rsc_config.fps = sde_enc->disp_info.frame_rate;
+ rsc_config.vtotal = sde_enc->disp_info.vtotal;
+ rsc_config.prefill_lines = sde_enc->disp_info.prefill_lines;
+ rsc_config.jitter = sde_enc->disp_info.jitter;
+ sde_enc->rsc_state_update = true;
+
+ ret = sde_rsc_client_state_update(sde_enc->rsc_client,
+ rsc_state, &rsc_config,
+ drm_enc->crtc ? drm_enc->crtc->index : -1);
+ } else {
+ ret = sde_rsc_client_state_update(sde_enc->rsc_client,
+ rsc_state, NULL,
+ drm_enc->crtc ? drm_enc->crtc->index : -1);
+ }
+
+ if (ret)
+ SDE_ERROR("sde rsc client update failed ret:%d\n", ret);
+
+ return sde_enc->rsc_client;
+}
+
void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
void (*frame_event_cb)(void *, u32 event),
void *frame_event_cb_data)
@@ -1266,7 +1319,6 @@
}
mutex_unlock(&sde_enc->enc_lock);
-
return ret;
}
@@ -1310,6 +1362,7 @@
struct drm_encoder *drm_enc = NULL;
struct sde_encoder_virt *sde_enc = NULL;
int drm_enc_mode = DRM_MODE_ENCODER_NONE;
+ char name[SDE_NAME_SIZE];
int ret = 0;
sde_enc = kzalloc(sizeof(*sde_enc), GFP_KERNEL);
@@ -1336,6 +1389,17 @@
_sde_encoder_init_debugfs(drm_enc, sde_enc, sde_kms);
+ snprintf(name, SDE_NAME_SIZE, "rsc_enc%u", drm_enc->base.id);
+ sde_enc->rsc_client = sde_rsc_client_create(SDE_RSC_INDEX, name,
+ disp_info->is_primary);
+ if (IS_ERR_OR_NULL(sde_enc->rsc_client)) {
+ SDE_ERROR("sde rsc client create failed :%ld\n",
+ PTR_ERR(sde_enc->rsc_client));
+ sde_enc->rsc_client = NULL;
+ }
+
+ memcpy(&sde_enc->disp_info, disp_info, sizeof(*disp_info));
+
SDE_DEBUG_ENC(sde_enc, "created\n");
return drm_enc;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 61435c9..e0c28b5 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -73,6 +73,15 @@
void (*cb)(void *, u32), void *data);
/**
+ * sde_encoder_update_rsc_client - updates the rsc client state for the
+ * primary display.
+ * @encoder: encoder pointer
+ * @enable: enable/disable the client
+ */
+struct sde_rsc_client *sde_encoder_update_rsc_client(
+ struct drm_encoder *encoder, bool enable);
+
+/**
* sde_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
* path (i.e. ctl flush and start) at next appropriate time.
* Immediately: if no previous commit is outstanding.
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 0ee0f13..d8f096c 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -27,6 +27,8 @@
#include "sde_encoder.h"
#include "sde_connector.h"
+#include "sde_rsc.h"
+
#define SDE_ENCODER_NAME_MAX 16
/* wait for at most 2 vsync for lowest refresh rate (24hz) */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index e00b4d2..f61077a 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -506,9 +506,6 @@
if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
return;
- sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
- phys_enc->sde_kms->core_client, true);
-
sde_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
sde_encoder_phys_vid_setup_timing_engine(phys_enc);
@@ -740,9 +737,6 @@
sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
}
- sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
- phys_enc->sde_kms->core_client, false);
-
if (atomic_read(&phys_enc->vblank_refcount))
SDE_ERROR_VIDENC(vid_enc, "invalid vblank refcount %d\n",
atomic_read(&phys_enc->vblank_refcount));
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c
deleted file mode 100644
index 652331f..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c
+++ /dev/null
@@ -1,671 +0,0 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "sde_hw_catalog.h"
-#include "sde_hw_mdss.h"
-#include "sde_hwio.h"
-
-/* VIG layer capability */
-#define VIG_17X_MASK \
- (BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_SCALER_QSEED2) |\
- BIT(SDE_SSPP_CSC) | BIT(SDE_SSPP_HSIC) |\
- BIT(SDE_SSPP_PCC) | BIT(SDE_SSPP_IGC) |\
- BIT(SDE_SSPP_MEMCOLOR) | BIT(SDE_SSPP_QOS))
-
-/* RGB layer capability */
-#define RGB_17X_MASK \
- (BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_SCALER_RGB) |\
- BIT(SDE_SSPP_PCC) | BIT(SDE_SSPP_IGC) | BIT(SDE_SSPP_QOS))
-
-/* DMA layer capability */
-#define DMA_17X_MASK \
- (BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_PCC) | BIT(SDE_SSPP_IGC) |\
- BIT(SDE_SSPP_QOS))
-
-/* Cursor layer capability */
-#define CURSOR_17X_MASK (BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_CURSOR))
-
-#define MIXER_17X_MASK (BIT(SDE_MIXER_SOURCESPLIT) |\
- BIT(SDE_MIXER_GC))
-
-#define DSPP_17X_MASK \
- (BIT(SDE_DSPP_IGC) | BIT(SDE_DSPP_PCC) |\
- BIT(SDE_DSPP_GC) | BIT(SDE_DSPP_HSIC) | BIT(SDE_DSPP_GAMUT) |\
- BIT(SDE_DSPP_DITHER) | BIT(SDE_DSPP_HIST) | BIT(SDE_DSPP_MEMCOLOR) |\
- BIT(SDE_DSPP_SIXZONE) | BIT(SDE_DSPP_AD) | BIT(SDE_DSPP_VLUT))
-
-#define PINGPONG_17X_MASK \
- (BIT(SDE_PINGPONG_TE) | BIT(SDE_PINGPONG_DSC))
-
-#define PINGPONG_17X_SPLIT_MASK \
- (PINGPONG_17X_MASK | BIT(SDE_PINGPONG_SPLIT) |\
- BIT(SDE_PINGPONG_TE2))
-
-#define WB01_17X_MASK \
- (BIT(SDE_WB_LINE_MODE) | BIT(SDE_WB_BLOCK_MODE) |\
- BIT(SDE_WB_CSC) | BIT(SDE_WB_CHROMA_DOWN) | BIT(SDE_WB_DOWNSCALE) |\
- BIT(SDE_WB_DITHER) | BIT(SDE_WB_TRAFFIC_SHAPER) |\
- BIT(SDE_WB_UBWC_1_0) | BIT(SDE_WB_YUV_CONFIG))
-
-#define WB2_17X_MASK \
- (BIT(SDE_WB_LINE_MODE) | BIT(SDE_WB_TRAFFIC_SHAPER) |\
- BIT(SDE_WB_YUV_CONFIG))
-
-#define DECIMATION_17X_MAX_H 4
-#define DECIMATION_17X_MAX_V 4
-
-#define RES_1080p ((u64)(1088*1920))
-#define RES_UHD ((u64)(3840*2160))
-
-static const struct sde_format_extended plane_formats[] = {
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_RGB888, 0},
- {DRM_FORMAT_BGR888, 0},
- {DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGR565, 0},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_XRGB1555, 0},
- {DRM_FORMAT_XBGR1555, 0},
- {DRM_FORMAT_RGBX5551, 0},
- {DRM_FORMAT_BGRX5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {DRM_FORMAT_XRGB4444, 0},
- {DRM_FORMAT_XBGR4444, 0},
- {DRM_FORMAT_RGBX4444, 0},
- {DRM_FORMAT_BGRX4444, 0},
- {0, 0},
-};
-
-static const struct sde_format_extended plane_formats_yuv[] = {
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_RGB888, 0},
- {DRM_FORMAT_BGR888, 0},
- {DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGR565, 0},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_XRGB1555, 0},
- {DRM_FORMAT_XBGR1555, 0},
- {DRM_FORMAT_RGBX5551, 0},
- {DRM_FORMAT_BGRX5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {DRM_FORMAT_XRGB4444, 0},
- {DRM_FORMAT_XBGR4444, 0},
- {DRM_FORMAT_RGBX4444, 0},
- {DRM_FORMAT_BGRX4444, 0},
- {DRM_FORMAT_NV12, 0},
- {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_NV21, 0},
- {DRM_FORMAT_NV16, 0},
- {DRM_FORMAT_NV61, 0},
- {DRM_FORMAT_VYUY, 0},
- {DRM_FORMAT_UYVY, 0},
- {DRM_FORMAT_YUYV, 0},
- {DRM_FORMAT_YVYU, 0},
- {DRM_FORMAT_YUV420, 0},
- {DRM_FORMAT_YVU420, 0},
- {0, 0},
-};
-
-static const struct sde_format_extended wb0_formats[] = {
- {DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_RGB888, 0},
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_XRGB1555, 0},
- {DRM_FORMAT_RGBX5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_RGBX4444, 0},
- {DRM_FORMAT_XRGB4444, 0},
-
- {DRM_FORMAT_BGR565, 0},
- {DRM_FORMAT_BGR888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_BGRX8888, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_XBGR1555, 0},
- {DRM_FORMAT_BGRX5551, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {DRM_FORMAT_BGRX4444, 0},
- {DRM_FORMAT_XBGR4444, 0},
-
- {DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
-
- {DRM_FORMAT_YUV420, 0},
- {DRM_FORMAT_NV12, 0},
- {DRM_FORMAT_NV16, 0},
- {DRM_FORMAT_NV21, 0},
- {DRM_FORMAT_NV61, 0},
- {DRM_FORMAT_UYVY, 0},
- {DRM_FORMAT_YUYV, 0},
-
- {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_AYUV, DRM_FORMAT_MOD_QCOM_COMPRESSED},
-
- {0, 0},
-};
-
-static const struct sde_format_extended wb2_formats[] = {
- {DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_RGB888, 0},
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_XRGB1555, 0},
- {DRM_FORMAT_RGBX5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_RGBX4444, 0},
- {DRM_FORMAT_XRGB4444, 0},
-
- {DRM_FORMAT_BGR565, 0},
- {DRM_FORMAT_BGR888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_BGRX8888, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_XBGR1555, 0},
- {DRM_FORMAT_BGRX5551, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {DRM_FORMAT_BGRX4444, 0},
- {DRM_FORMAT_XBGR4444, 0},
-
- {DRM_FORMAT_YUV420, 0},
- {DRM_FORMAT_NV12, 0},
- {DRM_FORMAT_NV16, 0},
- {DRM_FORMAT_YUYV, 0},
-
- {0, 0},
-};
-
-/**
- * set_cfg_1xx_init(): populate sde sub-blocks reg offsets and instance counts
- */
-static inline int set_cfg_1xx_init(struct sde_mdss_cfg *cfg)
-{
-
- /* Layer capability */
- static const struct sde_sspp_sub_blks vig_layer = {
- .maxlinewidth = 2560,
- .danger_lut_linear = 0x000f,
- .safe_lut_linear = 0xfffc,
- .danger_lut_tile = 0xffff,
- .safe_lut_tile = 0xff00,
- .danger_lut_nrt = 0x0,
- .safe_lut_nrt = 0xffff,
- .creq_lut_nrt = 0x0,
- .creq_vblank = 0x2,
- .danger_vblank = 0,
- .pixel_ram_size = 50 * 1024,
- .maxdwnscale = 4, .maxupscale = 20,
- .maxhdeciexp = DECIMATION_17X_MAX_H,
- .maxvdeciexp = DECIMATION_17X_MAX_V,
- .src_blk = {.id = SDE_SSPP_SRC,
- .base = 0x00, .len = 0x150,},
- .scaler_blk = {.id = SDE_SSPP_SCALER_QSEED2,
- .base = 0x200, .len = 0x70,},
- .csc_blk = {.id = SDE_SSPP_CSC,
- .base = 0x320, .len = 0x44,},
- .format_list = plane_formats_yuv,
- .igc_blk = {.id = SDE_SSPP_IGC, .base = 0x0, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- .pcc_blk = {.id = SDE_SSPP_PCC, .base = 0x0, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- .hsic = {.id = SDE_SSPP_HSIC, .base = 0x0, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- .memcolor = {.id = SDE_SSPP_MEMCOLOR, .base = 0x0, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- };
-
- static const struct sde_sspp_sub_blks layer = {
- .maxlinewidth = 2560,
- .danger_lut_linear = 0x000f,
- .safe_lut_linear = 0xfffc,
- .danger_lut_tile = 0xffff,
- .safe_lut_tile = 0xff00,
- .danger_lut_nrt = 0x0,
- .safe_lut_nrt = 0xffff,
- .creq_lut_nrt = 0x0,
- .creq_vblank = 0x2,
- .danger_vblank = 0,
- .pixel_ram_size = 50 * 1024,
- .maxdwnscale = 4, .maxupscale = 20,
- .maxhdeciexp = DECIMATION_17X_MAX_H,
- .maxvdeciexp = DECIMATION_17X_MAX_V,
- .src_blk = {.id = SDE_SSPP_SRC,
- .base = 0x00, .len = 0x150,},
- .scaler_blk = {.id = SDE_SSPP_SCALER_QSEED2,
- .base = 0x200, .len = 0x70,},
- .csc_blk = {.id = SDE_SSPP_CSC,
- .base = 0x320, .len = 0x44,},
- .format_list = plane_formats,
- .igc_blk = {.id = SDE_SSPP_IGC, .base = 0x0, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- .pcc_blk = {.id = SDE_SSPP_PCC, .base = 0x0, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- };
-
- static const struct sde_sspp_sub_blks dma = {
- .maxlinewidth = 2560,
- .danger_lut_linear = 0x000f,
- .safe_lut_linear = 0xfffc,
- .danger_lut_tile = 0xffff,
- .safe_lut_tile = 0xff00,
- .danger_lut_nrt = 0x0,
- .safe_lut_nrt = 0xffff,
- .creq_lut_nrt = 0x0,
- .creq_vblank = 0x2,
- .danger_vblank = 0,
- .pixel_ram_size = 50 * 1024,
- .maxdwnscale = 1, .maxupscale = 1,
- .maxhdeciexp = DECIMATION_17X_MAX_H,
- .maxvdeciexp = DECIMATION_17X_MAX_V,
- .src_blk = {.id = SDE_SSPP_SRC, .base = 0x00, .len = 0x150,},
- .scaler_blk = {.id = 0, .base = 0x00, .len = 0x0,},
- .csc_blk = {.id = 0, .base = 0x00, .len = 0x0,},
- .format_list = plane_formats,
- .igc_blk = {.id = SDE_SSPP_IGC, .base = 0x0, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- .pcc_blk = {.id = SDE_SSPP_PCC, .base = 0x0, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- };
-
- static const struct sde_sspp_sub_blks cursor = {
- .maxlinewidth = 128,
- .maxdwnscale = 1, .maxupscale = 1,
- .maxhdeciexp = 0,
- .maxvdeciexp = 0,
- .src_blk = {.id = SDE_SSPP_SRC, .base = 0x00, .len = 0x150,},
- .scaler_blk = {.id = 0, .base = 0x00, .len = 0x0,},
- .csc_blk = {.id = 0, .base = 0x00, .len = 0x0,},
- .format_list = plane_formats,
- };
-
- /* MIXER capability */
- static const struct sde_lm_sub_blks lm = {
- .maxwidth = 2560,
- .maxblendstages = 7, /* excluding base layer */
- .blendstage_base = { /* offsets relative to mixer base */
- 0x20, 0x50, 0x80, 0xB0, 0x230, 0x260, 0x290 },
- .gc = {.id = SDE_DSPP_GC, .base = 0x0, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- };
-
- /* DSPP capability */
- static const struct sde_dspp_sub_blks dspp = {
- .igc = {.id = SDE_DSPP_IGC, .base = 0x0, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- .pcc = {.id = SDE_DSPP_PCC, .base = 0x0, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- .gamut = {.id = SDE_DSPP_GAMUT, .base = 0x0, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- .dither = {.id = SDE_DSPP_DITHER, .base = 0x0, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- .hsic = {.id = SDE_DSPP_HSIC, .base = 0x00, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- .memcolor = {.id = SDE_DSPP_MEMCOLOR, .base = 0x00, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- .sixzone = {.id = SDE_DSPP_SIXZONE, .base = 0x00, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- .hist = {.id = SDE_DSPP_HIST, .base = 0x00, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- .gc = {.id = SDE_DSPP_GC, .base = 0x0, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- .ad = {.id = SDE_DSPP_AD, .base = 0x00, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x3, 0x0)},
- .vlut = {.id = SDE_DSPP_VLUT, .base = 0x1400, .len = 0x0,
- .version = SDE_COLOR_PROCESS_VER(0x1, 0x0)},
- };
-
- /* PINGPONG capability */
- static const struct sde_pingpong_sub_blks pingpong = {
- .te = {.id = SDE_PINGPONG_TE, .base = 0x0000, .len = 0x0,
- .version = 0x1},
- .te2 = {.id = SDE_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
- .version = 0x1},
- .dsc = {.id = SDE_PINGPONG_DSC, .base = 0x10000, .len = 0x0,
- .version = 0x1},
- };
-
- /* Writeback 0/1 capability */
- static const struct sde_wb_sub_blocks wb0 = {
- .maxlinewidth = 2048,
- };
-
- /* Writeback 2 capability */
- static const struct sde_wb_sub_blocks wb2 = {
- .maxlinewidth = 4096,
- };
-
- static const struct sde_vbif_dynamic_ot_cfg dynamic_ot_cfg[] = {
- {RES_1080p * 30, 2},
- {RES_1080p * 60, 4},
- {RES_UHD * 30, 16},
- };
-
- /* Setup Register maps and defaults */
- *cfg = (struct sde_mdss_cfg){
- .mdss_count = 1,
- .mdss = {
- {.id = MDP_TOP, .base = 0x00000000, .features = 0}
- },
- .mdp_count = 1,
- .mdp = {
- {.id = MDP_TOP, .base = 0x00001000, .features = 0,
- .highest_bank_bit = 0x2,
- .clk_ctrls[SDE_CLK_CTRL_VIG0] = {
- .reg_off = 0x2AC, .bit_off = 0},
- .clk_ctrls[SDE_CLK_CTRL_VIG1] = {
- .reg_off = 0x2B4, .bit_off = 0},
- .clk_ctrls[SDE_CLK_CTRL_VIG2] = {
- .reg_off = 0x2BC, .bit_off = 0},
- .clk_ctrls[SDE_CLK_CTRL_VIG3] = {
- .reg_off = 0x2C4, .bit_off = 0},
- .clk_ctrls[SDE_CLK_CTRL_RGB0] = {
- .reg_off = 0x2AC, .bit_off = 4},
- .clk_ctrls[SDE_CLK_CTRL_RGB1] = {
- .reg_off = 0x2B4, .bit_off = 4},
- .clk_ctrls[SDE_CLK_CTRL_RGB2] = {
- .reg_off = 0x2BC, .bit_off = 4},
- .clk_ctrls[SDE_CLK_CTRL_RGB3] = {
- .reg_off = 0x2C4, .bit_off = 4},
- .clk_ctrls[SDE_CLK_CTRL_DMA0] = {
- .reg_off = 0x2AC, .bit_off = 8},
- .clk_ctrls[SDE_CLK_CTRL_DMA1] = {
- .reg_off = 0x2B4, .bit_off = 8},
- .clk_ctrls[SDE_CLK_CTRL_CURSOR0] = {
- .reg_off = 0x3A8, .bit_off = 16},
- .clk_ctrls[SDE_CLK_CTRL_CURSOR1] = {
- .reg_off = 0x3B0, .bit_off = 16},
- .clk_ctrls[SDE_CLK_CTRL_WB0] = {
- .reg_off = 0x2BC, .bit_off = 8},
- .clk_ctrls[SDE_CLK_CTRL_WB1] = {
- .reg_off = 0x2BC, .bit_off = 12},
- .clk_ctrls[SDE_CLK_CTRL_WB2] = {
- .reg_off = 0x2BC, .bit_off = 16},
- },
- },
- .ctl_count = 5,
- .ctl = {
- {.id = CTL_0,
- .base = 0x00002000,
- .features = BIT(SDE_CTL_SPLIT_DISPLAY) |
- BIT(SDE_CTL_PINGPONG_SPLIT) },
- {.id = CTL_1,
- .base = 0x00002200,
- .features = BIT(SDE_CTL_SPLIT_DISPLAY) },
- {.id = CTL_2,
- .base = 0x00002400},
- {.id = CTL_3,
- .base = 0x00002600},
- {.id = CTL_4,
- .base = 0x00002800},
- },
- /* 4 VIG, + 4 RGB + 2 DMA + 2 CURSOR */
- .sspp_count = 12,
- .sspp = {
- {.id = SSPP_VIG0, .base = 0x00005000,
- .features = VIG_17X_MASK, .sblk = &vig_layer,
- .xin_id = 0,
- .clk_ctrl = SDE_CLK_CTRL_VIG0},
- {.id = SSPP_VIG1, .base = 0x00007000,
- .features = VIG_17X_MASK, .sblk = &vig_layer,
- .xin_id = 4,
- .clk_ctrl = SDE_CLK_CTRL_VIG1},
- {.id = SSPP_VIG2, .base = 0x00009000,
- .features = VIG_17X_MASK, .sblk = &vig_layer,
- .xin_id = 8,
- .clk_ctrl = SDE_CLK_CTRL_VIG2},
- {.id = SSPP_VIG3, .base = 0x0000b000,
- .features = VIG_17X_MASK, .sblk = &vig_layer,
- .xin_id = 12,
- .clk_ctrl = SDE_CLK_CTRL_VIG3},
-
- {.id = SSPP_RGB0, .base = 0x00015000,
- .features = RGB_17X_MASK, .sblk = &layer,
- .xin_id = 1,
- .clk_ctrl = SDE_CLK_CTRL_RGB0},
- {.id = SSPP_RGB1, .base = 0x00017000,
- .features = RGB_17X_MASK, .sblk = &layer,
- .xin_id = 5,
- .clk_ctrl = SDE_CLK_CTRL_RGB1},
- {.id = SSPP_RGB2, .base = 0x00019000,
- .features = RGB_17X_MASK, .sblk = &layer,
- .xin_id = 9,
- .clk_ctrl = SDE_CLK_CTRL_RGB2},
- {.id = SSPP_RGB3, .base = 0x0001B000,
- .features = RGB_17X_MASK, .sblk = &layer,
- .xin_id = 13,
- .clk_ctrl = SDE_CLK_CTRL_RGB3},
-
- {.id = SSPP_DMA0, .base = 0x00025000,
- .features = DMA_17X_MASK, .sblk = &dma,
- .xin_id = 2,
- .clk_ctrl = SDE_CLK_CTRL_DMA0},
- {.id = SSPP_DMA1, .base = 0x00027000,
- .features = DMA_17X_MASK, .sblk = &dma,
- .xin_id = 10,
- .clk_ctrl = SDE_CLK_CTRL_DMA1},
-
- {.id = SSPP_CURSOR0, .base = 0x00035000,
- .features = CURSOR_17X_MASK, .sblk = &cursor,
- .xin_id = 7,
- .clk_ctrl = SDE_CLK_CTRL_CURSOR0},
- {.id = SSPP_CURSOR1, .base = 0x00037000,
- .features = CURSOR_17X_MASK, .sblk = &cursor,
- .xin_id = 7,
- .clk_ctrl = SDE_CLK_CTRL_CURSOR1},
- },
- .mixer_count = 6,
- .mixer = {
- {.id = LM_0, .base = 0x00045000,
- .features = MIXER_17X_MASK,
- .sblk = &lm,
- .dspp = DSPP_0,
- .pingpong = PINGPONG_0,
- .lm_pair_mask = (1 << LM_1) },
- {.id = LM_1, .base = 0x00046000,
- .features = MIXER_17X_MASK,
- .sblk = &lm,
- .dspp = DSPP_1,
- .pingpong = PINGPONG_1,
- .lm_pair_mask = (1 << LM_0) },
- {.id = LM_2, .base = 0x00047000,
- .features = MIXER_17X_MASK,
- .sblk = &lm,
- .dspp = DSPP_MAX,
- .pingpong = PINGPONG_2,
- .lm_pair_mask = (1 << LM_5) },
- {.id = LM_3, .base = 0x00048000,
- .features = MIXER_17X_MASK,
- .sblk = &lm,
- .dspp = DSPP_MAX,
- .pingpong = PINGPONG_MAX},
- {.id = LM_4, .base = 0x00049000,
- .features = MIXER_17X_MASK,
- .sblk = &lm,
- .dspp = DSPP_MAX,
- .pingpong = PINGPONG_MAX},
- {.id = LM_5, .base = 0x0004a000,
- .features = MIXER_17X_MASK,
- .sblk = &lm,
- .dspp = DSPP_MAX,
- .pingpong = PINGPONG_3,
- .lm_pair_mask = (1 << LM_2) },
- },
- .dspp_count = 2,
- .dspp = {
- {.id = DSPP_0, .base = 0x00055000,
- .features = DSPP_17X_MASK,
- .sblk = &dspp},
- {.id = DSPP_1, .base = 0x00057000,
- .features = DSPP_17X_MASK,
- .sblk = &dspp},
- },
- .pingpong_count = 4,
- .pingpong = {
- {.id = PINGPONG_0, .base = 0x00071000,
- .features = PINGPONG_17X_SPLIT_MASK,
- .sblk = &pingpong},
- {.id = PINGPONG_1, .base = 0x00071800,
- .features = PINGPONG_17X_SPLIT_MASK,
- .sblk = &pingpong},
- {.id = PINGPONG_2, .base = 0x00072000,
- .features = PINGPONG_17X_MASK,
- .sblk = &pingpong},
- {.id = PINGPONG_3, .base = 0x00072800,
- .features = PINGPONG_17X_MASK,
- .sblk = &pingpong},
- },
- .cdm_count = 1,
- .cdm = {
- {.id = CDM_0, .base = 0x0007A200, .features = 0,
- .intf_connect = BIT(INTF_3),
- .wb_connect = BIT(WB_2),}
- },
- .intf_count = 4,
- .intf = {
- {.id = INTF_0, .base = 0x0006B000,
- .type = INTF_NONE, .controller_id = 0,
- .prog_fetch_lines_worst_case = 21},
- {.id = INTF_1, .base = 0x0006B800,
- .type = INTF_DSI, .controller_id = 0,
- .prog_fetch_lines_worst_case = 21},
- {.id = INTF_2, .base = 0x0006C000,
- .type = INTF_DSI, .controller_id = 1,
- .prog_fetch_lines_worst_case = 21},
- {.id = INTF_3, .base = 0x0006C800,
- .type = INTF_HDMI, .controller_id = 0,
- .prog_fetch_lines_worst_case = 21},
- },
- .wb_count = 3,
- .wb = {
- {.id = WB_0, .base = 0x00065000,
- .features = WB01_17X_MASK,
- .sblk = &wb0,
- .format_list = wb0_formats,
- .vbif_idx = VBIF_NRT,
- .xin_id = 3,
- .clk_ctrl = SDE_CLK_CTRL_WB0},
- {.id = WB_1, .base = 0x00065800,
- .features = WB01_17X_MASK,
- .sblk = &wb0,
- .format_list = wb0_formats,
- .vbif_idx = VBIF_NRT,
- .xin_id = 11,
- .clk_ctrl = SDE_CLK_CTRL_WB1},
- {.id = WB_2, .base = 0x00066000,
- .features = WB2_17X_MASK,
- .sblk = &wb2,
- .format_list = wb2_formats,
- .vbif_idx = VBIF_NRT,
- .xin_id = 6,
- .clk_ctrl = SDE_CLK_CTRL_WB2},
- },
- .vbif_count = 2,
- .vbif = {
- {.id = VBIF_0,
- .base = 0, /* 0x000B0000 */
- .features = BIT(SDE_VBIF_QOS_OTLIM),
- .default_ot_rd_limit = 32,
- .default_ot_wr_limit = 16,
- .xin_halt_timeout = 0x4000,
- .dynamic_ot_rd_tbl = {
- .count = ARRAY_SIZE(dynamic_ot_cfg),
- .cfg = dynamic_ot_cfg},
- .dynamic_ot_wr_tbl = {
- .count = ARRAY_SIZE(dynamic_ot_cfg),
- .cfg = dynamic_ot_cfg},
- },
- {.id = VBIF_1,
- .base = 0, /* 0x000B8000 */
- .features = BIT(SDE_VBIF_QOS_OTLIM),
- .default_ot_rd_limit = 32,
- .default_ot_wr_limit = 16,
- .xin_halt_timeout = 0x4000,
- .dynamic_ot_rd_tbl = {
- .count = ARRAY_SIZE(dynamic_ot_cfg),
- .cfg = dynamic_ot_cfg},
- .dynamic_ot_wr_tbl = {
- .count = ARRAY_SIZE(dynamic_ot_cfg),
- .cfg = dynamic_ot_cfg},
- },
- },
- };
- return 0;
-}
-
-/**
- * sde_mdp_cfg_170_init(): Populate the sde sub-blocks catalog information
- */
-struct sde_mdss_cfg *sde_mdss_cfg_170_init(u32 step)
-{
- struct sde_mdss_cfg *m = NULL;
-
- /*
- * This function, for each sub-block sets,
- * instance count, IO regions,
- * default capabilities and this version capabilities,
- * Additional catalog items
- */
-
- m = kzalloc(sizeof(*m), GFP_KERNEL);
- if (!m)
- return NULL;
-
- set_cfg_1xx_init(m);
- m->hwversion = SDE_HW_VER(1, 7, step);
-
- return m;
-}
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index e144a0d..3416396 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1203,7 +1203,7 @@
return 0;
pstate->pending = true;
- psde->is_rt_pipe = sde_crtc_is_rt(crtc);
+ psde->is_rt_pipe = (sde_crtc_get_client_type(crtc) != NRT_CLIENT);
_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
/* update roi config */
@@ -2687,7 +2687,7 @@
_sde_plane_init_debugfs(psde, kms);
- DRM_INFO("%s created for pipe %u\n", psde->pipe_name, pipe);
+ SDE_DEBUG("%s created for pipe %u\n", psde->pipe_name, pipe);
return plane;
clean_sspp:
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index da56891..5157b9c 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -371,14 +371,11 @@
pdbus->curr_bw_uc_idx = new_uc_idx;
pdbus->ao_bw_uc_idx = new_uc_idx;
- if ((pdbus->bus_ref_cnt == 0) && pdbus->curr_bw_uc_idx) {
- rc = 0;
- } else { /* vote BW if bus_bw_cnt > 0 or uc_idx is zero */
- SDE_ATRACE_BEGIN("msm_bus_scale_req");
- rc = msm_bus_scale_client_update_request(pdbus->data_bus_hdl,
+ SDE_ATRACE_BEGIN("msm_bus_scale_req");
+ rc = msm_bus_scale_client_update_request(pdbus->data_bus_hdl,
new_uc_idx);
- SDE_ATRACE_END("msm_bus_scale_req");
- }
+ SDE_ATRACE_END("msm_bus_scale_req");
+
return rc;
}
@@ -583,57 +580,6 @@
}
#endif
-void sde_power_data_bus_bandwidth_ctrl(struct sde_power_handle *phandle,
- struct sde_power_client *pclient, int enable)
-{
- struct sde_power_data_bus_handle *pdbus;
- int changed = 0;
-
- if (!phandle || !pclient) {
- pr_err("invalid power/client handle\n");
- return;
- }
-
- pdbus = &phandle->data_bus_handle;
-
- mutex_lock(&phandle->phandle_lock);
- if (enable) {
- if (pdbus->bus_ref_cnt == 0)
- changed++;
- pdbus->bus_ref_cnt++;
- } else {
- if (pdbus->bus_ref_cnt) {
- pdbus->bus_ref_cnt--;
- if (pdbus->bus_ref_cnt == 0)
- changed++;
- } else {
- pr_debug("Can not be turned off\n");
- }
- }
-
- pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
- __builtin_return_address(0), current->group_leader->comm,
- pdbus->bus_ref_cnt, changed, enable);
-
- if (changed) {
- SDE_ATRACE_INT("data_bus_ctrl", enable);
-
- if (!enable) {
- if (!pdbus->handoff_pending) {
- msm_bus_scale_client_update_request(
- pdbus->data_bus_hdl, 0);
- pdbus->ao_bw_uc_idx = 0;
- }
- } else {
- msm_bus_scale_client_update_request(
- pdbus->data_bus_hdl,
- pdbus->curr_bw_uc_idx);
- }
- }
-
- mutex_unlock(&phandle->phandle_lock);
-}
-
int sde_power_resource_init(struct platform_device *pdev,
struct sde_power_handle *phandle)
{
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index b982d17..4f0348f 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -75,8 +75,6 @@
* @bus_channels: number of memory bus channels
* @curr_bw_uc_idx: current use case index of data bus
* @ao_bw_uc_idx: active only use case index of data bus
- * @bus_ref_cnt: reference count of data bus enable request
- * @handoff_pending: True to indicate if bootloader hand-over is pending
*/
struct sde_power_data_bus_handle {
struct msm_bus_scale_pdata *data_bus_scale_table;
@@ -86,8 +84,6 @@
u32 bus_channels;
u32 curr_bw_uc_idx;
u32 ao_bw_uc_idx;
- u32 bus_ref_cnt;
- int handoff_pending;
};
/**
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
new file mode 100644
index 0000000..b36e17c
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -0,0 +1,961 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[sde_rsc:%s:%d]: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/component.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/module.h>
+
+#include <soc/qcom/rpmh.h>
+#include <drm/drmP.h>
+#include <drm/drm_irq.h>
+#include "sde_rsc.h"
+
+/* this time is ~0.02ms */
+#define RSC_BACKOFF_TIME_NS 20000
+
+/* the next two values should be kept the same, per the documentation */
+
+/* this time is ~0.2ms */
+#define RSC_MODE_THRESHOLD_TIME_IN_NS 200000
+/* this time is ~0.2ms */
+#define RSC_TIME_SLOT_0_NS 200000
+
+#define DEFAULT_PANEL_FPS 60
+#define DEFAULT_PANEL_JITTER 5
+#define DEFAULT_PANEL_PREFILL_LINES 16
+#define DEFAULT_PANEL_VTOTAL (480 + DEFAULT_PANEL_PREFILL_LINES)
+#define TICKS_IN_NANO_SECOND 1000000000
+
+#define MAX_BUFFER_SIZE 256
+
+#define TRY_CMD_MODE_SWITCH 0xFFFF
+
+static struct sde_rsc_priv *rsc_prv_list[MAX_RSC_COUNT];
+
+/**
+ * sde_rsc_client_create() - create the client for sde rsc.
+ * Different displays like DSI, HDMI, DP, WB, etc. should call this
+ * API to register their vote for rpmh. They still need to vote for
+ * the power handle to get the clocks.
+ *
+ * @rsc_index: A client will be created on this RSC. As of now only
+ * SDE_RSC_INDEX is a valid rsc index.
+ * @client_name: Caller needs to provide some valid string to identify
+ * the client. "primary", "dp", "hdmi" are suggested names.
+ * @is_primary_client: Caller needs to indicate whether the client is the
+ * primary display client. Primary client votes will be
+ * redirected to the display rsc.
+ *
+ * Return: client node pointer.
+ */
+struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index, char *client_name,
+ bool is_primary_client)
+{
+ struct sde_rsc_client *client;
+ struct sde_rsc_priv *rsc;
+
+ if (!client_name) {
+ pr_err("client name is null- not supported\n");
+ return ERR_PTR(-EINVAL);
+ } else if (rsc_index >= MAX_RSC_COUNT) {
+ pr_err("invalid rsc index\n");
+ return ERR_PTR(-EINVAL);
+ } else if (!rsc_prv_list[rsc_index]) {
+ pr_err("rsc not probed yet or not available\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ rsc = rsc_prv_list[rsc_index];
+ client = kzalloc(sizeof(struct sde_rsc_client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&rsc->client_lock);
+ strlcpy(client->name, client_name, MAX_RSC_CLIENT_NAME_LEN);
+ client->current_state = SDE_RSC_IDLE_STATE;
+ client->rsc_index = rsc_index;
+ if (is_primary_client)
+ rsc->primary_client = client;
+ pr_debug("client %s rsc index:%d primary:%d\n", client_name,
+ rsc_index, is_primary_client);
+
+ list_add(&client->list, &rsc->client_list);
+ mutex_unlock(&rsc->client_lock);
+
+ return client;
+}
+
+/**
+ * sde_rsc_client_destroy() - Destroy the sde rsc client.
+ *
+ * @client: Client pointer provided by sde_rsc_client_create().
+ *
+ * Return: none
+ */
+void sde_rsc_client_destroy(struct sde_rsc_client *client)
+{
+ struct sde_rsc_priv *rsc;
+
+ if (!client) {
+ pr_debug("invalid client\n");
+ goto end;
+ } else if (client->rsc_index >= MAX_RSC_COUNT) {
+ pr_err("invalid rsc index\n");
+ goto end;
+ }
+
+ pr_debug("client %s destroyed\n", client->name);
+ rsc = rsc_prv_list[client->rsc_index];
+ if (!rsc)
+ goto end;
+
+ mutex_lock(&rsc->client_lock);
+ if (client->current_state != SDE_RSC_IDLE_STATE)
+ sde_rsc_client_state_update(client, SDE_RSC_IDLE_STATE,
+ NULL, -1);
+ list_del_init(&client->list);
+ mutex_unlock(&rsc->client_lock);
+
+ kfree(client);
+end:
+ return;
+}
+
+static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
+ struct sde_rsc_cmd_config *cmd_config)
+{
+ const u32 cxo_period_ns = 52;
+ u64 rsc_backoff_time_ns = RSC_BACKOFF_TIME_NS;
+ u64 rsc_mode_threshold_time_ns = RSC_MODE_THRESHOLD_TIME_IN_NS;
+ u64 rsc_time_slot_0_ns = RSC_TIME_SLOT_0_NS;
+ u64 rsc_time_slot_1_ns;
+ const u64 pdc_jitter = 20; /* 20% more */
+
+ u64 frame_time_ns, frame_jitter;
+ u64 line_time_ns, prefill_time_ns;
+ u64 pdc_backoff_time_ns;
+ s64 total;
+ int ret = 0;
+
+ if (cmd_config)
+ memcpy(&rsc->cmd_config, cmd_config, sizeof(*cmd_config));
+
+ /* calculate for 640x480 60 fps resolution by default */
+ if (!rsc->cmd_config.fps)
+ rsc->cmd_config.fps = DEFAULT_PANEL_FPS;
+ if (!rsc->cmd_config.jitter)
+ rsc->cmd_config.jitter = DEFAULT_PANEL_JITTER;
+ if (!rsc->cmd_config.vtotal)
+ rsc->cmd_config.vtotal = DEFAULT_PANEL_VTOTAL;
+ if (!rsc->cmd_config.prefill_lines)
+ rsc->cmd_config.prefill_lines = DEFAULT_PANEL_PREFILL_LINES;
+ pr_debug("frame fps:%d jitter:%d vtotal:%d prefill lines:%d\n",
+ rsc->cmd_config.fps, rsc->cmd_config.jitter,
+ rsc->cmd_config.vtotal, rsc->cmd_config.prefill_lines);
+
+	/* one second, expressed in nanoseconds */
+ frame_time_ns = TICKS_IN_NANO_SECOND;
+ frame_time_ns = div_u64(frame_time_ns, rsc->cmd_config.fps);
+
+ frame_jitter = frame_time_ns * rsc->cmd_config.jitter;
+	/* jitter is specified as a percentage of the frame time */
+ frame_jitter = div_u64(frame_jitter, 100);
+
+ line_time_ns = frame_time_ns;
+ line_time_ns = div_u64(line_time_ns, rsc->cmd_config.vtotal);
+ prefill_time_ns = line_time_ns * rsc->cmd_config.prefill_lines;
+
+ total = frame_time_ns - frame_jitter - prefill_time_ns;
+ if (total < 0) {
+		pr_err("invalid total time; frame_time:%llu jitter_time:%llu prefill_time:%llu\n",
+ frame_time_ns, frame_jitter, prefill_time_ns);
+ total = 0;
+ }
+
+ total = div_u64(total, cxo_period_ns);
+ rsc->timer_config.static_wakeup_time_ns = total;
+
+	pr_debug("frame time:%llu frame jitter_time:%llu\n",
+ frame_time_ns, frame_jitter);
+	pr_debug("line time:%llu prefill time ns:%llu\n",
+ line_time_ns, prefill_time_ns);
+ pr_debug("static wakeup time:%lld cxo:%u\n", total, cxo_period_ns);
+
+ pdc_backoff_time_ns = rsc_backoff_time_ns;
+ rsc_backoff_time_ns = div_u64(rsc_backoff_time_ns, cxo_period_ns);
+ rsc->timer_config.rsc_backoff_time_ns = (u32) rsc_backoff_time_ns;
+
+ pdc_backoff_time_ns *= pdc_jitter;
+ pdc_backoff_time_ns = div_u64(pdc_backoff_time_ns, 100);
+ rsc->timer_config.pdc_backoff_time_ns = (u32) pdc_backoff_time_ns;
+
+ rsc_mode_threshold_time_ns =
+ div_u64(rsc_mode_threshold_time_ns, cxo_period_ns);
+ rsc->timer_config.rsc_mode_threshold_time_ns
+ = (u32) rsc_mode_threshold_time_ns;
+
+ /* time_slot_0 for mode0 latency */
+ rsc_time_slot_0_ns = div_u64(rsc_time_slot_0_ns, cxo_period_ns);
+ rsc->timer_config.rsc_time_slot_0_ns = (u32) rsc_time_slot_0_ns;
+
+ /* time_slot_1 for mode1 latency */
+ rsc_time_slot_1_ns = frame_time_ns;
+ rsc_time_slot_1_ns = div_u64(rsc_time_slot_1_ns, cxo_period_ns);
+ rsc->timer_config.rsc_time_slot_1_ns = (u32) rsc_time_slot_1_ns;
+
+ /* mode 2 is infinite */
+ rsc->timer_config.rsc_time_slot_2_ns = 0xFFFFFFFF;
+
+ if (rsc->hw_ops.init) {
+ ret = rsc->hw_ops.init(rsc);
+ if (ret)
+ pr_err("sde rsc: hw init failed ret:%d\n", ret);
+ }
+
+ return ret;
+}
+
+static int sde_rsc_switch_to_idle(struct sde_rsc_priv *rsc)
+{
+ struct sde_rsc_client *client;
+ int rc = 0;
+
+ list_for_each_entry(client, &rsc->client_list, list)
+ if (client->current_state != SDE_RSC_IDLE_STATE)
+ return TRY_CMD_MODE_SWITCH;
+
+ if (rsc->hw_ops.state_update)
+ rc = rsc->hw_ops.state_update(rsc, SDE_RSC_IDLE_STATE);
+
+ return rc;
+}
+
+static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
+ struct sde_rsc_cmd_config *config,
+ struct sde_rsc_client *caller_client, bool wait_req)
+{
+ struct sde_rsc_client *client;
+ int rc = 0;
+
+ if (!rsc->primary_client) {
+ pr_err("primary client not available for cmd state switch\n");
+ rc = -EINVAL;
+ goto end;
+ } else if (caller_client != rsc->primary_client) {
+ pr_err("primary client state:%d not cmd state request\n",
+ rsc->primary_client->current_state);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* update timers - might not be available at next switch */
+ if (config)
+ sde_rsc_timer_calculate(rsc, config);
+
+ /* any one client in video state blocks the cmd state switch */
+ list_for_each_entry(client, &rsc->client_list, list)
+ if (client->current_state == SDE_RSC_VID_STATE)
+ goto end;
+
+ if (rsc->hw_ops.state_update)
+ rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
+
+ /* wait for vsync */
+ if (!rc && wait_req)
+ drm_wait_one_vblank(rsc->master_drm,
+ rsc->primary_client->crtc_id);
+end:
+ return rc;
+}
+
+static int sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc,
+ struct sde_rsc_cmd_config *config,
+ struct sde_rsc_client *caller_client, bool wait_req)
+{
+ int rc = 0;
+
+ /* update timers - might not be available at next switch */
+ if (config && (caller_client == rsc->primary_client))
+ sde_rsc_timer_calculate(rsc, config);
+
+ /* video state switch should be done immediately */
+ if (rsc->hw_ops.state_update)
+ rc = rsc->hw_ops.state_update(rsc, SDE_RSC_VID_STATE);
+
+ /* wait for vsync */
+ if (!rc && rsc->primary_client && wait_req)
+ drm_wait_one_vblank(rsc->master_drm,
+ rsc->primary_client->crtc_id);
+ return rc;
+}
+
+/**
+ * sde_rsc_client_state_update() - rsc client state update
+ * Video mode and command mode are supported. A client needs to set
+ * this property at panel config time. A switching client can set the
+ * property later to change the state.
+ *
+ * @caller_client: Client pointer provided by sde_rsc_client_create().
+ * @state: Client state - video/cmd
+ * @config: fps, vtotal, porches, etc configuration for command mode
+ * panel
+ * @crtc_id: current client's crtc id
+ *
+ * Return: error code.
+ */
+int sde_rsc_client_state_update(struct sde_rsc_client *caller_client,
+ enum sde_rsc_state state,
+ struct sde_rsc_cmd_config *config, int crtc_id)
+{
+ int rc = 0;
+ struct sde_rsc_priv *rsc;
+ bool wait_requested = false;
+
+ if (!caller_client) {
+ pr_err("invalid client for rsc state update\n");
+ return -EINVAL;
+ } else if (caller_client->rsc_index >= MAX_RSC_COUNT) {
+ pr_err("invalid rsc index\n");
+ return -EINVAL;
+ }
+
+ rsc = rsc_prv_list[caller_client->rsc_index];
+ if (!rsc)
+ return -EINVAL;
+
+ mutex_lock(&rsc->client_lock);
+ caller_client->crtc_id = crtc_id;
+ caller_client->current_state = state;
+
+ if (rsc->master_drm == NULL) {
+ pr_err("invalid master component binding\n");
+ rc = -EINVAL;
+ goto end;
+ } else if ((rsc->current_state == state) &&
+ (state != SDE_RSC_CMD_UPDATE_STATE)) {
+ pr_debug("no state change: %d\n", state);
+ goto end;
+ }
+
+ pr_debug("%pS: rsc state:%d request client:%s state:%d\n",
+ __builtin_return_address(0), rsc->current_state,
+ caller_client->name, state);
+
+ wait_requested = (rsc->current_state != SDE_RSC_IDLE_STATE);
+
+ if (rsc->power_collapse)
+ sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+
+ switch (state) {
+ case SDE_RSC_IDLE_STATE:
+ rc = sde_rsc_switch_to_idle(rsc);
+ /* video state client might be exiting; try cmd state switch */
+ if (rc == TRY_CMD_MODE_SWITCH)
+ rc = sde_rsc_switch_to_cmd(rsc, NULL,
+ rsc->primary_client, wait_requested);
+ break;
+
+ case SDE_RSC_CMD_STATE:
+ case SDE_RSC_CMD_UPDATE_STATE:
+ rc = sde_rsc_switch_to_cmd(rsc, config, caller_client,
+ wait_requested);
+ break;
+
+ case SDE_RSC_VID_STATE:
+ rc = sde_rsc_switch_to_vid(rsc, config, caller_client,
+ wait_requested);
+ break;
+
+ default:
+ pr_err("invalid state handling %d\n", state);
+ break;
+ }
+
+ if (rc) {
+ pr_err("state update failed rc:%d\n", rc);
+ goto end;
+ }
+
+ pr_debug("state switch successfully complete: %d\n", state);
+ rsc->current_state = state;
+
+ if (rsc->power_collapse)
+ sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+end:
+ mutex_unlock(&rsc->client_lock);
+ return rc;
+}
+
+/**
+ * sde_rsc_client_vote() - ab/ib vote from rsc client
+ *
+ * @caller_client: Client pointer provided by sde_rsc_client_create().
+ * @ab_vote:	 aggregated bandwidth vote from the client.
+ * @ib_vote:	 instant bandwidth vote from the client.
+ *
+ * Return: error code.
+ */
+int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
+ u64 ab_vote, u64 ib_vote)
+{
+ int rc = 0;
+ struct sde_rsc_priv *rsc;
+ bool amc_mode = false;
+ enum rpmh_state state;
+
+ if (!caller_client) {
+ pr_err("invalid client for ab/ib vote\n");
+ return -EINVAL;
+ } else if (caller_client->rsc_index >= MAX_RSC_COUNT) {
+ pr_err("invalid rsc index\n");
+ return -EINVAL;
+ }
+
+ rsc = rsc_prv_list[caller_client->rsc_index];
+ if (!rsc)
+ return -EINVAL;
+
+ if (caller_client != rsc->primary_client) {
+		pr_err("only the primary client can use sde rsc; current client:%s\n",
+ caller_client->name);
+ return -EINVAL;
+ }
+ pr_debug("client:%s ab:%llu ib:%llu\n",
+ caller_client->name, ab_vote, ib_vote);
+
+ mutex_lock(&rsc->client_lock);
+ if ((caller_client->current_state == SDE_RSC_IDLE_STATE) ||
+ (rsc->current_state == SDE_RSC_IDLE_STATE)) {
+
+ pr_err("invalid state: client state:%d rsc state:%d\n",
+ caller_client->current_state, rsc->current_state);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (rsc->hw_ops.is_amc_mode)
+ amc_mode = rsc->hw_ops.is_amc_mode(rsc);
+
+ if (rsc->current_state == SDE_RSC_CMD_STATE)
+ state = RPMH_WAKE_ONLY_STATE;
+ else if (amc_mode)
+ state = RPMH_ACTIVE_ONLY_STATE;
+ else
+ state = RPMH_AWAKE_STATE;
+
+ if (rsc->hw_ops.tcs_wait) {
+ rc = rsc->hw_ops.tcs_wait(rsc);
+ if (rc) {
+ pr_err("tcs is still busy; can't send command\n");
+ if (rsc->hw_ops.tcs_use_ok)
+ rsc->hw_ops.tcs_use_ok(rsc);
+ goto end;
+ }
+ }
+
+ sde_power_data_bus_set_quota(&rsc->phandle, rsc->pclient,
+ SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, ab_vote, ib_vote);
+
+ if (rsc->hw_ops.tcs_use_ok)
+ rsc->hw_ops.tcs_use_ok(rsc);
+
+end:
+ mutex_unlock(&rsc->client_lock);
+ return rc;
+}
+
+static int _sde_debugfs_status_show(struct seq_file *s, void *data)
+{
+ struct sde_rsc_priv *rsc;
+ struct sde_rsc_client *client;
+ int ret;
+
+ if (!s || !s->private)
+ return -EINVAL;
+
+ rsc = s->private;
+
+ mutex_lock(&rsc->client_lock);
+ seq_printf(s, "rsc current state:%d\n", rsc->current_state);
+	seq_printf(s, "wrapper backoff time(ns):%d\n",
+ rsc->timer_config.static_wakeup_time_ns);
+ seq_printf(s, "rsc backoff time(ns):%d\n",
+ rsc->timer_config.rsc_backoff_time_ns);
+ seq_printf(s, "pdc backoff time(ns):%d\n",
+ rsc->timer_config.pdc_backoff_time_ns);
+ seq_printf(s, "rsc mode threshold time(ns):%d\n",
+ rsc->timer_config.rsc_mode_threshold_time_ns);
+ seq_printf(s, "rsc time slot 0(ns):%d\n",
+ rsc->timer_config.rsc_time_slot_0_ns);
+ seq_printf(s, "rsc time slot 1(ns):%d\n",
+ rsc->timer_config.rsc_time_slot_1_ns);
+ seq_printf(s, "frame fps:%d jitter:%d vtotal:%d prefill lines:%d\n",
+ rsc->cmd_config.fps, rsc->cmd_config.jitter,
+ rsc->cmd_config.vtotal, rsc->cmd_config.prefill_lines);
+
+ seq_puts(s, "\n");
+
+ list_for_each_entry(client, &rsc->client_list, list)
+ seq_printf(s, "\t client:%s state:%d\n",
+ client->name, client->current_state);
+
+ sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+
+ if (rsc->hw_ops.debug_show) {
+ ret = rsc->hw_ops.debug_show(s, rsc);
+ if (ret)
+ pr_err("sde rsc: hw debug failed ret:%d\n", ret);
+ }
+
+ sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+ mutex_unlock(&rsc->client_lock);
+
+ return 0;
+}
+
+static int _sde_debugfs_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, _sde_debugfs_status_show, inode->i_private);
+}
+
+static int _sde_debugfs_mode_ctrl_open(struct inode *inode, struct file *file)
+{
+ /* non-seekable */
+ file->private_data = inode->i_private;
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t _sde_debugfs_mode_ctrl_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct sde_rsc_priv *rsc = file->private_data;
+ char buffer[MAX_BUFFER_SIZE];
+ int blen = 0;
+
+ if (*ppos || !rsc || !rsc->hw_ops.mode_ctrl)
+ return 0;
+
+ mutex_lock(&rsc->client_lock);
+ sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+
+ blen = rsc->hw_ops.mode_ctrl(rsc, MODE_READ, buffer,
+ MAX_BUFFER_SIZE, 0);
+
+ sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+ mutex_unlock(&rsc->client_lock);
+
+ if (blen < 0)
+ return 0;
+
+ if (copy_to_user(buf, buffer, blen))
+ return -EFAULT;
+
+ *ppos += blen;
+ return blen;
+}
+
+static ssize_t _sde_debugfs_mode_ctrl_write(struct file *file,
+ const char __user *p, size_t count, loff_t *ppos)
+{
+ struct sde_rsc_priv *rsc = file->private_data;
+ char *input, *mode;
+ u32 mode0_state = 0, mode1_state = 0, mode2_state = 0;
+
+	if (!rsc || !rsc->hw_ops.mode_ctrl || !count)
+ return 0;
+
+ input = kmalloc(count, GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ if (copy_from_user(input, p, count)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ input[count - 1] = '\0';
+
+ mutex_lock(&rsc->client_lock);
+ sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+
+ mode = strnstr(input, "mode0=", strlen("mode0="));
+ if (mode) {
+ mode0_state = mode[0] - '0';
+ mode0_state &= BIT(0);
+ rsc->hw_ops.mode_ctrl(rsc, MODE0_UPDATE, NULL, 0, mode0_state);
+ goto end;
+ }
+
+ mode = strnstr(input, "mode1=", strlen("mode1="));
+ if (mode) {
+ mode1_state = mode[0] - '0';
+ mode1_state &= BIT(0);
+ rsc->hw_ops.mode_ctrl(rsc, MODE1_UPDATE, NULL, 0, mode1_state);
+ goto end;
+ }
+
+ mode = strnstr(input, "mode2=", strlen("mode2="));
+ if (mode) {
+ mode2_state = mode[0] - '0';
+ mode2_state &= BIT(0);
+ rsc->hw_ops.mode_ctrl(rsc, MODE2_UPDATE, NULL, 0, mode2_state);
+ }
+
+end:
+ sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+ mutex_unlock(&rsc->client_lock);
+
+ pr_err("req: mode0:%d mode1:%d mode2:%d\n", mode0_state, mode1_state,
+ mode2_state);
+ kfree(input);
+ return count;
+}
+
+static int _sde_debugfs_vsync_mode_open(struct inode *inode, struct file *file)
+{
+ /* non-seekable */
+ file->private_data = inode->i_private;
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t _sde_debugfs_vsync_mode_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct sde_rsc_priv *rsc = file->private_data;
+ char buffer[MAX_BUFFER_SIZE];
+ int blen = 0;
+
+ if (*ppos || !rsc || !rsc->hw_ops.hw_vsync)
+ return 0;
+
+ mutex_lock(&rsc->client_lock);
+ sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+
+ blen = rsc->hw_ops.hw_vsync(rsc, VSYNC_READ, buffer,
+ MAX_BUFFER_SIZE, 0);
+
+ sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+ mutex_unlock(&rsc->client_lock);
+
+ if (blen < 0)
+ return 0;
+
+ if (copy_to_user(buf, buffer, blen))
+ return -EFAULT;
+
+ *ppos += blen;
+ return blen;
+}
+
+static ssize_t _sde_debugfs_vsync_mode_write(struct file *file,
+ const char __user *p, size_t count, loff_t *ppos)
+{
+ struct sde_rsc_priv *rsc = file->private_data;
+ char *input, *vsync_mode;
+ u32 vsync_state = 0;
+
+	if (!rsc || !rsc->hw_ops.hw_vsync || !count)
+ return 0;
+
+ input = kmalloc(count, GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ if (copy_from_user(input, p, count)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ input[count - 1] = '\0';
+
+ vsync_mode = strnstr(input, "vsync_mode=", strlen("vsync_mode="));
+ if (vsync_mode) {
+ vsync_state = vsync_mode[0] - '0';
+ vsync_state &= 0x7;
+ }
+
+ mutex_lock(&rsc->client_lock);
+ sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+
+ if (vsync_state)
+ rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL,
+ 0, vsync_state - 1);
+ else
+ rsc->hw_ops.hw_vsync(rsc, VSYNC_DISABLE, NULL, 0, 0);
+
+ sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+ mutex_unlock(&rsc->client_lock);
+
+ kfree(input);
+ return count;
+}
+
+static const struct file_operations debugfs_status_fops = {
+ .open = _sde_debugfs_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations mode_control_fops = {
+ .open = _sde_debugfs_mode_ctrl_open,
+ .read = _sde_debugfs_mode_ctrl_read,
+ .write = _sde_debugfs_mode_ctrl_write,
+};
+
+static const struct file_operations vsync_status_fops = {
+ .open = _sde_debugfs_vsync_mode_open,
+ .read = _sde_debugfs_vsync_mode_read,
+ .write = _sde_debugfs_vsync_mode_write,
+};
+
+static void _sde_rsc_init_debugfs(struct sde_rsc_priv *rsc, char *name)
+{
+ rsc->debugfs_root = debugfs_create_dir(name, NULL);
+ if (!rsc->debugfs_root)
+ return;
+
+ /* don't error check these */
+ debugfs_create_file("status", 0444, rsc->debugfs_root, rsc,
+ &debugfs_status_fops);
+ debugfs_create_file("mode_control", 0644, rsc->debugfs_root, rsc,
+ &mode_control_fops);
+ debugfs_create_file("vsync_mode", 0644, rsc->debugfs_root, rsc,
+ &vsync_status_fops);
+ debugfs_create_x32("debug_mode", 0644, rsc->debugfs_root,
+ &rsc->debug_mode);
+}
+
+static void sde_rsc_deinit(struct platform_device *pdev,
+ struct sde_rsc_priv *rsc)
+{
+ if (!rsc)
+ return;
+
+ if (rsc->pclient)
+ sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+ if (rsc->fs)
+ devm_regulator_put(rsc->fs);
+ if (rsc->wrapper_io.base)
+ msm_dss_iounmap(&rsc->wrapper_io);
+ if (rsc->drv_io.base)
+ msm_dss_iounmap(&rsc->drv_io);
+ if (rsc->pclient)
+ sde_power_client_destroy(&rsc->phandle, rsc->pclient);
+
+ sde_power_resource_deinit(pdev, &rsc->phandle);
+ debugfs_remove_recursive(rsc->debugfs_root);
+ kfree(rsc);
+}
+
+/**
+ * sde_rsc_bind - bind rsc device with controlling device
+ * @dev: Pointer to base of platform device
+ * @master: Pointer to container of drm device
+ * @data: Pointer to private data
+ * Returns: Zero on success
+ */
+static int sde_rsc_bind(struct device *dev,
+ struct device *master,
+ void *data)
+{
+ struct sde_rsc_priv *rsc;
+ struct drm_device *drm;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (!dev || !pdev || !master) {
+ pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
+ dev, pdev, master);
+ return -EINVAL;
+ }
+
+ drm = dev_get_drvdata(master);
+ rsc = platform_get_drvdata(pdev);
+ if (!drm || !rsc) {
+ pr_err("invalid param(s), drm %pK, rsc %pK\n",
+ drm, rsc);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rsc->client_lock);
+ rsc->master_drm = drm;
+ mutex_unlock(&rsc->client_lock);
+
+ return 0;
+}
+
+/**
+ * sde_rsc_unbind - unbind rsc from controlling device
+ * @dev: Pointer to base of platform device
+ * @master: Pointer to container of drm device
+ * @data: Pointer to private data
+ */
+static void sde_rsc_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ struct sde_rsc_priv *rsc;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (!dev || !pdev) {
+ pr_err("invalid param(s)\n");
+ return;
+ }
+
+ rsc = platform_get_drvdata(pdev);
+ if (!rsc) {
+ pr_err("invalid display rsc\n");
+ return;
+ }
+
+ mutex_lock(&rsc->client_lock);
+ rsc->master_drm = NULL;
+ mutex_unlock(&rsc->client_lock);
+}
+
+static const struct component_ops sde_rsc_comp_ops = {
+ .bind = sde_rsc_bind,
+ .unbind = sde_rsc_unbind,
+};
+
+static int sde_rsc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct sde_rsc_priv *rsc;
+ static int counter;
+ char name[MAX_RSC_CLIENT_NAME_LEN];
+
+ rsc = kzalloc(sizeof(*rsc), GFP_KERNEL);
+ if (!rsc) {
+ ret = -ENOMEM;
+ goto rsc_alloc_fail;
+ }
+
+ platform_set_drvdata(pdev, rsc);
+ of_property_read_u32(pdev->dev.of_node, "qcom,sde-rsc-version",
+ &rsc->version);
+
+ ret = sde_power_resource_init(pdev, &rsc->phandle);
+ if (ret) {
+ pr_err("sde rsc:power resource init failed ret:%d\n", ret);
+ goto sde_rsc_fail;
+ }
+
+ rsc->pclient = sde_power_client_create(&rsc->phandle, "rsc");
+ if (IS_ERR_OR_NULL(rsc->pclient)) {
+ ret = PTR_ERR(rsc->pclient);
+ rsc->pclient = NULL;
+ pr_err("sde rsc:power client create failed ret:%d\n", ret);
+ goto sde_rsc_fail;
+ }
+
+ ret = msm_dss_ioremap_byname(pdev, &rsc->wrapper_io, "wrapper");
+ if (ret) {
+ pr_err("sde rsc: wrapper io data mapping failed ret=%d\n", ret);
+ goto sde_rsc_fail;
+ }
+
+ ret = msm_dss_ioremap_byname(pdev, &rsc->drv_io, "drv");
+ if (ret) {
+ pr_err("sde rsc: drv io data mapping failed ret:%d\n", ret);
+ goto sde_rsc_fail;
+ }
+
+ rsc->fs = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR_OR_NULL(rsc->fs)) {
+ rsc->fs = NULL;
+ pr_err("unable to get regulator\n");
+ goto sde_rsc_fail;
+ }
+
+ ret = sde_rsc_hw_register(rsc);
+ if (ret) {
+ pr_err("sde rsc: hw register failed ret:%d\n", ret);
+ goto sde_rsc_fail;
+ }
+
+ /* these clocks are always on */
+ if (sde_power_resource_enable(&rsc->phandle, rsc->pclient, true)) {
+ pr_err("failed to enable sde rsc power resources\n");
+ goto sde_rsc_fail;
+ }
+
+ if (sde_rsc_timer_calculate(rsc, NULL))
+ goto sde_rsc_fail;
+
+ INIT_LIST_HEAD(&rsc->client_list);
+ mutex_init(&rsc->client_lock);
+
+ pr_info("sde rsc index:%d probed successfully\n",
+ SDE_RSC_INDEX + counter);
+
+ rsc_prv_list[SDE_RSC_INDEX + counter] = rsc;
+ snprintf(name, MAX_RSC_CLIENT_NAME_LEN, "%s%d", "sde_rsc", counter);
+ _sde_rsc_init_debugfs(rsc, name);
+ counter++;
+
+ ret = component_add(&pdev->dev, &sde_rsc_comp_ops);
+ if (ret)
+ pr_debug("component add failed, ret=%d\n", ret);
+ ret = 0;
+
+ return ret;
+
+sde_rsc_fail:
+ sde_rsc_deinit(pdev, rsc);
+rsc_alloc_fail:
+ return ret;
+}
+
+static int sde_rsc_remove(struct platform_device *pdev)
+{
+ struct sde_rsc_priv *rsc = platform_get_drvdata(pdev);
+
+ sde_rsc_deinit(pdev, rsc);
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,sde-rsc"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static struct platform_driver sde_rsc_platform_driver = {
+ .probe = sde_rsc_probe,
+ .remove = sde_rsc_remove,
+ .driver = {
+ .name = "sde_rsc",
+ .of_match_table = dt_match,
+ },
+};
+
+static int __init sde_rsc_register(void)
+{
+ return platform_driver_register(&sde_rsc_platform_driver);
+}
+
+static void __exit sde_rsc_unregister(void)
+{
+ platform_driver_unregister(&sde_rsc_platform_driver);
+}
+
+module_init(sde_rsc_register);
+module_exit(sde_rsc_unregister);
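
For reference, a minimal sketch of how a command-mode display driver might consume
the client API added above: create a primary client, move it to the cmd state with
its panel timing, then place an ab/ib bandwidth vote. The dsi_* names and the
timing values are hypothetical placeholders, not part of this patch.

#include <linux/err.h>
#include "sde_rsc.h"

static struct sde_rsc_client *dsi_rsc;

static int dsi_display_rsc_setup(int crtc_id)
{
	static char name[] = "primary";
	/* placeholder panel timing; real values come from the panel config */
	struct sde_rsc_cmd_config cfg = {
		.fps = 60,
		.vtotal = 2480,		/* height + vbp + vfp */
		.jitter = 5,		/* percent */
		.prefill_lines = 24,
	};
	int rc;

	/* primary display client on the only valid rsc index */
	dsi_rsc = sde_rsc_client_create(SDE_RSC_INDEX, name, true);
	if (IS_ERR_OR_NULL(dsi_rsc))
		return PTR_ERR(dsi_rsc);

	/* enter command state; rsc waits for one vblank when active */
	rc = sde_rsc_client_state_update(dsi_rsc, SDE_RSC_CMD_STATE,
					&cfg, crtc_id);
	if (rc)
		return rc;

	/* only the primary client may place ab/ib bandwidth votes */
	return sde_rsc_client_vote(dsi_rsc, 1000000000ULL, 1500000000ULL);
}

static void dsi_display_rsc_teardown(void)
{
	/* moves the client back to IDLE state before freeing it */
	sde_rsc_client_destroy(dsi_rsc);
}
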
diff --git a/drivers/gpu/drm/msm/sde_rsc.h b/drivers/gpu/drm/msm/sde_rsc.h
new file mode 100644
index 0000000..e9a55b6
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_rsc.h
@@ -0,0 +1,302 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_RSC_H_
+#define _SDE_RSC_H_
+
+#include <linux/kernel.h>
+#include <linux/sde_io_util.h>
+
+#include <soc/qcom/tcs.h>
+#include "sde_power_handle.h"
+
+#define SDE_RSC_COMPATIBLE "disp_rscc"
+
+#define MAX_RSC_CLIENT_NAME_LEN 128
+
+/* primary display rsc index */
+#define SDE_RSC_INDEX 0
+
+/* rsc index max count */
+#define MAX_RSC_COUNT 5
+
+struct sde_rsc_priv;
+
+/**
+ * rsc_mode_req: sde rsc mode request information
+ * MODE_READ: read vsync status
+ * MODE0_UPDATE: mode0 status, this should be 0x0
+ * MODE1_UPDATE: mode1 status, this should be 0x1
+ * MODE2_UPDATE: mode2 status, this should be 0x2
+ */
+enum rsc_mode_req {
+ MODE_READ,
+ MODE0_UPDATE = 0x1,
+ MODE1_UPDATE = 0x2,
+ MODE2_UPDATE = 0x3,
+};
+
+/**
+ * rsc_vsync_req: sde rsc vsync request information
+ * VSYNC_READ: read vsync status
+ * VSYNC_ENABLE: enable rsc wrapper vsync status
+ * VSYNC_DISABLE: disable rsc wrapper vsync status
+ */
+enum rsc_vsync_req {
+ VSYNC_READ,
+ VSYNC_ENABLE,
+ VSYNC_DISABLE,
+};
+
+/**
+ * sde_rsc_state: sde rsc state information
+ * SDE_RSC_IDLE_STATE: A client requests the idle state when no pixel
+ *                  or cmd transfer is expected. An idle vote from all
+ *                  clients leads to the power collapse state.
+ * SDE_RSC_CMD_STATE: A client requests the cmd state when it wants to
+ *                  enable the solver mode.
+ * SDE_RSC_CMD_UPDATE_STATE: A client requests the cmd_update state when
+ *                  it wants to update the backoff time while the solver
+ *                  is enabled. Inline rotation is one good example
+ *                  use case; it increases the prefill by 128 lines.
+ * SDE_RSC_VID_STATE: A client requests the vid state when it wants to
+ *                  avoid enabling the solver because the client fetches
+ *                  data continuously.
+ */
+enum sde_rsc_state {
+ SDE_RSC_IDLE_STATE,
+ SDE_RSC_CMD_STATE,
+ SDE_RSC_CMD_UPDATE_STATE,
+ SDE_RSC_VID_STATE,
+};
+
+/**
+ * struct sde_rsc_client: stores the rsc client for sde driver
+ * @name: name of the client
+ * @current_state: current client state
+ * @crtc_id: crtc_id associated with this rsc client.
+ * @rsc_index: rsc index of a client - only index "0" valid.
+ * @list: list to attach power handle master list
+ */
+struct sde_rsc_client {
+ char name[MAX_RSC_CLIENT_NAME_LEN];
+ short current_state;
+ int crtc_id;
+ u32 rsc_index;
+ struct list_head list;
+};
+
+/**
+ * struct sde_rsc_hw_ops - sde resource state coordinator hardware ops
+ * @init: Initialize the sequencer, solver, qtimer,
+ *				etc. hardware blocks on RSC.
+ * @tcs_wait: Waits for TCS block OK to allow sending a
+ * TCS command.
+ * @hw_vsync: Enables the vsync on RSC block.
+ * @tcs_use_ok:			sets TCS use-OK high to allow RSC to use it.
+ * @mode2_entry:		Request to enter mode2 when all clients are
+ *                              requesting power collapse.
+ * @mode2_exit:			Request to exit mode2 when one of the clients
+ *                              votes against power collapse.
+ * @is_amc_mode:		Check current amc mode status
+ * @state_update:		Enable/override the solver based on rsc state
+ *                              status (command/video)
+ * @mode_ctrl:			reads or updates the current mode status
+ *                              (mode0/1/2)
+ * @debug_show: Show current debug status.
+ */
+struct sde_rsc_hw_ops {
+ int (*init)(struct sde_rsc_priv *rsc);
+ int (*tcs_wait)(struct sde_rsc_priv *rsc);
+ int (*hw_vsync)(struct sde_rsc_priv *rsc, enum rsc_vsync_req request,
+ char *buffer, int buffer_size, u32 mode);
+ int (*tcs_use_ok)(struct sde_rsc_priv *rsc);
+ int (*mode2_entry)(struct sde_rsc_priv *rsc);
+ int (*mode2_exit)(struct sde_rsc_priv *rsc);
+ bool (*is_amc_mode)(struct sde_rsc_priv *rsc);
+ int (*state_update)(struct sde_rsc_priv *rsc, enum sde_rsc_state state);
+ int (*debug_show)(struct seq_file *s, struct sde_rsc_priv *rsc);
+ int (*mode_ctrl)(struct sde_rsc_priv *rsc, enum rsc_mode_req request,
+ char *buffer, int buffer_size, bool mode);
+};
+
+/**
+ * struct sde_rsc_cmd_config: provides the panel configuration to rsc
+ * when the client is in command mode. It is not required during
+ * video mode.
+ *
+ * @fps:	panel refresh rate (TE frequency) in frames per second
+ * @vtotal:	current vertical total (height + vbp + vfp)
+ * @jitter:	panel can set the jitter to wake up the rsc/solver early.
+ *              This value causes the mdp core to exit certain modes
+ *              early. Default is 5% jitter (DEFAULT_PANEL_JITTER).
+ * @prefill_lines:	max prefill lines based on panel
+ */
+struct sde_rsc_cmd_config {
+ u32 fps;
+ u32 vtotal;
+ u32 jitter;
+ u32 prefill_lines;
+};
+
+/**
+ * struct sde_rsc_timer_config: this is internal configuration between
+ * rsc and rsc_hw API.
+ *
+ * @static_wakeup_time_ns:	wrapper backoff time in nanoseconds
+ * @rsc_backoff_time_ns:	rsc backoff time in nanoseconds
+ * @pdc_backoff_time_ns:	pdc backoff time in nanoseconds
+ * @rsc_mode_threshold_time_ns:	rsc mode threshold time in nanoseconds
+ * @rsc_time_slot_0_ns:		mode-0 time slot threshold in nanoseconds
+ * @rsc_time_slot_1_ns:		mode-1 time slot threshold in nanoseconds
+ * @rsc_time_slot_2_ns:		mode-2 time slot threshold in nanoseconds
+ */
+struct sde_rsc_timer_config {
+ u32 static_wakeup_time_ns;
+
+ u32 rsc_backoff_time_ns;
+ u32 pdc_backoff_time_ns;
+ u32 rsc_mode_threshold_time_ns;
+ u32 rsc_time_slot_0_ns;
+ u32 rsc_time_slot_1_ns;
+ u32 rsc_time_slot_2_ns;
+};
+
+/**
+ * struct sde_rsc_priv: sde resource state coordinator (rsc) private handle
+ * @version: rsc sequence version
+ * @phandle: module power handle for clocks
+ * @pclient: module power client of phandle
+ * @fs: "MDSS GDSC" handle
+ *
+ * @drv_io: sde drv io data mapping
+ * @wrapper_io: wrapper io data mapping
+ *
+ * @client_list: current rsc client list handle
+ * @client_lock: current rsc client synchronization lock
+ *
+ * @timer_config:	current rsc timer configuration
+ * @cmd_config:		current panel config
+ * @current_state:	current rsc state (video/command), solver
+ *			override/enabled.
+ * @debug_mode:		enables the logging for each register read/write
+ * @debugfs_root:	debugfs file system root node
+ *
+ * @hw_ops:		sde rsc hardware operations
+ * @power_collapse:	if all clients are in IDLE state then rsc enters
+ *			the mode2 state and enables power collapse
+ * @power_collapse_block: by default, rsc moves to mode-2 if all clients
+ *			are in idle state; this boolean blocks that entry
+ * @primary_client:	the client which is allowed to make command state
+ *			requests and ab/ib votes on the display rsc
+ * @master_drm:		the primary client waits for vsync on this drm
+ *			object, based on crtc id
+ */
+struct sde_rsc_priv {
+ u32 version;
+ struct sde_power_handle phandle;
+ struct sde_power_client *pclient;
+ struct regulator *fs;
+
+ struct dss_io_data drv_io;
+ struct dss_io_data wrapper_io;
+
+ struct list_head client_list;
+ struct mutex client_lock;
+
+ struct sde_rsc_timer_config timer_config;
+ struct sde_rsc_cmd_config cmd_config;
+ u32 current_state;
+
+ u32 debug_mode;
+ struct dentry *debugfs_root;
+
+ struct sde_rsc_hw_ops hw_ops;
+ bool power_collapse;
+ bool power_collapse_block;
+ struct sde_rsc_client *primary_client;
+
+ struct drm_device *master_drm;
+};
+
+/**
+ * sde_rsc_client_create() - create the client for sde rsc.
+ * Different displays like DSI, HDMI, DP, WB, etc. should call this
+ * API to register their vote with rpmh. They still need to vote on the
+ * power handle to get the clocks.
+ *
+ * @rsc_index:	 A client will be created on this RSC. As of now only
+ *		 SDE_RSC_INDEX is a valid rsc index.
+ * @name:	 Caller needs to provide some valid string to identify
+ *		 the client. "primary", "dp" and "hdmi" are suggested names.
+ * @is_primary_display: Whether this client is the primary display client.
+ *		 Primary client votes will be redirected to the
+ *		 display rsc.
+ *
+ * Return: client node pointer.
+ */
+struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index, char *name,
+ bool is_primary_display);
+
+/**
+ * sde_rsc_client_destroy() - Destroy the sde rsc client.
+ *
+ * @client: Client pointer provided by sde_rsc_client_create().
+ *
+ * Return: none
+ */
+void sde_rsc_client_destroy(struct sde_rsc_client *client);
+
+/**
+ * sde_rsc_client_state_update() - rsc client state update
+ * Video mode and command mode are supported. A client needs to set
+ * this property at panel config time. A switching client can set the
+ * property later to change the state.
+ *
+ * @client: Client pointer provided by sde_rsc_client_create().
+ * @state: Client state - video/cmd
+ * @config: fps, vtotal, porches, etc configuration for command mode
+ * panel
+ * @crtc_id: current client's crtc id
+ *
+ * Return: error code.
+ */
+int sde_rsc_client_state_update(struct sde_rsc_client *client,
+ enum sde_rsc_state state,
+ struct sde_rsc_cmd_config *config, int crtc_id);
+
+/**
+ * sde_rsc_client_vote() - ab/ib vote from rsc client
+ *
+ * @caller_client: Client pointer provided by sde_rsc_client_create().
+ * @ab_vote:	 aggregated bandwidth vote from the client.
+ * @ib_vote:	 instant bandwidth vote from the client.
+ *
+ * Return: error code.
+ */
+int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
+ u64 ab_vote, u64 ib_vote);
+
+/**
+ * sde_rsc_hw_register() - register hardware API
+ *
+ * @rsc: rsc private handle to register the hardware ops on.
+ *
+ * Return: error code.
+ */
+int sde_rsc_hw_register(struct sde_rsc_priv *rsc);
+
+
+#endif /* _SDE_RSC_H_ */
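
To make the nanosecond-to-XO-tick conversion in sde_rsc_timer_calculate()
concrete, here is a small standalone sketch (userspace C, illustrative only)
that reproduces the static wakeup time arithmetic for the default 640x480@60
panel, using the same constants as the driver.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t ticks_per_sec = 1000000000ULL;
	const uint64_t cxo_period_ns = 52;	/* ~19.2 MHz crystal */
	uint64_t frame_ns, jitter_ns, line_ns, prefill_ns;
	int64_t total_ns;

	frame_ns = ticks_per_sec / 60;		/* 16666666 ns per frame */
	jitter_ns = frame_ns * 5 / 100;		/* default 5% jitter */
	line_ns = frame_ns / (480 + 16);	/* vtotal = 496 lines */
	prefill_ns = line_ns * 16;		/* 16 prefill lines */

	/* time left in the frame once jitter and prefill are reserved */
	total_ns = frame_ns - jitter_ns - prefill_ns;

	/* the wrapper register is programmed in XO ticks, not ns */
	printf("static wakeup time: %lld ticks\n",
		(long long)(total_ns / cxo_period_ns));
	return 0;
}
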
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
new file mode 100644
index 0000000..8dd04bd
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -0,0 +1,681 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[sde_rsc_hw:%s:%d]: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+
+#include "sde_rsc.h"
+
+/* display rsc offset */
+#define SDE_RSCC_PDC_SEQ_START_ADDR_REG_OFFSET_DRV0 0x020
+#define SDE_RSCC_PDC_MATCH_VALUE_LO_REG_OFFSET_DRV0 0x024
+#define SDE_RSCC_PDC_MATCH_VALUE_HI_REG_OFFSET_DRV0 0x028
+#define SDE_RSCC_PDC_SLAVE_ID_DRV0 0x02c
+#define SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0 0x410
+#define SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0 0x414
+#define SDE_RSCC_SEQ_MEM_0_DRV0 0x600
+#define SDE_RSCC_SOLVER_OVERRIDE_CTRL_DRV0 0xc14
+#define SDE_RSCC_ERROR_IRQ_STATUS_DRV0 0x0d0
+#define SDE_RSCC_SEQ_BUSY_DRV0 0x404
+#define SDE_RSCC_SOLVER_STATUS0_DRV0 0xc24
+#define SDE_RSCC_SOLVER_STATUS1_DRV0 0xc28
+#define SDE_RSCC_SOLVER_STATUS2_DRV0 0xc2c
+#define SDE_RSCC_AMC_TCS_MODE_IRQ_STATUS_DRV0 0x1c00
+
+#define SDE_RSCC_SOFT_WAKEUP_TIME_LO_DRV0 0xc04
+#define SDE_RSCC_MAX_IDLE_DURATION_DRV0 0xc0c
+#define SDE_RSC_SOLVER_TIME_SLOT_TABLE_0_DRV0 0x1000
+#define SDE_RSC_SOLVER_TIME_SLOT_TABLE_1_DRV0 0x1004
+#define SDE_RSC_SOLVER_TIME_SLOT_TABLE_2_DRV0 0x1008
+#define SDE_RSC_SOLVER_TIME_SLOT_TABLE_3_DRV0 0x100c
+
+#define SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0 0xc20
+#define SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT0_PRI0_DRV0 0x1080
+#define SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT1_PRI0_DRV0 0x1100
+#define SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT1_PRI3_DRV0 0x110c
+#define SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT2_PRI0_DRV0 0x1180
+#define SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT2_PRI3_DRV0 0x118c
+
+#define SDE_RSC_SOLVER_OVERRIDE_MODE_DRV0 0xc18
+#define SDE_RSC_SOLVER_OVERRIDE_CTRL_DRV0 0xc14
+#define SDE_RSC_TIMERS_CONSIDERED_DRV0 0xc00
+#define SDE_RSC_SOLVER_OVERRIDE_IDLE_TIME_DRV0 0xc1c
+
+#define SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE0 0xc30
+#define SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE0 0xc34
+#define SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE0 0xc38
+#define SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE0 0xc40
+
+#define SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE1 0xc4c
+#define SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE1 0xc50
+#define SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE1 0xc54
+#define SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE1 0xc5c
+
+#define SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE2 0xc68
+#define SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE2 0xc6c
+#define SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE2 0xc70
+#define SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE2 0xc78
+
+#define SDE_RSCC_TCS_DRV0_CONTROL 0x1c14
+
+#define SDE_RSCC_WRAPPER_CTRL 0x000
+#define SDE_RSCC_WRAPPER_OVERRIDE_CTRL 0x004
+#define SDE_RSCC_WRAPPER_STATIC_WAKEUP_0 0x008
+#define SDE_RSCC_WRAPPER_RSCC_MODE_THRESHOLD 0x00c
+#define SDE_RSCC_WRAPPER_DEBUG_BUS 0x010
+#define SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP0 0x018
+#define SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP1 0x01c
+#define SDE_RSCC_SPARE_PWR_EVENT 0x020
+#define SDE_RSCC_PWR_CTRL 0x024
+
+/* qtimer offset */
+#define SDE_RSCC_QTMR_AC_HW_FRAME_SEL_1 0x1FE0
+#define SDE_RSCC_QTMR_AC_HW_FRAME_SEL_2 0x1FF0
+#define SDE_RSCC_QTMR_AC_CNTACR0_FG0 0x1040
+#define SDE_RSCC_QTMR_AC_CNTACR1_FG0 0x1044
+#define SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO 0x2020
+#define SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI 0x2024
+#define SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO 0x3020
+#define SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI 0x3024
+#define SDE_RSCC_F0_QTMR_V1_CNTP_CTL 0x202C
+#define SDE_RSCC_F1_QTMR_V1_CNTP_CTL 0x302C
+
+/* mdp and dsi clocks in clock gate state */
+#define DISP_MDP_DSI_CLK_GATE 0x7f0
+
+/* mdp and dsi clocks in clock ungate state */
+#define MDSS_CORE_GDSCR 0x0
+#define DISP_MDP_DSI_CLK_UNGATE 0x5000
+
+#define MAX_CHECK_LOOPS 500
+
+static int rsc_hw_qtimer_init(struct sde_rsc_priv *rsc)
+{
+ pr_debug("rsc hardware qtimer init\n");
+
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_QTMR_AC_HW_FRAME_SEL_1,
+ 0xffffffff, rsc->debug_mode);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_QTMR_AC_HW_FRAME_SEL_2,
+ 0xffffffff, rsc->debug_mode);
+
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_QTMR_AC_CNTACR0_FG0,
+ 0x1, rsc->debug_mode);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_QTMR_AC_CNTACR1_FG0,
+ 0x1, rsc->debug_mode);
+
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
+ 0xffffffff, rsc->debug_mode);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
+ 0xffffffff, rsc->debug_mode);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
+ 0xffffffff, rsc->debug_mode);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI,
+ 0xffffffff, rsc->debug_mode);
+
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CTL,
+ 0x1, rsc->debug_mode);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CTL,
+ 0x1, rsc->debug_mode);
+
+ return 0;
+}
+
+static int rsc_hw_pdc_init(struct sde_rsc_priv *rsc)
+{
+ pr_debug("rsc hardware pdc init\n");
+
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_PDC_SEQ_START_ADDR_REG_OFFSET_DRV0,
+ 0x4520, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_PDC_MATCH_VALUE_LO_REG_OFFSET_DRV0,
+ 0x4510, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_PDC_MATCH_VALUE_HI_REG_OFFSET_DRV0,
+ 0x4514, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_PDC_SLAVE_ID_DRV0,
+ 0x1, rsc->debug_mode);
+
+ return 0;
+}
+
+static int rsc_hw_wrapper_init(struct sde_rsc_priv *rsc)
+{
+ pr_debug("rsc hardware wrapper init\n");
+
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_STATIC_WAKEUP_0,
+ rsc->timer_config.static_wakeup_time_ns, rsc->debug_mode);
+
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_RSCC_MODE_THRESHOLD,
+ rsc->timer_config.rsc_mode_threshold_time_ns, rsc->debug_mode);
+
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+ BIT(8), rsc->debug_mode);
+ return 0;
+}
+
+static int rsc_hw_seq_memory_init(struct sde_rsc_priv *rsc)
+{
+ pr_debug("rsc sequencer memory init\n");
+
+ /* Mode - 0 sequence */
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x0,
+ 0xe0a88bab, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x4,
+ 0x8babec39, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x8,
+ 0x8bab2088, rsc->debug_mode);
+
+ /* Mode - 1 sequence */
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0xc,
+ 0x39e038a8, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x10,
+ 0x888babec, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x14,
+ 0xaaa8a020, rsc->debug_mode);
+
+ /* Mode - 2 sequence */
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x18,
+ 0xe1a138eb, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x1c,
+ 0xa2ede081, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x20,
+ 0x8a3982e2, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x24,
+ 0xa92088ea, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x28,
+ 0x89e6a6e9, rsc->debug_mode);
+
+ /* tcs sleep sequence */
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x2c,
+ 0xa7e9a920, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
+ 0x002089e7, rsc->debug_mode);
+
+ /* branch address */
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
+ 0x27, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0,
+ 0x2d, rsc->debug_mode);
+
+ return 0;
+}
+
+static int rsc_hw_solver_init(struct sde_rsc_priv *rsc)
+{
+ const u32 mode_0_start_addr = 0x0;
+ const u32 mode_1_start_addr = 0xa;
+ const u32 mode_2_start_addr = 0x15;
+
+ pr_debug("rsc solver init\n");
+
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SOFT_WAKEUP_TIME_LO_DRV0,
+ 0x7FFFFFFF, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_MAX_IDLE_DURATION_DRV0,
+ 0xEFFFFFFF, rsc->debug_mode);
+
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_0_DRV0,
+ 0x0, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_1_DRV0,
+ rsc->timer_config.rsc_time_slot_0_ns, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_2_DRV0,
+ rsc->timer_config.rsc_time_slot_1_ns, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_3_DRV0,
+ rsc->timer_config.rsc_time_slot_2_ns, rsc->debug_mode);
+
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0,
+ 0x7, rsc->debug_mode);
+
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT0_PRI0_DRV0,
+ 0x0, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT1_PRI0_DRV0,
+ 0x1, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT1_PRI3_DRV0,
+ 0x1, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT2_PRI0_DRV0,
+ 0x2, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PRI_TABLE_SLOT2_PRI3_DRV0,
+ 0x2, rsc->debug_mode);
+
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_OVERRIDE_MODE_DRV0,
+ 0x0, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_OVERRIDE_CTRL_DRV0,
+ mode_0_start_addr, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_TIMERS_CONSIDERED_DRV0,
+ 0x1, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_OVERRIDE_IDLE_TIME_DRV0,
+ 0x01000010, rsc->debug_mode);
+
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE0,
+ mode_0_start_addr, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE0,
+ 0x80000010, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE0,
+ rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE0,
+ rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
+
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE1,
+ mode_1_start_addr, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE1,
+ 0x80000010, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE1,
+ rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE1,
+ rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
+
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM0_DRV0_MODE2,
+ mode_2_start_addr, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM1_DRV0_MODE2,
+ 0x80000010, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM2_DRV0_MODE2,
+ rsc->timer_config.rsc_backoff_time_ns, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_MODE_PARM3_DRV0_MODE2,
+ rsc->timer_config.pdc_backoff_time_ns, rsc->debug_mode);
+
+ return 0;
+}
+
+int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc)
+{
+ int rc;
+ int count, wrapper_status;
+
+ if (rsc->power_collapse_block)
+ return -EINVAL;
+
+ rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_FAST);
+ if (rc) {
+ pr_err("vdd reg fast mode set failed rc:%d\n", rc);
+ goto end;
+ }
+
+ rc = -EBUSY;
+ wrapper_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+ rsc->debug_mode);
+ wrapper_status |= BIT(3);
+ wrapper_status |= BIT(0);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+ wrapper_status, rsc->debug_mode);
+	/* make sure that mode-2 is triggered before the wait */
+ wmb();
+
+	/* wait for the vdd regulator to turn off before exiting */
+ for (count = MAX_CHECK_LOOPS; count > 0; count--) {
+ if (!regulator_is_enabled(rsc->fs)) {
+ rc = 0;
+ break;
+ }
+ usleep_range(1, 2);
+ }
+
+ if (rc)
+ pr_err("vdd fs is still enabled\n");
+
+end:
+ return rc;
+}
+
+int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc)
+{
+ int rc = -EBUSY;
+ int count, reg;
+
+	/* needs review against the HPG sequence */
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
+ 0x0, rsc->debug_mode);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI,
+ 0x0, rsc->debug_mode);
+
+ reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+ rsc->debug_mode);
+ reg &= ~BIT(3);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+ reg, rsc->debug_mode);
+
+ reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT,
+ rsc->debug_mode);
+ reg |= BIT(13);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT,
+ reg, rsc->debug_mode);
+
+	/* make sure that the mode-2 exit lands before the wait */
+ wmb();
+
+	/* wait for the vdd regulator to turn back on before exiting */
+ for (count = MAX_CHECK_LOOPS; count > 0; count--) {
+ if (regulator_is_enabled(rsc->fs)) {
+ rc = 0;
+ break;
+ }
+ usleep_range(1, 2);
+ }
+
+ reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT,
+ rsc->debug_mode);
+ reg &= ~BIT(13);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT,
+ reg, rsc->debug_mode);
+
+ if (rc)
+ pr_err("vdd reg is not enabled yet\n");
+
+ rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_NORMAL);
+ if (rc)
+ pr_err("vdd reg normal mode set failed rc:%d\n", rc);
+
+ return rc;
+}
+
+static int sde_rsc_state_update(struct sde_rsc_priv *rsc,
+ enum sde_rsc_state state)
+{
+ int rc = 0;
+ int reg;
+
+ if (rsc->power_collapse) {
+ rc = sde_rsc_mode2_exit(rsc);
+ if (rc)
+ pr_err("power collapse: mode2 exit failed\n");
+ else
+ rsc->power_collapse = false;
+ }
+
+ switch (state) {
+ case SDE_RSC_CMD_STATE:
+ pr_debug("command mode handling\n");
+
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+ 0x1, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SOLVER_OVERRIDE_CTRL_DRV0,
+ 0x0, rsc->debug_mode);
+ reg = dss_reg_r(&rsc->wrapper_io,
+ SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
+ reg |= (BIT(0) | BIT(8));
+ reg &= ~(BIT(1) | BIT(2) | BIT(3) | BIT(6) | BIT(7));
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+ reg, rsc->debug_mode);
+ /* make sure that solver is enabled */
+ wmb();
+ break;
+
+ case SDE_RSC_VID_STATE:
+ pr_debug("video mode handling\n");
+
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+ 0x1, rsc->debug_mode);
+ reg = dss_reg_r(&rsc->wrapper_io,
+ SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
+ reg |= BIT(8);
+ reg &= ~(BIT(1) | BIT(0));
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+ reg, rsc->debug_mode);
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SOLVER_OVERRIDE_CTRL_DRV0,
+ 0x1, rsc->debug_mode);
+		/* make sure that solver mode is overridden */
+ wmb();
+ break;
+
+ case SDE_RSC_IDLE_STATE:
+ rc = sde_rsc_mode2_entry(rsc);
+ if (rc)
+ pr_err("power collapse - mode 2 entry failed\n");
+ else
+ rsc->power_collapse = true;
+ break;
+
+ default:
+ pr_err("state:%d handling is not supported\n", state);
+ break;
+ }
+
+ return rc;
+}
+
+int rsc_hw_init(struct sde_rsc_priv *rsc)
+{
+ int rc = 0;
+
+ rc = rsc_hw_qtimer_init(rsc);
+ if (rc) {
+ pr_err("rsc hw qtimer init failed\n");
+ goto end;
+ }
+
+ rc = rsc_hw_wrapper_init(rsc);
+ if (rc) {
+ pr_err("rsc hw wrapper init failed\n");
+ goto end;
+ }
+
+ rc = rsc_hw_seq_memory_init(rsc);
+ if (rc) {
+ pr_err("rsc sequencer memory init failed\n");
+ goto end;
+ }
+
+ rc = rsc_hw_solver_init(rsc);
+ if (rc) {
+ pr_err("rsc solver init failed\n");
+ goto end;
+ }
+
+ rc = rsc_hw_pdc_init(rsc);
+ if (rc) {
+ pr_err("rsc hw pdc init failed\n");
+ goto end;
+ }
+
+ /* make sure that hw is initialized */
+ wmb();
+
+ pr_info("sde rsc init successfully done\n");
+end:
+ return rc;
+}
+
+int rsc_hw_mode_ctrl(struct sde_rsc_priv *rsc, enum rsc_mode_req request,
+ char *buffer, int buffer_size, bool mode)
+{
+ u32 blen = 0;
+ u32 slot_time;
+
+ switch (request) {
+ case MODE_READ:
+ if (!buffer || !buffer_size)
+ return blen;
+
+ blen = snprintf(buffer, buffer_size - blen,
+ "mode_status:0x%x\n",
+ dss_reg_r(&rsc->drv_io, SDE_RSCC_SOLVER_STATUS2_DRV0,
+ rsc->debug_mode));
+ break;
+
+ case MODE0_UPDATE:
+ slot_time = mode ? rsc->timer_config.rsc_time_slot_0_ns :
+ rsc->timer_config.rsc_time_slot_2_ns;
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_1_DRV0,
+ slot_time, rsc->debug_mode);
+ slot_time = mode ? rsc->timer_config.rsc_time_slot_1_ns :
+ rsc->timer_config.rsc_time_slot_2_ns;
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_2_DRV0,
+ slot_time, rsc->debug_mode);
+ rsc->power_collapse_block = mode;
+ break;
+
+ case MODE1_UPDATE:
+ slot_time = mode ? rsc->timer_config.rsc_time_slot_1_ns :
+ rsc->timer_config.rsc_time_slot_2_ns;
+ dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_TIME_SLOT_TABLE_2_DRV0,
+ slot_time, rsc->debug_mode);
+ rsc->power_collapse_block = mode;
+ break;
+
+ case MODE2_UPDATE:
+ rsc->power_collapse_block = mode;
+ break;
+
+ default:
+ break;
+ }
+
+ return blen;
+}
+
+int sde_rsc_debug_show(struct seq_file *s, struct sde_rsc_priv *rsc)
+{
+ seq_printf(s, "override ctrl:0x%x\n",
+ dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+ rsc->debug_mode));
+ seq_printf(s, "power ctrl:0x%x\n",
+ dss_reg_r(&rsc->wrapper_io, SDE_RSCC_PWR_CTRL,
+ rsc->debug_mode));
+	seq_printf(s, "vsync timestamp0:0x%x\n",
+ dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP0,
+ rsc->debug_mode));
+	seq_printf(s, "vsync timestamp1:0x%x\n",
+ dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP1,
+ rsc->debug_mode));
+
+ seq_printf(s, "error irq status:0x%x\n",
+ dss_reg_r(&rsc->drv_io, SDE_RSCC_ERROR_IRQ_STATUS_DRV0,
+ rsc->debug_mode));
+
+ seq_printf(s, "seq busy status:0x%x\n",
+ dss_reg_r(&rsc->drv_io, SDE_RSCC_SEQ_BUSY_DRV0,
+ rsc->debug_mode));
+
+ seq_printf(s, "solver override ctrl status:0x%x\n",
+ dss_reg_r(&rsc->drv_io, SDE_RSCC_SOLVER_OVERRIDE_CTRL_DRV0,
+ rsc->debug_mode));
+ seq_printf(s, "solver override status:0x%x\n",
+ dss_reg_r(&rsc->drv_io, SDE_RSCC_SOLVER_STATUS0_DRV0,
+ rsc->debug_mode));
+ seq_printf(s, "solver timeslot status:0x%x\n",
+ dss_reg_r(&rsc->drv_io, SDE_RSCC_SOLVER_STATUS1_DRV0,
+ rsc->debug_mode));
+ seq_printf(s, "solver mode status:0x%x\n",
+ dss_reg_r(&rsc->drv_io, SDE_RSCC_SOLVER_STATUS2_DRV0,
+ rsc->debug_mode));
+
+ seq_printf(s, "amc status:0x%x\n",
+ dss_reg_r(&rsc->drv_io, SDE_RSCC_AMC_TCS_MODE_IRQ_STATUS_DRV0,
+ rsc->debug_mode));
+
+ return 0;
+}
+
+int rsc_hw_vsync(struct sde_rsc_priv *rsc, enum rsc_vsync_req request,
+ char *buffer, int buffer_size, u32 mode)
+{
+ u32 blen = 0, reg;
+
+ switch (request) {
+ case VSYNC_READ:
+ if (!buffer || !buffer_size)
+ return blen;
+
+ blen = snprintf(buffer, buffer_size - blen, "vsync0:0x%x\n",
+				dss_reg_r(&rsc->wrapper_io,
+ SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP0,
+ rsc->debug_mode));
+ if (blen >= buffer_size)
+ return blen;
+
+ blen += snprintf(buffer + blen, buffer_size - blen,
+ "vsync1:0x%x\n",
+				dss_reg_r(&rsc->wrapper_io,
+ SDE_RSCC_WRAPPER_VSYNC_TIMESTAMP1,
+ rsc->debug_mode));
+ break;
+
+ case VSYNC_ENABLE:
+		/* select the vsync source and enable the debug bus output */
+		reg = BIT(8) | BIT(9) | ((mode & 0x7) << 10);
+		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_BUS,
+						reg, rsc->debug_mode);
+ break;
+
+ case VSYNC_DISABLE:
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_BUS,
+ 0x0, rsc->debug_mode);
+ break;
+ }
+
+ return blen;
+}
+
+bool rsc_hw_is_amc_mode(struct sde_rsc_priv *rsc)
+{
+ return dss_reg_r(&rsc->drv_io, SDE_RSCC_TCS_DRV0_CONTROL,
+ rsc->debug_mode) & BIT(16);
+}
+
+int rsc_hw_tcs_wait(struct sde_rsc_priv *rsc)
+{
+ int rc = -EBUSY;
+ int count, seq_status;
+
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+ 0x0, rsc->debug_mode);
+ seq_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+ rsc->debug_mode) & BIT(1);
+ /* if seq busy - set TCS use OK to high and wait for 200us */
+ if (seq_status) {
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+ 0x1, rsc->debug_mode);
+ usleep_range(100, 200);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+ 0x0, rsc->debug_mode);
+ }
+
+ /* check for sequence running status before exiting */
+ for (count = MAX_CHECK_LOOPS; count > 0; count--) {
+ seq_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+ rsc->debug_mode) & BIT(1);
+ if (!seq_status) {
+ rc = 0;
+ break;
+ }
+ usleep_range(1, 2);
+ }
+
+ return rc;
+}
+
+int rsc_hw_tcs_use_ok(struct sde_rsc_priv *rsc)
+{
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+ 0x1, rsc->debug_mode);
+ return 0;
+}
+
+int sde_rsc_hw_register(struct sde_rsc_priv *rsc)
+{
+ pr_debug("rsc hardware register\n");
+
+ rsc->hw_ops.init = rsc_hw_init;
+
+ rsc->hw_ops.tcs_wait = rsc_hw_tcs_wait;
+ rsc->hw_ops.tcs_use_ok = rsc_hw_tcs_use_ok;
+ rsc->hw_ops.is_amc_mode = rsc_hw_is_amc_mode;
+
+ rsc->hw_ops.mode2_entry = sde_rsc_mode2_entry;
+ rsc->hw_ops.mode2_exit = sde_rsc_mode2_exit;
+
+ rsc->hw_ops.hw_vsync = rsc_hw_vsync;
+ rsc->hw_ops.state_update = sde_rsc_state_update;
+ rsc->hw_ops.debug_show = sde_rsc_debug_show;
+ rsc->hw_ops.mode_ctrl = rsc_hw_mode_ctrl;
+
+ return 0;
+}
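
All of the status waits above (TCS busy, mode-2 entry and exit) follow the same
bounded-poll shape. A condensed sketch of that pattern, with a hypothetical
check() callback standing in for the per-register status read; MAX_CHECK_LOOPS
and the 1-2us sleep match the driver.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MAX_CHECK_LOOPS 500

static int rsc_poll_until(bool (*check)(void *ctx), void *ctx)
{
	int count;

	for (count = MAX_CHECK_LOOPS; count > 0; count--) {
		if (check(ctx))
			return 0;	/* condition met */
		usleep_range(1, 2);
	}

	/* roughly 0.5-1ms elapsed without the condition becoming true */
	return -EBUSY;
}
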
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index fb16070..4a4f953 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -205,8 +205,8 @@
}
if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
- x >= (crtc->x + crtc->mode.crtc_hdisplay) ||
- y >= (crtc->y + crtc->mode.crtc_vdisplay))
+ x >= (crtc->x + crtc->mode.hdisplay) ||
+ y >= (crtc->y + crtc->mode.vdisplay))
goto out_of_bounds;
x += xorigin;
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index f04baac..5fde4a5 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -156,6 +156,7 @@
{ adreno_is_a530, a530_efuse_leakage },
{ adreno_is_a530, a530_efuse_speed_bin },
{ adreno_is_a505, a530_efuse_speed_bin },
+ { adreno_is_a512, a530_efuse_speed_bin },
};
static void a5xx_check_features(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/adreno_cp_parser.c b/drivers/gpu/msm/adreno_cp_parser.c
index 2007c10..efc0868 100644
--- a/drivers/gpu/msm/adreno_cp_parser.c
+++ b/drivers/gpu/msm/adreno_cp_parser.c
@@ -53,7 +53,7 @@
static int adreno_ib_find_objs(struct kgsl_device *device,
struct kgsl_process_private *process,
uint64_t gpuaddr, uint64_t dwords,
- int obj_type,
+ uint64_t ib2base, int obj_type,
struct adreno_ib_object_list *ib_obj_list,
int ib_level);
@@ -484,7 +484,7 @@
ret = adreno_ib_find_objs(device, process,
ib_parse_vars->set_draw_groups[i].cmd_stream_addr,
ib_parse_vars->set_draw_groups[i].cmd_stream_dwords,
- SNAPSHOT_GPU_OBJECT_DRAW,
+ 0, SNAPSHOT_GPU_OBJECT_DRAW,
ib_obj_list, 2);
if (ret)
break;
@@ -687,8 +687,8 @@
if (cmd_stream_dwords)
ret = adreno_ib_find_objs(device, process,
cmd_stream_addr, cmd_stream_dwords,
- SNAPSHOT_GPU_OBJECT_DRAW, ib_obj_list,
- 2);
+ 0, SNAPSHOT_GPU_OBJECT_DRAW,
+ ib_obj_list, 2);
if (ret)
break;
continue;
@@ -700,7 +700,7 @@
gpuaddr = gpuaddr << 32 | ptr[i + 1];
ret = adreno_ib_find_objs(device, process,
gpuaddr, (ptr[i] & 0x0000FFFF),
- SNAPSHOT_GPU_OBJECT_IB,
+ 0, SNAPSHOT_GPU_OBJECT_IB,
ib_obj_list, 2);
if (ret)
break;
@@ -763,7 +763,7 @@
if (flags & 0x8) {
ret = adreno_ib_find_objs(device, process,
ptr[i + 1], (ptr[i] & 0x0000FFFF),
- SNAPSHOT_GPU_OBJECT_IB,
+ 0, SNAPSHOT_GPU_OBJECT_IB,
ib_obj_list, 2);
if (ret)
break;
@@ -778,6 +778,7 @@
* @process: Process in which the IB is allocated
* @gpuaddr: IB2 gpuaddr
* @dwords: IB2 size in dwords
+ * @ib2base: Base address of active IB2
* @ib_obj_list: List of objects found in IB
* @ib_level: The level from which function is called, either from IB1 or IB2
*
@@ -786,7 +787,7 @@
*/
static int adreno_cp_parse_ib2(struct kgsl_device *device,
struct kgsl_process_private *process,
- uint64_t gpuaddr, uint64_t dwords,
+ uint64_t gpuaddr, uint64_t dwords, uint64_t ib2base,
struct adreno_ib_object_list *ib_obj_list,
int ib_level)
{
@@ -798,6 +799,10 @@
*/
if (ib_level == 2)
return -EINVAL;
+
+ /* Save current IB2 statically */
+ if (ib2base == gpuaddr)
+ kgsl_snapshot_push_object(process, gpuaddr, dwords);
/*
* only try to find sub objects iff this IB has
* not been processed already
@@ -812,7 +817,7 @@
return 0;
}
- return adreno_ib_find_objs(device, process, gpuaddr, dwords,
+ return adreno_ib_find_objs(device, process, gpuaddr, dwords, ib2base,
SNAPSHOT_GPU_OBJECT_IB, ib_obj_list, 2);
}
@@ -821,6 +826,7 @@
* @device: The device pointer on which the IB executes
* @process: The process in which the IB and all contained objects are mapped.
* @gpuaddr: The gpu address of the IB
+ * @ib2base: IB2 base address
* @dwords: Size of ib in dwords
* @obj_type: The object type can be either an IB or a draw state sequence
* @ib_obj_list: The list in which the IB and the objects in it are added.
@@ -833,7 +839,7 @@
static int adreno_ib_find_objs(struct kgsl_device *device,
struct kgsl_process_private *process,
uint64_t gpuaddr, uint64_t dwords,
- int obj_type,
+ uint64_t ib2base, int obj_type,
struct adreno_ib_object_list *ib_obj_list,
int ib_level)
{
@@ -909,7 +915,7 @@
uint64_t size = src[i + 2];
ret = adreno_cp_parse_ib2(device, process,
- gpuaddrib2, size,
+ gpuaddrib2, size, ib2base,
ib_obj_list, ib_level);
if (ret)
goto done;
@@ -936,7 +942,7 @@
gpuaddrib2 = gpuaddrib2 << 32 | src[i + 1];
ret = adreno_cp_parse_ib2(device, process,
- gpuaddrib2, size,
+ gpuaddrib2, size, ib2base,
ib_obj_list, ib_level);
if (ret)
goto done;
@@ -988,6 +994,7 @@
* @process: The process in which the IB and all contained objects are mapped
* @gpuaddr: The gpu address of the IB
* @dwords: Size of ib in dwords
+ * @ib2base: Base address of active IB2
* @ib_obj_list: The list in which the IB and the objects in it are added.
*
* Find all the memory objects that an IB needs for execution and place
@@ -999,7 +1006,7 @@
*/
int adreno_ib_create_object_list(struct kgsl_device *device,
struct kgsl_process_private *process,
- uint64_t gpuaddr, uint64_t dwords,
+ uint64_t gpuaddr, uint64_t dwords, uint64_t ib2base,
struct adreno_ib_object_list **out_ib_obj_list)
{
int ret = 0;
@@ -1022,7 +1029,7 @@
return -ENOMEM;
}
- ret = adreno_ib_find_objs(device, process, gpuaddr, dwords,
+ ret = adreno_ib_find_objs(device, process, gpuaddr, dwords, ib2base,
SNAPSHOT_GPU_OBJECT_IB, ib_obj_list, 1);
/* Even if there was an error return the remaining objects found */
diff --git a/drivers/gpu/msm/adreno_cp_parser.h b/drivers/gpu/msm/adreno_cp_parser.h
index cdd983e..1fa46c1 100644
--- a/drivers/gpu/msm/adreno_cp_parser.h
+++ b/drivers/gpu/msm/adreno_cp_parser.h
@@ -179,7 +179,7 @@
int adreno_ib_create_object_list(
struct kgsl_device *device,
struct kgsl_process_private *process,
- uint64_t gpuaddr, uint64_t dwords,
+ uint64_t gpuaddr, uint64_t dwords, uint64_t ib2base,
struct adreno_ib_object_list **out_ib_obj_list);
void adreno_ib_destroy_obj_list(struct adreno_ib_object_list *ib_obj_list);
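The signature changes above all serve one purpose: the active IB2 base address now rides through every level of the recursive parse, so the parser can statically save the live IB2 the moment it reaches it (the kgsl_snapshot_push_object() call added in adreno_cp_parse_ib2). A simplified sketch of the shape of that recursion, with hypothetical names rather than the kgsl API:

	/* ib2base travels unchanged down the walk so any child buffer
	 * can be compared against it. */
	static void find_objs(u64 gpuaddr, u64 dwords, u64 ib2base)
	{
		if (gpuaddr == ib2base)
			save_statically(gpuaddr, dwords);  /* hypothetical hook */

		/* ... decode packets; recurse into each nested buffer
		 * with the same ib2base ... */
	}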
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index f6c9805..2d38a1a 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -167,6 +167,7 @@
* KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC so it is ok to cross the streams here.
*/
static const struct flag_entry context_priv[] = {
+ { KGSL_CONTEXT_PRIV_SUBMITTED, "submitted"},
{ KGSL_CONTEXT_PRIV_DETACHED, "detached"},
{ KGSL_CONTEXT_PRIV_INVALID, "invalid"},
{ KGSL_CONTEXT_PRIV_PAGEFAULT, "pagefault"},
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index f6b27f7..3fa38fa 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2330,10 +2330,6 @@
if (adreno_drawqueue_is_empty(drawqueue))
return count;
- /* Don't update the drawqueue timeout if we are about to preempt out */
- if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE))
- return count;
-
/* Don't update the drawqueue timeout if it isn't active */
if (!drawqueue_is_current(drawqueue))
return count;
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index cd7ffe7..9f4e185 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -215,10 +215,12 @@
int ret = 0;
/*
- * If the context is invalid then return immediately - we may end up
- * waiting for a timestamp that will never come
+ * If the context is invalid or has not submitted any commands to the
+ * GPU, return immediately - we may end up waiting for a timestamp
+ * that will never come
*/
- if (kgsl_context_invalid(context))
+ if (kgsl_context_invalid(context) ||
+ !test_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv))
goto done;
trace_adreno_drawctxt_wait_start(drawctxt->rb->id, context->id,
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index c81ea69..a7068e1 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -638,6 +638,9 @@
static void _power_counter_enable_alwayson(struct adreno_device *adreno_dev,
struct adreno_perfcounters *counters)
{
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
+ return;
+
kgsl_regwrite(KGSL_DEVICE(adreno_dev),
A5XX_GPMU_ALWAYS_ON_COUNTER_RESET, 1);
counters->groups[KGSL_PERFCOUNTER_GROUP_ALWAYSON_PWR].regs[0].value = 0;
@@ -674,6 +677,9 @@
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_perfcount_register *reg;
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
+ return;
+
reg = &counters->groups[group].regs[counter];
kgsl_regwrite(device, reg->select, countable);
kgsl_regwrite(device, A5XX_GPMU_POWER_COUNTER_ENABLE, 1);
@@ -927,6 +933,9 @@
struct adreno_perfcount_register *reg;
unsigned int lo = 0, hi = 0;
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
+ return 0;
+
reg = &group->regs[counter];
kgsl_regread(device, reg->offset, &lo);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 2aa9b00..78182b7 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -938,6 +938,7 @@
drawobj->timestamp, time);
if (!ret) {
+ set_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv);
cmdobj->global_ts = drawctxt->internal_timestamp;
/* Put the timevalues in the profiling buffer */
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index f17d349..92b541d 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -27,8 +27,6 @@
#define SNAPSHOT_OBJ_BUFSIZE 64
-#define SNAPSHOT_OBJ_TYPE_IB 0
-
/* Used to print error message if an IB has too many objects in it */
static int ib_max_objs;
@@ -53,8 +51,7 @@
}
/* Push a new buffer object onto the list */
-static void push_object(int type,
- struct kgsl_process_private *process,
+void kgsl_snapshot_push_object(struct kgsl_process_private *process,
uint64_t gpuaddr, uint64_t dwords)
{
int index;
@@ -101,7 +98,6 @@
}
/* Put it on the list of things to parse */
- objbuf[objbufptr].type = type;
objbuf[objbufptr].gpuaddr = gpuaddr;
objbuf[objbufptr].size = dwords << 2;
objbuf[objbufptr++].entry = entry;
@@ -112,8 +108,7 @@
* to be dumped
*/
-static int find_object(int type, uint64_t gpuaddr,
- struct kgsl_process_private *process)
+static int find_object(uint64_t gpuaddr, struct kgsl_process_private *process)
{
int index;
@@ -131,14 +126,12 @@
* @snapshot: The snapshot data.
* @process: The process to which the IB belongs
* @ib_obj_list: List of the IB objects
- * @ib2base: IB2 base address at time of the fault
*
* Returns 0 on success else error code
*/
static int snapshot_freeze_obj_list(struct kgsl_snapshot *snapshot,
struct kgsl_process_private *process,
- struct adreno_ib_object_list *ib_obj_list,
- uint64_t ib2base)
+ struct adreno_ib_object_list *ib_obj_list)
{
int ret = 0;
struct adreno_ib_object *ib_objs;
@@ -163,21 +156,15 @@
}
if (freeze) {
- /* Save current IB2 statically */
- if (ib2base == ib_objs->gpuaddr) {
- push_object(SNAPSHOT_OBJ_TYPE_IB,
- process, ib_objs->gpuaddr, ib_objs->size >> 2);
+ temp_ret = kgsl_snapshot_get_object(snapshot,
+ process, ib_objs->gpuaddr,
+ ib_objs->size,
+ ib_objs->snapshot_obj_type);
+ if (temp_ret < 0) {
+ if (ret >= 0)
+ ret = temp_ret;
} else {
- temp_ret = kgsl_snapshot_get_object(snapshot,
- process, ib_objs->gpuaddr,
- ib_objs->size,
- ib_objs->snapshot_obj_type);
- if (temp_ret < 0) {
- if (ret >= 0)
- ret = temp_ret;
- } else {
- snapshot_frozen_objsize += temp_ret;
- }
+ snapshot_frozen_objsize += temp_ret;
}
}
}
@@ -203,8 +190,7 @@
* list
*/
if (gpuaddr == snapshot->ib1base) {
- push_object(SNAPSHOT_OBJ_TYPE_IB, process,
- gpuaddr, dwords);
+ kgsl_snapshot_push_object(process, gpuaddr, dwords);
return;
}
@@ -213,7 +199,8 @@
return;
if (-E2BIG == adreno_ib_create_object_list(device, process,
- gpuaddr, dwords, &ib_obj_list))
+ gpuaddr, dwords, snapshot->ib2base,
+ &ib_obj_list))
ib_max_objs = 1;
if (ib_obj_list)
@@ -559,8 +546,7 @@
int index = -ENOENT;
if (!snapshot->ib1dumped)
- index = find_object(SNAPSHOT_OBJ_TYPE_IB, snapshot->ib1base,
- snapshot->process);
+ index = find_object(snapshot->ib1base, snapshot->process);
/* only do this for IB1 because the IB2's are part of IB1 objects */
if ((index != -ENOENT) &&
@@ -569,19 +555,19 @@
objbuf[index].entry->priv,
objbuf[index].gpuaddr,
objbuf[index].size >> 2,
+ snapshot->ib2base,
&ib_obj_list))
ib_max_objs = 1;
if (ib_obj_list) {
/* freeze the IB objects in the IB */
snapshot_freeze_obj_list(snapshot,
objbuf[index].entry->priv,
- ib_obj_list, snapshot->ib2base);
+ ib_obj_list);
adreno_ib_destroy_obj_list(ib_obj_list);
}
} else {
/* Get the IB2 index from parsed object */
- index = find_object(SNAPSHOT_OBJ_TYPE_IB, snapshot->ib2base,
- snapshot->process);
+ index = find_object(snapshot->ib2base, snapshot->process);
if (index != -ENOENT)
parse_ib(device, snapshot, snapshot->process,
@@ -624,6 +610,7 @@
struct adreno_ib_object_list *ib_obj_list;
struct kgsl_snapshot *snapshot;
struct kgsl_snapshot_object *obj;
+ struct kgsl_memdesc *memdesc;
if (meta == NULL || meta->snapshot == NULL || meta->obj == NULL) {
KGSL_CORE_ERR("snapshot: bad metadata");
@@ -631,13 +618,18 @@
}
snapshot = meta->snapshot;
obj = meta->obj;
+ memdesc = &obj->entry->memdesc;
+
+ /* If size is zero, get it from the memdesc size */
+ if (!obj->size)
+ obj->size = (memdesc->size - (obj->gpuaddr - memdesc->gpuaddr));
if (remain < (obj->size + sizeof(*header))) {
KGSL_CORE_ERR("snapshot: Not enough memory for the ib\n");
return 0;
}
- src = kgsl_gpuaddr_to_vaddr(&obj->entry->memdesc, obj->gpuaddr);
+ src = kgsl_gpuaddr_to_vaddr(memdesc, obj->gpuaddr);
if (src == NULL) {
KGSL_DRV_ERR(device,
"snapshot: Unable to map GPU memory object 0x%016llX into the kernel\n",
@@ -653,13 +645,14 @@
if (-E2BIG == adreno_ib_create_object_list(device,
obj->entry->priv,
obj->gpuaddr, obj->size >> 2,
+ snapshot->ib2base,
&ib_obj_list))
ib_max_objs = 1;
if (ib_obj_list) {
/* freeze the IB objects in the IB */
snapshot_freeze_obj_list(snapshot,
obj->entry->priv,
- ib_obj_list, meta->ib2base);
+ ib_obj_list);
adreno_ib_destroy_obj_list(ib_obj_list);
}
}
@@ -688,26 +681,18 @@
{
struct snapshot_ib_meta meta;
- switch (objbuf[obj].type) {
- case SNAPSHOT_OBJ_TYPE_IB:
- meta.snapshot = snapshot;
- meta.obj = &objbuf[obj];
- meta.ib1base = snapshot->ib1base;
- meta.ib1size = snapshot->ib1size;
- meta.ib2base = snapshot->ib2base;
- meta.ib2size = snapshot->ib2size;
+ meta.snapshot = snapshot;
+ meta.obj = &objbuf[obj];
+ meta.ib1base = snapshot->ib1base;
+ meta.ib1size = snapshot->ib1size;
+ meta.ib2base = snapshot->ib2base;
+ meta.ib2size = snapshot->ib2size;
- kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_IB_V2,
+ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_IB_V2,
snapshot, snapshot_ib, &meta);
- if (objbuf[obj].entry) {
- kgsl_memdesc_unmap(&(objbuf[obj].entry->memdesc));
- kgsl_mem_entry_put(objbuf[obj].entry);
- }
- break;
- default:
- KGSL_CORE_ERR("snapshot: Invalid snapshot object type: %d\n",
- objbuf[obj].type);
- break;
+ if (objbuf[obj].entry) {
+ kgsl_memdesc_unmap(&(objbuf[obj].entry->memdesc));
+ kgsl_mem_entry_put(objbuf[obj].entry);
}
}
@@ -910,10 +895,10 @@
* figure how often this really happens.
*/
- if (-ENOENT == find_object(SNAPSHOT_OBJ_TYPE_IB, snapshot->ib1base,
- snapshot->process) && snapshot->ib1size) {
- push_object(SNAPSHOT_OBJ_TYPE_IB, snapshot->process,
- snapshot->ib1base, snapshot->ib1size);
+ if (-ENOENT == find_object(snapshot->ib1base, snapshot->process) &&
+ snapshot->ib1size) {
+ kgsl_snapshot_push_object(snapshot->process, snapshot->ib1base,
+ snapshot->ib1size);
KGSL_CORE_ERR(
"CP_IB1_BASE not found in the ringbuffer.Dumping %x dwords of the buffer.\n",
snapshot->ib1size);
@@ -927,10 +912,9 @@
* correct size.
*/
- if (-ENOENT == find_object(SNAPSHOT_OBJ_TYPE_IB, snapshot->ib2base,
- snapshot->process)) {
- push_object(SNAPSHOT_OBJ_TYPE_IB, snapshot->process,
- snapshot->ib2base, snapshot->ib2size);
+ if (-ENOENT == find_object(snapshot->ib2base, snapshot->process)) {
+ kgsl_snapshot_push_object(snapshot->process, snapshot->ib2base,
+ snapshot->ib2size);
}
/*
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 56eae50..280e660 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -3627,6 +3627,9 @@
if (!IS_ALIGNED(offset | size, kgsl_memdesc_get_pagesize(memdesc)))
return false;
+ if (offset + size < offset)
+ return false;
+
if (!(flags & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS) &&
offset + size > memdesc->size)
return false;
@@ -3754,7 +3757,7 @@
break;
/* Sanity check initial range */
- if (obj.size == 0 ||
+ if (obj.size == 0 || obj.virtoffset + obj.size < obj.size ||
obj.virtoffset + obj.size > virt_entry->memdesc.size ||
!(IS_ALIGNED(obj.virtoffset | obj.size, pg_sz))) {
ret = -EINVAL;
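Both added checks guard the same failure mode: with unsigned 64-bit arithmetic, offset + size wraps silently, so a wrapped sum can slip past a plain upper-bound comparison. A minimal sketch of why the explicit wrap test must come first:

	#include <linux/types.h>
	#include <linux/kernel.h>

	static bool range_is_valid(u64 offset, u64 size, u64 total)
	{
		if (offset + size < offset)	/* the sum wrapped */
			return false;
		return offset + size <= total;
	}

	/* e.g. offset = U64_MAX - 4, size = 64: the sum wraps to 59 and
	 * would pass the "<= total" check alone. */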
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index ae164bc..556809c 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -325,6 +325,7 @@
/**
* enum bits for struct kgsl_context.priv
+ * @KGSL_CONTEXT_PRIV_SUBMITTED - The context has submitted commands to the GPU.
* @KGSL_CONTEXT_PRIV_DETACHED - The context has been destroyed by userspace
* and is no longer using the gpu.
* @KGSL_CONTEXT_PRIV_INVALID - The context has been destroyed by the kernel
@@ -334,7 +335,8 @@
* reserved for devices specific use.
*/
enum kgsl_context_priv {
- KGSL_CONTEXT_PRIV_DETACHED = 0,
+ KGSL_CONTEXT_PRIV_SUBMITTED = 0,
+ KGSL_CONTEXT_PRIV_DETACHED,
KGSL_CONTEXT_PRIV_INVALID,
KGSL_CONTEXT_PRIV_PAGEFAULT,
KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC = 16,
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index eaf0995..b32cb63 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -813,6 +813,13 @@
ptname = MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ?
KGSL_MMU_GLOBAL_PT : tid;
+ /*
+ * Log the trace before searching for the faulting address in the
+ * free list, as the search takes quite a long time and would delay
+ * the trace unnecessarily.
+ */
+ trace_kgsl_mmu_pagefault(ctx->kgsldev, addr,
+ ptname, write ? "write" : "read");
if (test_bit(KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE,
&adreno_dev->ft_pf_policy))
@@ -849,8 +856,6 @@
}
}
- trace_kgsl_mmu_pagefault(ctx->kgsldev, addr,
- ptname, write ? "write" : "read");
/*
* We do not want the h/w to resume fetching data from an iommu
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index ee38f93b..fc5f5a8 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -925,11 +925,15 @@
"qcom,enable-midframe-timer")) {
kgsl_midframe = kzalloc(
sizeof(struct kgsl_midframe_info), GFP_KERNEL);
- hrtimer_init(&kgsl_midframe->timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- kgsl_midframe->timer.function =
- kgsl_pwrscale_midframe_timer;
- kgsl_midframe->device = device;
+ if (kgsl_midframe) {
+ hrtimer_init(&kgsl_midframe->timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ kgsl_midframe->timer.function =
+ kgsl_pwrscale_midframe_timer;
+ kgsl_midframe->device = device;
+ } else
+ KGSL_PWR_ERR(device,
+ "Failed to enable-midframe-timer feature\n");
}
/*
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index 4b1b5bc..40d239c 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -158,8 +158,10 @@
header->osid = KGSL_SNAPSHOT_OS_LINUX_V3;
/* Get the kernel build information */
- strlcpy(header->release, utsname()->release, sizeof(header->release));
- strlcpy(header->version, utsname()->version, sizeof(header->version));
+ strlcpy(header->release, init_utsname()->release,
+ sizeof(header->release));
+ strlcpy(header->version, init_utsname()->version,
+ sizeof(header->version));
/* Get the Unix time for the timestamp */
header->seconds = get_seconds();
diff --git a/drivers/gpu/msm/kgsl_snapshot.h b/drivers/gpu/msm/kgsl_snapshot.h
index e2ded87..2cb8b8f 100644
--- a/drivers/gpu/msm/kgsl_snapshot.h
+++ b/drivers/gpu/msm/kgsl_snapshot.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -255,4 +255,6 @@
__u64 size; /* Size of the object (in dwords) */
} __packed;
+void kgsl_snapshot_push_object(struct kgsl_process_private *process,
+ uint64_t gpuaddr, uint64_t dwords);
#endif
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 16f91c8..5fb4c6d 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -39,7 +39,7 @@
* vmbus_setevent- Trigger an event notification on the specified
* channel.
*/
-static void vmbus_setevent(struct vmbus_channel *channel)
+void vmbus_setevent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorpage;
@@ -65,6 +65,7 @@
vmbus_set_event(channel);
}
}
+EXPORT_SYMBOL_GPL(vmbus_setevent);
/*
* vmbus_open - Open the specified channel.
@@ -635,8 +636,6 @@
u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
struct kvec bufferlist[3];
u64 aligned_data = 0;
- int ret;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
int num_vecs = ((bufferlen != 0) ? 3 : 1);
@@ -656,33 +655,9 @@
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
- &signal, lock, channel->signal_policy);
+ return hv_ringbuffer_write(channel, bufferlist, num_vecs,
+ lock, kick_q);
- /*
- * Signalling the host is conditional on many factors:
- * 1. The ring state changed from being empty to non-empty.
- * This is tracked by the variable "signal".
- * 2. The variable kick_q tracks if more data will be placed
- * on the ring. We will not signal if more data is
- * to be placed.
- *
- * Based on the channel signal state, we will decide
- * which signaling policy will be applied.
- *
- * If we cannot write to the ring-buffer; signal the host
- * even if we may not have written anything. This is a rare
- * enough condition that it should not matter.
- * NOTE: in this case, the hvsock channel is an exception, because
- * it looks the host side's hvsock implementation has a throttling
- * mechanism which can hurt the performance otherwise.
- */
-
- if (((ret == 0) && kick_q && signal) ||
- (ret && !is_hvsock_channel(channel)))
- vmbus_setevent(channel);
-
- return ret;
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);
@@ -723,7 +698,6 @@
u32 flags,
bool kick_q)
{
- int ret;
int i;
struct vmbus_channel_packet_page_buffer desc;
u32 descsize;
@@ -731,7 +705,6 @@
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
@@ -769,29 +742,8 @@
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- /*
- * Signalling the host is conditional on many factors:
- * 1. The ring state changed from being empty to non-empty.
- * This is tracked by the variable "signal".
- * 2. The variable kick_q tracks if more data will be placed
- * on the ring. We will not signal if more data is
- * to be placed.
- *
- * Based on the channel signal state, we will decide
- * which signaling policy will be applied.
- *
- * If we cannot write to the ring-buffer; signal the host
- * even if we may not have written anything. This is a rare
- * enough condition that it should not matter.
- */
-
- if (((ret == 0) && kick_q && signal) || (ret))
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, kick_q);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
@@ -822,12 +774,10 @@
u32 desc_size,
void *buffer, u32 bufferlen, u64 requestid)
{
- int ret;
u32 packetlen;
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
packetlen = desc_size + bufferlen;
@@ -848,13 +798,8 @@
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- if (ret == 0 && signal)
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
@@ -866,14 +811,12 @@
struct hv_multipage_buffer *multi_pagebuffer,
void *buffer, u32 bufferlen, u64 requestid)
{
- int ret;
struct vmbus_channel_packet_multipage_buffer desc;
u32 descsize;
u32 packetlen;
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool signal = false;
bool lock = channel->acquire_ring_lock;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
@@ -913,13 +856,8 @@
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
- &signal, lock, channel->signal_policy);
-
- if (ret == 0 && signal)
- vmbus_setevent(channel);
-
- return ret;
+ return hv_ringbuffer_write(channel, bufferlist, 3,
+ lock, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
@@ -941,16 +879,9 @@
u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
bool raw)
{
- int ret;
- bool signal = false;
+ return hv_ringbuffer_read(channel, buffer, bufferlen,
+ buffer_actual_len, requestid, raw);
- ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen,
- buffer_actual_len, requestid, &signal, raw);
-
- if (signal)
- vmbus_setevent(channel);
-
- return ret;
}
int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 1bc1d479..caf3418 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -449,8 +449,6 @@
}
dev_type = hv_get_dev_type(newchannel);
- if (dev_type == HV_NIC)
- set_channel_signal_state(newchannel, HV_SIGNAL_POLICY_EXPLICIT);
init_vp_index(newchannel, dev_type);
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index a5b4442..2b13f2a 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -527,14 +527,14 @@
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
-int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
+int hv_ringbuffer_write(struct vmbus_channel *channel,
struct kvec *kv_list,
- u32 kv_count, bool *signal, bool lock,
- enum hv_signal_policy policy);
+ u32 kv_count, bool lock,
+ bool kick_q);
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
- u64 *requestid, bool *signal, bool raw);
+ u64 *requestid, bool raw);
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 08043da..308dbda 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -66,21 +66,25 @@
* once the ring buffer is empty, it will clear the
* interrupt_mask and re-check to see if new data has
* arrived.
+ *
+ * KYS: Oct. 30, 2016:
+ * It looks like Windows hosts have logic to deal with DOS attacks that
+ * can be triggered if it receives interrupts when it is not expecting
+ * the interrupt. The host expects interrupts only when the ring
+ * transitions from empty to non-empty (or full to non-full on the
+ * guest-to-host ring).
+ * So, base the signaling decision solely on the ring state until the
+ * host logic is fixed.
*/
-static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
- enum hv_signal_policy policy)
+static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
+ bool kick_q)
{
+ struct hv_ring_buffer_info *rbi = &channel->outbound;
+
virt_mb();
if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
- return false;
-
- /*
- * When the client wants to control signaling,
- * we only honour the host interrupt mask.
- */
- if (policy == HV_SIGNAL_POLICY_EXPLICIT)
- return true;
+ return;
/* check interrupt_mask before read_index */
virt_rmb();
@@ -89,9 +93,9 @@
* ring transitions from being empty to non-empty.
*/
if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
- return true;
+ vmbus_setevent(channel);
- return false;
+ return;
}
/* Get the next write location for the specified ring buffer. */
@@ -280,9 +284,9 @@
}
/* Write to the ring buffer. */
-int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct kvec *kv_list, u32 kv_count, bool *signal, bool lock,
- enum hv_signal_policy policy)
+int hv_ringbuffer_write(struct vmbus_channel *channel,
+ struct kvec *kv_list, u32 kv_count, bool lock,
+ bool kick_q)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -292,6 +296,7 @@
u32 old_write;
u64 prev_indices = 0;
unsigned long flags = 0;
+ struct hv_ring_buffer_info *outring_info = &channel->outbound;
for (i = 0; i < kv_count; i++)
totalbytes_towrite += kv_list[i].iov_len;
@@ -344,13 +349,13 @@
if (lock)
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
- *signal = hv_need_to_signal(old_write, outring_info, policy);
+ hv_signal_on_write(old_write, channel, kick_q);
return 0;
}
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
- u64 *requestid, bool *signal, bool raw)
+ u64 *requestid, bool raw)
{
u32 bytes_avail_toread;
u32 next_read_location = 0;
@@ -359,6 +364,7 @@
u32 offset;
u32 packetlen;
int ret = 0;
+ struct hv_ring_buffer_info *inring_info = &channel->inbound;
if (buflen <= 0)
return -EINVAL;
@@ -377,6 +383,7 @@
return ret;
}
+ init_cached_read_index(channel);
next_read_location = hv_get_next_read_location(inring_info);
next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
sizeof(desc),
@@ -416,7 +423,7 @@
/* Update the read index */
hv_set_next_read_location(inring_info, next_read_location);
- *signal = hv_need_to_signal_on_read(inring_info);
+ hv_signal_on_read(channel);
return ret;
}
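This refactor folds the signaling decision into the ring-buffer layer itself and, per the Oct. 30 note above, bases it purely on ring state. A reduced sketch of the rule with stand-in names (the real code also issues the virt_mb()/virt_rmb() barriers shown in the hunk):

	/* Signal only when this write moved the ring from empty to
	 * non-empty: our pre-write position equals the host's read
	 * index, and the host has not masked interrupts. */
	static void signal_on_write(u32 old_write, u32 host_read_index,
				    u32 interrupt_mask)
	{
		if (interrupt_mask)
			return;		/* host asked not to be signaled */

		if (old_write == host_read_index)
			notify_host();	/* stand-in for vmbus_setevent() */
	}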
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 7f1ac30..87201c1 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -149,4 +149,12 @@
Hardware Event across STM interface. It configures Coresight
Hardware Event mux control registers to select hardware events
based on user input.
+
+config CORESIGHT_DUMMY
+ bool "Dummy driver support"
+ help
+ Enables support for the dummy driver. The dummy driver can be
+ used for CoreSight sources/sinks that are owned and configured
+ by some other subsystem and that use Linux drivers to configure
+ the rest of the trace path.
+
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 7019968..196c9b6 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -22,3 +22,4 @@
obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
obj-$(CONFIG_CORESIGHT_CSR) += coresight-csr.o
obj-$(CONFIG_CORESIGHT_HWEVENT) += coresight-hwevent.o
+obj-$(CONFIG_CORESIGHT_DUMMY) += coresight-dummy.o
diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c
new file mode 100644
index 0000000..a0268b4
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-dummy.c
@@ -0,0 +1,171 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/coresight.h>
+#include <linux/of.h>
+
+#define DUMMY_TRACE_ID_START 256
+
+struct dummy_drvdata {
+ struct device *dev;
+ struct coresight_device *csdev;
+ int traceid;
+};
+
+static int dummy_source_enable(struct coresight_device *csdev,
+ struct perf_event *event, u32 mode)
+{
+ struct dummy_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ dev_info(drvdata->dev, "Dummy source enabled\n");
+
+ return 0;
+}
+
+static void dummy_source_disable(struct coresight_device *csdev,
+ struct perf_event *event)
+{
+ struct dummy_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ dev_info(drvdata->dev, "Dummy source disabled\n");
+}
+
+static int dummy_sink_enable(struct coresight_device *csdev, u32 mode)
+{
+ struct dummy_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ dev_info(drvdata->dev, "Dummy sink enabled\n");
+
+ return 0;
+}
+
+static void dummy_sink_disable(struct coresight_device *csdev)
+{
+ struct dummy_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ dev_info(drvdata->dev, "Dummy sink disabled\n");
+}
+
+static int dummy_trace_id(struct coresight_device *csdev)
+{
+ struct dummy_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return drvdata->traceid;
+}
+
+static const struct coresight_ops_source dummy_source_ops = {
+ .trace_id = dummy_trace_id,
+ .enable = dummy_source_enable,
+ .disable = dummy_source_disable,
+};
+
+static const struct coresight_ops_sink dummy_sink_ops = {
+ .enable = dummy_sink_enable,
+ .disable = dummy_sink_disable,
+};
+
+static const struct coresight_ops dummy_cs_ops = {
+ .source_ops = &dummy_source_ops,
+ .sink_ops = &dummy_sink_ops,
+};
+
+static int dummy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct coresight_platform_data *pdata;
+ struct dummy_drvdata *drvdata;
+ struct coresight_desc *desc;
+ static int traceid = DUMMY_TRACE_ID_START;
+
+ pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ pdev->dev.platform_data = pdata;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drvdata);
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ drvdata->traceid = traceid++;
+
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,dummy-source")) {
+ desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+ desc->subtype.source_subtype =
+ CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
+ } else if (of_property_read_bool(pdev->dev.of_node,
+ "qcom,dummy-sink")) {
+ desc->type = CORESIGHT_DEV_TYPE_SINK;
+ desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+ } else {
+ dev_info(dev, "Device type not set.\n");
+ return -EINVAL;
+ }
+
+ desc->ops = &dummy_cs_ops;
+ desc->pdata = pdev->dev.platform_data;
+ desc->dev = &pdev->dev;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ dev_info(dev, "Dummy device initialized\n");
+
+ return 0;
+}
+
+static int dummy_remove(struct platform_device *pdev)
+{
+ struct dummy_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ coresight_unregister(drvdata->csdev);
+ return 0;
+}
+
+static const struct of_device_id dummy_match[] = {
+ {.compatible = "qcom,coresight-dummy"},
+ {}
+};
+
+static struct platform_driver dummy_driver = {
+ .probe = dummy_probe,
+ .remove = dummy_remove,
+ .driver = {
+ .name = "coresight-dummy",
+ .owner = THIS_MODULE,
+ .of_match_table = dummy_match,
+ },
+};
+
+static int __init dummy_init(void)
+{
+ return platform_driver_register(&dummy_driver);
+}
+module_init(dummy_init);
+
+static void __exit dummy_exit(void)
+{
+ platform_driver_unregister(&dummy_driver);
+}
+module_exit(dummy_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight dummy source driver");
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 2cd7c71..a9cedba 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -120,6 +120,7 @@
cpumask_t *mask;
struct etm_event_data *event_data;
struct coresight_device *sink;
+ struct coresight_device *source;
event_data = container_of(work, struct etm_event_data, work);
mask = &event_data->mask;
@@ -135,8 +136,9 @@
}
for_each_cpu(cpu, mask) {
+ source = coresight_get_source(event_data->path[cpu]);
if (!(IS_ERR_OR_NULL(event_data->path[cpu])))
- coresight_release_path(event_data->path[cpu]);
+ coresight_release_path(source, event_data->path[cpu]);
}
kfree(event_data->path);
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index aa5538c..3af358a 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2011-2012, 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, 2016-2017, The Linux Foundation.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -134,8 +135,10 @@
void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode);
struct coresight_device *coresight_get_sink(struct list_head *path);
+struct coresight_device *coresight_get_source(struct list_head *path);
struct list_head *coresight_build_path(struct coresight_device *csdev);
-void coresight_release_path(struct list_head *path);
+void coresight_release_path(struct coresight_device *csdev,
+ struct list_head *path);
#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
extern int etm_readl_cp14(u32 off, unsigned int *val);
diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
index 0a3d15f..0bd8b78 100644
--- a/drivers/hwtracing/coresight/coresight-replicator-qcom.c
+++ b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -47,6 +47,8 @@
{
struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
+ pm_runtime_get_sync(drvdata->dev);
+
CS_UNLOCK(drvdata->base);
/*
@@ -83,6 +85,7 @@
CS_LOCK(drvdata->base);
+ pm_runtime_put(drvdata->dev);
dev_info(drvdata->dev, "REPLICATOR disabled\n");
}
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index 95d7a90..1ccf3da 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -655,7 +655,7 @@
}
static int tpdm_enable(struct coresight_device *csdev,
- struct perf_event_attr *attr, u32 mode)
+ struct perf_event *event, u32 mode)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret;
@@ -731,7 +731,8 @@
TPDM_LOCK(drvdata);
}
-static void tpdm_disable(struct coresight_device *csdev)
+static void tpdm_disable(struct coresight_device *csdev,
+ struct perf_event *event)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 7bf00a0..3a4474d 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,19 +39,17 @@
struct list_head link;
};
-/*
- * When operating Coresight drivers from the sysFS interface, only a single
- * path can exist from a tracer (associated to a CPU) to a sink.
+/**
+ * struct coresight_path - path from source to sink
+ * @path: Address of path list.
+ * @link: hook to the list.
*/
-static DEFINE_PER_CPU(struct list_head *, tracer_path);
+struct coresight_path {
+ struct list_head *path;
+ struct list_head link;
+};
-/*
- * As of this writing only a single STM can be found in CS topologies. Since
- * there is no way to know if we'll ever see more and what kind of
- * configuration they will enact, for the time being only define a single path
- * for STM.
- */
-static struct list_head *stm_path;
+static LIST_HEAD(cs_active_paths);
static int coresight_id_match(struct device *dev, void *data)
{
@@ -152,6 +150,7 @@
if (sink_ops(csdev)->disable) {
sink_ops(csdev)->disable(csdev);
csdev->enable = false;
+ csdev->activated = false;
}
}
}
@@ -353,6 +352,20 @@
goto out;
}
+struct coresight_device *coresight_get_source(struct list_head *path)
+{
+ struct coresight_device *csdev;
+
+ if (!path)
+ return NULL;
+
+ csdev = list_first_entry(path, struct coresight_node, link)->csdev;
+ if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE)
+ return NULL;
+
+ return csdev;
+}
+
struct coresight_device *coresight_get_sink(struct list_head *path)
{
struct coresight_device *csdev;
@@ -446,14 +459,23 @@
* coresight_release_path - release a previously built path.
* @path: the path to release.
*
+ * Remove the coresight path entry from the source device.
* Go through all the elements of a path and 1) remove it from the list and
* 2) free the memory allocated for each node.
*/
-void coresight_release_path(struct list_head *path)
+void coresight_release_path(struct coresight_device *csdev,
+ struct list_head *path)
{
- struct coresight_device *csdev;
struct coresight_node *nd, *next;
+ if (csdev != NULL && csdev->node != NULL) {
+ /* Remove path entry from source device */
+ list_del(&csdev->node->link);
+ kfree(csdev->node);
+ csdev->node = NULL;
+ }
+
+ /* Free the path */
list_for_each_entry_safe(nd, next, path, link) {
csdev = nd->csdev;
@@ -494,9 +516,25 @@
return 0;
}
+int coresight_store_path(struct coresight_device *csdev, struct list_head *path)
+{
+ struct coresight_path *node;
+
+ node = kzalloc(sizeof(struct coresight_path), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ node->path = path;
+ list_add(&node->link, &cs_active_paths);
+
+ csdev->node = node;
+
+ return 0;
+}
+
int coresight_enable(struct coresight_device *csdev)
{
- int cpu, ret = 0;
+ int ret = 0;
struct list_head *path;
mutex_lock(&coresight_mutex);
@@ -523,25 +561,9 @@
if (ret)
goto err_source;
- switch (csdev->subtype.source_subtype) {
- case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
- /*
- * When working from sysFS it is important to keep track
- * of the paths that were created so that they can be
- * undone in 'coresight_disable()'. Since there can only
- * be a single session per tracer (when working from sysFS)
- * a per-cpu variable will do just fine.
- */
- cpu = source_ops(csdev)->cpu_id(csdev);
- per_cpu(tracer_path, cpu) = path;
- break;
- case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
- stm_path = path;
- break;
- default:
- /* We can't be here */
- break;
- }
+ ret = coresight_store_path(csdev, path);
+ if (ret)
+ goto err_source;
out:
mutex_unlock(&coresight_mutex);
@@ -551,15 +573,14 @@
coresight_disable_path(path);
err_path:
- coresight_release_path(path);
+ coresight_release_path(csdev, path);
goto out;
}
EXPORT_SYMBOL_GPL(coresight_enable);
void coresight_disable(struct coresight_device *csdev)
{
- int cpu, ret;
- struct list_head *path = NULL;
+ int ret;
mutex_lock(&coresight_mutex);
@@ -570,24 +591,12 @@
if (!csdev->enable)
goto out;
- switch (csdev->subtype.source_subtype) {
- case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
- cpu = source_ops(csdev)->cpu_id(csdev);
- path = per_cpu(tracer_path, cpu);
- per_cpu(tracer_path, cpu) = NULL;
- break;
- case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
- path = stm_path;
- stm_path = NULL;
- break;
- default:
- /* We can't be here */
- break;
- }
+ if (csdev->node == NULL)
+ goto out;
coresight_disable_source(csdev);
- coresight_disable_path(path);
- coresight_release_path(path);
+ coresight_disable_path(csdev->node->path);
+ coresight_release_path(csdev, csdev->node->path);
out:
mutex_unlock(&coresight_mutex);
@@ -876,8 +885,42 @@
return -EAGAIN;
}
+static ssize_t reset_source_sink_store(struct bus_type *bus,
+ const char *buf, size_t size)
+{
+ int ret = 0;
+ unsigned long val;
+ struct coresight_path *cspath = NULL;
+ struct coresight_path *cspath_next = NULL;
+ struct coresight_device *csdev;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&coresight_mutex);
+
+ list_for_each_entry_safe(cspath, cspath_next, &cs_active_paths, link) {
+ csdev = coresight_get_source(cspath->path);
+ if (!csdev)
+ continue;
+ coresight_disable(csdev);
+ }
+
+ mutex_unlock(&coresight_mutex);
+ return size;
+}
+static BUS_ATTR_WO(reset_source_sink);
+
+static struct attribute *coresight_reset_source_sink_attrs[] = {
+ &bus_attr_reset_source_sink.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(coresight_reset_source_sink);
+
struct bus_type coresight_bustype = {
- .name = "coresight",
+ .name = "coresight",
+ .bus_groups = coresight_reset_source_sink_groups,
};
static int __init coresight_init(void)
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index d252276..a9cf687 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -790,6 +790,16 @@
This driver can also be built as a module. If so, the module
will be called i2c-qup.
+config I2C_QCOM_GENI
+ tristate "Qualcomm Technologies Inc.'s GENI based I2C controller"
+ depends on ARCH_QCOM
+ help
+ If you say yes to this option, support will be included for the
+ built-in I2C interface on Qualcomm Technologies Inc. SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-qcom-geni.
+
config I2C_RIIC
tristate "Renesas RIIC adapter"
depends on ARCH_RENESAS || COMPILE_TEST
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 29764cc..7f2523f 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -76,6 +76,7 @@
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o
obj-$(CONFIG_I2C_QUP) += i2c-qup.o
+obj-$(CONFIG_I2C_QCOM_GENI) += i2c-qcom-geni.o
obj-$(CONFIG_I2C_RIIC) += i2c-riic.o
obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o
obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index b403fa5..809f4d4 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -475,30 +475,28 @@
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
- u32 ic_tar = 0;
+ u32 ic_con, ic_tar = 0;
/* Disable the adapter */
__i2c_dw_enable_and_wait(dev, false);
/* if the slave address is ten bit address, enable 10BITADDR */
- if (dev->dynamic_tar_update_enabled) {
+ ic_con = dw_readl(dev, DW_IC_CON);
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
+ ic_con |= DW_IC_CON_10BITADDR_MASTER;
/*
* If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
- * mode has to be enabled via bit 12 of IC_TAR register,
- * otherwise bit 4 of IC_CON is used.
+ * mode has to be enabled via bit 12 of IC_TAR register.
+ * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
+ * detected from registers.
*/
- if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
- ic_tar = DW_IC_TAR_10BITADDR_MASTER;
+ ic_tar = DW_IC_TAR_10BITADDR_MASTER;
} else {
- u32 ic_con = dw_readl(dev, DW_IC_CON);
-
- if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
- ic_con |= DW_IC_CON_10BITADDR_MASTER;
- else
- ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
- dw_writel(dev, ic_con, DW_IC_CON);
+ ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
}
+ dw_writel(dev, ic_con, DW_IC_CON);
+
/*
* Set the slave (target) address and enable 10-bit addressing mode
* if applicable.
@@ -923,7 +921,6 @@
{
struct i2c_adapter *adap = &dev->adapter;
int r;
- u32 reg;
init_completion(&dev->cmd_complete);
@@ -931,26 +928,6 @@
if (r)
return r;
- r = i2c_dw_acquire_lock(dev);
- if (r)
- return r;
-
- /*
- * Test if dynamic TAR update is enabled in this controller by writing
- * to IC_10BITADDR_MASTER field in IC_CON: when it is enabled this
- * field is read-only so it should not succeed
- */
- reg = dw_readl(dev, DW_IC_CON);
- dw_writel(dev, reg ^ DW_IC_CON_10BITADDR_MASTER, DW_IC_CON);
-
- if ((dw_readl(dev, DW_IC_CON) & DW_IC_CON_10BITADDR_MASTER) ==
- (reg & DW_IC_CON_10BITADDR_MASTER)) {
- dev->dynamic_tar_update_enabled = true;
- dev_dbg(dev->dev, "Dynamic TAR update enabled");
- }
-
- i2c_dw_release_lock(dev);
-
snprintf(adap->name, sizeof(adap->name),
"Synopsys DesignWare I2C adapter");
adap->retries = 3;
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 0d44d2a..22bfbe1 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -117,7 +117,6 @@
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_runtime_disabled;
- bool dynamic_tar_update_enabled;
};
#define ACCESS_SWAP 0x00000001
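The rationale is spelled out in the xfer-init hunk: when the controller is synthesized with I2C_DYNAMIC_TAR_UPDATE, 10-bit addressing is controlled by bit 12 of IC_TAR and the IC_CON field becomes read-only, and the feature cannot be probed from registers, so the driver now programs both locations unconditionally and lets the hardware honour whichever applies. A condensed sketch (register accessors are stand-ins):

	u32 ic_con = read_ic_con();
	u32 ic_tar = 0;

	if (ten_bit_address) {
		ic_con |= DW_IC_CON_10BITADDR_MASTER; /* used if dynamic TAR update is off */
		ic_tar  = DW_IC_TAR_10BITADDR_MASTER; /* used if it is on */
	} else {
		ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
	}
	write_ic_con(ic_con);
	/* ic_tar is then OR'ed into the target-address write */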
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
new file mode 100644
index 0000000..8e38a24
--- /dev/null
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/qcom-geni-se.h>
+
+#define SE_I2C_TX_TRANS_LEN (0x26C)
+#define SE_I2C_RX_TRANS_LEN (0x270)
+#define SE_I2C_SCL_COUNTERS (0x278)
+
+#define SE_I2C_ERR (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |\
+ M_GP_IRQ_1_EN | M_GP_IRQ_3_EN | M_GP_IRQ_4_EN)
+#define SE_I2C_ABORT (1U << 1)
+/* M_CMD OP codes for I2C */
+#define I2C_WRITE (0x1)
+#define I2C_READ (0x2)
+#define I2C_WRITE_READ (0x3)
+#define I2C_ADDR_ONLY (0x4)
+#define I2C_BUS_CLEAR (0x6)
+#define I2C_STOP_ON_BUS (0x7)
+/* M_CMD params for I2C */
+#define PRE_CMD_DELAY (BIT(0))
+#define TIMESTAMP_BEFORE (BIT(1))
+#define STOP_STRETCH (BIT(2))
+#define TIMESTAMP_AFTER (BIT(3))
+#define POST_COMMAND_DELAY (BIT(4))
+#define IGNORE_ADD_NACK (BIT(6))
+#define READ_FINISHED_WITH_ACK (BIT(7))
+#define BYPASS_ADDR_PHASE (BIT(8))
+#define SLV_ADDR_MSK (GENMASK(15, 9))
+#define SLV_ADDR_SHFT (9)
+
+struct geni_i2c_dev {
+ struct device *dev;
+ void __iomem *base;
+ int irq;
+ int err;
+ struct i2c_adapter adap;
+ struct completion xfer;
+ struct i2c_msg *cur;
+ int cur_wr;
+ int cur_rd;
+};
+
+static inline void qcom_geni_i2c_conf(void __iomem *base, int dfs, int div)
+{
+ geni_write_reg(dfs, base, SE_GENI_CLK_SEL);
+ geni_write_reg((div << 4) | 1, base, GENI_SER_M_CLK_CFG);
+ geni_write_reg(((5 << 20) | (0xC << 10) | 0x18),
+ base, SE_I2C_SCL_COUNTERS);
+ /*
+ * Ensure Clk config completes before return.
+ */
+ mb();
+}
+
+static irqreturn_t geni_i2c_irq(int irq, void *dev)
+{
+ struct geni_i2c_dev *gi2c = dev;
+ int i, j;
+ u32 m_stat = readl_relaxed(gi2c->base + SE_GENI_M_IRQ_STATUS);
+ u32 tx_stat = readl_relaxed(gi2c->base + SE_GENI_TX_FIFO_STATUS);
+ u32 rx_stat = readl_relaxed(gi2c->base + SE_GENI_RX_FIFO_STATUS);
+ struct i2c_msg *cur = gi2c->cur;
+
+ dev_dbg(gi2c->dev,
+ "got i2c irq:%d, stat:0x%x, tx stat:0x%x, rx stat:0x%x\n",
+ irq, m_stat, tx_stat, rx_stat);
+ if (!cur || m_stat & SE_I2C_ERR) {
+ dev_err(gi2c->dev, "i2c txn err");
+ writel_relaxed(0, (gi2c->base + SE_GENI_TX_WATERMARK_REG));
+ gi2c->err = -EIO;
+ goto irqret;
+ }
+ if (((m_stat & M_RX_FIFO_WATERMARK_EN) ||
+ (m_stat & M_RX_FIFO_LAST_EN)) && (cur->flags & I2C_M_RD)) {
+ u32 rxcnt = rx_stat & RX_FIFO_WC_MSK;
+
+ for (j = 0; j < rxcnt; j++) {
+ u32 temp;
+ int p;
+
+ temp = readl_relaxed(gi2c->base + SE_GENI_RX_FIFOn);
+ for (i = gi2c->cur_rd, p = 0; (i < cur->len && p < 4);
+ i++, p++)
+ cur->buf[i] = (u8) ((temp >> (p * 8)) & 0xff);
+ gi2c->cur_rd = i;
+ if (gi2c->cur_rd == cur->len) {
+ dev_dbg(gi2c->dev, "i:%d,read 0x%x\n", i, temp);
+ break;
+ }
+ dev_dbg(gi2c->dev, "i: %d, read 0x%x\n", i, temp);
+ }
+ } else if ((m_stat & M_TX_FIFO_WATERMARK_EN) &&
+ !(cur->flags & I2C_M_RD)) {
+ for (j = 0; j < 0x1f; j++) {
+ u32 temp = 0;
+ int p;
+
+ for (i = gi2c->cur_wr, p = 0; (i < cur->len && p < 4);
+ i++, p++)
+ temp |= (((u32)(cur->buf[i]) << (p * 8)));
+ writel_relaxed(temp, gi2c->base + SE_GENI_TX_FIFOn);
+ gi2c->cur_wr = i;
+ dev_dbg(gi2c->dev, "i:%d,wrote 0x%x\n", i, temp);
+ if (gi2c->cur_wr == cur->len) {
+ dev_dbg(gi2c->dev, "i2c bytes done writing\n");
+ writel_relaxed(0,
+ (gi2c->base + SE_GENI_TX_WATERMARK_REG));
+ break;
+ }
+ }
+ }
+irqret:
+ writel_relaxed(m_stat, gi2c->base + SE_GENI_M_IRQ_CLEAR);
+ /* Ensure all writes are done before returning from ISR. */
+ wmb();
+ /* If this is an error with the done bit not set, handle it via the timeout. */
+ if (m_stat & M_CMD_DONE_EN) {
+ dev_dbg(gi2c->dev, "i2c irq: err:%d, stat:0x%x\n",
+ gi2c->err, m_stat);
+ complete(&gi2c->xfer);
+ }
+ return IRQ_HANDLED;
+}
+
+static int geni_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg msgs[],
+ int num)
+{
+ struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
+ int i, ret = 0, timeout = 0;
+
+ gi2c->err = 0;
+ gi2c->cur = &msgs[0];
+ reinit_completion(&gi2c->xfer);
+ enable_irq(gi2c->irq);
+ qcom_geni_i2c_conf(gi2c->base, 0, 2);
+ se_config_packing(gi2c->base, 8, 4, true);
+ dev_dbg(gi2c->dev, "i2c xfer:num:%d, msgs:len:%d,flg:%d\n",
+ num, msgs[0].len, msgs[0].flags);
+ for (i = 0; i < num; i++) {
+ int stretch = (i < (num - 1));
+ u32 m_param = 0;
+ u32 m_cmd = 0;
+
+ m_param |= (stretch ? STOP_STRETCH : 0);
+ m_param |= ((msgs[i].addr & 0x7F) << SLV_ADDR_SHFT);
+
+ gi2c->cur = &msgs[i];
+ if (msgs[i].flags & I2C_M_RD) {
+ dev_dbg(gi2c->dev,
+ "READ,n:%d,i:%d len:%d, stretch:%d\n",
+ num, i, msgs[i].len, stretch);
+ geni_write_reg(msgs[i].len,
+ gi2c->base, SE_I2C_RX_TRANS_LEN);
+ m_cmd = I2C_READ;
+ geni_setup_m_cmd(gi2c->base, m_cmd, m_param);
+ } else {
+ dev_dbg(gi2c->dev,
+ "WRITE:n:%d,i%d len:%d, stretch:%d\n",
+ num, i, msgs[i].len, stretch);
+ geni_write_reg(msgs[i].len, gi2c->base,
+ SE_I2C_TX_TRANS_LEN);
+ m_cmd = I2C_WRITE;
+ geni_setup_m_cmd(gi2c->base, m_cmd, m_param);
+ /* Get FIFO IRQ */
+ geni_write_reg(1, gi2c->base, SE_GENI_TX_WATERMARK_REG);
+ }
+ /* Ensure FIFO writes go through before waiting for the done event */
+ mb();
+ timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
+ if (!timeout) {
+ dev_err(gi2c->dev, "Timed out\n");
+ gi2c->err = -ETIMEDOUT;
+ gi2c->cur = NULL;
+ geni_abort_m_cmd(gi2c->base);
+ timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
+ }
+ gi2c->cur_wr = 0;
+ gi2c->cur_rd = 0;
+ if (gi2c->err) {
+ dev_err(gi2c->dev, "i2c error :%d\n", gi2c->err);
+ ret = gi2c->err;
+ break;
+ }
+ }
+ if (ret == 0)
+ ret = i;
+ disable_irq(gi2c->irq);
+ gi2c->cur = NULL;
+ gi2c->err = 0;
+ dev_dbg(gi2c->dev, "i2c txn ret:%d\n", ret);
+ return ret;
+}
+
+static u32 geni_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static const struct i2c_algorithm geni_i2c_algo = {
+ .master_xfer = geni_i2c_xfer,
+ .functionality = geni_i2c_func,
+};
+
+static int geni_i2c_probe(struct platform_device *pdev)
+{
+ struct geni_i2c_dev *gi2c;
+ struct resource *res;
+ int ret;
+
+ gi2c = devm_kzalloc(&pdev->dev, sizeof(*gi2c), GFP_KERNEL);
+ if (!gi2c)
+ return -ENOMEM;
+
+ gi2c->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ gi2c->base = devm_ioremap_resource(gi2c->dev, res);
+ if (IS_ERR(gi2c->base))
+ return PTR_ERR(gi2c->base);
+
+ gi2c->irq = platform_get_irq(pdev, 0);
+ if (gi2c->irq < 0) {
+ dev_err(gi2c->dev, "IRQ error for i2c-geni\n");
+ return gi2c->irq;
+ }
+
+ gi2c->adap.algo = &geni_i2c_algo;
+ init_completion(&gi2c->xfer);
+ platform_set_drvdata(pdev, gi2c);
+ ret = devm_request_irq(gi2c->dev, gi2c->irq, geni_i2c_irq,
+ IRQF_TRIGGER_HIGH, "i2c_geni", gi2c);
+ if (ret) {
+ dev_err(gi2c->dev, "Request_irq failed:%d: err:%d\n",
+ gi2c->irq, ret);
+ return ret;
+ }
+ disable_irq(gi2c->irq);
+ i2c_set_adapdata(&gi2c->adap, gi2c);
+ gi2c->adap.dev.parent = gi2c->dev;
+ gi2c->adap.dev.of_node = pdev->dev.of_node;
+
+ strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
+
+ i2c_add_adapter(&gi2c->adap);
+ geni_se_init(gi2c->base, FIFO_MODE, 0xF, 0x10);
+
+ return 0;
+}
+
+static int geni_i2c_remove(struct platform_device *pdev)
+{
+ struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
+
+ disable_irq(gi2c->irq);
+ i2c_del_adapter(&gi2c->adap);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int geni_i2c_suspend(struct device *device)
+{
+ return 0;
+}
+
+static int geni_i2c_resume(struct device *device)
+{
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops geni_i2c_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(
+ geni_i2c_suspend,
+ geni_i2c_resume)
+};
+
+static const struct of_device_id geni_i2c_dt_match[] = {
+ { .compatible = "qcom,i2c-geni" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
+
+static struct platform_driver geni_i2c_driver = {
+ .probe = geni_i2c_probe,
+ .remove = geni_i2c_remove,
+ .driver = {
+ .name = "i2c_geni",
+ .pm = &geni_i2c_pm_ops,
+ .of_match_table = geni_i2c_dt_match,
+ },
+};
+
+module_platform_driver(geni_i2c_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c_geni");
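The interrupt handler in this driver moves message bytes through the 32-bit GENI FIFO registers four at a time, least-significant byte first, matching the se_config_packing(base, 8, 4, true) call in the transfer path. A standalone sketch of the TX-side packing (illustrative only, not part of the driver):

	#include <linux/types.h>

	/* Pack up to four message bytes into one little-endian FIFO word. */
	static u32 pack_fifo_word(const u8 *buf, int remaining)
	{
		u32 word = 0;
		int p;

		for (p = 0; p < 4 && p < remaining; p++)
			word |= (u32)buf[p] << (p * 8);
		return word;
	}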
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 1869152..9b732c5 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -59,9 +59,11 @@
case RXE_MEM_TYPE_MR:
case RXE_MEM_TYPE_FMR:
- return ((iova < mem->iova) ||
- ((iova + length) > (mem->iova + mem->length))) ?
- -EFAULT : 0;
+ if (iova < mem->iova ||
+ length > mem->length ||
+ iova > mem->iova + mem->length - length)
+ return -EFAULT;
+ return 0;
default:
return -EFAULT;
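The rewrite closes an overflow hole: with iova near the top of the address space, the old iova + length sum wraps and the stale comparison passes. The new ordering never forms a sum that can wrap, assuming the registered region itself is sane (mem->iova + mem->length does not overflow). Sketch:

	#include <linux/types.h>
	#include <linux/errno.h>

	/* 0 if [iova, iova+length) fits in [base, base+size), else -EFAULT. */
	static int check_range(u64 iova, u64 length, u64 base, u64 size)
	{
		if (iova < base ||		  /* starts before the region */
		    length > size ||		  /* longer than the region */
		    iova > base + size - length)  /* runs past the end */
			return -EFAULT;
		return 0;
	}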
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index dd3d88a..ccf6247 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -472,7 +472,7 @@
goto err2;
}
- resid = mtu;
+ qp->resp.resid = mtu;
} else {
if (pktlen != resid) {
state = RESPST_ERR_LENGTH;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 92595b9..022be0e 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -263,13 +263,21 @@
return -EINVAL;
}
- if (test_bit(ABS_MT_SLOT, dev->absbit)) {
- nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
- error = input_mt_init_slots(dev, nslot, 0);
- if (error)
+ if (test_bit(EV_ABS, dev->evbit)) {
+ input_alloc_absinfo(dev);
+ if (!dev->absinfo) {
+ error = -EINVAL;
goto fail1;
- } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
- input_set_events_per_packet(dev, 60);
+ }
+
+ if (test_bit(ABS_MT_SLOT, dev->absbit)) {
+ nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
+ error = input_mt_init_slots(dev, nslot, 0);
+ if (error)
+ goto fail1;
+ } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+ input_set_events_per_packet(dev, 60);
+ }
}
if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) {
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index d15b338..ed1935f 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1232,6 +1232,7 @@
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
+ { "ELAN0605", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 4d6ee1b..c704c47 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -635,7 +635,7 @@
__be32 pci_sid;
int err = 0;
- memset(&it, sizeof(it), 0);
+ memset(&it, 0, sizeof(it));
np = dev_get_dev_node(dev);
if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
of_node_put(np);
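
For reference, the bug here is an argument swap, not a missing call: memset(void *s, int c, size_t n) with the last two arguments exchanged writes n = 0 bytes and silently leaves the structure uninitialized, while still compiling cleanly. A compilable illustration; the struct name is a stand-in:

    #include <stdio.h>
    #include <string.h>

    struct iter { int a, b; };

    int main(void)
    {
            struct iter it = { 1, 2 };

            memset(&it, sizeof(it), 0);   /* swapped: zero bytes written */
            printf("swapped: a=%d b=%d\n", it.a, it.b);   /* still 1 2 */

            memset(&it, 0, sizeof(it));   /* correct: whole struct zeroed */
            printf("correct: a=%d b=%d\n", it.a, it.b);   /* 0 0 */
            return 0;
    }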
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 6b420a5..c3ea03c 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -425,7 +425,7 @@
* until a gc finishes - otherwise we could pointlessly burn a ton of
* cpu
*/
- unsigned invalidate_needs_gc:1;
+ unsigned invalidate_needs_gc;
bool discard; /* Get rid of? */
@@ -593,8 +593,8 @@
/* Counts how many sectors bio_insert has added to the cache */
atomic_t sectors_to_gc;
+ wait_queue_head_t gc_wait;
- wait_queue_head_t moving_gc_wait;
struct keybuf moving_gc_keys;
/* Number of moving GC bios in flight */
struct semaphore moving_in_flight;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 81d3db4..2efdce0 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1757,32 +1757,34 @@
bch_moving_gc(c);
}
-static int bch_gc_thread(void *arg)
+static bool gc_should_run(struct cache_set *c)
{
- struct cache_set *c = arg;
struct cache *ca;
unsigned i;
- while (1) {
-again:
- bch_btree_gc(c);
+ for_each_cache(ca, c, i)
+ if (ca->invalidate_needs_gc)
+ return true;
- set_current_state(TASK_INTERRUPTIBLE);
+ if (atomic_read(&c->sectors_to_gc) < 0)
+ return true;
+
+ return false;
+}
+
+static int bch_gc_thread(void *arg)
+{
+ struct cache_set *c = arg;
+
+ while (1) {
+ wait_event_interruptible(c->gc_wait,
+ kthread_should_stop() || gc_should_run(c));
+
if (kthread_should_stop())
break;
- mutex_lock(&c->bucket_lock);
-
- for_each_cache(ca, c, i)
- if (ca->invalidate_needs_gc) {
- mutex_unlock(&c->bucket_lock);
- set_current_state(TASK_RUNNING);
- goto again;
- }
-
- mutex_unlock(&c->bucket_lock);
-
- schedule();
+ set_gc_sectors(c);
+ bch_btree_gc(c);
}
return 0;
@@ -1790,11 +1792,10 @@
int bch_gc_thread_start(struct cache_set *c)
{
- c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+ c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
if (IS_ERR(c->gc_thread))
return PTR_ERR(c->gc_thread);
- set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
return 0;
}
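
The structural change in bch_gc_thread(): the open-coded set_current_state()/schedule() loop can lose a wakeup that lands between evaluating the run conditions and going to sleep, whereas wait_event_interruptible() re-checks its predicate around every sleep. With kthread_run() the thread starts immediately and simply parks itself on the waitqueue. A condensed sketch of the pattern, kernel context assumed:

    /* Sleeper: the predicate is re-evaluated on every wakeup, so a
     * wake_up() racing with the check cannot be lost. */
    static int gc_thread(void *arg)
    {
            struct cache_set *c = arg;

            while (!kthread_should_stop()) {
                    wait_event_interruptible(c->gc_wait,
                            kthread_should_stop() || gc_should_run(c));
                    if (kthread_should_stop())
                            break;
                    set_gc_sectors(c);
                    bch_btree_gc(c);
            }
            return 0;
    }

    /* Waker: needs no knowledge of the thread's current state. */
    static inline void wake_up_gc(struct cache_set *c)
    {
            wake_up(&c->gc_wait);
    }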
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 5c391fa..9b80417 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -260,8 +260,7 @@
static inline void wake_up_gc(struct cache_set *c)
{
- if (c->gc_thread)
- wake_up_process(c->gc_thread);
+ wake_up(&c->gc_wait);
}
#define MAP_DONE 0
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 40ffe5e..a37c177 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -196,10 +196,8 @@
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
- set_gc_sectors(op->c);
+ if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
wake_up_gc(op->c);
- }
if (op->bypass)
return bch_data_invalidate(cl);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 849ad44..66669c8 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1491,6 +1491,7 @@
mutex_init(&c->bucket_lock);
init_waitqueue_head(&c->btree_cache_wait);
init_waitqueue_head(&c->bucket_wait);
+ init_waitqueue_head(&c->gc_wait);
sema_init(&c->uuid_write_mutex, 1);
spin_lock_init(&c->btree_gc_time.lock);
@@ -1550,6 +1551,7 @@
for_each_cache(ca, c, i)
c->nbuckets += ca->sb.nbuckets;
+ set_gc_sectors(c);
if (CACHE_SYNC(&c->sb)) {
LIST_HEAD(journal);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 31a89c8..2c96542 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -804,6 +804,10 @@
int srcu_idx;
struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+ if (unlikely(!map)) {
+ dm_put_live_table(md, srcu_idx);
+ return;
+ }
ti = dm_table_find_target(map, pos);
dm_put_live_table(md, srcu_idx);
}
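
The reason the early return needs its own dm_put_live_table(): dm_get_live_table() takes an SRCU read lock whether or not a live table exists, so a NULL map still leaves the lock held. A sketch of the required pairing, kernel context assumed:

    /* Sketch: the SRCU read lock is taken even when no table is live,
     * so every dm_get_live_table() needs a put on every exit path. */
    int srcu_idx;
    struct dm_table *map = dm_get_live_table(md, &srcu_idx);

    if (unlikely(!map)) {
            dm_put_live_table(md, srcu_idx);   /* still required */
            return;
    }
    ti = dm_table_find_target(map, pos);
    dm_put_live_table(md, srcu_idx);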
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index bf0148c..68848a8 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -504,6 +504,7 @@
.num_planes = 1,
.get_frame_size = get_frame_size_compressed,
.type = OUTPUT_PORT,
+ .defer_outputs = false,
},
{
.name = "Mpeg2",
@@ -512,6 +513,7 @@
.num_planes = 1,
.get_frame_size = get_frame_size_compressed,
.type = OUTPUT_PORT,
+ .defer_outputs = false,
},
{
.name = "H263",
@@ -520,6 +522,7 @@
.num_planes = 1,
.get_frame_size = get_frame_size_compressed,
.type = OUTPUT_PORT,
+ .defer_outputs = false,
},
{
.name = "VC1",
@@ -528,6 +531,7 @@
.num_planes = 1,
.get_frame_size = get_frame_size_compressed,
.type = OUTPUT_PORT,
+ .defer_outputs = false,
},
{
.name = "VC1 SP",
@@ -536,6 +540,7 @@
.num_planes = 1,
.get_frame_size = get_frame_size_compressed,
.type = OUTPUT_PORT,
+ .defer_outputs = false,
},
{
.name = "H264",
@@ -544,6 +549,7 @@
.num_planes = 1,
.get_frame_size = get_frame_size_compressed,
.type = OUTPUT_PORT,
+ .defer_outputs = false,
},
{
.name = "H264_MVC",
@@ -552,6 +558,7 @@
.num_planes = 1,
.get_frame_size = get_frame_size_compressed,
.type = OUTPUT_PORT,
+ .defer_outputs = false,
},
{
.name = "HEVC",
@@ -560,6 +567,7 @@
.num_planes = 1,
.get_frame_size = get_frame_size_compressed,
.type = OUTPUT_PORT,
+ .defer_outputs = false,
},
{
.name = "VP8",
@@ -568,6 +576,7 @@
.num_planes = 1,
.get_frame_size = get_frame_size_compressed,
.type = OUTPUT_PORT,
+ .defer_outputs = false,
},
{
.name = "VP9",
@@ -576,6 +585,7 @@
.num_planes = 1,
.get_frame_size = get_frame_size_compressed_full_yuv,
.type = OUTPUT_PORT,
+ .defer_outputs = true,
},
};
@@ -1767,7 +1777,7 @@
case V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_NONE:
if (!msm_comm_g_ctrl_for_id(inst, control.id)) {
rc = msm_comm_release_output_buffers(
- inst);
+ inst, false);
if (rc)
dprintk(VIDC_ERR,
"%s Release output buffers failed\n",
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 5d206d3..4a1fd76 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1388,7 +1388,13 @@
"Failed to release persist buffers\n");
}
- if (msm_comm_release_output_buffers(inst)) {
+ /*
+ * At this point all buffers should be with the driver,
+ * irrespective of the scenario.
+ */
+ msm_comm_validate_output_buffers(inst);
+
+ if (msm_comm_release_output_buffers(inst, true)) {
dprintk(VIDC_ERR,
"Failed to release output buffers\n");
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 564ab99..00feba6 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1414,7 +1414,7 @@
put_inst(inst);
}
-void validate_output_buffers(struct msm_vidc_inst *inst)
+void msm_comm_validate_output_buffers(struct msm_vidc_inst *inst)
{
struct internal_buf *binfo;
u32 buffers_owned_by_driver = 0;
@@ -1440,11 +1440,13 @@
}
mutex_unlock(&inst->outputbufs.lock);
- if (buffers_owned_by_driver != output_buf->buffer_count_actual)
+ if (buffers_owned_by_driver != output_buf->buffer_count_actual) {
dprintk(VIDC_WARN,
"OUTPUT Buffer count mismatch %d of %d\n",
buffers_owned_by_driver,
output_buf->buffer_count_actual);
+ msm_vidc_handle_hw_error(inst->core);
+ }
}
int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst)
@@ -1524,7 +1526,11 @@
if (msm_comm_get_stream_output_mode(inst) ==
HAL_VIDEO_DECODER_SECONDARY) {
- validate_output_buffers(inst);
+
+ if (!(inst->fmts[OUTPUT_PORT].defer_outputs &&
+ inst->in_reconfig))
+ msm_comm_validate_output_buffers(inst);
+
if (!inst->in_reconfig) {
rc = msm_comm_queue_output_buffers(inst);
if (rc) {
@@ -4051,7 +4057,8 @@
return rc;
}
-int msm_comm_release_output_buffers(struct msm_vidc_inst *inst)
+int msm_comm_release_output_buffers(struct msm_vidc_inst *inst,
+ bool force_release)
{
struct msm_smem *handle;
struct internal_buf *buf, *dummy;
@@ -4093,6 +4100,11 @@
goto exit;
}
+ if ((buf->buffer_ownership == FIRMWARE) && !force_release) {
+ dprintk(VIDC_INFO, "DPB is with f/w. Can't free it\n");
+ continue;
+ }
+
buffer_info.buffer_size = handle->size;
buffer_info.buffer_type = buf->buffer_type;
buffer_info.num_buffers = 1;
@@ -4352,13 +4364,17 @@
int msm_comm_set_output_buffers(struct msm_vidc_inst *inst)
{
int rc = 0;
+ bool force_release = true;
if (!inst || !inst->core || !inst->core->device) {
dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
return -EINVAL;
}
- if (msm_comm_release_output_buffers(inst))
+ if (inst->fmts[OUTPUT_PORT].defer_outputs)
+ force_release = false;
+
+ if (msm_comm_release_output_buffers(inst, force_release))
dprintk(VIDC_WARN, "Failed to release output buffers\n");
rc = set_output_buffers(inst, HAL_BUFFER_OUTPUT);
@@ -4366,7 +4382,7 @@
goto error;
return rc;
error:
- msm_comm_release_output_buffers(inst);
+ msm_comm_release_output_buffers(inst, true);
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index c042fe9..7f2ab04 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -55,7 +55,9 @@
int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst,
bool check_for_reuse);
int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst);
-int msm_comm_release_output_buffers(struct msm_vidc_inst *inst);
+int msm_comm_release_output_buffers(struct msm_vidc_inst *inst,
+ bool force_release);
+void msm_comm_validate_output_buffers(struct msm_vidc_inst *inst);
int msm_comm_force_cleanup(struct msm_vidc_inst *inst);
int msm_comm_suspend(int core_id);
enum hal_extradata_id msm_comm_get_hal_extradata_index(
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
index 0af0220..cf5ce22 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
@@ -117,7 +117,7 @@
#define MSM_VIDC_ERROR(value) \
do { \
- dprintk(VIDC_WARN, "Fatal Level = %d\n", value);\
+ dprintk(VIDC_DBG, "Fatal Level = %d\n", value);\
BUG_ON(value); \
} while (0)
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index b4f3cd7..fb90fdf7 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -148,6 +148,7 @@
int num_planes;
int type;
u32 (*get_frame_size)(int plane, u32 height, u32 width);
+ bool defer_outputs;
};
struct msm_vidc_drv {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 949bc47..3267999 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -62,6 +62,7 @@
#define HFI_BUFFER_EXTRADATA_OUTPUT2 (HFI_OX_BASE + 0x4)
#define HFI_BUFFER_INTERNAL_SCRATCH_1 (HFI_OX_BASE + 0x5)
#define HFI_BUFFER_INTERNAL_SCRATCH_2 (HFI_OX_BASE + 0x6)
+#define HFI_BUFFER_INTERNAL_RECON (HFI_OX_BASE + 0x9)
#define HFI_BUFFER_MODE_DYNAMIC (HFI_OX_BASE + 0x3)
@@ -84,6 +85,7 @@
#define HFI_EXTRADATA_FRAME_QP 0x0000000F
#define HFI_EXTRADATA_FRAME_BITS_INFO 0x00000010
#define HFI_EXTRADATA_VPX_COLORSPACE 0x00000014
+#define HFI_EXTRADATA_UBWC_CR_STAT_INFO 0x00000019
#define HFI_EXTRADATA_MULTISLICE_INFO 0x7F100000
#define HFI_EXTRADATA_NUM_CONCEALED_MB 0x7F100001
#define HFI_EXTRADATA_INDEX 0x7F100002
@@ -119,6 +121,7 @@
#define HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST 0x04
#define HFI_INTERLACE_FRAME_TOPFIELDFIRST 0x08
#define HFI_INTERLACE_FRAME_BOTTOMFIELDFIRST 0x10
+#define HFI_INTERLACE_FRAME_MBAFF 0x20
#define HFI_PROPERTY_SYS_OX_START \
(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x0000)
@@ -135,9 +138,7 @@
(HFI_PROPERTY_PARAM_OX_START + 0x006)
#define HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA \
(HFI_PROPERTY_PARAM_OX_START + 0x009)
-#define HFI_PROPERTY_PARAM_ERR_DETECTION_CODE_EXTRADATA \
- (HFI_PROPERTY_PARAM_OX_START + 0x00A)
-#define HFI_PROPERTY_PARAM_BUFFER_SIZE_MINIMUM \
+#define HFI_PROPERTY_PARAM_BUFFER_SIZE_MINIMUM \
(HFI_PROPERTY_PARAM_OX_START + 0x00C)
#define HFI_PROPERTY_PARAM_SYNC_BASED_INTERRUPT \
(HFI_PROPERTY_PARAM_OX_START + 0x00E)
@@ -591,6 +592,21 @@
u32 flush_type;
};
+struct hfi_ubwc_cr_stats_info_type {
+ u32 cr_stats_info0;
+ u32 cr_stats_info1;
+ u32 cr_stats_info2;
+ u32 cr_stats_info3;
+ u32 cr_stats_info4;
+ u32 cr_stats_info5;
+ u32 cr_stats_info6;
+};
+
+struct hfi_frame_cr_stats_type {
+ u32 frame_index;
+ struct hfi_ubwc_cr_stats_info_type ubwc_stats_info;
+};
+
struct hfi_msg_session_empty_buffer_done_packet {
u32 size;
u32 packet_type;
@@ -601,6 +617,8 @@
u32 input_tag;
u32 packet_buffer;
u32 extra_data_buffer;
+ u32 flags;
+ struct hfi_frame_cr_stats_type ubwc_cr_stats;
u32 rgData[0];
};
@@ -761,6 +779,11 @@
u32 format;
};
+struct hfi_conceal_color_type {
+ u32 value_8bit;
+ u32 value_10bit;
+};
+
struct hfi_extradata_num_concealed_mb_payload {
u32 num_mb_concealed;
};
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 6863d5e..e9a5bb3 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -83,6 +83,7 @@
#define HFI_VIDEO_CODEC_VP9 0x00004000
#define HFI_VIDEO_CODEC_HEVC_HYBRID 0x80000000
+#define HFI_PROFILE_UNKNOWN 0x00000000
#define HFI_H264_PROFILE_BASELINE 0x00000001
#define HFI_H264_PROFILE_MAIN 0x00000002
#define HFI_H264_PROFILE_HIGH 0x00000004
@@ -91,6 +92,7 @@
#define HFI_H264_PROFILE_CONSTRAINED_BASE 0x00000020
#define HFI_H264_PROFILE_CONSTRAINED_HIGH 0x00000040
+#define HFI_LEVEL_UNKNOWN 0x00000000
#define HFI_H264_LEVEL_1 0x00000001
#define HFI_H264_LEVEL_1b 0x00000002
#define HFI_H264_LEVEL_11 0x00000004
@@ -249,8 +251,8 @@
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x003)
#define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x004)
-#define HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE \
- (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x008)
+#define HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x009)
#define HFI_PROPERTY_PARAM_VENC_OPEN_GOP \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x00C)
#define HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH \
@@ -537,11 +539,13 @@
struct hfi_quantization {
u32 qp_packed;
u32 layer_id;
+ u32 reserved[4];
};
struct hfi_quantization_range {
struct hfi_quantization min_qp;
struct hfi_quantization max_qp;
+ u32 reserved[4];
};
#define HFI_LTR_MODE_DISABLE 0x0
@@ -617,10 +621,10 @@
#define HFI_COLOR_FORMAT_YUV444 (HFI_COMMON_BASE + 0xE)
#define HFI_COLOR_FORMAT_RGBA8888 (HFI_COMMON_BASE + 0x10)
-#define HFI_COLOR_FORMAT_P010 \
- (HFI_COLOR_FORMAT_10_BIT_BASE + HFI_COLOR_FORMAT_NV12)
#define HFI_COLOR_FORMAT_YUV420_TP10 \
- (HFI_COLOR_FORMAT_10_BIT_BASE + HFI_COLOR_FORMAT_NV12_4x4TILE)
+ (HFI_COLOR_FORMAT_10_BIT_BASE + HFI_COLOR_FORMAT_NV12)
+#define HFI_COLOR_FORMAT_P010 \
+ (HFI_COLOR_FORMAT_10_BIT_BASE + HFI_COLOR_FORMAT_NV12 + 0x1)
#define HFI_COLOR_FORMAT_NV12_UBWC \
(HFI_COLOR_FORMAT_UBWC_BASE + HFI_COLOR_FORMAT_NV12)
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index c2e2587..18b41b9 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -218,22 +218,30 @@
static int smsusb_sendrequest(void *context, void *buffer, size_t size)
{
struct smsusb_device_t *dev = (struct smsusb_device_t *) context;
- struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
- int dummy;
+ struct sms_msg_hdr *phdr;
+ int dummy, ret;
if (dev->state != SMSUSB_ACTIVE) {
pr_debug("Device not active yet\n");
return -ENOENT;
}
+ phdr = kmalloc(size, GFP_KERNEL);
+ if (!phdr)
+ return -ENOMEM;
+ memcpy(phdr, buffer, size);
+
pr_debug("sending %s(%d) size: %d\n",
smscore_translate_msg(phdr->msg_type), phdr->msg_type,
phdr->msg_length);
smsendian_handle_tx_message((struct sms_msg_data *) phdr);
- smsendian_handle_message_header((struct sms_msg_hdr *)buffer);
- return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
- buffer, size, &dummy, 1000);
+ smsendian_handle_message_header((struct sms_msg_hdr *)phdr);
+ ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
+ phdr, size, &dummy, 1000);
+
+ kfree(phdr);
+ return ret;
}
static char *smsusb1_fw_lkup[] = {
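
Two problems are fixed at once in smsusb_sendrequest(): the endian helpers rewrite the message header in place, which previously mutated the caller's buffer, and usb_bulk_msg() needs a DMA-capable buffer, which a caller-supplied pointer (possibly stack or vmalloc memory) need not be. The resulting shape is copy to heap, transmit, free; a sketch with a stand-in function name, kernel context assumed:

    /* Sketch: bounce the caller's message through a heap copy. */
    static int send_msg(struct usb_device *udev, const void *msg, size_t size)
    {
            void *buf;
            int actual, ret;

            buf = kmemdup(msg, size, GFP_KERNEL);  /* heap memory is DMA-safe */
            if (!buf)
                    return -ENOMEM;

            /* In-place fix-ups now touch only the private copy. */
            ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 2),
                               buf, size, &actual, 1000);
            kfree(buf);
            return ret;
    }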
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index df19777..f57700c 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1690,10 +1690,10 @@
err = mmc_select_hs400(card);
if (err)
goto free_card;
- } else if (mmc_card_hs(card)) {
+ } else {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
- if (err > 0) {
+ if (err > 0 && mmc_card_hs(card)) {
err = mmc_select_hs_ddr(card);
if (err)
goto free_card;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 878950a..2cf8b1d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1007,9 +1007,7 @@
static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
- writel(value, reg_addr + reg);
+ writel(value, base + reg);
}
#define dsaf_write_dev(a, reg, value) \
@@ -1017,9 +1015,7 @@
static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
- return readl(reg_addr + reg);
+ return readl(base + reg);
}
static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f2e8bed..4d3ddc2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -507,8 +507,11 @@
return;
for (ring = 0; ring < priv->rx_ring_num; ring++) {
- if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+ if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
+ local_bh_disable();
napi_reschedule(&priv->rx_cq[ring]->napi);
+ local_bh_enable();
+ }
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 71382df..81d8e3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -765,7 +765,8 @@
int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+ enum mlx5e_traffic_types tt);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 27ff401..126cfeb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -975,15 +975,18 @@
static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
{
- struct mlx5_core_dev *mdev = priv->mdev;
void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
- int i;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+ int tt;
MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
- mlx5e_build_tir_ctx_hash(tirc, priv);
- for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
- mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(tirc, 0, ctxlen);
+ mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+ mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+ }
}
static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -991,6 +994,7 @@
{
struct mlx5e_priv *priv = netdev_priv(dev);
int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ bool hash_changed = false;
void *in;
if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1012,14 +1016,21 @@
mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
}
- if (key)
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ hfunc != priv->params.rss_hfunc) {
+ priv->params.rss_hfunc = hfunc;
+ hash_changed = true;
+ }
+
+ if (key) {
memcpy(priv->params.toeplitz_hash_key, key,
sizeof(priv->params.toeplitz_hash_key));
+ hash_changed = hash_changed ||
+ priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+ }
- if (hfunc != ETH_RSS_HASH_NO_CHANGE)
- priv->params.rss_hfunc = hfunc;
-
- mlx5e_modify_tirs_hash(priv, in, inlen);
+ if (hash_changed)
+ mlx5e_modify_tirs_hash(priv, in, inlen);
mutex_unlock(&priv->state_lock);
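
The gating logic in mlx5e_set_rxfh() is worth spelling out: the TIRs are reprogrammed only when the effective hash configuration changed, i.e. the hash function actually differs from the current one, or a new key arrives while Toeplitz is the active function (the XOR hash ignores the key entirely). A condensed sketch; the cur_* and reprogram helpers are stand-ins:

    /* Sketch: touch hardware only when the effective RSS config changed. */
    bool hash_changed = false;

    if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != cur_hfunc) {
            cur_hfunc = hfunc;                 /* function itself changed */
            hash_changed = true;
    }

    if (key) {
            memcpy(cur_key, key, sizeof(cur_key));
            /* a new key is only visible under Toeplitz */
            hash_changed |= (cur_hfunc == ETH_RSS_HASH_TOP);
    }

    if (hash_changed)
            reprogram_indir_tirs();            /* stand-in for the modify loop */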
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5dc3e24..b306713 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1978,8 +1978,23 @@
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
}
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+ enum mlx5e_traffic_types tt)
{
+ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
MLX5_SET(tirc, tirc, rx_hash_fn,
mlx5e_rx_hash_fn(priv->params.rss_hfunc));
if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -1991,6 +2006,88 @@
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
memcpy(rss_key, priv->params.toeplitz_hash_key, len);
}
+
+ switch (tt) {
+ case MLX5E_TT_IPV4_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ case MLX5E_TT_IPV6:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+ default:
+ WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+ }
}
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2360,110 +2457,13 @@
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
enum mlx5e_traffic_types tt)
{
- void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
-#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_L4_SPORT |\
- MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
mlx5e_build_tir_ctx_lro(tirc, priv);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
- mlx5e_build_tir_ctx_hash(tirc, priv);
-
- switch (tt) {
- case MLX5E_TT_IPV4_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV6_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV4_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV6_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV4_IPSEC_AH:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV6_IPSEC_AH:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV4_IPSEC_ESP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV6_IPSEC_ESP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV4:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
-
- case MLX5E_TT_IPV6:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
- default:
- WARN_ONCE(true,
- "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
- }
+ mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
}
static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 914e546..7e20e4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1110,9 +1110,8 @@
return rule;
}
rule = add_rule_fte(fte, fg, dest);
- unlock_ref_node(&fte->node);
if (IS_ERR(rule))
- goto unlock_fg;
+ goto unlock_fte;
else
goto add_rule;
}
@@ -1130,6 +1129,7 @@
goto unlock_fg;
}
tree_init_node(&fte->node, 0, del_fte);
+ nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
rule = add_rule_fte(fte, fg, dest);
if (IS_ERR(rule)) {
kfree(fte);
@@ -1142,6 +1142,8 @@
list_add(&fte->node.list, prev);
add_rule:
tree_add_node(&rule->node, &fte->node);
+unlock_fte:
+ unlock_ref_node(&fte->node);
unlock_fg:
unlock_ref_node(&fg->node);
return rule;
diff --git a/drivers/net/ethernet/msm/rndis_ipa.c b/drivers/net/ethernet/msm/rndis_ipa.c
index 29596f6..b8eff5a 100644
--- a/drivers/net/ethernet/msm/rndis_ipa.c
+++ b/drivers/net/ethernet/msm/rndis_ipa.c
@@ -121,8 +121,8 @@
};
#define RNDIS_IPA_STATE_DEBUG(ctx) \
- (RNDIS_IPA_DEBUG("Driver state: %s\n",\
- rndis_ipa_state_string((ctx)->state)))
+ RNDIS_IPA_DEBUG("Driver state: %s\n",\
+ rndis_ipa_state_string((ctx)->state))
/**
@@ -832,7 +832,7 @@
netdev_tx_t status = NETDEV_TX_BUSY;
struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net);
- net->trans_start = jiffies;
+ netif_trans_update(net);
RNDIS_IPA_DEBUG
("Tx, len=%d, skb->protocol=%d, outstanding=%d\n",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 7df4ff1..7d19029 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -305,8 +305,12 @@
{
void __iomem *ioaddr = hw->pcsr;
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+ u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
int ret = 0;
+ /* Discard masked bits */
+ intr_status &= ~intr_mask;
+
/* Not used events (e.g. MMC interrupts) are not handled. */
if ((intr_status & GMAC_INT_STATUS_MMCTIS))
x->mmc_tx_irq_n++;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 720b5fa..c2ac39a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1288,6 +1288,9 @@
ndev = hv_get_drvdata(device);
buffer = get_per_channel_state(channel);
+ /* commit_rd_index() -> hv_signal_on_read() needs this. */
+ init_cached_read_index(channel);
+
do {
desc = get_next_pkt_raw(channel);
if (desc != NULL) {
@@ -1340,6 +1343,9 @@
bufferlen = bytes_recvd;
}
+
+ init_cached_read_index(channel);
+
} while (1);
if (bufferlen > NETVSC_PACKET_SIZE)
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 6255973..1b65f0f 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -164,6 +164,7 @@
{
dev->mtu = 64 * 1024;
dev->hard_header_len = ETH_HLEN; /* 14 */
+ dev->min_header_len = ETH_HLEN; /* 14 */
dev->addr_len = ETH_ALEN; /* 6 */
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 6f38daf..adea6f5 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -682,7 +682,7 @@
ssize_t n;
if (q->flags & IFF_VNET_HDR) {
- vnet_hdr_len = q->vnet_hdr_sz;
+ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
err = -EINVAL;
if (len < vnet_hdr_len)
@@ -822,7 +822,7 @@
if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
- vnet_hdr_len = q->vnet_hdr_sz;
+ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
if (iov_iter_count(iter) < vnet_hdr_len)
return -EINVAL;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e5d9041..e686b70 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1187,9 +1187,11 @@
}
if (tun->flags & IFF_VNET_HDR) {
- if (len < tun->vnet_hdr_sz)
+ int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+
+ if (len < vnet_hdr_sz)
return -EINVAL;
- len -= tun->vnet_hdr_sz;
+ len -= vnet_hdr_sz;
n = copy_from_iter(&gso, sizeof(gso), from);
if (n != sizeof(gso))
@@ -1201,7 +1203,7 @@
if (tun16_to_cpu(tun, gso.hdr_len) > len)
return -EINVAL;
- iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
+ iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
}
if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
@@ -1348,7 +1350,7 @@
vlan_hlen = VLAN_HLEN;
if (tun->flags & IFF_VNET_HDR)
- vnet_hdr_sz = tun->vnet_hdr_sz;
+ vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
total = skb->len + vlan_hlen + vnet_hdr_sz;
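
The tun and macvtap changes all follow one rule: a field that a concurrent ioctl (TUNSETVNETHDRSZ here) can rewrite must be loaded once per operation, because validating one load and consuming a second lets the value change in between. READ_ONCE() takes the snapshot; afterwards only the local is used. Side by side, as a sketch:

    /* Racy: two loads of a field an ioctl may change concurrently. */
    if (len < tun->vnet_hdr_sz)            /* load #1: validated */
            return -EINVAL;
    len -= tun->vnet_hdr_sz;               /* load #2: may differ */

    /* Fixed: one snapshot serves both the check and the arithmetic. */
    int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

    if (len < vnet_hdr_sz)
            return -EINVAL;
    len -= vnet_hdr_sz;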
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index d9ca05d..4086415 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -777,7 +777,7 @@
struct net_device *netdev;
struct catc *catc;
u8 broadcast[ETH_ALEN];
- int i, pktsz;
+ int pktsz, ret;
if (usb_set_interface(usbdev,
intf->altsetting->desc.bInterfaceNumber, 1)) {
@@ -812,12 +812,8 @@
if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
(!catc->rx_urb) || (!catc->irq_urb)) {
dev_err(&intf->dev, "No free urbs available.\n");
- usb_free_urb(catc->ctrl_urb);
- usb_free_urb(catc->tx_urb);
- usb_free_urb(catc->rx_urb);
- usb_free_urb(catc->irq_urb);
- free_netdev(netdev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail_free;
}
/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
@@ -845,15 +841,24 @@
catc->irq_buf, 2, catc_irq_done, catc, 1);
if (!catc->is_f5u011) {
+ u32 *buf;
+ int i;
+
dev_dbg(dev, "Checking memory size\n");
- i = 0x12345678;
- catc_write_mem(catc, 0x7a80, &i, 4);
- i = 0x87654321;
- catc_write_mem(catc, 0xfa80, &i, 4);
- catc_read_mem(catc, 0x7a80, &i, 4);
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fail_free;
+ }
+
+ *buf = 0x12345678;
+ catc_write_mem(catc, 0x7a80, buf, 4);
+ *buf = 0x87654321;
+ catc_write_mem(catc, 0xfa80, buf, 4);
+ catc_read_mem(catc, 0x7a80, buf, 4);
- switch (i) {
+ switch (*buf) {
case 0x12345678:
catc_set_reg(catc, TxBufCount, 8);
catc_set_reg(catc, RxBufCount, 32);
@@ -868,6 +873,8 @@
dev_dbg(dev, "32k Memory\n");
break;
}
+
+ kfree(buf);
dev_dbg(dev, "Getting MAC from SEEROM.\n");
@@ -914,16 +921,21 @@
usb_set_intfdata(intf, catc);
SET_NETDEV_DEV(netdev, &intf->dev);
- if (register_netdev(netdev) != 0) {
- usb_set_intfdata(intf, NULL);
- usb_free_urb(catc->ctrl_urb);
- usb_free_urb(catc->tx_urb);
- usb_free_urb(catc->rx_urb);
- usb_free_urb(catc->irq_urb);
- free_netdev(netdev);
- return -EIO;
- }
+ ret = register_netdev(netdev);
+ if (ret)
+ goto fail_clear_intfdata;
+
return 0;
+
+fail_clear_intfdata:
+ usb_set_intfdata(intf, NULL);
+fail_free:
+ usb_free_urb(catc->ctrl_urb);
+ usb_free_urb(catc->tx_urb);
+ usb_free_urb(catc->rx_urb);
+ usb_free_urb(catc->irq_urb);
+ free_netdev(netdev);
+ return ret;
}
static void catc_disconnect(struct usb_interface *intf)
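
The catc probe rework is the canonical goto unwind: four duplicated free/return blocks collapse into one exit ladder whose labels are ordered so that a failure at any stage falls through the teardown of exactly what was set up before it. The shape, with hypothetical helpers:

    /* Sketch of the single unwind path (helper names are stand-ins). */
    static int probe(void)
    {
            int ret;

            ret = alloc_urbs_and_netdev();
            if (ret)
                    goto fail_free;

            ret = register_netdevice_stage();   /* register_netdev() here */
            if (ret)
                    goto fail_clear_intfdata;

            return 0;

    fail_clear_intfdata:
            clear_intfdata();        /* undo only what already succeeded */
    fail_free:
            free_urbs_and_netdev();
            return ret;
    }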
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 1434e5d..ee40ac2 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -126,40 +126,61 @@
static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
{
+ u8 *buf;
int ret;
+ buf = kmalloc(size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
- indx, data, size, 1000);
+ indx, buf, size, 1000);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ else if (ret <= size)
+ memcpy(data, buf, ret);
+ kfree(buf);
return ret;
}
-static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
+static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
+ const void *data)
{
+ u8 *buf;
int ret;
+ buf = kmemdup(data, size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
- indx, data, size, 100);
+ indx, buf, size, 100);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ kfree(buf);
return ret;
}
static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
{
+ u8 *buf;
int ret;
+ buf = kmemdup(&data, 1, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
- indx, &data, 1, 1000);
+ indx, buf, 1, 1000);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ kfree(buf);
return ret;
}
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 7c72bfa..dc4f7ea 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -155,16 +155,36 @@
*/
static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
{
- return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
- RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
- indx, 0, data, size, 500);
+ void *buf;
+ int ret;
+
+ buf = kmalloc(size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
+ indx, 0, buf, size, 500);
+ if (ret > 0 && ret <= size)
+ memcpy(data, buf, ret);
+ kfree(buf);
+ return ret;
}
-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
+static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
{
- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
- indx, 0, data, size, 500);
+ void *buf;
+ int ret;
+
+ buf = kmemdup(data, size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
+ indx, 0, buf, size, 500);
+ kfree(buf);
+ return ret;
}
static void async_set_reg_cb(struct urb *urb)
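
pegasus and rtl8150 get the same treatment as smsusb, with one extra wrinkle on the read side: usb_control_msg() returns the byte count actually transferred, so the copy back to the caller must be bounded by that return value, not by the requested size. A sketch; the function and its req/reqtype parameters are stand-ins:

    /* Sketch: DMA-safe register read with a short-transfer guard. */
    static int get_regs(struct usb_device *udev, u8 req, u8 reqtype,
                        u16 indx, u16 size, void *data)
    {
            u8 *buf;
            int ret;

            buf = kmalloc(size, GFP_NOIO);  /* never DMA from *data itself */
            if (!buf)
                    return -ENOMEM;

            ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                  req, reqtype, 0, indx, buf, size, 500);
            if (ret > 0 && ret <= size)     /* copy back only what arrived */
                    memcpy(data, buf, ret);
            kfree(buf);
            return ret;
    }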
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 8b6e37c..20bfb37 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -96,7 +96,7 @@
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- char *fw_name = "rtlwifi/rtl8192cfwU.bin";
+ char *fw_name;
rtl8192ce_bt_reg_init(hw);
@@ -168,8 +168,13 @@
}
/* request fw */
- if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
+ if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+ !IS_92C_SERIAL(rtlhal->version))
+ fw_name = "rtlwifi/rtl8192cfwU.bin";
+ else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+ else
+ fw_name = "rtlwifi/rtl8192cfw.bin";
rtlpriv->max_fw_size = 0x4000;
pr_info("Using firmware %s\n", fw_name);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index bf2744e..0cdcb21 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1397,6 +1397,8 @@
for (i = 0; i < num_queues && info->queues; ++i) {
struct netfront_queue *queue = &info->queues[i];
+ del_timer_sync(&queue->rx_refill_timer);
+
if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
unbind_from_irqhandler(queue->tx_irq, queue);
if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1751,7 +1753,6 @@
if (netif_running(info->netdev))
napi_disable(&queue->napi);
- del_timer_sync(&queue->rx_refill_timer);
netif_napi_del(&queue->napi);
}
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 4eb8adb..c234ee43 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -1799,7 +1799,7 @@
node = dev_to_node(&ndev->dev);
- free_queue = ffs(nt->qp_bitmap);
+ free_queue = ffs(nt->qp_bitmap_free);
if (!free_queue)
goto err;
@@ -2270,9 +2270,8 @@
static void __exit ntb_transport_exit(void)
{
- debugfs_remove_recursive(nt_debugfs_dir);
-
ntb_unregister_client(&ntb_transport_client);
bus_unregister(&ntb_transport_bus);
+ debugfs_remove_recursive(nt_debugfs_dir);
}
module_exit(ntb_transport_exit);
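
The reorder in ntb_transport_exit() enforces the usual rule that exit unwinds in exact reverse of init: clients may still reference the debugfs directory from their removal callbacks, so it must be the last thing torn down. Schematically, assuming the init order used by the driver and with error handling elided:

    /* Sketch: teardown mirrors setup in reverse. */
    static int __init transport_init(void)
    {
            nt_debugfs_dir = debugfs_create_dir("ntb_transport", NULL);  /* 1 */
            bus_register(&ntb_transport_bus);                            /* 2 */
            return ntb_register_client(&ntb_transport_client);           /* 3 */
    }

    static void __exit transport_exit(void)
    {
            ntb_unregister_client(&ntb_transport_client);                /* 3 */
            bus_unregister(&ntb_transport_bus);                          /* 2 */
            debugfs_remove_recursive(nt_debugfs_dir);                    /* 1 */
    }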
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index e75d4fd..434e1d4 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -265,6 +265,8 @@
if (dma_submit_error(cookie))
goto err_set_unmap;
+ dmaengine_unmap_put(unmap);
+
atomic_inc(&pctx->dma_sync);
dma_async_issue_pending(chan);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 1480734..aefca64 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -962,8 +962,8 @@
struct nvdimm_drvdata *ndd;
struct nd_label_id label_id;
u32 flags = 0, remainder;
+ int rc, i, id = -1;
u8 *uuid = NULL;
- int rc, i;
if (dev->driver || ndns->claim)
return -EBUSY;
@@ -972,11 +972,13 @@
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
uuid = nspm->uuid;
+ id = nspm->id;
} else if (is_namespace_blk(dev)) {
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
uuid = nsblk->uuid;
flags = NSLABEL_FLAG_LOCAL;
+ id = nsblk->id;
}
/*
@@ -1039,10 +1041,11 @@
/*
* Try to delete the namespace if we deleted all of its
- * allocation, this is not the seed device for the region, and
- * it is not actively claimed by a btt instance.
+ * allocation, this is not the seed or 0th device for the
+ * region, and it is not actively claimed by a btt, pfn, or dax
+ * instance.
*/
- if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
+ if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
nd_device_unregister(dev, ND_ASYNC);
return rc;
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index a2ac9e6..6c033c9 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -627,15 +627,12 @@
size = resource_size(&nsio->res);
npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
if (nd_pfn->mode == PFN_MODE_PMEM) {
- unsigned long memmap_size;
-
/*
* vmemmap_populate_hugepages() allocates the memmap array in
* HPAGE_SIZE chunks.
*/
- memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
- offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve,
- nd_pfn->align) - start;
+ offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
+ max(nd_pfn->align, HPAGE_SIZE)) - start;
} else if (nd_pfn->mode == PFN_MODE_RAM)
offset = ALIGN(start + SZ_8K + dax_label_reserve,
nd_pfn->align) - start;
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 884bad5..4b70349 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -448,6 +448,17 @@
return 0;
}
+/**
+ * pcie_pme_remove - Prepare PCIe PME service device for removal.
+ * @srv: PCIe service device to remove.
+ */
+static void pcie_pme_remove(struct pcie_device *srv)
+{
+ pcie_pme_suspend(srv);
+ free_irq(srv->irq, srv);
+ kfree(get_service_data(srv));
+}
+
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
.port_type = PCI_EXP_TYPE_ROOT_PORT,
@@ -456,6 +467,7 @@
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
.resume = pcie_pme_resume,
+ .remove = pcie_pme_remove,
};
/**
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 23b0428..f48182c 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -22,7 +22,7 @@
#include "gsi_reg.h"
#define GSI_CMD_TIMEOUT (5*HZ)
-#define GSI_STOP_CMD_TIMEOUT_MS 1
+#define GSI_STOP_CMD_TIMEOUT_MS 10
#define GSI_MAX_CH_LOW_WEIGHT 15
#define GSI_MHI_ER_START 10
#define GSI_MHI_ER_END 16
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 06881d3..aa81bdc 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2510,15 +2510,15 @@
/**
* ipa_get_gsi_ep_info() - provide gsi ep information
- * @ipa_ep_idx: IPA endpoint index
+ * @client: IPA client type
*
* Return value: pointer to ipa_gsi_ep_info
*/
-struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(int ipa_ep_idx)
+const struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(enum ipa_client_type client)
{
if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_get_gsi_ep_info)
return NULL;
- return ipa_api_ctrl->ipa_get_gsi_ep_info(ipa_ep_idx);
+ return ipa_api_ctrl->ipa_get_gsi_ep_info(client);
}
EXPORT_SYMBOL(ipa_get_gsi_ep_info);
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 171c9fb..1b8e3d6 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -331,7 +331,8 @@
int (*ipa_create_wdi_mapping)(u32 num_buffers,
struct ipa_wdi_buffer_info *info);
- struct ipa_gsi_ep_config *(*ipa_get_gsi_ep_info)(int ipa_ep_idx);
+ const struct ipa_gsi_ep_config *(*ipa_get_gsi_ep_info)
+ (enum ipa_client_type client);
int (*ipa_register_ipa_ready_cb)(void (*ipa_ready_cb)(void *user_data),
void *user_data);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index feec2aa..4d735df 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -1827,7 +1827,7 @@
u32 max_cmds = ipa_get_max_flt_rt_cmds(ipa_ctx->ipa_num_pipes);
mem.base = dma_alloc_coherent(ipa_ctx->pdev, 4, &mem.phys_base,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!mem.base) {
IPAERR("failed to alloc DMA buff of size 4\n");
return -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 4fdd84b..dd12169 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -4929,7 +4929,8 @@
return res;
}
-static struct ipa_gsi_ep_config *ipa2_get_gsi_ep_info(int ipa_ep_idx)
+static const struct ipa_gsi_ep_config *ipa2_get_gsi_ep_info
+ (enum ipa_client_type client)
{
IPAERR("Not supported for IPA 2.x\n");
return NULL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index e0ae1c6..acc597b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -227,19 +227,11 @@
static void ipa3_start_tag_process(struct work_struct *work);
static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);
-static void ipa3_sps_release_resource(struct work_struct *work);
-static DECLARE_DELAYED_WORK(ipa3_sps_release_resource_work,
- ipa3_sps_release_resource);
+static void ipa3_transport_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_transport_release_resource_work,
+ ipa3_transport_release_resource);
static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);
-static void ipa_gsi_request_resource(struct work_struct *work);
-static DECLARE_WORK(ipa_gsi_request_resource_work,
- ipa_gsi_request_resource);
-
-static void ipa_gsi_release_resource(struct work_struct *work);
-static DECLARE_DELAYED_WORK(ipa_gsi_release_resource_work,
- ipa_gsi_release_resource);
-
static struct ipa3_plat_drv_res ipa3_res = {0, };
struct msm_bus_scale_pdata *ipa3_bus_scale_table;
@@ -1974,7 +1966,7 @@
}
retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
- 0, &mem);
+ 0, &mem, true);
if (retval) {
IPAERR("failed to generate flt single tbl empty img\n");
goto free_cmd_pyld;
@@ -2081,7 +2073,7 @@
retval = ipahal_rt_generate_empty_img(
modem_rt_index_hi - modem_rt_index_lo + 1,
- lcl_hdr_sz, lcl_hdr_sz, &mem);
+ lcl_hdr_sz, lcl_hdr_sz, &mem, true);
if (retval) {
IPAERR("fail generate empty rt img\n");
return -ENOMEM;
@@ -2554,7 +2546,7 @@
rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
- &mem);
+ &mem, false);
if (rc) {
IPAERR("fail generate empty v4 rt img\n");
return rc;
@@ -2621,7 +2613,7 @@
rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
- &mem);
+ &mem, false);
if (rc) {
IPAERR("fail generate empty v6 rt img\n");
return rc;
@@ -2682,7 +2674,7 @@
rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
IPA_MEM_PART(v4_flt_hash_size),
IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
- &mem);
+ &mem, false);
if (rc) {
IPAERR("fail generate empty v4 flt img\n");
return rc;
@@ -2742,7 +2734,7 @@
rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
IPA_MEM_PART(v6_flt_hash_size),
IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
- &mem);
+ &mem, false);
if (rc) {
IPAERR("fail generate empty v6 flt img\n");
return rc;
@@ -3238,8 +3230,8 @@
*
* This function is called prior to clock gating when active client counter
* is 1. TAG process ensures that there are no packets inside IPA HW that
- * were not submitted to peer's BAM. During TAG process all aggregation frames
- * are (force) closed.
+ * were not submitted to the IPA client via the transport. During TAG process
+ * all aggregation frames are (force) closed.
*
* Return codes:
* None
@@ -3545,10 +3537,10 @@
return 0;
}
-static void ipa3_sps_process_irq_schedule_rel(void)
+static void ipa3_process_irq_schedule_rel(void)
{
queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
- &ipa3_sps_release_resource_work,
+ &ipa3_transport_release_resource_work,
msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
}
@@ -3593,7 +3585,7 @@
atomic_set(
&ipa3_ctx->transport_pm.dec_clients,
1);
- ipa3_sps_process_irq_schedule_rel();
+ ipa3_process_irq_schedule_rel();
}
} else {
resource = ipa3_get_rm_resource_from_ep(i);
@@ -3655,17 +3647,17 @@
return 0;
}
-static void ipa3_sps_release_resource(struct work_struct *work)
+static void ipa3_transport_release_resource(struct work_struct *work)
{
mutex_lock(&ipa3_ctx->transport_pm.transport_pm_mutex);
/* check whether still need to decrease client usage */
if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
IPADBG("EOT pending Re-scheduling\n");
- ipa3_sps_process_irq_schedule_rel();
+ ipa3_process_irq_schedule_rel();
} else {
atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
- IPA_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE");
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TRANSPORT_RESOURCE");
}
}
atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
@@ -3891,13 +3883,13 @@
/**
* ipa3_post_init() - Initialize the IPA Driver (Part II).
* This part contains all initialization which requires interaction with
- * IPA HW (via SPS BAM or GSI).
+ * IPA HW (via GSI).
*
* @resource_p: contain platform specific values from DST file
* @pdev: The platform device structure representing the IPA driver
*
* Function initialization process:
- * - Register BAM/SPS or GSI
+ * - Register GSI
* - Setup APPS pipes
* - Initialize tethering bridge
* - Initialize IPA debugfs
@@ -3912,57 +3904,28 @@
struct device *ipa_dev)
{
int result;
- struct sps_bam_props bam_props = { 0 };
struct gsi_per_props gsi_props;
struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- memset(&gsi_props, 0, sizeof(gsi_props));
- gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
- gsi_props.ee = resource_p->ee;
- gsi_props.intr = GSI_INTR_IRQ;
- gsi_props.irq = resource_p->transport_irq;
- gsi_props.phys_addr = resource_p->transport_mem_base;
- gsi_props.size = resource_p->transport_mem_size;
- gsi_props.notify_cb = ipa_gsi_notify_cb;
- gsi_props.req_clk_cb = NULL;
- gsi_props.rel_clk_cb = NULL;
+ memset(&gsi_props, 0, sizeof(gsi_props));
+ gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
+ gsi_props.ee = resource_p->ee;
+ gsi_props.intr = GSI_INTR_IRQ;
+ gsi_props.irq = resource_p->transport_irq;
+ gsi_props.phys_addr = resource_p->transport_mem_base;
+ gsi_props.size = resource_p->transport_mem_size;
+ gsi_props.notify_cb = ipa_gsi_notify_cb;
+ gsi_props.req_clk_cb = NULL;
+ gsi_props.rel_clk_cb = NULL;
- result = gsi_register_device(&gsi_props,
- &ipa3_ctx->gsi_dev_hdl);
- if (result != GSI_STATUS_SUCCESS) {
- IPAERR(":gsi register error - %d\n", result);
- result = -ENODEV;
- goto fail_register_device;
- }
- IPADBG("IPA gsi is registered\n");
- } else {
- /* register IPA with SPS driver */
- bam_props.phys_addr = resource_p->transport_mem_base;
- bam_props.virt_size = resource_p->transport_mem_size;
- bam_props.irq = resource_p->transport_irq;
- bam_props.num_pipes = ipa3_ctx->ipa_num_pipes;
- bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
- bam_props.event_threshold = IPA_EVENT_THRESHOLD;
- bam_props.options |= SPS_BAM_NO_LOCAL_CLK_GATING;
- if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
- bam_props.options |= SPS_BAM_OPT_IRQ_WAKEUP;
- if (ipa3_ctx->ipa_bam_remote_mode == true)
- bam_props.manage |= SPS_BAM_MGR_DEVICE_REMOTE;
- if (!ipa3_ctx->smmu_s1_bypass)
- bam_props.options |= SPS_BAM_SMMU_EN;
- bam_props.ee = resource_p->ee;
- bam_props.ipc_loglevel = 3;
-
- result = sps_register_bam_device(&bam_props,
- &ipa3_ctx->bam_handle);
- if (result) {
- IPAERR(":bam register error - %d\n", result);
- result = -EPROBE_DEFER;
- goto fail_register_device;
- }
- IPADBG("IPA BAM is registered\n");
+ result = gsi_register_device(&gsi_props,
+ &ipa3_ctx->gsi_dev_hdl);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR(":gsi register error - %d\n", result);
+ result = -ENODEV;
+ goto fail_register_device;
}
+ IPADBG("IPA gsi is registered\n");
/* setup the AP-IPA pipes */
if (ipa3_setup_apps_pipes()) {
@@ -3970,7 +3933,7 @@
result = -ENODEV;
goto fail_setup_apps_pipes;
}
- IPADBG("IPA System2Bam pipes were connected\n");
+ IPADBG("IPA GPI pipes were connected\n");
if (ipa3_ctx->use_ipa_teth_bridge) {
/* Initialize the tethering bridge driver */
@@ -4023,18 +3986,13 @@
fail_teth_bridge_driver_init:
ipa3_teardown_apps_pipes();
fail_setup_apps_pipes:
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
- gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
- else
- sps_deregister_bam_device(ipa3_ctx->bam_handle);
+ gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
fail_register_device:
ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
ipa_rm_exit();
cdev_del(&ipa3_ctx->cdev);
device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
unregister_chrdev_region(ipa3_ctx->dev_num, 1);
- if (ipa3_ctx->pipe_mem_pool)
- gen_pool_destroy(ipa3_ctx->pipe_mem_pool);
ipa3_destroy_flt_tbl_idrs();
idr_destroy(&ipa3_ctx->ipa_idr);
kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
@@ -4139,27 +4097,22 @@
if (ipa3_is_ready())
return count;
- /*
- * We will trigger the process only if we're in GSI mode, otherwise,
- * we just ignore the write.
- */
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
- if (ipa3_is_msm_device())
- result = ipa3_trigger_fw_loading_msms();
- else
- result = ipa3_trigger_fw_loading_mdms();
- /* No IPAv3.x chipsets that don't support FW loading */
+ if (ipa3_is_msm_device())
+ result = ipa3_trigger_fw_loading_msms();
+ else
+ result = ipa3_trigger_fw_loading_mdms();
+ /* No IPAv3.x chipsets that don't support FW loading */
- IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
- if (result) {
- IPAERR("FW loading process has failed\n");
+ if (result) {
+ IPAERR("FW loading process has failed\n");
return result;
- } else
- ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
- }
+ } else
+ ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
+
return count;
}
@@ -4226,7 +4179,6 @@
* 3)read HW values(such as core memory size)
* - Map IPA core registers to CPU memory
* - Restart IPA core(HW reset)
-* - Set configuration for IPA BAM via BAM_CNFG_BITS
* - Initialize the look-aside caches(kmem_cache/slab) for filter,
* routing and IPA-tree
* - Create memory pool with 4 objects for DMA operations(each object
@@ -4239,7 +4191,6 @@
* routing table ,filtering rule
* - Initialize the filter block by committing IPV4 and IPV6 default rules
* - Create empty routing table in system memory(no committing)
-* - Initialize pipes memory pool with ipa3_pipe_mem_init for supported platforms
* - Create a char-device for IPA
* - Initialize IPA RM (resource manager)
* - Configure GSI registers (in GSI case)
@@ -4281,7 +4232,6 @@
ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
- ipa3_ctx->ipa_bam_remote_mode = resource_p->ipa_bam_remote_mode;
ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
@@ -4289,7 +4239,6 @@
ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
- ipa3_ctx->transport_prototype = resource_p->transport_prototype;
ipa3_ctx->ee = resource_p->ee;
ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
@@ -4468,11 +4417,7 @@
goto fail_create_transport_wq;
}
- /* Initialize the SPS PM lock. */
mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);
- spin_lock_init(&ipa3_ctx->transport_pm.lock);
- ipa3_ctx->transport_pm.res_granted = false;
- ipa3_ctx->transport_pm.res_rel_in_prog = false;
/* init the lookaside cache */
ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
@@ -4543,16 +4488,6 @@
goto fail_rx_pkt_wrapper_cache;
}
- /* Setup DMA pool */
- ipa3_ctx->dma_pool = dma_pool_create("ipa_tx", ipa3_ctx->pdev,
- IPA_NUM_DESC_PER_SW_TX * sizeof(struct sps_iovec),
- 0, 0);
- if (!ipa3_ctx->dma_pool) {
- IPAERR("cannot alloc DMA pool.\n");
- result = -ENOMEM;
- goto fail_dma_pool;
- }
-
/* init the various list heads */
INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
@@ -4611,11 +4546,6 @@
spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list);
- /* setup the IPA pipe mem pool */
- if (resource_p->ipa_pipe_mem_size)
- ipa3_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst,
- resource_p->ipa_pipe_mem_size);
-
ipa3_ctx->class = class_create(THIS_MODULE, DRV_NAME);
result = alloc_chrdev_region(&ipa3_ctx->dev_num, 0, 1, DRV_NAME);
@@ -4693,26 +4623,20 @@
init_completion(&ipa3_ctx->uc_loaded_completion_obj);
/*
- * For GSI, we can't register the GSI driver yet, as it expects
+ * We can't register the GSI driver yet, as it expects
* the GSI FW to be up and running before the registration.
+ *
+ * For IPA3.0, the GSI configuration is done by the GSI driver.
+ * For IPA3.1 (and on), the GSI configuration is done by TZ.
*/
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- /*
- * For IPA3.0, the GSI configuration is done by the GSI driver.
- * For IPA3.1 (and on), the GSI configuration is done by TZ.
- */
- if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
- result = ipa3_gsi_pre_fw_load_init();
- if (result) {
- IPAERR("gsi pre FW loading config failed\n");
- result = -ENODEV;
- goto fail_ipa_init_interrupts;
- }
+ if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
+ result = ipa3_gsi_pre_fw_load_init();
+ if (result) {
+ IPAERR("gsi pre FW loading config failed\n");
+ result = -ENODEV;
+ goto fail_ipa_init_interrupts;
}
}
- /* For BAM (No other mode), we can just carry on with initialization */
- else
- return ipa3_post_init(resource_p, ipa_dev);
return 0;
@@ -4728,11 +4652,8 @@
fail_device_create:
unregister_chrdev_region(ipa3_ctx->dev_num, 1);
fail_alloc_chrdev_region:
- if (ipa3_ctx->pipe_mem_pool)
- gen_pool_destroy(ipa3_ctx->pipe_mem_pool);
ipa3_destroy_flt_tbl_idrs();
idr_destroy(&ipa3_ctx->ipa_idr);
-fail_dma_pool:
kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
fail_rx_pkt_wrapper_cache:
kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
@@ -4792,7 +4713,6 @@
ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
ipa_drv_res->ipa_hw_type = 0;
ipa_drv_res->ipa3_hw_mode = 0;
- ipa_drv_res->ipa_bam_remote_mode = false;
ipa_drv_res->modem_cfg_emb_pipe_flt = false;
ipa_drv_res->ipa_wdi2 = false;
ipa_drv_res->use_64_bit_dma_mask = false;
@@ -4854,13 +4774,6 @@
ipa_drv_res->use_ipa_teth_bridge
? "True" : "False");
- ipa_drv_res->ipa_bam_remote_mode =
- of_property_read_bool(pdev->dev.of_node,
- "qcom,ipa-bam-remote-mode");
- IPADBG(": ipa bam remote mode = %s\n",
- ipa_drv_res->ipa_bam_remote_mode
- ? "True" : "False");
-
ipa_drv_res->modem_cfg_emb_pipe_flt =
of_property_read_bool(pdev->dev.of_node,
"qcom,modem-cfg-emb-pipe-flt");
@@ -4896,16 +4809,6 @@
ipa_drv_res->tethered_flow_control
? "True" : "False");
- if (of_property_read_bool(pdev->dev.of_node,
- "qcom,use-gsi"))
- ipa_drv_res->transport_prototype = IPA_TRANSPORT_TYPE_GSI;
- else
- ipa_drv_res->transport_prototype = IPA_TRANSPORT_TYPE_SPS;
-
- IPADBG(": transport type = %s\n",
- ipa_drv_res->transport_prototype == IPA_TRANSPORT_TYPE_SPS
- ? "SPS" : "GSI");
-
/* Get IPA wrapper address */
resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"ipa-base");
@@ -4922,53 +4825,28 @@
smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
- if (ipa_drv_res->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
- /* Get IPA BAM address */
- resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "bam-base");
- if (!resource) {
- IPAERR(":get resource failed for bam-base!\n");
- return -ENODEV;
- }
- ipa_drv_res->transport_mem_base = resource->start;
- ipa_drv_res->transport_mem_size = resource_size(resource);
- IPADBG(": bam-base = 0x%x, size = 0x%x\n",
- ipa_drv_res->transport_mem_base,
- ipa_drv_res->transport_mem_size);
-
- /* Get IPA BAM IRQ number */
- resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- "bam-irq");
- if (!resource) {
- IPAERR(":get resource failed for bam-irq!\n");
- return -ENODEV;
- }
- ipa_drv_res->transport_irq = resource->start;
- IPADBG(": bam-irq = %d\n", ipa_drv_res->transport_irq);
- } else {
- /* Get IPA GSI address */
- resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "gsi-base");
- if (!resource) {
- IPAERR(":get resource failed for gsi-base!\n");
- return -ENODEV;
- }
- ipa_drv_res->transport_mem_base = resource->start;
- ipa_drv_res->transport_mem_size = resource_size(resource);
- IPADBG(": gsi-base = 0x%x, size = 0x%x\n",
- ipa_drv_res->transport_mem_base,
- ipa_drv_res->transport_mem_size);
-
- /* Get IPA GSI IRQ number */
- resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- "gsi-irq");
- if (!resource) {
- IPAERR(":get resource failed for gsi-irq!\n");
- return -ENODEV;
- }
- ipa_drv_res->transport_irq = resource->start;
- IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq);
+ /* Get IPA GSI address */
+ resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "gsi-base");
+ if (!resource) {
+ IPAERR(":get resource failed for gsi-base!\n");
+ return -ENODEV;
}
+ ipa_drv_res->transport_mem_base = resource->start;
+ ipa_drv_res->transport_mem_size = resource_size(resource);
+ IPADBG(": gsi-base = 0x%x, size = 0x%x\n",
+ ipa_drv_res->transport_mem_base,
+ ipa_drv_res->transport_mem_size);
+
+ /* Get IPA GSI IRQ number */
+ resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "gsi-irq");
+ if (!resource) {
+ IPAERR(":get resource failed for gsi-irq!\n");
+ return -ENODEV;
+ }
+ ipa_drv_res->transport_irq = resource->start;
+ IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq);
/* Get IPA pipe mem start ofst */
resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -5585,9 +5463,11 @@
}
}
- /* release SPS IPA resource without waiting for inactivity timer */
+ /*
+ * Release transport IPA resource without waiting for inactivity timer
+ */
atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
- ipa3_sps_release_resource(NULL);
+ ipa3_transport_release_resource(NULL);
IPADBG("Exit\n");
return 0;
@@ -5612,85 +5492,6 @@
return ipa3_ctx;
}
-static void ipa_gsi_request_resource(struct work_struct *work)
-{
- unsigned long flags;
- int ret;
-
- /* request IPA clocks */
- IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-
- /* mark transport resource as granted */
- spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
- ipa3_ctx->transport_pm.res_granted = true;
-
- IPADBG("IPA is ON, calling gsi driver\n");
- ret = gsi_complete_clk_grant(ipa3_ctx->gsi_dev_hdl);
- if (ret != GSI_STATUS_SUCCESS)
- IPAERR("gsi_complete_clk_grant failed %d\n", ret);
-
- spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
-}
-
-void ipa_gsi_req_res_cb(void *user_data, bool *granted)
-{
- unsigned long flags;
- struct ipa_active_client_logging_info log_info;
-
- spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
-
- /* make sure no release will happen */
- cancel_delayed_work(&ipa_gsi_release_resource_work);
- ipa3_ctx->transport_pm.res_rel_in_prog = false;
-
- if (ipa3_ctx->transport_pm.res_granted) {
- *granted = true;
- } else {
- IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "GSI_RESOURCE");
- if (ipa3_inc_client_enable_clks_no_block(&log_info) == 0) {
- ipa3_ctx->transport_pm.res_granted = true;
- *granted = true;
- } else {
- queue_work(ipa3_ctx->transport_power_mgmt_wq,
- &ipa_gsi_request_resource_work);
- *granted = false;
- }
- }
- spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
-}
-
-static void ipa_gsi_release_resource(struct work_struct *work)
-{
- unsigned long flags;
- bool dec_clients = false;
-
- spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
- /* check whether still need to decrease client usage */
- if (ipa3_ctx->transport_pm.res_rel_in_prog) {
- dec_clients = true;
- ipa3_ctx->transport_pm.res_rel_in_prog = false;
- ipa3_ctx->transport_pm.res_granted = false;
- }
- spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
- if (dec_clients)
- IPA_ACTIVE_CLIENTS_DEC_SPECIAL("GSI_RESOURCE");
-}
-
-int ipa_gsi_rel_res_cb(void *user_data)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
-
- ipa3_ctx->transport_pm.res_rel_in_prog = true;
- queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
- &ipa_gsi_release_resource_work,
- msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
-
- spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
- return 0;
-}
-
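The deleted callbacks above implemented a cancelable deferred-release idiom worth noting before it disappears: a release request only arms a delayed work item, and a new grant request cancels it, so back-to-back release/request cycles never actually drop the clock vote. Reduced to its skeleton (a generic sketch of the pattern being removed, not a proposed replacement; pm->lock, pm->wq and release_work stand in for the transport_pm fields and work items the patch deletes):

	/* Release side: defer the real release by a grace period. */
	spin_lock_irqsave(&pm->lock, flags);
	pm->res_rel_in_prog = true;
	queue_delayed_work(pm->wq, &release_work,
		msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
	spin_unlock_irqrestore(&pm->lock, flags);

	/* Request side: cancel a pending release so the vote survives. */
	spin_lock_irqsave(&pm->lock, flags);
	cancel_delayed_work(&release_work);
	pm->res_rel_in_prog = false;
	spin_unlock_irqrestore(&pm->lock, flags);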
static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
{
switch (notify->evt_id) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 75b2824..796103f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -130,623 +130,6 @@
return res;
}
-static int ipa3_smmu_map_peer_bam(unsigned long dev)
-{
- phys_addr_t base;
- u32 size;
- struct iommu_domain *smmu_domain;
- struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
-
- if (!ipa3_ctx->smmu_s1_bypass) {
- if (ipa3_ctx->peer_bam_map_cnt == 0) {
- if (sps_get_bam_addr(dev, &base, &size)) {
- IPAERR("Fail to get addr\n");
- return -EINVAL;
- }
- smmu_domain = ipa3_get_smmu_domain();
- if (smmu_domain != NULL) {
- if (ipa3_iommu_map(smmu_domain,
- cb->va_end,
- rounddown(base, PAGE_SIZE),
- roundup(size + base -
- rounddown(base, PAGE_SIZE), PAGE_SIZE),
- IOMMU_READ | IOMMU_WRITE |
- IOMMU_MMIO)) {
- IPAERR("Fail to ipa3_iommu_map\n");
- return -EINVAL;
- }
- }
-
- ipa3_ctx->peer_bam_iova = cb->va_end;
- ipa3_ctx->peer_bam_pa = base;
- ipa3_ctx->peer_bam_map_size = size;
- ipa3_ctx->peer_bam_dev = dev;
-
- IPADBG("Peer bam %lu mapped\n", dev);
- } else {
- WARN_ON(dev != ipa3_ctx->peer_bam_dev);
- }
-
- ipa3_ctx->peer_bam_map_cnt++;
- }
-
- return 0;
-}
-
-static int ipa3_connect_configure_sps(const struct ipa_connect_params *in,
- struct ipa3_ep_context *ep, int ipa_ep_idx)
-{
- int result = -EFAULT;
-
- /* Default Config */
- ep->ep_hdl = sps_alloc_endpoint();
-
- if (ipa3_smmu_map_peer_bam(in->client_bam_hdl)) {
- IPAERR("fail to iommu map peer BAM.\n");
- return -EFAULT;
- }
-
- if (ep->ep_hdl == NULL) {
- IPAERR("SPS EP alloc failed EP.\n");
- return -EFAULT;
- }
-
- result = sps_get_config(ep->ep_hdl,
- &ep->connect);
- if (result) {
- IPAERR("fail to get config.\n");
- return -EFAULT;
- }
-
- /* Specific Config */
- if (IPA_CLIENT_IS_CONS(in->client)) {
- ep->connect.mode = SPS_MODE_SRC;
- ep->connect.destination =
- in->client_bam_hdl;
- ep->connect.dest_iova = ipa3_ctx->peer_bam_iova;
- ep->connect.source = ipa3_ctx->bam_handle;
- ep->connect.dest_pipe_index =
- in->client_ep_idx;
- ep->connect.src_pipe_index = ipa_ep_idx;
- } else {
- ep->connect.mode = SPS_MODE_DEST;
- ep->connect.source = in->client_bam_hdl;
- ep->connect.source_iova = ipa3_ctx->peer_bam_iova;
- ep->connect.destination = ipa3_ctx->bam_handle;
- ep->connect.src_pipe_index = in->client_ep_idx;
- ep->connect.dest_pipe_index = ipa_ep_idx;
- }
-
- return 0;
-}
-
-static int ipa3_connect_allocate_fifo(const struct ipa_connect_params *in,
- struct sps_mem_buffer *mem_buff_ptr,
- bool *fifo_in_pipe_mem_ptr,
- u32 *fifo_pipe_mem_ofst_ptr,
- u32 fifo_size, int ipa_ep_idx)
-{
- dma_addr_t dma_addr;
- u32 ofst;
- int result = -EFAULT;
- struct iommu_domain *smmu_domain;
-
- mem_buff_ptr->size = fifo_size;
- if (in->pipe_mem_preferred) {
- if (ipa3_pipe_mem_alloc(&ofst, fifo_size)) {
- IPAERR("FIFO pipe mem alloc fail ep %u\n",
- ipa_ep_idx);
- mem_buff_ptr->base =
- dma_alloc_coherent(ipa3_ctx->pdev,
- mem_buff_ptr->size,
- &dma_addr, GFP_KERNEL);
- } else {
- memset(mem_buff_ptr, 0, sizeof(struct sps_mem_buffer));
- result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst,
- fifo_size, 1);
- WARN_ON(result);
- *fifo_in_pipe_mem_ptr = 1;
- dma_addr = mem_buff_ptr->phys_base;
- *fifo_pipe_mem_ofst_ptr = ofst;
- }
- } else {
- mem_buff_ptr->base =
- dma_alloc_coherent(ipa3_ctx->pdev, mem_buff_ptr->size,
- &dma_addr, GFP_KERNEL);
- }
- if (ipa3_ctx->smmu_s1_bypass) {
- mem_buff_ptr->phys_base = dma_addr;
- } else {
- mem_buff_ptr->iova = dma_addr;
- smmu_domain = ipa_get_smmu_domain();
- if (smmu_domain != NULL) {
- mem_buff_ptr->phys_base =
- iommu_iova_to_phys(smmu_domain, dma_addr);
- }
- }
- if (mem_buff_ptr->base == NULL) {
- IPAERR("fail to get DMA memory.\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * ipa3_connect() - low-level IPA client connect
- * @in: [in] input parameters from client
- * @sps: [out] sps output from IPA needed by client for sps_connect
- * @clnt_hdl: [out] opaque client handle assigned by IPA to client
- *
- * Should be called by the driver of the peripheral that wants to connect to
- * IPA in BAM-BAM mode. these peripherals are USB and HSIC. this api
- * expects caller to take responsibility to add any needed headers, routing
- * and filtering tables and rules as needed.
- *
- * Returns: 0 on success, negative on failure
- *
- * Note: Should not be called from atomic context
- */
-int ipa3_connect(const struct ipa_connect_params *in,
- struct ipa_sps_params *sps,
- u32 *clnt_hdl)
-{
- int ipa_ep_idx;
- int result = -EFAULT;
- struct ipa3_ep_context *ep;
- struct ipahal_reg_ep_cfg_status ep_status;
- unsigned long base;
- struct iommu_domain *smmu_domain;
-
- IPADBG("connecting client\n");
-
- if (in == NULL || sps == NULL || clnt_hdl == NULL ||
- in->client >= IPA_CLIENT_MAX ||
- in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
- IPAERR("bad parm.\n");
- return -EINVAL;
- }
-
- ipa_ep_idx = ipa3_get_ep_mapping(in->client);
- if (ipa_ep_idx == -1) {
- IPAERR("fail to alloc EP.\n");
- goto fail;
- }
-
- ep = &ipa3_ctx->ep[ipa_ep_idx];
-
- if (ep->valid) {
- IPAERR("EP already allocated.\n");
- goto fail;
- }
-
- memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
- IPA_ACTIVE_CLIENTS_INC_EP(in->client);
-
- ep->skip_ep_cfg = in->skip_ep_cfg;
- ep->valid = 1;
- ep->client = in->client;
- ep->client_notify = in->notify;
- ep->priv = in->priv;
- ep->keep_ipa_awake = in->keep_ipa_awake;
-
- result = ipa3_enable_data_path(ipa_ep_idx);
- if (result) {
- IPAERR("enable data path failed res=%d clnt=%d.\n", result,
- ipa_ep_idx);
- goto ipa_cfg_ep_fail;
- }
-
- if (!ep->skip_ep_cfg) {
- if (ipa3_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
- IPAERR("fail to configure EP.\n");
- goto ipa_cfg_ep_fail;
- }
- /* Setting EP status 0 */
- memset(&ep_status, 0, sizeof(ep_status));
- if (ipa3_cfg_ep_status(ipa_ep_idx, &ep_status)) {
- IPAERR("fail to configure status of EP.\n");
- goto ipa_cfg_ep_fail;
- }
- IPADBG("ep configuration successful\n");
- } else {
- IPADBG("Skipping endpoint configuration.\n");
- }
-
- result = ipa3_connect_configure_sps(in, ep, ipa_ep_idx);
- if (result) {
- IPAERR("fail to configure SPS.\n");
- goto ipa_cfg_ep_fail;
- }
-
- if (!ipa3_ctx->smmu_s1_bypass &&
- (in->desc.base == NULL ||
- in->data.base == NULL)) {
- IPAERR(" allocate FIFOs data_fifo=0x%p desc_fifo=0x%p.\n",
- in->data.base, in->desc.base);
- goto desc_mem_alloc_fail;
- }
-
- if (in->desc.base == NULL) {
- result = ipa3_connect_allocate_fifo(in, &ep->connect.desc,
- &ep->desc_fifo_in_pipe_mem,
- &ep->desc_fifo_pipe_mem_ofst,
- in->desc_fifo_sz, ipa_ep_idx);
- if (result) {
- IPAERR("fail to allocate DESC FIFO.\n");
- goto desc_mem_alloc_fail;
- }
- } else {
- IPADBG("client allocated DESC FIFO\n");
- ep->connect.desc = in->desc;
- ep->desc_fifo_client_allocated = 1;
- }
- IPADBG("Descriptor FIFO pa=%pa, size=%d\n", &ep->connect.desc.phys_base,
- ep->connect.desc.size);
-
- if (in->data.base == NULL) {
- result = ipa3_connect_allocate_fifo(in, &ep->connect.data,
- &ep->data_fifo_in_pipe_mem,
- &ep->data_fifo_pipe_mem_ofst,
- in->data_fifo_sz, ipa_ep_idx);
- if (result) {
- IPAERR("fail to allocate DATA FIFO.\n");
- goto data_mem_alloc_fail;
- }
- } else {
- IPADBG("client allocated DATA FIFO\n");
- ep->connect.data = in->data;
- ep->data_fifo_client_allocated = 1;
- }
- IPADBG("Data FIFO pa=%pa, size=%d\n", &ep->connect.data.phys_base,
- ep->connect.data.size);
-
- if (!ipa3_ctx->smmu_s1_bypass) {
- ep->connect.data.iova = ep->connect.data.phys_base;
- base = ep->connect.data.iova;
- smmu_domain = ipa_get_smmu_domain();
- if (smmu_domain != NULL) {
- if (ipa3_iommu_map(smmu_domain,
- rounddown(base, PAGE_SIZE),
- rounddown(base, PAGE_SIZE),
- roundup(ep->connect.data.size + base -
- rounddown(base, PAGE_SIZE), PAGE_SIZE),
- IOMMU_READ | IOMMU_WRITE)) {
- IPAERR("Fail to ipa3_iommu_map data FIFO\n");
- goto iommu_map_data_fail;
- }
- }
- ep->connect.desc.iova = ep->connect.desc.phys_base;
- base = ep->connect.desc.iova;
- if (smmu_domain != NULL) {
- if (ipa3_iommu_map(smmu_domain,
- rounddown(base, PAGE_SIZE),
- rounddown(base, PAGE_SIZE),
- roundup(ep->connect.desc.size + base -
- rounddown(base, PAGE_SIZE), PAGE_SIZE),
- IOMMU_READ | IOMMU_WRITE)) {
- IPAERR("Fail to ipa3_iommu_map desc FIFO\n");
- goto iommu_map_desc_fail;
- }
- }
- }
-
- if (IPA_CLIENT_IS_USB_CONS(in->client))
- ep->connect.event_thresh = IPA_USB_EVENT_THRESHOLD;
- else
- ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
- ep->connect.options = SPS_O_AUTO_ENABLE; /* BAM-to-BAM */
-
- result = ipa3_sps_connect_safe(ep->ep_hdl, &ep->connect, in->client);
- if (result) {
- IPAERR("sps_connect fails.\n");
- goto sps_connect_fail;
- }
-
- sps->ipa_bam_hdl = ipa3_ctx->bam_handle;
- sps->ipa_ep_idx = ipa_ep_idx;
- *clnt_hdl = ipa_ep_idx;
- memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
- memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));
-
- ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
- if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->client))
- ipa3_install_dflt_flt_rules(ipa_ep_idx);
-
- if (!ep->keep_ipa_awake)
- IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
-
- IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);
-
- return 0;
-
-sps_connect_fail:
- if (!ipa3_ctx->smmu_s1_bypass) {
- base = ep->connect.desc.iova;
- smmu_domain = ipa_get_smmu_domain();
- if (smmu_domain != NULL) {
- iommu_unmap(smmu_domain,
- rounddown(base, PAGE_SIZE),
- roundup(ep->connect.desc.size + base -
- rounddown(base, PAGE_SIZE), PAGE_SIZE));
- }
- }
-iommu_map_desc_fail:
- if (!ipa3_ctx->smmu_s1_bypass) {
- base = ep->connect.data.iova;
- smmu_domain = ipa_get_smmu_domain();
- if (smmu_domain != NULL) {
- iommu_unmap(smmu_domain,
- rounddown(base, PAGE_SIZE),
- roundup(ep->connect.data.size + base -
- rounddown(base, PAGE_SIZE), PAGE_SIZE));
- }
- }
-iommu_map_data_fail:
- if (!ep->data_fifo_client_allocated) {
- if (!ep->data_fifo_in_pipe_mem)
- dma_free_coherent(ipa3_ctx->pdev,
- ep->connect.data.size,
- ep->connect.data.base,
- ep->connect.data.phys_base);
- else
- ipa3_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
- ep->connect.data.size);
- }
-data_mem_alloc_fail:
- if (!ep->desc_fifo_client_allocated) {
- if (!ep->desc_fifo_in_pipe_mem)
- dma_free_coherent(ipa3_ctx->pdev,
- ep->connect.desc.size,
- ep->connect.desc.base,
- ep->connect.desc.phys_base);
- else
- ipa3_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
- ep->connect.desc.size);
- }
-desc_mem_alloc_fail:
- sps_free_endpoint(ep->ep_hdl);
-ipa_cfg_ep_fail:
- memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
- IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
-fail:
- return result;
-}
-
-static int ipa3_smmu_unmap_peer_bam(unsigned long dev)
-{
- size_t len;
- struct iommu_domain *smmu_domain;
- struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
-
- if (!ipa3_ctx->smmu_s1_bypass) {
- WARN_ON(dev != ipa3_ctx->peer_bam_dev);
- ipa3_ctx->peer_bam_map_cnt--;
- if (ipa3_ctx->peer_bam_map_cnt == 0) {
- len = roundup(ipa3_ctx->peer_bam_map_size +
- ipa3_ctx->peer_bam_pa -
- rounddown(ipa3_ctx->peer_bam_pa,
- PAGE_SIZE), PAGE_SIZE);
- smmu_domain = ipa3_get_smmu_domain();
- if (smmu_domain != NULL) {
- if (iommu_unmap(smmu_domain,
- cb->va_end, len) != len) {
- IPAERR("Fail to iommu_unmap\n");
- return -EINVAL;
- }
- IPADBG("Peer bam %lu unmapped\n", dev);
- }
- }
- }
-
- return 0;
-}
-
-/**
- * ipa3_disconnect() - low-level IPA client disconnect
- * @clnt_hdl: [in] opaque client handle assigned by IPA to client
- *
- * Should be called by the driver of the peripheral that wants to disconnect
- * from IPA in BAM-BAM mode. this api expects caller to take responsibility to
- * free any needed headers, routing and filtering tables and rules as needed.
- *
- * Returns: 0 on success, negative on failure
- *
- * Note: Should not be called from atomic context
- */
-int ipa3_disconnect(u32 clnt_hdl)
-{
- int result;
- struct ipa3_ep_context *ep;
- unsigned long peer_bam;
- unsigned long base;
- struct iommu_domain *smmu_domain;
- struct ipa_disable_force_clear_datapath_req_msg_v01 req = {0};
- int res;
- enum ipa_client_type client_type;
-
- if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
- ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
- return -EINVAL;
- }
-
- ep = &ipa3_ctx->ep[clnt_hdl];
- client_type = ipa3_get_client_mapping(clnt_hdl);
- if (!ep->keep_ipa_awake)
- IPA_ACTIVE_CLIENTS_INC_EP(client_type);
-
- /* Set Disconnect in Progress flag. */
- spin_lock(&ipa3_ctx->disconnect_lock);
- ep->disconnect_in_progress = true;
- spin_unlock(&ipa3_ctx->disconnect_lock);
-
- result = ipa3_disable_data_path(clnt_hdl);
- if (result) {
- IPAERR("disable data path failed res=%d clnt=%d.\n", result,
- clnt_hdl);
- return -EPERM;
- }
-
- result = sps_disconnect(ep->ep_hdl);
- if (result) {
- IPAERR("SPS disconnect failed.\n");
- return -EPERM;
- }
-
- if (IPA_CLIENT_IS_CONS(ep->client))
- peer_bam = ep->connect.destination;
- else
- peer_bam = ep->connect.source;
-
- if (ipa3_smmu_unmap_peer_bam(peer_bam)) {
- IPAERR("fail to iommu unmap peer BAM.\n");
- return -EPERM;
- }
-
- if (!ep->desc_fifo_client_allocated &&
- ep->connect.desc.base) {
- if (!ep->desc_fifo_in_pipe_mem)
- dma_free_coherent(ipa3_ctx->pdev,
- ep->connect.desc.size,
- ep->connect.desc.base,
- ep->connect.desc.phys_base);
- else
- ipa3_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
- ep->connect.desc.size);
- }
-
- if (!ep->data_fifo_client_allocated &&
- ep->connect.data.base) {
- if (!ep->data_fifo_in_pipe_mem)
- dma_free_coherent(ipa3_ctx->pdev,
- ep->connect.data.size,
- ep->connect.data.base,
- ep->connect.data.phys_base);
- else
- ipa3_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
- ep->connect.data.size);
- }
-
- if (!ipa3_ctx->smmu_s1_bypass) {
- base = ep->connect.desc.iova;
- smmu_domain = ipa_get_smmu_domain();
- if (smmu_domain != NULL) {
- iommu_unmap(smmu_domain,
- rounddown(base, PAGE_SIZE),
- roundup(ep->connect.desc.size + base -
- rounddown(base, PAGE_SIZE), PAGE_SIZE));
- }
- }
-
- if (!ipa3_ctx->smmu_s1_bypass) {
- base = ep->connect.data.iova;
- smmu_domain = ipa_get_smmu_domain();
- if (smmu_domain != NULL) {
- iommu_unmap(smmu_domain,
- rounddown(base, PAGE_SIZE),
- roundup(ep->connect.data.size + base -
- rounddown(base, PAGE_SIZE), PAGE_SIZE));
- }
- }
-
- result = sps_free_endpoint(ep->ep_hdl);
- if (result) {
- IPAERR("SPS de-alloc EP failed.\n");
- return -EPERM;
- }
-
- ipa3_delete_dflt_flt_rules(clnt_hdl);
-
- /* If APPS flow control is not enabled, send a message to modem to
- * enable flow control honoring.
- */
- if (!ipa3_ctx->tethered_flow_control && ep->qmi_request_sent) {
- /* Send a message to modem to disable flow control honoring. */
- req.request_id = clnt_hdl;
- res = ipa3_qmi_disable_force_clear_datapath_send(&req);
- if (res) {
- IPADBG("disable_force_clear_datapath failed %d\n",
- res);
- }
- }
-
- spin_lock(&ipa3_ctx->disconnect_lock);
- memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
- spin_unlock(&ipa3_ctx->disconnect_lock);
- IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
-
- IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
-
- return 0;
-}
-
-/**
-* ipa3_reset_endpoint() - reset an endpoint from BAM perspective
-* @clnt_hdl: [in] IPA client handle
-*
-* Returns: 0 on success, negative on failure
-*
-* Note: Should not be called from atomic context
-*/
-int ipa3_reset_endpoint(u32 clnt_hdl)
-{
- int res;
- struct ipa3_ep_context *ep;
-
- if (clnt_hdl >= ipa3_ctx->ipa_num_pipes) {
- IPAERR("Bad parameters.\n");
- return -EFAULT;
- }
- ep = &ipa3_ctx->ep[clnt_hdl];
- IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
- res = sps_disconnect(ep->ep_hdl);
- if (res) {
- IPAERR("sps_disconnect() failed, res=%d.\n", res);
- goto bail;
- } else {
- res = ipa3_sps_connect_safe(ep->ep_hdl, &ep->connect,
- ep->client);
- if (res) {
- IPAERR("sps_connect() failed, res=%d.\n", res);
- goto bail;
- }
- }
-
-bail:
- IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
- return res;
-}
-
-/**
- * ipa3_sps_connect_safe() - connect endpoint from BAM prespective
- * @h: [in] sps pipe handle
- * @connect: [in] sps connect parameters
- * @ipa_client: [in] ipa client handle representing the pipe
- *
- * This function connects a BAM pipe using SPS driver sps_connect() API
- * and by requesting uC interface to reset the pipe, avoids an IPA HW
- * limitation that does not allow resetting a BAM pipe during traffic in
- * IPA TX command queue.
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect,
- enum ipa_client_type ipa_client)
-{
- int res;
-
- if (ipa3_ctx->ipa_hw_type > IPA_HW_v2_5 ||
- ipa3_ctx->skip_uc_pipe_reset) {
- IPADBG("uC pipe reset is not required\n");
- } else {
- res = ipa3_uc_reset_pipe(ipa_client);
- if (res)
- return res;
- }
- return sps_connect(h, connect);
-}
-
static void ipa_chan_err_cb(struct gsi_chan_err_notify *notify)
{
if (notify) {
@@ -1152,8 +535,7 @@
struct ipahal_reg_ep_cfg_status ep_status;
unsigned long gsi_dev_hdl;
enum gsi_status gsi_res;
- struct ipa_gsi_ep_config gsi_ep_cfg;
- struct ipa_gsi_ep_config *gsi_ep_cfg_ptr = &gsi_ep_cfg;
+ const struct ipa_gsi_ep_config *gsi_ep_cfg_ptr;
IPADBG("entry\n");
if (params == NULL || out_params == NULL ||
@@ -1227,8 +609,7 @@
goto write_evt_scratch_fail;
}
- memset(gsi_ep_cfg_ptr, 0, sizeof(struct ipa_gsi_ep_config));
- gsi_ep_cfg_ptr = ipa_get_gsi_ep_info(ipa_ep_idx);
+ gsi_ep_cfg_ptr = ipa3_get_gsi_ep_info(ep->client);
params->chan_params.evt_ring_hdl = ep->gsi_evt_ring_hdl;
params->chan_params.ch_id = gsi_ep_cfg_ptr->ipa_gsi_chan_num;
	gsi_res = gsi_alloc_channel(&params->chan_params, gsi_dev_hdl,
@@ -1978,7 +1359,7 @@
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
*
* Should be called by the driver of the peripheral that wants to remove
- * ep delay on IPA consumer ipe before disconnect in BAM-BAM mode. this api
+ * ep delay on IPA consumer pipe before disconnect in non-GPI mode. This API
* expects caller to take responsibility to free any needed headers, routing
* and filtering tables and rules as needed.
*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
index 961ce13..f6bd162 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
@@ -25,10 +25,6 @@
#define IPA_DMA_POLLING_MAX_SLEEP_RX 1050
#define IPA_DMA_SYS_DESC_MAX_FIFO_SZ 0x7FF8
#define IPA_DMA_MAX_PKT_SZ 0xFFFF
-#define IPA_DMA_MAX_PENDING_SYNC (IPA_SYS_DESC_FIFO_SZ / \
- sizeof(struct sps_iovec) - 1)
-#define IPA_DMA_MAX_PENDING_ASYNC (IPA_DMA_SYS_DESC_MAX_FIFO_SZ / \
- sizeof(struct sps_iovec) - 1)
#define IPADMA_DRV_NAME "ipa_dma"
@@ -361,7 +357,7 @@
* -EINVAL: invalid params
 * -EPERM: operation not permitted as ipa_dma isn't enabled or
* initialized
- * -SPS_ERROR: on sps faliures
+ * -gsi_status: on GSI failures
* -EFAULT: other
*/
int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len)
@@ -371,7 +367,6 @@
int i = 0;
struct ipa3_sys_context *cons_sys;
struct ipa3_sys_context *prod_sys;
- struct sps_iovec iov;
struct ipa3_dma_xfer_wrapper *xfer_descr = NULL;
struct ipa3_dma_xfer_wrapper *head_descr = NULL;
struct gsi_xfer_elem xfer_elem;
@@ -394,12 +389,6 @@
IPADMA_ERR("invalid len, %d\n", len);
return -EINVAL;
}
- if (ipa3_ctx->transport_prototype != IPA_TRANSPORT_TYPE_GSI) {
- if (((u32)src != src) || ((u32)dest != dest)) {
- IPADMA_ERR("Bad addr, only 32b addr supported for BAM");
- return -EINVAL;
- }
- }
spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
if (!ipa3_dma_ctx->is_enabled) {
IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
@@ -408,14 +397,6 @@
}
atomic_inc(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
- if (atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt) >=
- IPA_DMA_MAX_PENDING_SYNC) {
- atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
- IPADMA_ERR("Reached pending requests limit\n");
- return -EFAULT;
- }
- }
ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
if (-1 == ep_idx) {
@@ -448,46 +429,31 @@
mutex_lock(&ipa3_dma_ctx->sync_lock);
list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
cons_sys->len++;
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- xfer_elem.addr = dest;
- xfer_elem.len = len;
- xfer_elem.type = GSI_XFER_ELEM_DATA;
- xfer_elem.flags = GSI_XFER_FLAG_EOT;
- xfer_elem.xfer_user_data = xfer_descr;
- res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
- &xfer_elem, true);
- if (res) {
- IPADMA_ERR(
- "Failed: gsi_queue_xfer dest descr res:%d\n",
- res);
- goto fail_send;
- }
- xfer_elem.addr = src;
- xfer_elem.len = len;
- xfer_elem.type = GSI_XFER_ELEM_DATA;
- xfer_elem.flags = GSI_XFER_FLAG_EOT;
- xfer_elem.xfer_user_data = NULL;
- res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
- &xfer_elem, true);
- if (res) {
- IPADMA_ERR(
- "Failed: gsi_queue_xfer src descr res:%d\n",
- res);
- BUG();
- }
- } else {
- res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len,
- NULL, 0);
- if (res) {
- IPADMA_ERR("Failed: sps_transfer_one on dest descr\n");
- goto fail_send;
- }
- res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len,
- NULL, SPS_IOVEC_FLAG_EOT);
- if (res) {
- IPADMA_ERR("Failed: sps_transfer_one on src descr\n");
- BUG();
- }
+ xfer_elem.addr = dest;
+ xfer_elem.len = len;
+ xfer_elem.type = GSI_XFER_ELEM_DATA;
+ xfer_elem.flags = GSI_XFER_FLAG_EOT;
+ xfer_elem.xfer_user_data = xfer_descr;
+ res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
+ &xfer_elem, true);
+ if (res) {
+ IPADMA_ERR(
+ "Failed: gsi_queue_xfer dest descr res:%d\n",
+ res);
+ goto fail_send;
+ }
+ xfer_elem.addr = src;
+ xfer_elem.len = len;
+ xfer_elem.type = GSI_XFER_ELEM_DATA;
+ xfer_elem.flags = GSI_XFER_FLAG_EOT;
+ xfer_elem.xfer_user_data = NULL;
+ res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
+ &xfer_elem, true);
+ if (res) {
+ IPADMA_ERR(
+ "Failed: gsi_queue_xfer src descr res:%d\n",
+ res);
+ ipa_assert();
}
head_descr = list_first_entry(&cons_sys->head_desc_list,
struct ipa3_dma_xfer_wrapper, link);
@@ -505,37 +471,22 @@
do {
/* wait for transfer to complete */
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- res = gsi_poll_channel(cons_sys->ep->gsi_chan_hdl,
- &gsi_notify);
- if (res == GSI_STATUS_SUCCESS)
- stop_polling = true;
- else if (res != GSI_STATUS_POLL_EMPTY)
- IPADMA_ERR(
- "Failed: gsi_poll_chanel, returned %d loop#:%d\n",
- res, i);
- } else {
- res = sps_get_iovec(cons_sys->ep->ep_hdl, &iov);
- if (res)
- IPADMA_ERR(
- "Failed: get_iovec, returned %d loop#:%d\n",
- res, i);
- if (iov.addr != 0)
- stop_polling = true;
- }
+ res = gsi_poll_channel(cons_sys->ep->gsi_chan_hdl,
+ &gsi_notify);
+ if (res == GSI_STATUS_SUCCESS)
+ stop_polling = true;
+ else if (res != GSI_STATUS_POLL_EMPTY)
+ IPADMA_ERR(
+ "Failed: gsi_poll_chanel, returned %d loop#:%d\n",
+ res, i);
usleep_range(IPA_DMA_POLLING_MIN_SLEEP_RX,
IPA_DMA_POLLING_MAX_SLEEP_RX);
i++;
} while (!stop_polling);
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- BUG_ON(len != gsi_notify.bytes_xfered);
- BUG_ON(dest != ((struct ipa3_dma_xfer_wrapper *)
- (gsi_notify.xfer_user_data))->phys_addr_dest);
- } else {
- BUG_ON(dest != iov.addr);
- BUG_ON(len != iov.size);
- }
+ ipa_assert_on(len != gsi_notify.bytes_xfered);
+ ipa_assert_on(dest != ((struct ipa3_dma_xfer_wrapper *)
+ (gsi_notify.xfer_user_data))->phys_addr_dest);
mutex_lock(&ipa3_dma_ctx->sync_lock);
list_del(&head_descr->link);
@@ -582,7 +533,7 @@
* -EINVAL: invalid params
 * -EPERM: operation not permitted as ipa_dma isn't enabled or
* initialized
- * -SPS_ERROR: on sps faliures
+ * -gsi_status: on GSI failures
* -EFAULT: descr fifo is full.
*/
int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
@@ -611,13 +562,6 @@
IPADMA_ERR("invalid len, %d\n", len);
return -EINVAL;
}
- if (ipa3_ctx->transport_prototype != IPA_TRANSPORT_TYPE_GSI) {
- if (((u32)src != src) || ((u32)dest != dest)) {
- IPADMA_ERR(
- "Bad addr - only 32b addr supported for BAM");
- return -EINVAL;
- }
- }
if (!user_cb) {
IPADMA_ERR("null pointer: user_cb\n");
return -EINVAL;
@@ -630,14 +574,6 @@
}
atomic_inc(&ipa3_dma_ctx->async_memcpy_pending_cnt);
spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
- if (atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt) >=
- IPA_DMA_MAX_PENDING_ASYNC) {
- atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
- IPADMA_ERR("Reached pending requests limit\n");
- return -EFAULT;
- }
- }
ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
if (-1 == ep_idx) {
@@ -671,48 +607,32 @@
spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags);
list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
cons_sys->len++;
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- xfer_elem_cons.addr = dest;
- xfer_elem_cons.len = len;
- xfer_elem_cons.type = GSI_XFER_ELEM_DATA;
- xfer_elem_cons.flags = GSI_XFER_FLAG_EOT;
- xfer_elem_cons.xfer_user_data = xfer_descr;
- xfer_elem_prod.addr = src;
- xfer_elem_prod.len = len;
- xfer_elem_prod.type = GSI_XFER_ELEM_DATA;
- xfer_elem_prod.flags = GSI_XFER_FLAG_EOT;
- xfer_elem_prod.xfer_user_data = NULL;
- res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
- &xfer_elem_cons, true);
- if (res) {
- IPADMA_ERR(
- "Failed: gsi_queue_xfer on dest descr res: %d\n",
- res);
- goto fail_send;
- }
- res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
- &xfer_elem_prod, true);
- if (res) {
- IPADMA_ERR(
- "Failed: gsi_queue_xfer on src descr res: %d\n",
- res);
- BUG();
- goto fail_send;
- }
- } else {
- res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len,
- xfer_descr, 0);
- if (res) {
- IPADMA_ERR("Failed: sps_transfer_one on dest descr\n");
- goto fail_send;
- }
- res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len,
- NULL, SPS_IOVEC_FLAG_EOT);
- if (res) {
- IPADMA_ERR("Failed: sps_transfer_one on src descr\n");
- BUG();
- goto fail_send;
- }
+ xfer_elem_cons.addr = dest;
+ xfer_elem_cons.len = len;
+ xfer_elem_cons.type = GSI_XFER_ELEM_DATA;
+ xfer_elem_cons.flags = GSI_XFER_FLAG_EOT;
+ xfer_elem_cons.xfer_user_data = xfer_descr;
+ xfer_elem_prod.addr = src;
+ xfer_elem_prod.len = len;
+ xfer_elem_prod.type = GSI_XFER_ELEM_DATA;
+ xfer_elem_prod.flags = GSI_XFER_FLAG_EOT;
+ xfer_elem_prod.xfer_user_data = NULL;
+ res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
+ &xfer_elem_cons, true);
+ if (res) {
+ IPADMA_ERR(
+ "Failed: gsi_queue_xfer on dest descr res: %d\n",
+ res);
+ goto fail_send;
+ }
+ res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
+ &xfer_elem_prod, true);
+ if (res) {
+ IPADMA_ERR(
+ "Failed: gsi_queue_xfer on src descr res: %d\n",
+ res);
+ ipa_assert();
+ goto fail_send;
}
spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
IPADMA_FUNC_EXIT();
@@ -832,9 +752,9 @@
}
/**
- * ipa3_dma_async_memcpy_notify_cb() -Callback function which will be called by
- * IPA driver after getting notify from SPS driver or poll mode on Rx operation
- * is completed (data was written to dest descriptor on async_cons ep).
+ * ipa3_dma_async_memcpy_notify_cb() - Callback function which will be called
+ * by the IPA driver once an Rx operation has completed (data was written to
+ * the dest descriptor on the async_cons ep).
*
* @priv -not in use.
 * @evt - event name - IPA_RECEIVE.
@@ -865,11 +785,6 @@
list_del(&xfer_descr_expected->link);
sys->len--;
spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
- if (ipa3_ctx->transport_prototype != IPA_TRANSPORT_TYPE_GSI) {
- BUG_ON(xfer_descr_expected->phys_addr_dest !=
- mem_info->phys_base);
- BUG_ON(xfer_descr_expected->len != mem_info->size);
- }
atomic_inc(&ipa3_dma_ctx->total_async_memcpy);
atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
xfer_descr_expected->callback(xfer_descr_expected->user1);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 34c7227..85cd468 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -70,13 +70,18 @@
#define IPA_DEFAULT_SYS_YELLOW_WM 32
+/*
+ * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but
+ * IPA users still use sps_iovec size as FIFO element size.
+ */
+#define IPA_FIFO_ELEMENT_SIZE 8
+
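In practice this constant keeps client-visible FIFO arithmetic stable across the transport switch: a client that sized its descriptor FIFO as N * sizeof(struct sps_iovec) gets the same byte count from N * IPA_FIFO_ELEMENT_SIZE, even though each GSI ring element is now 16 bytes. A trivial illustration (hypothetical helper, not part of the patch):

	/* Clients still express FIFO depth in 8-byte sps_iovec units;
	 * the GSI ring itself uses 16-byte elements internally. */
	static inline u32 ipa_fifo_sz(u32 nr_elements)
	{
		return nr_elements * IPA_FIFO_ELEMENT_SIZE;
	}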
static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_work_func(struct work_struct *work);
static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_wq_handle_rx(struct work_struct *work);
-static void ipa3_wq_handle_tx(struct work_struct *work);
static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size);
static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
u32 size);
@@ -94,10 +99,8 @@
static int ipa_populate_tag_field(struct ipa3_desc *desc,
struct ipa3_tx_pkt_wrapper *tx_pkt,
struct ipahal_imm_cmd_pyld **tag_pyld_ret);
-static int ipa_handle_rx_core_gsi(struct ipa3_sys_context *sys,
- bool process_all, bool in_poll_state);
-static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
- bool process_all, bool in_poll_state);
+static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
+ struct ipa_mem_buffer *mem_info);
static unsigned long tag_to_pointer_wa(uint64_t tag);
static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt);
@@ -142,22 +145,6 @@
if (tx_pkt->callback)
tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_SPS
- && tx_pkt->cnt > 1
- && tx_pkt->cnt != IPA_LAST_DESC_CNT) {
- if (tx_pkt->cnt == IPA_NUM_DESC_PER_SW_TX) {
- dma_pool_free(ipa3_ctx->dma_pool,
- tx_pkt->mult.base,
- tx_pkt->mult.phys_base);
- } else {
- dma_unmap_single(ipa3_ctx->pdev,
- tx_pkt->mult.phys_base,
- tx_pkt->mult.size,
- DMA_TO_DEVICE);
- kfree(tx_pkt->mult.base);
- }
- }
-
kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
tx_pkt = next_pkt;
}
@@ -191,7 +178,6 @@
* the order for sent packet is the same as expected
* - delete all the tx packet descriptors from the system
* pipe context (not needed anymore)
- * - return the tx buffer back to dma_pool
*/
static void ipa3_wq_write_done(struct work_struct *work)
{
@@ -204,118 +190,6 @@
ipa3_wq_write_done_common(sys, tx_pkt);
}
-static int ipa3_handle_tx_core(struct ipa3_sys_context *sys, bool process_all,
- bool in_poll_state)
-{
- struct sps_iovec iov;
- struct ipa3_tx_pkt_wrapper *tx_pkt_expected;
- int ret;
- int cnt = 0;
-
- while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
- !atomic_read(&sys->curr_polling_state))) {
- if (cnt && !process_all)
- break;
- ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
- if (ret) {
- IPAERR("sps_get_iovec failed %d\n", ret);
- break;
- }
-
- if (iov.addr == 0)
- break;
-
- tx_pkt_expected = list_first_entry(&sys->head_desc_list,
- struct ipa3_tx_pkt_wrapper,
- link);
- ipa3_wq_write_done_common(sys, tx_pkt_expected);
- cnt++;
- };
-
- return cnt;
-}
-
-/**
- * ipa3_tx_switch_to_intr_mode() - Operate the Tx data path in interrupt mode
- */
-static void ipa3_tx_switch_to_intr_mode(struct ipa3_sys_context *sys)
-{
- int ret;
-
- if (!atomic_read(&sys->curr_polling_state)) {
- IPAERR("already in intr mode\n");
- goto fail;
- }
-
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- atomic_set(&sys->curr_polling_state, 0);
- ipa3_dec_release_wakelock();
- ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
- GSI_CHAN_MODE_CALLBACK);
- if (ret != GSI_STATUS_SUCCESS) {
- IPAERR("Failed to switch to intr mode.\n");
- goto fail;
- }
- } else {
- ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
- if (ret) {
- IPAERR("sps_get_config() failed %d\n", ret);
- goto fail;
- }
- sys->event.options = SPS_O_EOT;
- ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
- if (ret) {
- IPAERR("sps_register_event() failed %d\n", ret);
- goto fail;
- }
- sys->ep->connect.options =
- SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
- ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
- if (ret) {
- IPAERR("sps_set_config() failed %d\n", ret);
- goto fail;
- }
- atomic_set(&sys->curr_polling_state, 0);
- ipa3_handle_tx_core(sys, true, false);
- ipa3_dec_release_wakelock();
- }
- return;
-
-fail:
- queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
- msecs_to_jiffies(1));
-}
-
-static void ipa3_handle_tx(struct ipa3_sys_context *sys)
-{
- int inactive_cycles = 0;
- int cnt;
-
- IPA_ACTIVE_CLIENTS_INC_SIMPLE();
- do {
- cnt = ipa3_handle_tx_core(sys, true, true);
- if (cnt == 0) {
- inactive_cycles++;
- usleep_range(POLLING_MIN_SLEEP_TX,
- POLLING_MAX_SLEEP_TX);
- } else {
- inactive_cycles = 0;
- }
- } while (inactive_cycles <= POLLING_INACTIVITY_TX);
-
- ipa3_tx_switch_to_intr_mode(sys);
- IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
-}
-
-static void ipa3_wq_handle_tx(struct work_struct *work)
-{
- struct ipa3_sys_context *sys;
-
- sys = container_of(work, struct ipa3_sys_context, work);
-
- ipa3_handle_tx(sys);
-}
-
/**
* ipa3_send_one() - Send a single descriptor
* @sys: system pipe context
@@ -324,8 +198,8 @@
*
* - Allocate tx_packet wrapper
* - transfer data to the IPA
- * - after the transfer was done the SPS will
- * notify the sending user via ipa_sps_irq_comp_tx()
+ * - after the transfer is done the user will be notified via the provided
+ * callback
*
* Return codes: 0: success, -EFAULT: failure
*/
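A caller-side view helps anchor the new contract (a hypothetical usage sketch; the ipa3_desc field names are taken from the hunks below, while the payload wrapper fields and the callback wiring are assumptions, not part of this patch):

	/* Queue one immediate command; completion now arrives via the
	 * GSI channel callback rather than an SPS event. */
	struct ipa3_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.type = IPA_IMM_CMD_DESC;
	desc.opcode = cmd_pyld->opcode;	/* assumed wrapper fields */
	desc.pyld = cmd_pyld->data;
	desc.len = cmd_pyld->len;
	desc.callback = my_done_cb;	/* hypothetical completion hook */

	if (ipa3_send_one(sys, &desc, true))
		IPAERR("failed to send immediate command\n");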
@@ -335,9 +209,7 @@
struct ipa3_tx_pkt_wrapper *tx_pkt;
struct gsi_xfer_elem gsi_xfer;
int result;
- u16 sps_flags = SPS_IOVEC_FLAG_EOT;
dma_addr_t dma_address;
- u16 len = 0;
u32 mem_flag = GFP_ATOMIC;
if (unlikely(!in_atomic))
@@ -373,32 +245,16 @@
tx_pkt->user1 = desc->user1;
tx_pkt->user2 = desc->user2;
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- memset(&gsi_xfer, 0, sizeof(gsi_xfer));
- gsi_xfer.addr = dma_address;
- gsi_xfer.flags |= GSI_XFER_FLAG_EOT;
- gsi_xfer.xfer_user_data = tx_pkt;
- if (desc->type == IPA_IMM_CMD_DESC) {
- gsi_xfer.len = desc->opcode;
- gsi_xfer.type = GSI_XFER_ELEM_IMME_CMD;
- } else {
- gsi_xfer.len = desc->len;
- gsi_xfer.type = GSI_XFER_ELEM_DATA;
- }
+ memset(&gsi_xfer, 0, sizeof(gsi_xfer));
+ gsi_xfer.addr = dma_address;
+ gsi_xfer.flags |= GSI_XFER_FLAG_EOT;
+ gsi_xfer.xfer_user_data = tx_pkt;
+ if (desc->type == IPA_IMM_CMD_DESC) {
+ gsi_xfer.len = desc->opcode;
+ gsi_xfer.type = GSI_XFER_ELEM_IMME_CMD;
} else {
- /*
- * Special treatment for immediate commands, where the
- * structure of the descriptor is different
- */
- if (desc->type == IPA_IMM_CMD_DESC) {
- sps_flags |= SPS_IOVEC_FLAG_IMME;
- len = desc->opcode;
- IPADBG_LOW("sending cmd=%d pyld_len=%d sps_flags=%x\n",
- desc->opcode, desc->len, sps_flags);
- IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
- } else {
- len = desc->len;
- }
+ gsi_xfer.len = desc->len;
+ gsi_xfer.type = GSI_XFER_ELEM_DATA;
}
INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
@@ -406,20 +262,11 @@
spin_lock_bh(&sys->spinlock);
list_add_tail(&tx_pkt->link, &sys->head_desc_list);
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
- &gsi_xfer, true);
- if (result != GSI_STATUS_SUCCESS) {
- IPAERR("GSI xfer failed.\n");
- goto fail_transport_send;
- }
- } else {
- result = sps_transfer_one(sys->ep->ep_hdl, dma_address,
- len, tx_pkt, sps_flags);
- if (result) {
- IPAERR("sps_transfer_one failed rc=%d\n", result);
- goto fail_transport_send;
- }
+ result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
+ &gsi_xfer, true);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("GSI xfer failed.\n");
+ goto fail_transport_send;
}
spin_unlock_bh(&sys->spinlock);
@@ -443,14 +290,11 @@
* @desc: packets to send (may be immediate command or data)
* @in_atomic: whether caller is in atomic context
*
- * This function is used for system-to-bam connection.
- * - SPS driver expect struct sps_transfer which will contain all the data
- * for a transaction
+ * This function is used for GPI connection.
* - ipa3_tx_pkt_wrapper will be used for each ipa
* descriptor (allocated from wrappers cache)
* - The wrapper struct will be configured for each ipa-desc payload and will
* contain information which will be later used by the user callbacks
- * - each transfer will be made by calling to sps_transfer()
* - Each packet (command or data) that will be sent will also be saved in
* ipa3_sys_context for later check that all data was sent
*
@@ -464,77 +308,36 @@
struct ipa3_tx_pkt_wrapper *tx_pkt, *tx_pkt_first;
struct ipahal_imm_cmd_pyld *tag_pyld_ret = NULL;
struct ipa3_tx_pkt_wrapper *next_pkt;
- struct sps_transfer transfer = { 0 };
- struct sps_iovec *iovec;
struct gsi_xfer_elem *gsi_xfer_elem_array = NULL;
- dma_addr_t dma_addr;
int i = 0;
int j;
int result;
int fail_dma_wrap = 0;
- uint size;
u32 mem_flag = GFP_ATOMIC;
- int ipa_ep_idx;
- struct ipa_gsi_ep_config *gsi_ep_cfg;
+ const struct ipa_gsi_ep_config *gsi_ep_cfg;
if (unlikely(!in_atomic))
mem_flag = GFP_KERNEL;
- size = num_desc * sizeof(struct sps_iovec);
+ gsi_ep_cfg = ipa3_get_gsi_ep_info(sys->ep->client);
+ if (unlikely(!gsi_ep_cfg)) {
+ IPAERR("failed to get gsi EP config for client=%d\n",
+ sys->ep->client);
+ return -EFAULT;
+ }
+ if (unlikely(num_desc > gsi_ep_cfg->ipa_if_tlv)) {
+ IPAERR("Too many chained descriptors need=%d max=%d\n",
+ num_desc, gsi_ep_cfg->ipa_if_tlv);
+ WARN_ON(1);
+ return -EPERM;
+ }
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- ipa_ep_idx = ipa3_get_ep_mapping(sys->ep->client);
- if (unlikely(ipa_ep_idx < 0)) {
- IPAERR("invalid ep_index of client = %d\n",
- sys->ep->client);
- return -EFAULT;
- }
- gsi_ep_cfg = ipa3_get_gsi_ep_info(ipa_ep_idx);
- if (unlikely(!gsi_ep_cfg)) {
- IPAERR("failed to get gsi EP config of ep_idx=%d\n",
- ipa_ep_idx);
- return -EFAULT;
- }
- if (unlikely(num_desc > gsi_ep_cfg->ipa_if_tlv)) {
- IPAERR("Too many chained descriptors need=%d max=%d\n",
- num_desc, gsi_ep_cfg->ipa_if_tlv);
- WARN_ON(1);
- return -EPERM;
- }
-
- gsi_xfer_elem_array =
- kzalloc(num_desc * sizeof(struct gsi_xfer_elem),
- mem_flag);
- if (!gsi_xfer_elem_array) {
- IPAERR("Failed to alloc mem for gsi xfer array.\n");
- return -EFAULT;
- }
- } else {
- if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
- transfer.iovec = dma_pool_alloc(ipa3_ctx->dma_pool,
- mem_flag, &dma_addr);
- if (!transfer.iovec) {
- IPAERR("fail to alloc dma mem\n");
- return -EFAULT;
- }
- } else {
- transfer.iovec = kmalloc(size, mem_flag);
- if (!transfer.iovec) {
- IPAERR("fail to alloc mem for sps xfr buff ");
- IPAERR("num_desc = %d size = %d\n",
- num_desc, size);
- return -EFAULT;
- }
- dma_addr = dma_map_single(ipa3_ctx->pdev,
- transfer.iovec, size, DMA_TO_DEVICE);
- if (!dma_addr) {
- IPAERR("dma_map_single failed\n");
- kfree(transfer.iovec);
- return -EFAULT;
- }
- }
- transfer.iovec_phys = dma_addr;
- transfer.iovec_count = num_desc;
+ gsi_xfer_elem_array =
+ kzalloc(num_desc * sizeof(struct gsi_xfer_elem),
+ mem_flag);
+ if (!gsi_xfer_elem_array) {
+ IPAERR("Failed to alloc mem for gsi xfer array.\n");
+ return -EFAULT;
}
spin_lock_bh(&sys->spinlock);
@@ -617,87 +420,41 @@
list_add_tail(&tx_pkt->link, &sys->head_desc_list);
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- gsi_xfer_elem_array[i].addr = tx_pkt->mem.phys_base;
+ gsi_xfer_elem_array[i].addr = tx_pkt->mem.phys_base;
- /*
- * Special treatment for immediate commands, where
- * the structure of the descriptor is different
- */
- if (desc[i].type == IPA_IMM_CMD_DESC) {
- gsi_xfer_elem_array[i].len = desc[i].opcode;
- gsi_xfer_elem_array[i].type =
- GSI_XFER_ELEM_IMME_CMD;
- } else {
- gsi_xfer_elem_array[i].len = desc[i].len;
- gsi_xfer_elem_array[i].type =
- GSI_XFER_ELEM_DATA;
- }
-
- if (i == (num_desc - 1)) {
- gsi_xfer_elem_array[i].flags |=
- GSI_XFER_FLAG_EOT;
- gsi_xfer_elem_array[i].xfer_user_data =
- tx_pkt_first;
- /* "mark" the last desc */
- tx_pkt->cnt = IPA_LAST_DESC_CNT;
- } else
- gsi_xfer_elem_array[i].flags |=
- GSI_XFER_FLAG_CHAIN;
+ /*
+ * Special treatment for immediate commands, where
+ * the structure of the descriptor is different
+ */
+ if (desc[i].type == IPA_IMM_CMD_DESC) {
+ gsi_xfer_elem_array[i].len = desc[i].opcode;
+ gsi_xfer_elem_array[i].type =
+ GSI_XFER_ELEM_IMME_CMD;
} else {
- /*
- * first desc of set is "special" as it
- * holds the count and other info
- */
- if (i == 0) {
- transfer.user = tx_pkt;
- tx_pkt->mult.phys_base = dma_addr;
- tx_pkt->mult.base = transfer.iovec;
- tx_pkt->mult.size = size;
- }
-
- iovec = &transfer.iovec[i];
- iovec->flags = 0;
- /*
- * Point the iovec to the buffer and
- */
- iovec->addr = tx_pkt->mem.phys_base;
- /*
- * Special treatment for immediate commands, where
- * the structure of the descriptor is different
- */
- if (desc[i].type == IPA_IMM_CMD_DESC) {
- iovec->size = desc[i].opcode;
- iovec->flags |= SPS_IOVEC_FLAG_IMME;
- IPA_DUMP_BUFF(desc[i].pyld,
- tx_pkt->mem.phys_base, desc[i].len);
- } else {
- iovec->size = desc[i].len;
- }
-
- if (i == (num_desc - 1)) {
- iovec->flags |= SPS_IOVEC_FLAG_EOT;
- /* "mark" the last desc */
- tx_pkt->cnt = IPA_LAST_DESC_CNT;
- }
+ gsi_xfer_elem_array[i].len = desc[i].len;
+ gsi_xfer_elem_array[i].type =
+ GSI_XFER_ELEM_DATA;
}
+
+ if (i == (num_desc - 1)) {
+ gsi_xfer_elem_array[i].flags |=
+ GSI_XFER_FLAG_EOT;
+ gsi_xfer_elem_array[i].xfer_user_data =
+ tx_pkt_first;
+ /* "mark" the last desc */
+ tx_pkt->cnt = IPA_LAST_DESC_CNT;
+ } else
+ gsi_xfer_elem_array[i].flags |=
+ GSI_XFER_FLAG_CHAIN;
}
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
- gsi_xfer_elem_array, true);
- if (result != GSI_STATUS_SUCCESS) {
- IPAERR("GSI xfer failed.\n");
- goto failure;
- }
- kfree(gsi_xfer_elem_array);
- } else {
- result = sps_transfer(sys->ep->ep_hdl, &transfer);
- if (result) {
- IPAERR("sps_transfer failed rc=%d\n", result);
- goto failure;
- }
+ result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
+ gsi_xfer_elem_array, true);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("GSI xfer failed.\n");
+ goto failure;
}
+ kfree(gsi_xfer_elem_array);
spin_unlock_bh(&sys->spinlock);
return 0;
@@ -725,28 +482,15 @@
if (fail_dma_wrap)
kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- kfree(gsi_xfer_elem_array);
- } else {
- if (transfer.iovec_phys) {
- if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
- dma_pool_free(ipa3_ctx->dma_pool,
- transfer.iovec, transfer.iovec_phys);
- } else {
- dma_unmap_single(ipa3_ctx->pdev,
- transfer.iovec_phys, size,
- DMA_TO_DEVICE);
- kfree(transfer.iovec);
- }
- }
- }
+ kfree(gsi_xfer_elem_array);
+
spin_unlock_bh(&sys->spinlock);
return -EFAULT;
}
/**
* ipa3_transport_irq_cmd_ack - callback function which will be called by
- * SPS/GSI driver after an immediate command is complete.
+ * the transport driver after an immediate command is complete.
* @user1: pointer to the descriptor of the transfer
* @user2:
*
@@ -768,7 +512,7 @@
/**
* ipa3_transport_irq_cmd_ack_free - callback function which will be
- * called by SPS/GSI driver after an immediate command is complete.
+ * called by the transport driver after an immediate command is complete.
* This function will also free the completion object once it is done.
* @tag_comp: pointer to the completion object
* @ignored: parameter not used
@@ -942,77 +686,6 @@
}
/**
- * ipa3_sps_irq_tx_notify() - Callback function which will be called by
- * the SPS driver to start a Tx poll operation.
- * Called in an interrupt context.
- * @notify: SPS driver supplied notification struct
- *
- * This function defer the work for this event to the tx workqueue.
- */
-static void ipa3_sps_irq_tx_notify(struct sps_event_notify *notify)
-{
- struct ipa3_sys_context *sys = (struct ipa3_sys_context *)notify->user;
- int ret;
-
- IPADBG_LOW("event %d notified\n", notify->event_id);
-
- switch (notify->event_id) {
- case SPS_EVENT_EOT:
- if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
- atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
- if (!atomic_read(&sys->curr_polling_state)) {
- ret = sps_get_config(sys->ep->ep_hdl,
- &sys->ep->connect);
- if (ret) {
- IPAERR("sps_get_config() failed %d\n", ret);
- break;
- }
- sys->ep->connect.options = SPS_O_AUTO_ENABLE |
- SPS_O_ACK_TRANSFERS | SPS_O_POLL;
- ret = sps_set_config(sys->ep->ep_hdl,
- &sys->ep->connect);
- if (ret) {
- IPAERR("sps_set_config() failed %d\n", ret);
- break;
- }
- ipa3_inc_acquire_wakelock();
- atomic_set(&sys->curr_polling_state, 1);
- queue_work(sys->wq, &sys->work);
- }
- break;
- default:
- IPAERR("received unexpected event id %d\n", notify->event_id);
- }
-}
-
-/**
- * ipa3_sps_irq_tx_no_aggr_notify() - Callback function which will be called by
- * the SPS driver after a Tx operation is complete.
- * Called in an interrupt context.
- * @notify: SPS driver supplied notification struct
- *
- * This function defer the work for this event to the tx workqueue.
- * This event will be later handled by ipa_write_done.
- */
-static void ipa3_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
-{
- struct ipa3_tx_pkt_wrapper *tx_pkt;
-
- IPADBG_LOW("event %d notified\n", notify->event_id);
-
- switch (notify->event_id) {
- case SPS_EVENT_EOT:
- tx_pkt = notify->data.transfer.user;
- if (IPA_CLIENT_IS_APPS_CONS(tx_pkt->sys->ep->client))
- atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
- queue_work(tx_pkt->sys->wq, &tx_pkt->work);
- break;
- default:
- IPAERR("received unexpected event id %d\n", notify->event_id);
- }
-}
-
-/**
* ipa3_handle_rx_core() - The core functionality of packet reception. This
 * function is reached from multiple code paths.
*
@@ -1029,13 +702,28 @@
static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all,
bool in_poll_state)
{
- int cnt;
+ int ret;
+ int cnt = 0;
+ struct ipa_mem_buffer mem_info = { 0 };
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
- cnt = ipa_handle_rx_core_gsi(sys, process_all, in_poll_state);
- else
- cnt = ipa_handle_rx_core_sps(sys, process_all, in_poll_state);
+ while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+ !atomic_read(&sys->curr_polling_state))) {
+ if (cnt && !process_all)
+ break;
+ ret = ipa_poll_gsi_pkt(sys, &mem_info);
+ if (ret)
+ break;
+
+ if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
+ ipa3_dma_memcpy_notify(sys, &mem_info);
+ else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
+ ipa3_wlan_wq_rx_common(sys, mem_info.size);
+ else
+ ipa3_wq_rx_common(sys, mem_info.size);
+
+ ++cnt;
+ }
return cnt;
}
@@ -1046,50 +734,17 @@
{
int ret;
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- if (!atomic_read(&sys->curr_polling_state)) {
- IPAERR("already in intr mode\n");
- goto fail;
- }
- atomic_set(&sys->curr_polling_state, 0);
- ipa3_dec_release_wakelock();
- ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
- GSI_CHAN_MODE_CALLBACK);
- if (ret != GSI_STATUS_SUCCESS) {
- IPAERR("Failed to switch to intr mode.\n");
- goto fail;
- }
- } else {
- ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
- if (ret) {
- IPAERR("sps_get_config() failed %d\n", ret);
- goto fail;
- }
- if (!atomic_read(&sys->curr_polling_state) &&
- ((sys->ep->connect.options & SPS_O_EOT) == SPS_O_EOT)) {
- IPADBG("already in intr mode\n");
- return;
- }
- if (!atomic_read(&sys->curr_polling_state)) {
- IPAERR("already in intr mode\n");
- goto fail;
- }
- sys->event.options = SPS_O_EOT;
- ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
- if (ret) {
- IPAERR("sps_register_event() failed %d\n", ret);
- goto fail;
- }
- sys->ep->connect.options =
- SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
- ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
- if (ret) {
- IPAERR("sps_set_config() failed %d\n", ret);
- goto fail;
- }
- atomic_set(&sys->curr_polling_state, 0);
- ipa3_handle_rx_core(sys, true, false);
- ipa3_dec_release_wakelock();
+ if (!atomic_read(&sys->curr_polling_state)) {
+ IPAERR("already in intr mode\n");
+ goto fail;
+ }
+ atomic_set(&sys->curr_polling_state, 0);
+ ipa3_dec_release_wakelock();
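+ /* hand completions back to the GSI callback (interrupt) path */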
+ ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+ GSI_CHAN_MODE_CALLBACK);
+ if (ret != GSI_STATUS_SUCCESS) {
+ IPAERR("Failed to switch to intr mode.\n");
+ goto fail;
}
return;
@@ -1099,74 +754,6 @@
}
/**
- * ipa_rx_notify() - Callback function which is called by the SPS driver when a
- * a packet is received
- * @notify: SPS driver supplied notification information
- *
- * Called in an interrupt context, therefore the majority of the work is
- * deffered using a work queue.
- *
- * After receiving a packet, the driver goes to polling mode and keeps pulling
- * packets until the rx buffer is empty, then it goes back to interrupt mode.
- * This comes to prevent the CPU from handling too many interrupts when the
- * throughput is high.
- */
-static void ipa3_sps_irq_rx_notify(struct sps_event_notify *notify)
-{
- struct ipa3_sys_context *sys = (struct ipa3_sys_context *)notify->user;
- int ret;
-
- IPADBG_LOW("event %d notified\n", notify->event_id);
-
- switch (notify->event_id) {
- case SPS_EVENT_EOT:
- if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
- atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
- if (!atomic_read(&sys->curr_polling_state)) {
- sys->ep->eot_in_poll_err++;
- break;
- }
-
- ret = sps_get_config(sys->ep->ep_hdl,
- &sys->ep->connect);
- if (ret) {
- IPAERR("sps_get_config() failed %d\n", ret);
- break;
- }
- sys->ep->connect.options = SPS_O_AUTO_ENABLE |
- SPS_O_ACK_TRANSFERS | SPS_O_POLL;
- ret = sps_set_config(sys->ep->ep_hdl,
- &sys->ep->connect);
- if (ret) {
- IPAERR("sps_set_config() failed %d\n", ret);
- break;
- }
- ipa3_inc_acquire_wakelock();
- atomic_set(&sys->curr_polling_state, 1);
- trace_intr_to_poll3(sys->ep->client);
- queue_work(sys->wq, &sys->work);
- break;
- default:
- IPAERR("received unexpected event id %d\n", notify->event_id);
- }
-}
-
-/**
- * switch_to_intr_tx_work_func() - Wrapper function to move from polling
- * to interrupt mode
- * @work: work struct
- */
-void ipa3_switch_to_intr_tx_work_func(struct work_struct *work)
-{
- struct delayed_work *dwork;
- struct ipa3_sys_context *sys;
-
- dwork = container_of(work, struct delayed_work, work);
- sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
- ipa3_handle_tx(sys);
-}
-
-/**
* ipa3_handle_rx() - handle packet reception. This function is executed in the
* context of a work queue.
* @work: work struct needed by the work queue
@@ -1220,18 +807,15 @@
}
/**
- * ipa3_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * ipa3_setup_sys_pipe() - Setup an IPA GPI pipe and perform
* IPA EP configuration
- * @sys_in: [in] input needed to setup BAM pipe and configure EP
+ * @sys_in: [in] input needed to setup the pipe and configure EP
* @clnt_hdl: [out] client handle
*
* - configure the end-point registers with the supplied
* parameters from the user.
- * - call SPS APIs to create a system-to-bam connection with IPA.
+ * - create a GPI connection with IPA.
* - allocate descriptor FIFO
- * - register callback function(ipa3_sps_irq_rx_notify or
- * ipa3_sps_irq_tx_notify - depends on client type) in case the driver is
- * not configured to pulling mode
*
* Returns: 0 on success, negative on failure
*/
@@ -1240,9 +824,7 @@
struct ipa3_ep_context *ep;
int ipa_ep_idx;
int result = -EINVAL;
- dma_addr_t dma_addr;
char buff[IPA_RESOURCE_NAME_MAX];
- struct iommu_domain *smmu_domain;
if (sys_in == NULL || clnt_hdl == NULL) {
IPAERR("NULL args\n");
@@ -1348,7 +930,7 @@
ep->priv = sys_in->priv;
ep->keep_ipa_awake = sys_in->keep_ipa_awake;
atomic_set(&ep->avail_fifo_desc,
- ((sys_in->desc_fifo_sz/sizeof(struct sps_iovec))-1));
+ ((sys_in->desc_fifo_sz / IPA_FIFO_ELEMENT_SIZE) - 1));
if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
ep->sys->status_stat == NULL) {
@@ -1374,88 +956,11 @@
IPADBG("skipping ep configuration\n");
}
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- result = ipa_gsi_setup_channel(sys_in, ep);
- if (result) {
- IPAERR("Failed to setup GSI channel\n");
- goto fail_gen2;
- }
- } else {
- /* Default Config */
- ep->ep_hdl = sps_alloc_endpoint();
- if (ep->ep_hdl == NULL) {
- IPAERR("SPS EP allocation failed.\n");
- goto fail_gen2;
- }
-
- result = sps_get_config(ep->ep_hdl, &ep->connect);
- if (result) {
- IPAERR("fail to get config.\n");
- goto fail_sps_cfg;
- }
-
- /* Specific Config */
- if (IPA_CLIENT_IS_CONS(sys_in->client)) {
- ep->connect.mode = SPS_MODE_SRC;
- ep->connect.destination = SPS_DEV_HANDLE_MEM;
- ep->connect.source = ipa3_ctx->bam_handle;
- ep->connect.dest_pipe_index = ipa3_ctx->a5_pipe_index++;
- ep->connect.src_pipe_index = ipa_ep_idx;
- } else {
- ep->connect.mode = SPS_MODE_DEST;
- ep->connect.source = SPS_DEV_HANDLE_MEM;
- ep->connect.destination = ipa3_ctx->bam_handle;
- ep->connect.src_pipe_index = ipa3_ctx->a5_pipe_index++;
- ep->connect.dest_pipe_index = ipa_ep_idx;
- }
-
- IPADBG("client:%d ep:%d",
- sys_in->client, ipa_ep_idx);
-
- IPADBG("dest_pipe_index:%d src_pipe_index:%d\n",
- ep->connect.dest_pipe_index,
- ep->connect.src_pipe_index);
-
- ep->connect.options = ep->sys->sps_option;
- ep->connect.desc.size = sys_in->desc_fifo_sz;
- ep->connect.desc.base = dma_alloc_coherent(ipa3_ctx->pdev,
- ep->connect.desc.size, &dma_addr, 0);
- if (ipa3_ctx->smmu_s1_bypass) {
- ep->connect.desc.phys_base = dma_addr;
- } else {
- ep->connect.desc.iova = dma_addr;
- smmu_domain = ipa3_get_smmu_domain();
- if (smmu_domain != NULL) {
- ep->connect.desc.phys_base =
- iommu_iova_to_phys(smmu_domain,
- dma_addr);
- }
- }
- if (ep->connect.desc.base == NULL) {
- IPAERR("fail to get DMA desc memory.\n");
- goto fail_sps_cfg;
- }
-
- ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
-
- result = ipa3_sps_connect_safe(ep->ep_hdl,
- &ep->connect, sys_in->client);
- if (result) {
- IPAERR("sps_connect fails.\n");
- goto fail_sps_connect;
- }
-
- ep->sys->event.options = SPS_O_EOT;
- ep->sys->event.mode = SPS_TRIGGER_CALLBACK;
- ep->sys->event.xfer_done = NULL;
- ep->sys->event.user = ep->sys;
- ep->sys->event.callback = ep->sys->sps_callback;
- result = sps_register_event(ep->ep_hdl, &ep->sys->event);
- if (result < 0) {
- IPAERR("register event error %d\n", result);
- goto fail_register_event;
- }
- } /* end of sps config */
+ result = ipa_gsi_setup_channel(sys_in, ep);
+ if (result) {
+ IPAERR("Failed to setup GSI channel\n");
+ goto fail_gen2;
+ }
*clnt_hdl = ipa_ep_idx;
@@ -1506,14 +1011,6 @@
return 0;
-fail_register_event:
- sps_disconnect(ep->ep_hdl);
-fail_sps_connect:
- dma_free_coherent(ipa3_ctx->pdev, ep->connect.desc.size,
- ep->connect.desc.base,
- ep->connect.desc.phys_base);
-fail_sps_cfg:
- sps_free_endpoint(ep->ep_hdl);
fail_gen2:
destroy_workqueue(ep->sys->repl_wq);
fail_wq2:
@@ -1528,7 +1025,7 @@
}
/**
- * ipa3_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
+ * ipa3_teardown_sys_pipe() - Teardown the GPI pipe and cleanup IPA EP
* @clnt_hdl: [in] the handle obtained from ipa3_setup_sys_pipe
*
* Returns: 0 on success, negative on failure
@@ -1573,57 +1070,49 @@
if (IPA_CLIENT_IS_CONS(ep->client))
cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
flush_workqueue(ep->sys->wq);
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- result = ipa3_stop_gsi_channel(clnt_hdl);
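+ /* GSI teardown order: stop, reset and deallocate the channel, free
+  * its ring memory, then release the event ring if one was allocated.
+  */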
+ result = ipa3_stop_gsi_channel(clnt_hdl);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("GSI stop chan err: %d.\n", result);
+ ipa_assert();
+ return result;
+ }
+ result = gsi_reset_channel(ep->gsi_chan_hdl);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("Failed to reset chan: %d.\n", result);
+ ipa_assert();
+ return result;
+ }
+ dma_free_coherent(ipa3_ctx->pdev,
+ ep->gsi_mem_info.chan_ring_len,
+ ep->gsi_mem_info.chan_ring_base_vaddr,
+ ep->gsi_mem_info.chan_ring_base_addr);
+ result = gsi_dealloc_channel(ep->gsi_chan_hdl);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("Failed to dealloc chan: %d.\n", result);
+ ipa_assert();
+ return result;
+ }
+
+ /* free event ring only when it is present */
+ if (ep->gsi_evt_ring_hdl != ~0) {
+ result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
if (result != GSI_STATUS_SUCCESS) {
- IPAERR("GSI stop chan err: %d.\n", result);
- BUG();
- return result;
- }
- result = gsi_reset_channel(ep->gsi_chan_hdl);
- if (result != GSI_STATUS_SUCCESS) {
- IPAERR("Failed to reset chan: %d.\n", result);
+ IPAERR("Failed to reset evt ring: %d.\n",
+ result);
BUG();
return result;
}
dma_free_coherent(ipa3_ctx->pdev,
- ep->gsi_mem_info.chan_ring_len,
- ep->gsi_mem_info.chan_ring_base_vaddr,
- ep->gsi_mem_info.chan_ring_base_addr);
- result = gsi_dealloc_channel(ep->gsi_chan_hdl);
+ ep->gsi_mem_info.evt_ring_len,
+ ep->gsi_mem_info.evt_ring_base_vaddr,
+ ep->gsi_mem_info.evt_ring_base_addr);
+ result = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
if (result != GSI_STATUS_SUCCESS) {
- IPAERR("Failed to dealloc chan: %d.\n", result);
+ IPAERR("Failed to dealloc evt ring: %d.\n",
+ result);
BUG();
return result;
}
-
- /* free event ring only when it is present */
- if (ep->gsi_evt_ring_hdl != ~0) {
- result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
- if (result != GSI_STATUS_SUCCESS) {
- IPAERR("Failed to reset evt ring: %d.\n",
- result);
- BUG();
- return result;
- }
- dma_free_coherent(ipa3_ctx->pdev,
- ep->gsi_mem_info.evt_ring_len,
- ep->gsi_mem_info.evt_ring_base_vaddr,
- ep->gsi_mem_info.evt_ring_base_addr);
- result = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
- if (result != GSI_STATUS_SUCCESS) {
- IPAERR("Failed to dealloc evt ring: %d.\n",
- result);
- BUG();
- return result;
- }
- }
- } else {
- sps_disconnect(ep->ep_hdl);
- dma_free_coherent(ipa3_ctx->pdev, ep->connect.desc.size,
- ep->connect.desc.base,
- ep->connect.desc.phys_base);
- sps_free_endpoint(ep->ep_hdl);
}
if (ep->sys->repl_wq)
flush_workqueue(ep->sys->repl_wq);
@@ -1662,7 +1151,6 @@
* @user2
*
 * This notify callback is for the destination client.
- * This function is supplied in ipa3_connect.
*/
static void ipa3_tx_comp_usr_notify_release(void *user1, int user2)
{
@@ -1704,11 +1192,8 @@
 * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
* the first descriptor will be used to inform the IPA hardware that
* apps need to push data into the IPA (IP_PACKET_INIT immediate command).
- * Once this send was done from SPS point-of-view the IPA driver will
- * get notified by the supplied callback - ipa_sps_irq_tx_comp()
- *
- * ipa_sps_irq_tx_comp will call to the user supplied
- * callback (from ipa3_connect)
+ * Once the send is done from the transport point of view, the IPA driver
+ * will be notified by the supplied callback.
*
* Returns: 0 on success, negative on failure
*/
@@ -1723,7 +1208,7 @@
struct ipa3_sys_context *sys;
int src_ep_idx;
int num_frags, f;
- struct ipa_gsi_ep_config *gsi_ep;
+ const struct ipa_gsi_ep_config *gsi_ep;
if (unlikely(!ipa3_ctx)) {
IPAERR("IPA3 driver was not initialized\n");
@@ -1777,7 +1262,7 @@
* 2 descriptors are needed for IP_PACKET_INIT and TAG_STATUS.
* 1 descriptor needed for the linear portion of skb.
*/
- gsi_ep = ipa3_get_gsi_ep_info(src_ep_idx);
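+ /* linearize when frags plus the 3 fixed descriptors exceed the
+  * EP's TLV FIFO
+  */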
+ gsi_ep = ipa3_get_gsi_ep_info(ipa3_ctx->ep[src_ep_idx].client);
if (gsi_ep && (num_frags + 3 > gsi_ep->ipa_if_tlv)) {
if (skb_linearize(skb)) {
IPAERR("Failed to linear skb with %d frags\n",
@@ -2032,24 +1517,17 @@
rx_pkt->sys = sys;
list_add_tail(&rx_pkt->link, &sys->head_desc_list);
- if (ipa3_ctx->transport_prototype ==
- IPA_TRANSPORT_TYPE_GSI) {
- memset(&gsi_xfer_elem_one, 0,
- sizeof(gsi_xfer_elem_one));
- gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
- gsi_xfer_elem_one.len = IPA_WLAN_RX_BUFF_SZ;
- gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
- gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
- gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
- gsi_xfer_elem_one.xfer_user_data = rx_pkt;
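+ /* one DATA element per rx buffer; EOT/EOB flags request a
+  * completion event, with rx_pkt echoed back as user data
+  */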
+ memset(&gsi_xfer_elem_one, 0,
+ sizeof(gsi_xfer_elem_one));
+ gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+ gsi_xfer_elem_one.len = IPA_WLAN_RX_BUFF_SZ;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+ gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+ gsi_xfer_elem_one.xfer_user_data = rx_pkt;
- ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
- &gsi_xfer_elem_one, true);
- } else {
- ret = sps_transfer_one(sys->ep->ep_hdl,
- rx_pkt->data.dma_addr,
- IPA_WLAN_RX_BUFF_SZ, rx_pkt, 0);
- }
+ ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
+ &gsi_xfer_elem_one, true);
if (ret) {
IPAERR("failed to provide buffer: %d\n", ret);
@@ -2176,7 +1654,6 @@
* - Fill the packets skb with data
* - Make the packet DMAable
* - Add the packet to the system pipe linked list
- * - Initiate a SPS transfer so that SPS driver will use this packet later.
*/
static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
{
@@ -2220,33 +1697,21 @@
list_add_tail(&rx_pkt->link, &sys->head_desc_list);
rx_len_cached = ++sys->len;
- if (ipa3_ctx->transport_prototype ==
- IPA_TRANSPORT_TYPE_GSI) {
- memset(&gsi_xfer_elem_one, 0,
- sizeof(gsi_xfer_elem_one));
- gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
- gsi_xfer_elem_one.len = sys->rx_buff_sz;
- gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
- gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
- gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
- gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+ memset(&gsi_xfer_elem_one, 0,
+ sizeof(gsi_xfer_elem_one));
+ gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+ gsi_xfer_elem_one.len = sys->rx_buff_sz;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+ gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+ gsi_xfer_elem_one.xfer_user_data = rx_pkt;
- ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
- 1, &gsi_xfer_elem_one, true);
- if (ret != GSI_STATUS_SUCCESS) {
- IPAERR("failed to provide buffer: %d\n",
- ret);
- goto fail_provide_rx_buffer;
- }
- } else {
- ret = sps_transfer_one(sys->ep->ep_hdl,
- rx_pkt->data.dma_addr, sys->rx_buff_sz,
- rx_pkt, 0);
-
- if (ret) {
- IPAERR("sps_transfer_one failed %d\n", ret);
- goto fail_provide_rx_buffer;
- }
+ ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
+ 1, &gsi_xfer_elem_one, true);
+ if (ret != GSI_STATUS_SUCCESS) {
+ IPAERR("failed to provide buffer: %d\n",
+ ret);
+ goto fail_provide_rx_buffer;
}
}
@@ -2327,33 +1792,21 @@
list_add_tail(&rx_pkt->link, &sys->head_desc_list);
rx_len_cached = ++sys->len;
- if (ipa3_ctx->transport_prototype ==
- IPA_TRANSPORT_TYPE_GSI) {
- memset(&gsi_xfer_elem_one, 0,
- sizeof(gsi_xfer_elem_one));
- gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
- gsi_xfer_elem_one.len = sys->rx_buff_sz;
- gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
- gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
- gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
- gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+ memset(&gsi_xfer_elem_one, 0,
+ sizeof(gsi_xfer_elem_one));
+ gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+ gsi_xfer_elem_one.len = sys->rx_buff_sz;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+ gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+ gsi_xfer_elem_one.xfer_user_data = rx_pkt;
- ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
- 1, &gsi_xfer_elem_one, true);
- if (ret != GSI_STATUS_SUCCESS) {
- IPAERR("failed to provide buffer: %d\n",
- ret);
- goto fail_provide_rx_buffer;
- }
- } else {
- ret = sps_transfer_one(sys->ep->ep_hdl,
- rx_pkt->data.dma_addr, sys->rx_buff_sz,
- rx_pkt, 0);
-
- if (ret) {
- IPAERR("sps_transfer_one failed %d\n", ret);
- goto fail_provide_rx_buffer;
- }
+ ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
+ 1, &gsi_xfer_elem_one, true);
+ if (ret != GSI_STATUS_SUCCESS) {
+ IPAERR("failed to provide buffer: %d\n",
+ ret);
+ goto fail_provide_rx_buffer;
}
}
@@ -2393,34 +1846,21 @@
rx_pkt = sys->repl.cache[curr];
list_add_tail(&rx_pkt->link, &sys->head_desc_list);
- if (ipa3_ctx->transport_prototype ==
- IPA_TRANSPORT_TYPE_GSI) {
- memset(&gsi_xfer_elem_one, 0,
- sizeof(gsi_xfer_elem_one));
- gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
- gsi_xfer_elem_one.len = sys->rx_buff_sz;
- gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
- gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
- gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
- gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+ memset(&gsi_xfer_elem_one, 0,
+ sizeof(gsi_xfer_elem_one));
+ gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+ gsi_xfer_elem_one.len = sys->rx_buff_sz;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+ gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+ gsi_xfer_elem_one.xfer_user_data = rx_pkt;
- ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
- &gsi_xfer_elem_one, true);
- if (ret != GSI_STATUS_SUCCESS) {
- IPAERR("failed to provide buffer: %d\n",
- ret);
- break;
- }
- } else {
- ret = sps_transfer_one(sys->ep->ep_hdl,
- rx_pkt->data.dma_addr, sys->rx_buff_sz,
- rx_pkt, 0);
-
- if (ret) {
- IPAERR("sps_transfer_one failed %d\n", ret);
- list_del(&rx_pkt->link);
- break;
- }
+ ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
+ &gsi_xfer_elem_one, true);
+ if (ret != GSI_STATUS_SUCCESS) {
+ IPAERR("failed to provide buffer: %d\n",
+ ret);
+ break;
}
rx_len_cached = ++sys->len;
curr = (curr + 1) % sys->repl.capacity;
@@ -3184,35 +2624,6 @@
ipa3_wq_rx_common(sys, 0);
}
-/**
- * ipa3_sps_irq_rx_no_aggr_notify() - Callback function which will be called by
- * the SPS driver after a Rx operation is complete.
- * Called in an interrupt context.
- * @notify: SPS driver supplied notification struct
- *
- * This function defer the work for this event to a workqueue.
- */
-void ipa3_sps_irq_rx_no_aggr_notify(struct sps_event_notify *notify)
-{
- struct ipa3_rx_pkt_wrapper *rx_pkt;
-
- switch (notify->event_id) {
- case SPS_EVENT_EOT:
- rx_pkt = notify->data.transfer.user;
- if (IPA_CLIENT_IS_APPS_CONS(rx_pkt->sys->ep->client))
- atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
- rx_pkt->len = notify->data.transfer.iovec.size;
- IPADBG_LOW("event %d notified sys=%p len=%u\n",
- notify->event_id,
- notify->user, rx_pkt->len);
- queue_work(rx_pkt->sys->wq, &rx_pkt->work);
- break;
- default:
- IPAERR("received unexpected event id %d sys=%p\n",
- notify->event_id, notify->user);
- }
-}
-
static int ipa3_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
struct ipa3_sys_context *sys)
{
@@ -3237,32 +2648,20 @@
{
if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
sys->policy = IPA_POLICY_INTR_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
- sys->sps_callback = ipa3_sps_irq_tx_no_aggr_notify;
return 0;
}
if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client)) {
sys->policy = IPA_POLICY_NOINTR_MODE;
- sys->sps_option = SPS_O_AUTO_ENABLE;
- sys->sps_callback = NULL;
return 0;
}
if (IPA_CLIENT_IS_PROD(in->client)) {
if (sys->ep->skip_ep_cfg) {
sys->policy = IPA_POLICY_INTR_POLL_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE|
- SPS_O_EOT | SPS_O_ACK_TRANSFERS);
- sys->sps_callback = ipa3_sps_irq_tx_notify;
- INIT_WORK(&sys->work, ipa3_wq_handle_tx);
- INIT_DELAYED_WORK(&sys->switch_to_intr_work,
- ipa3_switch_to_intr_tx_work_func);
atomic_set(&sys->curr_polling_state, 0);
} else {
sys->policy = IPA_POLICY_NOINTR_MODE;
- sys->sps_option = SPS_O_AUTO_ENABLE;
- sys->sps_callback = NULL;
sys->ep->status.status_en = true;
sys->ep->status.status_ep = ipa3_get_ep_mapping(
IPA_CLIENT_APPS_LAN_CONS);
@@ -3272,9 +2671,6 @@
in->client == IPA_CLIENT_APPS_WAN_CONS) {
sys->ep->status.status_en = true;
sys->policy = IPA_POLICY_INTR_POLL_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
- | SPS_O_ACK_TRANSFERS);
- sys->sps_callback = ipa3_sps_irq_rx_notify;
INIT_WORK(&sys->work, ipa3_wq_handle_rx);
INIT_DELAYED_WORK(&sys->switch_to_intr_work,
ipa3_switch_to_intr_rx_work_func);
@@ -3370,9 +2766,6 @@
in->client);
sys->policy = IPA_POLICY_INTR_POLL_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
- | SPS_O_ACK_TRANSFERS);
- sys->sps_callback = ipa3_sps_irq_rx_notify;
INIT_WORK(&sys->work, ipa3_wq_handle_rx);
INIT_DELAYED_WORK(&sys->switch_to_intr_work,
ipa3_switch_to_intr_rx_work_func);
@@ -3380,8 +2773,8 @@
ipa3_replenish_rx_work_func);
atomic_set(&sys->curr_polling_state, 0);
sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
- sys->rx_pool_sz = in->desc_fifo_sz/
- sizeof(struct sps_iovec) - 1;
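+ /* one rx buffer per descriptor FIFO element, keeping one element spare */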
+ sys->rx_pool_sz = in->desc_fifo_sz /
+ IPA_FIFO_ELEMENT_SIZE - 1;
if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
sys->pyld_hdlr = NULL;
@@ -3395,18 +2788,15 @@
in->client);
sys->policy = IPA_POLICY_INTR_POLL_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
- | SPS_O_ACK_TRANSFERS);
- sys->sps_callback = ipa3_sps_irq_rx_notify;
INIT_WORK(&sys->work, ipa3_wq_handle_rx);
INIT_DELAYED_WORK(&sys->switch_to_intr_work,
- ipa3_switch_to_intr_rx_work_func);
+ ipa3_switch_to_intr_rx_work_func);
INIT_DELAYED_WORK(&sys->replenish_rx_work,
ipa3_replenish_rx_work_func);
atomic_set(&sys->curr_polling_state, 0);
sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
sys->rx_pool_sz = in->desc_fifo_sz /
- sizeof(struct sps_iovec) - 1;
+ IPA_FIFO_ELEMENT_SIZE - 1;
if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr;
@@ -3420,9 +2810,6 @@
in->client);
sys->policy = IPA_POLICY_INTR_POLL_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
- | SPS_O_ACK_TRANSFERS);
- sys->sps_callback = ipa3_sps_irq_rx_notify;
INIT_WORK(&sys->work, ipa3_wq_handle_rx);
INIT_DELAYED_WORK(&sys->switch_to_intr_work,
ipa3_switch_to_intr_rx_work_func);
@@ -3432,8 +2819,6 @@
in->client);
sys->policy = IPA_POLICY_NOINTR_MODE;
- sys->sps_option = SPS_O_AUTO_ENABLE |
- SPS_O_ACK_TRANSFERS | SPS_O_POLL;
} else {
IPAERR("Need to install a RX pipe hdlr\n");
WARN_ON(1);
@@ -3507,13 +2892,11 @@
* from WLAN1_PROD pipe to IPA HW
*
* The function will send data descriptors from WLAN1_PROD (one
- * at a time) using sps_transfer_one. Will set EOT flag for last
- * descriptor Once this send was done from SPS point-of-view the
- * IPA driver will get notified by the supplied callback -
- * ipa3_sps_irq_tx_no_aggr_notify()
+ * at a time). The EOT flag will be set for the last descriptor. Once the
+ * send is done from the transport point of view, the IPA driver will be
+ * notified by the supplied callback - ipa_gsi_irq_tx_notify_cb()
*
- * ipa3_sps_irq_tx_no_aggr_notify will call to the user supplied
- * callback (from ipa3_connect)
+ * ipa_gsi_irq_tx_notify_cb() will invoke the user-supplied callback
*
* Returns: 0 on success, negative on failure
*/
@@ -3638,7 +3021,7 @@
/* Functions added to support kernel tests */
int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
- unsigned long *ipa_bam_or_gsi_hdl,
+ unsigned long *ipa_transport_hdl,
u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
{
struct ipa3_ep_context *ep;
@@ -3650,7 +3033,7 @@
goto fail_gen;
}
- if (ipa_bam_or_gsi_hdl == NULL || ipa_pipe_num == NULL) {
+ if (ipa_transport_hdl == NULL || ipa_pipe_num == NULL) {
IPAERR("NULL args\n");
goto fail_gen;
}
@@ -3735,10 +3118,7 @@
*clnt_hdl = ipa_ep_idx;
*ipa_pipe_num = ipa_ep_idx;
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
- *ipa_bam_or_gsi_hdl = ipa3_ctx->gsi_dev_hdl;
- else
- *ipa_bam_or_gsi_hdl = ipa3_ctx->bam_handle;
+ *ipa_transport_hdl = ipa3_ctx->gsi_dev_hdl;
if (!ep->keep_ipa_awake)
IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
@@ -3958,7 +3338,7 @@
struct gsi_evt_ring_props gsi_evt_ring_props;
struct gsi_chan_props gsi_channel_props;
union __packed gsi_channel_scratch ch_scratch;
- struct ipa_gsi_ep_config *gsi_ep_info;
+ const struct ipa_gsi_ep_config *gsi_ep_info;
dma_addr_t dma_addr;
dma_addr_t evt_dma_addr;
int result;
@@ -4021,9 +3401,10 @@
gsi_channel_props.max_re_expected = ep->sys->rx_pool_sz;
}
- gsi_ep_info = ipa3_get_gsi_ep_info(ipa3_get_ep_mapping(ep->client));
+ gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
if (!gsi_ep_info) {
- IPAERR("Invalid ep number\n");
+ IPAERR("Failed getting GSI EP info for client=%d\n",
+ ep->client);
result = -EINVAL;
goto fail_get_gsi_ep_info;
} else
@@ -4181,83 +3562,6 @@
return ret;
}
-static int ipa_handle_rx_core_gsi(struct ipa3_sys_context *sys,
- bool process_all, bool in_poll_state)
-{
- int ret;
- int cnt = 0;
- struct ipa_mem_buffer mem_info = {0};
-
- while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
- !atomic_read(&sys->curr_polling_state))) {
- if (cnt && !process_all)
- break;
-
- ret = ipa_poll_gsi_pkt(sys, &mem_info);
- if (ret)
- break;
-
- if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
- ipa3_dma_memcpy_notify(sys, &mem_info);
- else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
- ipa3_wlan_wq_rx_common(sys, mem_info.size);
- else
- ipa3_wq_rx_common(sys, mem_info.size);
-
- cnt++;
- }
- return cnt;
-}
-
-static int ipa_poll_sps_pkt(struct ipa3_sys_context *sys,
- struct ipa_mem_buffer *mem_info)
-{
- int ret;
- struct sps_iovec iov;
-
- ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
- if (ret) {
- IPAERR("sps_get_iovec failed %d\n", ret);
- return ret;
- }
-
- if (iov.addr == 0)
- return -EIO;
-
- mem_info->phys_base = iov.addr;
- mem_info->size = iov.size;
- return 0;
-}
-
-static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
- bool process_all, bool in_poll_state)
-{
- int ret;
- int cnt = 0;
- struct ipa_mem_buffer mem_info = {0};
-
- while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
- !atomic_read(&sys->curr_polling_state))) {
- if (cnt && !process_all)
- break;
-
- ret = ipa_poll_sps_pkt(sys, &mem_info);
- if (ret)
- break;
-
- if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
- ipa3_dma_memcpy_notify(sys, &mem_info);
- else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
- ipa3_wlan_wq_rx_common(sys, mem_info.size);
- else
- ipa3_wq_rx_common(sys, mem_info.size);
-
- cnt++;
- }
-
- return cnt;
-}
-
/**
* ipa3_rx_poll() - Poll the rx packets from IPA HW. This
 * function is executed in the softirq context
@@ -4287,11 +3591,7 @@
while (cnt < weight &&
atomic_read(&ep->sys->curr_polling_state)) {
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
- ret = ipa_poll_gsi_pkt(ep->sys, &mem_info);
- else
- ret = ipa_poll_sps_pkt(ep->sys, &mem_info);
-
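+ /* budgeted poll: consume at most 'weight' packets per softirq pass */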
+ ret = ipa_poll_gsi_pkt(ep->sys, &mem_info);
if (ret)
break;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index acad448..9e72f67 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <linux/ipa.h>
#include <linux/ipa_usb.h>
-#include <linux/msm-sps.h>
#include <asm/dma-iommu.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
@@ -129,15 +128,6 @@
#define IPA_HDR_PROC_CTX_BIN1 1
#define IPA_HDR_PROC_CTX_BIN_MAX 2
-#define IPA_EVENT_THRESHOLD 0x10
-
-/*
- * Due to ZLT issue with USB 3.0 core, IPA BAM threashold need to be set
- * to max packet size + 1. After setting the threshold, USB core
- * will not be notified on ZLTs
- */
-#define IPA_USB_EVENT_THRESHOLD 0x4001
-
#define IPA_RX_POOL_CEIL 32
#define IPA_RX_SKB_SIZE 1792
@@ -148,9 +138,6 @@
#define IPA_CLIENT_IS_PROD(x) (x >= IPA_CLIENT_PROD && x < IPA_CLIENT_CONS)
#define IPA_CLIENT_IS_CONS(x) (x >= IPA_CLIENT_CONS && x < IPA_CLIENT_MAX)
-#define IPA_PIPE_MEM_START_OFST_ALIGNMENT(start_ofst) \
- (((start_ofst) + 127) & ~127)
-
#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8
#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \
(((start_ofst) + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1) & \
@@ -493,7 +480,6 @@
* struct ipa3_ep_context - IPA end point context
* @valid: flag indicating id EP context is valid
* @client: EP client type
- * @ep_hdl: EP's client SPS handle
* @gsi_chan_hdl: EP's GSI channel handle
* @gsi_evt_ring_hdl: EP's GSI channel event ring handle
* @gsi_mem_info: EP's GSI channel rings info
@@ -501,17 +487,10 @@
 * @cfg: EP configuration
* @dst_pipe_index: destination pipe index
* @rt_tbl_idx: routing table index
- * @connect: SPS connect
* @priv: user provided information which will forwarded once the user is
* notified for new data avail
 * @client_notify: user provided CB for EP event notification; the event is
 * data received.
- * @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory
- * @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory
- * @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset
- * @data_fifo_pipe_mem_ofst: data FIFO pipe memory offset
- * @desc_fifo_client_allocated: if descriptors FIFO was allocated by a client
- * @data_fifo_client_allocated: if data FIFO was allocated by a client
* @skip_ep_cfg: boolean field that determines if EP should be configured
* by IPA driver
* @keep_ipa_awake: when true, IPA will not be clock gated
@@ -523,7 +502,6 @@
struct ipa3_ep_context {
int valid;
enum ipa_client_type client;
- struct sps_pipe *ep_hdl;
unsigned long gsi_chan_hdl;
unsigned long gsi_evt_ring_hdl;
struct ipa_gsi_ep_mem_info gsi_mem_info;
@@ -536,16 +514,9 @@
struct ipahal_reg_ep_cfg_status status;
u32 dst_pipe_index;
u32 rt_tbl_idx;
- struct sps_connect connect;
void *priv;
void (*client_notify)(void *priv, enum ipa_dp_evt_type evt,
unsigned long data);
- bool desc_fifo_in_pipe_mem;
- bool data_fifo_in_pipe_mem;
- u32 desc_fifo_pipe_mem_ofst;
- u32 data_fifo_pipe_mem_ofst;
- bool desc_fifo_client_allocated;
- bool data_fifo_client_allocated;
atomic_t avail_fifo_desc;
u32 dflt_flt4_rule_hdl;
u32 dflt_flt6_rule_hdl;
@@ -610,18 +581,16 @@
};
/**
- * struct ipa3_sys_context - IPA endpoint context for system to BAM pipes
+ * struct ipa3_sys_context - IPA GPI pipes context
* @head_desc_list: header descriptors list
* @len: the size of the above list
* @spinlock: protects the list and its size
- * @event: used to request CALLBACK mode from SPS driver
* @ep: IPA EP context
*
- * IPA context specific to the system-bam pipes a.k.a LAN IN/OUT and WAN
+ * IPA context specific to the GPI pipes, a.k.a. LAN IN/OUT and WAN
*/
struct ipa3_sys_context {
u32 len;
- struct sps_register_event event;
atomic_t curr_polling_state;
struct delayed_work switch_to_intr_work;
enum ipa3_sys_pipe_policy policy;
@@ -637,8 +606,6 @@
unsigned int len_partial;
bool drop_packet;
struct work_struct work;
- void (*sps_callback)(struct sps_event_notify *notify);
- enum sps_option sps_option;
struct delayed_work replenish_rx_work;
struct work_struct repl_work;
void (*repl_hdlr)(struct ipa3_sys_context *sys);
@@ -677,8 +644,6 @@
* @user1: cookie1 for above callback
* @user2: cookie2 for above callback
* @sys: corresponding IPA sys context
- * @mult: valid only for first of a "multiple" transfer,
- * holds info for the "sps_transfer" buffer
* @cnt: 1 for single transfers,
* >1 and <0xFFFF for first of a "multiple" transfer,
 * 0xFFFF for last desc, 0 for rest of "multiple" transfer
@@ -696,7 +661,6 @@
void *user1;
int user2;
struct ipa3_sys_context *sys;
- struct ipa_mem_buffer mult;
u32 cnt;
void *bounce;
bool no_unmap_dma;
@@ -977,15 +941,9 @@
/**
* struct ipa3_transport_pm - transport power management related members
- * @lock: lock for ensuring atomic operations
- * @res_granted: true if SPS requested IPA resource and IPA granted it
- * @res_rel_in_prog: true if releasing IPA resource is in progress
* @transport_pm_mutex: Mutex to protect the transport_pm functionality.
*/
struct ipa3_transport_pm {
- spinlock_t lock;
- bool res_granted;
- bool res_rel_in_prog;
atomic_t dec_clients;
atomic_t eot_activity;
struct mutex transport_pm_mutex;
@@ -1034,13 +992,12 @@
* @dev_num: device number
* @dev: the dev_t of the device
* @cdev: cdev of the device
- * @bam_handle: IPA driver's BAM handle
* @ep: list of all end points
 * @skip_ep_cfg_shadow: state to update filter table correctly across
 * power-save
* @ep_flt_bitmap: End-points supporting filtering bitmap
* @ep_flt_num: End-points supporting filtering number
- * @resume_on_connect: resume ep on ipa3_connect
+ * @resume_on_connect: resume ep on ipa connect
* @flt_tbl: list of all IPA filter tables
* @mode: IPA operating mode
* @mmio: iomem
@@ -1084,13 +1041,10 @@
* gating IPA clocks
* @transport_pm: transport power management related information
* @disconnect_lock: protects LAN_CONS packet receive notification CB
- * @pipe_mem_pool: pipe memory pool
- * @dma_pool: special purpose DMA pool
* @ipa3_active_clients: structure for reference counting connected IPA clients
 * @ipa_hw_type: type of IPA HW (e.g. IPA 1.0, IPA 1.1, etc.)
 * @ipa3_hw_mode: mode of IPA HW (e.g. Normal, Virtual or over PCIe)
* @use_ipa_teth_bridge: use tethering bridge driver
- * @ipa_bam_remote_mode: ipa bam is in remote mode
* @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules
* @logbuf: ipc log buffer for high priority messages
* @logbuf_low: ipc log buffer for low priority messages
@@ -1123,7 +1077,6 @@
dev_t dev_num;
struct device *dev;
struct cdev cdev;
- unsigned long bam_handle;
struct ipa3_ep_context ep[IPA3_MAX_NUM_PIPES];
bool skip_ep_cfg_shadow[IPA3_MAX_NUM_PIPES];
u32 ep_flt_bitmap;
@@ -1170,8 +1123,6 @@
bool ip4_flt_tbl_nhash_lcl;
bool ip6_flt_tbl_hash_lcl;
bool ip6_flt_tbl_nhash_lcl;
- struct gen_pool *pipe_mem_pool;
- struct dma_pool *dma_pool;
struct ipa3_active_clients ipa3_active_clients;
struct ipa3_active_clients_log_ctx ipa3_active_clients_logging;
struct workqueue_struct *power_mgmt_wq;
@@ -1191,7 +1142,6 @@
enum ipa_hw_type ipa_hw_type;
enum ipa3_hw_mode ipa3_hw_mode;
bool use_ipa_teth_bridge;
- bool ipa_bam_remote_mode;
bool modem_cfg_emb_pipe_flt;
bool ipa_wdi2;
bool use_64_bit_dma_mask;
@@ -1220,18 +1170,12 @@
u32 wan_rx_ring_size;
u32 lan_rx_ring_size;
bool skip_uc_pipe_reset;
- enum ipa_transport_type transport_prototype;
unsigned long gsi_dev_hdl;
u32 ee;
bool apply_rg10_wa;
bool gsi_ch20_wa;
bool smmu_present;
bool smmu_s1_bypass;
- unsigned long peer_bam_iova;
- phys_addr_t peer_bam_pa;
- u32 peer_bam_map_size;
- unsigned long peer_bam_dev;
- u32 peer_bam_map_cnt;
u32 wdi_map_cnt;
struct wakeup_source w_lock;
struct ipa3_wakelock_ref_cnt wakelock_ref_cnt;
@@ -1249,18 +1193,6 @@
struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
};
-/**
- * enum ipa3_pipe_mem_type - IPA pipe memory type
- * @IPA_SPS_PIPE_MEM: Default, SPS dedicated pipe memory
- * @IPA_PRIVATE_MEM: IPA's private memory
- * @IPA_SYSTEM_MEM: System RAM, requires allocation
- */
-enum ipa3_pipe_mem_type {
- IPA_SPS_PIPE_MEM = 0,
- IPA_PRIVATE_MEM = 1,
- IPA_SYSTEM_MEM = 2,
-};
-
struct ipa3_plat_drv_res {
bool use_ipa_teth_bridge;
u32 ipa_mem_base;
@@ -1274,14 +1206,12 @@
enum ipa_hw_type ipa_hw_type;
enum ipa3_hw_mode ipa3_hw_mode;
u32 ee;
- bool ipa_bam_remote_mode;
bool modem_cfg_emb_pipe_flt;
bool ipa_wdi2;
bool use_64_bit_dma_mask;
u32 wan_rx_ring_size;
u32 lan_rx_ring_size;
bool skip_uc_pipe_reset;
- enum ipa_transport_type transport_prototype;
bool apply_rg10_wa;
bool gsi_ch20_wa;
bool tethered_flow_control;
@@ -1468,14 +1398,6 @@
extern struct ipa3_context *ipa3_ctx;
/* public APIs */
-/*
- * Connect / Disconnect
- */
-int ipa3_connect(const struct ipa_connect_params *in,
- struct ipa_sps_params *sps,
- u32 *clnt_hdl);
-int ipa3_disconnect(u32 clnt_hdl);
-
/* Generic GSI channels functions */
int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
struct ipa_req_chan_out_params *out_params);
@@ -1506,11 +1428,6 @@
int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl);
/*
- * Resume / Suspend
- */
-int ipa3_reset_endpoint(u32 clnt_hdl);
-
-/*
* Remove ep delay
*/
int ipa3_clear_endpoint_delay(u32 clnt_hdl);
@@ -1675,7 +1592,7 @@
int ipa3_teardown_sys_pipe(u32 clnt_hdl);
int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
- unsigned long *ipa_bam_hdl,
+ unsigned long *ipa_transport_hdl,
u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status);
int ipa3_sys_teardown(u32 clnt_hdl);
@@ -1794,8 +1711,6 @@
/*
* Miscellaneous
*/
-void ipa3_bam_reg_dump(void);
-
int ipa3_get_ep_mapping(enum ipa_client_type client);
bool ipa3_is_ready(void);
@@ -1857,9 +1772,6 @@
int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout);
int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr);
int ipa3_cfg_filter(u32 disable);
-int ipa3_pipe_mem_init(u32 start_ofst, u32 size);
-int ipa3_pipe_mem_alloc(u32 *ofst, u32 size);
-int ipa3_pipe_mem_free(u32 ofst, u32 size);
int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary);
struct ipa3_context *ipa3_get_ctx(void);
void ipa3_enable_clks(void);
@@ -1956,15 +1868,11 @@
void ipa3_q6_post_shutdown_cleanup(void);
int ipa3_init_q6_smem(void);
-int ipa3_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect,
- enum ipa_client_type ipa_client);
-
int ipa3_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
int ipa3_mhi_query_ch_info(enum ipa_client_type client,
struct gsi_chan_info *ch_info);
int ipa3_uc_interface_init(void);
-int ipa3_uc_reset_pipe(enum ipa_client_type ipa_client);
int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client);
int ipa3_uc_state_check(void);
int ipa3_uc_loaded_check(void);
@@ -1995,7 +1903,8 @@
int ipa3_uc_mhi_print_stats(char *dbg_buff, int size);
int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
void ipa3_tag_destroy_imm(void *user1, int user2);
-struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(int ipa_ep_idx);
+const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
+ (enum ipa_client_type client);
void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val);
u32 ipa3_get_num_pipes(void);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
index 75711c0..e7f8acd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -61,7 +61,7 @@
[IPA_PROC_ERR_IRQ] = 13,
[IPA_TX_SUSPEND_IRQ] = 14,
[IPA_TX_HOLB_DROP_IRQ] = 15,
- [IPA_BAM_GSI_IDLE_IRQ] = 16,
+ [IPA_GSI_IDLE_IRQ] = 16,
};
static void ipa3_interrupt_defer(struct work_struct *work);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index 9e2ffe7..f66e3a3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -197,14 +197,14 @@
struct gsi_chan_props ch_props;
union __packed gsi_channel_scratch ch_scratch;
struct ipa3_ep_context *ep;
- struct ipa_gsi_ep_config *ep_cfg;
+ const struct ipa_gsi_ep_config *ep_cfg;
IPA_MHI_FUNC_ENTRY();
ep = &ipa3_ctx->ep[ipa_ep_idx];
msi = params->msi;
- ep_cfg = ipa_get_gsi_ep_info(ipa_ep_idx);
+ ep_cfg = ipa3_get_gsi_ep_info(client);
if (!ep_cfg) {
IPA_MHI_ERR("Wrong parameter, ep_cfg is NULL\n");
return -EPERM;
@@ -332,7 +332,7 @@
{
int res;
struct gsi_device_scratch gsi_scratch;
- struct ipa_gsi_ep_config *gsi_ep_info;
+ const struct ipa_gsi_ep_config *gsi_ep_info;
IPA_MHI_FUNC_ENTRY();
@@ -342,8 +342,7 @@
}
/* Initialize IPA MHI engine */
- gsi_ep_info = ipa_get_gsi_ep_info(
- ipa_get_ep_mapping(IPA_CLIENT_MHI_PROD));
+ gsi_ep_info = ipa3_get_gsi_ep_info(IPA_CLIENT_MHI_PROD);
if (!gsi_ep_info) {
IPAERR("MHI PROD has no ep allocated\n");
ipa_assert();
@@ -497,12 +496,10 @@
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
- res = gsi_dealloc_channel(ep->gsi_chan_hdl);
- if (res) {
- IPAERR("gsi_dealloc_channel failed %d\n", res);
- goto fail_reset_channel;
- }
+ res = gsi_dealloc_channel(ep->gsi_chan_hdl);
+ if (res) {
+ IPAERR("gsi_dealloc_channel failed %d\n", res);
+ goto fail_reset_channel;
}
ep->valid = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index 6167301..f5ef141 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -89,7 +89,7 @@
};
/**
- * struct IpaHwResetPipeCmdData_t - Structure holding the parameters
+ * struct IpaHwMemCopyData_t - Structure holding the parameters
* for IPA_CPU_2_HW_CMD_MEMCPY command.
*
* The parameters are passed as immediate params in the shared memory
@@ -102,24 +102,6 @@
};
/**
- * union IpaHwResetPipeCmdData_t - Structure holding the parameters
- * for IPA_CPU_2_HW_CMD_RESET_PIPE command.
- * @pipeNum : Pipe number to be reset
- * @direction : 1 - IPA Producer, 0 - IPA Consumer
- * @reserved_02_03 : Reserved
- *
- * The parameters are passed as immediate params in the shared memory
- */
-union IpaHwResetPipeCmdData_t {
- struct IpaHwResetPipeCmdParams_t {
- u8 pipeNum;
- u8 direction;
- u32 reserved_02_03;
- } __packed params;
- u32 raw32b;
-} __packed;
-
-/**
* struct IpaHwRegWriteCmdData_t - holds the parameters for
* IPA_CPU_2_HW_CMD_REG_WRITE command. Parameters are
* sent as 64b immediate parameters.
@@ -790,67 +772,16 @@
IPADBG("uC handlers registered for feature %u\n", feature);
}
-/**
- * ipa3_uc_reset_pipe() - reset a BAM pipe using the uC interface
- * @ipa_client: [in] ipa client handle representing the pipe
- *
- * The function uses the uC interface in order to issue a BAM
- * PIPE reset request. The uC makes sure there's no traffic in
- * the TX command queue before issuing the reset.
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_uc_reset_pipe(enum ipa_client_type ipa_client)
-{
- union IpaHwResetPipeCmdData_t cmd;
- int ep_idx;
- int ret;
-
- ep_idx = ipa3_get_ep_mapping(ipa_client);
- if (ep_idx == -1) {
- IPAERR("Invalid IPA client\n");
- return 0;
- }
-
- /*
- * If the uC interface has not been initialized yet,
- * continue with the sequence without resetting the
- * pipe.
- */
- if (ipa3_uc_state_check()) {
- IPADBG("uC interface will not be used to reset %s pipe %d\n",
- IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD",
- ep_idx);
- return 0;
- }
-
- /*
- * IPA consumer = 0, IPA producer = 1.
- * IPA driver concept of PROD/CONS is the opposite of the
- * IPA HW concept. Therefore, IPA AP CLIENT PRODUCER = IPA CONSUMER,
- * and vice-versa.
- */
- cmd.params.direction = (u8)(IPA_CLIENT_IS_PROD(ipa_client) ? 0 : 1);
- cmd.params.pipeNum = (u8)ep_idx;
-
- IPADBG("uC pipe reset on IPA %s pipe %d\n",
- IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD", ep_idx);
-
- ret = ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_RESET_PIPE, 0,
- false, 10*HZ);
-
- return ret;
-}
-
int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client)
{
- struct ipa_gsi_ep_config *gsi_ep_info;
+ const struct ipa_gsi_ep_config *gsi_ep_info;
union IpaHwChkChEmptyCmdData_t cmd;
int ret;
- gsi_ep_info = ipa3_get_gsi_ep_info(ipa3_get_ep_mapping(ipa_client));
+ gsi_ep_info = ipa3_get_gsi_ep_info(ipa_client);
if (!gsi_ep_info) {
- IPAERR("Invalid IPA ep index\n");
+ IPAERR("Failed getting GSI EP info for client=%d\n",
+ ipa_client);
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
index 7949d91..ac5f421 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -68,7 +68,7 @@
* Values that represent MHI related HW event to be sent to CPU.
 * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR: Event specifying the device detected an
* error in an element from the transfer ring associated with the channel
- * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST: Event specify a bam
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST: Event specifying a transport
* interrupt was asserted when MHI engine is suspended
*/
enum ipa_hw_2_cpu_mhi_events {
@@ -187,7 +187,7 @@
* value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
* @contexArrayIndex: Unique index for channels, between 0 and 255. The index is
* used as an index in channel context array structures.
- * @bamPipeId: The BAM pipe number for pipe dedicated for this channel
+ * @bamPipeId: The IPA pipe number of the pipe dedicated to this channel
* @channelDirection: The direction of the channel as defined in the channel
* type field (CHTYPE) in the channel context data structure.
* @reserved: reserved.
@@ -264,8 +264,8 @@
* @state: The new channel state. In case state is not as requested this is
* error indication for the last command
* @channelHandle: The channel identifier
- * @additonalParams: For stop: the number of pending bam descriptors currently
- * queued
+ * @additonalParams: For stop: the number of pending transport descriptors
+ * currently queued
*/
union IpaHwMhiChangeChannelStateResponseData_t {
struct IpaHwMhiChangeChannelStateResponseParams_t {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 4c1f2b35..ab9bbed 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -95,8 +95,9 @@
#define QMB_MASTER_SELECT_PCIE (1)
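+/* unused clients carry sentinel (-1) GSI EP info */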
#define IPA_CLIENT_NOT_USED \
- {IPA_EP_NOT_ALLOCATED, IPA_EP_NOT_ALLOCATED, false, \
- IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR}
+ { IPA_EP_NOT_ALLOCATED, IPA_EP_NOT_ALLOCATED, false, \
+ IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, \
+ { -1, -1, -1, -1, -1 } }
/* Resource Group index*/
#define IPA_v3_0_GROUP_UL (0)
@@ -111,12 +112,12 @@
#define IPA_v3_0_GROUP_Q6ZIP_ENGINE IPA_v3_0_GROUP_UC_RX_Q
#define IPA_v3_0_GROUP_MAX (6)
-#define IPA_v3_5_1_GROUP_LWA_DL (0)
-#define IPA_v3_5_1_GROUP_UL_DL (1)
-#define IPA_v3_5_1_GROUP_DMA (2)
-#define IPA_v3_5_1_GROUP_UC_RX_Q (3)
-#define IPA_v3_5_1_SRC_GROUP_MAX (4)
-#define IPA_v3_5_1_DST_GROUP_MAX (3)
+#define IPA_v3_5_GROUP_LWA_DL (0)
+#define IPA_v3_5_GROUP_UL_DL (1)
+#define IPA_v3_5_GROUP_DMA (2)
+#define IPA_v3_5_GROUP_UC_RX_Q (3)
+#define IPA_v3_5_SRC_GROUP_MAX (4)
+#define IPA_v3_5_DST_GROUP_MAX (3)
#define IPA_GROUP_MAX IPA_v3_0_GROUP_MAX
@@ -131,12 +132,12 @@
IPA_v3_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX,
- IPA_v3_5_1_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS = 0,
- IPA_v3_5_1_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS,
- IPA_v3_5_1_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
- IPA_v3_5_1_RSRC_GRP_TYPE_SRC_HPS_DMARS,
- IPA_v3_5_1_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
- IPA_v3_5_1_RSRC_GRP_TYPE_SRC_MAX
+ IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_v3_5_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS,
+ IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS,
+ IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
+ IPA_v3_5_RSRC_GRP_TYPE_SRC_MAX
};
#define IPA_RSRC_GRP_TYPE_SRC_MAX IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX
@@ -147,9 +148,9 @@
IPA_v3_0_RSRC_GRP_TYPE_DST_DPS_DMARS,
IPA_v3_0_RSRC_GRP_TYPE_DST_MAX,
- IPA_v3_5_1_RSRC_GRP_TYPE_DST_DATA_SECTORS = 0,
- IPA_v3_5_1_RSRC_GRP_TYPE_DST_DPS_DMARS,
- IPA_v3_5_1_RSRC_GRP_TYPE_DST_MAX,
+ IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS = 0,
+ IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS,
+ IPA_v3_5_RSRC_GRP_TYPE_DST_MAX,
};
#define IPA_RSRC_GRP_TYPE_DST_MAX IPA_v3_0_RSRC_GRP_TYPE_DST_MAX
@@ -192,15 +193,15 @@
},
[IPA_3_5_1] = {
 /* LWA_DL UL_DL not used UC_RX_Q, others are invalid */
- [IPA_v3_5_1_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+ [IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
{1, 255}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} },
- [IPA_v3_5_1_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
+ [IPA_v3_5_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
{10, 10}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
- [IPA_v3_5_1_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ [IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
{12, 12}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
- [IPA_v3_5_1_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+ [IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 0}, {0, 0} },
- [IPA_v3_5_1_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+ [IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
{14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} },
}
};
@@ -218,9 +219,9 @@
},
[IPA_3_5_1] = {
 /* LWA_DL UL/DL/DPL not used, others are invalid */
- [IPA_v3_5_1_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+ [IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
{4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} },
- [IPA_v3_5_1_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+ [IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
{2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} },
}
};
@@ -238,12 +239,19 @@
},
};
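+/* GSI execution environments that can own an endpoint's channel */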
+enum ipa_ees {
+ IPA_EE_AP = 0,
+ IPA_EE_Q6 = 1,
+ IPA_EE_UC = 3,
+};
+
struct ipa_ep_configuration {
int pipe_num;
int group_num;
bool support_flt;
int sequencer_type;
u8 qmb_master_sel;
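+ /* per-client GSI EP info: EP and channel numbers, interface FIFO
+  * sizes, owning EE
+  */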
+ struct ipa_gsi_ep_config ipa_gsi_ep_info;
};
static const struct ipa_ep_configuration ipa3_ep_mapping
@@ -252,7 +260,8 @@
[IPA_3_0][IPA_CLIENT_WLAN1_PROD] = {
10, IPA_v3_0_GROUP_UL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 10, 1, 8, 16, IPA_EE_UC } },
[IPA_3_0][IPA_CLIENT_HSIC2_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_USB2_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_HSIC3_PROD] = IPA_CLIENT_NOT_USED,
@@ -263,188 +272,227 @@
[IPA_3_0][IPA_CLIENT_USB_PROD] = {
1, IPA_v3_0_GROUP_UL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
- [IPA_3_0][IPA_CLIENT_UC_USB_PROD] = {
- 2, IPA_v3_0_GROUP_UL, true,
- IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 1, 3, 8, 16, IPA_EE_AP } },
+ [IPA_3_0][IPA_CLIENT_UC_USB_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_A2_EMBEDDED_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_A2_TETHERED_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_APPS_LAN_WAN_PROD] = {
14, IPA_v3_0_GROUP_UL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
- [IPA_3_0][IPA_CLIENT_APPS_CMD_PROD]
- = {22, IPA_v3_0_GROUP_IMM_CMD, false,
+ QMB_MASTER_SELECT_DDR,
+ { 14, 11, 8, 16, IPA_EE_AP } },
+ [IPA_3_0][IPA_CLIENT_APPS_CMD_PROD] = {
+ 22, IPA_v3_0_GROUP_IMM_CMD, false,
IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 22, 6, 18, 28, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_ODU_PROD] = {
12, IPA_v3_0_GROUP_UL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 12, 9, 8, 16, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_MHI_PROD] = {
0, IPA_v3_0_GROUP_UL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_PCIE},
+ QMB_MASTER_SELECT_PCIE,
+ { 0, 0, 8, 16, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_Q6_LAN_PROD] = {
9, IPA_v3_0_GROUP_UL, false,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 9, 4, 8, 12, IPA_EE_Q6 } },
[IPA_3_0][IPA_CLIENT_Q6_WAN_PROD] = {
- 5, IPA_v3_0_GROUP_DL,
- true, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
- [IPA_3_0][IPA_CLIENT_Q6_CMD_PROD]
- = {6, IPA_v3_0_GROUP_IMM_CMD, false,
+ 5, IPA_v3_0_GROUP_DL, true,
IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
- [IPA_3_0][IPA_CLIENT_Q6_DECOMP_PROD] = {7, IPA_v3_0_GROUP_Q6ZIP,
+ QMB_MASTER_SELECT_DDR,
+ { 5, 0, 16, 32, IPA_EE_Q6 } },
+ [IPA_3_0][IPA_CLIENT_Q6_CMD_PROD] = {
+ 6, IPA_v3_0_GROUP_IMM_CMD, false,
+ IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 6, 1, 18, 28, IPA_EE_Q6 } },
+ [IPA_3_0][IPA_CLIENT_Q6_DECOMP_PROD] = {
+ 7, IPA_v3_0_GROUP_Q6ZIP,
false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
- [IPA_3_0][IPA_CLIENT_Q6_DECOMP2_PROD] = {8, IPA_v3_0_GROUP_Q6ZIP,
+ QMB_MASTER_SELECT_DDR,
+ { 7, 2, 0, 0, IPA_EE_Q6 } },
+ [IPA_3_0][IPA_CLIENT_Q6_DECOMP2_PROD] = {
+ 8, IPA_v3_0_GROUP_Q6ZIP,
false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
- [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
- = {12, IPA_v3_0_GROUP_DMA, false,
+ QMB_MASTER_SELECT_DDR,
+ { 8, 3, 0, 0, IPA_EE_Q6 } },
+ [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
+ 12, IPA_v3_0_GROUP_DMA, false,
IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
- QMB_MASTER_SELECT_PCIE},
- [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
- = {13, IPA_v3_0_GROUP_DMA, false,
+ QMB_MASTER_SELECT_PCIE,
+ { 12, 9, 8, 16, IPA_EE_AP } },
+ [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
+ 13, IPA_v3_0_GROUP_DMA, false,
IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
- QMB_MASTER_SELECT_PCIE},
+ QMB_MASTER_SELECT_PCIE,
+ { 13, 10, 8, 16, IPA_EE_AP } },
/* Only for test purpose */
[IPA_3_0][IPA_CLIENT_TEST_PROD] = {
1, IPA_v3_0_GROUP_UL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 1, 3, 8, 16, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_TEST1_PROD] = {
1, IPA_v3_0_GROUP_UL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 1, 3, 8, 16, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_TEST2_PROD] = {
3, IPA_v3_0_GROUP_UL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 3, 5, 16, 32, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_TEST3_PROD] = {
12, IPA_v3_0_GROUP_UL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 12, 9, 8, 16, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_TEST4_PROD] = {
13, IPA_v3_0_GROUP_UL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 13, 10, 8, 16, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_HSIC1_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_WLAN1_CONS] = {
25, IPA_v3_0_GROUP_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 25, 4, 8, 8, IPA_EE_UC } },
[IPA_3_0][IPA_CLIENT_HSIC2_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_USB2_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_WLAN2_CONS] = {
27, IPA_v3_0_GROUP_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 27, 4, 8, 8, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_HSIC3_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_USB3_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_WLAN3_CONS] = {
28, IPA_v3_0_GROUP_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 28, 13, 8, 8, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_HSIC4_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_USB4_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_WLAN4_CONS] = {
29, IPA_v3_0_GROUP_DL, false,
- IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 29, 14, 8, 8, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_HSIC5_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_USB_CONS] = {
26, IPA_v3_0_GROUP_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 26, 12, 8, 8, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_USB_DPL_CONS] = {
17, IPA_v3_0_GROUP_DPL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 17, 2, 8, 12, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_A2_EMBEDDED_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_A2_TETHERED_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_A5_LAN_WAN_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_APPS_LAN_CONS] = {
15, IPA_v3_0_GROUP_UL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 15, 7, 8, 12, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_APPS_WAN_CONS] = {
16, IPA_v3_0_GROUP_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 16, 8, 8, 12, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_ODU_EMB_CONS] = {
23, IPA_v3_0_GROUP_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 23, 1, 8, 8, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_ODU_TETH_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_0][IPA_CLIENT_MHI_CONS] = {
23, IPA_v3_0_GROUP_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_PCIE},
+ QMB_MASTER_SELECT_PCIE,
+ { 23, 1, 8, 8, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_Q6_LAN_CONS] = {
19, IPA_v3_0_GROUP_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 19, 6, 8, 12, IPA_EE_Q6 } },
[IPA_3_0][IPA_CLIENT_Q6_WAN_CONS] = {
18, IPA_v3_0_GROUP_UL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 18, 5, 8, 12, IPA_EE_Q6 } },
[IPA_3_0][IPA_CLIENT_Q6_DUN_CONS] = {
- 30, IPA_v3_0_GROUP_DIAG,
- false, IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
- [IPA_3_0][IPA_CLIENT_Q6_DECOMP_CONS]
- = {21, IPA_v3_0_GROUP_Q6ZIP, false,
+ 30, IPA_v3_0_GROUP_DIAG, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
- [IPA_3_0][IPA_CLIENT_Q6_DECOMP2_CONS]
- = {4, IPA_v3_0_GROUP_Q6ZIP, false,
+ QMB_MASTER_SELECT_DDR,
+ { 30, 7, 4, 4, IPA_EE_Q6 } },
+ [IPA_3_0][IPA_CLIENT_Q6_DECOMP_CONS] = {
+ 21, IPA_v3_0_GROUP_Q6ZIP, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
- [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
- = {28, IPA_v3_0_GROUP_DMA, false,
+ QMB_MASTER_SELECT_DDR,
+ { 21, 8, 4, 4, IPA_EE_Q6 } },
+ [IPA_3_0][IPA_CLIENT_Q6_DECOMP2_CONS] = {
+ 4, IPA_v3_0_GROUP_Q6ZIP, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_PCIE},
- [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
- = {29, IPA_v3_0_GROUP_DMA, false,
+ QMB_MASTER_SELECT_DDR,
+ { 4, 9, 4, 4, IPA_EE_Q6 } },
+ [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
+ 28, IPA_v3_0_GROUP_DMA, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_PCIE},
+ QMB_MASTER_SELECT_PCIE,
+ { 28, 13, 8, 8, IPA_EE_AP } },
+ [IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
+ 29, IPA_v3_0_GROUP_DMA, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE,
+ { 29, 14, 8, 8, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = IPA_CLIENT_NOT_USED,
/* Only for test purpose */
[IPA_3_0][IPA_CLIENT_TEST_CONS] = {
26, IPA_v3_0_GROUP_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 26, 12, 8, 8, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_TEST1_CONS] = {
26, IPA_v3_0_GROUP_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 26, 12, 8, 8, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_TEST2_CONS] = {
27, IPA_v3_0_GROUP_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 27, 4, 8, 8, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_TEST3_CONS] = {
28, IPA_v3_0_GROUP_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 28, 13, 8, 8, IPA_EE_AP } },
[IPA_3_0][IPA_CLIENT_TEST4_CONS] = {
29, IPA_v3_0_GROUP_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 29, 14, 8, 8, IPA_EE_AP } },
+
/* IPA_3_5_1 */
[IPA_3_5_1][IPA_CLIENT_HSIC1_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_WLAN1_PROD] = {
- 7, IPA_v3_5_1_GROUP_UL_DL,
- true,
+ 7, IPA_v3_5_GROUP_UL_DL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 7, 1, 8, 16, IPA_EE_UC } },
[IPA_3_5_1][IPA_CLIENT_HSIC2_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_USB2_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_HSIC3_PROD] = IPA_CLIENT_NOT_USED,
@@ -453,244 +501,169 @@
[IPA_3_5_1][IPA_CLIENT_USB4_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_HSIC5_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_USB_PROD] = {
- 0, IPA_v3_5_1_GROUP_UL_DL,
- true,
+ 0, IPA_v3_5_GROUP_UL_DL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 0, 0, 8, 16, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_UC_USB_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_A2_EMBEDDED_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_A2_TETHERED_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_APPS_LAN_WAN_PROD] = {
- 8, IPA_v3_5_1_GROUP_UL_DL,
- true,
+ 8, IPA_v3_5_GROUP_UL_DL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 8, 7, 8, 16, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_APPS_CMD_PROD] = {
- 5, IPA_v3_5_1_GROUP_UL_DL,
- false,
+ 5, IPA_v3_5_GROUP_UL_DL, false,
IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 5, 4, 20, 23, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_ODU_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_MHI_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_Q6_LAN_PROD] = {
- 3, IPA_v3_5_1_GROUP_UL_DL,
- true, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR },
- [IPA_3_5_1][IPA_CLIENT_Q6_WAN_PROD] = {
- 6, IPA_v3_5_1_GROUP_UL_DL,
- true, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
- [IPA_3_5_1][IPA_CLIENT_Q6_CMD_PROD]
- = {4, IPA_v3_5_1_GROUP_UL_DL, false,
+ 3, IPA_v3_5_GROUP_UL_DL, true,
IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 3, 0, 16, 32, IPA_EE_Q6 } },
+ [IPA_3_5_1][IPA_CLIENT_Q6_WAN_PROD] = {
+ 6, IPA_v3_5_GROUP_UL_DL, true,
+ IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 6, 4, 12, 30, IPA_EE_Q6 } },
+ [IPA_3_5_1][IPA_CLIENT_Q6_CMD_PROD] = {
+ 4, IPA_v3_5_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 4, 1, 20, 23, IPA_EE_Q6 } },
[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP2_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = IPA_CLIENT_NOT_USED,
/* Only for test purpose */
[IPA_3_5_1][IPA_CLIENT_TEST_PROD] = {
- 0, IPA_v3_5_1_GROUP_UL_DL, true,
+ 0, IPA_v3_5_GROUP_UL_DL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 0, 0, 8, 16, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_TEST1_PROD] = {
- 0, IPA_v3_5_1_GROUP_UL_DL, true,
+ 0, IPA_v3_5_GROUP_UL_DL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 0, 0, 8, 16, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_TEST2_PROD] = {
- 2, IPA_v3_5_1_GROUP_UL_DL, true,
+ 2, IPA_v3_5_GROUP_UL_DL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 2, 3, 16, 32, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_TEST3_PROD] = {
- 4, IPA_v3_5_1_GROUP_UL_DL, true,
+ 4, IPA_v3_5_GROUP_UL_DL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 4, 1, 20, 23, IPA_EE_Q6 } },
[IPA_3_5_1][IPA_CLIENT_TEST4_PROD] = {
- 1, IPA_v3_5_1_GROUP_UL_DL, true,
+ 1, IPA_v3_5_GROUP_UL_DL, true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 1, 0, 8, 16, IPA_EE_UC } },
[IPA_3_5_1][IPA_CLIENT_HSIC1_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_WLAN1_CONS] = {
- 16, IPA_v3_5_1_GROUP_UL_DL,
- false,
+ 16, IPA_v3_5_GROUP_UL_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 16, 3, 8, 8, IPA_EE_UC } },
[IPA_3_5_1][IPA_CLIENT_HSIC2_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_USB2_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_WLAN2_CONS] = {
- 18, IPA_v3_5_1_GROUP_UL_DL, false,
+ 18, IPA_v3_5_GROUP_UL_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 18, 9, 8, 8, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_HSIC3_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_USB3_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_WLAN3_CONS] = {
- 19, IPA_v3_5_1_GROUP_UL_DL, false,
+ 19, IPA_v3_5_GROUP_UL_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 19, 10, 8, 8, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_HSIC4_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_USB4_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_WLAN4_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_HSIC5_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_USB_CONS] = {
- 17, IPA_v3_5_1_GROUP_UL_DL,
- false,
+ 17, IPA_v3_5_GROUP_UL_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 17, 8, 8, 8, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_USB_DPL_CONS] = {
- 11, IPA_v3_5_1_GROUP_UL_DL,
- false,
+ 11, IPA_v3_5_GROUP_UL_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 11, 2, 4, 6, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_A2_EMBEDDED_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_A2_TETHERED_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_A5_LAN_WAN_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_APPS_LAN_CONS] = {
- 9, IPA_v3_5_1_GROUP_UL_DL,
- false,
+ 9, IPA_v3_5_GROUP_UL_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 9, 5, 8, 12, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_APPS_WAN_CONS] = {
- 10, IPA_v3_5_1_GROUP_UL_DL,
- false,
+ 10, IPA_v3_5_GROUP_UL_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 10, 6, 8, 12, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_ODU_EMB_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_ODU_TETH_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_MHI_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_Q6_LAN_CONS] = {
- 13, IPA_v3_5_1_GROUP_UL_DL,
- false,
+ 13, IPA_v3_5_GROUP_UL_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 13, 3, 8, 12, IPA_EE_Q6 } },
[IPA_3_5_1][IPA_CLIENT_Q6_WAN_CONS] = {
- 12, IPA_v3_5_1_GROUP_UL_DL,
- false,
+ 12, IPA_v3_5_GROUP_UL_DL, false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
- [IPA_3_5_1][IPA_CLIENT_Q6_DUN_CONS] = IPA_CLIENT_NOT_USED,
- [IPA_3_5_1][IPA_CLIENT_Q6_DECOMP_CONS] = IPA_CLIENT_NOT_USED,
- [IPA_3_5_1][IPA_CLIENT_Q6_DECOMP2_CONS] = IPA_CLIENT_NOT_USED,
- [IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = IPA_CLIENT_NOT_USED,
+ QMB_MASTER_SELECT_DDR,
+ { 12, 2, 8, 12, IPA_EE_Q6 } },
+ [IPA_3_5_1][IPA_CLIENT_Q6_DUN_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_5_1][IPA_CLIENT_Q6_DECOMP_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_5_1][IPA_CLIENT_Q6_DECOMP2_CONS] = IPA_CLIENT_NOT_USED,
+ [IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = IPA_CLIENT_NOT_USED,
[IPA_3_5_1][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = IPA_CLIENT_NOT_USED,
/* Only for test purpose */
[IPA_3_5_1][IPA_CLIENT_TEST_CONS] = {
- 17, IPA_v3_5_1_GROUP_UL_DL,
+ 17, IPA_v3_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 17, 8, 8, 8, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_TEST1_CONS] = {
- 17, IPA_v3_5_1_GROUP_UL_DL,
+ 17, IPA_v3_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 17, 8, 8, 8, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_TEST2_CONS] = {
- 18, IPA_v3_5_1_GROUP_UL_DL,
+ 18, IPA_v3_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 18, 9, 8, 8, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_TEST3_CONS] = {
- 19, IPA_v3_5_1_GROUP_UL_DL,
+ 19, IPA_v3_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
+ QMB_MASTER_SELECT_DDR,
+ { 19, 10, 8, 8, IPA_EE_AP } },
[IPA_3_5_1][IPA_CLIENT_TEST4_CONS] = {
- 11, IPA_v3_5_1_GROUP_UL_DL,
+ 11, IPA_v3_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
- QMB_MASTER_SELECT_DDR},
-};
-
-enum ipa_ees {
- IPA_EE_AP = 0,
- IPA_EE_Q6 = 1,
- IPA_EE_UC = 3,
-};
-
-static struct ipa_gsi_ep_config
- ipa_gsi_ep_info[IPA_VER_MAX][IPA3_MAX_NUM_PIPES] = {
- /* IPA_3_0 - valid also for IPAv3.1 */
- [IPA_3_0] = {
- /* {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee} */
- {0, 0, 8, 16, IPA_EE_AP},
- {1, 3, 8, 16, IPA_EE_AP},
- {3, 5, 16, 32, IPA_EE_AP},
- {4, 9, 4, 4, IPA_EE_Q6},
- {5, 0, 16, 32, IPA_EE_Q6},
- {6, 1, 18, 28, IPA_EE_Q6},
- {7, 2, 0, 0, IPA_EE_Q6},
- {8, 3, 0, 0, IPA_EE_Q6},
- {9, 4, 8, 12, IPA_EE_Q6},
- {10, 1, 8, 16, IPA_EE_UC},
- {12, 9, 8, 16, IPA_EE_AP},
- {13, 10, 8, 16, IPA_EE_AP},
- {14, 11, 8, 16, IPA_EE_AP},
- {15, 7, 8, 12, IPA_EE_AP},
- {16, 8, 8, 12, IPA_EE_AP},
- {17, 2, 8, 12, IPA_EE_AP},
- {18, 5, 8, 12, IPA_EE_Q6},
- {19, 6, 8, 12, IPA_EE_Q6},
- {21, 8, 4, 4, IPA_EE_Q6},
- {22, 6, 18, 28, IPA_EE_AP},
- {23, 1, 8, 8, IPA_EE_AP},
- {25, 4, 8, 8, IPA_EE_UC},
- {26, 12, 8, 8, IPA_EE_AP},
- {27, 4, 8, 8, IPA_EE_AP},
- {28, 13, 8, 8, IPA_EE_AP},
- {29, 14, 8, 8, IPA_EE_AP},
- {30, 7, 4, 4, IPA_EE_Q6},
- {-1, -1, -1, -1, -1}
- },
- [IPA_3_5] = {
- /* {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee} */
- {0, 7, 8, 16, IPA_EE_AP},
- {1, 0, 8, 16, IPA_EE_UC},
- {2, 3, 16, 32, IPA_EE_AP},
- {3, 0, 16, 32, IPA_EE_Q6},
- {4, 1, 20, 23, IPA_EE_Q6},
- {5, 4, 20, 23, IPA_EE_AP},
- {6, 4, 12, 30, IPA_EE_Q6},
- {7, 1, 8, 16, IPA_EE_UC},
- {8, 9, 8, 16, IPA_EE_AP},
- {9, 5, 8, 12, IPA_EE_AP},
- {10, 6, 8, 12, IPA_EE_AP},
- {11, 2, 4, 6, IPA_EE_AP},
- {12, 2, 8, 12, IPA_EE_Q6},
- {13, 3, 8, 12, IPA_EE_Q6},
- {14, 10, 4, 6, IPA_EE_AP},
- {15, 2, 8, 8, IPA_EE_UC},
- {16, 3, 8, 8, IPA_EE_UC},
- {17, 11, 8, 8, IPA_EE_AP},
- {18, 12, 8, 8, IPA_EE_AP},
- {19, 13, 8, 8, IPA_EE_AP},
- {-1, -1, -1, -1, -1}
- },
- [IPA_3_5_1] = {
- /* {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee} */
- {0, 0, 8, 16, IPA_EE_AP},
- {1, 0, 8, 16, IPA_EE_UC},
- {2, 3, 16, 32, IPA_EE_AP},
- {3, 0, 16, 32, IPA_EE_Q6},
- {4, 1, 20, 23, IPA_EE_Q6},
- {5, 4, 20, 23, IPA_EE_AP},
- {6, 4, 12, 30, IPA_EE_Q6},
- {7, 1, 8, 16, IPA_EE_UC},
- {8, 7, 8, 16, IPA_EE_AP},
- {9, 5, 8, 12, IPA_EE_AP},
- {10, 6, 8, 12, IPA_EE_AP},
- {11, 2, 4, 6, IPA_EE_AP},
- {12, 2, 8, 12, IPA_EE_Q6},
- {13, 3, 8, 12, IPA_EE_Q6},
- {14, 5, 8, 8, IPA_EE_Q6},
- {15, 2, 8, 8, IPA_EE_UC},
- {16, 3, 8, 8, IPA_EE_UC},
- {17, 8, 8, 8, IPA_EE_AP},
- {18, 9, 8, 8, IPA_EE_AP},
- {19, 10, 8, 8, IPA_EE_AP},
- {-1, -1, -1, -1, -1}
- },
+ QMB_MASTER_SELECT_DDR,
+ { 11, 2, 4, 6, IPA_EE_AP } },
};
static struct msm_bus_vectors ipa_init_vectors_v3_0[] = {
@@ -1253,27 +1226,20 @@
/**
* ipa3_get_gsi_ep_info() - provide gsi ep information
- * @ipa_ep_idx: IPA endpoint index
+ * @client: IPA client value
*
* Return value: pointer to ipa_gsi_ep_info
*/
-struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(int ipa_ep_idx)
+const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
+ (enum ipa_client_type client)
{
- int i;
- u8 hw_index;
-
- hw_index = ipa3_get_hw_type_index();
-
- for (i = 0; ; i++) {
- if (ipa_gsi_ep_info[hw_index][i].ipa_ep_num < 0)
- break;
-
- if (ipa_gsi_ep_info[hw_index][i].ipa_ep_num ==
- ipa_ep_idx)
- return &(ipa_gsi_ep_info[hw_index][i]);
+ if (client >= IPA_CLIENT_MAX || client < 0) {
+ IPAERR("Bad client number! client =%d\n", client);
+ return NULL;
}
- return NULL;
+ return &(ipa3_ep_mapping[ipa3_get_hw_type_index()]
+ [client].ipa_gsi_ep_info);
}
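
For illustration, a hedged sketch of the reworked lookup in use; the chosen client and the wrapper function are assumptions, not part of this patch. Callers now pass an ipa_client_type instead of an endpoint index, and must handle a NULL return for out-of-range clients:

	/* Illustrative only: look up GSI endpoint info by client type */
	static int example_get_gsi_chan(void)
	{
		const struct ipa_gsi_ep_config *gsi_ep;

		gsi_ep = ipa3_get_gsi_ep_info(IPA_CLIENT_APPS_CMD_PROD);
		if (!gsi_ep) {
			IPAERR("no GSI ep info for client\n");
			return -EINVAL;
		}

		IPADBG("ep=%d chan=%d tlv=%d\n", gsi_ep->ipa_ep_num,
			gsi_ep->ipa_gsi_chan_num, gsi_ep->ipa_if_tlv);
		return 0;
	}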
/**
@@ -2339,103 +2305,6 @@
}
/**
- * ipa3_pipe_mem_init() - initialize the pipe memory
- * @start_ofst: start offset
- * @size: size
- *
- * Return value:
- * 0: success
- * -ENOMEM: no memory
- */
-int ipa3_pipe_mem_init(u32 start_ofst, u32 size)
-{
- int res;
- u32 aligned_start_ofst;
- u32 aligned_size;
- struct gen_pool *pool;
-
- if (!size) {
- IPAERR("no IPA pipe memory allocated\n");
- goto fail;
- }
-
- aligned_start_ofst = IPA_PIPE_MEM_START_OFST_ALIGNMENT(start_ofst);
- aligned_size = size - (aligned_start_ofst - start_ofst);
-
- IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
- start_ofst, aligned_start_ofst, size, aligned_size);
-
- /* allocation order of 8 i.e. 128 bytes, global pool */
- pool = gen_pool_create(8, -1);
- if (!pool) {
- IPAERR("Failed to create a new memory pool.\n");
- goto fail;
- }
-
- res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
- if (res) {
- IPAERR("Failed to add memory to IPA pipe pool\n");
- goto err_pool_add;
- }
-
- ipa3_ctx->pipe_mem_pool = pool;
- return 0;
-
-err_pool_add:
- gen_pool_destroy(pool);
-fail:
- return -ENOMEM;
-}
-
-/**
- * ipa3_pipe_mem_alloc() - allocate pipe memory
- * @ofst: offset
- * @size: size
- *
- * Return value:
- * 0: success
- */
-int ipa3_pipe_mem_alloc(u32 *ofst, u32 size)
-{
- u32 vaddr;
- int res = -1;
-
- if (!ipa3_ctx->pipe_mem_pool || !size) {
- IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
- ipa3_ctx->pipe_mem_pool);
- return res;
- }
-
- vaddr = gen_pool_alloc(ipa3_ctx->pipe_mem_pool, size);
-
- if (vaddr) {
- *ofst = vaddr;
- res = 0;
- IPADBG("size=%u ofst=%u\n", size, vaddr);
- } else {
- IPAERR("size=%u failed\n", size);
- }
-
- return res;
-}
-
-/**
- * ipa3_pipe_mem_free() - free pipe memory
- * @ofst: offset
- * @size: size
- *
- * Return value:
- * 0: success
- */
-int ipa3_pipe_mem_free(u32 ofst, u32 size)
-{
- IPADBG("size=%u ofst=%u\n", size, ofst);
- if (ipa3_ctx->pipe_mem_pool && size)
- gen_pool_free(ipa3_ctx->pipe_mem_pool, ofst, size);
- return 0;
-}
-
-/**
* ipa3_set_aggr_mode() - Set the aggregation mode which is a global setting
* @mode: [in] the desired aggregation mode for e.g. straight MBIM, QCNCM,
* etc
@@ -2534,28 +2403,6 @@
}
/**
- * ipa3_bam_reg_dump() - Dump selected BAM registers for IPA.
- * The API is right now used only to dump IPA registers towards USB.
- *
- * Function is rate limited to avoid flooding kernel log buffer
- */
-void ipa3_bam_reg_dump(void)
-{
- static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);
-
- if (__ratelimit(&_rs)) {
- IPA_ACTIVE_CLIENTS_INC_SIMPLE();
- pr_err("IPA BAM START\n");
- sps_get_bam_debug_info(ipa3_ctx->bam_handle, 93,
- (SPS_BAM_PIPE(ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS))
- |
- SPS_BAM_PIPE(ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD))),
- 0, 2);
- IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
- }
-}
-
-/**
* ipa3_init_mem_partition() - Reads IPA memory map from DTS, performs alignment
* checks and logs the fetched values.
*
@@ -3058,7 +2905,10 @@
if (atomic_dec_return(&comp->cnt) == 0)
kfree(comp);
- /* sleep for short period to ensure IPA wrote all packets to BAM */
+ /*
+ * sleep for short period to ensure IPA wrote all packets to
+ * the transport
+ */
usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
return 0;
@@ -3294,16 +3144,12 @@
}
/**
- * ipa3_get_transport_type()- Return ipa3_ctx->transport_prototype
+ * ipa3_get_transport_type() - get the transport type used by the IPA driver
*
* Return value: enum ipa_transport_type
*/
enum ipa_transport_type ipa3_get_transport_type(void)
{
- if (ipa3_ctx)
- return ipa3_ctx->transport_prototype;
-
- IPAERR("IPA driver has not been initialized\n");
return IPA_TRANSPORT_TYPE_GSI;
}
@@ -3379,9 +3225,9 @@
return -EPERM;
}
- api_ctrl->ipa_connect = ipa3_connect;
- api_ctrl->ipa_disconnect = ipa3_disconnect;
- api_ctrl->ipa_reset_endpoint = ipa3_reset_endpoint;
+ api_ctrl->ipa_connect = NULL;
+ api_ctrl->ipa_disconnect = NULL;
+ api_ctrl->ipa_reset_endpoint = NULL;
api_ctrl->ipa_clear_endpoint_delay = ipa3_clear_endpoint_delay;
api_ctrl->ipa_disable_endpoint = NULL;
api_ctrl->ipa_cfg_ep = ipa3_cfg_ep;
@@ -3497,7 +3343,7 @@
api_ctrl->ipa_add_interrupt_handler = ipa3_add_interrupt_handler;
api_ctrl->ipa_remove_interrupt_handler = ipa3_remove_interrupt_handler;
api_ctrl->ipa_restore_suspend_handler = ipa3_restore_suspend_handler;
- api_ctrl->ipa_bam_reg_dump = ipa3_bam_reg_dump;
+ api_ctrl->ipa_bam_reg_dump = NULL;
api_ctrl->ipa_get_ep_mapping = ipa3_get_ep_mapping;
api_ctrl->ipa_is_ready = ipa3_is_ready;
api_ctrl->ipa_proxy_clk_vote = ipa3_proxy_clk_vote;
@@ -3633,14 +3479,14 @@
case IPA_3_5_1:
if (src) {
switch (group_index) {
- case IPA_v3_5_1_GROUP_LWA_DL:
- case IPA_v3_5_1_GROUP_UL_DL:
+ case IPA_v3_5_GROUP_LWA_DL:
+ case IPA_v3_5_GROUP_UL_DL:
ipahal_write_reg_n_fields(
IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
n, val);
break;
- case IPA_v3_5_1_GROUP_DMA:
- case IPA_v3_5_1_GROUP_UC_RX_Q:
+ case IPA_v3_5_GROUP_DMA:
+ case IPA_v3_5_GROUP_UC_RX_Q:
ipahal_write_reg_n_fields(
IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
n, val);
@@ -3653,13 +3499,13 @@
}
} else {
switch (group_index) {
- case IPA_v3_5_1_GROUP_LWA_DL:
- case IPA_v3_5_1_GROUP_UL_DL:
+ case IPA_v3_5_GROUP_LWA_DL:
+ case IPA_v3_5_GROUP_UL_DL:
ipahal_write_reg_n_fields(
IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
n, val);
break;
- case IPA_v3_5_1_GROUP_DMA:
+ case IPA_v3_5_GROUP_DMA:
ipahal_write_reg_n_fields(
IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
n, val);
@@ -3739,10 +3585,10 @@
dst_grp_idx_max = IPA_v3_0_GROUP_MAX;
break;
case IPA_3_5_1:
- src_rsrc_type_max = IPA_v3_5_1_RSRC_GRP_TYPE_SRC_MAX;
- dst_rsrc_type_max = IPA_v3_5_1_RSRC_GRP_TYPE_DST_MAX;
- src_grp_idx_max = IPA_v3_5_1_SRC_GROUP_MAX;
- dst_grp_idx_max = IPA_v3_5_1_DST_GROUP_MAX;
+ src_rsrc_type_max = IPA_v3_5_RSRC_GRP_TYPE_SRC_MAX;
+ dst_rsrc_type_max = IPA_v3_5_RSRC_GRP_TYPE_DST_MAX;
+ src_grp_idx_max = IPA_v3_5_SRC_GROUP_MAX;
+ dst_grp_idx_max = IPA_v3_5_DST_GROUP_MAX;
break;
default:
IPAERR("invalid hw type index\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index 67b3cb3..053a581 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2553,16 +2553,19 @@
* @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
* @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
* @mem: mem object that points to DMA mem representing the hdr structure
+ * @atomic: should DMA allocation be executed with atomic flag
*/
int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
- u32 nhash_hdr_size, struct ipa_mem_buffer *mem)
+ u32 nhash_hdr_size, struct ipa_mem_buffer *mem, bool atomic)
{
int i;
u64 addr;
struct ipahal_fltrt_obj *obj;
+ gfp_t flag;
IPAHAL_DBG("Entry\n");
+ flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
if (!tbls_num || !nhash_hdr_size || !mem) {
@@ -2589,7 +2592,7 @@
mem->size = tbls_num * obj->tbl_hdr_width;
mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
- &mem->phys_base, GFP_KERNEL);
+ &mem->phys_base, flag);
if (!mem->base) {
IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
return -ENOMEM;
@@ -2615,18 +2618,22 @@
* should be: bit0->EP0, bit1->EP1
* If bitmap is zero -> create tbl without bitmap entry
* @mem: mem object that points to DMA mem representing the hdr structure
+ * @atomic: should DMA allocation be executed with atomic flag
*/
int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
- u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem)
+ u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem,
+ bool atomic)
{
int flt_spc;
u64 flt_bitmap;
int i;
u64 addr;
struct ipahal_fltrt_obj *obj;
+ gfp_t flag;
IPAHAL_DBG("Entry - ep_bitmap 0x%llx\n", ep_bitmap);
+ flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
if (!tbls_num || !nhash_hdr_size || !mem) {
@@ -2667,7 +2674,7 @@
if (ep_bitmap)
mem->size += obj->tbl_hdr_width;
mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
- &mem->phys_base, GFP_KERNEL);
+ &mem->phys_base, flag);
if (!mem->base) {
IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
return -ENOMEM;
@@ -2721,7 +2728,7 @@
params->nhash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev,
params->nhash_hdr.size,
¶ms->nhash_hdr.phys_base, GFP_KERNEL);
- if (!params->nhash_hdr.size) {
+ if (!params->nhash_hdr.base) {
IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
params->nhash_hdr.size);
goto nhash_alloc_fail;
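
A hedged sketch of the new atomic parameter in use; the wrapper below and its arguments are illustrative assumptions. A caller running in a context that cannot sleep passes true, so the header buffer is allocated with GFP_ATOMIC rather than GFP_KERNEL:

	/* Illustrative only: allocate an empty routing image, atomically
	 * when the caller cannot sleep.
	 */
	static int example_gen_rt_img(u32 tbls_num, u32 hash_sz,
		u32 nhash_sz, struct ipa_mem_buffer *mem, bool in_atomic)
	{
		int rc;

		rc = ipahal_rt_generate_empty_img(tbls_num, hash_sz,
			nhash_sz, mem, in_atomic);
		if (rc)
			IPAHAL_ERR("empty rt img alloc failed %d\n", rc);
		return rc;
	}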
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
index ee2704d6..3ee883b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -171,9 +171,10 @@
* @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
* @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
* @mem: mem object that points to DMA mem representing the hdr structure
+ * @atomic: should DMA allocation be executed with atomic flag
*/
int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
- u32 nhash_hdr_size, struct ipa_mem_buffer *mem);
+ u32 nhash_hdr_size, struct ipa_mem_buffer *mem, bool atomic);
/*
* ipahal_flt_generate_empty_img() - Generate empty filter image
@@ -185,9 +186,11 @@
* @ep_bitmap: Bitmap representing the EP that has flt tables. The format
* should be: bit0->EP0, bit1->EP1
* @mem: mem object that points to DMA mem representing the hdr structure
+ * @atomic: should DMA allocation be executed with atomic flag
*/
int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
- u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem);
+ u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem,
+ bool atomic);
/*
* ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
index 3ed3e44..297f932 100644
--- a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -87,8 +87,7 @@
* definition for more info)
*
* USB driver gets a pointer to a callback function (usb_notify_cb) and an
-* associated data. USB driver installs this callback function in the call to
-* ipa3_connect().
+* associated data.
*
* Builds IPA resource manager dependency graph.
*
diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile
index e1686e6..c20fd2b 100644
--- a/drivers/platform/msm/ipa/test/Makefile
+++ b/drivers/platform/msm/ipa/test/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o
-ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o
+ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o
diff --git a/drivers/platform/msm/ipa/test/ipa_test_dma.c b/drivers/platform/msm/ipa/test/ipa_test_dma.c
new file mode 100644
index 0000000..78393a3
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_test_dma.c
@@ -0,0 +1,931 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa.h>
+#include "../ipa_v3/ipa_i.h"
+#include "ipa_ut_framework.h"
+
+#define IPA_TEST_DMA_WQ_NAME_BUFF_SZ 64
+#define IPA_TEST_DMA_MT_TEST_NUM_WQ 500
+#define IPA_TEST_DMA_MEMCPY_BUFF_SIZE 16384
+#define IPA_TEST_DMA_MAX_PKT_SIZE 0xFF00
+#define IPA_DMA_TEST_LOOP_NUM 1000
+#define IPA_DMA_TEST_INT_LOOP_NUM 50
+#define IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM 128
+#define IPA_DMA_RUN_TEST_UNIT_IN_LOOP(test_unit, iters, rc, args...) \
+ do { \
+ int __i; \
+ for (__i = 0; __i < iters; __i++) { \
+ IPA_UT_LOG(#test_unit " START iter %d\n", __i); \
+ rc = test_unit(args); \
+ if (!rc) \
+ continue; \
+ IPA_UT_LOG(#test_unit " failed %d\n", rc); \
+ break; \
+ } \
+ } while (0)
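
A usage sketch of the macro, mirroring the invocations further down in this file (the wrapper function itself is illustrative): the arguments are the test-unit function, the iteration count, an rc lvalue that receives the first failing return code (left 0 if every iteration passes), and finally the unit's own arguments:

	/* Illustrative only: run the sync memcpy unit in a loop,
	 * stopping at the first failure.
	 */
	static int example_run_sync_in_loop(void)
	{
		int rc;

		IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_sync,
			IPA_DMA_TEST_INT_LOOP_NUM, rc,
			IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false);
		return rc;
	}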
+
+/**
+ * struct ipa_test_dma_async_user_data - user_data structure for async memcpy
+ * @src_mem: source memory buffer
+ * @dest_mem: destination memory buffer
+ * @call_serial_number: ID of the caller
+ * @copy_done: Completion object
+ */
+struct ipa_test_dma_async_user_data {
+ struct ipa_mem_buffer src_mem;
+ struct ipa_mem_buffer dest_mem;
+ int call_serial_number;
+ struct completion copy_done;
+};
+
+/**
+ * ipa_test_dma_setup() - Suite setup function
+ */
+static int ipa_test_dma_setup(void **ppriv)
+{
+ int rc;
+
+ IPA_UT_DBG("Start Setup\n");
+
+ if (!ipa3_ctx) {
+ IPA_UT_ERR("No IPA ctx\n");
+ return -EINVAL;
+ }
+
+ rc = ipa_dma_init();
+ if (rc)
+ IPA_UT_ERR("Fail to init ipa_dma - return code %d\n", rc);
+ else
+ IPA_UT_DBG("ipa_dma_init() Completed successfully!\n");
+
+ *ppriv = NULL;
+
+ return rc;
+}
+
+/**
+ * ipa_test_dma_teardown() - Suite teardown function
+ */
+static int ipa_test_dma_teardown(void *priv)
+{
+ IPA_UT_DBG("Start Teardown\n");
+ ipa_dma_destroy();
+ return 0;
+}
+
+static int ipa_test_dma_alloc_buffs(struct ipa_mem_buffer *src,
+ struct ipa_mem_buffer *dest,
+ int size)
+{
+ int i;
+ static int val = 1;
+ int rc;
+
+ val++;
+ src->size = size;
+ src->base = dma_alloc_coherent(ipa3_ctx->pdev, src->size,
+ &src->phys_base, GFP_KERNEL);
+ if (!src->base) {
+ IPA_UT_LOG("fail to alloc dma mem %d bytes\n", size);
+ IPA_UT_TEST_FAIL_REPORT("fail to alloc dma mem");
+ return -ENOMEM;
+ }
+
+ dest->size = size;
+ dest->base = dma_alloc_coherent(ipa3_ctx->pdev, dest->size,
+ &dest->phys_base, GFP_KERNEL);
+ if (!dest->base) {
+ IPA_UT_LOG("fail to alloc dma mem %d bytes\n", size);
+ IPA_UT_TEST_FAIL_REPORT("fail to alloc dma mem");
+ rc = -ENOMEM;
+ goto fail_alloc_dest;
+ }
+
+ memset(dest->base, 0, dest->size);
+ for (i = 0; i < src->size; i++)
+ memset(src->base + i, (val + i) & 0xFF, 1);
+ rc = memcmp(dest->base, src->base, dest->size);
+ if (rc == 0) {
+ IPA_UT_LOG("dest & src buffers are equal\n");
+ IPA_UT_TEST_FAIL_REPORT("dest & src buffers are equal");
+ rc = -EFAULT;
+ goto fail_buf_cmp;
+ }
+
+ return 0;
+
+fail_buf_cmp:
+ dma_free_coherent(ipa3_ctx->pdev, dest->size, dest->base,
+ dest->phys_base);
+fail_alloc_dest:
+ dma_free_coherent(ipa3_ctx->pdev, src->size, src->base,
+ src->phys_base);
+ return rc;
+}
+
+static void ipa_test_dma_destroy_buffs(struct ipa_mem_buffer *src,
+ struct ipa_mem_buffer *dest)
+{
+ dma_free_coherent(ipa3_ctx->pdev, src->size, src->base,
+ src->phys_base);
+ dma_free_coherent(ipa3_ctx->pdev, dest->size, dest->base,
+ dest->phys_base);
+}
+
+/**
+ * ipa_test_dma_memcpy_sync() - memcpy in sync mode
+ *
+ * @size: buffer size
+ * @expect_fail: test expects the memcpy to fail
+ *
+ * To be run during tests
+ * 1. Alloc src and dst buffers
+ * 2. sync memcpy src to dst via dma
+ * 3. compare src and dst if memcpy succeeded as expected
+ */
+static int ipa_test_dma_memcpy_sync(int size, bool expect_fail)
+{
+ int rc = 0;
+ int i;
+ struct ipa_mem_buffer src_mem;
+ struct ipa_mem_buffer dest_mem;
+ u8 *src;
+ u8 *dest;
+
+ rc = ipa_test_dma_alloc_buffs(&src_mem, &dest_mem, size);
+ if (rc) {
+ IPA_UT_LOG("fail to alloc buffers\n");
+ IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers");
+ return rc;
+ }
+
+ rc = ipa_dma_sync_memcpy(dest_mem.phys_base, src_mem.phys_base, size);
+ if (!expect_fail && rc) {
+ IPA_UT_LOG("fail to sync memcpy - rc = %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("sync memcpy failed");
+ goto free_buffs;
+ }
+ if (expect_fail && !rc) {
+ IPA_UT_LOG("sync memcpy succeeded while expected to fail\n");
+ IPA_UT_TEST_FAIL_REPORT(
+ "sync memcpy succeeded while expected to fail");
+ rc = -EFAULT;
+ goto free_buffs;
+ }
+
+ if (!rc) {
+ /* if memcpy succeeded, compare the buffers */
+ rc = memcmp(dest_mem.base, src_mem.base, size);
+ if (rc) {
+ IPA_UT_LOG("BAD memcpy - buffs are not equals\n");
+ IPA_UT_TEST_FAIL_REPORT(
+ "BAD memcpy - buffs are not equals");
+ src = src_mem.base;
+ dest = dest_mem.base;
+ for (i = 0; i < size; i++) {
+ if (*(src + i) != *(dest + i)) {
+ IPA_UT_LOG("byte: %d 0x%x != 0x%x\n",
+ i, *(src + i), *(dest + i));
+ }
+ }
+ }
+ } else {
+ /* if memcpy failed as expected, update the rc */
+ rc = 0;
+ }
+
+free_buffs:
+ ipa_test_dma_destroy_buffs(&src_mem, &dest_mem);
+ return rc;
+}
+
+static void ipa_test_dma_async_memcpy_cb(void *comp_obj)
+{
+ struct completion *xfer_done;
+
+ if (!comp_obj) {
+ IPA_UT_ERR("Invalid Input\n");
+ return;
+ }
+ xfer_done = (struct completion *)comp_obj;
+ complete(xfer_done);
+}
+
+static void ipa_test_dma_async_memcpy_cb_user_data(void *user_param)
+{
+ int rc;
+ int i;
+ u8 *src;
+ u8 *dest;
+ struct ipa_test_dma_async_user_data *udata =
+ (struct ipa_test_dma_async_user_data *)user_param;
+
+ if (!udata) {
+ IPA_UT_ERR("Invalid user param\n");
+ return;
+ }
+
+ rc = memcmp(udata->dest_mem.base, udata->src_mem.base,
+ udata->src_mem.size);
+ if (rc) {
+ IPA_UT_LOG("BAD memcpy - buffs are not equal sn=%d\n",
+ udata->call_serial_number);
+ IPA_UT_TEST_FAIL_REPORT(
+ "BAD memcpy - buffs are not equal");
+ src = udata->src_mem.base;
+ dest = udata->dest_mem.base;
+ for (i = 0; i < udata->src_mem.size; i++) {
+ if (*(src + i) != *(dest + i)) {
+ IPA_UT_ERR("byte: %d 0x%x != 0x%x\n", i,
+ *(src + i), *(dest + i));
+ }
+ }
+ /* still signal completion so the waiting test does not hang */
+ complete(&(udata->copy_done));
+ return;
+ }
+
+ IPA_UT_LOG("Notify on async memcopy sn=%d\n",
+ udata->call_serial_number);
+ complete(&(udata->copy_done));
+}
+
+/**
+ * ipa_test_dma_memcpy_async() - memcpy in async mode
+ *
+ * @size: buffer size
+ * @expect_fail: test expects the memcpy to fail
+ *
+ * To be run during tests
+ * 1. Alloc src and dst buffers
+ * 2. async memcpy src to dst via dma and wait for completion
+ * 3. compare src and dst if memcpy succeeded as expected
+ */
+static int ipa_test_dma_memcpy_async(int size, bool expect_fail)
+{
+ int rc = 0;
+ int i;
+ struct ipa_mem_buffer src_mem;
+ struct ipa_mem_buffer dest_mem;
+ u8 *src;
+ u8 *dest;
+ struct completion xfer_done;
+
+ rc = ipa_test_dma_alloc_buffs(&src_mem, &dest_mem, size);
+ if (rc) {
+ IPA_UT_LOG("fail to alloc buffers\n");
+ IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers");
+ return rc;
+ }
+
+ init_completion(&xfer_done);
+ rc = ipa_dma_async_memcpy(dest_mem.phys_base, src_mem.phys_base, size,
+ ipa_test_dma_async_memcpy_cb, &xfer_done);
+ if (!expect_fail && rc) {
+ IPA_UT_LOG("fail to initiate async memcpy - rc=%d\n",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT("async memcpy initiate failed");
+ goto free_buffs;
+ }
+ if (expect_fail && !rc) {
+ IPA_UT_LOG("async memcpy succeeded while expected to fail\n");
+ IPA_UT_TEST_FAIL_REPORT(
+ "async memcpy succeeded while expected to fail");
+ rc = -EFAULT;
+ goto free_buffs;
+ }
+
+ if (!rc) {
+ /* if memcpy succeeded, compare the buffers */
+ wait_for_completion(&xfer_done);
+ rc = memcmp(dest_mem.base, src_mem.base, size);
+ if (rc) {
+ IPA_UT_LOG("BAD memcpy - buffs are not equals\n");
+ IPA_UT_TEST_FAIL_REPORT(
+ "BAD memcpy - buffs are not equals");
+ src = src_mem.base;
+ dest = dest_mem.base;
+ for (i = 0; i < size; i++) {
+ if (*(src + i) != *(dest + i)) {
+ IPA_UT_LOG("byte: %d 0x%x != 0x%x\n",
+ i, *(src + i), *(dest + i));
+ }
+ }
+ }
+ } else {
+ /* if memcpy failed as expected, update the rc */
+ rc = 0;
+ }
+
+free_buffs:
+ ipa_test_dma_destroy_buffs(&src_mem, &dest_mem);
+ return rc;
+}
+
+/**
+ * ipa_test_dma_sync_async_memcpy() - memcpy in sync and then async mode
+ *
+ * @size: buffer size
+ *
+ * To be run during tests
+ * 1. several sync memcpy calls in a row
+ * 2. several async memcpy calls -
+ * back-to-back (the next async copy is initiated once the previous completes)
+ */
+static int ipa_test_dma_sync_async_memcpy(int size)
+{
+ int rc;
+
+ IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_sync,
+ IPA_DMA_TEST_INT_LOOP_NUM, rc, size, false);
+ if (rc) {
+ IPA_UT_LOG("sync memcopy fail rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("sync memcopy fail");
+ return rc;
+ }
+
+ IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_async,
+ IPA_DMA_TEST_INT_LOOP_NUM, rc, size, false);
+ if (rc) {
+ IPA_UT_LOG("async memcopy fail rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("async memcopy fail");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: test control API - enable/disable dma
+ * 1. enable dma
+ * 2. disable dma
+ */
+static int ipa_test_dma_control_api(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: memcpy before dma enable
+ *
+ * 1. sync memcpy - should fail
+ * 2. async memcpy - should fail
+ */
+static int ipa_test_dma_memcpy_before_enable(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, true);
+ if (rc) {
+ IPA_UT_LOG("sync memcpy succeeded unexpectedly rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("sync memcpy succeeded unexpectedly");
+ return rc;
+ }
+
+ rc = ipa_test_dma_memcpy_async(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, true);
+ if (rc) {
+ IPA_UT_LOG("async memcpy succeeded unexpectedly rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("sync memcpy succeeded unexpectedly");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: Sync memory copy
+ *
+ * 1. dma enable
+ * 2. sync memcpy
+ * 3. dma disable
+ */
+static int ipa_test_dma_sync_memcpy(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false);
+ if (rc) {
+ IPA_UT_LOG("sync memcpy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("sync memcpy failed");
+ (void)ipa_dma_disable();
+ return rc;
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: Async memory copy
+ *
+ * 1. dma enable
+ * 2. async memcpy
+ * 3. dma disable
+ */
+static int ipa_test_dma_async_memcpy(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ rc = ipa_test_dma_memcpy_async(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false);
+ if (rc) {
+ IPA_UT_LOG("async memcpy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("async memcpy failed");
+ (void)ipa_dma_disable();
+ return rc;
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: Iteration of sync memory copy
+ *
+ * 1. dma enable
+ * 2. sync memcpy in a loop - one copy after another
+ * 3. dma disable
+ */
+static int ipa_test_dma_sync_memcpy_in_loop(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_sync,
+ IPA_DMA_TEST_LOOP_NUM, rc,
+ IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false);
+ if (rc) {
+ IPA_UT_LOG("Iterations of sync memcpy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("Iterations of sync memcpy failed");
+ (void)ipa_dma_disable();
+ return rc;
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: Iteration of async memory copy
+ *
+ * 1. dma enable
+ * 2. async memcpy in loop - back-to-back
+ * next async copy is initiated once the previous one completes
+ * 3. dma disable
+ */
+static int ipa_test_dma_async_memcpy_in_loop(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_async,
+ IPA_DMA_TEST_LOOP_NUM, rc,
+ IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false);
+ if (rc) {
+ IPA_UT_LOG("Iterations of async memcpy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("Iterations of async memcpy failed");
+ (void)ipa_dma_disable();
+ return rc;
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: Iteration of interleaved sync and async memory copy
+ *
+ * 1. dma enable
+ * 2. sync and async memcpy in loop - interleaved
+ * 3. dma disable
+ */
+static int ipa_test_dma_interleaved_sync_async_memcpy_in_loop(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_sync_async_memcpy,
+ IPA_DMA_TEST_INT_LOOP_NUM, rc,
+ IPA_TEST_DMA_MEMCPY_BUFF_SIZE);
+ if (rc) {
+ IPA_UT_LOG(
+ "Iterations of interleaved sync async memcpy failed rc=%d\n"
+ , rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "Iterations of interleaved sync async memcpy failed");
+ (void)ipa_dma_disable();
+ return rc;
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ return 0;
+}
+
+static atomic_t ipa_test_dma_mt_test_pass;
+
+static void ipa_test_dma_wrapper_test_one_sync(struct work_struct *work)
+{
+ int rc;
+
+ rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false);
+ if (rc) {
+ IPA_UT_LOG("fail sync memcpy from thread rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail sync memcpy from thread");
+ return;
+ }
+ atomic_inc(&ipa_test_dma_mt_test_pass);
+}
+
+static void ipa_test_dma_wrapper_test_one_async(struct work_struct *work)
+{
+ int rc;
+
+ rc = ipa_test_dma_memcpy_async(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false);
+ if (rc) {
+ IPA_UT_LOG("fail async memcpy from thread rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail async memcpy from thread");
+ return;
+ }
+ atomic_inc(&ipa_test_dma_mt_test_pass);
+}
+
+/**
+ * TEST: Multiple threads running sync and async mem copy
+ *
+ * 1. dma enable
+ * 2. In-loop
+ * 2.1 create wq for sync memcpy
+ * 2.2 create wq for async memcpy
+ * 2.3 queue sync memcpy work
+ * 2.4 queue async memcpy work
+ * 3. In-loop
+ * 3.1 flush and destroy wq sync
+ * 3.2 flush and destroy wq async
+ * 4. dma disable
+ */
+static int ipa_test_dma_mt_sync_async(void *priv)
+{
+ int rc;
+ int i;
+ static struct workqueue_struct *wq_sync[IPA_TEST_DMA_MT_TEST_NUM_WQ];
+ static struct workqueue_struct *wq_async[IPA_TEST_DMA_MT_TEST_NUM_WQ];
+ static struct work_struct work_async[IPA_TEST_DMA_MT_TEST_NUM_WQ];
+ static struct work_struct work_sync[IPA_TEST_DMA_MT_TEST_NUM_WQ];
+ char buff[IPA_TEST_DMA_WQ_NAME_BUFF_SZ];
+
+ memset(wq_sync, 0, sizeof(wq_sync));
+ memset(wq_async, 0, sizeof(wq_async));
+ memset(work_async, 0, sizeof(work_async));
+ memset(work_sync, 0, sizeof(work_sync));
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ atomic_set(&ipa_test_dma_mt_test_pass, 0);
+ for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) {
+ snprintf(buff, sizeof(buff), "ipa_test_dmaSwq%d", i);
+ wq_sync[i] = create_singlethread_workqueue(buff);
+ if (!wq_sync[i]) {
+ IPA_UT_ERR("failed to create sync wq#%d\n", i);
+ rc = -EFAULT;
+ goto fail_create_wq;
+ }
+ snprintf(buff, sizeof(buff), "ipa_test_dmaAwq%d", i);
+ wq_async[i] = create_singlethread_workqueue(buff);
+ if (!wq_async[i]) {
+ IPA_UT_ERR("failed to create async wq#%d\n", i);
+ rc = -EFAULT;
+ goto fail_create_wq;
+ }
+
+ INIT_WORK(&work_sync[i], ipa_test_dma_wrapper_test_one_sync);
+ queue_work(wq_sync[i], &work_sync[i]);
+ INIT_WORK(&work_async[i], ipa_test_dma_wrapper_test_one_async);
+ queue_work(wq_async[i], &work_async[i]);
+ }
+
+ for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) {
+ flush_workqueue(wq_sync[i]);
+ destroy_workqueue(wq_sync[i]);
+ flush_workqueue(wq_async[i]);
+ destroy_workqueue(wq_async[i]);
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ if ((2 * IPA_TEST_DMA_MT_TEST_NUM_WQ) !=
+ atomic_read(&ipa_test_dma_mt_test_pass)) {
+ IPA_UT_LOG(
+ "Multi-threaded sync/async memcopy failed passed=%d\n"
+ , atomic_read(&ipa_test_dma_mt_test_pass));
+ IPA_UT_TEST_FAIL_REPORT(
+ "Multi-threaded sync/async memcopy failed");
+ return -EFAULT;
+ }
+
+ return 0;
+
+fail_create_wq:
+ (void)ipa_dma_disable();
+ for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) {
+ if (wq_sync[i])
+ destroy_workqueue(wq_sync[i]);
+ if (wq_async[i])
+ destroy_workqueue(wq_async[i]);
+ }
+
+ return rc;
+}
+
+/**
+ * TEST: Several parallel async memory copy iterations
+ *
+ * 1. create several user_data structures - one per iteration
+ * 2. allocate buffs. Give slice for each iteration
+ * 3. iterations of async mem copy
+ * 4. wait for all to complete
+ * 5. dma disable
+ */
+static int ipa_test_dma_parallel_async_memcpy_in_loop(void *priv)
+{
+ int rc;
+ struct ipa_test_dma_async_user_data *udata;
+ struct ipa_mem_buffer all_src_mem;
+ struct ipa_mem_buffer all_dest_mem;
+ int i;
+ bool is_fail = false;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ udata = kzalloc(IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM *
+ sizeof(struct ipa_test_dma_async_user_data), GFP_KERNEL);
+ if (!udata) {
+ IPA_UT_ERR("fail allocate user_data array\n");
+ (void)ipa_dma_disable();
+ return -ENOMEM;
+ }
+
+ rc = ipa_test_dma_alloc_buffs(&all_src_mem, &all_dest_mem,
+ IPA_TEST_DMA_MEMCPY_BUFF_SIZE);
+ if (rc) {
+ IPA_UT_LOG("fail to alloc buffers\n");
+ IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers");
+ kfree(udata);
+ (void)ipa_dma_disable();
+ return rc;
+ }
+
+ for (i = 0 ; i < IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM ; i++) {
+ udata[i].src_mem.size =
+ IPA_TEST_DMA_MEMCPY_BUFF_SIZE /
+ IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM;
+ udata[i].src_mem.base = all_src_mem.base + i *
+ (IPA_TEST_DMA_MEMCPY_BUFF_SIZE /
+ IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM);
+ udata[i].src_mem.phys_base = all_src_mem.phys_base + i *
+ (IPA_TEST_DMA_MEMCPY_BUFF_SIZE /
+ IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM);
+
+ udata[i].dest_mem.size =
+ (IPA_TEST_DMA_MEMCPY_BUFF_SIZE /
+ IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM);
+ udata[i].dest_mem.base = all_dest_mem.base + i *
+ (IPA_TEST_DMA_MEMCPY_BUFF_SIZE /
+ IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM);
+ udata[i].dest_mem.phys_base = all_dest_mem.phys_base + i *
+ (IPA_TEST_DMA_MEMCPY_BUFF_SIZE /
+ IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM);
+
+ udata[i].call_serial_number = i + 1;
+ init_completion(&(udata[i].copy_done));
+ rc = ipa_dma_async_memcpy(udata[i].dest_mem.phys_base,
+ udata[i].src_mem.phys_base,
+ (IPA_TEST_DMA_MEMCPY_BUFF_SIZE /
+ IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM),
+ ipa_test_dma_async_memcpy_cb_user_data, &udata[i]);
+ if (rc) {
+ IPA_UT_LOG("async memcpy initiation fail i=%d rc=%d\n",
+ i, rc);
+ is_fail = true;
+ /* no callback will fire; complete to avoid blocking below */
+ complete(&(udata[i].copy_done));
+ }
+ }
+
+ for (i = 0; i < IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM ; i++)
+ wait_for_completion(&udata[i].copy_done);
+
+ ipa_test_dma_destroy_buffs(&all_src_mem, &all_dest_mem);
+ kfree(udata);
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ if (is_fail) {
+ IPA_UT_LOG("async memcopy failed\n");
+ IPA_UT_TEST_FAIL_REPORT("async memcopy failed");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: Sync memory copy
+ *
+ * 1. dma enable
+ * 2. sync memcpy with max packet size
+ * 3. dma disable
+ */
+static int ipa_test_dma_sync_memcpy_max_pkt_size(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MAX_PKT_SIZE, false);
+ if (rc) {
+ IPA_UT_LOG("sync memcpy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("sync memcpy failed");
+ (void)ipa_dma_disable();
+ return rc;
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Suite definition block */
+IPA_UT_DEFINE_SUITE_START(dma, "DMA for GSI",
+ ipa_test_dma_setup, ipa_test_dma_teardown)
+{
+ IPA_UT_ADD_TEST(control_api,
+ "Control API",
+ ipa_test_dma_control_api,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(memcpy_before_enable,
+ "Call memcpy before dma enable and expect it to fail",
+ ipa_test_dma_memcpy_before_enable,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(sync_memcpy,
+ "Sync memory copy",
+ ipa_test_dma_sync_memcpy,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(async_memcpy,
+ "Async memory copy",
+ ipa_test_dma_async_memcpy,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(sync_memcpy_in_loop,
+ "Several sync memory copy iterations",
+ ipa_test_dma_sync_memcpy_in_loop,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(async_memcpy_in_loop,
+ "Several async memory copy iterations",
+ ipa_test_dma_async_memcpy_in_loop,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(interleaved_sync_async_memcpy_in_loop,
+ "Several interleaved sync and async memory copy iterations",
+ ipa_test_dma_interleaved_sync_async_memcpy_in_loop,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(multi_threaded_multiple_sync_async_memcpy,
+ "Several multi-threaded sync and async memory copy iterations",
+ ipa_test_dma_mt_sync_async,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(parallel_async_memcpy_in_loop,
+ "Several parallel async memory copy iterations",
+ ipa_test_dma_parallel_async_memcpy_in_loop,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(sync_memcpy_max_pkt_size,
+ "Sync memory copy with max packet size",
+ ipa_test_dma_sync_memcpy_max_pkt_size,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+} IPA_UT_DEFINE_SUITE_END(dma);
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
index 944800f..4a9d3b0 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
+++ b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,7 @@
* No importance for order.
*/
IPA_UT_DECLARE_SUITE(mhi);
+IPA_UT_DECLARE_SUITE(dma);
IPA_UT_DECLARE_SUITE(example);
@@ -31,6 +32,7 @@
IPA_UT_DEFINE_ALL_SUITES_START
{
IPA_UT_REGISTER_SUITE(mhi),
+ IPA_UT_REGISTER_SUITE(dma),
IPA_UT_REGISTER_SUITE(example),
} IPA_UT_DEFINE_ALL_SUITES_END;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 75f820ca..27ff38f 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1583,7 +1583,7 @@
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
- struct zfcp_fsf_req *req = NULL;
+ struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1612,7 +1612,7 @@
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
- if (req && !IS_ERR(req))
+ if (!retval)
zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
return retval;
}
@@ -1638,7 +1638,7 @@
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
- struct zfcp_fsf_req *req = NULL;
+ struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1667,7 +1667,7 @@
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
- if (req && !IS_ERR(req))
+ if (!retval)
zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
return retval;
}
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 341ea32..792d3e7 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -50,9 +50,13 @@
static inline int aac_is_msix_mode(struct aac_dev *dev)
{
- u32 status;
+ u32 status = 0;
- status = src_readl(dev, MUnit.OMR);
+ if (dev->pdev->device == PMC_DEVICE_S6 ||
+ dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8) {
+ status = src_readl(dev, MUnit.OMR);
+ }
return (status & AAC_INT_MODE_MSIX);
}
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index e3b911c..91dfd58 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3929,6 +3929,7 @@
static const struct target_core_fabric_ops ibmvscsis_ops = {
.module = THIS_MODULE,
.name = "ibmvscsis",
+ .max_data_sg_nents = MAX_TXU / PAGE_SIZE,
.get_fabric_name = ibmvscsis_get_fabric_name,
.tpg_get_wwn = ibmvscsis_get_fabric_wwn,
.tpg_get_tag = ibmvscsis_get_tag,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index f84a608..8a7941b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -51,6 +51,7 @@
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
#include <linux/raid_class.h>
@@ -8706,6 +8707,8 @@
switch (hba_mpi_version) {
case MPI2_VERSION:
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
/* Use mpt2sas driver host template for SAS 2.0 HBA's */
shost = scsi_host_alloc(&mpt2sas_driver_template,
sizeof(struct MPT3SAS_ADAPTER));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 078d797..bea819e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1459,7 +1459,7 @@
/* Don't abort commands in adapter during EEH
* recovery as it's not accessible/responding.
*/
- if (!ha->flags.eeh_busy) {
+ if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
/* Get a reference to the sp and drop the lock.
* The reference ensures this sp->done() call
* - and not the call in qla2xxx_eh_abort() -
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 935f782..9318829 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1035,7 +1035,8 @@
bool is_mq = (rq->mq_ctx != NULL);
int error;
- BUG_ON(!rq->nr_phys_segments);
+ if (WARN_ON_ONCE(!rq->nr_phys_segments))
+ return -EINVAL;
error = scsi_init_sgtable(rq, &cmd->sdb);
if (error)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8251f6e..c05cf3b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1754,6 +1754,10 @@
return res;
iov_iter_truncate(&i, hp->dxfer_len);
+ if (!iov_iter_count(&i)) {
+ kfree(iov);
+ return -EINVAL;
+ }
res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
kfree(iov);
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
index aa7ff12..567f290 100644
--- a/drivers/slimbus/slimbus.c
+++ b/drivers/slimbus/slimbus.c
@@ -1390,8 +1390,10 @@
txn.mc = SLIM_MSG_MC_CONNECT_SINK;
buf[0] = pn;
buf[1] = ctrl->chans[ch].chan;
- if (la == SLIM_LA_MANAGER)
+ if (la == SLIM_LA_MANAGER) {
ctrl->ports[pn].flow = flow;
+ ctrl->ports[pn].ch = &ctrl->chans[ch].prop;
+ }
ret = slim_processtxn(ctrl, &txn, false);
if (!ret && la == SLIM_LA_MANAGER)
ctrl->ports[pn].state = SLIM_P_CFG;
@@ -1467,7 +1469,6 @@
ret = -EALREADY;
goto connect_src_err;
}
- ctrl->ports[pn].ch = &slc->prop;
ret = connect_port_ch(ctrl, chan, srch, SLIM_SRC);
if (!ret)
@@ -1522,16 +1523,15 @@
u8 la = SLIM_HDL_TO_LA(sinkh[j]);
u8 pn = SLIM_HDL_TO_PORT(sinkh[j]);
- if (la != SLIM_LA_MANAGER && flow != SLIM_SINK) {
+ if (la != SLIM_LA_MANAGER && flow != SLIM_SINK)
ret = -EINVAL;
- } else if (la == SLIM_LA_MANAGER &&
+ else if (la == SLIM_LA_MANAGER &&
(pn >= ctrl->nports ||
- ctrl->ports[pn].state != SLIM_P_UNCFG)) {
+ ctrl->ports[pn].state != SLIM_P_UNCFG))
ret = -EINVAL;
- } else {
- ctrl->ports[pn].ch = &slc->prop;
+ else
ret = connect_port_ch(ctrl, chan, sinkh[j], SLIM_SINK);
- }
+
if (ret) {
for (j = j - 1; j >= 0; j--)
disconnect_port_ch(ctrl, sinkh[j]);
@@ -2813,9 +2813,10 @@
struct slim_ich *slc = arr[i];
if (slc->state == SLIM_CH_ACTIVE ||
- slc->state == SLIM_CH_SUSPENDED)
+ slc->state == SLIM_CH_SUSPENDED) {
slc->offset = slc->newoff;
slc->interval = slc->newintr;
+ }
}
}
static void slim_chan_changes(struct slim_device *sb, bool revert)
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index aa51411..a072d35 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -525,6 +525,15 @@
for the platforms that use APRv2.
Say M if you want to enable this module.
+config MSM_CDSP_LOADER
+ tristate "CDSP loader support"
+ depends on MSM_GLINK
+ help
+ Enable CDSP image loader.
+ The CDSP loader brings CDSP out of reset
+ for platforms that have compute DSP.
+ Say M if you want to enable this module.
+
config MSM_AVTIMER
tristate "Avtimer Driver"
depends on MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
diff --git a/drivers/soc/qcom/qdsp6v2/Makefile b/drivers/soc/qcom/qdsp6v2/Makefile
index f3505ba..8c5b0d0 100644
--- a/drivers/soc/qcom/qdsp6v2/Makefile
+++ b/drivers/soc/qcom/qdsp6v2/Makefile
@@ -7,3 +7,4 @@
obj-$(CONFIG_MSM_QDSP6_SSR) += audio_ssr.o
obj-$(CONFIG_MSM_QDSP6_PDR) += audio_pdr.o
obj-$(CONFIG_MSM_QDSP6_NOTIFIER) += audio_notifier.o
+obj-$(CONFIG_MSM_CDSP_LOADER) += cdsp-loader.o
diff --git a/drivers/soc/qcom/qdsp6v2/cdsp-loader.c b/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
new file mode 100644
index 0000000..9bb4eb0
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2012-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/sysfs.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#define BOOT_CMD 1
+#define IMAGE_UNLOAD_CMD 0
+
+#define CDSP_SUBSYS_DOWN 0
+#define CDSP_SUBSYS_LOADED 1
+
+static ssize_t cdsp_boot_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count);
+
+struct cdsp_loader_private {
+ void *pil_h;
+ struct kobject *boot_cdsp_obj;
+ struct attribute_group *attr_group;
+};
+
+static struct kobj_attribute cdsp_boot_attribute =
+ __ATTR(boot, 0220, NULL, cdsp_boot_store);
+
+static struct attribute *attrs[] = {
+ &cdsp_boot_attribute.attr,
+ NULL,
+};
+
+static u32 cdsp_state = CDSP_SUBSYS_DOWN;
+static struct platform_device *cdsp_private;
+static void cdsp_loader_unload(struct platform_device *pdev);
+
+static int cdsp_loader_do(struct platform_device *pdev)
+{
+ struct cdsp_loader_private *priv = NULL;
+
+ int rc = 0;
+ const char *img_name;
+
+	if (!pdev) {
+		pr_err("%s: Platform device null\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev,
+			"%s: Device tree information missing\n", __func__);
+		rc = -ENODEV;
+		goto fail;
+	}
+
+ rc = of_property_read_string(pdev->dev.of_node,
+ "qcom,proc-img-to-load",
+ &img_name);
+ if (rc)
+ goto fail;
+
+ if (!strcmp(img_name, "cdsp")) {
+		/* cdsp_state starts out as CDSP_SUBSYS_DOWN (0). */
+ if (cdsp_state == CDSP_SUBSYS_DOWN) {
+ priv = platform_get_drvdata(pdev);
+			if (!priv) {
+				dev_err(&pdev->dev,
+					"%s: Private data get failed\n",
+					__func__);
+				rc = -EINVAL;
+				goto fail;
+			}
+
+ priv->pil_h = subsystem_get("cdsp");
+			if (IS_ERR(priv->pil_h)) {
+				dev_err(&pdev->dev, "%s: pil get failed\n",
+					__func__);
+				rc = PTR_ERR(priv->pil_h);
+				priv->pil_h = NULL;
+				goto fail;
+			}
+
+ /* Set the state of the CDSP.*/
+ cdsp_state = CDSP_SUBSYS_LOADED;
+ } else if (cdsp_state == CDSP_SUBSYS_LOADED) {
+ dev_dbg(&pdev->dev,
+ "%s: CDSP state = %x\n", __func__, cdsp_state);
+ }
+
+ dev_dbg(&pdev->dev, "%s: CDSP image is loaded\n", __func__);
+ return rc;
+	}
+
+	rc = -EINVAL;
+fail:
+ dev_err(&pdev->dev, "%s: CDSP image loading failed\n", __func__);
+ return rc;
+}
+
+static ssize_t cdsp_boot_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int boot = 0, ret = 0;
+
+	ret = sscanf(buf, "%d", &boot);
+	if (ret != 1) {
+		pr_err("%s: invalid argument for cdsp_loader\n", __func__);
+		return -EINVAL;
+	}
+
+ if (boot == BOOT_CMD) {
+ pr_debug("%s: going to call cdsp_loader_do\n", __func__);
+ cdsp_loader_do(cdsp_private);
+ } else if (boot == IMAGE_UNLOAD_CMD) {
+ pr_debug("%s: going to call adsp_unloader\n", __func__);
+ cdsp_loader_unload(cdsp_private);
+ }
+ return count;
+}
+
+static void cdsp_loader_unload(struct platform_device *pdev)
+{
+ struct cdsp_loader_private *priv = NULL;
+
+ priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ if (priv->pil_h) {
+ dev_dbg(&pdev->dev, "%s: calling subsystem put\n", __func__);
+ subsystem_put(priv->pil_h);
+ priv->pil_h = NULL;
+ }
+}
+
+static int cdsp_loader_init_sysfs(struct platform_device *pdev)
+{
+ int ret = -EINVAL;
+ struct cdsp_loader_private *priv = NULL;
+
+ cdsp_private = NULL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->pil_h = NULL;
+ priv->boot_cdsp_obj = NULL;
+ priv->attr_group = devm_kzalloc(&pdev->dev,
+ sizeof(*(priv->attr_group)),
+ GFP_KERNEL);
+ if (!priv->attr_group) {
+ dev_err(&pdev->dev, "%s: malloc attr_group failed\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error_return;
+ }
+
+ priv->attr_group->attrs = attrs;
+
+ priv->boot_cdsp_obj = kobject_create_and_add("boot_cdsp", kernel_kobj);
+ if (!priv->boot_cdsp_obj) {
+ dev_err(&pdev->dev, "%s: sysfs create and add failed\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error_return;
+ }
+
+ ret = sysfs_create_group(priv->boot_cdsp_obj, priv->attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: sysfs create group failed %d\n",
+ __func__, ret);
+ goto error_return;
+ }
+
+ cdsp_private = pdev;
+
+ return 0;
+
+error_return:
+
+ if (priv->boot_cdsp_obj) {
+ kobject_del(priv->boot_cdsp_obj);
+ priv->boot_cdsp_obj = NULL;
+ }
+
+ return ret;
+}
+
+static int cdsp_loader_remove(struct platform_device *pdev)
+{
+ struct cdsp_loader_private *priv = NULL;
+
+ priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return 0;
+
+ if (priv->pil_h) {
+ subsystem_put(priv->pil_h);
+ priv->pil_h = NULL;
+ }
+
+ if (priv->boot_cdsp_obj) {
+ sysfs_remove_group(priv->boot_cdsp_obj, priv->attr_group);
+ kobject_del(priv->boot_cdsp_obj);
+ priv->boot_cdsp_obj = NULL;
+ }
+
+ return 0;
+}
+
+static int cdsp_loader_probe(struct platform_device *pdev)
+{
+ int ret = cdsp_loader_init_sysfs(pdev);
+
+ if (ret != 0) {
+ dev_err(&pdev->dev, "%s: Error in initing sysfs\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id cdsp_loader_dt_match[] = {
+ { .compatible = "qcom,cdsp-loader" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cdsp_loader_dt_match);
+
+static struct platform_driver cdsp_loader_driver = {
+ .driver = {
+ .name = "cdsp-loader",
+ .owner = THIS_MODULE,
+ .of_match_table = cdsp_loader_dt_match,
+ },
+ .probe = cdsp_loader_probe,
+ .remove = cdsp_loader_remove,
+};
+
+static int __init cdsp_loader_init(void)
+{
+ return platform_driver_register(&cdsp_loader_driver);
+}
+module_init(cdsp_loader_init);
+
+static void __exit cdsp_loader_exit(void)
+{
+ platform_driver_unregister(&cdsp_loader_driver);
+}
+module_exit(cdsp_loader_exit);
+
+MODULE_DESCRIPTION("CDSP Loader module");
+MODULE_LICENSE("GPL v2");
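
For context, a minimal sketch of how userspace would drive the loader added above (illustrative only; the path follows from kobject_create_and_add("boot_cdsp", kernel_kobj) plus the "boot" attribute, and the values mirror BOOT_CMD/IMAGE_UNLOAD_CMD):

/* boot_cdsp.c - hypothetical userspace trigger for the CDSP loader */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/boot_cdsp/boot", O_WRONLY);

	if (fd < 0) {
		perror("open /sys/kernel/boot_cdsp/boot");
		return 1;
	}
	/* "1" == BOOT_CMD brings the CDSP out of reset; "0" unloads it */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
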
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 6b42348..ea9617c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -351,7 +351,15 @@
kfree(new);
return -EINVAL;
}
- BUG_ON(orig->se_lun_acl != NULL);
+ if (orig->se_lun_acl != NULL) {
+ pr_warn_ratelimited("Detected existing explicit"
+ " se_lun_acl->se_lun_group reference for %s"
+ " mapped_lun: %llu, failing\n",
+ nacl->initiatorname, mapped_lun);
+ mutex_unlock(&nacl->lun_entry_mutex);
+ kfree(new);
+ return -EINVAL;
+ }
rcu_assign_pointer(new->se_lun, lun);
rcu_assign_pointer(new->se_lun_acl, lun_acl);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 04f616b..aabd660 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -450,6 +450,7 @@
int *post_ret)
{
struct se_device *dev = cmd->se_dev;
+ sense_reason_t ret = TCM_NO_SENSE;
/*
* Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
@@ -457,9 +458,12 @@
* sent to the backend driver.
*/
spin_lock_irq(&cmd->t_state_lock);
- if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
+ if (cmd->transport_state & CMD_T_SENT) {
cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
*post_ret = 1;
+
+ if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
spin_unlock_irq(&cmd->t_state_lock);
@@ -469,7 +473,7 @@
*/
up(&dev->caw_sem);
- return TCM_NO_SENSE;
+ return ret;
}
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7dfefd6..767d1eb6 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -457,8 +457,20 @@
{
struct se_node_acl *nacl = container_of(kref,
struct se_node_acl, acl_kref);
+ struct se_portal_group *se_tpg = nacl->se_tpg;
- complete(&nacl->acl_free_comp);
+ if (!nacl->dynamic_stop) {
+ complete(&nacl->acl_free_comp);
+ return;
+ }
+
+ mutex_lock(&se_tpg->acl_node_mutex);
+ list_del(&nacl->acl_list);
+ mutex_unlock(&se_tpg->acl_node_mutex);
+
+ core_tpg_wait_for_nacl_pr_ref(nacl);
+ core_free_device_list_for_node(nacl, se_tpg);
+ kfree(nacl);
}
void target_put_nacl(struct se_node_acl *nacl)
@@ -499,12 +511,39 @@
void transport_free_session(struct se_session *se_sess)
{
struct se_node_acl *se_nacl = se_sess->se_node_acl;
+
/*
* Drop the se_node_acl->nacl_kref obtained from within
* core_tpg_get_initiator_node_acl().
*/
if (se_nacl) {
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
+ unsigned long flags;
+
se_sess->se_node_acl = NULL;
+
+ /*
+ * Also determine if we need to drop the extra ->cmd_kref if
+ * it had been previously dynamically generated, and
+ * the endpoint is not caching dynamic ACLs.
+ */
+ mutex_lock(&se_tpg->acl_node_mutex);
+ if (se_nacl->dynamic_node_acl &&
+ !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
+ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
+ if (list_empty(&se_nacl->acl_sess_list))
+ se_nacl->dynamic_stop = true;
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
+
+ if (se_nacl->dynamic_stop)
+ list_del(&se_nacl->acl_list);
+ }
+ mutex_unlock(&se_tpg->acl_node_mutex);
+
+ if (se_nacl->dynamic_stop)
+ target_put_nacl(se_nacl);
+
target_put_nacl(se_nacl);
}
if (se_sess->sess_cmd_map) {
@@ -518,16 +557,12 @@
void transport_deregister_session(struct se_session *se_sess)
{
struct se_portal_group *se_tpg = se_sess->se_tpg;
- const struct target_core_fabric_ops *se_tfo;
- struct se_node_acl *se_nacl;
unsigned long flags;
- bool drop_nacl = false;
if (!se_tpg) {
transport_free_session(se_sess);
return;
}
- se_tfo = se_tpg->se_tpg_tfo;
spin_lock_irqsave(&se_tpg->session_lock, flags);
list_del(&se_sess->sess_list);
@@ -535,33 +570,15 @@
se_sess->fabric_sess_ptr = NULL;
spin_unlock_irqrestore(&se_tpg->session_lock, flags);
- /*
- * Determine if we need to do extra work for this initiator node's
- * struct se_node_acl if it had been previously dynamically generated.
- */
- se_nacl = se_sess->se_node_acl;
-
- mutex_lock(&se_tpg->acl_node_mutex);
- if (se_nacl && se_nacl->dynamic_node_acl) {
- if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
- list_del(&se_nacl->acl_list);
- drop_nacl = true;
- }
- }
- mutex_unlock(&se_tpg->acl_node_mutex);
-
- if (drop_nacl) {
- core_tpg_wait_for_nacl_pr_ref(se_nacl);
- core_free_device_list_for_node(se_nacl, se_tpg);
- se_sess->se_node_acl = NULL;
- kfree(se_nacl);
- }
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->get_fabric_name());
/*
* If last kref is dropping now for an explicit NodeACL, awake sleeping
* ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
* removal context from within transport_free_session() code.
+ *
+ * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
+ * to release all remaining generate_node_acl=1 created ACL resources.
*/
transport_free_session(se_sess);
@@ -3086,7 +3103,6 @@
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
goto check_stop;
}
- cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
cmd->se_tfo->queue_tm_rsp(cmd);
@@ -3099,11 +3115,25 @@
struct se_cmd *cmd)
{
unsigned long flags;
+ bool aborted = false;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->transport_state |= CMD_T_ACTIVE;
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ aborted = true;
+ } else {
+ cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+ cmd->transport_state |= CMD_T_ACTIVE;
+ }
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ if (aborted) {
+		pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
+			" ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
+ cmd->se_tmr_req->ref_task_tag, cmd->tag);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return 0;
+ }
+
INIT_WORK(&cmd->work, target_tmr_work);
queue_work(cmd->se_dev->tmr_wq, &cmd->work);
return 0;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 094a144..18848ba 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -836,7 +836,7 @@
" CHECK_CONDITION -> sending response\n", rc);
ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
}
- target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
+ target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
}
sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 079e6b1a..3c48dea 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1470,6 +1470,7 @@
return;
}
+ dbg_event(0xFF, "RestartUSB", 0);
/* Reset active USB connection */
dwc3_resume_work(&mdwc->resume_work);
@@ -1480,6 +1481,8 @@
if (!timeout) {
dev_dbg(mdwc->dev,
"Not in LPM after disconnect, forcing suspend...\n");
+ dbg_event(0xFF, "ReStart:RT SUSP",
+ atomic_read(&mdwc->dev->power.usage_count));
pm_runtime_suspend(mdwc->dev);
}
@@ -1866,6 +1869,8 @@
}
if (!mdwc->init) {
+ dbg_event(0xFF, "dwc3 init",
+ atomic_read(&mdwc->dev->power.usage_count));
dwc3_core_pre_init(dwc);
mdwc->init = true;
}
@@ -2262,6 +2267,7 @@
*/
dwc3_pwr_event_handler(mdwc);
+ dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
return 0;
}
@@ -2305,6 +2311,7 @@
static void dwc3_resume_work(struct work_struct *w)
{
struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
@@ -2320,9 +2327,11 @@
mdwc->resume_pending = false;
}
- if (atomic_read(&mdwc->pm_suspended))
+ if (atomic_read(&mdwc->pm_suspended)) {
+ dbg_event(0xFF, "RWrk PMSus", 0);
/* let pm resume kick in resume work later */
return;
+ }
dwc3_ext_event_notify(mdwc);
}
@@ -2394,6 +2403,7 @@
else
dwc3_pwr_event_handler(mdwc);
+ dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
return IRQ_HANDLED;
}
@@ -2578,7 +2588,7 @@
mdwc->typec_orientation =
cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
- dev_dbg(mdwc->dev, "cc_state:%d", mdwc->typec_orientation);
+ dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
dwc->maximum_speed = (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
@@ -2587,6 +2597,7 @@
if (mdwc->id_state != id) {
mdwc->id_state = id;
+ dbg_event(0xFF, "id_state", mdwc->id_state);
queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
}
@@ -2620,7 +2631,7 @@
mdwc->typec_orientation =
cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
- dev_dbg(mdwc->dev, "cc_state:%d", mdwc->typec_orientation);
+ dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
dwc->maximum_speed = (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
@@ -3105,6 +3116,7 @@
static int dwc3_msm_remove(struct platform_device *pdev)
{
struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
int ret_pm;
device_remove_file(&pdev->dev, &dev_attr_mode);
@@ -3117,6 +3129,7 @@
* Hence turn ON the clocks manually.
*/
ret_pm = pm_runtime_get_sync(mdwc->dev);
+ dbg_event(0xFF, "Remov gsyn", ret_pm);
if (ret_pm < 0) {
dev_err(mdwc->dev,
"pm_runtime_get_sync failed with %d\n", ret_pm);
@@ -3138,6 +3151,7 @@
platform_device_put(mdwc->dwc3);
device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
+ dbg_event(0xFF, "Remov put", 0);
pm_runtime_disable(mdwc->dev);
pm_runtime_barrier(mdwc->dev);
pm_runtime_put_sync(mdwc->dev);
@@ -3275,6 +3289,8 @@
mdwc->ss_phy->flags |= PHY_HOST_MODE;
pm_runtime_get_sync(mdwc->dev);
+ dbg_event(0xFF, "StrtHost gync",
+ atomic_read(&mdwc->dev->power.usage_count));
usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
if (!IS_ERR(mdwc->vbus_reg))
ret = regulator_enable(mdwc->vbus_reg);
@@ -3283,6 +3299,8 @@
mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
pm_runtime_put_sync(mdwc->dev);
+ dbg_event(0xFF, "vregerr psync",
+ atomic_read(&mdwc->dev->power.usage_count));
return ret;
}
@@ -3309,6 +3327,8 @@
mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
pm_runtime_put_sync(mdwc->dev);
+ dbg_event(0xFF, "pdeverr psync",
+ atomic_read(&mdwc->dev->power.usage_count));
usb_unregister_notify(&mdwc->host_nb);
return ret;
}
@@ -3325,6 +3345,8 @@
dwc3_usb3_phy_suspend(dwc, true);
/* xHCI should have incremented child count as necessary */
+ dbg_event(0xFF, "StrtHost psync",
+ atomic_read(&mdwc->dev->power.usage_count));
pm_runtime_mark_last_busy(mdwc->dev);
pm_runtime_put_sync_autosuspend(mdwc->dev);
} else {
@@ -3338,6 +3360,8 @@
}
pm_runtime_get_sync(mdwc->dev);
+ dbg_event(0xFF, "StopHost gsync",
+ atomic_read(&mdwc->dev->power.usage_count));
usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
@@ -3360,6 +3384,8 @@
dwc3_post_host_reset_core_init(dwc);
pm_runtime_mark_last_busy(mdwc->dev);
pm_runtime_put_sync_autosuspend(mdwc->dev);
+ dbg_event(0xFF, "StopHost psync",
+ atomic_read(&mdwc->dev->power.usage_count));
}
return 0;
@@ -3397,6 +3423,8 @@
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
pm_runtime_get_sync(mdwc->dev);
+ dbg_event(0xFF, "StrtGdgt gsync",
+ atomic_read(&mdwc->dev->power.usage_count));
if (on) {
dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
@@ -3425,6 +3453,8 @@
}
pm_runtime_put_sync(mdwc->dev);
+ dbg_event(0xFF, "StopGdgt psync",
+ atomic_read(&mdwc->dev->power.usage_count));
return 0;
}
@@ -3494,6 +3524,7 @@
state = usb_otg_state_string(mdwc->otg_state);
dev_dbg(mdwc->dev, "%s state\n", state);
+ dbg_event(0xFF, state, 0);
/* Check OTG state */
switch (mdwc->otg_state) {
@@ -3503,6 +3534,7 @@
!test_bit(B_SESS_VLD, &mdwc->inputs))
break;
+ dbg_event(0xFF, "Exit UNDEF", 0);
mdwc->otg_state = OTG_STATE_B_IDLE;
/* fall-through */
case OTG_STATE_B_IDLE:
@@ -3518,6 +3550,8 @@
* cable disconnect or in bus suspend.
*/
pm_runtime_get_sync(mdwc->dev);
+ dbg_event(0xFF, "BIDLE gsync",
+ atomic_read(&mdwc->dev->power.usage_count));
dwc3_otg_start_peripheral(mdwc, 1);
mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
work = 1;
@@ -3539,6 +3573,8 @@
* OTG_STATE_B_IDLE state
*/
pm_runtime_put_sync(mdwc->dev);
+ dbg_event(0xFF, "!BSV psync",
+ atomic_read(&mdwc->dev->power.usage_count));
work = 1;
} else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
test_bit(B_SESS_VLD, &mdwc->inputs)) {
@@ -3553,6 +3589,8 @@
*/
pm_runtime_mark_last_busy(mdwc->dev);
pm_runtime_put_autosuspend(mdwc->dev);
+ dbg_event(0xFF, "SUSP put",
+ atomic_read(&mdwc->dev->power.usage_count));
}
break;
@@ -3571,6 +3609,8 @@
* OTG_STATE_B_PERIPHERAL state.
*/
pm_runtime_get_sync(mdwc->dev);
+ dbg_event(0xFF, "!SUSP gsync",
+ atomic_read(&mdwc->dev->power.usage_count));
}
break;
@@ -3612,6 +3652,7 @@
work = 1;
} else {
dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
+ dbg_event(0xFF, "XHCIResume", 0);
if (dwc)
pm_runtime_resume(&dwc->xhci->dev);
}
@@ -3637,6 +3678,7 @@
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
dev_dbg(dev, "dwc3-msm PM suspend\n");
+ dbg_event(0xFF, "PM Sus", 0);
flush_workqueue(mdwc->dwc3_wq);
if (!atomic_read(&dwc->in_lpm)) {
@@ -3654,8 +3696,10 @@
static int dwc3_msm_pm_resume(struct device *dev)
{
struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
dev_dbg(dev, "dwc3-msm PM resume\n");
+ dbg_event(0xFF, "PM Res", 0);
/* flush to avoid race in read/write of pm_suspended */
flush_workqueue(mdwc->dwc3_wq);
@@ -3671,7 +3715,11 @@
#ifdef CONFIG_PM
static int dwc3_msm_runtime_idle(struct device *dev)
{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
dev_dbg(dev, "DWC3-msm runtime idle\n");
+ dbg_event(0xFF, "RT Idle", 0);
return 0;
}
@@ -3679,8 +3727,10 @@
static int dwc3_msm_runtime_suspend(struct device *dev)
{
struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
dev_dbg(dev, "DWC3-msm runtime suspend\n");
+ dbg_event(0xFF, "RT Sus", 0);
return dwc3_msm_suspend(mdwc);
}
@@ -3688,8 +3738,10 @@
static int dwc3_msm_runtime_resume(struct device *dev)
{
struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
dev_dbg(dev, "DWC3-msm runtime resume\n");
+ dbg_event(0xFF, "RT Res", 0);
return dwc3_msm_resume(mdwc);
}
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index ee60147..ed218fa 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -288,6 +288,7 @@
struct dwc3_ep *dep = to_dwc3_ep(ep);
struct dwc3 *dwc = dep->dwc;
+ dbg_event(dep->number, "EP0STAL", value);
dwc3_ep0_stall_and_restart(dwc);
return 0;
@@ -314,7 +315,8 @@
dwc3_ep0_prepare_one_trb(dwc, 0, dwc->ctrl_req_addr, 8,
DWC3_TRBCTL_CONTROL_SETUP, false);
ret = dwc3_ep0_start_trans(dwc, 0);
- WARN_ON(ret < 0);
+ if (WARN_ON(ret < 0))
+ dbg_event(dwc->eps[0]->number, "EOUTSTART", ret);
}
static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
@@ -656,7 +658,8 @@
/* now that we have the time, issue DGCMD Set Sel */
ret = dwc3_send_gadget_generic_command(dwc,
DWC3_DGCMD_SET_PERIODIC_PAR, param);
- WARN_ON(ret < 0);
+ if (WARN_ON(ret < 0))
+ dbg_event(dep->number, "ESET_SELCMPL", ret);
}
static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
@@ -782,6 +785,7 @@
dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
}
+ dbg_setup(0x00, ctrl);
if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
ret = dwc3_ep0_std_request(dwc, ctrl);
else
@@ -791,8 +795,10 @@
dwc->delayed_status = true;
out:
- if (ret < 0)
+ if (ret < 0) {
+ dbg_event(0x0, "ERRSTAL", ret);
dwc3_ep0_stall_and_restart(dwc);
+ }
}
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
@@ -876,7 +882,7 @@
if ((epnum & 1) && ur->actual < ur->length) {
/* for some reason we did not get everything out */
-
+ dbg_event(epnum, "INDATSTAL", 0);
dwc3_ep0_stall_and_restart(dwc);
} else {
dwc3_gadget_giveback(ep0, r, 0);
@@ -890,7 +896,8 @@
dwc3_ep0_prepare_one_trb(dwc, epnum, dwc->ctrl_req_addr,
0, DWC3_TRBCTL_CONTROL_DATA, false);
ret = dwc3_ep0_start_trans(dwc, epnum);
- WARN_ON(ret < 0);
+ if (WARN_ON(ret < 0))
+ dbg_event(epnum, "ECTRL_DATA", ret);
}
}
}
@@ -921,6 +928,7 @@
if (ret < 0) {
dwc3_trace(trace_dwc3_ep0, "Invalid Test #%d",
dwc->test_mode_nr);
+ dbg_event(0x00, "INVALTEST", ret);
dwc3_ep0_stall_and_restart(dwc);
return;
}
@@ -932,6 +940,7 @@
dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
}
+ dbg_print(dep->number, "DONE", status, "STATUS");
dwc->ep0state = EP0_SETUP_PHASE;
dwc3_ep0_out_start(dwc);
}
@@ -1025,6 +1034,7 @@
}
WARN_ON(ret < 0);
+ dbg_queue(dep->number, &req->request, ret);
}
static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
@@ -1042,13 +1052,17 @@
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
{
+ int ret;
+
if (dwc->resize_fifos) {
dwc3_trace(trace_dwc3_ep0, "Resizing FIFOs");
dwc3_gadget_resize_tx_fifos(dwc);
dwc->resize_fifos = 0;
}
- WARN_ON(dwc3_ep0_start_control_status(dep));
+ ret = dwc3_ep0_start_control_status(dep);
+ if (WARN_ON_ONCE(ret))
+ dbg_event(dep->number, "ECTRLSTATUS", ret);
}
static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
@@ -1073,7 +1087,11 @@
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
memset(¶ms, 0, sizeof(params));
ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms);
- WARN_ON_ONCE(ret);
+ if (ret) {
+		dev_dbg(dwc->dev, "%s: send ep cmd ENDTRANSFER failed\n",
+			dep->name);
+ dbg_event(dep->number, "EENDXFER", ret);
+ }
dep->resource_index = 0;
}
@@ -1106,6 +1124,7 @@
dwc3_trace(trace_dwc3_ep0,
"Wrong direction for Data phase");
dwc3_ep0_end_control_data(dwc, dep);
+ dbg_event(epnum, "WRONGDR", 0);
dwc3_ep0_stall_and_restart(dwc);
return;
}
@@ -1122,7 +1141,8 @@
dwc->ep0state = EP0_STATUS_PHASE;
if (dwc->delayed_status) {
- WARN_ON_ONCE(event->endpoint_number != 1);
+ if (event->endpoint_number != 1)
+ dbg_event(epnum, "EEPNUM", event->status);
dwc3_trace(trace_dwc3_ep0, "Delayed Status");
return;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 8c41ebd..d7d929d 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1350,6 +1350,10 @@
goto out;
}
+ WARN(!dep->direction && (request->length % ep->desc->wMaxPacketSize),
+ "trying to queue unaligned request (%d) with %s\n",
+ request->length, ep->name);
+
ret = __dwc3_gadget_ep_queue(dep, req);
/*
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 4a30afa..ec498d8 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2054,8 +2054,8 @@
ffs->epfiles = epfiles;
- ffs_log("exit: epfile name %s state %d setup_state %d flag %lu",
- epfile->name, ffs->state, ffs->setup_state, ffs->flags);
+ ffs_log("exit: eps_count %u state %d setup_state %d flag %lu",
+ count, ffs->state, ffs->setup_state, ffs->flags);
return 0;
}
@@ -2066,7 +2066,7 @@
ENTER();
- ffs_log("enter: epfilename %s", epfile->name);
+ ffs_log("enter: count %u", count);
for (; count; --count, ++epfile) {
BUG_ON(mutex_is_locked(&epfile->mutex) ||
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 7acbd2c..1782804 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -5648,6 +5648,10 @@
#ifdef CONFIG_COMPAT
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
+ /*
+ * These all access 32-bit values anyway so no further
+ * handling is necessary.
+ */
switch (cmd) {
case FS_IOC32_GETFLAGS:
cmd = FS_IOC_GETFLAGS;
@@ -5658,8 +5662,6 @@
case FS_IOC32_GETVERSION:
cmd = FS_IOC_GETVERSION;
break;
- default:
- return -ENOIOCTLCMD;
}
return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 17a257c..dbef345 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -503,8 +503,16 @@
return -EAGAIN;
}
- trace_android_fs_dataread_start(inode, page_offset(page), PAGE_SIZE,
- current->pid, current->comm);
+ if (trace_android_fs_dataread_start_enabled()) {
+ char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+ path = android_fstrace_get_pathname(pathbuf,
+ MAX_TRACE_PATHBUF_LEN,
+ inode);
+ trace_android_fs_dataread_start(inode, page_offset(page),
+ PAGE_SIZE, current->pid,
+ path, current->comm);
+ }
/*
* Current inline data can only exist in the 1st page,
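
The ext4/f2fs/mpage hunks in this series all repeat one pattern; as a standalone sketch (names taken from the patch itself), the trace_*_enabled() guard keeps the pathname lookup off the hot path unless the tracepoint is actually active:

#include <linux/fs.h>
#include <linux/sched.h>
#include <trace/events/android_fs.h>

/* Illustrative helper mirroring the pattern used in these hunks. */
static void example_trace_dataread(struct inode *inode, loff_t pos,
				   size_t len)
{
	if (trace_android_fs_dataread_start_enabled()) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    inode);
		trace_android_fs_dataread_start(inode, pos, len,
						current->pid, path,
						current->comm);
	}
}
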
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 1db9080..46912da 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1184,8 +1184,16 @@
pgoff_t index;
unsigned from, to;
- trace_android_fs_datawrite_start(inode, pos, len,
- current->pid, current->comm);
+ if (trace_android_fs_datawrite_start_enabled()) {
+ char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+ path = android_fstrace_get_pathname(pathbuf,
+ MAX_TRACE_PATHBUF_LEN,
+ inode);
+ trace_android_fs_datawrite_start(inode, pos, len,
+ current->pid, path,
+ current->comm);
+ }
trace_ext4_write_begin(inode, pos, len, flags);
/*
* Reserve one block more for addition to orphan list in case
@@ -2902,8 +2910,16 @@
len, flags, pagep, fsdata);
}
*fsdata = (void *)0;
- trace_android_fs_datawrite_start(inode, pos, len,
- current->pid, current->comm);
+ if (trace_android_fs_datawrite_start_enabled()) {
+ char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+ path = android_fstrace_get_pathname(pathbuf,
+ MAX_TRACE_PATHBUF_LEN,
+ inode);
+ trace_android_fs_datawrite_start(inode, pos, len,
+ current->pid,
+ path, current->comm);
+ }
trace_ext4_da_write_begin(inode, pos, len, flags);
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -3597,16 +3613,27 @@
return 0;
if (trace_android_fs_dataread_start_enabled() &&
- (rw == READ))
- trace_android_fs_dataread_start(inode, offset, count,
- current->pid,
- current->comm);
- if (trace_android_fs_datawrite_start_enabled() &&
- (rw == WRITE))
- trace_android_fs_datawrite_start(inode, offset, count,
- current->pid,
- current->comm);
+ (rw == READ)) {
+ char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+ path = android_fstrace_get_pathname(pathbuf,
+ MAX_TRACE_PATHBUF_LEN,
+ inode);
+ trace_android_fs_dataread_start(inode, offset, count,
+ current->pid, path,
+ current->comm);
+ }
+ if (trace_android_fs_datawrite_start_enabled() &&
+ (rw == WRITE)) {
+ char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+ path = android_fstrace_get_pathname(pathbuf,
+ MAX_TRACE_PATHBUF_LEN,
+ inode);
+ trace_android_fs_datawrite_start(inode, offset, count,
+ current->pid, path,
+ current->comm);
+ }
trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
if (iov_iter_rw(iter) == READ)
ret = ext4_direct_IO_read(iocb, iter);
@@ -3839,6 +3866,11 @@
unsigned blocksize;
struct inode *inode = mapping->host;
+	/*
+	 * If we are processing an encrypted inode during orphan list
+	 * handling, the encryption key may be unavailable, so skip the
+	 * block zero-out in that case.
+	 */
+ if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode))
+ return 0;
+
blocksize = inode->i_sb->s_blocksize;
length = blocksize - (offset & (blocksize - 1));
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 77cf54c..2531cc1 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -117,11 +117,17 @@
struct page *first_page = bio->bi_io_vec[0].bv_page;
if (first_page != NULL) {
+ char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+ path = android_fstrace_get_pathname(pathbuf,
+ MAX_TRACE_PATHBUF_LEN,
+ first_page->mapping->host);
trace_android_fs_dataread_start(
first_page->mapping->host,
page_offset(first_page),
bio->bi_iter.bi_size,
current->pid,
+ path,
current->comm);
}
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index aee4a45..2c5ae0b 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1607,8 +1607,16 @@
block_t blkaddr = NULL_ADDR;
int err = 0;
- trace_android_fs_datawrite_start(inode, pos, len,
- current->pid, current->comm);
+ if (trace_android_fs_datawrite_start_enabled()) {
+ char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+ path = android_fstrace_get_pathname(pathbuf,
+ MAX_TRACE_PATHBUF_LEN,
+ inode);
+ trace_android_fs_datawrite_start(inode, pos, len,
+ current->pid, path,
+ current->comm);
+ }
trace_f2fs_write_begin(inode, pos, len, flags);
/*
@@ -1763,14 +1771,27 @@
trace_f2fs_direct_IO_enter(inode, offset, count, rw);
if (trace_android_fs_dataread_start_enabled() &&
- (rw == READ))
+ (rw == READ)) {
+ char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+ path = android_fstrace_get_pathname(pathbuf,
+ MAX_TRACE_PATHBUF_LEN,
+ inode);
trace_android_fs_dataread_start(inode, offset,
- count, current->pid,
+ count, current->pid, path,
current->comm);
+ }
if (trace_android_fs_datawrite_start_enabled() &&
- (rw == WRITE))
+ (rw == WRITE)) {
+ char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+ path = android_fstrace_get_pathname(pathbuf,
+ MAX_TRACE_PATHBUF_LEN,
+ inode);
trace_android_fs_datawrite_start(inode, offset, count,
- current->pid, current->comm);
+ current->pid, path,
+ current->comm);
+ }
down_read(&F2FS_I(inode)->dio_rwsem[rw]);
err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index d534f44..1427db9 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -83,9 +83,16 @@
{
struct page *ipage;
- trace_android_fs_dataread_start(inode, page_offset(page),
- PAGE_SIZE, current->pid,
- current->comm);
+ if (trace_android_fs_dataread_start_enabled()) {
+ char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+ path = android_fstrace_get_pathname(pathbuf,
+ MAX_TRACE_PATHBUF_LEN,
+ inode);
+ trace_android_fs_dataread_start(inode, page_offset(page),
+ PAGE_SIZE, current->pid,
+ path, current->comm);
+ }
ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
if (IS_ERR(ipage)) {
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 3fd1e21..83511cb 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -401,6 +401,10 @@
static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
spin_lock(&fiq->waitq.lock);
+ if (test_bit(FR_FINISHED, &req->flags)) {
+ spin_unlock(&fiq->waitq.lock);
+ return;
+ }
if (list_empty(&req->intr_entry)) {
list_add_tail(&req->intr_entry, &fiq->interrupts);
wake_up_locked(&fiq->waitq);
@@ -1376,6 +1380,7 @@
* code can Oops if the buffer persists after module unload.
*/
bufs[page_nr].ops = &nosteal_pipe_buf_ops;
+ bufs[page_nr].flags = 0;
ret = add_to_pipe(pipe, &bufs[page_nr++]);
if (unlikely(ret < 0))
break;
diff --git a/fs/mpage.c b/fs/mpage.c
index 2bb117d..802b481 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -80,11 +80,17 @@
struct page *first_page = bio->bi_io_vec[0].bv_page;
if (first_page != NULL) {
+ char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+ path = android_fstrace_get_pathname(pathbuf,
+ MAX_TRACE_PATHBUF_LEN,
+ first_page->mapping->host);
trace_android_fs_dataread_start(
first_page->mapping->host,
page_offset(first_page),
bio->bi_iter.bi_size,
current->pid,
+ path,
current->comm);
}
}
diff --git a/fs/splice.c b/fs/splice.c
index 63b8f54..8dd79ec 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -203,6 +203,7 @@
buf->len = spd->partial[page_nr].len;
buf->private = spd->partial[page_nr].private;
buf->ops = spd->ops;
+ buf->flags = 0;
pipe->nrbufs++;
page_nr++;
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index a087500..df08a41 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -45,10 +45,9 @@
extern int can_proto_register(const struct can_proto *cp);
extern void can_proto_unregister(const struct can_proto *cp);
-extern int can_rx_register(struct net_device *dev, canid_t can_id,
- canid_t mask,
- void (*func)(struct sk_buff *, void *),
- void *data, char *ident);
+int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+ void (*func)(struct sk_buff *, void *),
+ void *data, char *ident, struct sock *sk);
extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
canid_t mask,
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 0839818..3a4f264 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -186,6 +186,7 @@
extern void clocksource_resume(void);
extern struct clocksource * __init clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
+extern void clocksource_select_force(void);
extern u64
clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 9535e79..ec7047c 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -175,6 +175,7 @@
const struct coresight_ops *ops;
struct device dev;
atomic_t *refcnt;
+ struct coresight_path *node;
bool orphan;
bool enable; /* true only if configured as part of a path */
bool activated; /* true only if a sink is part of a path */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index eec093c..b8eb25b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -567,7 +567,7 @@
static inline int cpumask_parse_user(const char __user *buf, int len,
struct cpumask *dstp)
{
- return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
+ return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
/**
@@ -582,7 +582,7 @@
struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
- nr_cpu_ids);
+ nr_cpumask_bits);
}
/**
@@ -597,7 +597,7 @@
char *nl = strchr(buf, '\n');
unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
- return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
+ return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
/**
@@ -609,7 +609,7 @@
*/
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
- return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
+ return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
/**
diff --git a/include/linux/device.h b/include/linux/device.h
index d85101c..d469121 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -55,6 +55,8 @@
struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
#define BUS_ATTR_RO(_name) \
struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
+#define BUS_ATTR_WO(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
extern int __must_check bus_create_file(struct bus_type *,
struct bus_attribute *);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index cd184bd..c92a083 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -128,6 +128,7 @@
u32 ring_data_startoffset;
u32 priv_write_index;
u32 priv_read_index;
+ u32 cached_read_index;
};
/*
@@ -180,6 +181,19 @@
return write;
}
+static inline u32 hv_get_cached_bytes_to_write(
+ const struct hv_ring_buffer_info *rbi)
+{
+ u32 read_loc, write_loc, dsize, write;
+
+ dsize = rbi->ring_datasize;
+ read_loc = rbi->cached_read_index;
+ write_loc = rbi->ring_buffer->write_index;
+
+ write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+ read_loc - write_loc;
+ return write;
+}
/*
* VMBUS version is 32 bit entity broken up into
* two 16 bit quantities: major_number. minor_number.
@@ -1447,6 +1461,7 @@
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
+void vmbus_setevent(struct vmbus_channel *channel);
/*
* Negotiated version with the Host.
*/
@@ -1479,10 +1494,11 @@
* there is room for the producer to send the pending packet.
*/
-static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
+static inline void hv_signal_on_read(struct vmbus_channel *channel)
{
- u32 cur_write_sz;
+ u32 cur_write_sz, cached_write_sz;
u32 pending_sz;
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
/*
* Issue a full memory barrier before making the signaling decision.
@@ -1500,14 +1516,26 @@
pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
/* If the other end is not blocked on write don't bother. */
if (pending_sz == 0)
- return false;
+ return;
cur_write_sz = hv_get_bytes_to_write(rbi);
- if (cur_write_sz >= pending_sz)
- return true;
+ if (cur_write_sz < pending_sz)
+ return;
- return false;
+ cached_write_sz = hv_get_cached_bytes_to_write(rbi);
+ if (cached_write_sz < pending_sz)
+ vmbus_setevent(channel);
+
+ return;
+}
+
+static inline void
+init_cached_read_index(struct vmbus_channel *channel)
+{
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+ rbi->cached_read_index = rbi->ring_buffer->read_index;
}
/*
@@ -1571,6 +1599,8 @@
* This call commits the read index and potentially signals the host.
* Here is the pattern for using the "in-place" consumption APIs:
*
+ * init_cached_read_index();
+ *
* while (get_next_pkt_raw() {
* process the packet "in-place";
* put_pkt_raw();
@@ -1589,8 +1619,7 @@
virt_rmb();
ring_info->ring_buffer->read_index = ring_info->priv_read_index;
- if (hv_need_to_signal_on_read(ring_info))
- vmbus_set_event(channel);
+ hv_signal_on_read(channel);
}
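
A sketch of the intended calling sequence for the cached read index (the helper names come from the in-place consumption comment above; the loop body is illustrative):

#include <linux/hyperv.h>

static void example_drain_ring(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;

	/* snapshot read_index before consuming packets in place */
	init_cached_read_index(channel);

	while ((desc = get_next_pkt_raw(channel)) != NULL) {
		/* ... process the packet in place ... */
		put_pkt_raw(channel, desc);
	}

	/* commits priv_read_index and may signal the host */
	commit_rd_index(channel);
}
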
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 5fc7dda..a83ac84 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -765,7 +765,7 @@
IPA_TX_SUSPEND_IRQ,
IPA_TX_HOLB_DROP_IRQ,
IPA_BAM_IDLE_IRQ,
- IPA_BAM_GSI_IDLE_IRQ = IPA_BAM_IDLE_IRQ,
+ IPA_GSI_IDLE_IRQ = IPA_BAM_IDLE_IRQ,
IPA_IRQ_MAX
};
@@ -1412,7 +1412,8 @@
int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count);
-struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(int ipa_ep_idx);
+const struct ipa_gsi_ep_config *ipa_get_gsi_ep_info
+ (enum ipa_client_type client);
int ipa_stop_gsi_channel(u32 clnt_hdl);
@@ -2156,7 +2157,8 @@
return -EINVAL;
}
-static inline struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(int ipa_ep_idx)
+static inline const struct ipa_gsi_ep_config *ipa_get_gsi_ep_info
+ (enum ipa_client_type client)
{
return NULL;
}
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index da25f07..f7033fa 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -341,7 +341,7 @@
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
-int memblock_overlaps_memory(phys_addr_t base, phys_addr_t size);
+bool memblock_overlaps_memory(phys_addr_t base, phys_addr_t size);
extern void __memblock_dump_all(void);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d213c76..a47c29e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1508,6 +1508,7 @@
* @mtu: Interface MTU value
* @type: Interface hardware type
* @hard_header_len: Maximum hardware header length.
+ * @min_header_len: Minimum hardware header length
*
* @needed_headroom: Extra headroom the hardware may need, but not in all
* cases can this be guaranteed
@@ -1728,6 +1729,7 @@
unsigned int mtu;
unsigned short type;
unsigned short hard_header_len;
+ unsigned short min_header_len;
unsigned short needed_headroom;
unsigned short needed_tailroom;
@@ -2783,6 +2785,8 @@
{
if (likely(len >= dev->hard_header_len))
return true;
+ if (len < dev->min_header_len)
+ return false;
if (capable(CAP_SYS_RAWIO)) {
memset(ll_header + len, 0, dev->hard_header_len - len);
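
A hedged example of how a driver would honor the new field (upstream sets this for Ethernet-class devices; the setup callback here is hypothetical):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_dev_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* reject raw-socket headers shorter than a 14-byte Ethernet header */
	dev->min_header_len = ETH_HLEN;
}
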
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 780949d..dde3b13 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -9,7 +9,7 @@
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
- *
+ *
* If the architecture supports the NMI watchdog, touch_nmi_watchdog()
* may be used to reset the timeout - for code which intentionally
* disables interrupts for a long time. This call is stateless.
@@ -41,25 +41,37 @@
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
- arch_trigger_cpumask_backtrace(cpu_online_mask, false);
+#if defined(CONFIG_ARM64)
+	arch_trigger_all_cpu_backtrace();
+#else
+	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
+#endif
+
return true;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
- arch_trigger_cpumask_backtrace(cpu_online_mask, true);
+#if defined(CONFIG_ARM64)
+	arch_trigger_all_cpu_backtrace();
+#else
+	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
+#endif
+
return true;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
arch_trigger_cpumask_backtrace(mask, false);
+
return true;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
+
return true;
}
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
new file mode 100644
index 0000000..5b644c5
--- /dev/null
+++ b/include/linux/qcom-geni-se.h
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_QCOM_GENI_SE
+#define _LINUX_QCOM_GENI_SE
+#include <linux/io.h>
+
+enum se_xfer_mode {
+ INVALID,
+ FIFO_MODE,
+ GSI_DMA,
+};
+
+enum se_protocol_types {
+ NONE,
+ SPI,
+ UART,
+ I2C,
+ I3C
+};
+
+#define GENI_INIT_CFG_REVISION (0x0)
+#define GENI_S_INIT_CFG_REVISION (0x4)
+#define GENI_FORCE_DEFAULT_REG (0x20)
+#define GENI_OUTPUT_CTRL (0x24)
+#define GENI_CGC_CTRL (0x28)
+#define SE_GENI_STATUS (0x40)
+#define GENI_SER_M_CLK_CFG (0x48)
+#define GENI_SER_S_CLK_CFG (0x4C)
+#define GENI_CLK_CTRL_RO (0x60)
+#define GENI_IF_DISABLE_RO (0x64)
+#define GENI_FW_REVISION_RO (0x68)
+#define GENI_FW_S_REVISION_RO (0x6C)
+#define SE_GENI_CLK_SEL (0x7C)
+#define SE_GENI_DMA_MODE_EN (0x258)
+#define SE_GENI_TX_PACKING_CFG0 (0x260)
+#define SE_GENI_TX_PACKING_CFG1 (0x264)
+#define SE_GENI_RX_PACKING_CFG0 (0x284)
+#define SE_GENI_RX_PACKING_CFG1 (0x288)
+#define SE_GENI_M_CMD0 (0x600)
+#define SE_GENI_M_CMD_CTRL_REG (0x604)
+#define SE_GENI_M_IRQ_STATUS (0x610)
+#define SE_GENI_M_IRQ_EN (0x614)
+#define SE_GENI_M_IRQ_CLEAR (0x618)
+#define SE_GENI_S_CMD0 (0x630)
+#define SE_GENI_S_CMD_CTRL_REG (0x634)
+#define SE_GENI_S_IRQ_STATUS (0x640)
+#define SE_GENI_S_IRQ_EN (0x644)
+#define SE_GENI_S_IRQ_CLEAR (0x648)
+#define SE_GENI_TX_FIFOn (0x700)
+#define SE_GENI_RX_FIFOn (0x780)
+#define SE_GENI_TX_FIFO_STATUS (0x800)
+#define SE_GENI_RX_FIFO_STATUS (0x804)
+#define SE_GENI_TX_WATERMARK_REG (0x80C)
+#define SE_GENI_RX_WATERMARK_REG (0x810)
+#define SE_GENI_RX_RFR_WATERMARK_REG (0x814)
+#define SE_GENI_M_GP_LENGTH (0x910)
+#define SE_GENI_S_GP_LENGTH (0x914)
+#define SE_IRQ_EN (0xE1C)
+#define SE_HW_PARAM_0 (0xE24)
+#define SE_HW_PARAM_1 (0xE28)
+#define SE_DMA_GENERAL_CFG (0xE30)
+
+/* GENI_OUTPUT_CTRL fields */
+#define DEFAULT_IO_OUTPUT_CTRL_MSK (GENMASK(6, 0))
+
+/* GENI_FORCE_DEFAULT_REG fields */
+#define FORCE_DEFAULT (BIT(0))
+
+/* GENI_CGC_CTRL fields */
+#define CFG_AHB_CLK_CGC_ON (BIT(0))
+#define CFG_AHB_WR_ACLK_CGC_ON (BIT(1))
+#define DATA_AHB_CLK_CGC_ON (BIT(2))
+#define SCLK_CGC_ON (BIT(3))
+#define TX_CLK_CGC_ON (BIT(4))
+#define RX_CLK_CGC_ON (BIT(5))
+#define EXT_CLK_CGC_ON (BIT(6))
+#define PROG_RAM_HCLK_OFF (BIT(8))
+#define PROG_RAM_SCLK_OFF (BIT(9))
+#define DEFAULT_CGC_EN (GENMASK(6, 0))
+
+/* GENI_STATUS fields */
+#define M_GENI_CMD_ACTIVE (BIT(0))
+#define S_GENI_CMD_ACTIVE (BIT(12))
+
+/* GENI_SER_M_CLK_CFG/GENI_SER_S_CLK_CFG */
+#define SER_CLK_EN (BIT(0))
+#define CLK_DIV_MSK (GENMASK(15, 4))
+#define CLK_DIV_SHFT (4)
+
+/* CLK_CTRL_RO fields */
+
+/* IF_DISABLE_RO fields */
+
+/* FW_REVISION_RO fields */
+#define FW_REV_PROTOCOL_MSK (GENMASK(15, 8))
+#define FW_REV_PROTOCOL_SHFT (8)
+
+/* SE_GENI_DMA_MODE_EN */
+#define GENI_DMA_MODE_EN (BIT(0))
+
+/* GENI_M_CMD0 fields */
+#define M_OPCODE_MSK (GENMASK(31, 27))
+#define M_OPCODE_SHFT (27)
+#define M_PARAMS_MSK (GENMASK(26, 0))
+
+/* GENI_M_CMD_CTRL_REG */
+#define M_GENI_CMD_CANCEL BIT(2)
+#define M_GENI_CMD_ABORT BIT(1)
+#define M_GENI_DISABLE BIT(0)
+
+/* GENI_S_CMD0 fields */
+#define S_OPCODE_MSK (GENMASK(31, 27))
+#define S_OPCODE_SHFT (27)
+#define S_PARAMS_MSK (GENMASK(26, 0))
+
+/* GENI_S_CMD_CTRL_REG */
+#define S_GENI_CMD_CANCEL (BIT(2))
+#define S_GENI_CMD_ABORT (BIT(1))
+#define S_GENI_DISABLE (BIT(0))
+
+/* GENI_M_IRQ_EN fields */
+#define M_CMD_DONE_EN (BIT(0))
+#define M_CMD_OVERRUN_EN (BIT(1))
+#define M_ILLEGAL_CMD_EN (BIT(2))
+#define M_CMD_FAILURE_EN (BIT(3))
+#define M_CMD_CANCEL_EN (BIT(4))
+#define M_CMD_ABORT_EN (BIT(5))
+#define M_TIMESTAMP_EN (BIT(6))
+#define M_RX_IRQ_EN (BIT(7))
+#define M_GP_SYNC_IRQ_0_EN (BIT(8))
+#define M_GP_IRQ_0_EN (BIT(9))
+#define M_GP_IRQ_1_EN (BIT(10))
+#define M_GP_IRQ_2_EN (BIT(11))
+#define M_GP_IRQ_3_EN (BIT(12))
+#define M_GP_IRQ_4_EN (BIT(13))
+#define M_GP_IRQ_5_EN (BIT(14))
+#define M_IO_DATA_DEASSERT_EN (BIT(22))
+#define M_IO_DATA_ASSERT_EN (BIT(23))
+#define M_RX_FIFO_RD_ERR_EN (BIT(24))
+#define M_RX_FIFO_WR_ERR_EN (BIT(25))
+#define M_RX_FIFO_WATERMARK_EN (BIT(26))
+#define M_RX_FIFO_LAST_EN (BIT(27))
+#define M_TX_FIFO_RD_ERR_EN (BIT(28))
+#define M_TX_FIFO_WR_ERR_EN (BIT(29))
+#define M_TX_FIFO_WATERMARK_EN (BIT(30))
+#define M_SEC_IRQ_EN (BIT(31))
+#define M_COMMON_GENI_M_IRQ_EN (GENMASK(3, 0) | M_TIMESTAMP_EN | \
+ GENMASK(14, 8) | M_IO_DATA_DEASSERT_EN | \
+ M_IO_DATA_ASSERT_EN | M_RX_FIFO_RD_ERR_EN | \
+ M_RX_FIFO_WR_ERR_EN | M_TX_FIFO_RD_ERR_EN | \
+ M_TX_FIFO_WR_ERR_EN | M_SEC_IRQ_EN)
+
+/* GENI_S_IRQ_EN fields */
+#define S_CMD_DONE_EN (BIT(0))
+#define S_CMD_OVERRUN_EN (BIT(1))
+#define S_ILLEGAL_CMD_EN (BIT(2))
+#define S_CMD_FAILURE_EN (BIT(3))
+#define S_CMD_CANCEL_EN (BIT(4))
+#define S_CMD_ABORT_EN (BIT(5))
+#define S_GP_SYNC_IRQ_0_EN (BIT(8))
+#define S_GP_IRQ_0_EN (BIT(9))
+#define S_GP_IRQ_1_EN (BIT(10))
+#define S_GP_IRQ_2_EN (BIT(11))
+#define S_GP_IRQ_3_EN (BIT(12))
+#define S_GP_IRQ_4_EN (BIT(13))
+#define S_GP_IRQ_5_EN (BIT(14))
+#define S_IO_DATA_DEASSERT_EN (BIT(22))
+#define S_IO_DATA_ASSERT_EN (BIT(23))
+#define S_RX_FIFO_RD_ERR_EN (BIT(24))
+#define S_RX_FIFO_WR_ERR_EN (BIT(25))
+#define S_RX_FIFO_WATERMARK_EN (BIT(26))
+#define S_RX_FIFO_LAST_EN (BIT(27))
+#define S_COMMON_GENI_S_IRQ_EN (GENMASK(3, 0) | GENMASK(14, 8) | \
+ S_RX_FIFO_RD_ERR_EN | S_RX_FIFO_WR_ERR_EN)
+
+/* GENI_/TX/RX/RX_RFR/_WATERMARK_REG fields */
+#define WATERMARK_MSK (GENMASK(5, 0))
+
+/* GENI_TX_FIFO_STATUS fields */
+#define TX_FIFO_WC (GENMASK(27, 0))
+
+/* GENI_RX_FIFO_STATUS fields */
+#define RX_LAST (BIT(31))
+#define RX_LAST_BYTE_VALID_MSK (GENMASK(30, 28))
+#define RX_LAST_BYTE_VALID_SHFT (28)
+#define RX_FIFO_WC_MSK (GENMASK(24, 0))
+
+/* SE_IRQ_EN fields */
+#define DMA_RX_IRQ_EN (BIT(0))
+#define DMA_TX_IRQ_EN (BIT(1))
+#define GENI_M_IRQ_EN (BIT(2))
+#define GENI_S_IRQ_EN (BIT(3))
+
+/* SE_HW_PARAM_0 fields */
+#define TX_FIFO_WIDTH_MSK (GENMASK(29, 24))
+#define TX_FIFO_WIDTH_SHFT (24)
+#define TX_FIFO_DEPTH_MSK (GENMASK(21, 16))
+#define TX_FIFO_DEPTH_SHFT (16)
+
+/* SE_HW_PARAM_1 fields */
+#define RX_FIFO_WIDTH_MSK (GENMASK(29, 24))
+#define RX_FIFO_WIDTH_SHFT (24)
+#define RX_FIFO_DEPTH_MSK (GENMASK(21, 16))
+#define RX_FIFO_DEPTH_SHFT (16)
+
+/* SE_DMA_GENERAL_CFG */
+#define DMA_RX_CLK_CGC_ON (BIT(0))
+#define DMA_TX_CLK_CGC_ON (BIT(1))
+#define DMA_AHB_SLV_CFG_ON (BIT(2))
+#define AHB_SEC_SLV_CLK_CGC_ON (BIT(3))
+#define DUMMY_RX_NON_BUFFERABLE (BIT(4))
+#define RX_DMA_ZERO_PADDING_EN (BIT(5))
+#define RX_DMA_IRQ_DELAY_MSK (GENMASK(8, 6))
+#define RX_DMA_IRQ_DELAY_SHFT (6)
+
+static inline unsigned int geni_read_reg(void __iomem *base, int offset)
+{
+ return readl_relaxed(base + offset);
+}
+
+static inline void geni_write_reg(unsigned int value, void __iomem *base,
+				  int offset)
+{
+	writel_relaxed(value, (base + offset));
+}
+
+static inline int get_se_proto(void __iomem *base)
+{
+ int proto = 0;
+
+ proto = ((geni_read_reg(base, GENI_FW_REVISION_RO)
+ & FW_REV_PROTOCOL_MSK) >> FW_REV_PROTOCOL_SHFT);
+ return proto;
+}
+
+static inline int se_geni_irq_en(void __iomem *base, int mode)
+{
+ int ret = 0;
+ unsigned int common_geni_m_irq_en;
+ unsigned int common_geni_s_irq_en;
+ int proto = get_se_proto(base);
+
+ common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
+ common_geni_s_irq_en = geni_read_reg(base, SE_GENI_S_IRQ_EN);
+ /* Common to all modes */
+ common_geni_m_irq_en |= M_COMMON_GENI_M_IRQ_EN;
+ common_geni_s_irq_en |= S_COMMON_GENI_S_IRQ_EN;
+
+ switch (mode) {
+ case FIFO_MODE:
+ {
+ if (proto == I2C) {
+ common_geni_m_irq_en |=
+ (M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
+ common_geni_s_irq_en |= S_CMD_DONE_EN;
+ }
+ break;
+ }
+ case GSI_DMA:
+ break;
+ default:
+ pr_err("%s: Invalid mode %d\n", __func__, mode);
+ ret = -ENXIO;
+ goto exit_irq_en;
+ }
+
+ geni_write_reg(common_geni_m_irq_en, base, SE_GENI_M_IRQ_EN);
+ geni_write_reg(common_geni_s_irq_en, base, SE_GENI_S_IRQ_EN);
+exit_irq_en:
+ return ret;
+}
+
+static inline void se_set_rx_rfr_wm(void __iomem *base, unsigned int rx_wm,
+ unsigned int rx_rfr)
+{
+ geni_write_reg(rx_wm, base, SE_GENI_RX_WATERMARK_REG);
+ geni_write_reg(rx_rfr, base, SE_GENI_RX_RFR_WATERMARK_REG);
+}
+
+static inline int se_io_set_mode(void __iomem *base, int mode)
+{
+ int ret = 0;
+ unsigned int io_mode = 0;
+ unsigned int geni_dma_mode = 0;
+
+ io_mode = geni_read_reg(base, SE_IRQ_EN);
+ geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
+
+ switch (mode) {
+ case FIFO_MODE:
+ {
+ io_mode |= (GENI_M_IRQ_EN | GENI_S_IRQ_EN);
+ io_mode |= (DMA_TX_IRQ_EN | DMA_RX_IRQ_EN);
+ geni_dma_mode &= ~GENI_DMA_MODE_EN;
+ break;
+ }
+ default:
+ ret = -ENXIO;
+ goto exit_set_mode;
+ }
+ geni_write_reg(io_mode, base, SE_IRQ_EN);
+ geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
+exit_set_mode:
+ return ret;
+}
+
+static inline void se_io_init(void __iomem *base)
+{
+ unsigned int io_op_ctrl = 0;
+ unsigned int geni_cgc_ctrl;
+ unsigned int dma_general_cfg;
+
+ geni_cgc_ctrl = geni_read_reg(base, GENI_CGC_CTRL);
+ dma_general_cfg = geni_read_reg(base, SE_DMA_GENERAL_CFG);
+ geni_cgc_ctrl |= DEFAULT_CGC_EN;
+ dma_general_cfg |= (AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CFG_ON |
+ DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON);
+ io_op_ctrl |= DEFAULT_IO_OUTPUT_CTRL_MSK;
+ geni_write_reg(geni_cgc_ctrl, base, GENI_CGC_CTRL);
+ geni_write_reg(dma_general_cfg, base, SE_DMA_GENERAL_CFG);
+
+ geni_write_reg(io_op_ctrl, base, GENI_OUTPUT_CTRL);
+ geni_write_reg(FORCE_DEFAULT, base, GENI_FORCE_DEFAULT_REG);
+}
+
+static inline int geni_se_init(void __iomem *base, int mode,
+ unsigned int rx_wm, unsigned int rx_rfr)
+{
+ int ret = 0;
+
+ se_io_init(base);
+ ret = se_io_set_mode(base, mode);
+ if (ret)
+ goto exit_geni_se_init;
+
+ se_set_rx_rfr_wm(base, rx_wm, rx_rfr);
+ ret = se_geni_irq_en(base, mode);
+ if (ret)
+ goto exit_geni_se_init;
+
+exit_geni_se_init:
+ return ret;
+}
+
+static inline void geni_setup_m_cmd(void __iomem *base, u32 cmd,
+ u32 params)
+{
+ u32 m_cmd = geni_read_reg(base, SE_GENI_M_CMD0);
+
+ m_cmd &= ~(M_OPCODE_MSK | M_PARAMS_MSK);
+ m_cmd |= (cmd << M_OPCODE_SHFT);
+ m_cmd |= (params & M_PARAMS_MSK);
+ geni_write_reg(m_cmd, base, SE_GENI_M_CMD0);
+}
+
+static inline void geni_setup_s_cmd(void __iomem *base, u32 cmd,
+ u32 params)
+{
+ u32 s_cmd = geni_read_reg(base, SE_GENI_S_CMD0);
+
+ s_cmd &= ~(S_OPCODE_MSK | S_PARAMS_MSK);
+ s_cmd |= (cmd << S_OPCODE_SHFT);
+ s_cmd |= (params & S_PARAMS_MSK);
+ geni_write_reg(s_cmd, base, SE_GENI_S_CMD0);
+}
+
+static inline void geni_cancel_m_cmd(void __iomem *base)
+{
+	geni_write_reg(M_GENI_CMD_CANCEL, base, SE_GENI_M_CMD_CTRL_REG);
+}
+
+static inline void geni_cancel_s_cmd(void __iomem *base)
+{
+ geni_write_reg(S_GENI_CMD_CANCEL, base, SE_GENI_S_CMD_CTRL_REG);
+}
+
+static inline void geni_abort_m_cmd(void __iomem *base)
+{
+ geni_write_reg(M_GENI_CMD_ABORT, base, SE_GENI_M_CMD_CTRL_REG);
+}
+
+static inline void qcom_geni_abort_s_cmd(void __iomem *base)
+{
+ geni_write_reg(S_GENI_CMD_ABORT, base, SE_GENI_S_CMD_CTRL_REG);
+}
+
+static inline int get_tx_fifo_depth(void __iomem *base)
+{
+ int tx_fifo_depth;
+
+ tx_fifo_depth = ((geni_read_reg(base, SE_HW_PARAM_0)
+ & TX_FIFO_DEPTH_MSK) >> TX_FIFO_DEPTH_SHFT);
+ return tx_fifo_depth;
+}
+
+static inline int get_tx_fifo_width(void __iomem *base)
+{
+ int tx_fifo_width;
+
+ tx_fifo_width = ((geni_read_reg(base, SE_HW_PARAM_0)
+ & TX_FIFO_WIDTH_MSK) >> TX_FIFO_WIDTH_SHFT);
+ return tx_fifo_width;
+}
+
+static inline int get_rx_fifo_depth(void __iomem *base)
+{
+ int rx_fifo_depth;
+
+ rx_fifo_depth = ((geni_read_reg(base, SE_HW_PARAM_1)
+ & RX_FIFO_DEPTH_MSK) >> RX_FIFO_DEPTH_SHFT);
+ return rx_fifo_depth;
+}
+
+static inline void se_config_packing(void __iomem *base, int bpw,
+ int pack_words, bool msb_to_lsb)
+{
+ u32 cfg[4] = {0};
+ unsigned long cfg0, cfg1;
+ int len = ((bpw < 8) ? (bpw - 1) : 7);
+ int idx = ((msb_to_lsb == 1) ? len : 0);
+ int iter = (bpw * pack_words) >> 3;
+ int i;
+
+ for (i = 0; i < iter; i++) {
+ cfg[i] = ((idx << 5) | (msb_to_lsb << 4) | (len << 1));
+ idx += (len + 1);
+ if (i == iter - 1)
+ cfg[i] |= 1;
+ }
+ cfg0 = cfg[0] | (cfg[1] << 10);
+ cfg1 = cfg[2] | (cfg[3] << 10);
+ geni_write_reg(cfg0, base, SE_GENI_TX_PACKING_CFG0);
+ geni_write_reg(cfg1, base, SE_GENI_TX_PACKING_CFG1);
+ geni_write_reg(cfg0, base, SE_GENI_RX_PACKING_CFG0);
+ geni_write_reg(cfg1, base, SE_GENI_RX_PACKING_CFG1);
+}
+#endif
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 356793e..3e354fd 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -244,6 +244,8 @@
extern int try_to_del_timer_sync(struct timer_list *timer);
+extern struct timer_base timer_base_deferrable;
+
#ifdef CONFIG_SMP
extern int del_timer_sync(struct timer_list *timer);
#else
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index 3ebb168..a34b141 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -309,6 +309,10 @@
}
for (opt_iter = 6; opt_iter < opt_len;) {
+ if (opt_iter + 1 == opt_len) {
+ err_offset = opt_iter;
+ goto out;
+ }
tag_len = opt[opt_iter + 1];
if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
err_offset = opt_iter + 1;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index f11ca83..7f15f95 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -871,7 +871,7 @@
* upper-layer output functions
*/
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
- struct ipv6_txoptions *opt, int tclass);
+ __u32 mark, struct ipv6_txoptions *opt, int tclass);
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index fc7c0db..3f40132 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -176,7 +176,10 @@
}
static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
{
- return -EOPNOTSUPP;
+ /* return 0 since we are not walking attr looking for
+ * RTA_ENCAP_TYPE attribute on nexthops.
+ */
+ return 0;
}
static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c211900..48bc1ac 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -538,6 +538,7 @@
char initiatorname[TRANSPORT_IQN_LEN];
/* Used to signal demo mode created ACL, disabled by default */
bool dynamic_node_acl;
+ bool dynamic_stop;
u32 queue_depth;
u32 acl_index;
enum target_prot_type saved_prot_type;
diff --git a/include/trace/events/android_fs.h b/include/trace/events/android_fs.h
index 531da43..4950953 100644
--- a/include/trace/events/android_fs.h
+++ b/include/trace/events/android_fs.h
@@ -9,8 +9,8 @@
DEFINE_EVENT(android_fs_data_start_template, android_fs_dataread_start,
TP_PROTO(struct inode *inode, loff_t offset, int bytes,
- pid_t pid, char *command),
- TP_ARGS(inode, offset, bytes, pid, command));
+ pid_t pid, char *pathname, char *command),
+ TP_ARGS(inode, offset, bytes, pid, pathname, command));
DEFINE_EVENT(android_fs_data_end_template, android_fs_dataread_end,
TP_PROTO(struct inode *inode, loff_t offset, int bytes),
@@ -18,14 +18,48 @@
DEFINE_EVENT(android_fs_data_start_template, android_fs_datawrite_start,
TP_PROTO(struct inode *inode, loff_t offset, int bytes,
- pid_t pid, char *command),
- TP_ARGS(inode, offset, bytes, pid, command));
+ pid_t pid, char *pathname, char *command),
+ TP_ARGS(inode, offset, bytes, pid, pathname, command));
DEFINE_EVENT(android_fs_data_end_template, android_fs_datawrite_end,
TP_PROTO(struct inode *inode, loff_t offset, int bytes),
- TP_ARGS(inode, offset, bytes));
+ TP_ARGS(inode, offset, bytes));
#endif /* _TRACE_ANDROID_FS_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
+
+#ifndef ANDROID_FSTRACE_GET_PATHNAME
+#define ANDROID_FSTRACE_GET_PATHNAME
+
+/* Sizes an on-stack array, so be careful if sizing this up! */
+#define MAX_TRACE_PATHBUF_LEN 256
+
+static inline char *
+android_fstrace_get_pathname(char *buf, int buflen, struct inode *inode)
+{
+ char *path;
+ struct dentry *d;
+
+ /*
+ * d_obtain_alias() will either iput() the inode if it locates an
+ * existing dentry, or transfer the reference to the new dentry.
+ * So get an extra reference here.
+ */
+ ihold(inode);
+ d = d_obtain_alias(inode);
+ if (likely(!IS_ERR(d))) {
+ path = dentry_path_raw(d, buf, buflen);
+ if (unlikely(IS_ERR(path))) {
+ strcpy(buf, "ERROR");
+ path = buf;
+ }
+ dput(d);
+ } else {
+ strcpy(buf, "ERROR");
+ path = buf;
+ }
+ return path;
+}
+#endif
diff --git a/include/trace/events/android_fs_template.h b/include/trace/events/android_fs_template.h
index 618988b..4e61ffe 100644
--- a/include/trace/events/android_fs_template.h
+++ b/include/trace/events/android_fs_template.h
@@ -5,11 +5,10 @@
DECLARE_EVENT_CLASS(android_fs_data_start_template,
TP_PROTO(struct inode *inode, loff_t offset, int bytes,
- pid_t pid, char *command),
- TP_ARGS(inode, offset, bytes, pid, command),
+ pid_t pid, char *pathname, char *command),
+ TP_ARGS(inode, offset, bytes, pid, pathname, command),
TP_STRUCT__entry(
- __array(char, path, MAX_FILTER_STR_VAL);
- __field(char *, pathname);
+ __string(pathbuf, pathname);
__field(loff_t, offset);
__field(int, bytes);
__field(loff_t, i_size);
@@ -19,27 +18,7 @@
),
TP_fast_assign(
{
- struct dentry *d;
-
- /*
- * Grab a reference to the inode here because
- * d_obtain_alias() will either drop the inode
- * reference if it locates an existing dentry
- * or transfer the reference to the new dentry
- * created. In our case, the file is still open,
- * so the dentry is guaranteed to exist (connected),
- * so d_obtain_alias() drops the reference we
- * grabbed here.
- */
- ihold(inode);
- d = d_obtain_alias(inode);
- if (!IS_ERR(d)) {
- __entry->pathname = dentry_path(d,
- __entry->path,
- MAX_FILTER_STR_VAL);
- dput(d);
- } else
- __entry->pathname = ERR_PTR(-EINVAL);
+ __assign_str(pathbuf, pathname);
__entry->offset = offset;
__entry->bytes = bytes;
__entry->i_size = i_size_read(inode);
@@ -50,9 +29,8 @@
),
TP_printk("entry_name %s, offset %llu, bytes %d, cmdline %s,"
" pid %d, i_size %llu, ino %lu",
- (IS_ERR(__entry->pathname) ? "ERROR" : __entry->pathname),
- __entry->offset, __entry->bytes, __get_str(cmdline),
- __entry->pid, __entry->i_size,
+ __get_str(pathbuf), __entry->offset, __entry->bytes,
+ __get_str(cmdline), __entry->pid, __entry->i_size,
(unsigned long) __entry->ino)
);
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 6e7d325..a242e72 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -350,8 +350,8 @@
/*
* The default for R'G'B' quantization is always full range, except
* for the BT2020 colorspace. For Y'CbCr the quantization is always
- * limited range, except for COLORSPACE_JPEG, SRGB, ADOBERGB,
- * XV601 or XV709: those are full range.
+ * limited range, except for COLORSPACE_JPEG, XV601 or XV709: those
+ * are full range.
*/
V4L2_QUANTIZATION_DEFAULT = 0,
V4L2_QUANTIZATION_FULL_RANGE = 1,
@@ -366,8 +366,7 @@
#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb, colsp, ycbcr_enc) \
(((is_rgb) && (colsp) == V4L2_COLORSPACE_BT2020) ? V4L2_QUANTIZATION_LIM_RANGE : \
(((is_rgb) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
- (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) || \
- (colsp) == V4L2_COLORSPACE_ADOBERGB || (colsp) == V4L2_COLORSPACE_SRGB ? \
+ (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) ? \
V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
enum v4l2_priority {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8e901de..99c91f6 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3466,14 +3466,15 @@
int ret;
};
-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
+static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
{
- int event_cpu = event->oncpu;
u16 local_pkg, event_pkg;
if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
- event_pkg = topology_physical_package_id(event_cpu);
- local_pkg = topology_physical_package_id(local_cpu);
+ int local_cpu = smp_processor_id();
+
+ event_pkg = topology_physical_package_id(event_cpu);
+ local_pkg = topology_physical_package_id(local_cpu);
if (event_pkg == local_pkg)
return local_cpu;
@@ -3603,7 +3604,7 @@
static int perf_event_read(struct perf_event *event, bool group)
{
- int ret = 0, cpu_to_read, local_cpu;
+ int event_cpu, ret = 0;
/*
* If event is enabled and currently active on a CPU, update the
@@ -3617,21 +3618,25 @@
.ret = 0,
};
- local_cpu = get_cpu();
- cpu_to_read = find_cpu_to_read(event, local_cpu);
- put_cpu();
+ event_cpu = READ_ONCE(event->oncpu);
+ if ((unsigned)event_cpu >= nr_cpu_ids)
+ return 0;
+
+ preempt_disable();
+ event_cpu = __perf_event_read_cpu(event, event_cpu);
/*
* Purposely ignore the smp_call_function_single() return
* value.
*
- * If event->oncpu isn't a valid CPU it means the event got
+ * If event_cpu isn't a valid CPU it means the event got
* scheduled out and that will have updated the event count.
*
* Therefore, either way, we'll have an up-to-date event count
* after this.
*/
- (void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+ (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+ preempt_enable();
ret = data.ret;
} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
struct perf_event_context *ctx = event->ctx;
diff --git a/kernel/futex.c b/kernel/futex.c
index 2c4be46..38b68c2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -3323,4 +3323,4 @@
return 0;
}
-__initcall(futex_init);
+core_initcall(futex_init);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b38f3fb..20fc294 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1568,7 +1568,7 @@
{
struct console *con;
- trace_console(text, len);
+ trace_console_rcuidle(text, len);
if (!console_drivers)
return;
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index eed4b72..90d10e8 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -29,3 +29,4 @@
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
obj-$(CONFIG_SCHED_CORE_CTL) += core_ctl.o
obj-$(CONFIG_CPU_FREQ_GOV_SCHED) += cpufreq_sched.o
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
new file mode 100644
index 0000000..69e0689
--- /dev/null
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -0,0 +1,576 @@
+/*
+ * CPUFreq governor based on scheduler-provided CPU utilization data.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+#include <trace/events/power.h>
+
+#include "sched.h"
+
+struct sugov_tunables {
+ struct gov_attr_set attr_set;
+ unsigned int rate_limit_us;
+};
+
+struct sugov_policy {
+ struct cpufreq_policy *policy;
+
+ struct sugov_tunables *tunables;
+ struct list_head tunables_hook;
+
+ raw_spinlock_t update_lock; /* For shared policies */
+ u64 last_freq_update_time;
+ s64 freq_update_delay_ns;
+ unsigned int next_freq;
+
+ /* The next fields are only needed if fast switch cannot be used. */
+ struct irq_work irq_work;
+ struct work_struct work;
+ struct mutex work_lock;
+ bool work_in_progress;
+
+ bool need_freq_update;
+};
+
+struct sugov_cpu {
+ struct update_util_data update_util;
+ struct sugov_policy *sg_policy;
+
+ unsigned int cached_raw_freq;
+ unsigned long iowait_boost;
+ unsigned long iowait_boost_max;
+ u64 last_update;
+
+ /* The fields below are only needed when sharing a policy. */
+ unsigned long util;
+ unsigned long max;
+ unsigned int flags;
+};
+
+static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
+
+/************************ Governor internals ***********************/
+
+static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
+{
+ s64 delta_ns;
+
+ if (sg_policy->work_in_progress)
+ return false;
+
+ if (unlikely(sg_policy->need_freq_update)) {
+ sg_policy->need_freq_update = false;
+ /*
+ * This happens when limits change, so forget the previous
+ * next_freq value and force an update.
+ */
+ sg_policy->next_freq = UINT_MAX;
+ return true;
+ }
+
+ delta_ns = time - sg_policy->last_freq_update_time;
+ return delta_ns >= sg_policy->freq_update_delay_ns;
+}
+
+static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
+ unsigned int next_freq)
+{
+ struct cpufreq_policy *policy = sg_policy->policy;
+
+ sg_policy->last_freq_update_time = time;
+
+ if (policy->fast_switch_enabled) {
+ if (sg_policy->next_freq == next_freq) {
+ trace_cpu_frequency(policy->cur, smp_processor_id());
+ return;
+ }
+ sg_policy->next_freq = next_freq;
+ next_freq = cpufreq_driver_fast_switch(policy, next_freq);
+ if (next_freq == CPUFREQ_ENTRY_INVALID)
+ return;
+
+ policy->cur = next_freq;
+ trace_cpu_frequency(next_freq, smp_processor_id());
+ } else if (sg_policy->next_freq != next_freq) {
+ sg_policy->next_freq = next_freq;
+ sg_policy->work_in_progress = true;
+ irq_work_queue(&sg_policy->irq_work);
+ }
+}
+
+/**
+ * get_next_freq - Compute a new frequency for a given cpufreq policy.
+ * @sg_cpu: schedutil cpu object to compute the new frequency for.
+ * @util: Current CPU utilization.
+ * @max: CPU capacity.
+ *
+ * If the utilization is frequency-invariant, choose the new frequency to be
+ * proportional to it, that is
+ *
+ * next_freq = C * max_freq * util / max
+ *
+ * Otherwise, approximate the would-be frequency-invariant utilization by
+ * util_raw * (curr_freq / max_freq) which leads to
+ *
+ * next_freq = C * curr_freq * util_raw / max
+ *
+ * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
+ *
+ * The lowest driver-supported frequency which is equal or greater than the raw
+ * next_freq (as calculated above) is returned, subject to policy min/max and
+ * cpufreq driver limitations.
+ */
+static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util,
+ unsigned long max)
+{
+ struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+ struct cpufreq_policy *policy = sg_policy->policy;
+ unsigned int freq = arch_scale_freq_invariant() ?
+ policy->cpuinfo.max_freq : policy->cur;
+
+ freq = (freq + (freq >> 2)) * util / max;
+
+ if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+ return sg_policy->next_freq;
+ sg_cpu->cached_raw_freq = freq;
+ return cpufreq_driver_resolve_freq(policy, freq);
+}
+
+static void sugov_get_util(unsigned long *util, unsigned long *max)
+{
+ struct rq *rq = this_rq();
+ unsigned long cfs_max;
+
+ cfs_max = arch_scale_cpu_capacity(NULL, smp_processor_id());
+
+ *util = min(rq->cfs.avg.util_avg, cfs_max);
+ *max = cfs_max;
+}
+
+static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
+ unsigned int flags)
+{
+ if (flags & SCHED_CPUFREQ_IOWAIT) {
+ sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+ } else if (sg_cpu->iowait_boost) {
+ s64 delta_ns = time - sg_cpu->last_update;
+
+ /* Clear iowait_boost if the CPU appears to have been idle. */
+ if (delta_ns > TICK_NSEC)
+ sg_cpu->iowait_boost = 0;
+ }
+}
+
+static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
+ unsigned long *max)
+{
+ unsigned long boost_util = sg_cpu->iowait_boost;
+ unsigned long boost_max = sg_cpu->iowait_boost_max;
+
+ if (!boost_util)
+ return;
+
+ if (*util * boost_max < *max * boost_util) {
+ *util = boost_util;
+ *max = boost_max;
+ }
+ sg_cpu->iowait_boost >>= 1;
+}
+
+static void sugov_update_single(struct update_util_data *hook, u64 time,
+ unsigned int flags)
+{
+ struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+ struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+ struct cpufreq_policy *policy = sg_policy->policy;
+ unsigned long util, max;
+ unsigned int next_f;
+
+ sugov_set_iowait_boost(sg_cpu, time, flags);
+ sg_cpu->last_update = time;
+
+ if (!sugov_should_update_freq(sg_policy, time))
+ return;
+
+ if (flags & SCHED_CPUFREQ_RT_DL) {
+ next_f = policy->cpuinfo.max_freq;
+ } else {
+ sugov_get_util(&util, &max);
+ sugov_iowait_boost(sg_cpu, &util, &max);
+ next_f = get_next_freq(sg_cpu, util, max);
+ }
+ sugov_update_commit(sg_policy, time, next_f);
+}
+
+static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
+ unsigned long util, unsigned long max,
+ unsigned int flags)
+{
+ struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+ struct cpufreq_policy *policy = sg_policy->policy;
+ unsigned int max_f = policy->cpuinfo.max_freq;
+ u64 last_freq_update_time = sg_policy->last_freq_update_time;
+ unsigned int j;
+
+ if (flags & SCHED_CPUFREQ_RT_DL)
+ return max_f;
+
+ sugov_iowait_boost(sg_cpu, &util, &max);
+
+ for_each_cpu(j, policy->cpus) {
+ struct sugov_cpu *j_sg_cpu;
+ unsigned long j_util, j_max;
+ s64 delta_ns;
+
+ if (j == smp_processor_id())
+ continue;
+
+ j_sg_cpu = &per_cpu(sugov_cpu, j);
+ /*
+ * If the CPU utilization was last updated before the previous
+ * frequency update and the time elapsed between the last update
+ * of the CPU utilization and the last frequency update is long
+ * enough, don't take the CPU into account as it probably is
+ * idle now (and clear iowait_boost for it).
+ */
+ delta_ns = last_freq_update_time - j_sg_cpu->last_update;
+ if (delta_ns > TICK_NSEC) {
+ j_sg_cpu->iowait_boost = 0;
+ continue;
+ }
+ if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
+ return max_f;
+
+ j_util = j_sg_cpu->util;
+ j_max = j_sg_cpu->max;
+ if (j_util * max > j_max * util) {
+ util = j_util;
+ max = j_max;
+ }
+
+ sugov_iowait_boost(j_sg_cpu, &util, &max);
+ }
+
+ return get_next_freq(sg_cpu, util, max);
+}
+
+static void sugov_update_shared(struct update_util_data *hook, u64 time,
+ unsigned int flags)
+{
+ struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+ struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+ unsigned long util, max;
+ unsigned int next_f;
+
+ sugov_get_util(&util, &max);
+
+ raw_spin_lock(&sg_policy->update_lock);
+
+ sg_cpu->util = util;
+ sg_cpu->max = max;
+ sg_cpu->flags = flags;
+
+ sugov_set_iowait_boost(sg_cpu, time, flags);
+ sg_cpu->last_update = time;
+
+ if (sugov_should_update_freq(sg_policy, time)) {
+ next_f = sugov_next_freq_shared(sg_cpu, util, max, flags);
+ sugov_update_commit(sg_policy, time, next_f);
+ }
+
+ raw_spin_unlock(&sg_policy->update_lock);
+}
+
+static void sugov_work(struct work_struct *work)
+{
+ struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
+
+ mutex_lock(&sg_policy->work_lock);
+ __cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
+ CPUFREQ_RELATION_L);
+ mutex_unlock(&sg_policy->work_lock);
+
+ sg_policy->work_in_progress = false;
+}
+
+static void sugov_irq_work(struct irq_work *irq_work)
+{
+ struct sugov_policy *sg_policy;
+
+ sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
+ schedule_work_on(smp_processor_id(), &sg_policy->work);
+}
+
+/************************** sysfs interface ************************/
+
+static struct sugov_tunables *global_tunables;
+static DEFINE_MUTEX(global_tunables_lock);
+
+static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
+{
+ return container_of(attr_set, struct sugov_tunables, attr_set);
+}
+
+static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
+{
+ struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+ return sprintf(buf, "%u\n", tunables->rate_limit_us);
+}
+
+static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
+ size_t count)
+{
+ struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+ struct sugov_policy *sg_policy;
+ unsigned int rate_limit_us;
+
+ if (kstrtouint(buf, 10, &rate_limit_us))
+ return -EINVAL;
+
+ tunables->rate_limit_us = rate_limit_us;
+
+ list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
+ sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;
+
+ return count;
+}
+
+static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
+
+static struct attribute *sugov_attributes[] = {
+ &rate_limit_us.attr,
+ NULL
+};
+
+static struct kobj_type sugov_tunables_ktype = {
+ .default_attrs = sugov_attributes,
+ .sysfs_ops = &governor_sysfs_ops,
+};
+
+/********************** cpufreq governor interface *********************/
+
+static struct cpufreq_governor schedutil_gov;
+
+static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
+{
+ struct sugov_policy *sg_policy;
+
+ sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
+ if (!sg_policy)
+ return NULL;
+
+ sg_policy->policy = policy;
+ init_irq_work(&sg_policy->irq_work, sugov_irq_work);
+ INIT_WORK(&sg_policy->work, sugov_work);
+ mutex_init(&sg_policy->work_lock);
+ raw_spin_lock_init(&sg_policy->update_lock);
+ return sg_policy;
+}
+
+static void sugov_policy_free(struct sugov_policy *sg_policy)
+{
+ mutex_destroy(&sg_policy->work_lock);
+ kfree(sg_policy);
+}
+
+static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
+{
+ struct sugov_tunables *tunables;
+
+ tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+ if (tunables) {
+ gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
+ if (!have_governor_per_policy())
+ global_tunables = tunables;
+ }
+ return tunables;
+}
+
+static void sugov_tunables_free(struct sugov_tunables *tunables)
+{
+ if (!have_governor_per_policy())
+ global_tunables = NULL;
+
+ kfree(tunables);
+}
+
+static int sugov_init(struct cpufreq_policy *policy)
+{
+ struct sugov_policy *sg_policy;
+ struct sugov_tunables *tunables;
+ unsigned int lat;
+ int ret = 0;
+
+ /* State should be equivalent to EXIT */
+ if (policy->governor_data)
+ return -EBUSY;
+
+ sg_policy = sugov_policy_alloc(policy);
+ if (!sg_policy)
+ return -ENOMEM;
+
+ mutex_lock(&global_tunables_lock);
+
+ if (global_tunables) {
+ if (WARN_ON(have_governor_per_policy())) {
+ ret = -EINVAL;
+ goto free_sg_policy;
+ }
+ policy->governor_data = sg_policy;
+ sg_policy->tunables = global_tunables;
+
+ gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
+ goto out;
+ }
+
+ tunables = sugov_tunables_alloc(sg_policy);
+ if (!tunables) {
+ ret = -ENOMEM;
+ goto free_sg_policy;
+ }
+
+ tunables->rate_limit_us = LATENCY_MULTIPLIER;
+ lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+ if (lat)
+ tunables->rate_limit_us *= lat;
+
+ policy->governor_data = sg_policy;
+ sg_policy->tunables = tunables;
+
+ ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
+ get_governor_parent_kobj(policy), "%s",
+ schedutil_gov.name);
+ if (ret)
+ goto fail;
+
+ out:
+ mutex_unlock(&global_tunables_lock);
+
+ cpufreq_enable_fast_switch(policy);
+ return 0;
+
+ fail:
+ policy->governor_data = NULL;
+ sugov_tunables_free(tunables);
+
+ free_sg_policy:
+ mutex_unlock(&global_tunables_lock);
+
+ sugov_policy_free(sg_policy);
+ pr_err("initialization failed (error %d)\n", ret);
+ return ret;
+}
+
+static void sugov_exit(struct cpufreq_policy *policy)
+{
+ struct sugov_policy *sg_policy = policy->governor_data;
+ struct sugov_tunables *tunables = sg_policy->tunables;
+ unsigned int count;
+
+ cpufreq_disable_fast_switch(policy);
+
+ mutex_lock(&global_tunables_lock);
+
+ count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
+ policy->governor_data = NULL;
+ if (!count)
+ sugov_tunables_free(tunables);
+
+ mutex_unlock(&global_tunables_lock);
+
+ sugov_policy_free(sg_policy);
+}
+
+static int sugov_start(struct cpufreq_policy *policy)
+{
+ struct sugov_policy *sg_policy = policy->governor_data;
+ unsigned int cpu;
+
+ sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
+ sg_policy->last_freq_update_time = 0;
+ sg_policy->next_freq = UINT_MAX;
+ sg_policy->work_in_progress = false;
+ sg_policy->need_freq_update = false;
+
+ for_each_cpu(cpu, policy->cpus) {
+ struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
+
+ sg_cpu->sg_policy = sg_policy;
+ if (policy_is_shared(policy)) {
+ sg_cpu->util = 0;
+ sg_cpu->max = 0;
+ sg_cpu->flags = SCHED_CPUFREQ_RT;
+ sg_cpu->last_update = 0;
+ sg_cpu->cached_raw_freq = 0;
+ sg_cpu->iowait_boost = 0;
+ sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+ cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+ sugov_update_shared);
+ } else {
+ cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+ sugov_update_single);
+ }
+ }
+ return 0;
+}
+
+static void sugov_stop(struct cpufreq_policy *policy)
+{
+ struct sugov_policy *sg_policy = policy->governor_data;
+ unsigned int cpu;
+
+ for_each_cpu(cpu, policy->cpus)
+ cpufreq_remove_update_util_hook(cpu);
+
+ synchronize_sched();
+
+ irq_work_sync(&sg_policy->irq_work);
+ cancel_work_sync(&sg_policy->work);
+}
+
+static void sugov_limits(struct cpufreq_policy *policy)
+{
+ struct sugov_policy *sg_policy = policy->governor_data;
+
+ if (!policy->fast_switch_enabled) {
+ mutex_lock(&sg_policy->work_lock);
+ cpufreq_policy_apply_limits(policy);
+ mutex_unlock(&sg_policy->work_lock);
+ }
+
+ sg_policy->need_freq_update = true;
+}
+
+static struct cpufreq_governor schedutil_gov = {
+ .name = "schedutil",
+ .owner = THIS_MODULE,
+ .init = sugov_init,
+ .exit = sugov_exit,
+ .start = sugov_start,
+ .stop = sugov_stop,
+ .limits = sugov_limits,
+};
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return &schedutil_gov;
+}
+#endif
+
+static int __init sugov_register(void)
+{
+ return cpufreq_register_governor(&schedutil_gov);
+}
+fs_initcall(sugov_register);
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index b6e4c16..9c15a91 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -18,10 +18,8 @@
if (WARN_ON(!trace->entries))
return;
- for (i = 0; i < trace->nr_entries; i++) {
- printk("%*c", 1 + spaces, ' ');
- print_ip_sym(trace->entries[i]);
- }
+ for (i = 0; i < trace->nr_entries; i++)
+ printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
}
EXPORT_SYMBOL_GPL(print_stack_trace);
@@ -29,7 +27,6 @@
struct stack_trace *trace, int spaces)
{
int i;
- unsigned long ip;
int generated;
int total = 0;
@@ -37,9 +34,8 @@
return 0;
for (i = 0; i < trace->nr_entries; i++) {
- ip = trace->entries[i];
- generated = snprintf(buf, size, "%*c[<%p>] %pS\n",
- 1 + spaces, ' ', (void *) ip, (void *) ip);
+ generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
+ (void *)trace->entries[i]);
total += generated;
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 7e4fad7..8a6970e 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -108,7 +108,7 @@
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
-static void clocksource_select(void);
+static void clocksource_select(bool force);
static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
@@ -415,7 +415,7 @@
{
mutex_lock(&clocksource_mutex);
if (__clocksource_watchdog_kthread())
- clocksource_select();
+ clocksource_select(false);
mutex_unlock(&clocksource_mutex);
return 0;
}
@@ -555,11 +555,12 @@
#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
-static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
+static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur,
+ bool force)
{
struct clocksource *cs;
- if (!finished_booting || list_empty(&clocksource_list))
+ if ((!finished_booting && !force) || list_empty(&clocksource_list))
return NULL;
/*
@@ -577,13 +578,13 @@
return NULL;
}
-static void __clocksource_select(bool skipcur)
+static void __clocksource_select(bool skipcur, bool force)
{
bool oneshot = tick_oneshot_mode_active();
struct clocksource *best, *cs;
/* Find the best suitable clocksource */
- best = clocksource_find_best(oneshot, skipcur);
+ best = clocksource_find_best(oneshot, skipcur, force);
if (!best)
return;
@@ -632,22 +633,40 @@
* Select the clocksource with the best rating, or the clocksource,
* which is selected by userspace override.
*/
-static void clocksource_select(void)
+static void clocksource_select(bool force)
{
- __clocksource_select(false);
+ return __clocksource_select(false, force);
}
static void clocksource_select_fallback(void)
{
- __clocksource_select(true);
+ __clocksource_select(true, false);
}
#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
-static inline void clocksource_select(void) { }
+
+static inline void clocksource_select(bool force) { }
static inline void clocksource_select_fallback(void) { }
#endif
+/**
+ * clocksource_select_force - Force re-selection of the best clocksource
+ * among registered clocksources
+ *
+ * clocksource_select() can't select the best clocksource before
+ * clocksource_done_booting() has run, and it must be called with
+ * clocksource_mutex held. Provide a new API that other files can
+ * call to select the best clocksource irrespective of the
+ * finished_booting flag.
+ */
+void clocksource_select_force(void)
+{
+ mutex_lock(&clocksource_mutex);
+ clocksource_select(true);
+ mutex_unlock(&clocksource_mutex);
+}
+
/*
* clocksource_done_booting - Called near the end of core bootup
*
@@ -664,7 +683,7 @@
* Run the watchdog first to eliminate unstable clock sources
*/
__clocksource_watchdog_kthread();
- clocksource_select();
+ clocksource_select(false);
mutex_unlock(&clocksource_mutex);
return 0;
}
@@ -755,6 +774,7 @@
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
+
/**
* __clocksource_register_scale - Used to install new clocksources
* @cs: clocksource to be registered
@@ -776,7 +796,7 @@
mutex_lock(&clocksource_mutex);
clocksource_enqueue(cs);
clocksource_enqueue_watchdog(cs);
- clocksource_select();
+ clocksource_select(false);
clocksource_select_watchdog(false);
mutex_unlock(&clocksource_mutex);
return 0;
@@ -799,7 +819,7 @@
{
mutex_lock(&clocksource_mutex);
__clocksource_change_rating(cs, rating);
- clocksource_select();
+ clocksource_select(false);
clocksource_select_watchdog(false);
mutex_unlock(&clocksource_mutex);
}
@@ -903,7 +923,7 @@
ret = sysfs_get_uname(buf, override_name, count);
if (ret >= 0)
- clocksource_select();
+ clocksource_select(false);
mutex_unlock(&clocksource_mutex);
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
index ca9fb80..38bc4d2 100644
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -75,7 +75,7 @@
int bin = min(fls(t->tv_sec), NUM_BINS-1);
sleep_time_bin[bin]++;
- pr_info("Suspended for %lld.%03lu seconds\n", (s64)t->tv_sec,
- t->tv_nsec / NSEC_PER_MSEC);
+ printk_deferred(KERN_INFO "Suspended for %lld.%03lu seconds\n",
+ (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);
}
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index f605186..400920e 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -206,6 +206,7 @@
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
+struct timer_base timer_base_deferrable;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;
@@ -229,6 +230,9 @@
per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
per_cpu(hrtimer_bases.nohz_active, cpu) = true;
}
+
+ timer_base_deferrable.migration_enabled = on;
+ timer_base_deferrable.nohz_active = true;
}
int timer_migration_handler(struct ctl_table *table, int write,
@@ -852,8 +856,11 @@
* the deferrable base.
*/
if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
- (tflags & TIMER_DEFERRABLE))
- base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
+ (tflags & TIMER_DEFERRABLE)) {
+ base = &timer_base_deferrable;
+ if (tflags & TIMER_PINNED)
+ base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
+ }
return base;
}
@@ -867,7 +874,9 @@
*/
if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
(tflags & TIMER_DEFERRABLE))
- base = this_cpu_ptr(&timer_bases[BASE_DEF]);
+ base = &timer_base_deferrable;
+ if (tflags & TIMER_PINNED)
+ base = this_cpu_ptr(&timer_bases[BASE_DEF]);
return base;
}
@@ -1652,8 +1661,10 @@
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
__run_timers(base);
- if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) {
+ __run_timers(&timer_base_deferrable);
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
+ }
}
/*
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index bf9885e..44ae68a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -27,6 +27,7 @@
#include <linux/kvm_para.h>
#include <linux/perf_event.h>
#include <linux/kthread.h>
+#include <soc/qcom/watchdog.h>
/*
* The run state of the lockup detectors is controlled by the content of the
@@ -364,8 +365,11 @@
if (per_cpu(hard_watchdog_warn, next_cpu) == true)
return;
- if (hardlockup_panic)
- panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
+ if (hardlockup_panic) {
+ pr_err("Watchdog detected hard LOCKUP on cpu %u",
+ next_cpu);
+ msm_trigger_wdog_bite();
+ }
else
WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);
@@ -427,6 +431,9 @@
return;
pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+ if (hardlockup_panic)
+ msm_trigger_wdog_bite();
+
print_modules();
print_irqtrace_events(current);
if (regs)
@@ -549,6 +556,9 @@
pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
smp_processor_id(), duration,
current->comm, task_pid_nr(current));
+
+ if (softlockup_panic)
+ msm_trigger_wdog_bite();
__this_cpu_write(softlockup_task_ptr_saved, current);
print_modules();
print_irqtrace_events(current);
diff --git a/mm/memblock.c b/mm/memblock.c
index 166f17a..49b7c1e 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1641,11 +1641,12 @@
memblock.memory.regions[idx].size) >= end;
}
-int __init_memblock memblock_overlaps_memory(phys_addr_t base, phys_addr_t size)
+bool __init_memblock memblock_overlaps_memory(phys_addr_t base,
+ phys_addr_t size)
{
memblock_cap_size(base, &size);
- return memblock_overlaps_region(&memblock.memory, base, size) >= 0;
+ return memblock_overlaps_region(&memblock.memory, base, size);
}
/**
diff --git a/mm/slub.c b/mm/slub.c
index 2b3e740..7aa0e97 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1419,6 +1419,10 @@
int err;
unsigned long i, count = oo_objects(s->oo);
+ /* Bail out if already initialised */
+ if (s->random_seq)
+ return 0;
+
err = cache_random_seq_create(s, count, GFP_KERNEL);
if (err) {
pr_err("SLUB: Unable to initialize free list for %s\n",
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 1108079..5488e4a 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -445,6 +445,7 @@
* @func: callback function on filter match
* @data: returned parameter for callback function
* @ident: string for calling module identification
+ * @sk: socket pointer (might be NULL)
*
* Description:
* Invokes the callback function with the received sk_buff and the given
@@ -468,7 +469,7 @@
*/
int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
void (*func)(struct sk_buff *, void *), void *data,
- char *ident)
+ char *ident, struct sock *sk)
{
struct receiver *r;
struct hlist_head *rl;
@@ -496,6 +497,7 @@
r->func = func;
r->data = data;
r->ident = ident;
+ r->sk = sk;
hlist_add_head_rcu(&r->list, rl);
d->entries++;
@@ -520,8 +522,11 @@
static void can_rx_delete_receiver(struct rcu_head *rp)
{
struct receiver *r = container_of(rp, struct receiver, rcu);
+ struct sock *sk = r->sk;
kmem_cache_free(rcv_cache, r);
+ if (sk)
+ sock_put(sk);
}
/**
@@ -596,8 +601,11 @@
spin_unlock(&can_rcvlists_lock);
/* schedule the receiver item for deletion */
- if (r)
+ if (r) {
+ if (r->sk)
+ sock_hold(r->sk);
call_rcu(&r->rcu, can_rx_delete_receiver);
+ }
}
EXPORT_SYMBOL(can_rx_unregister);
diff --git a/net/can/af_can.h b/net/can/af_can.h
index fca0fe9..b86f512 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -50,13 +50,14 @@
struct receiver {
struct hlist_node list;
- struct rcu_head rcu;
canid_t can_id;
canid_t mask;
unsigned long matches;
void (*func)(struct sk_buff *, void *);
void *data;
char *ident;
+ struct sock *sk;
+ struct rcu_head rcu;
};
#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 5e9ed5e..e4f694d 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1225,7 +1225,7 @@
err = can_rx_register(dev, op->can_id,
REGMASK(op->can_id),
bcm_rx_handler, op,
- "bcm");
+ "bcm", sk);
op->rx_reg_dev = dev;
dev_put(dev);
@@ -1234,7 +1234,7 @@
} else
err = can_rx_register(NULL, op->can_id,
REGMASK(op->can_id),
- bcm_rx_handler, op, "bcm");
+ bcm_rx_handler, op, "bcm", sk);
if (err) {
/* this bcm rx op is broken -> remove it */
list_del(&op->list);
diff --git a/net/can/gw.c b/net/can/gw.c
index 4551687..77c8af4 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -442,7 +442,7 @@
{
return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
gwj->ccgw.filter.can_mask, can_can_gw_rcv,
- gwj, "gw");
+ gwj, "gw", NULL);
}
static inline void cgw_unregister_filter(struct cgw_job *gwj)
diff --git a/net/can/raw.c b/net/can/raw.c
index b075f02..6dc546a 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -190,7 +190,7 @@
for (i = 0; i < count; i++) {
err = can_rx_register(dev, filter[i].can_id,
filter[i].can_mask,
- raw_rcv, sk, "raw");
+ raw_rcv, sk, "raw", sk);
if (err) {
/* clean up successfully registered filters */
while (--i >= 0)
@@ -211,7 +211,7 @@
if (err_mask)
err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
- raw_rcv, sk, "raw");
+ raw_rcv, sk, "raw", sk);
return err;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index ab6dc94..555ed4b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1696,24 +1696,19 @@
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call static_key_slow_dec() from irq context
- * If net_disable_timestamp() is called from irq context, defer the
- * static_key_slow_dec() calls.
- */
static atomic_t netstamp_needed_deferred;
+static void netstamp_clear(struct work_struct *work)
+{
+ int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+
+ while (deferred--)
+ static_key_slow_dec(&netstamp_needed);
+}
+static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif
void net_enable_timestamp(void)
{
-#ifdef HAVE_JUMP_LABEL
- int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
-
- if (deferred) {
- while (--deferred)
- static_key_slow_dec(&netstamp_needed);
- return;
- }
-#endif
static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);
@@ -1721,12 +1716,12 @@
void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
- if (in_interrupt()) {
- atomic_inc(&netstamp_needed_deferred);
- return;
- }
-#endif
+ /* net_disable_timestamp() can be called from non process context */
+ atomic_inc(&netstamp_needed_deferred);
+ schedule_work(&netstamp_work);
+#else
static_key_slow_dec(&netstamp_needed);
+#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 715e5d1..7506c03 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -227,7 +227,7 @@
opt = ireq->ipv6_opt;
if (!opt)
opt = rcu_dereference(np->opt);
- err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+ err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -281,7 +281,7 @@
dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
if (!IS_ERR(dst)) {
skb_dst_set(skb, dst);
- ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
+ ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
return;
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index da38621..0f99297 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -273,6 +273,7 @@
if (err) {
dev_warn(ds->dev, "Failed to create slave %d: %d\n",
index, err);
+ ds->ports[index].netdev = NULL;
return err;
}
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 02acfff..24d7aff 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -356,6 +356,7 @@
dev->header_ops = ð_header_ops;
dev->type = ARPHRD_ETHER;
dev->hard_header_len = ETH_HLEN;
+ dev->min_header_len = ETH_HLEN;
dev->mtu = ETH_DATA_LEN;
dev->addr_len = ETH_ALEN;
dev->tx_queue_len = 1000; /* Ethernet wants good queues */
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 72d6f05..ae20616 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1587,6 +1587,10 @@
goto validate_return_locked;
}
+ if (opt_iter + 1 == opt_len) {
+ err_offset = opt_iter;
+ goto validate_return_locked;
+ }
tag_len = tag[1];
if (tag_len > (opt_len - opt_iter)) {
err_offset = opt_iter + 1;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 32a08bc..1bc623d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1172,6 +1172,7 @@
psf->sf_crcount = im->crcount;
}
in_dev_put(pmc->interface);
+ kfree(pmc);
}
spin_unlock_bh(&im->lock);
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d24fa20..0bd3efe 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1607,6 +1607,7 @@
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_mark = fl4.flowi4_mark;
err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
len, 0, &ipc, &rt, MSG_DONTWAIT);
if (unlikely(err)) {
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index f226f408..65336f3 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1215,7 +1215,14 @@
pktinfo->ipi_ifindex = 0;
pktinfo->ipi_spec_dst.s_addr = 0;
}
- skb_dst_drop(skb);
+ /* We need to keep the dst for __ip_options_echo().
+ * We could restrict the test to opt.ts_needtime || opt.srr,
+ * but the following is good enough as IP options are not often used.
+ */
+ if (unlikely(IPCB(skb)->opt.optlen))
+ skb_dst_force(skb);
+ else
+ skb_dst_drop(skb);
}
int ip_setsockopt(struct sock *sk, int level,
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 5b2635e..06879e6 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -642,6 +642,8 @@
{
struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
+ if (!skb)
+ return 0;
pfh->wcheck = csum_partial((char *)&pfh->icmph,
sizeof(struct icmphdr), pfh->wcheck);
pfh->icmph.checksum = csum_fold(pfh->wcheck);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 814af89..6a90a0e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -772,6 +772,12 @@
ret = -EAGAIN;
break;
}
+ /* if __tcp_splice_read() got nothing while we have
+ * an skb in receive queue, we do not want to loop.
+ * This might happen with URG data.
+ */
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ break;
sk_wait_data(sk, &timeo, NULL);
if (signal_pending(current)) {
ret = sock_intr_errno(timeo);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index cd8e189..0e7c05b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2436,9 +2436,11 @@
int full_space = min_t(int, tp->window_clamp, allowed_space);
int window;
- if (mss > full_space)
+ if (unlikely(mss > full_space)) {
mss = full_space;
-
+ if (mss <= 0)
+ return 0;
+ }
if (free_space < (full_space >> 1)) {
icsk->icsk_ack.quick = 0;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 1c86c47..10d1deb 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -175,7 +175,7 @@
/* Restore final destination back after routing done */
fl6.daddr = sk->sk_v6_daddr;
- res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+ res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
np->tclass);
rcu_read_unlock();
return res;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 710bc79..ffc83d4 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -367,35 +367,37 @@
static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
- const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
- __be16 *p = (__be16 *)(skb->data + offset);
- int grehlen = offset + 4;
+ const struct gre_base_hdr *greh;
+ const struct ipv6hdr *ipv6h;
+ int grehlen = sizeof(*greh);
struct ip6_tnl *t;
+ int key_off = 0;
__be16 flags;
+ __be32 key;
- flags = p[0];
- if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
- if (flags&(GRE_VERSION|GRE_ROUTING))
- return;
- if (flags&GRE_KEY) {
- grehlen += 4;
- if (flags&GRE_CSUM)
- grehlen += 4;
- }
+ if (!pskb_may_pull(skb, offset + grehlen))
+ return;
+ greh = (const struct gre_base_hdr *)(skb->data + offset);
+ flags = greh->flags;
+ if (flags & (GRE_VERSION | GRE_ROUTING))
+ return;
+ if (flags & GRE_CSUM)
+ grehlen += 4;
+ if (flags & GRE_KEY) {
+ key_off = grehlen + offset;
+ grehlen += 4;
}
- /* If only 8 bytes returned, keyed message will be dropped here */
- if (!pskb_may_pull(skb, grehlen))
+ if (!pskb_may_pull(skb, offset + grehlen))
return;
ipv6h = (const struct ipv6hdr *)skb->data;
- p = (__be16 *)(skb->data + offset);
+ greh = (const struct gre_base_hdr *)(skb->data + offset);
+ key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
- flags & GRE_KEY ?
- *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
- p[1]);
+ key, greh->protocol);
if (!t)
return;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 59eb4ed..9a87bfb 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -163,7 +163,7 @@
* which are using proper atomic operations or spinlocks.
*/
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
- struct ipv6_txoptions *opt, int tclass)
+ __u32 mark, struct ipv6_txoptions *opt, int tclass)
{
struct net *net = sock_net(sk);
const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -230,7 +230,7 @@
skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
- skb->mark = sk->sk_mark;
+ skb->mark = mark;
mtu = dst_mtu(dst);
if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c1f497b..885b411 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -400,18 +400,19 @@
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
- const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
- __u8 nexthdr = ipv6h->nexthdr;
- __u16 off = sizeof(*ipv6h);
+ const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
+ unsigned int nhoff = raw - skb->data;
+ unsigned int off = nhoff + sizeof(*ipv6h);
+ u8 next, nexthdr = ipv6h->nexthdr;
while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
- __u16 optlen = 0;
struct ipv6_opt_hdr *hdr;
- if (raw + off + sizeof(*hdr) > skb->data &&
- !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
+ u16 optlen;
+
+ if (!pskb_may_pull(skb, off + sizeof(*hdr)))
break;
- hdr = (struct ipv6_opt_hdr *) (raw + off);
+ hdr = (struct ipv6_opt_hdr *)(skb->data + off);
if (nexthdr == NEXTHDR_FRAGMENT) {
struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
if (frag_hdr->frag_off)
@@ -422,20 +423,29 @@
} else {
optlen = ipv6_optlen(hdr);
}
+ /* cache hdr->nexthdr, since pskb_may_pull() might
+ * invalidate hdr
+ */
+ next = hdr->nexthdr;
if (nexthdr == NEXTHDR_DEST) {
- __u16 i = off + 2;
+ u16 i = 2;
+
+ /* Remember: hdr is no longer valid at this point. */
+ if (!pskb_may_pull(skb, off + optlen))
+ break;
+
while (1) {
struct ipv6_tlv_tnl_enc_lim *tel;
/* No more room for encapsulation limit */
- if (i + sizeof (*tel) > off + optlen)
+ if (i + sizeof(*tel) > optlen)
break;
- tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
+ tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
/* return index of option if found and valid */
if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
tel->length == 1)
- return i;
+ return i + off - nhoff;
/* else jump to next option */
if (tel->type)
i += tel->length + 2;
@@ -443,7 +453,7 @@
i++;
}
}
- nexthdr = hdr->nexthdr;
+ nexthdr = next;
off += optlen;
}
return 0;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 14a3903..1bdc703 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -81,7 +81,7 @@
static void mld_ifc_timer_expire(unsigned long data);
static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
+static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_clear_delrec(struct inet6_dev *idev);
static bool mld_in_v1_mode(const struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
@@ -692,9 +692,9 @@
dev_mc_del(dev, buf);
}
- if (mc->mca_flags & MAF_NOREPORT)
- goto done;
spin_unlock_bh(&mc->mca_lock);
+ if (mc->mca_flags & MAF_NOREPORT)
+ return;
if (!mc->idev->dead)
igmp6_leave_group(mc);
@@ -702,8 +702,6 @@
spin_lock_bh(&mc->mca_lock);
if (del_timer(&mc->mca_timer))
atomic_dec(&mc->mca_refcnt);
-done:
- ip6_mc_clear_src(mc);
spin_unlock_bh(&mc->mca_lock);
}
@@ -748,10 +746,11 @@
spin_unlock_bh(&idev->mc_lock);
}
-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
+static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
struct ifmcaddr6 *pmc, *pmc_prev;
- struct ip6_sf_list *psf, *psf_next;
+ struct ip6_sf_list *psf;
+ struct in6_addr *pmca = &im->mca_addr;
spin_lock_bh(&idev->mc_lock);
pmc_prev = NULL;
@@ -768,14 +767,21 @@
}
spin_unlock_bh(&idev->mc_lock);
+ spin_lock_bh(&im->mca_lock);
if (pmc) {
- for (psf = pmc->mca_tomb; psf; psf = psf_next) {
- psf_next = psf->sf_next;
- kfree(psf);
+ im->idev = pmc->idev;
+ im->mca_crcount = idev->mc_qrv;
+ im->mca_sfmode = pmc->mca_sfmode;
+ if (pmc->mca_sfmode == MCAST_INCLUDE) {
+ im->mca_tomb = pmc->mca_tomb;
+ im->mca_sources = pmc->mca_sources;
+ for (psf = im->mca_sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = im->mca_crcount;
}
in6_dev_put(pmc->idev);
kfree(pmc);
}
+ spin_unlock_bh(&im->mca_lock);
}
static void mld_clear_delrec(struct inet6_dev *idev)
@@ -904,7 +910,7 @@
mca_get(mc);
write_unlock_bh(&idev->lock);
- mld_del_delrec(idev, &mc->mca_addr);
+ mld_del_delrec(idev, mc);
igmp6_group_added(mc);
ma_put(mc);
return 0;
@@ -927,6 +933,7 @@
write_unlock_bh(&idev->lock);
igmp6_group_dropped(ma);
+ ip6_mc_clear_src(ma);
ma_put(ma);
return 0;
@@ -2501,15 +2508,17 @@
/* Withdraw multicast list */
read_lock_bh(&idev->lock);
- mld_ifc_stop_timer(idev);
- mld_gq_stop_timer(idev);
- mld_dad_stop_timer(idev);
for (i = idev->mc_list; i; i = i->next)
igmp6_group_dropped(i);
- read_unlock_bh(&idev->lock);
- mld_clear_delrec(idev);
+ /* Stop the timers after the group drop, or we will
+ * start them again in mld_ifc_event().
+ */
+ mld_ifc_stop_timer(idev);
+ mld_gq_stop_timer(idev);
+ mld_dad_stop_timer(idev);
+ read_unlock_bh(&idev->lock);
}
static void ipv6_mc_reset(struct inet6_dev *idev)
@@ -2531,8 +2540,10 @@
read_lock_bh(&idev->lock);
ipv6_mc_reset(idev);
- for (i = idev->mc_list; i; i = i->next)
+ for (i = idev->mc_list; i; i = i->next) {
+ mld_del_delrec(idev, i);
igmp6_group_added(i);
+ }
read_unlock_bh(&idev->lock);
}
@@ -2565,6 +2576,7 @@
/* Deactivate timers */
ipv6_mc_down(idev);
+ mld_clear_delrec(idev);
/* Delete all-nodes address. */
/* We cannot call ipv6_dev_mc_dec() directly, our caller in
@@ -2579,11 +2591,9 @@
write_lock_bh(&idev->lock);
while ((i = idev->mc_list) != NULL) {
idev->mc_list = i->next;
+
write_unlock_bh(&idev->lock);
-
- igmp6_group_dropped(i);
ma_put(i);
-
write_lock_bh(&idev->lock);
}
write_unlock_bh(&idev->lock);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index b1cdf80..40d7405 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1390,6 +1390,7 @@
err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
if (err) {
free_percpu(dev->tstats);
+ dev->tstats = NULL;
return err;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 28ec0a2..37c4b38 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -468,7 +468,7 @@
opt = ireq->ipv6_opt;
if (!opt)
opt = rcu_dereference(np->opt);
- err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
+ err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -839,7 +839,7 @@
dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
if (!IS_ERR(dst)) {
skb_dst_set(buff, dst);
- ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
+ ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
if (rst)
TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
@@ -989,6 +989,16 @@
return 0; /* don't send reset */
}
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+ /* We need to move header back to the beginning if xfrm6_policy_check()
+ * and tcp_v6_fill_cb() are going to be called again.
+ * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+ */
+ memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+ sizeof(struct inet6_skb_parm));
+}
+
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst,
@@ -1180,8 +1190,10 @@
sk_gfp_mask(sk, GFP_ATOMIC));
consume_skb(ireq->pktopts);
ireq->pktopts = NULL;
- if (newnp->pktoptions)
+ if (newnp->pktoptions) {
+ tcp_v6_restore_cb(newnp->pktoptions);
skb_set_owner_r(newnp->pktoptions, newsk);
+ }
}
}
@@ -1196,16 +1208,6 @@
return NULL;
}
-static void tcp_v6_restore_cb(struct sk_buff *skb)
-{
- /* We need to move header back to the beginning if xfrm6_policy_check()
- * and tcp_v6_fill_cb() are going to be called again.
- * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
- */
- memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
- sizeof(struct inet6_skb_parm));
-}
-
 /* The socket must have its spinlock held when we get
* here, unless it is a TCP_LISTEN socket.
*
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 2599af6..181e755c 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -273,6 +273,7 @@
int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
const struct l2tp_nl_cmd_ops *ops);
void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
+int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
/* Session reference counts. Incremented when code obtains a reference
* to a session.
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 8938b6b..c0f0750 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <asm/ioctls.h>
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
@@ -560,6 +561,30 @@
return err ? err : copied;
}
+int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+ struct sk_buff *skb;
+ int amount;
+
+ switch (cmd) {
+ case SIOCOUTQ:
+ amount = sk_wmem_alloc_get(sk);
+ break;
+ case SIOCINQ:
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
+ amount = skb ? skb->len : 0;
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return put_user(amount, (int __user *)arg);
+}
+EXPORT_SYMBOL(l2tp_ioctl);
+
static struct proto l2tp_ip_prot = {
.name = "L2TP/IP",
.owner = THIS_MODULE,
@@ -568,7 +593,7 @@
.bind = l2tp_ip_bind,
.connect = l2tp_ip_connect,
.disconnect = l2tp_ip_disconnect,
- .ioctl = udp_ioctl,
+ .ioctl = l2tp_ioctl,
.destroy = l2tp_ip_destroy_sock,
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index f092ac4..7095786 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -730,7 +730,7 @@
.bind = l2tp_ip6_bind,
.connect = l2tp_ip6_connect,
.disconnect = l2tp_ip6_disconnect,
- .ioctl = udp_ioctl,
+ .ioctl = l2tp_ioctl,
.destroy = l2tp_ip6_destroy_sock,
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 42120d9..50e1b7f 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -339,7 +339,7 @@
/* fast-forward to vendor IEs */
offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
- if (offset) {
+ if (offset < ifmsh->ie_len) {
len = ifmsh->ie_len - offset;
data = ifmsh->ie + offset;
if (skb_tailroom(skb) < len)
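
ieee80211_ie_split_vendor() returns the offset at which the vendor-specific IEs begin; when the configured buffer contains nothing but vendor IEs that offset is 0, and the old 'if (offset)' test silently dropped them. Comparing against ifmsh->ie_len appends the vendor tail whenever one exists. For example (values illustrative):

    /* A buffer holding a single vendor IE (tag 0xdd, 5-byte payload):
     *
     *   ie[] = { 0xdd, 0x05, 0x00, 0x11, 0x22, 0x33, 0x44 }, ie_len = 7
     *
     * ieee80211_ie_split_vendor(ie, ie_len, 0) == 0 here, since no
     * non-vendor IEs precede the vendor block -- yet 7 bytes still
     * have to be copied into the beacon.
     */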
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 94e4a59..458722b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2813,7 +2813,7 @@
struct virtio_net_hdr vnet_hdr = { 0 };
int offset = 0;
struct packet_sock *po = pkt_sk(sk);
- int hlen, tlen;
+ int hlen, tlen, linear;
int extra_len = 0;
/*
@@ -2874,8 +2874,9 @@
err = -ENOBUFS;
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
- skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
- __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
+ linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
+ linear = max(linear, min_t(int, len, dev->hard_header_len));
+ skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out_unlock;
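
The linear (kmalloc'ed) part of the skb was previously sized solely from the user-controlled vnet_hdr.hdr_len; a value smaller than the link-layer header left header bytes in paged data, while link-layer code such as eth_type_trans() unconditionally pulls the header from the linear segment. Rounding up closes that hole. A hypothetical helper mirroring the sizing rule (name and shape are not from the patch):

    /* The linear area must cover at least the device's link-layer
     * header (capped at the packet length), whatever hdr_len userspace
     * claimed in struct virtio_net_hdr. */
    static inline int tpacket_linear_len(int user_hdr_len, int len,
                                         int hard_header_len)
    {
            int floor = len < hard_header_len ? len : hard_header_len;

            return user_hdr_len > floor ? user_hdr_len : floor;
    }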
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index f935429..b12bc2a 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -16,16 +16,11 @@
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
-struct cls_mall_filter {
+struct cls_mall_head {
struct tcf_exts exts;
struct tcf_result res;
u32 handle;
- struct rcu_head rcu;
u32 flags;
-};
-
-struct cls_mall_head {
- struct cls_mall_filter *filter;
struct rcu_head rcu;
};
@@ -33,38 +28,29 @@
struct tcf_result *res)
{
struct cls_mall_head *head = rcu_dereference_bh(tp->root);
- struct cls_mall_filter *f = head->filter;
- if (tc_skip_sw(f->flags))
+ if (tc_skip_sw(head->flags))
return -1;
- return tcf_exts_exec(skb, &f->exts, res);
+ return tcf_exts_exec(skb, &head->exts, res);
}
static int mall_init(struct tcf_proto *tp)
{
- struct cls_mall_head *head;
-
- head = kzalloc(sizeof(*head), GFP_KERNEL);
- if (!head)
- return -ENOBUFS;
-
- rcu_assign_pointer(tp->root, head);
-
return 0;
}
-static void mall_destroy_filter(struct rcu_head *head)
+static void mall_destroy_rcu(struct rcu_head *rcu)
{
- struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
+ struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
+ rcu);
- tcf_exts_destroy(&f->exts);
-
- kfree(f);
+ tcf_exts_destroy(&head->exts);
+ kfree(head);
}
static int mall_replace_hw_filter(struct tcf_proto *tp,
- struct cls_mall_filter *f,
+ struct cls_mall_head *head,
unsigned long cookie)
{
struct net_device *dev = tp->q->dev_queue->dev;
@@ -74,7 +60,7 @@
offload.type = TC_SETUP_MATCHALL;
offload.cls_mall = &mall_offload;
offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
- offload.cls_mall->exts = &f->exts;
+ offload.cls_mall->exts = &head->exts;
offload.cls_mall->cookie = cookie;
return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
@@ -82,7 +68,7 @@
}
static void mall_destroy_hw_filter(struct tcf_proto *tp,
- struct cls_mall_filter *f,
+ struct cls_mall_head *head,
unsigned long cookie)
{
struct net_device *dev = tp->q->dev_queue->dev;
@@ -103,29 +89,20 @@
{
struct cls_mall_head *head = rtnl_dereference(tp->root);
struct net_device *dev = tp->q->dev_queue->dev;
- struct cls_mall_filter *f = head->filter;
- if (!force && f)
- return false;
+ if (!head)
+ return true;
- if (f) {
- if (tc_should_offload(dev, tp, f->flags))
- mall_destroy_hw_filter(tp, f, (unsigned long) f);
+ if (tc_should_offload(dev, tp, head->flags))
+ mall_destroy_hw_filter(tp, head, (unsigned long) head);
- call_rcu(&f->rcu, mall_destroy_filter);
- }
- kfree_rcu(head, rcu);
+ call_rcu(&head->rcu, mall_destroy_rcu);
return true;
}
static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
{
- struct cls_mall_head *head = rtnl_dereference(tp->root);
- struct cls_mall_filter *f = head->filter;
-
- if (f && f->handle == handle)
- return (unsigned long) f;
- return 0;
+ return 0UL;
}
static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
@@ -134,7 +111,7 @@
};
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
- struct cls_mall_filter *f,
+ struct cls_mall_head *head,
unsigned long base, struct nlattr **tb,
struct nlattr *est, bool ovr)
{
@@ -147,11 +124,11 @@
return err;
if (tb[TCA_MATCHALL_CLASSID]) {
- f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
- tcf_bind_filter(tp, &f->res, base);
+ head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
+ tcf_bind_filter(tp, &head->res, base);
}
- tcf_exts_change(tp, &f->exts, &e);
+ tcf_exts_change(tp, &head->exts, &e);
return 0;
}
@@ -162,21 +139,17 @@
unsigned long *arg, bool ovr)
{
struct cls_mall_head *head = rtnl_dereference(tp->root);
- struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
struct net_device *dev = tp->q->dev_queue->dev;
- struct cls_mall_filter *f;
struct nlattr *tb[TCA_MATCHALL_MAX + 1];
+ struct cls_mall_head *new;
u32 flags = 0;
int err;
if (!tca[TCA_OPTIONS])
return -EINVAL;
- if (head->filter)
- return -EBUSY;
-
- if (fold)
- return -EINVAL;
+ if (head)
+ return -EEXIST;
err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
tca[TCA_OPTIONS], mall_policy);
@@ -189,23 +162,23 @@
return -EINVAL;
}
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
return -ENOBUFS;
- tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
+ tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
if (!handle)
handle = 1;
- f->handle = handle;
- f->flags = flags;
+ new->handle = handle;
+ new->flags = flags;
- err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
+ err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
if (err)
goto errout;
if (tc_should_offload(dev, tp, flags)) {
- err = mall_replace_hw_filter(tp, f, (unsigned long) f);
+ err = mall_replace_hw_filter(tp, new, (unsigned long) new);
if (err) {
if (tc_skip_sw(flags))
goto errout;
@@ -214,39 +187,29 @@
}
}
- *arg = (unsigned long) f;
- rcu_assign_pointer(head->filter, f);
-
+ *arg = (unsigned long) head;
+ rcu_assign_pointer(tp->root, new);
+ if (head)
+ call_rcu(&head->rcu, mall_destroy_rcu);
return 0;
errout:
- kfree(f);
+ kfree(new);
return err;
}
static int mall_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct cls_mall_head *head = rtnl_dereference(tp->root);
- struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
- struct net_device *dev = tp->q->dev_queue->dev;
-
- if (tc_should_offload(dev, tp, f->flags))
- mall_destroy_hw_filter(tp, f, (unsigned long) f);
-
- RCU_INIT_POINTER(head->filter, NULL);
- tcf_unbind_filter(tp, &f->res);
- call_rcu(&f->rcu, mall_destroy_filter);
- return 0;
+ return -EOPNOTSUPP;
}
static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
struct cls_mall_head *head = rtnl_dereference(tp->root);
- struct cls_mall_filter *f = head->filter;
if (arg->count < arg->skip)
goto skip;
- if (arg->fn(tp, (unsigned long) f, arg) < 0)
+ if (arg->fn(tp, (unsigned long) head, arg) < 0)
arg->stop = 1;
skip:
arg->count++;
@@ -255,28 +218,28 @@
static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
- struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
+ struct cls_mall_head *head = (struct cls_mall_head *) fh;
struct nlattr *nest;
- if (!f)
+ if (!head)
return skb->len;
- t->tcm_handle = f->handle;
+ t->tcm_handle = head->handle;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (!nest)
goto nla_put_failure;
- if (f->res.classid &&
- nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
+ if (head->res.classid &&
+ nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
goto nla_put_failure;
- if (tcf_exts_dump(skb, &f->exts))
+ if (tcf_exts_dump(skb, &head->exts))
goto nla_put_failure;
nla_nest_end(skb, nest);
- if (tcf_exts_dump_stats(skb, &f->exts) < 0)
+ if (tcf_exts_dump_stats(skb, &head->exts) < 0)
goto nla_put_failure;
return skb->len;
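
The rewrite drops the cls_mall_filter/cls_mall_head indirection: matchall admits exactly one instance per tcf_proto, so the head itself now carries the exts/handle/flags, is allocated on the first change request, and is swapped wholesale under RCU (mall_init() no longer allocates anything, and mall_delete() reports -EOPNOTSUPP because removing the filter means destroying the whole proto). The replacement in mall_change() follows the usual RCU publish-then-reclaim discipline, condensed from the hunk above:

    /* Readers run in mall_classify() under rcu_read_lock_bh(), so an
     * old head must stay valid until a grace period elapses after it
     * is unpublished. */
    rcu_assign_pointer(tp->root, new);               /* publish new head */
    if (head)                                        /* old head, if any */
            call_rcu(&head->rcu, mall_destroy_rcu);  /* free after GP    */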
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 176af30..6a2532d 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -222,7 +222,8 @@
SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
rcu_read_lock();
- res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
+ res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
+ np->tclass);
rcu_read_unlock();
return res;
}
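
This hunk only tracks an ip6_xmit() signature change: the function now takes the mark explicitly so transmit paths can carry the socket mark into the flow. In this tree the prototype is assumed to match the 4.9-era backport:

    int ip6_xmit(const struct sock *sk, struct sk_buff *skb,
                 struct flowi6 *fl6, __u32 mark,
                 struct ipv6_txoptions *opt, int tclass);

SCTP simply forwards sk->sk_mark; callers that want no mark pass 0.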
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ca12aa3..6cbe5bd 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -7427,7 +7427,8 @@
*/
release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
- BUG_ON(sk != asoc->base.sk);
+ if (sk != asoc->base.sk)
+ goto do_error;
lock_sock(sk);
*timeo_p = current_timeo;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 92db80d..034f70c 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5874,6 +5874,7 @@
break;
}
cfg->ht_opmode = ht_opmode;
+ mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
}
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
1, 65535, mask,
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 24bd84d..8b918f8 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -5859,7 +5859,7 @@
return error;
/* Obtain a SID for the context, if one was specified. */
- if (size && str[1] && str[1] != '\n') {
+ if (size && str[0] && str[0] != '\n') {
if (str[size-1] == '\n') {
str[size-1] = 0;
size--;
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index c850345..dfa5156 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -419,7 +419,6 @@
{
unsigned long flags;
struct snd_seq_event_cell *ptr;
- int max_count = 5 * HZ;
if (snd_BUG_ON(!pool))
return -EINVAL;
@@ -432,14 +431,8 @@
if (waitqueue_active(&pool->output_sleep))
wake_up(&pool->output_sleep);
- while (atomic_read(&pool->counter) > 0) {
- if (max_count == 0) {
- pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
- break;
- }
+ while (atomic_read(&pool->counter) > 0)
schedule_timeout_uninterruptible(1);
- max_count--;
- }
/* release all resources */
spin_lock_irqsave(&pool->lock, flags);
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index 0bec02e..450c518 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -181,6 +181,8 @@
}
}
+static void queue_use(struct snd_seq_queue *queue, int client, int use);
+
/* allocate a new queue -
* return queue index value or negative value for error
*/
@@ -192,11 +194,11 @@
if (q == NULL)
return -ENOMEM;
q->info_flags = info_flags;
+ queue_use(q, client, 1);
if (queue_list_add(q) < 0) {
queue_delete(q);
return -ENOMEM;
}
- snd_seq_queue_use(q->queue, client, 1); /* use this queue */
return q->queue;
}
@@ -502,19 +504,9 @@
return result;
}
-
-/* use or unuse this queue -
- * if it is the first client, starts the timer.
- * if it is not longer used by any clients, stop the timer.
- */
-int snd_seq_queue_use(int queueid, int client, int use)
+/* use or unuse this queue */
+static void queue_use(struct snd_seq_queue *queue, int client, int use)
{
- struct snd_seq_queue *queue;
-
- queue = queueptr(queueid);
- if (queue == NULL)
- return -EINVAL;
- mutex_lock(&queue->timer_mutex);
if (use) {
if (!test_and_set_bit(client, queue->clients_bitmap))
queue->clients++;
@@ -529,6 +521,21 @@
} else {
snd_seq_timer_close(queue);
}
+}
+
+/* use or unuse this queue -
+ * if it is the first client, start the timer.
+ * if it is no longer used by any clients, stop the timer.
+ */
+int snd_seq_queue_use(int queueid, int client, int use)
+{
+ struct snd_seq_queue *queue;
+
+ queue = queueptr(queueid);
+ if (queue == NULL)
+ return -EINVAL;
+ mutex_lock(&queue->timer_mutex);
+ queue_use(queue, client, use);
mutex_unlock(&queue->timer_mutex);
queuefree(queue);
return 0;
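
Splitting the lock-free core out of snd_seq_queue_use() lets queue creation mark the queue in-use before it becomes globally visible, closing a race in which a concurrent ioctl could look the queue up and delete it between queue_list_add() and the old snd_seq_queue_use() call (which also re-took the queue's own timer_mutex). The ordering the hunk establishes, condensed:

    /* Creation side (no lock needed: q is not yet published):
     *
     *   q = queue_new(owner, locked);
     *   queue_use(q, client, 1);    -- take the use count first
     *   queue_list_add(q);          -- only then make q discoverable
     *
     * All later callers still go through snd_seq_queue_use(), which
     * wraps the same queue_use() core in the queue's timer_mutex. */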
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 56e5204..4bf4833 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3638,6 +3638,7 @@
HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 90009c0..ab3c280 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -754,8 +754,9 @@
goto error;
}
+ line6_get_interval(line6);
+
if (properties->capabilities & LINE6_CAP_CONTROL) {
- line6_get_interval(line6);
ret = line6_init_cap_control(line6);
if (ret < 0)
goto error;
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 9ff0db4..933aeec 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -1199,7 +1199,7 @@
BUG_ON(1);
}
- perf_hpp__register_sort_field(fmt);
+ perf_hpp__prepend_sort_field(fmt);
return 0;
}
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 3738839..18cfcdc9 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -521,6 +521,12 @@
list_add_tail(&format->sort_list, &list->sorts);
}
+void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
+ struct perf_hpp_fmt *format)
+{
+ list_add(&format->sort_list, &list->sorts);
+}
+
void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
list_del(&format->list);
@@ -560,6 +566,10 @@
perf_hpp_list__for_each_sort_list(list, fmt) {
struct perf_hpp_fmt *pos;
+ /* skip sort-only fields ("sort_compute" in perf diff) */
+ if (!fmt->entry && !fmt->color)
+ continue;
+
perf_hpp_list__for_each_format(list, pos) {
if (fmt_equal(fmt, pos))
goto next;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 9928fed..a440a04 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -282,6 +282,8 @@
struct perf_hpp_fmt *format);
void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
struct perf_hpp_fmt *format);
+void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
+ struct perf_hpp_fmt *format);
static inline void perf_hpp__column_register(struct perf_hpp_fmt *format)
{
@@ -293,6 +295,11 @@
perf_hpp_list__register_sort_field(&perf_hpp_list, format);
}
+static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
+{
+ perf_hpp_list__prepend_sort_field(&perf_hpp_list, format);
+}
+
#define perf_hpp_list__for_each_format(_list, format) \
list_for_each_entry(format, &(_list)->fields, list)
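
perf_hpp_list__prepend_sort_field() differs from the register variant only in calling list_add() instead of list_add_tail(): on a kernel-style circular list that inserts at the head, so the sort key perf diff builds at runtime is evaluated before the static defaults rather than after them. An illustration of the two list.h primitives:

    /* Given an empty head H, then inserting a and b:
     *
     *   list_add_tail(a, H); list_add_tail(b, H);   ==>  H -> a -> b
     *   list_add(a, H);      list_add(b, H);        ==>  H -> b -> a
     *
     * Prepending therefore puts the newest sort field first in
     * perf_hpp_list__for_each_sort_list() iteration order. */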