Merge "drivers: cpu_cooling: Remove cooling list lock during post suspend handling"
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 9c2d647..9df16fb 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -326,6 +326,7 @@
compatible = "qcom,qcs605-mtp"
compatible = "qcom,sda670-cdp"
compatible = "qcom,sda670-mtp"
+compatible = "qcom,sda670-hdk"
compatible = "qcom,msm8952-rumi"
compatible = "qcom,msm8952-sim"
compatible = "qcom,msm8952-qrd"
diff --git a/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt
index 87a551ba..885be72 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsxv26_i2c.txt
@@ -24,6 +24,7 @@
- synaptics,reset-on-state : reset gpio active state.
- synaptics,power-on-state : power switch active state.
- synaptics,ub-i2c-addr : microbootloader mode I2C slave address.
+ - synaptics,do-not-disable-regulators : If specified, regulators cannot be disabled/enabled during suspend/resume.
- synaptics,cap-button-codes : virtual key code mappings to be used.
- synaptics,vir-button-codes : virtual key code and the response region on panel.
- synaptics,wakeup-gestures-en: enable wakeup gestures.
@@ -33,6 +34,7 @@
- synaptics,reset-active-ms : reset active duration for controller (ms), default 100.
- synaptics,power-delay-ms : power delay for controller (ms), default 100.
- synaptics,max-y-for-2d : maximal y value of the panel.
+ - synaptics,bus-lpm-cur-uA : I2C bus idle mode current setting.
- synaptics,swap-axes : specify whether to swap axes.
- synaptics,resume-in-workqueue : specify whether to defer the resume to workqueue.
- clock-names : Clock names used for secure touch. They are: "iface_clk", "core_clk"
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
index afeb65d..43e7380 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt
@@ -262,6 +262,39 @@
capacity learning cycle. If this is not specified, then
the default value is 0. Unit is in decipercentage.
+- qcom,esr-disable
+ Usage: optional
+ Value type: <bool>
+ Definition: Boolean property to disable ESR estimation. If not defined
+ ESR estimation stays enabled for charge-cycles.
+
+- qcom,esr-discharge-enable
+ Usage: optional
+ Value type: <bool>
+ Definition: Boolean property to enable ESR estimation during discharge.
+ Only valid if 'qcom,esr-disable' is not defined.
+
+- qcom,esr-qual-current-ua
+ Usage: optional
+ Value type: <u32>
+ Definition: Minimum current differential in uA to qualify an ESR
+ reading as valid. If not defined the value defaults
+ to 130mA.
+
+- qcom,esr-qual-vbatt-uv
+ Usage: optional
+ Value type: <u32>
+ Definition: Minimum vbatt differential in uV to qualify an ESR
+ reading as valid. If not defined the value defaults
+ to 7mV.
+
+- qcom,esr-disable-soc
+ Usage: optional
+ Value type: <u32>
+ Definition: Minimum battery SOC below which ESR will not be
+ attempted by QG. If not defined the value defaults
+ to 10%.
+
==========================================================
Second Level Nodes - Peripherals managed by QGAUGE driver
==========================================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index afa8009..9de24c3 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -193,6 +193,12 @@
to be get from these properties defined in battery profile:
qcom,step-chg-ranges, qcom,jeita-fcc-ranges, qcom,jeita-fv-ranges.
+- qcom,disable-stat-sw-override
+ Usage: optional
+ Value type: bool
+ Definition: Boolean flag which when present disables STAT pin default software
+ override configuration.
+
=============================================
Second Level Nodes - SMB2 Charger Peripherals
=============================================
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 7f79f40..f075a2a 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -74,6 +74,7 @@
Defaults to 26 MHz if not specified.
- extcon: phandle to external connector (Refer Documentation/devicetree/bindings/extcon/extcon-gpio.txt for more details).
- non-removable : defines if the connected ufs device is not removable
+- force-ufshc-probe : Forces probing of the (non-removable) UFS device even if it is not the boot device.
Note: If above properties are not defined it can be assumed that the supply
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 0b8f21f..d771f95 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2662,6 +2662,9 @@
noalign [KNL,ARM]
+ noaltinstr [S390] Disables alternative instructions patching
+ (CPU alternatives feature).
+
noapic [SMP,APIC] Tells the kernel to not make use of any
IOAPICs that may be present in the system.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 1f5eab4..e46c14f 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2118,6 +2118,9 @@
ARM 64-bit FP registers have the following id bit patterns:
0x4030 0000 0012 0 <regno:12>
+ARM firmware pseudo-registers have the following bit pattern:
+ 0x4030 0000 0014 <regno:16>
+
arm64 registers are mapped using the lower 32 bits. The upper 16 of
that is the register group type, or coprocessor number:
@@ -2134,6 +2137,9 @@
arm64 system registers have the following id bit patterns:
0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
+arm64 firmware pseudo-registers have the following bit pattern:
+ 0x6030 0000 0014 <regno:16>
+
MIPS registers are mapped using the lower 32 bits. The upper 16 of that is
the register group type:
@@ -2656,7 +2662,8 @@
and execute guest code when KVM_RUN is called.
- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
- - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
+ - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
+ backward compatible with v0.2) for the CPU.
Depends on KVM_CAP_ARM_PSCI_0_2.
- KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
Depends on KVM_CAP_ARM_PMU_V3.
diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virtual/kvm/arm/psci.txt
new file mode 100644
index 0000000..aafdab8
--- /dev/null
+++ b/Documentation/virtual/kvm/arm/psci.txt
@@ -0,0 +1,30 @@
+KVM implements the PSCI (Power State Coordination Interface)
+specification in order to provide services such as CPU on/off, reset
+and power-off to the guest.
+
+The PSCI specification is regularly updated to provide new features,
+and KVM implements these updates if they make sense from a virtualization
+point of view.
+
+This means that a guest booted on two different versions of KVM can
+observe two different "firmware" revisions. This could cause issues if
+a given guest is tied to a particular PSCI revision (unlikely), or if
+a migration causes a different PSCI version to be exposed out of the
+blue to an unsuspecting guest.
+
+In order to remedy this situation, KVM exposes a set of "firmware
+pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
+interface. These registers can be saved/restored by userspace, and set
+to a convenient value if required.
+
+The following register is defined:
+
+* KVM_REG_ARM_PSCI_VERSION:
+
+ - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
+ (and thus has already been initialized)
+ - Returns the current PSCI version on GET_ONE_REG (defaulting to the
+ highest PSCI version implemented by KVM and compatible with v0.2)
+ - Allows any PSCI version implemented by KVM and compatible with
+ v0.2 to be set with SET_ONE_REG
+ - Affects the whole VM (even if the register view is per-vcpu)
diff --git a/Makefile b/Makefile
index bf5ea11..2f08005 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 96
+SUBLEVEL = 101
EXTRAVERSION =
NAME = Roaring Lionus
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
index f939794..5647469 100644
--- a/arch/alpha/include/asm/futex.h
+++ b/arch/alpha/include/asm/futex.h
@@ -29,18 +29,10 @@
: "r" (uaddr), "r"(oparg) \
: "memory")
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -66,17 +58,9 @@
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
index 11e1b1f..eb887dd 100644
--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h
@@ -73,20 +73,11 @@
#endif
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
#ifndef CONFIG_ARC_HAS_LLSC
preempt_disable(); /* to guarantee atomic r-m-w of futex op */
#endif
@@ -118,30 +109,9 @@
preempt_enable();
#endif
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 47c9554..2b9c2be 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -88,7 +88,6 @@
clocks = <&clks IMX6QDL_CLK_CKO>;
VDDA-supply = <®_2p5v>;
VDDIO-supply = <®_3p3v>;
- lrclk-strength = <3>;
};
};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dts
index 6909ef5..6c6e640 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dts
@@ -28,3 +28,7 @@
&cnss_sdio {
status = "okay";
};
+
+&blsp1_uart2b_hs {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dtsi
index d447724..16f933f 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-dualwifi-cdp.dtsi
@@ -36,3 +36,11 @@
/delete-property/ qcom,devfreq,freq-table;
/delete-property/ cd-gpios;
};
+
+&soc {
+ bluetooth: bt_qca6174 {
+ compatible = "qca,qca6174";
+ qca,bt-reset-gpio = <&pmxpoorwills_gpios 4 0>; /* BT_EN */
+ qca,bt-vdd-pa-supply = <&vreg_wlan>;
+ };
+};
diff --git a/arch/arm/configs/msm8909-perf_defconfig b/arch/arm/configs/msm8909-perf_defconfig
index 1eaf4ff..f42b90a 100644
--- a/arch/arm/configs/msm8909-perf_defconfig
+++ b/arch/arm/configs/msm8909-perf_defconfig
@@ -31,13 +31,15 @@
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
# CONFIG_MEMBARRIER is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_OPROFILE=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
CONFIG_ARCH_MMAP_RND_BITS=16
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -57,6 +59,7 @@
CONFIG_CMA=y
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
@@ -145,6 +148,7 @@
CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -263,6 +267,7 @@
CONFIG_CNSS_SDIO=y
CONFIG_CLD_HL_SDIO_CORE=y
CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=y
CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
@@ -272,6 +277,8 @@
CONFIG_SERIAL_MSM=y
CONFIG_SERIAL_MSM_CONSOLE=y
CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_DIAG_USES_SMD=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_SMD_PKT=y
@@ -321,6 +328,16 @@
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISP_V1=y
+CONFIG_MSM_ISPIF=y
+CONFIG_QCOM_KGSL=y
CONFIG_FB=y
CONFIG_FB_VIRTUAL=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -350,6 +367,9 @@
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_MSM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_GADGET=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_DUAL_ROLE_USB_INTF=y
CONFIG_USB_GADGET=y
@@ -359,6 +379,9 @@
CONFIG_USB_CI13XXX_MSM=y
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_RMNET_BAM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_UEVENT=y
CONFIG_USB_CONFIGFS_F_DIAG=y
@@ -374,8 +397,7 @@
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_CLASS_FLASH=y
CONFIG_LEDS_QPNP=y
CONFIG_LEDS_QPNP_VIBRATOR=y
CONFIG_LEDS_TRIGGERS=y
@@ -391,6 +413,8 @@
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ION=y
CONFIG_ION_MSM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_REVID=y
@@ -398,6 +422,7 @@
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
CONFIG_MSM_BOOT_STATS=y
@@ -412,6 +437,7 @@
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_GLINK=y
+CONFIG_MSM_TZ_SMMU=y
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
CONFIG_MSM_GLINK_SPI_XPRT=y
@@ -423,9 +449,12 @@
CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_BAM_DMUX=y
CONFIG_CNSS_CRYPTO=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
@@ -470,8 +499,13 @@
CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SINK_TPIU=y
+CONFIG_CORESIGHT_SOURCE_ETM3X=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_DBGUI=y
CONFIG_CORESIGHT_STM=y
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
@@ -486,13 +520,14 @@
CONFIG_SECURITY_SMACK=y
CONFIG_SECURITY_APPARMOR=y
CONFIG_DEFAULT_SECURITY_DAC=y
-CONFIG_CRYPTO_CTR=y
-CONFIG_CRYPTO_XTS=y
CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_CRC32=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM_CRYPTO=y
diff --git a/arch/arm/configs/msm8909_defconfig b/arch/arm/configs/msm8909_defconfig
index 5e6a68b..9eb0afd 100644
--- a/arch/arm/configs/msm8909_defconfig
+++ b/arch/arm/configs/msm8909_defconfig
@@ -27,11 +27,14 @@
CONFIG_SCHED_TUNE=y
CONFIG_DEFAULT_USE_ENERGY_AWARE=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
@@ -138,6 +141,7 @@
CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -257,16 +261,25 @@
CONFIG_CNSS_SDIO=y
CONFIG_CLD_HL_SDIO_CORE=y
CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=y
CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_MSM=y
CONFIG_SERIAL_MSM_CONSOLE=y
CONFIG_SERIAL_MSM_HS=y
CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_DIAG_USES_SMD=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_SMD_PKT=y
@@ -316,6 +329,16 @@
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISP_V1=y
+CONFIG_MSM_ISPIF=y
+CONFIG_QCOM_KGSL=y
CONFIG_FB=y
CONFIG_FB_VIRTUAL=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -340,11 +363,16 @@
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MONTEREY=y
CONFIG_HID_MULTITOUCH=y
-CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_MSM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_GADGET=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_DUAL_ROLE_USB_INTF=y
CONFIG_USB_GADGET=y
@@ -354,7 +382,12 @@
CONFIG_USB_CI13XXX_MSM=y
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_RMNET_BAM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_UEVENT=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
@@ -369,8 +402,7 @@
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_CLASS_FLASH=y
CONFIG_LEDS_QPNP=y
CONFIG_LEDS_QPNP_VIBRATOR=y
CONFIG_LEDS_TRIGGERS=y
@@ -386,6 +418,8 @@
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ION=y
CONFIG_ION_MSM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_REVID=y
@@ -393,6 +427,7 @@
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
CONFIG_MSM_BOOT_STATS=y
@@ -407,6 +442,7 @@
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_GLINK=y
+CONFIG_MSM_TZ_SMMU=y
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
CONFIG_MSM_GLINK_SPI_XPRT=y
@@ -418,10 +454,14 @@
CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_BAM_DMUX=y
CONFIG_CNSS_CRYPTO=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
CONFIG_QTI_MPM=y
@@ -435,13 +475,16 @@
CONFIG_EXT4_FS_SECURITY=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
@@ -466,6 +509,7 @@
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
# CONFIG_DETECT_HUNG_TASK is not set
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_TIMEOUT=5
@@ -496,8 +540,13 @@
CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SINK_TPIU=y
+CONFIG_CORESIGHT_SOURCE_ETM3X=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_DBGUI=y
CONFIG_CORESIGHT_STM=y
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
@@ -515,10 +564,13 @@
CONFIG_CRYPTO_CTR=y
CONFIG_CRYPTO_XTS=y
CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_CRC32=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_ARM_CRYPTO=y
diff --git a/arch/arm/configs/msm8909w_defconfig b/arch/arm/configs/msm8909w_defconfig
index 2eb602b..3c60037 100644
--- a/arch/arm/configs/msm8909w_defconfig
+++ b/arch/arm/configs/msm8909w_defconfig
@@ -6,7 +6,6 @@
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_SCHED_WALT=y
CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_RCU_EXPERT=y
@@ -18,7 +17,6 @@
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
-CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
@@ -29,12 +27,18 @@
CONFIG_SCHED_TUNE=y
CONFIG_DEFAULT_USE_ENERGY_AWARE=y
CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
+CONFIG_OPROFILE=m
CONFIG_CC_STACKPROTECTOR_REGULAR=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -50,11 +54,11 @@
CONFIG_SCHED_MC=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
-CONFIG_HIGHMEM=y
CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
@@ -65,12 +69,10 @@
CONFIG_PM_AUTOSLEEP=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
-CONFIG_PM_DEBUG=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
-CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
@@ -111,14 +113,13 @@
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
-CONFIG_NETFILTER_XT_TARGET_TEE=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@@ -128,7 +129,6 @@
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_DSCP=y
-CONFIG_NETFILTER_XT_MATCH_ESP=y
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
CONFIG_NETFILTER_XT_MATCH_HELPER=y
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
@@ -150,6 +150,7 @@
CONFIG_NETFILTER_XT_MATCH_TIME=y
CONFIG_NETFILTER_XT_MATCH_U32=y
CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_DUP_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_MATCH_AH=y
CONFIG_IP_NF_MATCH_ECN=y
@@ -168,14 +169,13 @@
CONFIG_IP_NF_ARPFILTER=y
CONFIG_IP_NF_ARP_MANGLE=y
CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_NF_DUP_IPV6=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_RPFILTER=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
-CONFIG_BRIDGE_NF_EBTABLES=y
-CONFIG_BRIDGE_EBT_BROUTE=y
CONFIG_L2TP=y
CONFIG_L2TP_DEBUGFS=y
CONFIG_L2TP_V3=y
@@ -185,8 +185,6 @@
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
CONFIG_NET_SCH_PRIO=y
-CONFIG_NET_SCH_MULTIQ=y
-CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_CLS_FW=y
CONFIG_NET_CLS_U32=y
CONFIG_CLS_U32_MARK=y
@@ -199,8 +197,6 @@
CONFIG_NET_EMATCH_TEXT=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_GACT=y
-CONFIG_NET_ACT_MIRRED=y
-CONFIG_NET_ACT_SKBEDIT=y
CONFIG_DNS_RESOLVER=y
CONFIG_RMNET_DATA=y
CONFIG_RMNET_DATA_FC=y
@@ -219,26 +215,11 @@
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_HDCP_QSEECOM=y
CONFIG_QSEECOM=y
CONFIG_UID_SYS_STATS=y
-CONFIG_MEMORY_STATE_TIME=y
CONFIG_QPNP_MISC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_CHR_DEV_SCH=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_SCSI_UFSHCD=y
-CONFIG_SCSI_UFSHCD_PLATFORM=y
-CONFIG_SCSI_UFS_QCOM=y
-CONFIG_SCSI_UFS_QCOM_ICE=y
-CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
-CONFIG_DM_DEBUG=y
CONFIG_DM_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
@@ -262,7 +243,6 @@
CONFIG_CLD_LL_CORE=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
-CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
@@ -343,25 +323,7 @@
CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_SOC=y
CONFIG_UHID=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_ELECOM=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_KENSINGTON=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_MULTITOUCH=y
-CONFIG_USB_DWC3=y
-CONFIG_NOP_USB_XCEIV=y
CONFIG_DUAL_ROLE_USB_INTF=y
-CONFIG_USB_MSM_SSPHY_QMP=y
-CONFIG_MSM_QUSB_PHY=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
CONFIG_USB_GADGET_DEBUG_FS=y
@@ -369,15 +331,10 @@
CONFIG_USB_CI13XXX_MSM=y
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_SERIAL=y
-CONFIG_USB_CONFIGFS_ACM=y
CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_ECM=y
CONFIG_USB_CONFIGFS_RMNET_BAM=y
-CONFIG_USB_CONFIGFS_EEM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_MTP=y
-CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_UEVENT=y
@@ -386,7 +343,6 @@
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_CCID=y
-CONFIG_USB_CONFIGFS_F_GSI=y
CONFIG_MMC=y
CONFIG_MMC_PERF_PROFILING=y
CONFIG_MMC_RING_BUFFER=y
@@ -477,15 +433,12 @@
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_SENSORS_SSC=y
CONFIG_MSM_TZ_LOG=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
-CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_ECRYPT_FS=y
@@ -500,15 +453,7 @@
CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y
-CONFIG_SLUB_DEBUG_PANIC_ON=y
CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
-CONFIG_DEBUG_OBJECTS=y
-CONFIG_DEBUG_OBJECTS_FREE=y
-CONFIG_DEBUG_OBJECTS_TIMERS=y
-CONFIG_DEBUG_OBJECTS_WORK=y
-CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
-CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
-CONFIG_SLUB_DEBUG_ON=y
CONFIG_DEBUG_KMEMLEAK=y
CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
diff --git a/arch/arm/configs/msm8937-perf_defconfig b/arch/arm/configs/msm8937-perf_defconfig
index b96a08d..dae45e1 100644
--- a/arch/arm/configs/msm8937-perf_defconfig
+++ b/arch/arm/configs/msm8937-perf_defconfig
@@ -17,6 +17,7 @@
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_MEMCG=y
diff --git a/arch/arm/configs/msm8937_defconfig b/arch/arm/configs/msm8937_defconfig
index 7ed8509..3c43fa3 100644
--- a/arch/arm/configs/msm8937_defconfig
+++ b/arch/arm/configs/msm8937_defconfig
@@ -18,6 +18,7 @@
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_MEMCG=y
diff --git a/arch/arm/configs/msm8953-perf_defconfig b/arch/arm/configs/msm8953-perf_defconfig
index 88e5e22..06e5360 100644
--- a/arch/arm/configs/msm8953-perf_defconfig
+++ b/arch/arm/configs/msm8953-perf_defconfig
@@ -595,6 +595,9 @@
CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@@ -625,13 +628,13 @@
CONFIG_CORESIGHT_CTI=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_LSM_MMAP_MIN_ADDR=4096
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CTR=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm/configs/msm8953_defconfig b/arch/arm/configs/msm8953_defconfig
index 01da3bf..70adcf1 100644
--- a/arch/arm/configs/msm8953_defconfig
+++ b/arch/arm/configs/msm8953_defconfig
@@ -613,6 +613,9 @@
CONFIG_MSM_TZ_LOG=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@@ -689,13 +692,13 @@
CONFIG_CORESIGHT_CTI=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_LSM_MMAP_MIN_ADDR=4096
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CTR=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm/include/asm/etmv4x.h b/arch/arm/include/asm/etmv4x.h
new file mode 100644
index 0000000..7ad0a92
--- /dev/null
+++ b/arch/arm/include/asm/etmv4x.h
@@ -0,0 +1,387 @@
+/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ETMV4X_H
+#define __ASM_ETMV4X_H
+
+#include <linux/types.h>
+
+
+/* 32 and 64 bit register reads for AArch32 (64-bit reads use 32-bit access) */
+#define trc_readl(reg) RSYSL_##reg()
+#define trc_readq(reg) RSYSL_##reg()
+
+/* 32 bit register write for AArch32 */
+#define trc_write(val, reg) WSYS_##reg(val)
+
+#define MRC(op0, op1, crn, crm, op2) \
+({ \
+uint32_t val; \
+asm volatile("mrc p"#op0", "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val)); \
+val; \
+})
+
+#define MCR(val, op0, op1, crn, crm, op2) \
+({ \
+asm volatile("mcr p"#op0", "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\
+})
+
+/* Clock and Power Management Register */
+#define RSYSL_CPMR_EL1() MRC(15, 7, c15, c0, 5)
+#define WSYS_CPMR_EL1(val) MCR(val, 15, 7, c15, c0, 5)
+
+/*
+ * ETMv4 Registers
+ *
+ * Read only
+ * ETMAUTHSTATUS, ETMDEVARCH, ETMDEVID, ETMIDRn[0-13], ETMOSLSR, ETMSTATR
+ *
+ * Write only
+ * ETMOSLAR
+ */
+/* 32 bit registers */
+#define RSYSL_ETMAUTHSTATUS() MRC(14, 1, c7, c14, 6)
+#define RSYSL_ETMAUXCTLR() MRC(14, 1, c0, c6, 0)
+#define RSYSL_ETMCCCTLR() MRC(14, 1, c0, c14, 0)
+#define RSYSL_ETMCIDCCTLR0() MRC(14, 1, c3, c0, 2)
+#define RSYSL_ETMCNTCTLR0() MRC(14, 1, c0, c4, 5)
+#define RSYSL_ETMCNTCTLR1() MRC(14, 1, c0, c5, 5)
+#define RSYSL_ETMCNTCTLR2() MRC(14, 1, c0, c6, 5)
+#define RSYSL_ETMCNTCTLR3() MRC(14, 1, c0, c7, 5)
+#define RSYSL_ETMCNTRLDVR0() MRC(14, 1, c0, c0, 5)
+#define RSYSL_ETMCNTRLDVR1() MRC(14, 1, c0, c1, 5)
+#define RSYSL_ETMCNTRLDVR2() MRC(14, 1, c0, c2, 5)
+#define RSYSL_ETMCNTRLDVR3() MRC(14, 1, c0, c3, 5)
+#define RSYSL_ETMCNTVR0() MRC(14, 1, c0, c8, 5)
+#define RSYSL_ETMCNTVR1() MRC(14, 1, c0, c9, 5)
+#define RSYSL_ETMCNTVR2() MRC(14, 1, c0, c10, 5)
+#define RSYSL_ETMCNTVR3() MRC(14, 1, c0, c11, 5)
+#define RSYSL_ETMCONFIGR() MRC(14, 1, c0, c4, 0)
+#define RSYSL_ETMDEVARCH() MRC(14, 1, c7, c15, 6)
+#define RSYSL_ETMDEVID() MRC(14, 1, c7, c2, 7)
+#define RSYSL_ETMEVENTCTL0R() MRC(14, 1, c0, c8, 0)
+#define RSYSL_ETMEVENTCTL1R() MRC(14, 1, c0, c9, 0)
+#define RSYSL_ETMEXTINSELR() MRC(14, 1, c0, c8, 4)
+#define RSYSL_ETMIDR0() MRC(14, 1, c0, c8, 7)
+#define RSYSL_ETMIDR1() MRC(14, 1, c0, c9, 7)
+#define RSYSL_ETMIDR10() MRC(14, 1, c0, c2, 6)
+#define RSYSL_ETMIDR11() MRC(14, 1, c0, c3, 6)
+#define RSYSL_ETMIDR12() MRC(14, 1, c0, c4, 6)
+#define RSYSL_ETMIDR13() MRC(14, 1, c0, c5, 6)
+#define RSYSL_ETMIDR2() MRC(14, 1, c0, c10, 7)
+#define RSYSL_ETMIDR3() MRC(14, 1, c0, c11, 7)
+#define RSYSL_ETMIDR4() MRC(14, 1, c0, c12, 7)
+#define RSYSL_ETMIDR5() MRC(14, 1, c0, c13, 7)
+#define RSYSL_ETMIDR6() MRC(14, 1, c0, c14, 7)
+#define RSYSL_ETMIDR7() MRC(14, 1, c0, c15, 7)
+#define RSYSL_ETMIDR8() MRC(14, 1, c0, c0, 6)
+#define RSYSL_ETMIDR9() MRC(14, 1, c0, c1, 6)
+#define RSYSL_ETMIMSPEC0() MRC(14, 1, c0, c0, 7)
+#define RSYSL_ETMOSLSR() MRC(14, 1, c1, c1, 4)
+#define RSYSL_ETMPRGCTLR() MRC(14, 1, c0, c1, 0)
+#define RSYSL_ETMRSCTLR10() MRC(14, 1, c1, c10, 0)
+#define RSYSL_ETMRSCTLR11() MRC(14, 1, c1, c11, 0)
+#define RSYSL_ETMRSCTLR12() MRC(14, 1, c1, c12, 0)
+#define RSYSL_ETMRSCTLR13() MRC(14, 1, c1, c13, 0)
+#define RSYSL_ETMRSCTLR14() MRC(14, 1, c1, c14, 0)
+#define RSYSL_ETMRSCTLR15() MRC(14, 1, c1, c15, 0)
+#define RSYSL_ETMRSCTLR2() MRC(14, 1, c1, c2, 0)
+#define RSYSL_ETMRSCTLR3() MRC(14, 1, c1, c3, 0)
+#define RSYSL_ETMRSCTLR4() MRC(14, 1, c1, c4, 0)
+#define RSYSL_ETMRSCTLR5() MRC(14, 1, c1, c5, 0)
+#define RSYSL_ETMRSCTLR6() MRC(14, 1, c1, c6, 0)
+#define RSYSL_ETMRSCTLR7() MRC(14, 1, c1, c7, 0)
+#define RSYSL_ETMRSCTLR8() MRC(14, 1, c1, c8, 0)
+#define RSYSL_ETMRSCTLR9() MRC(14, 1, c1, c9, 0)
+#define RSYSL_ETMRSCTLR16() MRC(14, 1, c1, c0, 1)
+#define RSYSL_ETMRSCTLR17() MRC(14, 1, c1, c1, 1)
+#define RSYSL_ETMRSCTLR18() MRC(14, 1, c1, c2, 1)
+#define RSYSL_ETMRSCTLR19() MRC(14, 1, c1, c3, 1)
+#define RSYSL_ETMRSCTLR20() MRC(14, 1, c1, c4, 1)
+#define RSYSL_ETMRSCTLR21() MRC(14, 1, c1, c5, 1)
+#define RSYSL_ETMRSCTLR22() MRC(14, 1, c1, c6, 1)
+#define RSYSL_ETMRSCTLR23() MRC(14, 1, c1, c7, 1)
+#define RSYSL_ETMRSCTLR24() MRC(14, 1, c1, c8, 1)
+#define RSYSL_ETMRSCTLR25() MRC(14, 1, c1, c9, 1)
+#define RSYSL_ETMRSCTLR26() MRC(14, 1, c1, c10, 1)
+#define RSYSL_ETMRSCTLR27() MRC(14, 1, c1, c11, 1)
+#define RSYSL_ETMRSCTLR28() MRC(14, 1, c1, c12, 1)
+#define RSYSL_ETMRSCTLR29() MRC(14, 1, c1, c13, 1)
+#define RSYSL_ETMRSCTLR30() MRC(14, 1, c1, c14, 1)
+#define RSYSL_ETMRSCTLR31() MRC(14, 1, c1, c15, 1)
+#define RSYSL_ETMSEQEVR0() MRC(14, 1, c0, c0, 4)
+#define RSYSL_ETMSEQEVR1() MRC(14, 1, c0, c1, 4)
+#define RSYSL_ETMSEQEVR2() MRC(14, 1, c0, c2, 4)
+#define RSYSL_ETMSEQRSTEVR() MRC(14, 1, c0, c6, 4)
+#define RSYSL_ETMSEQSTR() MRC(14, 1, c0, c7, 4)
+#define RSYSL_ETMSTALLCTLR() MRC(14, 1, c0, c11, 0)
+#define RSYSL_ETMSTATR() MRC(14, 1, c0, c3, 0)
+#define RSYSL_ETMSYNCPR() MRC(14, 1, c0, c13, 0)
+#define RSYSL_ETMTRACEIDR() MRC(14, 1, c0, c0, 1)
+#define RSYSL_ETMTSCTLR() MRC(14, 1, c0, c12, 0)
+#define RSYSL_ETMVICTLR() MRC(14, 1, c0, c0, 2)
+#define RSYSL_ETMVIIECTLR() MRC(14, 1, c0, c1, 2)
+#define RSYSL_ETMVISSCTLR() MRC(14, 1, c0, c2, 2)
+#define RSYSL_ETMSSCCR0() MRC(14, 1, c1, c0, 2)
+#define RSYSL_ETMSSCCR1() MRC(14, 1, c1, c1, 2)
+#define RSYSL_ETMSSCCR2() MRC(14, 1, c1, c2, 2)
+#define RSYSL_ETMSSCCR3() MRC(14, 1, c1, c3, 2)
+#define RSYSL_ETMSSCCR4() MRC(14, 1, c1, c4, 2)
+#define RSYSL_ETMSSCCR5() MRC(14, 1, c1, c5, 2)
+#define RSYSL_ETMSSCCR6() MRC(14, 1, c1, c6, 2)
+#define RSYSL_ETMSSCCR7() MRC(14, 1, c1, c7, 2)
+#define RSYSL_ETMSSCSR0() MRC(14, 1, c1, c8, 2)
+#define RSYSL_ETMSSCSR1() MRC(14, 1, c1, c9, 2)
+#define RSYSL_ETMSSCSR2() MRC(14, 1, c1, c10, 2)
+#define RSYSL_ETMSSCSR3() MRC(14, 1, c1, c11, 2)
+#define RSYSL_ETMSSCSR4() MRC(14, 1, c1, c12, 2)
+#define RSYSL_ETMSSCSR5() MRC(14, 1, c1, c13, 2)
+#define RSYSL_ETMSSCSR6() MRC(14, 1, c1, c14, 2)
+#define RSYSL_ETMSSCSR7() MRC(14, 1, c1, c15, 2)
+#define RSYSL_ETMSSPCICR0() MRC(14, 1, c1, c0, 3)
+#define RSYSL_ETMSSPCICR1() MRC(14, 1, c1, c1, 3)
+#define RSYSL_ETMSSPCICR2() MRC(14, 1, c1, c2, 3)
+#define RSYSL_ETMSSPCICR3() MRC(14, 1, c1, c3, 3)
+#define RSYSL_ETMSSPCICR4() MRC(14, 1, c1, c4, 3)
+#define RSYSL_ETMSSPCICR5() MRC(14, 1, c1, c5, 3)
+#define RSYSL_ETMSSPCICR6() MRC(14, 1, c1, c6, 3)
+#define RSYSL_ETMSSPCICR7() MRC(14, 1, c1, c7, 3)
+
+/*
+ * 64 bit registers, ignore the upper 32bit
+ * A read from a 32-bit register location using a 64-bit access results
+ * in the upper 32 bits being returned as RES0.
+ */
+#define RSYSL_ETMACATR0() MRC(14, 1, c2, c0, 2)
+#define RSYSL_ETMACATR1() MRC(14, 1, c2, c2, 2)
+#define RSYSL_ETMACATR2() MRC(14, 1, c2, c4, 2)
+#define RSYSL_ETMACATR3() MRC(14, 1, c2, c6, 2)
+#define RSYSL_ETMACATR4() MRC(14, 1, c2, c8, 2)
+#define RSYSL_ETMACATR5() MRC(14, 1, c2, c10, 2)
+#define RSYSL_ETMACATR6() MRC(14, 1, c2, c12, 2)
+#define RSYSL_ETMACATR7() MRC(14, 1, c2, c14, 2)
+#define RSYSL_ETMACATR8() MRC(14, 1, c2, c0, 3)
+#define RSYSL_ETMACATR9() MRC(14, 1, c2, c2, 3)
+#define RSYSL_ETMACATR10() MRC(14, 1, c2, c4, 3)
+#define RSYSL_ETMACATR11() MRC(14, 1, c2, c6, 3)
+#define RSYSL_ETMACATR12() MRC(14, 1, c2, c8, 3)
+#define RSYSL_ETMACATR13() MRC(14, 1, c2, c10, 3)
+#define RSYSL_ETMACATR14() MRC(14, 1, c2, c12, 3)
+#define RSYSL_ETMACATR15() MRC(14, 1, c2, c14, 3)
+#define RSYSL_ETMCIDCVR0() MRC(14, 1, c3, c0, 0)
+#define RSYSL_ETMCIDCVR1() MRC(14, 1, c3, c2, 0)
+#define RSYSL_ETMCIDCVR2() MRC(14, 1, c3, c4, 0)
+#define RSYSL_ETMCIDCVR3() MRC(14, 1, c3, c6, 0)
+#define RSYSL_ETMCIDCVR4() MRC(14, 1, c3, c8, 0)
+#define RSYSL_ETMCIDCVR5() MRC(14, 1, c3, c10, 0)
+#define RSYSL_ETMCIDCVR6() MRC(14, 1, c3, c12, 0)
+#define RSYSL_ETMCIDCVR7() MRC(14, 1, c3, c14, 0)
+#define RSYSL_ETMACVR0() MRC(14, 1, c2, c0, 0)
+#define RSYSL_ETMACVR1() MRC(14, 1, c2, c2, 0)
+#define RSYSL_ETMACVR2() MRC(14, 1, c2, c4, 0)
+#define RSYSL_ETMACVR3() MRC(14, 1, c2, c6, 0)
+#define RSYSL_ETMACVR4() MRC(14, 1, c2, c8, 0)
+#define RSYSL_ETMACVR5() MRC(14, 1, c2, c10, 0)
+#define RSYSL_ETMACVR6() MRC(14, 1, c2, c12, 0)
+#define RSYSL_ETMACVR7() MRC(14, 1, c2, c14, 0)
+#define RSYSL_ETMACVR8() MRC(14, 1, c2, c0, 1)
+#define RSYSL_ETMACVR9() MRC(14, 1, c2, c2, 1)
+#define RSYSL_ETMACVR10() MRC(14, 1, c2, c4, 1)
+#define RSYSL_ETMACVR11() MRC(14, 1, c2, c6, 1)
+#define RSYSL_ETMACVR12() MRC(14, 1, c2, c8, 1)
+#define RSYSL_ETMACVR13() MRC(14, 1, c2, c10, 1)
+#define RSYSL_ETMACVR14() MRC(14, 1, c2, c12, 1)
+#define RSYSL_ETMACVR15() MRC(14, 1, c2, c14, 1)
+#define RSYSL_ETMVMIDCVR0() MRC(14, 1, c3, c0, 1)
+#define RSYSL_ETMVMIDCVR1() MRC(14, 1, c3, c2, 1)
+#define RSYSL_ETMVMIDCVR2() MRC(14, 1, c3, c4, 1)
+#define RSYSL_ETMVMIDCVR3() MRC(14, 1, c3, c6, 1)
+#define RSYSL_ETMVMIDCVR4() MRC(14, 1, c3, c8, 1)
+#define RSYSL_ETMVMIDCVR5() MRC(14, 1, c3, c10, 1)
+#define RSYSL_ETMVMIDCVR6() MRC(14, 1, c3, c12, 1)
+#define RSYSL_ETMVMIDCVR7() MRC(14, 1, c3, c14, 1)
+#define RSYSL_ETMDVCVR0() MRC(14, 1, c2, c0, 4)
+#define RSYSL_ETMDVCVR1() MRC(14, 1, c2, c4, 4)
+#define RSYSL_ETMDVCVR2() MRC(14, 1, c2, c8, 4)
+#define RSYSL_ETMDVCVR3() MRC(14, 1, c2, c12, 4)
+#define RSYSL_ETMDVCVR4() MRC(14, 1, c2, c0, 5)
+#define RSYSL_ETMDVCVR5() MRC(14, 1, c2, c4, 5)
+#define RSYSL_ETMDVCVR6() MRC(14, 1, c2, c8, 5)
+#define RSYSL_ETMDVCVR7() MRC(14, 1, c2, c12, 5)
+#define RSYSL_ETMDVCMR0() MRC(14, 1, c2, c0, 6)
+#define RSYSL_ETMDVCMR1() MRC(14, 1, c2, c4, 6)
+#define RSYSL_ETMDVCMR2() MRC(14, 1, c2, c8, 6)
+#define RSYSL_ETMDVCMR3() MRC(14, 1, c2, c12, 6)
+#define RSYSL_ETMDVCMR4() MRC(14, 1, c2, c0, 7)
+#define RSYSL_ETMDVCMR5() MRC(14, 1, c2, c4, 7)
+#define RSYSL_ETMDVCMR6() MRC(14, 1, c2, c8, 7)
+#define RSYSL_ETMDVCMR7() MRC(14, 1, c2, c12, 7)
+
+/*
+ * 32 and 64 bit registers
+ * A write to a 32-bit register location using a 64-bit access results
+ * in the upper 32 bits of the access being ignored.
+ */
+#define WSYS_ETMAUXCTLR(val) MCR(val, 14, 1, c0, c6, 0)
+#define WSYS_ETMACATR0(val) MCR(val, 14, 1, c2, c0, 2)
+#define WSYS_ETMACATR1(val) MCR(val, 14, 1, c2, c2, 2)
+#define WSYS_ETMACATR2(val) MCR(val, 14, 1, c2, c4, 2)
+#define WSYS_ETMACATR3(val) MCR(val, 14, 1, c2, c6, 2)
+#define WSYS_ETMACATR4(val) MCR(val, 14, 1, c2, c8, 2)
+#define WSYS_ETMACATR5(val) MCR(val, 14, 1, c2, c10, 2)
+#define WSYS_ETMACATR6(val) MCR(val, 14, 1, c2, c12, 2)
+#define WSYS_ETMACATR7(val) MCR(val, 14, 1, c2, c14, 2)
+#define WSYS_ETMACATR8(val) MCR(val, 14, 1, c2, c0, 3)
+#define WSYS_ETMACATR9(val) MCR(val, 14, 1, c2, c2, 3)
+#define WSYS_ETMACATR10(val) MCR(val, 14, 1, c2, c4, 3)
+#define WSYS_ETMACATR11(val) MCR(val, 14, 1, c2, c6, 3)
+#define WSYS_ETMACATR12(val) MCR(val, 14, 1, c2, c8, 3)
+#define WSYS_ETMACATR13(val) MCR(val, 14, 1, c2, c10, 3)
+#define WSYS_ETMACATR14(val) MCR(val, 14, 1, c2, c12, 3)
+#define WSYS_ETMACATR15(val) MCR(val, 14, 1, c2, c14, 3)
+#define WSYS_ETMACVR0(val) MCR(val, 14, 1, c2, c0, 0)
+#define WSYS_ETMACVR1(val) MCR(val, 14, 1, c2, c2, 0)
+#define WSYS_ETMACVR2(val) MCR(val, 14, 1, c2, c4, 0)
+#define WSYS_ETMACVR3(val) MCR(val, 14, 1, c2, c6, 0)
+#define WSYS_ETMACVR4(val) MCR(val, 14, 1, c2, c8, 0)
+#define WSYS_ETMACVR5(val) MCR(val, 14, 1, c2, c10, 0)
+#define WSYS_ETMACVR6(val) MCR(val, 14, 1, c2, c12, 0)
+#define WSYS_ETMACVR7(val) MCR(val, 14, 1, c2, c14, 0)
+#define WSYS_ETMACVR8(val) MCR(val, 14, 1, c2, c0, 1)
+#define WSYS_ETMACVR9(val) MCR(val, 14, 1, c2, c2, 1)
+#define WSYS_ETMACVR10(val) MCR(val, 14, 1, c2, c4, 1)
+#define WSYS_ETMACVR11(val) MCR(val, 14, 1, c2, c6, 1)
+#define WSYS_ETMACVR12(val) MCR(val, 14, 1, c2, c8, 1)
+#define WSYS_ETMACVR13(val) MCR(val, 14, 1, c2, c10, 1)
+#define WSYS_ETMACVR14(val) MCR(val, 14, 1, c2, c12, 1)
+#define WSYS_ETMACVR15(val) MCR(val, 14, 1, c2, c14, 1)
+#define WSYS_ETMCCCTLR(val) MCR(val, 14, 1, c0, c14, 0)
+#define WSYS_ETMCIDCCTLR0(val) MCR(val, 14, 1, c3, c0, 2)
+#define WSYS_ETMCIDCVR0(val) MCR(val, 14, 1, c3, c0, 0)
+#define WSYS_ETMCIDCVR1(val) MCR(val, 14, 1, c3, c2, 0)
+#define WSYS_ETMCIDCVR2(val) MCR(val, 14, 1, c3, c4, 0)
+#define WSYS_ETMCIDCVR3(val) MCR(val, 14, 1, c3, c6, 0)
+#define WSYS_ETMCIDCVR4(val) MCR(val, 14, 1, c3, c8, 0)
+#define WSYS_ETMCIDCVR5(val) MCR(val, 14, 1, c3, c10, 0)
+#define WSYS_ETMCIDCVR6(val) MCR(val, 14, 1, c3, c12, 0)
+#define WSYS_ETMCIDCVR7(val) MCR(val, 14, 1, c3, c14, 0)
+#define WSYS_ETMCNTCTLR0(val) MCR(val, 14, 1, c0, c4, 5)
+#define WSYS_ETMCNTCTLR1(val) MCR(val, 14, 1, c0, c5, 5)
+#define WSYS_ETMCNTCTLR2(val) MCR(val, 14, 1, c0, c6, 5)
+#define WSYS_ETMCNTCTLR3(val) MCR(val, 14, 1, c0, c7, 5)
+#define WSYS_ETMCNTRLDVR0(val) MCR(val, 14, 1, c0, c0, 5)
+#define WSYS_ETMCNTRLDVR1(val) MCR(val, 14, 1, c0, c1, 5)
+#define WSYS_ETMCNTRLDVR2(val) MCR(val, 14, 1, c0, c2, 5)
+#define WSYS_ETMCNTRLDVR3(val) MCR(val, 14, 1, c0, c3, 5)
+#define WSYS_ETMCNTVR0(val) MCR(val, 14, 1, c0, c8, 5)
+#define WSYS_ETMCNTVR1(val) MCR(val, 14, 1, c0, c9, 5)
+#define WSYS_ETMCNTVR2(val) MCR(val, 14, 1, c0, c10, 5)
+#define WSYS_ETMCNTVR3(val) MCR(val, 14, 1, c0, c11, 5)
+#define WSYS_ETMCONFIGR(val) MCR(val, 14, 1, c0, c4, 0)
+#define WSYS_ETMEVENTCTL0R(val) MCR(val, 14, 1, c0, c8, 0)
+#define WSYS_ETMEVENTCTL1R(val) MCR(val, 14, 1, c0, c9, 0)
+#define WSYS_ETMEXTINSELR(val) MCR(val, 14, 1, c0, c8, 4)
+#define WSYS_ETMIMSPEC0(val) MCR(val, 14, 1, c0, c0, 7)
+#define WSYS_ETMOSLAR(val) MCR(val, 14, 1, c1, c0, 4)
+#define WSYS_ETMPRGCTLR(val) MCR(val, 14, 1, c0, c1, 0)
+#define WSYS_ETMRSCTLR10(val) MCR(val, 14, 1, c1, c10, 0)
+#define WSYS_ETMRSCTLR11(val) MCR(val, 14, 1, c1, c11, 0)
+#define WSYS_ETMRSCTLR12(val) MCR(val, 14, 1, c1, c12, 0)
+#define WSYS_ETMRSCTLR13(val) MCR(val, 14, 1, c1, c13, 0)
+#define WSYS_ETMRSCTLR14(val) MCR(val, 14, 1, c1, c14, 0)
+#define WSYS_ETMRSCTLR15(val) MCR(val, 14, 1, c1, c15, 0)
+#define WSYS_ETMRSCTLR2(val) MCR(val, 14, 1, c1, c2, 0)
+#define WSYS_ETMRSCTLR3(val) MCR(val, 14, 1, c1, c3, 0)
+#define WSYS_ETMRSCTLR4(val) MCR(val, 14, 1, c1, c4, 0)
+#define WSYS_ETMRSCTLR5(val) MCR(val, 14, 1, c1, c5, 0)
+#define WSYS_ETMRSCTLR6(val) MCR(val, 14, 1, c1, c6, 0)
+#define WSYS_ETMRSCTLR7(val) MCR(val, 14, 1, c1, c7, 0)
+#define WSYS_ETMRSCTLR8(val) MCR(val, 14, 1, c1, c8, 0)
+#define WSYS_ETMRSCTLR9(val) MCR(val, 14, 1, c1, c9, 0)
+#define WSYS_ETMRSCTLR16(val) MCR(val, 14, 1, c1, c0, 1)
+#define WSYS_ETMRSCTLR17(val) MCR(val, 14, 1, c1, c1, 1)
+#define WSYS_ETMRSCTLR18(val) MCR(val, 14, 1, c1, c2, 1)
+#define WSYS_ETMRSCTLR19(val) MCR(val, 14, 1, c1, c3, 1)
+#define WSYS_ETMRSCTLR20(val) MCR(val, 14, 1, c1, c4, 1)
+#define WSYS_ETMRSCTLR21(val) MCR(val, 14, 1, c1, c5, 1)
+#define WSYS_ETMRSCTLR22(val) MCR(val, 14, 1, c1, c6, 1)
+#define WSYS_ETMRSCTLR23(val) MCR(val, 14, 1, c1, c7, 1)
+#define WSYS_ETMRSCTLR24(val) MCR(val, 14, 1, c1, c8, 1)
+#define WSYS_ETMRSCTLR25(val) MCR(val, 14, 1, c1, c9, 1)
+#define WSYS_ETMRSCTLR26(val) MCR(val, 14, 1, c1, c10, 1)
+#define WSYS_ETMRSCTLR27(val) MCR(val, 14, 1, c1, c11, 1)
+#define WSYS_ETMRSCTLR28(val) MCR(val, 14, 1, c1, c12, 1)
+#define WSYS_ETMRSCTLR29(val) MCR(val, 14, 1, c1, c13, 1)
+#define WSYS_ETMRSCTLR30(val) MCR(val, 14, 1, c1, c14, 1)
+#define WSYS_ETMRSCTLR31(val) MCR(val, 14, 1, c1, c15, 1)
+#define WSYS_ETMSEQEVR0(val) MCR(val, 14, 1, c0, c0, 4)
+#define WSYS_ETMSEQEVR1(val) MCR(val, 14, 1, c0, c1, 4)
+#define WSYS_ETMSEQEVR2(val) MCR(val, 14, 1, c0, c2, 4)
+#define WSYS_ETMSEQRSTEVR(val) MCR(val, 14, 1, c0, c6, 4)
+#define WSYS_ETMSEQSTR(val) MCR(val, 14, 1, c0, c7, 4)
+#define WSYS_ETMSTALLCTLR(val) MCR(val, 14, 1, c0, c11, 0)
+#define WSYS_ETMSYNCPR(val) MCR(val, 14, 1, c0, c13, 0)
+#define WSYS_ETMTRACEIDR(val) MCR(val, 14, 1, c0, c0, 1)
+#define WSYS_ETMTSCTLR(val) MCR(val, 14, 1, c0, c12, 0)
+#define WSYS_ETMVICTLR(val) MCR(val, 14, 1, c0, c0, 2)
+#define WSYS_ETMVIIECTLR(val) MCR(val, 14, 1, c0, c1, 2)
+#define WSYS_ETMVISSCTLR(val) MCR(val, 14, 1, c0, c2, 2)
+#define WSYS_ETMVMIDCVR0(val) MCR(val, 14, 1, c3, c0, 1)
+#define WSYS_ETMVMIDCVR1(val) MCR(val, 14, 1, c3, c2, 1)
+#define WSYS_ETMVMIDCVR2(val) MCR(val, 14, 1, c3, c4, 1)
+#define WSYS_ETMVMIDCVR3(val) MCR(val, 14, 1, c3, c6, 1)
+#define WSYS_ETMVMIDCVR4(val) MCR(val, 14, 1, c3, c8, 1)
+#define WSYS_ETMVMIDCVR5(val) MCR(val, 14, 1, c3, c10, 1)
+#define WSYS_ETMVMIDCVR6(val) MCR(val, 14, 1, c3, c12, 1)
+#define WSYS_ETMVMIDCVR7(val) MCR(val, 14, 1, c3, c14, 1)
+#define WSYS_ETMDVCVR0(val) MCR(val, 14, 1, c2, c0, 4)
+#define WSYS_ETMDVCVR1(val) MCR(val, 14, 1, c2, c4, 4)
+#define WSYS_ETMDVCVR2(val) MCR(val, 14, 1, c2, c8, 4)
+#define WSYS_ETMDVCVR3(val) MCR(val, 14, 1, c2, c12, 4)
+#define WSYS_ETMDVCVR4(val) MCR(val, 14, 1, c2, c0, 5)
+#define WSYS_ETMDVCVR5(val) MCR(val, 14, 1, c2, c4, 5)
+#define WSYS_ETMDVCVR6(val) MCR(val, 14, 1, c2, c8, 5)
+#define WSYS_ETMDVCVR7(val) MCR(val, 14, 1, c2, c12, 5)
+#define WSYS_ETMDVCMR0(val) MCR(val, 14, 1, c2, c0, 6)
+#define WSYS_ETMDVCMR1(val) MCR(val, 14, 1, c2, c4, 6)
+#define WSYS_ETMDVCMR2(val) MCR(val, 14, 1, c2, c8, 6)
+#define WSYS_ETMDVCMR3(val) MCR(val, 14, 1, c2, c12, 6)
+#define WSYS_ETMDVCMR4(val) MCR(val, 14, 1, c2, c0, 7)
+#define WSYS_ETMDVCMR5(val) MCR(val, 14, 1, c2, c4, 7)
+#define WSYS_ETMDVCMR6(val) MCR(val, 14, 1, c2, c8, 7)
+#define WSYS_ETMDVCMR7(val) MCR(val, 14, 1, c2, c12, 7)
+#define WSYS_ETMSSCCR0(val) MCR(val, 14, 1, c1, c0, 2)
+#define WSYS_ETMSSCCR1(val) MCR(val, 14, 1, c1, c1, 2)
+#define WSYS_ETMSSCCR2(val) MCR(val, 14, 1, c1, c2, 2)
+#define WSYS_ETMSSCCR3(val) MCR(val, 14, 1, c1, c3, 2)
+#define WSYS_ETMSSCCR4(val) MCR(val, 14, 1, c1, c4, 2)
+#define WSYS_ETMSSCCR5(val) MCR(val, 14, 1, c1, c5, 2)
+#define WSYS_ETMSSCCR6(val) MCR(val, 14, 1, c1, c6, 2)
+#define WSYS_ETMSSCCR7(val) MCR(val, 14, 1, c1, c7, 2)
+#define WSYS_ETMSSCSR0(val) MCR(val, 14, 1, c1, c8, 2)
+#define WSYS_ETMSSCSR1(val) MCR(val, 14, 1, c1, c9, 2)
+#define WSYS_ETMSSCSR2(val) MCR(val, 14, 1, c1, c10, 2)
+#define WSYS_ETMSSCSR3(val) MCR(val, 14, 1, c1, c11, 2)
+#define WSYS_ETMSSCSR4(val) MCR(val, 14, 1, c1, c12, 2)
+#define WSYS_ETMSSCSR5(val) MCR(val, 14, 1, c1, c13, 2)
+#define WSYS_ETMSSCSR6(val) MCR(val, 14, 1, c1, c14, 2)
+#define WSYS_ETMSSCSR7(val) MCR(val, 14, 1, c1, c15, 2)
+#define WSYS_ETMSSPCICR0(val) MCR(val, 14, 1, c1, c0, 3)
+#define WSYS_ETMSSPCICR1(val) MCR(val, 14, 1, c1, c1, 3)
+#define WSYS_ETMSSPCICR2(val) MCR(val, 14, 1, c1, c2, 3)
+#define WSYS_ETMSSPCICR3(val) MCR(val, 14, 1, c1, c3, 3)
+#define WSYS_ETMSSPCICR4(val) MCR(val, 14, 1, c1, c4, 3)
+#define WSYS_ETMSSPCICR5(val) MCR(val, 14, 1, c1, c5, 3)
+#define WSYS_ETMSSPCICR6(val) MCR(val, 14, 1, c1, c6, 3)
+#define WSYS_ETMSSPCICR7(val) MCR(val, 14, 1, c1, c7, 3)
+
+#endif
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 6795368..cc41438 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -128,20 +128,10 @@
#endif /* !SMP */
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
#ifndef CONFIG_SMP
preempt_disable();
#endif
@@ -172,17 +162,9 @@
preempt_enable();
#endif
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/arm/include/asm/hardware/debugv8.h b/arch/arm/include/asm/hardware/debugv8.h
new file mode 100644
index 0000000..a8249cd
--- /dev/null
+++ b/arch/arm/include/asm/hardware/debugv8.h
@@ -0,0 +1,247 @@
+/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_HARDWARE_DEBUGV8_H
+#define __ASM_HARDWARE_DEBUGV8_H
+
+#include <linux/types.h>
+
+/* Accessors for CP14 registers */
+#define dbg_read(reg) RCP14_##reg()
+#define dbg_write(val, reg) WCP14_##reg(val)
+
+/* MRC14 registers */
+#define MRC14(op1, crn, crm, op2) \
+({ \
+uint32_t val; \
+asm volatile("mrc p14, "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val)); \
+val; \
+})
+
+/* MCR14 registers */
+#define MCR14(val, op1, crn, crm, op2) \
+({ \
+asm volatile("mcr p14, "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\
+})
+
+/*
+ * Debug Registers
+ *
+ * Read only
+ * DBGDIDR, DBGDSCRint, DBGDTRRXint, DBGDRAR, DBGOSLSR, DBGOSSRR, DBGDSAR,
+ * DBGAUTHSTATUS, DBGDEVID2, DBGDEVID1, DBGDEVID
+ *
+ * Write only
+ * DBGDTRTXint, DBGOSLAR
+ */
+#define RCP14_DBGDIDR() MRC14(0, c0, c0, 0)
+#define RCP14_DBGDSCRint() MRC14(0, c0, c1, 0)
+#define RCP14_DBGDCCINT() MRC14(0, c0, c2, 0)
+#define RCP14_DBGDTRRXint() MRC14(0, c0, c5, 0)
+#define RCP14_DBGWFAR() MRC14(0, c0, c6, 0)
+#define RCP14_DBGVCR() MRC14(0, c0, c7, 0)
+#define RCP14_DBGDTRRXext() MRC14(0, c0, c0, 2)
+#define RCP14_DBGDSCRext() MRC14(0, c0, c2, 2)
+#define RCP14_DBGDTRTXext() MRC14(0, c0, c3, 2)
+#define RCP14_DBGOSECCR() MRC14(0, c0, c6, 2)
+#define RCP14_DBGBVR0() MRC14(0, c0, c0, 4)
+#define RCP14_DBGBVR1() MRC14(0, c0, c1, 4)
+#define RCP14_DBGBVR2() MRC14(0, c0, c2, 4)
+#define RCP14_DBGBVR3() MRC14(0, c0, c3, 4)
+#define RCP14_DBGBVR4() MRC14(0, c0, c4, 4)
+#define RCP14_DBGBVR5() MRC14(0, c0, c5, 4)
+#define RCP14_DBGBVR6() MRC14(0, c0, c6, 4)
+#define RCP14_DBGBVR7() MRC14(0, c0, c7, 4)
+#define RCP14_DBGBVR8() MRC14(0, c0, c8, 4)
+#define RCP14_DBGBVR9() MRC14(0, c0, c9, 4)
+#define RCP14_DBGBVR10() MRC14(0, c0, c10, 4)
+#define RCP14_DBGBVR11() MRC14(0, c0, c11, 4)
+#define RCP14_DBGBVR12() MRC14(0, c0, c12, 4)
+#define RCP14_DBGBVR13() MRC14(0, c0, c13, 4)
+#define RCP14_DBGBVR14() MRC14(0, c0, c14, 4)
+#define RCP14_DBGBVR15() MRC14(0, c0, c15, 4)
+#define RCP14_DBGBCR0() MRC14(0, c0, c0, 5)
+#define RCP14_DBGBCR1() MRC14(0, c0, c1, 5)
+#define RCP14_DBGBCR2() MRC14(0, c0, c2, 5)
+#define RCP14_DBGBCR3() MRC14(0, c0, c3, 5)
+#define RCP14_DBGBCR4() MRC14(0, c0, c4, 5)
+#define RCP14_DBGBCR5() MRC14(0, c0, c5, 5)
+#define RCP14_DBGBCR6() MRC14(0, c0, c6, 5)
+#define RCP14_DBGBCR7() MRC14(0, c0, c7, 5)
+#define RCP14_DBGBCR8() MRC14(0, c0, c8, 5)
+#define RCP14_DBGBCR9() MRC14(0, c0, c9, 5)
+#define RCP14_DBGBCR10() MRC14(0, c0, c10, 5)
+#define RCP14_DBGBCR11() MRC14(0, c0, c11, 5)
+#define RCP14_DBGBCR12() MRC14(0, c0, c12, 5)
+#define RCP14_DBGBCR13() MRC14(0, c0, c13, 5)
+#define RCP14_DBGBCR14() MRC14(0, c0, c14, 5)
+#define RCP14_DBGBCR15() MRC14(0, c0, c15, 5)
+#define RCP14_DBGWVR0() MRC14(0, c0, c0, 6)
+#define RCP14_DBGWVR1() MRC14(0, c0, c1, 6)
+#define RCP14_DBGWVR2() MRC14(0, c0, c2, 6)
+#define RCP14_DBGWVR3() MRC14(0, c0, c3, 6)
+#define RCP14_DBGWVR4() MRC14(0, c0, c4, 6)
+#define RCP14_DBGWVR5() MRC14(0, c0, c5, 6)
+#define RCP14_DBGWVR6() MRC14(0, c0, c6, 6)
+#define RCP14_DBGWVR7() MRC14(0, c0, c7, 6)
+#define RCP14_DBGWVR8() MRC14(0, c0, c8, 6)
+#define RCP14_DBGWVR9() MRC14(0, c0, c9, 6)
+#define RCP14_DBGWVR10() MRC14(0, c0, c10, 6)
+#define RCP14_DBGWVR11() MRC14(0, c0, c11, 6)
+#define RCP14_DBGWVR12() MRC14(0, c0, c12, 6)
+#define RCP14_DBGWVR13() MRC14(0, c0, c13, 6)
+#define RCP14_DBGWVR14() MRC14(0, c0, c14, 6)
+#define RCP14_DBGWVR15() MRC14(0, c0, c15, 6)
+#define RCP14_DBGWCR0() MRC14(0, c0, c0, 7)
+#define RCP14_DBGWCR1() MRC14(0, c0, c1, 7)
+#define RCP14_DBGWCR2() MRC14(0, c0, c2, 7)
+#define RCP14_DBGWCR3() MRC14(0, c0, c3, 7)
+#define RCP14_DBGWCR4() MRC14(0, c0, c4, 7)
+#define RCP14_DBGWCR5() MRC14(0, c0, c5, 7)
+#define RCP14_DBGWCR6() MRC14(0, c0, c6, 7)
+#define RCP14_DBGWCR7() MRC14(0, c0, c7, 7)
+#define RCP14_DBGWCR8() MRC14(0, c0, c8, 7)
+#define RCP14_DBGWCR9() MRC14(0, c0, c9, 7)
+#define RCP14_DBGWCR10() MRC14(0, c0, c10, 7)
+#define RCP14_DBGWCR11() MRC14(0, c0, c11, 7)
+#define RCP14_DBGWCR12() MRC14(0, c0, c12, 7)
+#define RCP14_DBGWCR13() MRC14(0, c0, c13, 7)
+#define RCP14_DBGWCR14() MRC14(0, c0, c14, 7)
+#define RCP14_DBGWCR15() MRC14(0, c0, c15, 7)
+#define RCP14_DBGDRAR() MRC14(0, c1, c0, 0)
+#define RCP14_DBGBXVR0() MRC14(0, c1, c0, 1)
+#define RCP14_DBGBXVR1() MRC14(0, c1, c1, 1)
+#define RCP14_DBGBXVR2() MRC14(0, c1, c2, 1)
+#define RCP14_DBGBXVR3() MRC14(0, c1, c3, 1)
+#define RCP14_DBGBXVR4() MRC14(0, c1, c4, 1)
+#define RCP14_DBGBXVR5() MRC14(0, c1, c5, 1)
+#define RCP14_DBGBXVR6() MRC14(0, c1, c6, 1)
+#define RCP14_DBGBXVR7() MRC14(0, c1, c7, 1)
+#define RCP14_DBGBXVR8() MRC14(0, c1, c8, 1)
+#define RCP14_DBGBXVR9() MRC14(0, c1, c9, 1)
+#define RCP14_DBGBXVR10() MRC14(0, c1, c10, 1)
+#define RCP14_DBGBXVR11() MRC14(0, c1, c11, 1)
+#define RCP14_DBGBXVR12() MRC14(0, c1, c12, 1)
+#define RCP14_DBGBXVR13() MRC14(0, c1, c13, 1)
+#define RCP14_DBGBXVR14() MRC14(0, c1, c14, 1)
+#define RCP14_DBGBXVR15() MRC14(0, c1, c15, 1)
+#define RCP14_DBGOSLSR() MRC14(0, c1, c1, 4)
+#define RCP14_DBGOSSRR() MRC14(0, c1, c2, 4)
+#define RCP14_DBGOSDLR() MRC14(0, c1, c3, 4)
+#define RCP14_DBGPRCR() MRC14(0, c1, c4, 4)
+#define RCP14_DBGPRSR() MRC14(0, c1, c5, 4)
+#define RCP14_DBGDSAR() MRC14(0, c2, c0, 0)
+#define RCP14_DBGITCTRL() MRC14(0, c7, c0, 4)
+#define RCP14_DBGCLAIMSET() MRC14(0, c7, c8, 6)
+#define RCP14_DBGCLAIMCLR() MRC14(0, c7, c9, 6)
+#define RCP14_DBGAUTHSTATUS() MRC14(0, c7, c14, 6)
+#define RCP14_DBGDEVID2() MRC14(0, c7, c0, 7)
+#define RCP14_DBGDEVID1() MRC14(0, c7, c1, 7)
+#define RCP14_DBGDEVID() MRC14(0, c7, c2, 7)
+
+#define WCP14_DBGDCCINT(val) MCR14(val, 0, c0, c2, 0)
+#define WCP14_DBGDTRTXint(val) MCR14(val, 0, c0, c5, 0)
+#define WCP14_DBGWFAR(val) MCR14(val, 0, c0, c6, 0)
+#define WCP14_DBGVCR(val) MCR14(val, 0, c0, c7, 0)
+#define WCP14_DBGDTRRXext(val) MCR14(val, 0, c0, c0, 2)
+#define WCP14_DBGDSCRext(val) MCR14(val, 0, c0, c2, 2)
+#define WCP14_DBGDTRTXext(val) MCR14(val, 0, c0, c3, 2)
+#define WCP14_DBGOSECCR(val) MCR14(val, 0, c0, c6, 2)
+#define WCP14_DBGBVR0(val) MCR14(val, 0, c0, c0, 4)
+#define WCP14_DBGBVR1(val) MCR14(val, 0, c0, c1, 4)
+#define WCP14_DBGBVR2(val) MCR14(val, 0, c0, c2, 4)
+#define WCP14_DBGBVR3(val) MCR14(val, 0, c0, c3, 4)
+#define WCP14_DBGBVR4(val) MCR14(val, 0, c0, c4, 4)
+#define WCP14_DBGBVR5(val) MCR14(val, 0, c0, c5, 4)
+#define WCP14_DBGBVR6(val) MCR14(val, 0, c0, c6, 4)
+#define WCP14_DBGBVR7(val) MCR14(val, 0, c0, c7, 4)
+#define WCP14_DBGBVR8(val) MCR14(val, 0, c0, c8, 4)
+#define WCP14_DBGBVR9(val) MCR14(val, 0, c0, c9, 4)
+#define WCP14_DBGBVR10(val) MCR14(val, 0, c0, c10, 4)
+#define WCP14_DBGBVR11(val) MCR14(val, 0, c0, c11, 4)
+#define WCP14_DBGBVR12(val) MCR14(val, 0, c0, c12, 4)
+#define WCP14_DBGBVR13(val) MCR14(val, 0, c0, c13, 4)
+#define WCP14_DBGBVR14(val) MCR14(val, 0, c0, c14, 4)
+#define WCP14_DBGBVR15(val) MCR14(val, 0, c0, c15, 4)
+#define WCP14_DBGBCR0(val) MCR14(val, 0, c0, c0, 5)
+#define WCP14_DBGBCR1(val) MCR14(val, 0, c0, c1, 5)
+#define WCP14_DBGBCR2(val) MCR14(val, 0, c0, c2, 5)
+#define WCP14_DBGBCR3(val) MCR14(val, 0, c0, c3, 5)
+#define WCP14_DBGBCR4(val) MCR14(val, 0, c0, c4, 5)
+#define WCP14_DBGBCR5(val) MCR14(val, 0, c0, c5, 5)
+#define WCP14_DBGBCR6(val) MCR14(val, 0, c0, c6, 5)
+#define WCP14_DBGBCR7(val) MCR14(val, 0, c0, c7, 5)
+#define WCP14_DBGBCR8(val) MCR14(val, 0, c0, c8, 5)
+#define WCP14_DBGBCR9(val) MCR14(val, 0, c0, c9, 5)
+#define WCP14_DBGBCR10(val) MCR14(val, 0, c0, c10, 5)
+#define WCP14_DBGBCR11(val) MCR14(val, 0, c0, c11, 5)
+#define WCP14_DBGBCR12(val) MCR14(val, 0, c0, c12, 5)
+#define WCP14_DBGBCR13(val) MCR14(val, 0, c0, c13, 5)
+#define WCP14_DBGBCR14(val) MCR14(val, 0, c0, c14, 5)
+#define WCP14_DBGBCR15(val) MCR14(val, 0, c0, c15, 5)
+#define WCP14_DBGWVR0(val) MCR14(val, 0, c0, c0, 6)
+#define WCP14_DBGWVR1(val) MCR14(val, 0, c0, c1, 6)
+#define WCP14_DBGWVR2(val) MCR14(val, 0, c0, c2, 6)
+#define WCP14_DBGWVR3(val) MCR14(val, 0, c0, c3, 6)
+#define WCP14_DBGWVR4(val) MCR14(val, 0, c0, c4, 6)
+#define WCP14_DBGWVR5(val) MCR14(val, 0, c0, c5, 6)
+#define WCP14_DBGWVR6(val) MCR14(val, 0, c0, c6, 6)
+#define WCP14_DBGWVR7(val) MCR14(val, 0, c0, c7, 6)
+#define WCP14_DBGWVR8(val) MCR14(val, 0, c0, c8, 6)
+#define WCP14_DBGWVR9(val) MCR14(val, 0, c0, c9, 6)
+#define WCP14_DBGWVR10(val) MCR14(val, 0, c0, c10, 6)
+#define WCP14_DBGWVR11(val) MCR14(val, 0, c0, c11, 6)
+#define WCP14_DBGWVR12(val) MCR14(val, 0, c0, c12, 6)
+#define WCP14_DBGWVR13(val) MCR14(val, 0, c0, c13, 6)
+#define WCP14_DBGWVR14(val) MCR14(val, 0, c0, c14, 6)
+#define WCP14_DBGWVR15(val) MCR14(val, 0, c0, c15, 6)
+#define WCP14_DBGWCR0(val) MCR14(val, 0, c0, c0, 7)
+#define WCP14_DBGWCR1(val) MCR14(val, 0, c0, c1, 7)
+#define WCP14_DBGWCR2(val) MCR14(val, 0, c0, c2, 7)
+#define WCP14_DBGWCR3(val) MCR14(val, 0, c0, c3, 7)
+#define WCP14_DBGWCR4(val) MCR14(val, 0, c0, c4, 7)
+#define WCP14_DBGWCR5(val) MCR14(val, 0, c0, c5, 7)
+#define WCP14_DBGWCR6(val) MCR14(val, 0, c0, c6, 7)
+#define WCP14_DBGWCR7(val) MCR14(val, 0, c0, c7, 7)
+#define WCP14_DBGWCR8(val) MCR14(val, 0, c0, c8, 7)
+#define WCP14_DBGWCR9(val) MCR14(val, 0, c0, c9, 7)
+#define WCP14_DBGWCR10(val) MCR14(val, 0, c0, c10, 7)
+#define WCP14_DBGWCR11(val) MCR14(val, 0, c0, c11, 7)
+#define WCP14_DBGWCR12(val) MCR14(val, 0, c0, c12, 7)
+#define WCP14_DBGWCR13(val) MCR14(val, 0, c0, c13, 7)
+#define WCP14_DBGWCR14(val) MCR14(val, 0, c0, c14, 7)
+#define WCP14_DBGWCR15(val) MCR14(val, 0, c0, c15, 7)
+#define WCP14_DBGBXVR0(val) MCR14(val, 0, c1, c0, 1)
+#define WCP14_DBGBXVR1(val) MCR14(val, 0, c1, c1, 1)
+#define WCP14_DBGBXVR2(val) MCR14(val, 0, c1, c2, 1)
+#define WCP14_DBGBXVR3(val) MCR14(val, 0, c1, c3, 1)
+#define WCP14_DBGBXVR4(val) MCR14(val, 0, c1, c4, 1)
+#define WCP14_DBGBXVR5(val) MCR14(val, 0, c1, c5, 1)
+#define WCP14_DBGBXVR6(val) MCR14(val, 0, c1, c6, 1)
+#define WCP14_DBGBXVR7(val) MCR14(val, 0, c1, c7, 1)
+#define WCP14_DBGBXVR8(val) MCR14(val, 0, c1, c8, 1)
+#define WCP14_DBGBXVR9(val) MCR14(val, 0, c1, c9, 1)
+#define WCP14_DBGBXVR10(val) MCR14(val, 0, c1, c10, 1)
+#define WCP14_DBGBXVR11(val) MCR14(val, 0, c1, c11, 1)
+#define WCP14_DBGBXVR12(val) MCR14(val, 0, c1, c12, 1)
+#define WCP14_DBGBXVR13(val) MCR14(val, 0, c1, c13, 1)
+#define WCP14_DBGBXVR14(val) MCR14(val, 0, c1, c14, 1)
+#define WCP14_DBGBXVR15(val) MCR14(val, 0, c1, c15, 1)
+#define WCP14_DBGOSLAR(val) MCR14(val, 0, c1, c0, 4)
+#define WCP14_DBGOSSRR(val) MCR14(val, 0, c1, c2, 4)
+#define WCP14_DBGOSDLR(val) MCR14(val, 0, c1, c3, 4)
+#define WCP14_DBGPRCR(val) MCR14(val, 0, c1, c4, 4)
+#define WCP14_DBGITCTRL(val) MCR14(val, 0, c7, c0, 4)
+#define WCP14_DBGCLAIMSET(val) MCR14(val, 0, c7, c8, 6)
+#define WCP14_DBGCLAIMCLR(val) MCR14(val, 0, c7, c9, 6)
+
+#endif
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index afcaf8b..e40bbc5 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -52,6 +52,7 @@
#define ARM_DEBUG_ARCH_V7_MM 4
#define ARM_DEBUG_ARCH_V7_1 5
#define ARM_DEBUG_ARCH_V8 6
+#define ARM_DEBUG_ARCH_V8_8 8
/* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE 0
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 9fe1043..f4dab20 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -78,6 +78,9 @@
/* Interrupt controller */
struct vgic_dist vgic;
int max_vcpus;
+
+ /* Mandated version of PSCI */
+ u32 psci_version;
};
#define KVM_NR_MEM_OBJS 40
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index b38c10c..0b8cf31 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -173,6 +173,12 @@
#define KVM_REG_ARM_VFP_FPINST 0x1009
#define KVM_REG_ARM_VFP_FPINST2 0x100A
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
+ KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 29286fb..8ff6674 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -112,6 +112,12 @@
return 0;
}
+static bool psci_cpu_can_disable(unsigned int cpu)
+{
+ /* Hotplug of any CPU is supported */
+ return true;
+}
+
#endif
bool __init psci_smp_available(void)
@@ -126,5 +132,6 @@
.cpu_disable = psci_cpu_disable,
.cpu_die = psci_cpu_die,
.cpu_kill = psci_cpu_kill,
+ .cpu_can_disable = psci_cpu_can_disable,
#endif
};
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 9aca920..630117d 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
+#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <asm/uaccess.h>
#include <asm/kvm.h>
@@ -176,6 +177,7 @@
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
+ + kvm_arm_get_fw_num_regs(vcpu)
+ NUM_TIMER_REGS;
}
@@ -196,6 +198,11 @@
uindices++;
}
+ ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
+ if (ret)
+ return ret;
+ uindices += kvm_arm_get_fw_num_regs(vcpu);
+
ret = copy_timer_indices(vcpu, uindices);
if (ret)
return ret;
@@ -214,6 +221,9 @@
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+ return kvm_arm_get_fw_reg(vcpu, reg);
+
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
@@ -230,6 +240,9 @@
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+ return kvm_arm_set_fw_reg(vcpu, reg);
+
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 3d96225..8a9c654 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -18,6 +18,7 @@
#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
+#include <linux/uaccess.h>
#include <linux/wait.h>
#include <asm/cputype.h>
@@ -425,3 +426,62 @@
smccc_set_retval(vcpu, val, 0, 0, 0);
return 1;
}
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
+{
+ return 1; /* PSCI version */
+}
+
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
+ return -EFAULT;
+
+ return 0;
+}
+
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+ void __user *uaddr = (void __user *)(long)reg->addr;
+ u64 val;
+
+ val = kvm_psci_version(vcpu, vcpu->kvm);
+ if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+ void __user *uaddr = (void __user *)(long)reg->addr;
+ bool wants_02;
+ u64 val;
+
+ if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
+
+ switch (val) {
+ case KVM_ARM_PSCI_0_1:
+ if (wants_02)
+ return -EINVAL;
+ vcpu->kvm->arch.psci_version = val;
+ return 0;
+ case KVM_ARM_PSCI_0_2:
+ case KVM_ARM_PSCI_1_0:
+ if (!wants_02)
+ return -EINVAL;
+ vcpu->kvm->arch.psci_version = val;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 4db459a..1d845e8 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -135,6 +135,7 @@
sdm670-usbc-external-codec-pm660a-mtp-overlay.dtbo \
sda670-cdp-overlay.dtbo \
sda670-mtp-overlay.dtbo \
+ sda670-hdk-overlay.dtbo \
sda670-pm660a-cdp-overlay.dtbo \
sda670-pm660a-mtp-overlay.dtbo \
sdm670-tasha-codec-cdp-overlay.dtbo \
@@ -194,6 +195,7 @@
sdm670-pm660a-aqt1000-cdp-overlay.dtbo-base := sdm670.dtb
sda670-cdp-overlay.dtbo-base := sda670.dtb
sda670-mtp-overlay.dtbo-base := sda670.dtb
+sda670-hdk-overlay.dtbo-base := sda670.dtb
sda670-pm660a-cdp-overlay.dtbo-base := sda670.dtb
sda670-pm660a-mtp-overlay.dtbo-base := sda670.dtb
qcs605-cdp-overlay.dtbo-base := qcs605.dtb
@@ -245,6 +247,7 @@
sdm670-usbc-pm660a-cdp.dtb \
sdm670-usbc-pm660a-mtp.dtb \
sda670-mtp.dtb \
+ sda670-hdk.dtb \
sda670-cdp.dtb \
sdm670-tasha-codec-cdp.dtb \
sdm670-pm660a-tasha-codec-cdp.dtb \
@@ -447,7 +450,8 @@
apq8009w-bg-alpha.dtb \
apq8009-mtp-wcd9326-refboard.dtb \
apq8009-robot-som-refboard.dtb \
- apq8009-dragon.dtb
+ apq8009-dragon.dtb \
+ apq8009-lat-v1.0.dtb
dtb-$(CONFIG_ARCH_SDM450) += sdm450-rcm.dtb \
sdm450-cdp.dtb \
diff --git a/arch/arm64/boot/dts/qcom/apq8009-lat-v1.0.dts b/arch/arm64/boot/dts/qcom/apq8009-lat-v1.0.dts
new file mode 100644
index 0000000..f81e369
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8009-lat-v1.0.dts
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8909-mtp.dtsi"
+#include "8909-pm8916.dtsi"
+#include "msm8909-pm8916-mtp.dtsi"
+#include "apq8009-audio-external_codec.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. APQ8009-lat-v1.0 Board";
+ compatible = "qcom,apq8009-mtp", "qcom,apq8009", "qcom,mtp";
+ qcom,msm-id = <265 2>;
+ qcom,board-id = <10 0x1>;
+
+ bluetooth: bt_qca9379 {
+ compatible = "qca,qca9379";
+ qca,bt-reset-gpio = <&msm_gpio 47 0x0>; /* BT_EN */
+ };
+};
+
+&soc {
+ ext-codec {
+ qcom,msm-mbhc-hphl-swh = <0>;
+ qcom,audio-routing =
+ "AIF4 VI", "MCLK",
+ "RX_BIAS", "MCLK",
+ "MADINPUT", "MCLK",
+ "AMIC2", "MIC BIAS2",
+ "MIC BIAS2", "Headset Mic",
+ "DMIC0", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic0",
+ "DMIC1", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic1",
+ "DMIC2", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic2",
+ "DMIC3", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic3",
+ "SpkrLeft IN", "SPK1 OUT",
+ "SpkrRight IN", "SPK2 OUT";
+ };
+
+ sound-9335 {
+ status = "disabled";
+ };
+
+ i2c@78b8000 {
+ wcd9xxx_codec@d {
+ status = "disabled";
+ };
+ };
+
+ vph_pwr_vreg: vph_pwr_vreg {
+ compatible = "regulator-fixed";
+ status = "ok";
+ regulator-name = "vph_pwr";
+ regulator-always-on;
+ };
+
+ mdss_mdp: qcom,mdss_mdp@1a00000 {
+ status = "disabled";
+ };
+
+ qcom,msm-thermal {
+ qcom,core-control-mask = <0xa>;
+ qcom,freq-mitigation-value = <1190400>;
+ qcom,freq-mitigation-control-mask = <0x05>;
+ };
+};
+
+&sdhc_2 {
+ status = "disabled";
+};
+
+&usb_otg {
+ interrupts = <0 134 0>, <0 140 0>, <0 136 0>;
+ interrupt-names = "core_irq", "async_irq", "phy_irq";
+ qcom,hsusb-otg-mode = <3>;
+ qcom,phy-id-high-as-peripheral;
+ vbus_otg-supply = <&vph_pwr_vreg>;
+};
+
+&external_image_mem {
+ reg = <0x0 0x87a00000 0x0 0x0600000>;
+};
+
+&modem_adsp_mem {
+ reg = <0x0 0x88000000 0x0 0x01e00000>;
+};
+
+&peripheral_mem {
+ reg = <0x0 0x89e00000 0x0 0x0700000>;
+};
+
+&i2c_4 {
+ smb1360_otg_supply: smb1360-chg-fg@14 {
+ compatible = "qcom,smb1360-chg-fg";
+ reg = <0x14>;
+ interrupt-parent = <&msm_gpio>;
+ interrupts = <58 8>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&smb_int_default>;
+ qcom,charging-disabled;
+ qcom,empty-soc-disabled;
+ qcom,chg-inhibit-disabled;
+ qcom,float-voltage-mv = <4200>;
+ qcom,iterm-ma = <200>;
+ qcom,recharge-thresh-mv = <100>;
+ qcom,thermal-mitigation = <1500 700 600 0>;
+ regulator-name = "smb1360_otg_vreg";
+ status = "disabled";
+ };
+};
+
+&firmware {
+ android {
+ compatible = "android,firmware";
+ fstab {
+ compatible = "android,fstab";
+ vendor_fstab: vendor {
+ fsmgr_flags = "wait,slotselect";
+ };
+ /delete-node/ system;
+ };
+ };
+};
+
+&pm8916_chg {
+ status = "ok";
+};
+
+&pm8916_bms {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8009-mtp-wcd9326-refboard.dts b/arch/arm64/boot/dts/qcom/apq8009-mtp-wcd9326-refboard.dts
index a0a9c54..1866e2f 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-mtp-wcd9326-refboard.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009-mtp-wcd9326-refboard.dts
@@ -18,6 +18,8 @@
#include "apq8009-audio-external_codec.dtsi"
#include "apq8009-memory.dtsi"
#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+#include "msm8909-pm8916-camera.dtsi"
+#include "msm8909-pm8916-camera-sensor-robot.dtsi"
/ {
model = "Qualcomm Technologies, Inc. APQ8009 WCD9326 Reference Board";
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-icn9706-720-1440p-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-icn9706-720-1440p-video.dtsi
new file mode 100644
index 0000000..d50fe3b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-icn9706-720-1440p-video.dtsi
@@ -0,0 +1,98 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_icn9706_720_1440_vid: qcom,mdss_dsi_icn9706_720_1440p_video {
+ qcom,mdss-dsi-panel-name =
+ "icn9706 720 1440p video mode dsi panel";
+ qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
+ qcom,mdss-dsi-panel-destination = "display_1";
+ qcom,mdss-dsi-panel-type = "dsi_video_mode";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <720>;
+ qcom,mdss-dsi-panel-height = <1440>;
+ qcom,mdss-dsi-h-front-porch = <84>;
+ qcom,mdss-dsi-h-back-porch = <84>;
+ qcom,mdss-dsi-h-pulse-width = <24>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <20>;
+ qcom,mdss-dsi-v-front-porch = <24>;
+ qcom,mdss-dsi-v-pulse-width = <8>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-h-sync-pulse = <1>;
+ qcom,mdss-dsi-traffic-mode = "burst_mode";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-map = "lane_map_0123";
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-lane-3-state;
+ qcom,mdss-dsi-panel-timings = [8b 1e 14 00 44 48 18 22 19
+ 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x04>;
+ qcom,mdss-dsi-t-clk-pre = <0x1c>;
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-on-command = [39 01 00 00 64 00 02 01 00
+ 39 01 00 00 00 00 03 f0 5a 5a
+ 39 01 00 00 00 00 03 f1 5a 5a
+ 39 01 00 00 00 00 03 f0 b4 4b
+ 39 01 00 00 00 00 03 b6 10 10
+ 39 01 00 00 00 00 15 b4 0a 08 12 10 0e 0c 00 00
+ 00 03 00 03 03 03 03 03 03 03 04 06
+ 39 01 00 00 00 00 15 b3 0b 09 13 11 0f 0d 00 00
+ 00 03 00 03 03 03 03 03 03 03 05 07
+ 39 01 00 00 00 00 0d b0 54 32 23 45 44 44 44 44
+ 60 01 60 01
+ 39 01 00 00 00 00 09 b1 32 84 02 83 15 01 57 01
+ 39 01 00 00 00 00 02 b2 33
+ 39 01 00 00 00 00 07 bd 54 14 6a 6a 20 19
+ 39 01 00 00 00 00 12 b7 01 01 09 11 0d 15 19 0d
+ 21 1d 00 00 20 00 02 ff 3c
+ 39 01 00 00 00 00 06 b8 23 01 30 34 53
+ 39 01 00 00 00 00 05 b9 a1 2c ff c4
+ 39 01 00 00 00 00 03 ba 88 23
+ 39 01 00 00 00 00 07 c1 16 16 04 0c 10 04
+ 39 01 00 00 00 00 03 c2 12 68
+ 39 01 00 00 00 00 04 c3 22 31 04
+ 39 01 00 00 00 00 06 c7 05 23 6b 41 00
+ 39 01 00 00 00 00 27 c8 7c 54 3d 2d 26 16 1b 08
+ 25 28 2d 4f 3e 48 3d 3d 35 25 06 7c 54 3d 2d
+ 26 16 1b 08 25 28 2d 4f 3e 48 3d 3d 35 25 06
+ 39 01 00 00 00 00 09 c6 00 00 68 00 00 60 36 00
+ 05 01 00 00 64 00 02 11 00
+ 05 01 00 00 32 00 02 29 00];
+
+ qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00
+ 05 01 00 00 32 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "bta_check";
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-reset-sequence = <1 2>, <0 20>, <1 50>;
+ qcom,mdss-dsi-tx-eot-append;
+ qcom,mdss-pan-physical-width-dimension = <63>;
+ qcom,mdss-pan-physical-height-dimension = <112>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot.dtsi
new file mode 100644
index 0000000..6f6655a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera-sensor-robot.dtsi
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&i2c_3 {
+ status = "ok";
+};
+
+&i2c_3 {
+ qcom,camera@0 {
+ cell-index = <0>;
+ compatible = "qcom,camera";
+ reg = <0x2>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,csid-sd-index = <0>;
+ qcom,mount-angle = <90>;
+ cam_vdig-supply = <&pm8916_l2>;
+ cam_vana-supply = <&pm8916_l17>;
+ cam_vio-supply = <&pm8916_l6>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+ qcom,cam-vreg-min-voltage = <1200000 1800000 2850000>;
+ qcom,cam-vreg-max-voltage = <1200000 1800000 2850000>;
+ qcom,cam-vreg-op-mode = <200000 0 80000>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_default
+ &cam_sensor_rear_default>;
+ pinctrl-1 = <&cam_sensor_mclk0_sleep &cam_sensor_rear_sleep>;
+ gpios = <&msm_gpio 26 0>,
+ <&msm_gpio 35 0>,
+ <&msm_gpio 34 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-standby = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK",
+ "CAM_RESET",
+ "CAM_STANDBY";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ status = "ok";
+ clocks = <&clock_gcc clk_mclk0_clk_src>,
+ <&clock_gcc clk_gcc_camss_mclk0_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@1 {
+ cell-index = <1>;
+ compatible = "qcom,camera";
+ reg = <0x1>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,csid-sd-index = <0>;
+ qcom,mount-angle = <90>;
+ cam_vana-supply = <&pm8916_l17>;
+ cam_vio-supply = <&pm8916_l6>;
+ qcom,cam-vreg-name = "cam_vio","cam_vana";
+ qcom,cam-vreg-min-voltage = <1800000 2850000>;
+ qcom,cam-vreg-max-voltage = <1800000 2850000>;
+ qcom,cam-vreg-op-mode = <0 80000>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_default
+ &cam_sensor_rear_default>;
+ pinctrl-1 = <&cam_sensor_mclk0_sleep &cam_sensor_rear_sleep>;
+ gpios = <&msm_gpio 26 0>,
+ <&msm_gpio 35 0>,
+ <&msm_gpio 34 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-standby = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK",
+ "CAM_RESET",
+ "CAM_STANDBY";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ status = "ok";
+ clocks = <&clock_gcc clk_mclk0_clk_src>,
+ <&clock_gcc clk_gcc_camss_mclk0_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera.dtsi
new file mode 100644
index 0000000..0b648ec
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8909-pm8916-camera.dtsi
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,msm-cam@1800000{
+ compatible = "qcom,msm-cam";
+ reg = <0x1b00000 0x40000>;
+ reg-names = "msm-cam";
+ status = "ok";
+ bus-vectors = "suspend", "svs", "nominal", "turbo";
+ qcom,bus-votes = <0 320000000 640000000 640000000>;
+ };
+
+ qcom,csiphy@1b0ac00 {
+ cell-index = <0>;
+ compatible = "qcom,csiphy-v3.1", "qcom,csiphy";
+ reg = <0x1b0ac00 0x200>,
+ <0x1b00030 0x4>;
+ reg-names = "csiphy", "csiphy_clk_mux";
+ interrupts = <0 78 0>;
+ interrupt-names = "csiphy";
+ clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+ <&clock_gcc clk_csi0phytimer_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi0phytimer_clk>,
+ <&clock_gcc clk_camss_top_ahb_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi0phy_clk>,
+ <&clock_gcc clk_gcc_camss_csi1phy_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>;
+ clock-names = "camss_top_ahb_clk", "ispif_ahb_clk",
+ "csiphy_timer_src_clk", "csiphy_timer_clk",
+ "camss_ahb_src", "csi0_phy_clk", "csi1_phy_clk",
+ "camss_ahb_clk";
+ qcom,clock-rates = <0 0 200000000 0 0 0 0 0>;
+ };
+
+ qcom,csid@1b08000 {
+ cell-index = <0>;
+ compatible = "qcom,csid-v3.1", "qcom,csid";
+ reg = <0x1b08000 0x100>;
+ reg-names = "csid";
+ interrupts = <0 49 0>;
+ interrupt-names = "csid";
+ qcom,csi-vdd-voltage = <1200000>;
+ qcom,mipi-csi-vdd-supply = <&pm8916_l2>;
+ clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_csi0_ahb_clk>,
+ <&clock_gcc clk_csi0_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi0_clk>,
+ <&clock_gcc clk_gcc_camss_csi0pix_clk>,
+ <&clock_gcc clk_gcc_camss_csi0rdi_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>;
+ clock-names = "ispif_ahb_clk", "camss_top_ahb_clk",
+ "csi_ahb_clk", "csi_src_clk",
+ "csi_clk", "csi_pix_clk",
+ "csi_rdi_clk", "camss_ahb_clk";
+ qcom,clock-rates = <40000000 0 0 200000000 0 0 0 0>;
+ };
+
+ qcom,csid@1b08400 {
+ cell-index = <1>;
+ compatible = "qcom,csid-v3.1", "qcom,csid";
+ reg = <0x1b08400 0x100>;
+ reg-names = "csid";
+ interrupts = <0 50 0>;
+ interrupt-names = "csid";
+ qcom,csi-vdd-voltage = <1200000>;
+ qcom,mipi-csi-vdd-supply = <&pm8916_l2>;
+ clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_csi1_ahb_clk>,
+ <&clock_gcc clk_csi1_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi1_clk>,
+ <&clock_gcc clk_gcc_camss_csi1pix_clk>,
+ <&clock_gcc clk_gcc_camss_csi1rdi_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_csi1phy_clk>;
+ clock-names = "ispif_ahb_clk", "camss_top_ahb_clk",
+ "csi_ahb_clk", "csi_src_clk",
+ "csi_clk", "csi_pix_clk",
+ "csi_rdi_clk", "camss_ahb_clk", "camss_csi1_phy";
+ qcom,clock-rates = <40000000 0 0 200000000 0 0 0 0 0>;
+ };
+
+ qcom,ispif@1b0a000 {
+ cell-index = <0>;
+ compatible = "qcom,ispif";
+ reg = <0x1b0a000 0x500>,
+ <0x1b00020 0x10>;
+ reg-names = "ispif", "csi_clk_mux";
+ interrupts = <0 51 0>;
+ interrupt-names = "ispif";
+ qcom,num-isps = <0x1>;
+ vfe0_vdd_supply = <&gdsc_vfe>;
+ clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+
+ <&clock_gcc clk_csi0_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi0_clk>,
+ <&clock_gcc clk_gcc_camss_csi0rdi_clk>,
+ <&clock_gcc clk_gcc_camss_csi0pix_clk>,
+ <&clock_gcc clk_csi1_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi1_clk>,
+ <&clock_gcc clk_gcc_camss_csi1rdi_clk>,
+ <&clock_gcc clk_gcc_camss_csi1pix_clk>,
+ <&clock_gcc clk_vfe0_clk_src>,
+ <&clock_gcc clk_gcc_camss_vfe0_clk>,
+ <&clock_gcc clk_gcc_camss_csi_vfe0_clk>;
+
+ clock-names = "camss_top_ahb_clk", "ispif_ahb_clk",
+ "csi0_src_clk", "csi0_clk",
+ "csi0_rdi_clk", "csi0_pix_clk",
+ "csi1_src_clk", "csi1_clk",
+ "csi1_rdi_clk", "csi1_pix_clk",
+ "vfe0_clk_src", "camss_vfe_vfe0_clk",
+ "camss_csi_vfe0_clk";
+ qcom,clock-rates = <0 40000000
+ 200000000 0 0 0
+ 200000000 0 0 0
+ 0 0 0>;
+ qcom,clock-control = "NO_SET_RATE", "SET_RATE",
+ "SET_RATE", "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+ "SET_RATE", "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+ "INIT_RATE", "NO_SET_RATE", "NO_SET_RATE";
+ };
+
+ qcom,vfe@1b10000 {
+ cell-index = <0>;
+ compatible = "qcom,vfe32";
+ reg = <0x1b10000 0x830>,
+ <0x1b40000 0x200>;
+ reg-names = "vfe", "vfe_vbif";
+ interrupts = <0 52 0>;
+ interrupt-names = "vfe";
+ vdd-supply = <&gdsc_vfe>;
+ clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+ <&clock_gcc clk_vfe0_clk_src>,
+ <&clock_gcc clk_gcc_camss_vfe0_clk>,
+ <&clock_gcc clk_gcc_camss_csi_vfe0_clk>,
+ <&clock_gcc clk_gcc_camss_vfe_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_vfe_axi_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_top_ahb_clk>;
+ clock-names = "camss_top_ahb_clk", "vfe_clk_src",
+ "camss_vfe_vfe_clk", "camss_csi_vfe_clk", "iface_clk",
+ "bus_clk", "camss_ahb_clk", "ispif_ahb_clk";
+ qcom,clock-rates = <40000000 266670000 0 0 0 0 0 0>;
+
+ qos-entries = <8>;
+ qos-regs = <0x7BC 0x7C0 0x7C4 0x7C8 0x7CC 0x7D0
+ 0x7D4 0x798>;
+ qos-settings = <0xAAA5AAA5 0xAAA5AAA5 0xAAA5AAA5
+ 0xAAA5AAA5 0xAAA5AAA5 0xAAA5AAA5
+ 0xAAA5AAA5 0x00010000>;
+ vbif-entries = <1>;
+ vbif-regs = <0x04>;
+ vbif-settings = <0x1>;
+ ds-entries = <15>;
+ ds-regs = <0x7D8 0x7DC 0x7E0 0x7E4 0x7E8
+ 0x7EC 0x7F0 0x7F4 0x7F8 0x7FC 0x800
+ 0x804 0x808 0x80C 0x810>;
+ ds-settings = <0xCCCC1111 0xCCCC1111 0xCCCC1111
+ 0xCCCC1111 0xCCCC1111 0xCCCC1111
+ 0xCCCC1111 0xCCCC1111 0xCCCC1111
+ 0xCCCC1111 0xCCCC1111 0xCCCC1111
+ 0xCCCC1111 0xCCCC1111 0x00000103>;
+
+ bus-util-factor = <1024>;
+ };
+
+ qcom,cam_smmu {
+ status = "ok";
+ compatible = "qcom,msm-cam-smmu";
+ msm_cam_smmu_cb1: msm_cam_smmu_cb1 {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_iommu 0x400 0x00>;
+ label = "vfe";
+ qcom,scratch-buf-support;
+ };
+ };
+
+ qcom,irqrouter@1b00000 {
+ status = "ok";
+ cell-index = <0>;
+ compatible = "qcom,irqrouter";
+ reg = <0x1b00000 0x100>;
+ reg-names = "irqrouter";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-cdp-mirror-lake-touch.dtsi b/arch/arm64/boot/dts/qcom/msm8917-cdp-mirror-lake-touch.dtsi
index d6d85fa..1f19e20 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-cdp-mirror-lake-touch.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-cdp-mirror-lake-touch.dtsi
@@ -141,6 +141,12 @@
qcom,panel-roi-alignment = <2 2 2 2 2 2>;
};
+&dsi_icn9706_720_1440_vid {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
+};
+
&tlmm {
tlmm_gpio_key {
gpio_key_active: gpio_key_active {
diff --git a/arch/arm64/boot/dts/qcom/msm8937-mdss-panels.dtsi b/arch/arm64/boot/dts/qcom/msm8937-mdss-panels.dtsi
index ef4f4b0..528e8aa 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-mdss-panels.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-mdss-panels.dtsi
@@ -25,6 +25,8 @@
#include "dsi-panel-hx8399c-hd-plus-video.dtsi"
#include "dsi-panel-nt35695b-truly-fhd-video.dtsi"
#include "dsi-panel-nt35695b-truly-fhd-cmd.dtsi"
+#include "dsi-panel-icn9706-720-1440p-video.dtsi"
+
&soc {
dsi_panel_pwr_supply: dsi_panel_pwr_supply {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
index b80583e..e3a5b4a 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
@@ -25,3 +25,75 @@
qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+&int_codec {
+ status = "disabled";
+};
+
+&pmic_analog_codec {
+ status = "disabled";
+};
+
+&wsa881x_i2c_f {
+ status = "disabled";
+};
+
+&wsa881x_i2c_45 {
+ status = "disabled";
+};
+
+&cdc_pri_mi2s_gpios {
+ status = "disabled";
+};
+
+&wsa881x_analog_vi_gpio {
+ status = "disabled";
+};
+
+&wsa881x_analog_clk_gpio {
+ status = "disabled";
+};
+
+&wsa881x_analog_reset_gpio {
+ status = "disabled";
+};
+
+&cdc_comp_gpios {
+ status = "disabled";
+};
+
+&slim_msm {
+ status = "okay";
+};
+
+&dai_slim {
+ status = "okay";
+};
+
+&wcd9xxx_intc {
+ status = "okay";
+};
+
+&clock_audio {
+ status = "okay";
+};
+
+&wcd9335 {
+ status = "okay";
+};
+
+&cdc_us_euro_sw {
+ status = "okay";
+};
+
+&cdc_quin_mi2s_gpios {
+ status = "okay";
+};
+
+&wcd_rst_gpio {
+ status = "okay";
+};
+
+&ext_codec {
+ qcom,model = "msm8953-tasha-snd-card";
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi632.dtsi b/arch/arm64/boot/dts/qcom/pmi632.dtsi
index 65390cb..d77d55d 100644
--- a/arch/arm64/boot/dts/qcom/pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi632.dtsi
@@ -316,6 +316,8 @@
qcom,auto-recharge-soc = <98>;
qcom,chg-vadc = <&pmi632_vadc>;
qcom,flash-disable-soc = <10>;
+ qcom,sw-jeita-enable;
+ qcom,step-charging-enable;
qcom,thermal-mitigation
= <3000000 2500000 2000000 1500000
diff --git a/arch/arm64/boot/dts/qcom/sda670-hdk-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-hdk-overlay.dts
new file mode 100644
index 0000000..a7299a4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-hdk-overlay.dts
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include "sda670-hdk.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDA670 HDK";
+ compatible = "qcom,sda670-hdk", "qcom,sda670", "qcom,hdk";
+ qcom,board-id = <0x01001F 0x00>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-hdk.dts b/arch/arm64/boot/dts/qcom/sda670-hdk.dts
new file mode 100644
index 0000000..ed9eec9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-hdk.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sda670.dtsi"
+#include "sda670-hdk.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDA670 HDK";
+ compatible = "qcom,sda670-hdk", "qcom,sda670", "qcom,hdk";
+ qcom,board-id = <0x01001F 0x00>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-hdk.dtsi b/arch/arm64/boot/dts/qcom/sda670-hdk.dtsi
new file mode 100644
index 0000000..4daf8b4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-hdk.dtsi
@@ -0,0 +1,32 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "sdm670-qrd.dtsi"
+#include "sdm670-external-codec.dtsi"
+
+&dsi_dual_nt36850_truly_cmd_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_hx8399_truly_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,panel-mode-gpio = <&tlmm 76 0>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+ qcom,platform-reset-gpio = <&tlmm 75 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_hx8399_truly_cmd_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
index 0336d82..7a61a7a 100644
--- a/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439-mtp.dtsi
@@ -229,6 +229,321 @@
"dfps_immediate_porch_mode_vfp";
};
+&dsi_nt35695b_truly_fhd_cmd {
+ qcom,mdss-dsi-panel-width = <1080>;
+ qcom,mdss-dsi-panel-height = <1920>;
+ qcom,mdss-dsi-h-front-porch = <120>;
+ qcom,mdss-dsi-h-back-porch = <60>;
+ qcom,mdss-dsi-h-pulse-width = <12>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <2>;
+ qcom,mdss-dsi-v-front-porch = <12>;
+ qcom,mdss-dsi-v-pulse-width = <2>;
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-on-command =
+ [15 01 00 00 10 00 02 ff 20
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 00 01
+ 15 01 00 00 00 00 02 01 55
+ 15 01 00 00 00 00 02 02 45
+ 15 01 00 00 00 00 02 03 55
+ 15 01 00 00 00 00 02 05 50
+ 15 01 00 00 00 00 02 06 a8
+ 15 01 00 00 00 00 02 07 ad
+ 15 01 00 00 00 00 02 08 0c
+ 15 01 00 00 00 00 02 0b aa
+ 15 01 00 00 00 00 02 0c aa
+ 15 01 00 00 00 00 02 0e b0
+ 15 01 00 00 00 00 02 0f b3
+ 15 01 00 00 00 00 02 11 28
+ 15 01 00 00 00 00 02 12 10
+ 15 01 00 00 00 00 02 13 01
+ 15 01 00 00 00 00 02 14 4a
+ 15 01 00 00 00 00 02 15 12
+ 15 01 00 00 00 00 02 16 12
+ 15 01 00 00 00 00 02 30 01
+ 15 01 00 00 00 00 02 72 11
+ 15 01 00 00 00 00 02 58 82
+ 15 01 00 00 00 00 02 59 00
+ 15 01 00 00 00 00 02 5a 02
+ 15 01 00 00 00 00 02 5b 00
+ 15 01 00 00 00 00 02 5c 82
+ 15 01 00 00 00 00 02 5d 80
+ 15 01 00 00 00 00 02 5e 02
+ 15 01 00 00 00 00 02 5f 00
+ 15 01 00 00 00 00 02 ff 24
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 00 01
+ 15 01 00 00 00 00 02 01 0b
+ 15 01 00 00 00 00 02 02 0c
+ 15 01 00 00 00 00 02 03 89
+ 15 01 00 00 00 00 02 04 8a
+ 15 01 00 00 00 00 02 05 0f
+ 15 01 00 00 00 00 02 06 10
+ 15 01 00 00 00 00 02 07 10
+ 15 01 00 00 00 00 02 08 1c
+ 15 01 00 00 00 00 02 09 00
+ 15 01 00 00 00 00 02 0a 00
+ 15 01 00 00 00 00 02 0b 00
+ 15 01 00 00 00 00 02 0c 00
+ 15 01 00 00 00 00 02 0d 13
+ 15 01 00 00 00 00 02 0e 15
+ 15 01 00 00 00 00 02 0f 17
+ 15 01 00 00 00 00 02 10 01
+ 15 01 00 00 00 00 02 11 0b
+ 15 01 00 00 00 00 02 12 0c
+ 15 01 00 00 00 00 02 13 89
+ 15 01 00 00 00 00 02 14 8a
+ 15 01 00 00 00 00 02 15 0f
+ 15 01 00 00 00 00 02 16 10
+ 15 01 00 00 00 00 02 17 10
+ 15 01 00 00 00 00 02 18 1c
+ 15 01 00 00 00 00 02 19 00
+ 15 01 00 00 00 00 02 1a 00
+ 15 01 00 00 00 00 02 1b 00
+ 15 01 00 00 00 00 02 1c 00
+ 15 01 00 00 00 00 02 1d 13
+ 15 01 00 00 00 00 02 1e 15
+ 15 01 00 00 00 00 02 1f 17
+ 15 01 00 00 00 00 02 20 00
+ 15 01 00 00 00 00 02 21 01
+ 15 01 00 00 00 00 02 22 00
+ 15 01 00 00 00 00 02 23 40
+ 15 01 00 00 00 00 02 24 40
+ 15 01 00 00 00 00 02 25 6d
+ 15 01 00 00 00 00 02 26 40
+ 15 01 00 00 00 00 02 27 40
+ 15 01 00 00 00 00 02 29 d8
+ 15 01 00 00 00 00 02 2a 2a
+ 15 01 00 00 00 00 02 4b 03
+ 15 01 00 00 00 00 02 4c 11
+ 15 01 00 00 00 00 02 4d 10
+ 15 01 00 00 00 00 02 4e 01
+ 15 01 00 00 00 00 02 4f 01
+ 15 01 00 00 00 00 02 50 10
+ 15 01 00 00 00 00 02 51 00
+ 15 01 00 00 00 00 02 52 80
+ 15 01 00 00 00 00 02 53 00
+ 15 01 00 00 00 00 02 54 07
+ 15 01 00 00 00 00 02 55 25
+ 15 01 00 00 00 00 02 56 00
+ 15 01 00 00 00 00 02 58 07
+ 15 01 00 00 00 00 02 5b 43
+ 15 01 00 00 00 00 02 5c 00
+ 15 01 00 00 00 00 02 5f 73
+ 15 01 00 00 00 00 02 60 73
+ 15 01 00 00 00 00 02 63 22
+ 15 01 00 00 00 00 02 64 00
+ 15 01 00 00 00 00 02 67 08
+ 15 01 00 00 00 00 02 68 04
+ 15 01 00 00 00 00 02 7a 80
+ 15 01 00 00 00 00 02 7b 91
+ 15 01 00 00 00 00 02 7c d8
+ 15 01 00 00 00 00 02 7d 60
+ 15 01 00 00 00 00 02 93 06
+ 15 01 00 00 00 00 02 94 06
+ 15 01 00 00 00 00 02 8a 00
+ 15 01 00 00 00 00 02 9b 0f
+ 15 01 00 00 00 00 02 b3 c0
+ 15 01 00 00 00 00 02 b4 00
+ 15 01 00 00 00 00 02 b5 00
+ 15 01 00 00 00 00 02 b6 21
+ 15 01 00 00 00 00 02 b7 22
+ 15 01 00 00 00 00 02 b8 07
+ 15 01 00 00 00 00 02 b9 07
+ 15 01 00 00 00 00 02 ba 22
+ 15 01 00 00 00 00 02 bd 20
+ 15 01 00 00 00 00 02 be 07
+ 15 01 00 00 00 00 02 bf 07
+ 15 01 00 00 00 00 02 c1 6d
+ 15 01 00 00 00 00 02 c4 24
+ 15 01 00 00 00 00 02 e3 00
+ 15 01 00 00 00 00 02 ec 00
+ 15 01 00 00 00 00 02 ff 10
+ 15 01 00 00 00 00 02 bb 10
+ 15 01 00 00 00 00 02 35 00
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 78 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 14
+ 00 02 28 00 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-panel-timings-phy-12nm = [17 0a 0f 06 03 08 06 0e];
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+ qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+ qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+ qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+ qcom,ulps-enabled;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
+ /delete-node/ qcom,mdss-dsi-display-timings;
+};
+
+&dsi_nt35695b_truly_fhd_video {
+ qcom,mdss-dsi-panel-width = <1080>;
+ qcom,mdss-dsi-panel-height = <1920>;
+ qcom,mdss-dsi-h-front-porch = <120>;
+ qcom,mdss-dsi-h-back-porch = <60>;
+ qcom,mdss-dsi-h-pulse-width = <12>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-v-back-porch = <2>;
+ qcom,mdss-dsi-v-front-porch = <12>;
+ qcom,mdss-dsi-v-pulse-width = <2>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-on-command =
+ [15 01 00 00 10 00 02 ff 20
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 00 01
+ 15 01 00 00 00 00 02 01 55
+ 15 01 00 00 00 00 02 02 45
+ 15 01 00 00 00 00 02 03 55
+ 15 01 00 00 00 00 02 05 50
+ 15 01 00 00 00 00 02 06 a8
+ 15 01 00 00 00 00 02 07 ad
+ 15 01 00 00 00 00 02 08 0c
+ 15 01 00 00 00 00 02 0b aa
+ 15 01 00 00 00 00 02 0c aa
+ 15 01 00 00 00 00 02 0e b0
+ 15 01 00 00 00 00 02 0f b3
+ 15 01 00 00 00 00 02 11 28
+ 15 01 00 00 00 00 02 12 10
+ 15 01 00 00 00 00 02 13 01
+ 15 01 00 00 00 00 02 14 4a
+ 15 01 00 00 00 00 02 15 12
+ 15 01 00 00 00 00 02 16 12
+ 15 01 00 00 00 00 02 30 01
+ 15 01 00 00 00 00 02 72 11
+ 15 01 00 00 00 00 02 58 82
+ 15 01 00 00 00 00 02 59 00
+ 15 01 00 00 00 00 02 5a 02
+ 15 01 00 00 00 00 02 5b 00
+ 15 01 00 00 00 00 02 5c 82
+ 15 01 00 00 00 00 02 5d 80
+ 15 01 00 00 00 00 02 5e 02
+ 15 01 00 00 00 00 02 5f 00
+ 15 01 00 00 00 00 02 ff 24
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 00 01
+ 15 01 00 00 00 00 02 01 0b
+ 15 01 00 00 00 00 02 02 0c
+ 15 01 00 00 00 00 02 03 89
+ 15 01 00 00 00 00 02 04 8a
+ 15 01 00 00 00 00 02 05 0f
+ 15 01 00 00 00 00 02 06 10
+ 15 01 00 00 00 00 02 07 10
+ 15 01 00 00 00 00 02 08 1c
+ 15 01 00 00 00 00 02 09 00
+ 15 01 00 00 00 00 02 0a 00
+ 15 01 00 00 00 00 02 0b 00
+ 15 01 00 00 00 00 02 0c 00
+ 15 01 00 00 00 00 02 0d 13
+ 15 01 00 00 00 00 02 0e 15
+ 15 01 00 00 00 00 02 0f 17
+ 15 01 00 00 00 00 02 10 01
+ 15 01 00 00 00 00 02 11 0b
+ 15 01 00 00 00 00 02 12 0c
+ 15 01 00 00 00 00 02 13 89
+ 15 01 00 00 00 00 02 14 8a
+ 15 01 00 00 00 00 02 15 0f
+ 15 01 00 00 00 00 02 16 10
+ 15 01 00 00 00 00 02 17 10
+ 15 01 00 00 00 00 02 18 1c
+ 15 01 00 00 00 00 02 19 00
+ 15 01 00 00 00 00 02 1a 00
+ 15 01 00 00 00 00 02 1b 00
+ 15 01 00 00 00 00 02 1c 00
+ 15 01 00 00 00 00 02 1d 13
+ 15 01 00 00 00 00 02 1e 15
+ 15 01 00 00 00 00 02 1f 17
+ 15 01 00 00 00 00 02 20 00
+ 15 01 00 00 00 00 02 21 01
+ 15 01 00 00 00 00 02 22 00
+ 15 01 00 00 00 00 02 23 40
+ 15 01 00 00 00 00 02 24 40
+ 15 01 00 00 00 00 02 25 6d
+ 15 01 00 00 00 00 02 26 40
+ 15 01 00 00 00 00 02 27 40
+ 15 01 00 00 00 00 02 29 d8
+ 15 01 00 00 00 00 02 2a 2a
+ 15 01 00 00 00 00 02 4b 03
+ 15 01 00 00 00 00 02 4c 11
+ 15 01 00 00 00 00 02 4d 10
+ 15 01 00 00 00 00 02 4e 01
+ 15 01 00 00 00 00 02 4f 01
+ 15 01 00 00 00 00 02 50 10
+ 15 01 00 00 00 00 02 51 00
+ 15 01 00 00 00 00 02 52 80
+ 15 01 00 00 00 00 02 53 00
+ 15 01 00 00 00 00 02 54 07
+ 15 01 00 00 00 00 02 55 25
+ 15 01 00 00 00 00 02 56 00
+ 15 01 00 00 00 00 02 58 07
+ 15 01 00 00 00 00 02 5b 43
+ 15 01 00 00 00 00 02 5c 00
+ 15 01 00 00 00 00 02 5f 73
+ 15 01 00 00 00 00 02 60 73
+ 15 01 00 00 00 00 02 63 22
+ 15 01 00 00 00 00 02 64 00
+ 15 01 00 00 00 00 02 67 08
+ 15 01 00 00 00 00 02 68 04
+ 15 01 00 00 00 00 02 7a 80
+ 15 01 00 00 00 00 02 7b 91
+ 15 01 00 00 00 00 02 7c d8
+ 15 01 00 00 00 00 02 7d 60
+ 15 01 00 00 00 00 02 93 06
+ 15 01 00 00 00 00 02 94 06
+ 15 01 00 00 00 00 02 8a 00
+ 15 01 00 00 00 00 02 9b 0f
+ 15 01 00 00 00 00 02 b3 c0
+ 15 01 00 00 00 00 02 b4 00
+ 15 01 00 00 00 00 02 b5 00
+ 15 01 00 00 00 00 02 b6 21
+ 15 01 00 00 00 00 02 b7 22
+ 15 01 00 00 00 00 02 b8 07
+ 15 01 00 00 00 00 02 b9 07
+ 15 01 00 00 00 00 02 ba 22
+ 15 01 00 00 00 00 02 bd 20
+ 15 01 00 00 00 00 02 be 07
+ 15 01 00 00 00 00 02 bf 07
+ 15 01 00 00 00 00 02 c1 6d
+ 15 01 00 00 00 00 02 c4 24
+ 15 01 00 00 00 00 02 e3 00
+ 15 01 00 00 00 00 02 ec 00
+ 15 01 00 00 00 00 02 ff 10
+ 15 01 00 00 00 00 02 bb 03
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 78 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00
+ 14 00 02 28 00 05 01 00 00 78 00
+ 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-panel-timings-phy-12nm = [17 0a 0f 06 03 08 06 0e];
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+ qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
+ qcom,mdss-dsi-bl-pmic-bank-select = <0>;
+ qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
+ /delete-node/ qcom,mdss-dsi-display-timings;
+};
+
&i2c_2 {
#include "smb1355.dtsi"
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
index c8f7ac0..78047bd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
@@ -11,6 +11,8 @@
* GNU General Public License for more details.
*/
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+
&soc {
led_flash_rear: qcom,camera-flash@0 {
cell-index = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
index 27be1fd..f63d442 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
@@ -10,6 +10,8 @@
* GNU General Public License for more details.
*/
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
&pm660_0{
pm660_charger: qcom,qpnp-smb2 {
compatible = "qcom,qpnp-smb2";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd-overlay.dts
index 36d485e..67b5ebe 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,7 +19,7 @@
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku1.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L QRD";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku1.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku1.dtsi
new file mode 100644
index 0000000..2c1cde6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku1.dtsi
@@ -0,0 +1,14 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm670-qrd.dtsi"
+#include "sdm670-audio-overlay.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
index 73d1909..d5edb36 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
@@ -13,13 +13,7 @@
/dts-v1/;
/plugin/;
-#include <dt-bindings/clock/qcom,gcc-sdm845.h>
-#include <dt-bindings/clock/qcom,camcc-sdm845.h>
-#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
-#include <dt-bindings/clock/qcom,rpmh.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku2.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L QRD SKU2";
@@ -30,22 +24,3 @@
<0x0001001b 0x0102001a 0x0 0x0>,
<0x0001001b 0x0201011a 0x0 0x0>;
};
-
-&dsi_dual_nt36850_truly_cmd_display {
- /delete-property/ qcom,dsi-display-active;
-};
-
-&dsi_hx8399_truly_cmd {
- qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
- qcom,mdss-dsi-bl-min-level = <1>;
- qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,panel-mode-gpio = <&tlmm 76 0>;
- qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
- qcom,platform-reset-gpio = <&tlmm 75 0>;
- qcom,platform-te-gpio = <&tlmm 10 0>;
-};
-
-&dsi_hx8399_truly_cmd_display {
- qcom,dsi-display-active;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
index 680bc17..9f871c5 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,7 @@
/dts-v1/;
#include "sdm670.dtsi"
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku2.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L QRD SKU2";
@@ -24,22 +24,3 @@
<0x0001001b 0x0102001a 0x0 0x0>,
<0x0001001b 0x0201011a 0x0 0x0>;
};
-
-&dsi_dual_nt36850_truly_cmd_display {
- /delete-property/ qcom,dsi-display-active;
-};
-
-&dsi_hx8399_truly_cmd {
- qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
- qcom,mdss-dsi-bl-min-level = <1>;
- qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,panel-mode-gpio = <&tlmm 76 0>;
- qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
- qcom,platform-reset-gpio = <&tlmm 75 0>;
- qcom,platform-te-gpio = <&tlmm 10 0>;
-};
-
-&dsi_hx8399_truly_cmd_display {
- qcom,dsi-display-active;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dtsi
new file mode 100644
index 0000000..cdb652e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dtsi
@@ -0,0 +1,32 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "sdm670-qrd.dtsi"
+#include "sdm670-audio-overlay.dtsi"
+
+&dsi_dual_nt36850_truly_cmd_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_hx8399_truly_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,panel-mode-gpio = <&tlmm 76 0>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+ qcom,platform-reset-gpio = <&tlmm 75 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_hx8399_truly_cmd_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd.dts
index c22afa4..318939f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,7 @@
/dts-v1/;
#include "sdm670.dtsi"
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku1.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L QRD";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
index 3b8b375..5ff2c32 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
@@ -10,10 +10,10 @@
* GNU General Public License for more details.
*/
+#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/gpio/gpio.h>
#include "sdm670-camera-sensor-qrd.dtsi"
#include "sdm670-pmic-overlay.dtsi"
-#include "sdm670-audio-overlay.dtsi"
#include "sdm670-sde-display.dtsi"
&qupv3_se10_i2c {
diff --git a/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts
index 803616d..08c3433 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-qrd-overlay.dts
@@ -13,13 +13,7 @@
/dts-v1/;
/plugin/;
-#include <dt-bindings/clock/qcom,gcc-sdm845.h>
-#include <dt-bindings/clock/qcom,camcc-sdm845.h>
-#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
-#include <dt-bindings/clock/qcom,rpmh.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku1.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L QRD";
diff --git a/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts b/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts
index ab3ce4d..91891ba 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2-overlay.dts
@@ -13,13 +13,7 @@
/dts-v1/;
/plugin/;
-#include <dt-bindings/clock/qcom,gcc-sdm845.h>
-#include <dt-bindings/clock/qcom,camcc-sdm845.h>
-#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
-#include <dt-bindings/clock/qcom,rpmh.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku2.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L QRD SKU2";
@@ -30,22 +24,3 @@
<0x0001001b 0x0102001a 0x0 0x0>,
<0x0001001b 0x0201011a 0x0 0x0>;
};
-
-&dsi_dual_nt36850_truly_cmd_display {
- /delete-property/ qcom,dsi-display-active;
-};
-
-&dsi_hx8399_truly_cmd {
- qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
- qcom,mdss-dsi-bl-min-level = <1>;
- qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,panel-mode-gpio = <&tlmm 76 0>;
- qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
- qcom,platform-reset-gpio = <&tlmm 75 0>;
- qcom,platform-te-gpio = <&tlmm 10 0>;
-};
-
-&dsi_hx8399_truly_cmd_display {
- qcom,dsi-display-active;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2.dts b/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2.dts
index 76b2862..f674893 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-qrd-sku2.dts
@@ -14,7 +14,7 @@
/dts-v1/;
#include "sdm710.dtsi"
-#include "sdm670-qrd.dtsi"
+#include "sdm670-qrd-sku2.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L QRD SKU2";
@@ -24,22 +24,3 @@
<0x0001001b 0x0102001a 0x0 0x0>,
<0x0001001b 0x0201011a 0x0 0x0>;
};
-
-&dsi_dual_nt36850_truly_cmd_display {
- /delete-property/ qcom,dsi-display-active;
-};
-
-&dsi_hx8399_truly_cmd {
- qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
- qcom,mdss-dsi-bl-min-level = <1>;
- qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,panel-mode-gpio = <&tlmm 76 0>;
- qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
- qcom,platform-reset-gpio = <&tlmm 75 0>;
- qcom,platform-te-gpio = <&tlmm 10 0>;
-};
-
-&dsi_hx8399_truly_cmd_display {
- qcom,dsi-display-active;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm710-qrd.dts b/arch/arm64/boot/dts/qcom/sdm710-qrd.dts
index e3cb7cc..4eb414f 100644
--- a/arch/arm64/boot/dts/qcom/sdm710-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sdm710-qrd.dts
@@ -15,6 +15,7 @@
#include "sdm710.dtsi"
#include "sdm670-qrd.dtsi"
+#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM710 PM660 + PM660L QRD";
diff --git a/arch/arm64/boot/dts/qcom/smb1390.dtsi b/arch/arm64/boot/dts/qcom/smb1390.dtsi
new file mode 100644
index 0000000..92ac103
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/smb1390.dtsi
@@ -0,0 +1,61 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+smb1390: qcom,smb1390@10 {
+ compatible = "qcom,i2c-pmic";
+ reg = <0x10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "smb1390";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ qcom,periph-map = <0x10>;
+
+ smb1390_revid: qcom,revid {
+ compatible = "qcom,qpnp-revid";
+ reg = <0x100>;
+ };
+
+ smb1390_charger: qcom,charge_pump {
+ compatible = "qcom,smb1390-charger";
+ qcom,pmic-revid = <&smb1390_revid>;
+ interrupt-parent = <&smb1390>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&smb1390_die_temp_default>;
+ qcom,smb-vadc = <&pm8998_vadc>;
+ qcom,channel-num = <0x14>;
+ status = "disabled";
+
+ qcom,core {
+ interrupts = <0x10 0x0 IRQ_TYPE_EDGE_RISING>,
+ <0x10 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x10 0x2 IRQ_TYPE_EDGE_RISING>,
+ <0x10 0x3 IRQ_TYPE_EDGE_RISING>,
+ <0x10 0x4 IRQ_TYPE_EDGE_RISING>,
+ <0x10 0x5 IRQ_TYPE_EDGE_RISING>,
+ <0x10 0x6 IRQ_TYPE_EDGE_RISING>,
+ <0x10 0x7 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "switcher-off-window",
+ "switcher-off-fault",
+ "tsd-fault",
+ "irev-fault",
+ "vph-ov-hard",
+ "vph-ov-soft",
+ "ilim",
+ "temp-alarm";
+ };
+ };
+};
diff --git a/arch/arm64/configs/msm8937-perf_defconfig b/arch/arm64/configs/msm8937-perf_defconfig
index ac6cc3d..a33d09a 100644
--- a/arch/arm64/configs/msm8937-perf_defconfig
+++ b/arch/arm64/configs/msm8937-perf_defconfig
@@ -18,6 +18,7 @@
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
diff --git a/arch/arm64/configs/msm8937_defconfig b/arch/arm64/configs/msm8937_defconfig
index ced9c40..ded6e42 100644
--- a/arch/arm64/configs/msm8937_defconfig
+++ b/arch/arm64/configs/msm8937_defconfig
@@ -19,6 +19,7 @@
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index a0a7031..dceaafd 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -102,6 +102,7 @@
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
@@ -114,6 +115,7 @@
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 41959a6..946e4f4 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -105,6 +105,7 @@
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
@@ -117,6 +118,7 @@
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index c5bc52e..a891bb6 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -48,20 +48,9 @@
} while (0)
static inline int
-futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *_uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (int)(encoded_op << 8) >> 20;
- int cmparg = (int)(encoded_op << 20) >> 20;
int oldval = 0, ret, tmp;
- u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
-
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1U << (oparg & 0x1f);
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -92,17 +81,9 @@
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 37d56e8..0a33ea3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -73,6 +73,9 @@
/* Timer */
struct arch_timer_kvm timer;
+
+ /* Mandated version of PSCI */
+ u32 psci_version;
};
#define KVM_NR_MEM_OBJS 40
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 6c35d21..de21caa 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -312,7 +312,7 @@
(err), ARM64_HAS_UAO); \
break; \
case 8: \
- __get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr), \
+ __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \
(err), ARM64_HAS_UAO); \
break; \
default: \
@@ -384,7 +384,7 @@
(err), ARM64_HAS_UAO); \
break; \
case 8: \
- __put_user_asm("str", "sttr", "%", __pu_val, (ptr), \
+ __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \
(err), ARM64_HAS_UAO); \
break; \
default: \
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 3051f86..702de7a 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -195,6 +195,12 @@
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+ KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 3f9e157..d3e0a2f 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
+#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <asm/uaccess.h>
#include <asm/kvm.h>
@@ -205,7 +206,7 @@
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
- + NUM_TIMER_REGS;
+ + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
}
/**
@@ -225,6 +226,11 @@
uindices++;
}
+ ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
+ if (ret)
+ return ret;
+ uindices += kvm_arm_get_fw_num_regs(vcpu);
+
ret = copy_timer_indices(vcpu, uindices);
if (ret)
return ret;
@@ -243,6 +249,9 @@
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+ return kvm_arm_get_fw_reg(vcpu, reg);
+
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
@@ -259,6 +268,9 @@
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
+ if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+ return kvm_arm_set_fw_reg(vcpu, reg);
+
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);
diff --git a/arch/frv/include/asm/futex.h b/arch/frv/include/asm/futex.h
index 4bea27f..2702bd8 100644
--- a/arch/frv/include/asm/futex.h
+++ b/arch/frv/include/asm/futex.h
@@ -7,7 +7,8 @@
#include <asm/errno.h>
#include <asm/uaccess.h>
-extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr);
+extern int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr);
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
index d155ca9..37f7b2b 100644
--- a/arch/frv/kernel/futex.c
+++ b/arch/frv/kernel/futex.c
@@ -186,20 +186,10 @@
/*
* do the futex operations
*/
-int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
@@ -225,18 +215,9 @@
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS; break;
- }
- }
+ if (!ret)
+ *oval = oldval;
return ret;
-} /* end futex_atomic_op_inuser() */
+} /* end arch_futex_atomic_op_inuser() */
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
index 7e597f8..c607b77 100644
--- a/arch/hexagon/include/asm/futex.h
+++ b/arch/hexagon/include/asm/futex.h
@@ -31,18 +31,9 @@
static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
pagefault_disable();
@@ -72,30 +63,9 @@
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index 76acbcd..6d67dc1 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -45,18 +45,9 @@
} while (0)
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -84,17 +75,9 @@
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
index 01848f0..a9dad9e 100644
--- a/arch/microblaze/include/asm/futex.h
+++ b/arch/microblaze/include/asm/futex.h
@@ -29,18 +29,9 @@
})
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -66,30 +57,9 @@
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2d2fd79..34fbbf8 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -95,6 +95,7 @@
select PCI_DRIVERS_GENERIC
select PINCTRL
select SMP_UP if SMP
+ select SWAP_IO_SPACE
select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_CPU_MIPS32_R2
select SYS_HAS_CPU_MIPS32_R6
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 1de190b..a9e61ea 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -83,18 +83,9 @@
}
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -125,17 +116,9 @@
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index ac8bd58..06a1a88 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -32,22 +32,12 @@
}
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
unsigned long int flags;
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval, ret;
u32 tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
- return -EFAULT;
-
_futex_spin_lock_irqsave(uaddr, &flags);
pagefault_disable();
@@ -85,17 +75,9 @@
pagefault_enable();
_futex_spin_unlock_irqrestore(uaddr, &flags);
- if (ret == 0) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 2a9cf84..f4c7467f 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -31,18 +31,10 @@
: "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
: "cr0", "memory")
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -68,17 +60,9 @@
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 6ef8f0b..27843665 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -207,18 +207,18 @@
if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
return NULL;
+
+ device_lock(&dev->dev);
dev->error_state = pci_channel_io_frozen;
driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
+ if (!driver) goto out_no_dev;
eeh_disable_irq(dev);
if (!driver->err_handler ||
- !driver->err_handler->error_detected) {
- eeh_pcid_put(dev);
- return NULL;
- }
+ !driver->err_handler->error_detected)
+ goto out;
rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
@@ -227,7 +227,10 @@
if (*res == PCI_ERS_RESULT_NONE) *res = rc;
edev->in_error = true;
+out:
eeh_pcid_put(dev);
+out_no_dev:
+ device_unlock(&dev->dev);
return NULL;
}
@@ -250,15 +253,14 @@
if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
return NULL;
+ device_lock(&dev->dev);
driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
+ if (!driver) goto out_no_dev;
if (!driver->err_handler ||
!driver->err_handler->mmio_enabled ||
- (edev->mode & EEH_DEV_NO_HANDLER)) {
- eeh_pcid_put(dev);
- return NULL;
- }
+ (edev->mode & EEH_DEV_NO_HANDLER))
+ goto out;
rc = driver->err_handler->mmio_enabled(dev);
@@ -266,7 +268,10 @@
if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+out:
eeh_pcid_put(dev);
+out_no_dev:
+ device_unlock(&dev->dev);
return NULL;
}
@@ -289,20 +294,20 @@
if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
return NULL;
+
+ device_lock(&dev->dev);
dev->error_state = pci_channel_io_normal;
driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
+ if (!driver) goto out_no_dev;
eeh_enable_irq(dev);
if (!driver->err_handler ||
!driver->err_handler->slot_reset ||
(edev->mode & EEH_DEV_NO_HANDLER) ||
- (!edev->in_error)) {
- eeh_pcid_put(dev);
- return NULL;
- }
+ (!edev->in_error))
+ goto out;
rc = driver->err_handler->slot_reset(dev);
if ((*res == PCI_ERS_RESULT_NONE) ||
@@ -310,7 +315,10 @@
if (*res == PCI_ERS_RESULT_DISCONNECT &&
rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+out:
eeh_pcid_put(dev);
+out_no_dev:
+ device_unlock(&dev->dev);
return NULL;
}
@@ -361,10 +369,12 @@
if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
return NULL;
+
+ device_lock(&dev->dev);
dev->error_state = pci_channel_io_normal;
driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
+ if (!driver) goto out_no_dev;
was_in_error = edev->in_error;
edev->in_error = false;
@@ -374,13 +384,15 @@
!driver->err_handler->resume ||
(edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) {
edev->mode &= ~EEH_DEV_NO_HANDLER;
- eeh_pcid_put(dev);
- return NULL;
+ goto out;
}
driver->err_handler->resume(dev);
+out:
eeh_pcid_put(dev);
+out_no_dev:
+ device_unlock(&dev->dev);
return NULL;
}
@@ -400,22 +412,25 @@
if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
return NULL;
+
+ device_lock(&dev->dev);
dev->error_state = pci_channel_io_perm_failure;
driver = eeh_pcid_get(dev);
- if (!driver) return NULL;
+ if (!driver) goto out_no_dev;
eeh_disable_irq(dev);
if (!driver->err_handler ||
- !driver->err_handler->error_detected) {
- eeh_pcid_put(dev);
- return NULL;
- }
+ !driver->err_handler->error_detected)
+ goto out;
driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
+out:
eeh_pcid_put(dev);
+out_no_dev:
+ device_unlock(&dev->dev);
return NULL;
}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 55fbc0c..79a180c 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -299,7 +299,6 @@
stw r12, STACK_SLOT_TRAP(r1)
bl kvmhv_commence_exit
nop
- lwz r12, STACK_SLOT_TRAP(r1)
b kvmhv_switch_to_host
/*
@@ -1023,6 +1022,7 @@
secondary_too_late:
li r12, 0
+ stw r12, STACK_SLOT_TRAP(r1)
cmpdi r4, 0
beq 11f
stw r12, VCPU_TRAP(r4)
@@ -1266,12 +1266,12 @@
bl kvmhv_accumulate_time
#endif
+ stw r12, STACK_SLOT_TRAP(r1)
mr r3, r12
/* Increment exit count, poke other threads to exit */
bl kvmhv_commence_exit
nop
ld r9, HSTATE_KVM_VCPU(r13)
- lwz r12, VCPU_TRAP(r9)
/* Stop others sending VCPU interrupts to this physical CPU */
li r0, -1
@@ -1549,6 +1549,7 @@
* POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
* have to coordinate the hardware threads.
+ * Here STACK_SLOT_TRAP(r1) contains the trap number.
*/
kvmhv_switch_to_host:
/* Secondary threads wait for primary to do partition switch */
@@ -1599,11 +1600,11 @@
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/* If HMI, call kvmppc_realmode_hmi_handler() */
+ lwz r12, STACK_SLOT_TRAP(r1)
cmpwi r12, BOOK3S_INTERRUPT_HMI
bne 27f
bl kvmppc_realmode_hmi_handler
nop
- li r12, BOOK3S_INTERRUPT_HMI
/*
* At this point kvmppc_realmode_hmi_handler would have resync-ed
* the TB. Hence it is not required to subtract guest timebase
@@ -1678,6 +1679,7 @@
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
+ lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */
ld r0, SFS+PPC_LR_STKOFF(r1)
addi r1, r1, SFS
mtlr r0
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index f886886..aa2a513 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -48,10 +48,12 @@
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
- if (rc == OPAL_BUSY_EVENT)
+ if (rc == OPAL_BUSY_EVENT) {
+ mdelay(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
- else if (rc == OPAL_BUSY)
- mdelay(10);
+ } else if (rc == OPAL_BUSY) {
+ mdelay(OPAL_BUSY_DELAY_MS);
+ }
}
if (rc != OPAL_SUCCESS)
return 0;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9aa0d04..1c4a595 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -118,6 +118,7 @@
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_DEVICES if !SMP
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_FIND_FIRST_BIT
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
@@ -704,6 +705,51 @@
If unsure, say Y.
+config KERNEL_NOBP
+ def_bool n
+ prompt "Enable modified branch prediction for the kernel by default"
+ help
+ If this option is selected the kernel will switch to a modified
+ branch prediction mode if the firmware interface is available.
+ The modified branch prediction mode improves the behaviour in
+ regard to speculative execution.
+
+ With the option enabled the kernel parameter "nobp=0" or "nospec"
+ can be used to run the kernel in the normal branch prediction mode.
+
+ With the option disabled the modified branch prediction mode is
+ enabled with the "nobp=1" kernel parameter.
+
+ If unsure, say N.
+
+config EXPOLINE
+ def_bool n
+ prompt "Avoid speculative indirect branches in the kernel"
+ help
+ Compile the kernel with the expoline compiler options to guard
+ against kernel-to-user data leaks by avoiding speculative indirect
+ branches.
+ Requires a compiler with -mindirect-branch=thunk support for full
+ protection. The kernel may run slower.
+
+ If unsure, say N.
+
+choice
+ prompt "Expoline default"
+ depends on EXPOLINE
+ default EXPOLINE_FULL
+
+config EXPOLINE_OFF
+ bool "spectre_v2=off"
+
+config EXPOLINE_AUTO
+ bool "spectre_v2=auto"
+
+config EXPOLINE_FULL
+ bool "spectre_v2=on"
+
+endchoice
+
endmenu
menu "Power Management"
@@ -753,6 +799,7 @@
config SHARED_KERNEL
bool "VM shared kernel support"
depends on !JUMP_LABEL
+ depends on !ALTERNATIVES
help
Select this option, if you want to share the text segment of the
Linux kernel between different VM guests. This reduces memory
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 54e0052..bef67c0 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -79,6 +79,16 @@
cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
endif
+ifdef CONFIG_EXPOLINE
+ ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
+ CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
+ CC_FLAGS_EXPOLINE += -mfunction-return=thunk
+ CC_FLAGS_EXPOLINE += -mindirect-branch-table
+ export CC_FLAGS_EXPOLINE
+ cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
+ endif
+endif
+
ifdef CONFIG_FUNCTION_TRACER
# make use of hotpatch feature if the compiler supports it
cc_hotpatch := -mhotpatch=0,3
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
new file mode 100644
index 0000000..a720020
--- /dev/null
+++ b/arch/s390/include/asm/alternative.h
@@ -0,0 +1,149 @@
+#ifndef _ASM_S390_ALTERNATIVE_H
+#define _ASM_S390_ALTERNATIVE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+struct alt_instr {
+ s32 instr_offset; /* original instruction */
+ s32 repl_offset; /* offset to replacement instruction */
+ u16 facility; /* facility bit set for replacement */
+ u8 instrlen; /* length of original instruction */
+ u8 replacementlen; /* length of new instruction */
+} __packed;
+
+void apply_alternative_instructions(void);
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+
+/*
+ * |661: |662: |6620 |663:
+ * +-----------+---------------------+
+ * | oldinstr | oldinstr_padding |
+ * | +----------+----------+
+ * | | | |
+ * | | >6 bytes |6/4/2 nops|
+ * | |6 bytes jg----------->
+ * +-----------+---------------------+
+ * ^^ static padding ^^
+ *
+ * .altinstr_replacement section
+ * +---------------------+-----------+
+ * |6641: |6651:
+ * | alternative instr 1 |
+ * +-----------+---------+- - - - - -+
+ * |6642: |6652: |
+ * | alternative instr 2 | padding
+ * +---------------------+- - - - - -+
+ * ^ runtime ^
+ *
+ * .altinstructions section
+ * +---------------------------------+
+ * | alt_instr entries for each |
+ * | alternative instr |
+ * +---------------------------------+
+ */
+
+#define b_altinstr(num) "664"#num
+#define e_altinstr(num) "665"#num
+
+#define e_oldinstr_pad_end "663"
+#define oldinstr_len "662b-661b"
+#define oldinstr_total_len e_oldinstr_pad_end"b-661b"
+#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b"
+#define oldinstr_pad_len(num) \
+ "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
+ "((" altinstr_len(num) ")-(" oldinstr_len "))"
+
+#define INSTR_LEN_SANITY_CHECK(len) \
+ ".if " len " > 254\n" \
+ "\t.error \"cpu alternatives does not support instructions " \
+ "blocks > 254 bytes\"\n" \
+ ".endif\n" \
+ ".if (" len ") %% 2\n" \
+ "\t.error \"cpu alternatives instructions length is odd\"\n" \
+ ".endif\n"
+
+#define OLDINSTR_PADDING(oldinstr, num) \
+ ".if " oldinstr_pad_len(num) " > 6\n" \
+ "\tjg " e_oldinstr_pad_end "f\n" \
+ "6620:\n" \
+ "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
+ ".else\n" \
+ "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \
+ "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \
+ "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \
+ ".endif\n"
+
+#define OLDINSTR(oldinstr, num) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ OLDINSTR_PADDING(oldinstr, num) \
+ e_oldinstr_pad_end ":\n" \
+ INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define OLDINSTR_2(oldinstr, num1, num2) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \
+ OLDINSTR_PADDING(oldinstr, num2) \
+ ".else\n" \
+ OLDINSTR_PADDING(oldinstr, num1) \
+ ".endif\n" \
+ e_oldinstr_pad_end ":\n" \
+ INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define ALTINSTR_ENTRY(facility, num) \
+ "\t.long 661b - .\n" /* old instruction */ \
+ "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
+ "\t.word " __stringify(facility) "\n" /* facility bit */ \
+ "\t.byte " oldinstr_total_len "\n" /* source len */ \
+ "\t.byte " altinstr_len(num) "\n" /* alt instruction len */
+
+#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \
+ b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
+ INSTR_LEN_SANITY_CHECK(altinstr_len(num))
+
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, altinstr, facility) \
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(altinstr, 1) \
+ ".popsection\n" \
+ OLDINSTR(oldinstr, 1) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(facility, 1) \
+ ".popsection\n"
+
+#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(altinstr1, 1) \
+ ALTINSTR_REPLACEMENT(altinstr2, 2) \
+ ".popsection\n" \
+ OLDINSTR_2(oldinstr, 1, 2) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(facility1, 1) \
+ ALTINSTR_ENTRY(facility2, 2) \
+ ".popsection\n"
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows to use optimized instructions even on generic binary
+ * kernels.
+ *
+ * oldinstr is padded with jump and nops at compile time if altinstr is
+ * longer. altinstr is padded with jump and nops at run-time during patching.
+ *
+ * For non barrier like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, altinstr, facility) \
+ asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+
+#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
+ asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
+ altinstr2, facility2) ::: "memory")
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_ALTERNATIVE_H */
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 5c8db3c..03b2e5b 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -48,6 +48,30 @@
#define __smp_mb__before_atomic() barrier()
#define __smp_mb__after_atomic() barrier()
+/**
+ * array_index_mask_nospec - generate a mask for array_idx() that is
+ * ~0UL when the bounds check succeeds and 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+ unsigned long size)
+{
+ unsigned long mask;
+
+ if (__builtin_constant_p(size) && size > 0) {
+ asm(" clgr %2,%1\n"
+ " slbgr %0,%0\n"
+ :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
+ return mask;
+ }
+ asm(" clgr %1,%2\n"
+ " slbgr %0,%0\n"
+ :"=d" (mask) : "d" (size), "d" (index) :"cc");
+ return ~mask;
+}
+
#include <asm-generic/barrier.h>
#endif /* __ASM_BARRIER_H */
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 09b406d..5811e78 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -15,7 +15,25 @@
#include <linux/preempt.h>
#include <asm/lowcore.h>
-#define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */
+#define MAX_FACILITY_BIT (sizeof(((struct lowcore *)0)->stfle_fac_list) * 8)
+
+static inline void __set_facility(unsigned long nr, void *facilities)
+{
+ unsigned char *ptr = (unsigned char *) facilities;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return;
+ ptr[nr >> 3] |= 0x80 >> (nr & 7);
+}
+
+static inline void __clear_facility(unsigned long nr, void *facilities)
+{
+ unsigned char *ptr = (unsigned char *) facilities;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return;
+ ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
+}
static inline int __test_facility(unsigned long nr, void *facilities)
{
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index a4811aa..8f8eec9e 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -21,17 +21,12 @@
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
"m" (*uaddr) : "cc");
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, newval, ret;
load_kernel_asce();
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
pagefault_disable();
switch (op) {
@@ -60,17 +55,9 @@
}
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a41faf3..5792590 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -181,7 +181,8 @@
__u16 ipa; /* 0x0056 */
__u32 ipb; /* 0x0058 */
__u32 scaoh; /* 0x005c */
- __u8 reserved60; /* 0x0060 */
+#define FPF_BPBC 0x20
+ __u8 fpf; /* 0x0060 */
__u8 ecb; /* 0x0061 */
__u8 ecb2; /* 0x0062 */
#define ECB3_AES 0x04
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 7b93b78..ad4e0ce 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -135,7 +135,9 @@
/* Per cpu primary space access list */
__u32 paste[16]; /* 0x0400 */
- __u8 pad_0x04c0[0x0e00-0x0440]; /* 0x0440 */
+ /* br %r1 trampoline */
+ __u16 br_r1_trampoline; /* 0x0440 */
+ __u8 pad_0x0442[0x0e00-0x0442]; /* 0x0442 */
/*
* 0xe00 contains the address of the IPL Parameter Information
@@ -150,7 +152,8 @@
__u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
/* Extended facility list */
- __u64 stfle_fac_list[32]; /* 0x0f00 */
+ __u64 stfle_fac_list[16]; /* 0x0f00 */
+ __u64 alt_stfle_fac_list[16]; /* 0x0f80 */
__u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
/* Pointer to vector register save area */
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
new file mode 100644
index 0000000..b4bd8c4
--- /dev/null
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_EXPOLINE_H
+#define _ASM_S390_EXPOLINE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+extern int nospec_disable;
+
+void nospec_init_branches(void);
+void nospec_auto_detect(void);
+void nospec_revert(s32 *start, s32 *end);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_EXPOLINE_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6bcbbec..d584212 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -84,6 +84,7 @@
extern const struct seq_operations cpuinfo_op;
extern int sysctl_ieee_emulation_warnings;
extern void execve_tail(void);
+extern void __bpon(void);
/*
* User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
@@ -359,6 +360,9 @@
memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
}
+extern int s390_isolate_bp(void);
+extern int s390_isolate_bp_guest(void);
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index f15c039..84f2ae4 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -79,6 +79,8 @@
#define TIF_SECCOMP 5 /* secure computing */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
#define TIF_UPROBE 7 /* breakpointed or single-stepping */
+#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
+#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
#define TIF_31BIT 16 /* 32bit process */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
@@ -94,6 +96,8 @@
#define _TIF_SECCOMP _BITUL(TIF_SECCOMP)
#define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT)
#define _TIF_UPROBE _BITUL(TIF_UPROBE)
+#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP)
+#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST)
#define _TIF_31BIT _BITUL(TIF_31BIT)
#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index a2ffec4..81c02e1 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -197,6 +197,7 @@
#define KVM_SYNC_VRS (1UL << 6)
#define KVM_SYNC_RICCB (1UL << 7)
#define KVM_SYNC_FPRS (1UL << 8)
+#define KVM_SYNC_BPBC (1UL << 10)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
__u64 prefix; /* prefix register */
@@ -217,7 +218,9 @@
};
__u8 reserved[512]; /* for future vector expansion */
__u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
- __u8 padding[52]; /* riccb needs to be 64byte aligned */
+ __u8 bpbc : 1; /* bp mode */
+ __u8 reserved2 : 7;
+ __u8 padding1[51]; /* riccb needs to be 64byte aligned */
__u8 riccb[64]; /* runtime instrumentation controls block */
};
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 1f0fe98..0501cac 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -42,6 +42,7 @@
CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
CFLAGS_sclp.o += -march=z900
CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
+CFLAGS_REMOVE_als.o += $(CC_FLAGS_EXPOLINE)
CFLAGS_als.o += -march=z900
AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
AFLAGS_head.o += -march=z900
@@ -57,10 +58,13 @@
obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o als.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o
-obj-y += entry.o reipl.o relocate_kernel.o
+obj-y += entry.o reipl.o relocate_kernel.o alternative.o
+obj-y += nospec-branch.o
extra-y += head.o head64.o vmlinux.lds
+CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
+
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
new file mode 100644
index 0000000..b57b293
--- /dev/null
+++ b/arch/s390/kernel/alternative.c
@@ -0,0 +1,112 @@
+#include <linux/module.h>
+#include <asm/alternative.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+#define MAX_PATCH_LEN (255 - 1)
+
+static int __initdata_or_module alt_instr_disabled;
+
+static int __init disable_alternative_instructions(char *str)
+{
+ alt_instr_disabled = 1;
+ return 0;
+}
+
+early_param("noaltinstr", disable_alternative_instructions);
+
+struct brcl_insn {
+ u16 opc;
+ s32 disp;
+} __packed;
+
+static u16 __initdata_or_module nop16 = 0x0700;
+static u32 __initdata_or_module nop32 = 0x47000000;
+static struct brcl_insn __initdata_or_module nop48 = {
+ 0xc004, 0
+};
+
+static const void *nops[] __initdata_or_module = {
+ &nop16,
+ &nop32,
+ &nop48
+};
+
+static void __init_or_module add_jump_padding(void *insns, unsigned int len)
+{
+ struct brcl_insn brcl = {
+ 0xc0f4,
+ len / 2
+ };
+
+ memcpy(insns, &brcl, sizeof(brcl));
+ insns += sizeof(brcl);
+ len -= sizeof(brcl);
+
+ while (len > 0) {
+ memcpy(insns, &nop16, 2);
+ insns += 2;
+ len -= 2;
+ }
+}
+
+static void __init_or_module add_padding(void *insns, unsigned int len)
+{
+ if (len > 6)
+ add_jump_padding(insns, len);
+ else if (len >= 2)
+ memcpy(insns, nops[len / 2 - 1], len);
+}
+
+static void __init_or_module __apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end)
+{
+ struct alt_instr *a;
+ u8 *instr, *replacement;
+ u8 insnbuf[MAX_PATCH_LEN];
+
+ /*
+ * The scan order should be from start to end. A later scanned
+ * alternative code can overwrite previously scanned alternative code.
+ */
+ for (a = start; a < end; a++) {
+ int insnbuf_sz = 0;
+
+ instr = (u8 *)&a->instr_offset + a->instr_offset;
+ replacement = (u8 *)&a->repl_offset + a->repl_offset;
+
+ if (!__test_facility(a->facility,
+ S390_lowcore.alt_stfle_fac_list))
+ continue;
+
+ if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
+ WARN_ONCE(1, "cpu alternatives instructions length is "
+ "odd, skipping patching\n");
+ continue;
+ }
+
+ memcpy(insnbuf, replacement, a->replacementlen);
+ insnbuf_sz = a->replacementlen;
+
+ if (a->instrlen > a->replacementlen) {
+ add_padding(insnbuf + a->replacementlen,
+ a->instrlen - a->replacementlen);
+ insnbuf_sz += a->instrlen - a->replacementlen;
+ }
+
+ s390_kernel_write(instr, insnbuf, insnbuf_sz);
+ }
+}
+
+void __init_or_module apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end)
+{
+ if (!alt_instr_disabled)
+ __apply_alternatives(start, end);
+}
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+void __init apply_alternative_instructions(void)
+{
+ apply_alternatives(__alt_instructions, __alt_instructions_end);
+}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 6257898..0c7a7d5 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -299,6 +299,11 @@
{
stfle(S390_lowcore.stfle_fac_list,
ARRAY_SIZE(S390_lowcore.stfle_fac_list));
+ memcpy(S390_lowcore.alt_stfle_fac_list,
+ S390_lowcore.stfle_fac_list,
+ sizeof(S390_lowcore.alt_stfle_fac_list));
+ if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
}
static __init void detect_diag9c(void)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3bc2825..1996afe 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -105,6 +105,7 @@
j 3f
1: LAST_BREAK %r14
UPDATE_VTIME %r14,%r15,\timer
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
2: lg %r15,__LC_ASYNC_STACK # load async stack
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
@@ -163,6 +164,130 @@
tm off+\addr, \mask
.endm
+ .macro BPOFF
+ .pushsection .altinstr_replacement, "ax"
+660: .long 0xb2e8c000
+ .popsection
+661: .long 0x47000000
+ .pushsection .altinstructions, "a"
+ .long 661b - .
+ .long 660b - .
+ .word 82
+ .byte 4
+ .byte 4
+ .popsection
+ .endm
+
+ .macro BPON
+ .pushsection .altinstr_replacement, "ax"
+662: .long 0xb2e8d000
+ .popsection
+663: .long 0x47000000
+ .pushsection .altinstructions, "a"
+ .long 663b - .
+ .long 662b - .
+ .word 82
+ .byte 4
+ .byte 4
+ .popsection
+ .endm
+
+ .macro BPENTER tif_ptr,tif_mask
+ .pushsection .altinstr_replacement, "ax"
+662: .word 0xc004, 0x0000, 0x0000 # 6 byte nop
+ .word 0xc004, 0x0000, 0x0000 # 6 byte nop
+ .popsection
+664: TSTMSK \tif_ptr,\tif_mask
+ jz . + 8
+ .long 0xb2e8d000
+ .pushsection .altinstructions, "a"
+ .long 664b - .
+ .long 662b - .
+ .word 82
+ .byte 12
+ .byte 12
+ .popsection
+ .endm
+
+ .macro BPEXIT tif_ptr,tif_mask
+ TSTMSK \tif_ptr,\tif_mask
+ .pushsection .altinstr_replacement, "ax"
+662: jnz . + 8
+ .long 0xb2e8d000
+ .popsection
+664: jz . + 8
+ .long 0xb2e8c000
+ .pushsection .altinstructions, "a"
+ .long 664b - .
+ .long 662b - .
+ .word 82
+ .byte 8
+ .byte 8
+ .popsection
+ .endm
+
+#ifdef CONFIG_EXPOLINE
+
+ .macro GEN_BR_THUNK name,reg,tmp
+ .section .text.\name,"axG",@progbits,\name,comdat
+ .globl \name
+ .hidden \name
+ .type \name,@function
+\name:
+ .cfi_startproc
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+ exrl 0,0f
+#else
+ larl \tmp,0f
+ ex 0,0(\tmp)
+#endif
+ j .
+0: br \reg
+ .cfi_endproc
+ .endm
+
+ GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
+ GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
+ GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
+
+ .macro BASR_R14_R9
+0: brasl %r14,__s390x_indirect_jump_r1use_r9
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 0b-.
+ .popsection
+ .endm
+
+ .macro BR_R1USE_R14
+0: jg __s390x_indirect_jump_r1use_r14
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 0b-.
+ .popsection
+ .endm
+
+ .macro BR_R11USE_R14
+0: jg __s390x_indirect_jump_r11use_r14
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 0b-.
+ .popsection
+ .endm
+
+#else /* CONFIG_EXPOLINE */
+
+ .macro BASR_R14_R9
+ basr %r14,%r9
+ .endm
+
+ .macro BR_R1USE_R14
+ br %r14
+ .endm
+
+ .macro BR_R11USE_R14
+ br %r14
+ .endm
+
+#endif /* CONFIG_EXPOLINE */
+
+
.section .kprobes.text, "ax"
.Ldummy:
/*
@@ -175,6 +300,11 @@
*/
nop 0
+ENTRY(__bpon)
+ .globl __bpon
+ BPON
+ BR_R1USE_R14
+
/*
* Scheduler resume function, called by switch_to
* gpr2 = (task_struct *) prev
@@ -201,9 +331,9 @@
mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
- bzr %r14
+ jz 0f
.insn s,0xb2800000,__LC_LPP # set program parameter
- br %r14
+0: BR_R1USE_R14
.L__critical_start:
@@ -215,9 +345,11 @@
*/
ENTRY(sie64a)
stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
+ lg %r12,__LC_CURRENT
stg %r2,__SF_EMPTY(%r15) # save control block pointer
stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
+ mvc __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
jno .Lsie_load_guest_gprs
brasl %r14,load_fpu_regs # load guest fp/vx regs
@@ -234,7 +366,11 @@
jnz .Lsie_skip
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lsie_skip # exit if fp/vx regs changed
+ BPEXIT __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
sie 0(%r14)
+.Lsie_exit:
+ BPOFF
+ BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
@@ -255,9 +391,15 @@
sie_exit:
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ xgr %r0,%r0 # clear guest registers to
+ xgr %r1,%r1 # prevent speculative use
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
- br %r14
+ BR_R1USE_R14
.Lsie_fault:
lghi %r14,-EFAULT
stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
@@ -280,6 +422,7 @@
stpt __LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ BPOFF
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lghi %r14,_PIF_SYSCALL
@@ -289,12 +432,15 @@
LAST_BREAK %r13
.Lsysc_vtime:
UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
+ # clear user controlled register to prevent speculative use
+ xgr %r0,%r0
lg %r10,__TI_sysc_table(%r12) # address of system call table
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
@@ -312,7 +458,7 @@
lgf %r9,0(%r8,%r10) # get system call add.
TSTMSK __TI_flags(%r12),_TIF_TRACE
jnz .Lsysc_tracesys
- basr %r14,%r9 # call sys_xxxx
+ BASR_R14_R9 # call sys_xxxx
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_return:
@@ -324,6 +470,7 @@
jnz .Lsysc_work # check for work
TSTMSK __LC_CPU_FLAGS,_CIF_WORK
jnz .Lsysc_work
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
@@ -451,7 +598,7 @@
lmg %r3,%r7,__PT_R3(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
lg %r2,__PT_ORIG_GPR2(%r11)
- basr %r14,%r9 # call sys_xxx
+ BASR_R14_R9 # call sys_xxx
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_tracenogo:
TSTMSK __TI_flags(%r12),_TIF_TRACE
@@ -475,7 +622,7 @@
lmg %r9,%r10,__PT_R9(%r11) # load gprs
ENTRY(kernel_thread_starter)
la %r2,0(%r10)
- basr %r14,%r9
+ BASR_R14_R9
j .Lsysc_tracenogo
/*
@@ -484,6 +631,7 @@
ENTRY(pgm_check_handler)
stpt __LC_SYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
@@ -508,6 +656,7 @@
j 3f
2: LAST_BREAK %r14
UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
lg %r15,__LC_KERNEL_STACK
lg %r14,__TI_task(%r12)
aghi %r14,__TASK_thread # pointer to thread_struct
@@ -517,6 +666,15 @@
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
@@ -538,9 +696,9 @@
nill %r10,0x007f
sll %r10,2
je .Lpgm_return
- lgf %r1,0(%r10,%r1) # load address of handler routine
+ lgf %r9,0(%r10,%r1) # load address of handler routine
lgr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # branch to interrupt-handler
+ BASR_R14_R9 # branch to interrupt-handler
.Lpgm_return:
LOCKDEP_SYS_EXIT
tm __PT_PSW+1(%r11),0x01 # returning to user ?
@@ -573,6 +731,7 @@
ENTRY(io_int_handler)
STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
@@ -580,6 +739,16 @@
lmg %r8,%r9,__LC_IO_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
@@ -614,9 +783,13 @@
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+ jno .Lio_exit_kernel
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
.Lio_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+.Lio_exit_kernel:
lmg %r11,%r15,__PT_R11(%r11)
lpswe __LC_RETURN_PSW
.Lio_done:
@@ -748,6 +921,7 @@
ENTRY(ext_int_handler)
STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
@@ -755,6 +929,16 @@
lmg %r8,%r9,__LC_EXT_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
lghi %r1,__LC_EXT_PARAMS2
@@ -787,11 +971,12 @@
.Lpsw_idle_stcctm:
#endif
oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
+ BPON
STCK __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
lpswe __SF_EMPTY(%r15)
- br %r14
+ BR_R1USE_R14
.Lpsw_idle_end:
/*
@@ -805,7 +990,7 @@
lg %r2,__LC_CURRENT
aghi %r2,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- bor %r14
+ jo .Lsave_fpu_regs_exit
stfpc __THREAD_FPU_fpc(%r2)
.Lsave_fpu_regs_fpc_end:
lg %r3,__THREAD_FPU_regs(%r2)
@@ -835,7 +1020,8 @@
std 15,120(%r3)
.Lsave_fpu_regs_done:
oi __LC_CPU_FLAGS+7,_CIF_FPU
- br %r14
+.Lsave_fpu_regs_exit:
+ BR_R1USE_R14
.Lsave_fpu_regs_end:
#if IS_ENABLED(CONFIG_KVM)
EXPORT_SYMBOL(save_fpu_regs)
@@ -855,7 +1041,7 @@
lg %r4,__LC_CURRENT
aghi %r4,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- bnor %r14
+ jno .Lload_fpu_regs_exit
lfpc __THREAD_FPU_fpc(%r4)
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
@@ -884,7 +1070,8 @@
ld 15,120(%r4)
.Lload_fpu_regs_done:
ni __LC_CPU_FLAGS+7,255-_CIF_FPU
- br %r14
+.Lload_fpu_regs_exit:
+ BR_R1USE_R14
.Lload_fpu_regs_end:
.L__critical_end:
@@ -894,6 +1081,7 @@
*/
ENTRY(mcck_int_handler)
STCK __LC_MCCK_CLOCK
+ BPOFF
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
@@ -925,6 +1113,16 @@
.Lmcck_skip:
lghi %r14,__LC_GPREGS_SAVE_AREA+64
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),0(%r14)
stmg %r8,%r9,__PT_PSW(%r11)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
@@ -950,6 +1148,7 @@
mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0: lmg %r11,%r15,__PT_R11(%r11)
@@ -1045,7 +1244,7 @@
jl 0f
clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
jl .Lcleanup_load_fpu_regs
-0: br %r14
+0: BR_R11USE_R14
.align 8
.Lcleanup_table:
@@ -1070,11 +1269,12 @@
.quad .Lsie_done
.Lcleanup_sie:
+ BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
lg %r9,__SF_EMPTY(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
- br %r14
+ BR_R11USE_R14
#endif
.Lcleanup_system_call:
@@ -1116,7 +1316,8 @@
srag %r9,%r9,23
jz 0f
mvc __TI_last_break(8,%r12),16(%r11)
-0: # set up saved register r11
+0: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ # set up saved register r11
lg %r15,__LC_KERNEL_STACK
la %r9,STACK_FRAME_OVERHEAD(%r15)
stg %r9,24(%r11) # r11 pt_regs pointer
@@ -1131,7 +1332,7 @@
stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
larl %r9,.Lsysc_do_svc
- br %r14
+ BR_R11USE_R14
.Lcleanup_system_call_insn:
.quad system_call
.quad .Lsysc_stmg
@@ -1141,7 +1342,7 @@
.Lcleanup_sysc_tif:
larl %r9,.Lsysc_tif
- br %r14
+ BR_R11USE_R14
.Lcleanup_sysc_restore:
# check if stpt has been executed
@@ -1158,14 +1359,14 @@
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
1: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
+ BR_R11USE_R14
.Lcleanup_sysc_restore_insn:
.quad .Lsysc_exit_timer
.quad .Lsysc_done - 4
.Lcleanup_io_tif:
larl %r9,.Lio_tif
- br %r14
+ BR_R11USE_R14
.Lcleanup_io_restore:
# check if stpt has been executed
@@ -1179,7 +1380,7 @@
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
1: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
+ BR_R11USE_R14
.Lcleanup_io_restore_insn:
.quad .Lio_exit_timer
.quad .Lio_done - 4
@@ -1232,17 +1433,17 @@
# prepare return psw
nihh %r8,0xfcfd # clear irq & wait state bits
lg %r9,48(%r11) # return from psw_idle
- br %r14
+ BR_R11USE_R14
.Lcleanup_idle_insn:
.quad .Lpsw_idle_lpsw
.Lcleanup_save_fpu_regs:
larl %r9,save_fpu_regs
- br %r14
+ BR_R11USE_R14
.Lcleanup_load_fpu_regs:
larl %r9,load_fpu_regs
- br %r14
+ BR_R11USE_R14
/*
* Integer constants
@@ -1258,7 +1459,6 @@
.Lsie_critical_length:
.quad .Lsie_done - .Lsie_gmap
#endif
-
.section .rodata, "a"
#define SYSCALL(esame,emu) .long esame
.globl sys_call_table
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 39127b6..df49f2a1 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -563,6 +563,7 @@
static void __ipl_run(void *unused)
{
+ __bpon();
diag308(DIAG308_LOAD_CLEAR, NULL);
if (MACHINE_IS_VM)
__cpcmd("IPL", NULL, 0, NULL);
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index fbc0789..64ccfdf 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -31,6 +31,9 @@
#include <linux/kernel.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
+#include <asm/alternative.h>
+#include <asm/nospec-branch.h>
+#include <asm/facility.h>
#if 0
#define DEBUGP printk
@@ -167,7 +170,11 @@
me->arch.got_offset = me->core_layout.size;
me->core_layout.size += me->arch.got_size;
me->arch.plt_offset = me->core_layout.size;
- me->core_layout.size += me->arch.plt_size;
+ if (me->arch.plt_size) {
+ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
+ me->arch.plt_size += PLT_ENTRY_SIZE;
+ me->core_layout.size += me->arch.plt_size;
+ }
return 0;
}
@@ -321,9 +328,20 @@
unsigned int *ip;
ip = me->core_layout.base + me->arch.plt_offset +
info->plt_offset;
- ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
- ip[1] = 0x100a0004;
- ip[2] = 0x07f10000;
+ ip[0] = 0x0d10e310; /* basr 1,0 */
+ ip[1] = 0x100a0004; /* lg 1,10(1) */
+ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
+ unsigned int *ij;
+ ij = me->core_layout.base +
+ me->arch.plt_offset +
+ me->arch.plt_size - PLT_ENTRY_SIZE;
+ ip[2] = 0xa7f40000 + /* j __jump_r1 */
+ (unsigned int)(u16)
+ (((unsigned long) ij - 8 -
+ (unsigned long) ip) / 2);
+ } else {
+ ip[2] = 0x07f10000; /* br %r1 */
+ }
ip[3] = (unsigned int) (val >> 32);
ip[4] = (unsigned int) val;
info->plt_initialized = 1;
@@ -428,6 +446,45 @@
const Elf_Shdr *sechdrs,
struct module *me)
{
+ const Elf_Shdr *s;
+ char *secstrings, *secname;
+ void *aseg;
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ !nospec_disable && me->arch.plt_size) {
+ unsigned int *ij;
+
+ ij = me->core_layout.base + me->arch.plt_offset +
+ me->arch.plt_size - PLT_ENTRY_SIZE;
+ if (test_facility(35)) {
+ ij[0] = 0xc6000000; /* exrl %r0,.+10 */
+ ij[1] = 0x0005a7f4; /* j . */
+ ij[2] = 0x000007f1; /* br %r1 */
+ } else {
+ ij[0] = 0x44000000 | (unsigned int)
+ offsetof(struct lowcore, br_r1_trampoline);
+ ij[1] = 0xa7f40000; /* j . */
+ }
+ }
+
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ aseg = (void *) s->sh_addr;
+ secname = secstrings + s->sh_name;
+
+ if (!strcmp(".altinstructions", secname))
+ /* patch .altinstructions */
+ apply_alternatives(aseg, aseg + s->sh_size);
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ (!strncmp(".s390_indirect", secname, 14)))
+ nospec_revert(aseg, aseg + s->sh_size);
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ (!strncmp(".s390_return", secname, 12)))
+ nospec_revert(aseg, aseg + s->sh_size);
+ }
+
jump_label_apply_nops(me);
return 0;
}
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
new file mode 100644
index 0000000..9f3b5b3
--- /dev/null
+++ b/arch/s390/kernel/nospec-branch.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <linux/device.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+static int __init nobp_setup_early(char *str)
+{
+ bool enabled;
+ int rc;
+
+ rc = kstrtobool(str, &enabled);
+ if (rc)
+ return rc;
+ if (enabled && test_facility(82)) {
+ /*
+ * The user explicitely requested nobp=1, enable it and
+ * disable the expoline support.
+ */
+ __set_facility(82, S390_lowcore.alt_stfle_fac_list);
+ if (IS_ENABLED(CONFIG_EXPOLINE))
+ nospec_disable = 1;
+ } else {
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+ return 0;
+}
+early_param("nobp", nobp_setup_early);
+
+static int __init nospec_setup_early(char *str)
+{
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ return 0;
+}
+early_param("nospec", nospec_setup_early);
+
+static int __init nospec_report(void)
+{
+ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+ pr_info("Spectre V2 mitigation: execute trampolines.\n");
+ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+ pr_info("Spectre V2 mitigation: limited branch prediction.\n");
+ return 0;
+}
+arch_initcall(nospec_report);
+
+#ifdef CONFIG_SYSFS
+ssize_t cpu_show_spectre_v1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+ return sprintf(buf, "Mitigation: execute trampolines\n");
+ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+ return sprintf(buf, "Mitigation: limited branch prediction.\n");
+ return sprintf(buf, "Vulnerable\n");
+}
+#endif
+
+#ifdef CONFIG_EXPOLINE
+
+int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+
+static int __init nospectre_v2_setup_early(char *str)
+{
+ nospec_disable = 1;
+ return 0;
+}
+early_param("nospectre_v2", nospectre_v2_setup_early);
+
+void __init nospec_auto_detect(void)
+{
+ if (IS_ENABLED(CC_USING_EXPOLINE)) {
+ /*
+ * The kernel has been compiled with expolines.
+ * Keep expolines enabled and disable nobp.
+ */
+ nospec_disable = 0;
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+ /*
+ * If the kernel has not been compiled with expolines the
+ * nobp setting decides what is done, this depends on the
+ * CONFIG_KERNEL_NOBP option and the nobp/nospec parameters.
+ */
+}
+
+static int __init spectre_v2_setup_early(char *str)
+{
+ if (str && !strncmp(str, "on", 2)) {
+ nospec_disable = 0;
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+ if (str && !strncmp(str, "off", 3))
+ nospec_disable = 1;
+ if (str && !strncmp(str, "auto", 4))
+ nospec_auto_detect();
+ return 0;
+}
+early_param("spectre_v2", spectre_v2_setup_early);
+
+static void __init_or_module __nospec_revert(s32 *start, s32 *end)
+{
+ enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
+ u8 *instr, *thunk, *br;
+ u8 insnbuf[6];
+ s32 *epo;
+
+ /* Second part of the instruction replace is always a nop */
+ memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
+ for (epo = start; epo < end; epo++) {
+ instr = (u8 *) epo + *epo;
+ if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
+ type = BRCL_EXPOLINE; /* brcl instruction */
+ else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
+ type = BRASL_EXPOLINE; /* brasl instruction */
+ else
+ continue;
+ thunk = instr + (*(int *)(instr + 2)) * 2;
+ if (thunk[0] == 0xc6 && thunk[1] == 0x00)
+ /* exrl %r0,<target-br> */
+ br = thunk + (*(int *)(thunk + 2)) * 2;
+ else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
+ thunk[6] == 0x44 && thunk[7] == 0x00 &&
+ (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
+ (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
+ /* larl %rx,<target br> + ex %r0,0(%rx) */
+ br = thunk + (*(int *)(thunk + 2)) * 2;
+ else
+ continue;
+ if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
+ continue;
+ switch (type) {
+ case BRCL_EXPOLINE:
+ /* brcl to thunk, replace with br + nop */
+ insnbuf[0] = br[0];
+ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+ break;
+ case BRASL_EXPOLINE:
+ /* brasl to thunk, replace with basr + nop */
+ insnbuf[0] = 0x0d;
+ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+ break;
+ }
+
+ s390_kernel_write(instr, insnbuf, 6);
+ }
+}
+
+void __init_or_module nospec_revert(s32 *start, s32 *end)
+{
+ if (nospec_disable)
+ __nospec_revert(start, end);
+}
+
+extern s32 __nospec_call_start[], __nospec_call_end[];
+extern s32 __nospec_return_start[], __nospec_return_end[];
+void __init nospec_init_branches(void)
+{
+ nospec_revert(__nospec_call_start, __nospec_call_end);
+ nospec_revert(__nospec_return_start, __nospec_return_end);
+}
+
+#endif /* CONFIG_EXPOLINE */
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 81d0808..d856263 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -179,3 +179,21 @@
.stop = c_stop,
.show = show_cpuinfo,
};
+
+int s390_isolate_bp(void)
+{
+ if (!test_facility(82))
+ return -EOPNOTSUPP;
+ set_thread_flag(TIF_ISOLATE_BP);
+ return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp);
+
+int s390_isolate_bp_guest(void)
+{
+ if (!test_facility(82))
+ return -EOPNOTSUPP;
+ set_thread_flag(TIF_ISOLATE_BP_GUEST);
+ return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp_guest);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e974e53..feb9d97 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -63,6 +63,8 @@
#include <asm/sclp.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
+#include <asm/alternative.h>
+#include <asm/nospec-branch.h>
#include "entry.h"
/*
@@ -335,7 +337,9 @@
lc->machine_flags = S390_lowcore.machine_flags;
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
- MAX_FACILITY_BIT/8);
+ sizeof(lc->stfle_fac_list));
+ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+ sizeof(lc->alt_stfle_fac_list));
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
@@ -372,6 +376,7 @@
#ifdef CONFIG_SMP
lc->spinlock_lockval = arch_spin_lockval(0);
#endif
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
set_prefix((u32)(unsigned long) lc);
lowcore_ptr[0] = lc;
@@ -871,6 +876,9 @@
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end;
+ if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
+ nospec_auto_detect();
+
parse_early_param();
#ifdef CONFIG_CRASH_DUMP
/* Deactivate elfcorehdr= kernel parameter */
@@ -931,6 +939,10 @@
conmode_default();
set_preferred_console();
+ apply_alternative_instructions();
+ if (IS_ENABLED(CONFIG_EXPOLINE))
+ nospec_init_branches();
+
/* Setup zfcpdump support */
setup_zfcpdump();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 35531fe..0a31110 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -205,6 +205,7 @@
lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
@@ -253,7 +254,9 @@
__ctl_store(lc->cregs_save_area, 0, 15);
save_access_regs((unsigned int *) lc->access_regs_save_area);
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
- MAX_FACILITY_BIT/8);
+ sizeof(lc->stfle_fac_list));
+ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+ sizeof(lc->alt_stfle_fac_list));
}
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
@@ -302,6 +305,7 @@
mem_assign_absolute(lc->restart_fn, (unsigned long) func);
mem_assign_absolute(lc->restart_data, (unsigned long) data);
mem_assign_absolute(lc->restart_source, source_cpu);
+ __bpon();
asm volatile(
"0: sigp 0,%0,%2 # sigp restart to target cpu\n"
" brc 2,0b # busy, try again\n"
@@ -875,6 +879,7 @@
void __noreturn cpu_die(void)
{
idle_task_exit();
+ __bpon();
pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
for (;;) ;
}
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index 66956c0..3d04dfd 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -147,6 +147,15 @@
return orig;
}
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+ struct pt_regs *regs)
+{
+ if (ctx == RP_CHECK_CHAIN_CALL)
+ return user_stack_pointer(regs) <= ret->stack;
+ else
+ return user_stack_pointer(regs) < ret->stack;
+}
+
/* Instruction Emulation */
static void adjust_psw_addr(psw_t *psw, unsigned long len)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 115bda2..dd96b46 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -99,6 +99,43 @@
EXIT_DATA
}
+ /*
+ * struct alt_inst entries. From the header (alternative.h):
+ * "Alternative instructions for different CPU types or capabilities"
+ * Think locking instructions on spinlocks.
+ * Note, that it is a part of __init region.
+ */
+ . = ALIGN(8);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+
+ /*
+ * And here are the replacement instructions. The linker sticks
+ * them as binary blobs. The .altinstructions has enough data to
+ * get the address and the length of them to patch the kernel safely.
+ * Note, that it is a part of __init region.
+ */
+ .altinstr_replacement : {
+ *(.altinstr_replacement)
+ }
+
+ /*
+ * Table with the patch locations to undo expolines
+ */
+ .nospec_call_table : {
+ __nospec_call_start = . ;
+ *(.s390_indirect*)
+ __nospec_call_end = . ;
+ }
+ .nospec_return_table : {
+ __nospec_return_start = . ;
+ *(.s390_return*)
+ __nospec_return_end = . ;
+ }
+
/* early.c uses stsi, which requires page aligned data. */
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index a70ff09..2032ab8 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -401,6 +401,9 @@
case KVM_CAP_S390_RI:
r = test_facility(64);
break;
+ case KVM_CAP_S390_BPB:
+ r = test_facility(82);
+ break;
default:
r = 0;
}
@@ -1713,6 +1716,8 @@
kvm_s390_set_prefix(vcpu, 0);
if (test_kvm_facility(vcpu->kvm, 64))
vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+ if (test_kvm_facility(vcpu->kvm, 82))
+ vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
/* fprs can be synchronized via vrs, even if the guest has no vx. With
* MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
*/
@@ -1829,7 +1834,6 @@
if (test_fp_ctl(current->thread.fpu.fpc))
/* User space provided an invalid FPC, let's clear it */
current->thread.fpu.fpc = 0;
-
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.enabled_gmap);
@@ -1877,6 +1881,7 @@
current->thread.fpu.fpc = 0;
vcpu->arch.sie_block->gbea = 1;
vcpu->arch.sie_block->pp = 0;
+ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
kvm_clear_async_pf_completion_queue(vcpu);
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -2744,6 +2749,11 @@
if (riccb->valid)
vcpu->arch.sie_block->ecb3 |= 0x01;
}
+ if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
+ test_kvm_facility(vcpu->kvm, 82)) {
+ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+ vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
+ }
kvm_run->kvm_dirty_regs = 0;
}
@@ -2762,6 +2772,7 @@
kvm_run->s.regs.pft = vcpu->arch.pfault_token;
kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+ kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index d8673e2..ced6c9b 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -217,6 +217,12 @@
memcpy(scb_o->gcr, scb_s->gcr, 128);
scb_o->pp = scb_s->pp;
+ /* branch prediction */
+ if (test_kvm_facility(vcpu->kvm, 82)) {
+ scb_o->fpf &= ~FPF_BPBC;
+ scb_o->fpf |= scb_s->fpf & FPF_BPBC;
+ }
+
/* interrupt intercept */
switch (scb_s->icptcode) {
case ICPT_PROGI:
@@ -259,6 +265,7 @@
scb_s->ecb3 = 0;
scb_s->ecd = 0;
scb_s->fac = 0;
+ scb_s->fpf = 0;
rc = prepare_cpuflags(vcpu, vsie_page);
if (rc)
@@ -316,6 +323,9 @@
prefix_unmapped(vsie_page);
scb_s->ecb |= scb_o->ecb & 0x10U;
}
+ /* branch prediction */
+ if (test_kvm_facility(vcpu->kvm, 82))
+ scb_s->fpf |= scb_o->fpf & FPF_BPBC;
/* SIMD */
if (test_kvm_facility(vcpu->kvm, 129)) {
scb_s->eca |= scb_o->eca & 0x00020000U;
@@ -754,6 +764,7 @@
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+ int guest_bp_isolation;
int rc;
handle_last_fault(vcpu, vsie_page);
@@ -764,6 +775,20 @@
s390_handle_mcck();
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+
+ /* save current guest state of bp isolation override */
+ guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
+
+ /*
+ * The guest is running with BPBC, so we have to force it on for our
+ * nested guest. This is done by enabling BPBC globally, so the BPBC
+ * control in the SCB (which the nested guest can modify) is simply
+ * ignored.
+ */
+ if (test_kvm_facility(vcpu->kvm, 82) &&
+ vcpu->arch.sie_block->fpf & FPF_BPBC)
+ set_thread_flag(TIF_ISOLATE_BP_GUEST);
+
local_irq_disable();
guest_enter_irqoff();
local_irq_enable();
@@ -773,6 +798,11 @@
local_irq_disable();
guest_exit_irqoff();
local_irq_enable();
+
+ /* restore guest state for bp isolation override */
+ if (!guest_bp_isolation)
+ clear_thread_flag(TIF_ISOLATE_BP_GUEST);
+
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
if (rc > 0)
diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h
index d007874..8f8cf94 100644
--- a/arch/sh/include/asm/futex.h
+++ b/arch/sh/include/asm/futex.h
@@ -27,21 +27,12 @@
return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
}
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- u32 oparg = (encoded_op << 8) >> 20;
- u32 cmparg = (encoded_op << 20) >> 20;
u32 oldval, newval, prev;
int ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
do {
@@ -80,17 +71,8 @@
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = ((int)oldval < (int)cmparg); break;
- case FUTEX_OP_CMP_GE: ret = ((int)oldval >= (int)cmparg); break;
- case FUTEX_OP_CMP_LE: ret = ((int)oldval <= (int)cmparg); break;
- case FUTEX_OP_CMP_GT: ret = ((int)oldval > (int)cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
return ret;
}
diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h
index 4e899b0..1cfd89d 100644
--- a/arch/sparc/include/asm/futex_64.h
+++ b/arch/sparc/include/asm/futex_64.h
@@ -29,22 +29,14 @@
: "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
: "memory")
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tem;
- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
- return -EFAULT;
if (unlikely((((unsigned long) uaddr) & 0x3UL)))
return -EINVAL;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
pagefault_disable();
switch (op) {
@@ -69,17 +61,9 @@
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index e64a1b7..83c1e63 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -106,12 +106,9 @@
lock = __atomic_hashed_lock((int __force *)uaddr)
#endif
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int uninitialized_var(val), ret;
__futex_prolog();
@@ -119,12 +116,6 @@
/* The 32-bit futex code makes this assumption, so validate it here. */
BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -148,30 +139,9 @@
}
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (val == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (val != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (val < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (val >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (val <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (val > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = val;
+
return ret;
}
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index 43c9575..5b06edd 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -59,7 +59,7 @@
CONFIG_RANDOMIZE_BASE=y
CONFIG_PHYSICAL_ALIGN=0x1000000
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 reboot=p"
+CONFIG_CMDLINE="console=ttyS0 reboot=p nopti"
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index f73796d..02e547f 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -26,6 +26,7 @@
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/nospec.h>
#include <asm/apic.h>
#include <asm/stacktrace.h>
@@ -303,17 +304,20 @@
config = attr->config;
- cache_type = (config >> 0) & 0xff;
+ cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
+ cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
+ cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
+ cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
val = hw_cache_event_ids[cache_type][cache_op][cache_result];
@@ -420,6 +424,8 @@
if (attr->config >= x86_pmu.max_events)
return -EINVAL;
+ attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
+
/*
* The generic map:
*/
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 1076c9a..47d526c 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -90,6 +90,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
+#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"
@@ -300,6 +301,7 @@
} else if (event->pmu == &cstate_pkg_pmu) {
if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
return -EINVAL;
+ cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
if (!pkg_msr[cfg].attr)
return -EINVAL;
event->hw.event_base = pkg_msr[cfg].msr;
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 4bb3ec6..be0b196 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -1,4 +1,5 @@
#include <linux/perf_event.h>
+#include <linux/nospec.h>
#include <asm/intel-family.h>
enum perf_msr_id {
@@ -136,9 +137,6 @@
if (event->attr.type != event->pmu->type)
return -ENOENT;
- if (cfg >= PERF_MSR_EVENT_MAX)
- return -EINVAL;
-
/* unsupported modes and filters */
if (event->attr.exclude_user ||
event->attr.exclude_kernel ||
@@ -149,6 +147,11 @@
event->attr.sample_period) /* no sampling */
return -EINVAL;
+ if (cfg >= PERF_MSR_EVENT_MAX)
+ return -EINVAL;
+
+ cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);
+
if (!msr[cfg].attr)
return -EINVAL;
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index b4c1f54..f4dc9b6 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -41,20 +41,11 @@
"+m" (*uaddr), "=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "1" (0))
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tem;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
@@ -80,30 +71,9 @@
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
index 809134c..90ab9a7 100644
--- a/arch/x86/include/uapi/asm/msgbuf.h
+++ b/arch/x86/include/uapi/asm/msgbuf.h
@@ -1 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X64_MSGBUF_H
+#define __ASM_X64_MSGBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
#include <asm-generic/msgbuf.h>
+#else
+/*
+ * The msqid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ __kernel_time_t msg_ctime; /* last change time */
+ __kernel_ulong_t msg_cbytes; /* current number of bytes on queue */
+ __kernel_ulong_t msg_qnum; /* number of messages in queue */
+ __kernel_ulong_t msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ __kernel_ulong_t __unused4;
+ __kernel_ulong_t __unused5;
+};
+
+#endif
+
+#endif /* __ASM_X64_MSGBUF_H */
diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
index 83c05fc..644421f 100644
--- a/arch/x86/include/uapi/asm/shmbuf.h
+++ b/arch/x86/include/uapi/asm/shmbuf.h
@@ -1 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X86_SHMBUF_H
+#define __ASM_X86_SHMBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
#include <asm-generic/shmbuf.h>
+#else
+/*
+ * The shmid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_time_t shm_atime; /* last attach time */
+ __kernel_time_t shm_dtime; /* last detach time */
+ __kernel_time_t shm_ctime; /* last change time */
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ __kernel_ulong_t shm_nattch; /* no. of current attaches */
+ __kernel_ulong_t __unused4;
+ __kernel_ulong_t __unused5;
+};
+
+struct shminfo64 {
+ __kernel_ulong_t shmmax;
+ __kernel_ulong_t shmmin;
+ __kernel_ulong_t shmmni;
+ __kernel_ulong_t shmseg;
+ __kernel_ulong_t shmall;
+ __kernel_ulong_t __unused1;
+ __kernel_ulong_t __unused2;
+ __kernel_ulong_t __unused3;
+ __kernel_ulong_t __unused4;
+};
+
+#endif
+
+#endif /* __ASM_X86_SHMBUF_H */
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 4bcd30c..79291d6 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -474,7 +474,6 @@
*/
static void save_mc_for_early(u8 *mc)
{
-#ifdef CONFIG_HOTPLUG_CPU
/* Synchronization during CPU hotplug. */
static DEFINE_MUTEX(x86_cpu_microcode_mutex);
@@ -521,7 +520,6 @@
out:
mutex_unlock(&x86_cpu_microcode_mutex);
-#endif
}
static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index e803d72..83929cc4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1591,6 +1591,8 @@
void *mwait_ptr;
int i;
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ return;
if (!this_cpu_has(X86_FEATURE_MWAIT))
return;
if (!this_cpu_has(X86_FEATURE_CLFLUSH))
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index bbfb03ec..da6a287 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -409,7 +409,7 @@
hpet2 -= hpet1;
tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
do_div(tmp, 1000000);
- do_div(deltatsc, tmp);
+ deltatsc = div64_u64(deltatsc, tmp);
return (unsigned long) deltatsc;
}
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index 72bfc1c..5bfbc1c 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -44,18 +44,10 @@
: "r" (uaddr), "I" (-EFAULT), "r" (oparg) \
: "memory")
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
#if !XCHAL_HAVE_S32C1I
return -ENOSYS;
@@ -89,19 +81,10 @@
pagefault_enable();
- if (ret)
- return ret;
+ if (!ret)
+ *oval = oldval;
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: return (oldval == cmparg);
- case FUTEX_OP_CMP_NE: return (oldval != cmparg);
- case FUTEX_OP_CMP_LT: return (oldval < cmparg);
- case FUTEX_OP_CMP_GE: return (oldval >= cmparg);
- case FUTEX_OP_CMP_LE: return (oldval <= cmparg);
- case FUTEX_OP_CMP_GT: return (oldval > cmparg);
- }
-
- return -ENOSYS;
+ return ret;
}
static inline int
diff --git a/block/ioctl.c b/block/ioctl.c
index d4a78d0..c4555b1 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -564,8 +564,6 @@
if ((size >> 9) > ~0UL)
return -EFBIG;
return put_ulong(arg, size >> 9);
- case BLKGETSTPART:
- return put_ulong(arg, bdev->bd_part->start_sect);
case BLKGETSIZE64:
return put_u64(arg, i_size_read(bdev->bd_inode));
case BLKTRACESTART:
diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64
index 5a96563..edfa150 100644
--- a/build.config.cuttlefish.x86_64
+++ b/build.config.cuttlefish.x86_64
@@ -13,3 +13,4 @@
vmlinux
System.map
"
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.arm b/build.config.goldfish.arm
index 866da93..ff5646a 100644
--- a/build.config.goldfish.arm
+++ b/build.config.goldfish.arm
@@ -10,3 +10,4 @@
vmlinux
System.map
"
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.arm64 b/build.config.goldfish.arm64
index 9c963cf..4c896a6 100644
--- a/build.config.goldfish.arm64
+++ b/build.config.goldfish.arm64
@@ -10,3 +10,4 @@
vmlinux
System.map
"
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.mips b/build.config.goldfish.mips
index 8af53d2..9a14a44 100644
--- a/build.config.goldfish.mips
+++ b/build.config.goldfish.mips
@@ -9,3 +9,4 @@
vmlinux
System.map
"
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.mips64 b/build.config.goldfish.mips64
index 2a33d36..6ad9759 100644
--- a/build.config.goldfish.mips64
+++ b/build.config.goldfish.mips64
@@ -9,3 +9,4 @@
vmlinux
System.map
"
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.x86 b/build.config.goldfish.x86
index f86253f..2266c62 100644
--- a/build.config.goldfish.x86
+++ b/build.config.goldfish.x86
@@ -10,3 +10,4 @@
vmlinux
System.map
"
+STOP_SHIP_TRACEPRINTK=1
diff --git a/build.config.goldfish.x86_64 b/build.config.goldfish.x86_64
index e173886..08c42c2 100644
--- a/build.config.goldfish.x86_64
+++ b/build.config.goldfish.x86_64
@@ -10,3 +10,4 @@
vmlinux
System.map
"
+STOP_SHIP_TRACEPRINTK=1
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index ca50eeb..b5953f1 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -157,16 +157,16 @@
void *private;
int err;
- /* If caller uses non-allowed flag, return error. */
- if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
- return -EINVAL;
-
if (sock->state == SS_CONNECTED)
return -EINVAL;
if (addr_len != sizeof(*sa))
return -EINVAL;
+ /* If caller uses non-allowed flag, return error. */
+ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+ return -EINVAL;
+
sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
sa->salg_name[sizeof(sa->salg_name) - 1] = 0;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 942ddff..4bb5f93 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1134,8 +1134,10 @@
if (!drbg)
return;
kzfree(drbg->Vbuf);
+ drbg->Vbuf = NULL;
drbg->V = NULL;
kzfree(drbg->Cbuf);
+ drbg->Cbuf = NULL;
drbg->C = NULL;
kzfree(drbg->scratchpadbuf);
drbg->scratchpadbuf = NULL;
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 94e04c9..667dc5c 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2069,6 +2069,25 @@
return opregion;
}
+static bool dmi_is_desktop(void)
+{
+ const char *chassis_type;
+
+ chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
+ if (!chassis_type)
+ return false;
+
+ if (!strcmp(chassis_type, "3") || /* 3: Desktop */
+ !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
+ !strcmp(chassis_type, "5") || /* 5: Pizza Box */
+ !strcmp(chassis_type, "6") || /* 6: Mini Tower */
+ !strcmp(chassis_type, "7") || /* 7: Tower */
+ !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
+ return true;
+
+ return false;
+}
+
int acpi_video_register(void)
{
int ret = 0;
@@ -2089,8 +2108,12 @@
* win8 ready (where we also prefer the native backlight driver, so
* normally the acpi_video code should not register there anyways).
*/
- if (only_lcd == -1)
- only_lcd = acpi_osi_is_win8();
+ if (only_lcd == -1) {
+ if (dmi_is_desktop() && acpi_osi_is_win8())
+ only_lcd = true;
+ else
+ only_lcd = false;
+ }
dmi_check_system(video_dmi_table);
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 7394aac..93888cc 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -69,11 +69,12 @@
struct device_attribute *attr, char *buf)
{
struct amba_device *dev = to_amba_device(_dev);
+ ssize_t len;
- if (!dev->driver_override)
- return 0;
-
- return sprintf(buf, "%s\n", dev->driver_override);
+ device_lock(_dev);
+ len = sprintf(buf, "%s\n", dev->driver_override);
+ device_unlock(_dev);
+ return len;
}
static ssize_t driver_override_store(struct device *_dev,
@@ -81,7 +82,7 @@
const char *buf, size_t count)
{
struct amba_device *dev = to_amba_device(_dev);
- char *driver_override, *old = dev->driver_override, *cp;
+ char *driver_override, *old, *cp;
/* We need to keep extra room for a newline */
if (count >= (PAGE_SIZE - 1))
@@ -95,12 +96,15 @@
if (cp)
*cp = '\0';
+ device_lock(_dev);
+ old = dev->driver_override;
if (strlen(driver_override)) {
dev->driver_override = driver_override;
} else {
kfree(driver_override);
dev->driver_override = NULL;
}
+ device_unlock(_dev);
kfree(old);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index e7e4560..957eb3c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3001,6 +3001,14 @@
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
+ if (target_node && target_proc == proc) {
+ binder_user_error("%d:%d got transaction to context manager from process owning it\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_invalid_target_handle;
+ }
}
if (!target_node) {
/*
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e08c09f..4fe3ec1 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4422,6 +4422,9 @@
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM, },
+ /* Sandisk devices which are known to not handle LPM well */
+ { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
+
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index d3dc954..81bfeec 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -23,6 +23,7 @@
#include <linux/bitops.h>
#include <linux/wait.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <asm/byteorder.h>
#include <asm/string.h>
#include <asm/io.h>
@@ -1458,6 +1459,8 @@
return -EFAULT;
if (pool < 0 || pool > ZATM_LAST_POOL)
return -EINVAL;
+ pool = array_index_nospec(pool,
+ ZATM_LAST_POOL + 1);
spin_lock_irqsave(&zatm_dev->lock, flags);
info = zatm_dev->pool_info[pool];
if (cmd == ZATM_GETPOOLZ) {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f8ba5c7..3257647 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -217,6 +217,7 @@
{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
@@ -249,7 +250,6 @@
{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
/* QCA ROME chipset */
- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 5d475b3..128ebd4 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2368,7 +2368,7 @@
if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
return media_changed(cdi, 1);
- if ((unsigned int)arg >= cdi->capacity)
+ if (arg >= cdi->capacity)
return -EINVAL;
info = kmalloc(sizeof(*info), GFP_KERNEL);
diff --git a/drivers/char/diag/diag_ipc_logging.h b/drivers/char/diag/diag_ipc_logging.h
index b9958a4..4b8dd1b 100644
--- a/drivers/char/diag/diag_ipc_logging.h
+++ b/drivers/char/diag/diag_ipc_logging.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
#define DIAG_DEBUG_MASKS 0x0010
#define DIAG_DEBUG_POWER 0x0020
#define DIAG_DEBUG_BRIDGE 0x0040
+#define DIAG_DEBUG_CONTROL 0x0080
#define DIAG_DEBUG
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 8d47ee38..6f81bfd 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -45,8 +45,11 @@
void diag_cntl_channel_open(struct diagfwd_info *p_info)
{
- if (!p_info)
+ if (!p_info) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid fwd_info structure\n");
return;
+ }
driver->mask_update |= PERIPHERAL_MASK(p_info->peripheral);
queue_work(driver->cntl_wq, &driver->mask_update_work);
diag_notify_md_client(p_info->peripheral, DIAG_STATUS_OPEN);
@@ -56,12 +59,18 @@
{
uint8_t peripheral;
- if (!p_info)
+ if (!p_info) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid fwd_info structure\n");
return;
+ }
peripheral = p_info->peripheral;
- if (peripheral >= NUM_PERIPHERALS)
+ if (peripheral >= NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid peripheral (%d)\n", peripheral);
return;
+ }
driver->feature[peripheral].sent_feature_mask = 0;
driver->feature[peripheral].rcvd_feature_mask = 0;
@@ -87,8 +96,11 @@
driver->stm_peripheral = 0;
mutex_unlock(&driver->cntl_lock);
- if (peripheral_mask == 0)
+ if (peripheral_mask == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Empty Peripheral mask\n");
return;
+ }
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!driver->feature[i].stm_support)
@@ -111,11 +123,18 @@
struct pid *pid_struct;
struct task_struct *result;
- if (peripheral > NUM_PERIPHERALS)
+ if (peripheral > NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid peripheral (%d)\n", peripheral);
return;
+ }
- if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE)
+ if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid logging_mode (%d)\n",
+ driver->logging_mode);
return;
+ }
mutex_lock(&driver->md_session_lock);
memset(&info, 0, sizeof(struct siginfo));
@@ -171,8 +190,12 @@
uint32_t pd;
int status = DIAG_STATUS_CLOSED;
- if (!buf || peripheral >= NUM_PERIPHERALS || len < sizeof(*pd_msg))
+ if (!buf || peripheral >= NUM_PERIPHERALS || len < sizeof(*pd_msg)) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, pd_msg_len = %d\n",
+ !buf, peripheral, len, (int)sizeof(*pd_msg));
return;
+ }
pd_msg = (struct diag_ctrl_msg_pd_status *)buf;
pd = pd_msg->pd_id;
@@ -182,8 +205,11 @@
static void enable_stm_feature(uint8_t peripheral)
{
- if (peripheral >= NUM_PERIPHERALS)
+ if (peripheral >= NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid peripheral (%d)\n", peripheral);
return;
+ }
mutex_lock(&driver->cntl_lock);
driver->feature[peripheral].stm_support = ENABLE_STM;
@@ -195,8 +221,11 @@
static void enable_socket_feature(uint8_t peripheral)
{
- if (peripheral >= NUM_PERIPHERALS)
+ if (peripheral >= NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid peripheral (%d)\n", peripheral);
return;
+ }
if (driver->supports_sockets)
driver->feature[peripheral].sockets_enabled = 1;
@@ -206,8 +235,11 @@
static void process_hdlc_encoding_feature(uint8_t peripheral)
{
- if (peripheral >= NUM_PERIPHERALS)
+ if (peripheral >= NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid peripheral (%d)\n", peripheral);
return;
+ }
if (driver->supports_apps_hdlc_encoding) {
driver->feature[peripheral].encode_hdlc =
@@ -220,8 +252,11 @@
static void process_upd_header_untagging_feature(uint8_t peripheral)
{
- if (peripheral >= NUM_PERIPHERALS)
+ if (peripheral >= NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid peripheral (%d)\n", peripheral);
return;
+ }
if (driver->supports_apps_header_untagging) {
driver->feature[peripheral].untag_header =
@@ -247,8 +282,16 @@
* Perform Basic sanity. The len field is the size of the data payload.
* This doesn't include the header size.
*/
- if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+ if (!buf || peripheral >= NUM_PERIPHERALS || len == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+ !buf, peripheral, len);
return;
+ }
+
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag:peripheral(%d) command deregistration packet processing started\n",
+ peripheral);
dereg = (struct diag_ctrl_cmd_dereg *)ptr;
ptr += header_len;
@@ -256,8 +299,8 @@
read_len += header_len - (2 * sizeof(uint32_t));
if (dereg->count_entries == 0) {
- pr_debug("diag: In %s, received reg tbl with no entries\n",
- __func__);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: received reg tbl with no entries\n");
return;
}
@@ -276,6 +319,9 @@
pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
__func__, read_len, len, dereg->count_entries);
}
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag:peripheral(%d) command deregistration packet processing complete\n",
+ peripheral);
}
static void process_command_registration(uint8_t *buf, uint32_t len,
uint8_t peripheral)
@@ -292,8 +338,15 @@
* Perform Basic sanity. The len field is the size of the data payload.
* This doesn't include the header size.
*/
- if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+ if (!buf || peripheral >= NUM_PERIPHERALS || len == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+ !buf, peripheral, len);
return;
+ }
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: peripheral(%d) command registration packet processing started\n",
+ peripheral);
reg = (struct diag_ctrl_cmd_reg *)ptr;
ptr += header_len;
@@ -301,7 +354,8 @@
read_len += header_len - (2 * sizeof(uint32_t));
if (reg->count_entries == 0) {
- pr_debug("diag: In %s, received reg tbl with no entries\n",
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: In %s, received reg tbl with no entries\n",
__func__);
return;
}
@@ -321,6 +375,9 @@
pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
__func__, read_len, len, reg->count_entries);
}
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: peripheral(%d) command registration packet processing complete\n",
+ peripheral);
}
static void diag_close_transport_work_fn(struct work_struct *work)
@@ -347,8 +404,11 @@
static void process_socket_feature(uint8_t peripheral)
{
- if (peripheral >= NUM_PERIPHERALS)
+ if (peripheral >= NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid peripheral (%d)\n", peripheral);
return;
+ }
mutex_lock(&driver->cntl_lock);
driver->close_transport |= PERIPHERAL_MASK(peripheral);
@@ -379,15 +439,20 @@
uint32_t feature_mask = 0;
uint8_t *ptr = buf;
- if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+ if (!buf || peripheral >= NUM_PERIPHERALS || len == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+ !buf, peripheral, len);
return;
+ }
header = (struct diag_ctrl_feature_mask *)ptr;
ptr += header_len;
feature_mask_len = header->feature_mask_len;
if (feature_mask_len == 0) {
- pr_debug("diag: In %s, received invalid feature mask from peripheral %d\n",
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: In %s, received invalid feature mask from peripheral %d\n",
__func__, peripheral);
return;
}
@@ -400,6 +465,8 @@
diag_cmd_remove_reg_by_proc(peripheral);
driver->feature[peripheral].rcvd_feature_mask = 1;
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: Received feature mask for peripheral %d\n", peripheral);
for (i = 0; i < feature_mask_len && read_len < len; i++) {
feature_mask = *(uint8_t *)ptr;
@@ -431,6 +498,10 @@
process_socket_feature(peripheral);
process_log_on_demand_feature(peripheral);
+
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: Peripheral(%d) feature mask is processed\n",
+ peripheral);
}
static void process_last_event_report(uint8_t *buf, uint32_t len,
@@ -442,14 +513,23 @@
uint32_t pkt_len = sizeof(uint32_t) + sizeof(uint16_t);
uint16_t event_size = 0;
- if (!buf || peripheral >= NUM_PERIPHERALS || len != pkt_len)
+ if (!buf || peripheral >= NUM_PERIPHERALS || len != pkt_len) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, pkt_len = %d\n",
+ !buf, peripheral, len, pkt_len);
return;
+ }
+
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag:started processing last event report for peripheral (%d)\n",
+ peripheral);
mutex_lock(&event_mask.lock);
header = (struct diag_ctrl_last_event_report *)ptr;
event_size = ((header->event_last_id / 8) + 1);
if (event_size >= driver->event_mask_size) {
- pr_debug("diag: In %s, receiving event mask size more that Apps can handle\n",
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: In %s, receiving event mask size more that Apps can handle\n",
__func__);
temp = krealloc(driver->event_mask->ptr, event_size,
GFP_KERNEL);
@@ -467,6 +547,9 @@
driver->last_event_id = header->event_last_id;
err:
mutex_unlock(&event_mask.lock);
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: last event report processed for peripheral (%d)\n",
+ peripheral);
}
static void process_log_range_report(uint8_t *buf, uint32_t len,
@@ -480,8 +563,15 @@
struct diag_ctrl_log_range *log_range = NULL;
struct diag_log_mask_t *mask_ptr = NULL;
- if (!buf || peripheral >= NUM_PERIPHERALS || len < 0)
+ if (!buf || peripheral >= NUM_PERIPHERALS || len < 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d\n",
+ !buf, peripheral, len);
return;
+ }
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag:started processing log range report for peripheral(%d)\n",
+ peripheral);
header = (struct diag_ctrl_log_range_report *)ptr;
ptr += header_len;
@@ -507,6 +597,9 @@
mask_ptr->range = LOG_ITEMS_TO_SIZE(log_range->num_items);
mutex_unlock(&(mask_ptr->lock));
}
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: log range report processed for peripheral (%d)\n",
+ peripheral);
}
static int update_msg_mask_tbl_entry(struct diag_msg_mask_t *mask,
@@ -514,8 +607,12 @@
{
uint32_t temp_range;
- if (!mask || !range)
+ if (!mask || !range) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid %s\n",
+ (!mask ? "mask" : (!range ? "range" : " ")));
return -EIO;
+ }
if (range->ssid_last < range->ssid_first) {
pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
__func__, range->ssid_first, range->ssid_last);
@@ -547,8 +644,16 @@
uint8_t *temp = NULL;
uint32_t min_len = header_len - sizeof(struct diag_ctrl_pkt_header_t);
- if (!buf || peripheral >= NUM_PERIPHERALS || len < min_len)
+ if (!buf || peripheral >= NUM_PERIPHERALS || len < min_len) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, min_len = %d\n",
+ !buf, peripheral, len, min_len);
return;
+ }
+
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: started processing ssid range for peripheral (%d)\n",
+ peripheral);
header = (struct diag_ctrl_ssid_range_report *)ptr;
ptr += header_len;
@@ -600,6 +705,9 @@
driver->msg_mask_tbl_count += 1;
}
mutex_unlock(&driver->msg_mask_lock);
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: processed ssid range for peripheral(%d)\n",
+ peripheral);
}
static void diag_build_time_mask_update(uint8_t *buf,
@@ -616,8 +724,12 @@
uint32_t *dest_ptr = NULL;
struct diag_msg_mask_t *build_mask = NULL;
- if (!range || !buf)
+ if (!range || !buf) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid %s\n",
+ (!range ? "range" : (!buf ? "buf" : " ")));
return;
+ }
if (range->ssid_last < range->ssid_first) {
pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
@@ -679,8 +791,16 @@
struct diag_ctrl_build_mask_report *header = NULL;
struct diag_ssid_range_t *range = NULL;
- if (!buf || peripheral >= NUM_PERIPHERALS || len < header_len)
+ if (!buf || peripheral >= NUM_PERIPHERALS || len < header_len) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters:(!buf) = %d, peripheral = %d, len = %d, header_len = %d\n",
+ !buf, peripheral, len, header_len);
return;
+ }
+
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: started processing build mask for peripheral(%d)\n",
+ peripheral);
header = (struct diag_ctrl_build_mask_report *)ptr;
ptr += header_len;
@@ -696,6 +816,8 @@
ptr += num_items * sizeof(uint32_t);
read_len += num_items * sizeof(uint32_t);
}
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: processing build mask complete (%d)\n", peripheral);
}
int diag_add_diag_id_to_list(uint8_t diag_id, char *process_name,
@@ -703,8 +825,12 @@
{
struct diag_id_tbl_t *new_item = NULL;
- if (!process_name || diag_id == 0)
+ if (!process_name || diag_id == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters: !process_name = %d, diag_id = %d\n",
+ !process_name, diag_id);
return -EINVAL;
+ }
new_item = kzalloc(sizeof(struct diag_id_tbl_t), GFP_KERNEL);
if (!new_item)
@@ -734,8 +860,10 @@
struct list_head *temp;
struct diag_id_tbl_t *item = NULL;
- if (!process_name || !diag_id)
+ if (!process_name || !diag_id) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid parameters\n");
return -EINVAL;
+ }
mutex_lock(&driver->diag_id_mutex);
list_for_each_safe(start, temp, &driver->diag_id_list) {
@@ -762,8 +890,12 @@
uint8_t local_diag_id = 0;
uint8_t new_request = 0, i = 0, ch_type = 0;
- if (!buf || len == 0 || peripheral >= NUM_PERIPHERALS)
+ if (!buf || len == 0 || peripheral >= NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Invalid parameters: !buf = %d, len = %d, peripheral = %d\n",
+ !buf, len, peripheral);
return;
+ }
header = (struct diag_ctrl_diagid *)buf;
process_name = (char *)&header->process_name;
@@ -841,7 +973,7 @@
fwd_info = &peripheral_info[TYPE_DATA][peripheral];
diagfwd_buffers_init(fwd_info);
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
- "diag: diag_id sent = %d to peripheral = %d with diag_id = %d for %s :\n",
+ "diag: diag_id sent = %d to peripheral = %d with diag_id = %d for %s\n",
driver->diag_id_sent[peripheral], peripheral,
ctrl_pkt.diag_id, process_name);
}
@@ -855,8 +987,10 @@
uint8_t *ptr = buf;
struct diag_ctrl_pkt_header_t *ctrl_pkt = NULL;
- if (!buf || len <= 0 || !p_info)
+ if (!buf || len <= 0 || !p_info) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid parameters\n");
return;
+ }
if (reg_dirty & PERIPHERAL_MASK(p_info->peripheral)) {
pr_err_ratelimited("diag: dropping command registration from peripheral %d\n",
@@ -866,6 +1000,9 @@
while (read_len + header_len < len) {
ctrl_pkt = (struct diag_ctrl_pkt_header_t *)ptr;
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag:peripheral: %d: pkt_id: %d\n",
+ p_info->peripheral, ctrl_pkt->pkt_id);
switch (ctrl_pkt->pkt_id) {
case DIAG_CTRL_MSG_REG:
process_command_registration(ptr, ctrl_pkt->len,
@@ -904,12 +1041,15 @@
p_info->peripheral);
break;
default:
- pr_debug("diag: Control packet %d not supported\n",
- ctrl_pkt->pkt_id);
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: Control packet %d not supported\n",
+ ctrl_pkt->pkt_id);
}
ptr += header_len + ctrl_pkt->len;
read_len += header_len + ctrl_pkt->len;
}
+ DIAG_LOG(DIAG_DEBUG_CONTROL,
+ "diag: control packet processing complete\n");
}
static int diag_compute_real_time(int idx)
@@ -1127,15 +1267,16 @@
for (i = 0; i < DIAG_NUM_PROC; i++) {
temp_real_time = diag_compute_real_time(i);
if (temp_real_time == driver->real_time_mode[i]) {
- pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: did not update real time mode on proc %d, already in the req mode %d\n",
i, temp_real_time);
continue;
}
if (i == DIAG_LOCAL_PROC) {
if (!send_update) {
- pr_debug("diag: In %s, cannot send real time mode pkt since one of the periperhal is in buffering mode\n",
- __func__);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: cannot send real time mode pkt since one of the periperhal is in buffering mode\n");
break;
}
for (j = 0; j < NUM_PERIPHERALS; j++)
@@ -1169,7 +1310,8 @@
temp_real_time = MODE_NONREALTIME;
}
if (temp_real_time == driver->real_time_mode[i]) {
- pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: did not update real time mode on proc %d, already in the req mode %d\n",
i, temp_real_time);
continue;
}
@@ -1204,8 +1346,8 @@
if (!driver->diagfwd_cntl[peripheral] ||
!driver->diagfwd_cntl[peripheral]->ch_open) {
- pr_debug("diag: In %s, control channel is not open, p: %d\n",
- __func__, peripheral);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: control channel is not open, p: %d\n", peripheral);
return err;
}
@@ -1317,8 +1459,9 @@
}
if (!driver->feature[peripheral].peripheral_buffering) {
- pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
- __func__, peripheral);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: peripheral %d doesn't support buffering\n",
+ peripheral);
driver->buffering_flag[params->peripheral] = 0;
return -EIO;
}
@@ -1383,8 +1526,9 @@
if (!driver->diagfwd_cntl[peripheral] ||
!driver->diagfwd_cntl[peripheral]->ch_open) {
- pr_debug("diag: In %s, control channel is not open, p: %d\n",
- __func__, peripheral);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: control channel is not open, p: %d\n",
+ peripheral);
return -ENODEV;
}
@@ -1413,15 +1557,17 @@
struct diag_ctrl_drain_immediate_v2 ctrl_pkt_v2;
if (!driver->feature[peripheral].peripheral_buffering) {
- pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
- __func__, peripheral);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: peripheral %d doesn't support buffering\n",
+ peripheral);
return -EINVAL;
}
if (!driver->diagfwd_cntl[peripheral] ||
!driver->diagfwd_cntl[peripheral]->ch_open) {
- pr_debug("diag: In %s, control channel is not open, p: %d\n",
- __func__, peripheral);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: control channel is not open, p: %d\n",
+ peripheral);
return -ENODEV;
}
@@ -1478,8 +1624,9 @@
}
if (!driver->feature[peripheral].peripheral_buffering) {
- pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
- __func__, peripheral);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: peripheral %d doesn't support buffering\n",
+ peripheral);
return -EINVAL;
}
@@ -1557,15 +1704,17 @@
}
if (!driver->feature[peripheral].peripheral_buffering) {
- pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
- __func__, peripheral);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: peripheral %d doesn't support buffering\n",
+ peripheral);
return -EINVAL;
}
if (!driver->diagfwd_cntl[peripheral] ||
!driver->diagfwd_cntl[peripheral]->ch_open) {
- pr_debug("diag: In %s, control channel is not open, p: %d\n",
- __func__, peripheral);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: control channel is not open, p: %d\n",
+ peripheral);
return -ENODEV;
}
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 7225dc2..2022e7b 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -723,6 +723,7 @@
unsigned char *buf, int len)
{
if (!fwd_info) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid fwd_info\n");
diag_ws_release();
return;
}
@@ -743,8 +744,12 @@
*/
diag_ws_on_copy_fail(DIAG_WS_MUX);
/* Reset the buffer in_busy value after processing the data */
- if (fwd_info->buf_1)
+ if (fwd_info->buf_1) {
atomic_set(&fwd_info->buf_1->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
diagfwd_queue_read(fwd_info);
diagfwd_queue_read(&peripheral_info[TYPE_DATA][fwd_info->peripheral]);
@@ -769,8 +774,12 @@
diag_dci_process_peripheral_data(fwd_info, (void *)buf, len);
/* Reset the buffer in_busy value after processing the data */
- if (fwd_info->buf_1)
+ if (fwd_info->buf_1) {
atomic_set(&fwd_info->buf_1->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
diagfwd_queue_read(fwd_info);
}
@@ -1638,13 +1647,15 @@
struct diagfwd_buf_t *temp_buf = NULL;
if (!fwd_info) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid fwd_info\n");
diag_ws_release();
return;
}
if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
- pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d ch_open: %d\n",
- __func__, fwd_info->peripheral, fwd_info->type,
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: p: %d, t: %d, inited: %d, opened: %d, ch_open: %d\n",
+ fwd_info->peripheral, fwd_info->type,
fwd_info->inited, atomic_read(&fwd_info->opened),
fwd_info->ch_open);
diag_ws_release();
@@ -1680,8 +1691,9 @@
atomic_set(&temp_buf->in_busy, 1);
}
} else {
- pr_debug("diag: In %s, both buffers are empty for p: %d, t: %d\n",
- __func__, fwd_info->peripheral, fwd_info->type);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: both buffers are busy for p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
}
if (!read_buf) {
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8a167a6..1b3c731 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -259,6 +259,7 @@
#include <linux/kmemcheck.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
+#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
@@ -444,6 +445,16 @@
__u8 tmp[CHACHA20_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
+static struct ratelimit_state unseeded_warning =
+ RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
+static struct ratelimit_state urandom_warning =
+ RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+
+static int ratelimit_disable __read_mostly;
+
+module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
+
/**********************************************************************
*
* OS independent entropy store. Here are the functions which handle
@@ -819,6 +830,39 @@
return 1;
}
+#ifdef CONFIG_NUMA
+static void do_numa_crng_init(struct work_struct *work)
+{
+ int i;
+ struct crng_state *crng;
+ struct crng_state **pool;
+
+ pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+ for_each_online_node(i) {
+ crng = kmalloc_node(sizeof(struct crng_state),
+ GFP_KERNEL | __GFP_NOFAIL, i);
+ spin_lock_init(&crng->lock);
+ crng_initialize(crng);
+ pool[i] = crng;
+ }
+ mb();
+ if (cmpxchg(&crng_node_pool, NULL, pool)) {
+ for_each_node(i)
+ kfree(pool[i]);
+ kfree(pool);
+ }
+}
+
+static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
+
+static void numa_crng_init(void)
+{
+ schedule_work(&numa_crng_init_work);
+}
+#else
+static void numa_crng_init(void) {}
+#endif
+
static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
unsigned long flags;
@@ -848,10 +892,23 @@
memzero_explicit(&buf, sizeof(buf));
crng->init_time = jiffies;
if (crng == &primary_crng && crng_init < 2) {
+ numa_crng_init();
crng_init = 2;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
pr_notice("random: crng init done\n");
+ if (unseeded_warning.missed) {
+ pr_notice("random: %d get_random_xx warning(s) missed "
+ "due to ratelimiting\n",
+ unseeded_warning.missed);
+ unseeded_warning.missed = 0;
+ }
+ if (urandom_warning.missed) {
+ pr_notice("random: %d urandom warning(s) missed "
+ "due to ratelimiting\n",
+ urandom_warning.missed);
+ urandom_warning.missed = 0;
+ }
}
spin_unlock_irqrestore(&crng->lock, flags);
}
@@ -1661,29 +1718,14 @@
*/
static int rand_initialize(void)
{
-#ifdef CONFIG_NUMA
- int i;
- struct crng_state *crng;
- struct crng_state **pool;
-#endif
-
init_std_data(&input_pool);
init_std_data(&blocking_pool);
crng_initialize(&primary_crng);
crng_global_init_time = jiffies;
-
-#ifdef CONFIG_NUMA
- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
- for_each_online_node(i) {
- crng = kmalloc_node(sizeof(struct crng_state),
- GFP_KERNEL | __GFP_NOFAIL, i);
- spin_lock_init(&crng->lock);
- crng_initialize(crng);
- pool[i] = crng;
+ if (ratelimit_disable) {
+ urandom_warning.interval = 0;
+ unseeded_warning.interval = 0;
}
- mb();
- crng_node_pool = pool;
-#endif
return 0;
}
early_initcall(rand_initialize);
@@ -1751,9 +1793,10 @@
if (!crng_ready() && maxwarn > 0) {
maxwarn--;
- printk(KERN_NOTICE "random: %s: uninitialized urandom read "
- "(%zd bytes read)\n",
- current->comm, nbytes);
+ if (__ratelimit(&urandom_warning))
+ printk(KERN_NOTICE "random: %s: uninitialized "
+ "urandom read (%zd bytes read)\n",
+ current->comm, nbytes);
spin_lock_irqsave(&primary_crng.lock, flags);
crng_init_cnt = 0;
spin_unlock_irqrestore(&primary_crng.lock, flags);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8f890c1..8c0017d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1405,7 +1405,6 @@
{
char debugfs_name[16];
struct port *port;
- struct port_buffer *buf;
dev_t devt;
unsigned int nr_added_bufs;
int err;
@@ -1516,8 +1515,6 @@
return 0;
free_inbufs:
- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf, true);
free_device:
device_destroy(pdrvdata.class, port->dev->devt);
free_cdev:
@@ -1542,34 +1539,14 @@
static void remove_port_data(struct port *port)
{
- struct port_buffer *buf;
-
spin_lock_irq(&port->inbuf_lock);
/* Remove unused data this port might have received. */
discard_port_data(port);
spin_unlock_irq(&port->inbuf_lock);
- /* Remove buffers we queued up for the Host to send us data in. */
- do {
- spin_lock_irq(&port->inbuf_lock);
- buf = virtqueue_detach_unused_buf(port->in_vq);
- spin_unlock_irq(&port->inbuf_lock);
- if (buf)
- free_buf(buf, true);
- } while (buf);
-
spin_lock_irq(&port->outvq_lock);
reclaim_consumed_buffers(port);
spin_unlock_irq(&port->outvq_lock);
-
- /* Free pending buffers from the out-queue. */
- do {
- spin_lock_irq(&port->outvq_lock);
- buf = virtqueue_detach_unused_buf(port->out_vq);
- spin_unlock_irq(&port->outvq_lock);
- if (buf)
- free_buf(buf, true);
- } while (buf);
}
/*
@@ -1794,13 +1771,24 @@
spin_unlock(&portdev->c_ivq_lock);
}
+static void flush_bufs(struct virtqueue *vq, bool can_sleep)
+{
+ struct port_buffer *buf;
+ unsigned int len;
+
+ while ((buf = virtqueue_get_buf(vq, &len)))
+ free_buf(buf, can_sleep);
+}
+
static void out_intr(struct virtqueue *vq)
{
struct port *port;
port = find_port_by_vq(vq->vdev->priv, vq);
- if (!port)
+ if (!port) {
+ flush_bufs(vq, false);
return;
+ }
wake_up_interruptible(&port->waitqueue);
}
@@ -1811,8 +1799,10 @@
unsigned long flags;
port = find_port_by_vq(vq->vdev->priv, vq);
- if (!port)
+ if (!port) {
+ flush_bufs(vq, false);
return;
+ }
spin_lock_irqsave(&port->inbuf_lock, flags);
port->inbuf = get_inbuf(port);
@@ -1987,6 +1977,15 @@
static void remove_vqs(struct ports_device *portdev)
{
+ struct virtqueue *vq;
+
+ virtio_device_for_each_vq(portdev->vdev, vq) {
+ struct port_buffer *buf;
+
+ flush_bufs(vq, true);
+ while ((buf = virtqueue_detach_unused_buf(vq)))
+ free_buf(buf, true);
+ }
portdev->vdev->config->del_vqs(portdev->vdev);
kfree(portdev->in_vqs);
kfree(portdev->out_vqs);
diff --git a/drivers/clk/msm/clock-gcc-8952.c b/drivers/clk/msm/clock-gcc-8952.c
index 6d7727f..d471138 100644
--- a/drivers/clk/msm/clock-gcc-8952.c
+++ b/drivers/clk/msm/clock-gcc-8952.c
@@ -216,6 +216,7 @@
.config_reg = (void __iomem *)APCS_C0_PLL_USER_CTL,
.status_reg = (void __iomem *)APCS_C0_PLL_STATUS,
.freq_tbl = apcs_c0_pll_freq,
+ .config_ctl_reg = (void __iomem *)APCS_C0_PLL_CONFIG_CTL,
.masks = {
.vco_mask = BM(29, 28),
.pre_div_mask = BIT(12),
@@ -283,6 +284,7 @@
.config_reg = (void __iomem *)APCS_C1_PLL_USER_CTL,
.status_reg = (void __iomem *)APCS_C1_PLL_STATUS,
.freq_tbl = apcs_c1_pll_freq,
+ .config_ctl_reg = (void __iomem *)APCS_C1_PLL_CONFIG_CTL,
.masks = {
.vco_mask = BM(29, 28),
.pre_div_mask = BIT(12),
@@ -4407,6 +4409,11 @@
if (compat_bin2 || compat_bin4 || compat_bin5)
nbases = APCS_C0_PLL_BASE;
+ if (compat_bin5 || compat_bin6) {
+ a53ss_c0_pll.c.ops = &clk_ops_acpu_pll;
+ a53ss_c1_pll.c.ops = &clk_ops_acpu_pll;
+ }
+
ret = get_mmio_addr(pdev, nbases);
if (ret)
return ret;
diff --git a/drivers/clk/msm/clock-pll.c b/drivers/clk/msm/clock-pll.c
index 26c04e5..381c8db 100644
--- a/drivers/clk/msm/clock-pll.c
+++ b/drivers/clk/msm/clock-pll.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -217,13 +217,46 @@
writel_relaxed(regval, pll_config);
}
+static void pll_wait_for_lock(struct pll_clk *pll)
+{
+ int count;
+ u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+ u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+ u32 status_reg, user_reg, l_reg, m_reg, n_reg, config_reg;
+
+ /* Wait for pll to lock. */
+ for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+ if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
+ break;
+ udelay(1);
+ }
+
+ if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)) {
+ mode = readl_relaxed(PLL_MODE_REG(pll));
+ status_reg = readl_relaxed(PLL_STATUS_REG(pll));
+ user_reg = readl_relaxed(PLL_CONFIG_REG(pll));
+ config_reg = readl_relaxed(PLL_CFG_CTL_REG(pll));
+ l_reg = readl_relaxed(PLL_L_REG(pll));
+ m_reg = readl_relaxed(PLL_M_REG(pll));
+ n_reg = readl_relaxed(PLL_N_REG(pll));
+ pr_err("count = %d\n", (int)count);
+ pr_err("mode register is 0x%x\n", mode);
+ pr_err("status register is 0x%x\n", status_reg);
+ pr_err("user control register is 0x%x\n", user_reg);
+ pr_err("config control register is 0x%x\n", config_reg);
+ pr_err("L value register is 0x%x\n", l_reg);
+ pr_err("M value register is 0x%x\n", m_reg);
+ pr_err("N value control register is 0x%x\n", n_reg);
+ panic("PLL %s didn't lock after enabling it!\n",
+ pll->c.dbg_name);
+ }
+}
+
static int sr2_pll_clk_enable(struct clk *c)
{
unsigned long flags;
struct pll_clk *pll = to_pll_clk(c);
- int ret = 0, count;
u32 mode = readl_relaxed(PLL_MODE_REG(pll));
- u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
spin_lock_irqsave(&pll_reg_lock, flags);
@@ -245,15 +278,7 @@
mode |= PLL_RESET_N;
writel_relaxed(mode, PLL_MODE_REG(pll));
- /* Wait for pll to lock. */
- for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
- if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
- break;
- udelay(1);
- }
-
- if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
- pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+ pll_wait_for_lock(pll);
/* Enable PLL output. */
mode |= PLL_OUTCTRL;
@@ -263,7 +288,50 @@
mb();
spin_unlock_irqrestore(&pll_reg_lock, flags);
- return ret;
+ return 0;
+}
+
+static int acpu_pll_clk_enable(struct clk *c)
+{
+ unsigned long flags;
+ struct pll_clk *pll = to_pll_clk(c);
+ u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+
+ spin_lock_irqsave(&pll_reg_lock, flags);
+
+ spm_event(pll->spm_ctrl.spm_base, pll->spm_ctrl.offset,
+ pll->spm_ctrl.event_bit, false);
+
+ /* Disable PLL bypass mode. */
+ mode |= PLL_BYPASSNL;
+ writel_relaxed(mode, PLL_MODE_REG(pll));
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset. Delay 10us just to be safe.
+ */
+ mb();
+ udelay(10);
+
+ /* De-assert active-low PLL reset. */
+ mode |= PLL_RESET_N;
+ writel_relaxed(mode, PLL_MODE_REG(pll));
+
+ /* PLL H/W requires a 50uSec delay before polling lock_detect. */
+ mb();
+ udelay(50);
+
+ pll_wait_for_lock(pll);
+
+ /* Enable PLL output. */
+ mode |= PLL_OUTCTRL;
+ writel_relaxed(mode, PLL_MODE_REG(pll));
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+
+ spin_unlock_irqrestore(&pll_reg_lock, flags);
+ return 0;
}
void __variable_rate_pll_init(struct clk *c)
@@ -886,6 +954,15 @@
.list_registers = local_pll_clk_list_registers,
};
+const struct clk_ops clk_ops_acpu_pll = {
+ .enable = acpu_pll_clk_enable,
+ .disable = local_pll_clk_disable,
+ .set_rate = local_pll_clk_set_rate,
+ .round_rate = local_pll_clk_round_rate,
+ .handoff = local_pll_clk_handoff,
+ .list_registers = local_pll_clk_list_registers,
+};
+
const struct clk_ops clk_ops_variable_rate_pll_hwfsm = {
.enable = variable_rate_pll_clk_enable_hwfsm,
.disable = variable_rate_pll_clk_disable_hwfsm,
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index d426691..316ac39 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, 2017-2018,
+ * The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -30,7 +31,9 @@
struct qcom_cc {
struct qcom_reset_controller reset;
struct clk_regmap **rclks;
+ struct clk_hw **hwclks;
size_t num_rclks;
+ size_t num_hwclks;
};
const
@@ -182,11 +185,14 @@
struct qcom_cc *cc = data;
unsigned int idx = clkspec->args[0];
- if (idx >= cc->num_rclks) {
+ if (idx >= cc->num_rclks + cc->num_hwclks) {
pr_err("invalid index %u\n", idx);
return ERR_PTR(-EINVAL);
}
+ if (idx < cc->num_hwclks && cc->hwclks[idx])
+ return cc->hwclks[idx];
+
return cc->rclks[idx] ? &cc->rclks[idx]->hw : ERR_PTR(-ENOENT);
}
@@ -199,7 +205,9 @@
struct qcom_cc *cc;
struct gdsc_desc *scd;
size_t num_clks = desc->num_clks;
+ size_t num_hwclks = desc->num_hwclks;
struct clk_regmap **rclks = desc->clks;
+ struct clk_hw **hwclks = desc->hwclks;
cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL);
if (!cc)
@@ -207,6 +215,17 @@
cc->rclks = rclks;
cc->num_rclks = num_clks;
+ cc->hwclks = hwclks;
+ cc->num_hwclks = num_hwclks;
+
+ for (i = 0; i < num_hwclks; i++) {
+ if (!hwclks[i])
+ continue;
+
+ ret = devm_clk_hw_register(dev, hwclks[i]);
+ if (ret)
+ return ret;
+ }
for (i = 0; i < num_clks; i++) {
if (!rclks[i])
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 5e26763..29c4697 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -28,7 +28,9 @@
struct qcom_cc_desc {
const struct regmap_config *config;
struct clk_regmap **clks;
+ struct clk_hw **hwclks;
size_t num_clks;
+ size_t num_hwclks;
const struct qcom_reset_map *resets;
size_t num_resets;
struct gdsc **gdscs;
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 555b8bd..7ea5d9d 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
#include "reset.h"
#include "clk-alpha-pll.h"
#include "vdd-level-sdm845.h"
+#include "clk-voter.h"
#define GCC_MMSS_MISC 0x09FFC
#define GCC_GPU_MISC 0x71028
@@ -1505,6 +1506,11 @@
},
};
+static DEFINE_CLK_VOTER(ufs_phy_axi_emmc_vote_clk,
+ gcc_aggre_ufs_phy_axi_clk, 0);
+static DEFINE_CLK_VOTER(ufs_phy_axi_ufs_vote_clk,
+ gcc_aggre_ufs_phy_axi_clk, 0);
+
static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
.halt_reg = 0x82024,
.clkr = {
@@ -3780,6 +3786,8 @@
[MEASURE_ONLY_CNOC_CLK] = &measure_only_cnoc_clk.hw,
[MEASURE_ONLY_BIMC_CLK] = &measure_only_bimc_clk.hw,
[MEASURE_ONLY_IPA_2X_CLK] = &measure_only_ipa_2x_clk.hw,
+ [UFS_PHY_AXI_EMMC_VOTE_CLK] = &ufs_phy_axi_emmc_vote_clk.hw,
+ [UFS_PHY_AXI_UFS_VOTE_CLK] = &ufs_phy_axi_ufs_vote_clk.hw,
};
static struct clk_regmap *gcc_sdm845_clocks[] = {
@@ -4061,6 +4069,8 @@
.config = &gcc_sdm845_regmap_config,
.clks = gcc_sdm845_clocks,
.num_clks = ARRAY_SIZE(gcc_sdm845_clocks),
+ .hwclks = gcc_sdm845_hws,
+ .num_hwclks = ARRAY_SIZE(gcc_sdm845_hws),
.resets = gcc_sdm845_resets,
.num_resets = ARRAY_SIZE(gcc_sdm845_resets),
};
@@ -4279,9 +4289,8 @@
static int gcc_sdm845_probe(struct platform_device *pdev)
{
- struct clk *clk;
struct regmap *regmap;
- int i, ret = 0;
+ int ret = 0;
regmap = qcom_cc_map(pdev, &gcc_sdm845_desc);
if (IS_ERR(regmap))
@@ -4307,13 +4316,6 @@
if (ret)
return ret;
- /* Register the dummy measurement clocks */
- for (i = 0; i < ARRAY_SIZE(gcc_sdm845_hws); i++) {
- clk = devm_clk_register(&pdev->dev, gcc_sdm845_hws[i]);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
- }
-
ret = qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
if (ret) {
dev_err(&pdev->dev, "Failed to register GCC clocks\n");
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 6fb3cd2..a1d7fa4 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -599,6 +599,16 @@
if (!spin_trylock(&gpstates->gpstate_lock))
return;
+ /*
+ * If the timer has migrated to the different cpu then bring
+ * it back to one of the policy->cpus
+ */
+ if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
+ gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
+ add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
+ spin_unlock(&gpstates->gpstate_lock);
+ return;
+ }
gpstates->last_sampled_time += time_diff;
gpstates->elapsed_time += time_diff;
@@ -626,10 +636,8 @@
gpstates->last_gpstate_idx = pstate_to_idx(freq_data.gpstate_id);
gpstates->last_lpstate_idx = pstate_to_idx(freq_data.pstate_id);
+ set_pstate(&freq_data);
spin_unlock(&gpstates->gpstate_lock);
-
- /* Timer may get migrated to a different cpu on cpu hot unplug */
- smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
}
/*
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 21340e0..f521448 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -4,6 +4,7 @@
config ARM_CPUIDLE
bool "Generic ARM/ARM64 CPU idle Driver"
select DT_IDLE_STATES
+ select CPU_IDLE_MULTIPLE_DRIVERS
help
Select this to enable generic cpuidle driver for ARM.
It provides a generic idle driver whose idle states are configured
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index f440d38..f47c545 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/topology.h>
#include <asm/cpuidle.h>
@@ -44,7 +45,7 @@
return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx);
}
-static struct cpuidle_driver arm_idle_driver = {
+static struct cpuidle_driver arm_idle_driver __initdata = {
.name = "arm_idle",
.owner = THIS_MODULE,
/*
@@ -80,30 +81,42 @@
static int __init arm_idle_init(void)
{
int cpu, ret;
- struct cpuidle_driver *drv = &arm_idle_driver;
+ struct cpuidle_driver *drv;
struct cpuidle_device *dev;
- /*
- * Initialize idle states data, starting at index 1.
- * This driver is DT only, if no DT idle states are detected (ret == 0)
- * let the driver initialization fail accordingly since there is no
- * reason to initialize the idle driver if only wfi is supported.
- */
- ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
- if (ret <= 0)
- return ret ? : -ENODEV;
-
- ret = cpuidle_register_driver(drv);
- if (ret) {
- pr_err("Failed to register cpuidle driver\n");
- return ret;
- }
-
- /*
- * Call arch CPU operations in order to initialize
- * idle states suspend back-end specific data
- */
for_each_possible_cpu(cpu) {
+
+ drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
+ if (!drv) {
+ ret = -ENOMEM;
+ goto out_fail;
+ }
+
+ drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+ /*
+ * Initialize idle states data, starting at index 1. This
+ * driver is DT only, if no DT idle states are detected (ret
+ * == 0) let the driver initialization fail accordingly since
+ * there is no reason to initialize the idle driver if only
+ * wfi is supported.
+ */
+ ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
+ if (ret <= 0) {
+ ret = ret ? : -ENODEV;
+ goto out_kfree_drv;
+ }
+
+ ret = cpuidle_register_driver(drv);
+ if (ret) {
+ pr_err("Failed to register cpuidle driver\n");
+ goto out_kfree_drv;
+ }
+
+ /*
+ * Call arch CPU operations in order to initialize
+ * idle states suspend back-end specific data
+ */
ret = arm_cpuidle_init(cpu);
/*
@@ -115,14 +128,14 @@
if (ret) {
pr_err("CPU %d failed to init idle CPU ops\n", cpu);
- goto out_fail;
+ goto out_unregister_drv;
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
pr_err("Failed to allocate cpuidle device\n");
ret = -ENOMEM;
- goto out_fail;
+ goto out_unregister_drv;
}
dev->cpu = cpu;
@@ -130,21 +143,28 @@
if (ret) {
pr_err("Failed to register cpuidle device for CPU %d\n",
cpu);
- kfree(dev);
- goto out_fail;
+ goto out_kfree_dev;
}
}
return 0;
+
+out_kfree_dev:
+ kfree(dev);
+out_unregister_drv:
+ cpuidle_unregister_driver(drv);
+out_kfree_drv:
+ kfree(drv);
out_fail:
while (--cpu >= 0) {
dev = per_cpu(cpuidle_devices, cpu);
+ drv = cpuidle_get_cpu_driver(dev);
cpuidle_unregister_device(dev);
+ cpuidle_unregister_driver(drv);
kfree(dev);
+ kfree(drv);
}
- cpuidle_unregister_driver(drv);
-
return ret;
}
device_initcall(arm_idle_init);
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index f15267e..199d573 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -77,6 +77,8 @@
#define QCOM_ICE_ENCRYPT 0x1
#define QCOM_ICE_DECRYPT 0x2
#define QCOM_SECT_LEN_IN_BYTE 512
+#define QCOM_UD_FOOTER_SIZE 0x4000
+#define QCOM_UD_FOOTER_SECS (QCOM_UD_FOOTER_SIZE / QCOM_SECT_LEN_IN_BYTE)
struct ice_clk_info {
struct list_head list;
@@ -127,8 +129,6 @@
};
static int ice_fde_flag;
-static unsigned long userdata_start;
-static unsigned long userdata_end;
static struct ice_crypto_setting ice_data;
static int qti_ice_setting_config(struct request *req,
@@ -160,17 +160,21 @@
memcpy(&setting->crypto_data, crypto_data,
sizeof(setting->crypto_data));
- if (rq_data_dir(req) == WRITE &&
- (ice_fde_flag & QCOM_ICE_ENCRYPT))
- setting->encr_bypass = false;
- else if (rq_data_dir(req) == READ &&
- (ice_fde_flag & QCOM_ICE_DECRYPT))
- setting->decr_bypass = false;
- else {
+ switch (rq_data_dir(req)) {
+ case WRITE:
+ if (!ice_fde_flag || (ice_fde_flag & QCOM_ICE_ENCRYPT))
+ setting->encr_bypass = false;
+ break;
+ case READ:
+ if (!ice_fde_flag || (ice_fde_flag & QCOM_ICE_DECRYPT))
+ setting->decr_bypass = false;
+ break;
+ default:
/* Should I say BUG_ON */
setting->encr_bypass = true;
setting->decr_bypass = true;
- pr_debug("%s direction unknown", __func__);
+ pr_debug("%s(): direction unknown\n", __func__);
+ break;
}
}
@@ -184,26 +188,6 @@
}
EXPORT_SYMBOL(qcom_ice_set_fde_flag);
-int qcom_ice_set_fde_conf(sector_t s_sector, sector_t size,
- int index, int mode)
-{
- userdata_start = s_sector;
- userdata_end = s_sector + size;
- if (INT_MAX - s_sector < size) {
- WARN_ON(1);
- return -EINVAL;
- }
- ice_data.key_index = index;
- ice_data.algo_mode = mode;
- ice_data.key_size = ICE_CRYPTO_KEY_SIZE_256;
- ice_data.key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
-
- pr_debug("%s sector info set start %lu end %lu\n", __func__,
- userdata_start, userdata_end);
- return 0;
-}
-EXPORT_SYMBOL(qcom_ice_set_fde_conf);
-
static int qcom_ice_enable_clocks(struct ice_device *, bool);
#ifdef CONFIG_MSM_BUS_SCALING
@@ -1486,10 +1470,13 @@
struct request *req,
struct ice_data_setting *setting, bool async)
{
+ struct ice_crypto_setting *crypto_data;
struct ice_crypto_setting pfk_crypto_data = {0};
int ret = 0;
bool is_pfe = false;
sector_t data_size;
+ union map_info *info;
+ unsigned long sec_end = 0;
if (!pdev || !req) {
pr_err("%s: Invalid params passed\n", __func__);
@@ -1525,22 +1512,46 @@
&pfk_crypto_data, setting);
}
- if (ice_fde_flag == 0)
- return 0;
-
- if ((req->__sector >= userdata_start) &&
- (req->__sector < userdata_end)) {
- /*
- * Ugly hack to address non-block-size aligned userdata end address in
- * eMMC based devices.
- */
- data_size = req->__data_len/QCOM_SECT_LEN_IN_BYTE;
-
- if ((req->__sector + data_size) > userdata_end)
- return 0;
- else
+ if (!ice_fde_flag) {
+ if (bio_flagged(req->bio, BIO_INLINECRYPT)) {
+ info = dm_get_rq_mapinfo(req);
+ if (!info) {
+ pr_debug("%s info not available in request\n",
+ __func__);
+ return 0;
+ }
+ crypto_data = (struct ice_crypto_setting *)info->ptr;
+ if (!crypto_data) {
+ pr_err("%s crypto_data not available in req\n",
+ __func__);
+ return -EINVAL;
+ }
return qti_ice_setting_config(req, pdev,
- &ice_data, setting);
+ crypto_data, setting);
+ }
+ return 0;
+ }
+
+ if (req->part && req->part->info && req->part->info->volname[0]) {
+ if (!strcmp(req->part->info->volname, "userdata")) {
+ sec_end = req->part->start_sect + req->part->nr_sects -
+ QCOM_UD_FOOTER_SECS;
+ if ((req->__sector >= req->part->start_sect) &&
+ (req->__sector < sec_end)) {
+ /*
+ * Ugly hack to address non-block-size aligned
+ * userdata end address in eMMC based devices.
+ */
+ data_size = req->__data_len /
+ QCOM_SECT_LEN_IN_BYTE;
+
+ if ((req->__sector + data_size) > sec_end)
+ return 0;
+ else
+ return qti_ice_setting_config(req, pdev,
+ &ice_data, setting);
+ }
+ }
}
/*
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 42c060c..7c71722 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1116,10 +1116,10 @@
return count;
}
-int talitos_sg_map(struct device *dev, struct scatterlist *src,
- unsigned int len, struct talitos_edesc *edesc,
- struct talitos_ptr *ptr,
- int sg_count, unsigned int offset, int tbl_off)
+static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr, int sg_count,
+ unsigned int offset, int tbl_off, int elen)
{
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1130,7 +1130,7 @@
}
to_talitos_ptr_len(ptr, len, is_sec1);
- to_talitos_ptr_ext_set(ptr, 0, is_sec1);
+ to_talitos_ptr_ext_set(ptr, elen, is_sec1);
if (sg_count == 1) {
to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
@@ -1140,7 +1140,7 @@
to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
return sg_count;
}
- sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
+ sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
&edesc->link_tbl[tbl_off]);
if (sg_count == 1) {
/* Only one segment now, so no link tbl needed*/
@@ -1154,6 +1154,15 @@
return sg_count;
}
+static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr, int sg_count,
+ unsigned int offset, int tbl_off)
+{
+ return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
+ tbl_off, 0);
+}
+
/*
* fill in and submit ipsec_esp descriptor
*/
@@ -1171,7 +1180,7 @@
unsigned int ivsize = crypto_aead_ivsize(aead);
int tbl_off = 0;
int sg_count, ret;
- int sg_link_tbl_len;
+ int elen = 0;
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1225,20 +1234,12 @@
* extent is bytes of HMAC postpended to ciphertext,
* typically 12 for ipsec
*/
- to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1);
- to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1);
+ if ((desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
+ (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
+ elen = authsize;
- sg_link_tbl_len = cryptlen;
-
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
- to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
-
- if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
- sg_link_tbl_len += authsize;
- }
-
- ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
- &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
+ ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
+ sg_count, areq->assoclen, tbl_off, elen);
if (ret > 1) {
tbl_off += ret;
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 7053bb4..1936383 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -305,7 +305,8 @@
poll_wait(file, &sync_file->wq, wait);
- if (!test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
+ if (list_empty(&sync_file->cb.node) &&
+ !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
if (fence_add_callback(sync_file->fence, &sync_file->cb,
fence_check_cb_func) < 0)
wake_up_all(&sync_file->wq);
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 03a5925..a9daf71 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -256,7 +256,7 @@
if (set)
reg |= bit;
else
- reg &= bit;
+ reg &= ~bit;
iowrite32(reg, addr);
spin_unlock_irqrestore(&gpio->lock, flags);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 4f54ff4..56b2419 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -425,7 +425,7 @@
struct gpiohandle_request handlereq;
struct linehandle_state *lh;
struct file *file;
- int fd, i, ret;
+ int fd, i, count = 0, ret;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
return -EFAULT;
@@ -471,6 +471,7 @@
if (ret)
goto out_free_descs;
lh->descs[i] = desc;
+ count = i;
if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@@ -537,7 +538,7 @@
out_put_unused_fd:
put_unused_fd(fd);
out_free_descs:
- for (; i >= 0; i--)
+ for (i = 0; i < count; i++)
gpiod_free(lh->descs[i]);
kfree(lh->label);
out_free_lh:
@@ -794,7 +795,7 @@
desc = &gdev->descs[offset];
ret = gpiod_request(desc, le->label);
if (ret)
- goto out_free_desc;
+ goto out_free_label;
le->desc = desc;
le->eflags = eflags;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index a88d365..564362e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1484,10 +1484,11 @@
static const u32 vgpr_init_regs[] =
{
mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
- mmCOMPUTE_RESOURCE_LIMITS, 0,
+ mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
mmCOMPUTE_NUM_THREAD_X, 256*4,
mmCOMPUTE_NUM_THREAD_Y, 1,
mmCOMPUTE_NUM_THREAD_Z, 1,
+ mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
mmCOMPUTE_PGM_RSRC2, 20,
mmCOMPUTE_USER_DATA_0, 0xedcedc00,
mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1504,10 +1505,11 @@
static const u32 sgpr1_init_regs[] =
{
mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
- mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
+ mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
mmCOMPUTE_NUM_THREAD_X, 256*5,
mmCOMPUTE_NUM_THREAD_Y, 1,
mmCOMPUTE_NUM_THREAD_Z, 1,
+ mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
mmCOMPUTE_PGM_RSRC2, 20,
mmCOMPUTE_USER_DATA_0, 0xedcedc00,
mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1528,6 +1530,7 @@
mmCOMPUTE_NUM_THREAD_X, 256*5,
mmCOMPUTE_NUM_THREAD_Y, 1,
mmCOMPUTE_NUM_THREAD_Z, 1,
+ mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
mmCOMPUTE_PGM_RSRC2, 20,
mmCOMPUTE_USER_DATA_0, 0xedcedc00,
mmCOMPUTE_USER_DATA_1, 0xedcedc01,
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index afec232..cfd80bc 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -53,7 +53,9 @@
}
drm_mode_connector_update_edid_property(connector, edid);
- return drm_add_edid_modes(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ return ret;
fallback:
/*
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index a7b2a75..cdb5358 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -322,19 +322,44 @@
{
uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
ssize_t ret;
+ int retry;
if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return 0;
- ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
- &tmds_oen, sizeof(tmds_oen));
- if (ret) {
- DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
- enable ? "enable" : "disable");
- return ret;
+ /*
+ * LSPCON adapters in low-power state may ignore the first write, so
+ * read back and verify the written value a few times.
+ */
+ for (retry = 0; retry < 3; retry++) {
+ uint8_t tmp;
+
+ ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+ &tmds_oen, sizeof(tmds_oen));
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
+ enable ? "enable" : "disable",
+ retry + 1);
+ return ret;
+ }
+
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
+ &tmp, sizeof(tmp));
+ if (ret) {
+ DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
+ enable ? "enabling" : "disabling",
+ retry + 1);
+ return ret;
+ }
+
+ if (tmp == tmds_oen)
+ return 0;
}
- return 0;
+ DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
+ enable ? "enabling" : "disabling");
+
+ return -EIO;
}
EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 36a665f..e23748c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3681,7 +3681,11 @@
struct intel_display_error_state *error);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
+ u32 val, int timeout_us);
+#define sandybridge_pcode_write(dev_priv, mbox, val) \
+ sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
+
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ce32303..c185625 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6012,8 +6012,8 @@
/* Inform power controller of upcoming frequency change */
mutex_lock(&dev_priv->rps.hw_lock);
- ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000);
+ ret = sandybridge_pcode_write_timeout(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 2000);
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret) {
@@ -6044,8 +6044,9 @@
I915_WRITE(CDCLK_CTL, val);
mutex_lock(&dev_priv->rps.hw_lock);
- ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- DIV_ROUND_UP(cdclk, 25000));
+ ret = sandybridge_pcode_write_timeout(dev_priv,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ DIV_ROUND_UP(cdclk, 25000), 2000);
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret) {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e1d47d5..3517c0e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -321,7 +321,8 @@
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
POSTING_READ(lvds_encoder->reg);
- if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
+
+ if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(intel_connector);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 49de476..05427d2 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7913,8 +7913,8 @@
return 0;
}
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
- u32 mbox, u32 val)
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
+ u32 mbox, u32 val, int timeout_us)
{
int status;
@@ -7935,7 +7935,7 @@
if (intel_wait_for_register_fw(dev_priv,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
- 500)) {
+ timeout_us)) {
DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index ec9023b..d53e805 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -80,6 +80,7 @@
struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
if (bo->validated_shader) {
+ kfree(bo->validated_shader->uniform_addr_offsets);
kfree(bo->validated_shader->texture_samples);
kfree(bo->validated_shader);
bo->validated_shader = NULL;
@@ -328,6 +329,7 @@
}
if (bo->validated_shader) {
+ kfree(bo->validated_shader->uniform_addr_offsets);
kfree(bo->validated_shader->texture_samples);
kfree(bo->validated_shader);
bo->validated_shader = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 881bf48..7505655 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -533,7 +533,7 @@
* the scl fields here.
*/
if (num_planes == 1) {
- scl0 = vc4_get_scl_field(state, 1);
+ scl0 = vc4_get_scl_field(state, 0);
scl1 = scl0;
} else {
scl0 = vc4_get_scl_field(state, 1);
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 917321c..19a5bde8 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -874,6 +874,7 @@
fail:
kfree(validation_state.branch_targets);
if (validated_shader) {
+ kfree(validated_shader->uniform_addr_offsets);
kfree(validated_shader->texture_samples);
kfree(validated_shader);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5a0f8a7..52436b3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -324,7 +324,7 @@
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
if (ret == -ENOSPC) {
spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
spin_lock(&vgdev->ctrlq.qlock);
goto retry;
} else {
@@ -399,7 +399,7 @@
ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
if (ret == -ENOSPC) {
spin_unlock(&vgdev->cursorq.qlock);
- wait_event(vgdev->cursorq.ack_queue, vq->num_free);
+ wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
spin_lock(&vgdev->cursorq.qlock);
goto retry;
} else {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 87086af..33ca24a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2014,6 +2014,7 @@
vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
out_fence, NULL);
+ vmw_dmabuf_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index e5c8222..4f98912 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1332,7 +1332,7 @@
iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
if (iommu_regs)
adreno_set_protected_registers(adreno_dev, &index,
- iommu_regs->base, iommu_regs->range);
+ iommu_regs->base, ilog2(iommu_regs->range));
}
static void a3xx_start(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c
index 771d035..432e98d 100644
--- a/drivers/gpu/msm/adreno_a4xx.c
+++ b/drivers/gpu/msm/adreno_a4xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -524,7 +524,7 @@
iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
if (iommu_regs)
adreno_set_protected_registers(adreno_dev, &index,
- iommu_regs->base, iommu_regs->range);
+ iommu_regs->base, ilog2(iommu_regs->range));
}
static struct adreno_snapshot_sizes a4xx_snap_sizes = {
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 876b7c9..2a3ae3e 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -386,7 +386,7 @@
iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
if (iommu_regs)
adreno_set_protected_registers(adreno_dev, &index,
- iommu_regs->base, iommu_regs->range);
+ iommu_regs->base, ilog2(iommu_regs->range));
}
/*
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 37330cb..517b813 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -446,7 +446,7 @@
if (mmu_prot) {
mmu_base = mmu_prot->base;
- mmu_range = 1 << mmu_prot->range;
+ mmu_range = mmu_prot->range;
req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
}
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index a2d6071..3b55fc6 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2388,7 +2388,6 @@
struct kgsl_gpuobj_import *param = data;
struct kgsl_mem_entry *entry;
int ret, fd = -1;
- struct kgsl_mmu *mmu = &dev_priv->device->mmu;
entry = kgsl_mem_entry_create();
if (entry == NULL)
@@ -2402,18 +2401,10 @@
| KGSL_MEMFLAGS_FORCE_32BIT
| KGSL_MEMFLAGS_IOCOHERENT;
- /* Disable IO coherence if it is not supported on the chip */
- if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
- param->flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
-
if (kgsl_is_compat_task())
param->flags |= KGSL_MEMFLAGS_FORCE_32BIT;
- entry->memdesc.flags = param->flags;
-
- if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
- entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
-
+ kgsl_memdesc_init(dev_priv->device, &entry->memdesc, param->flags);
if (param->type == KGSL_USER_MEM_TYPE_ADDR)
ret = _gpuobj_map_useraddr(dev_priv->device, private->pagetable,
entry, param);
@@ -2652,6 +2643,7 @@
struct kgsl_process_private *private = dev_priv->process_priv;
struct kgsl_mmu *mmu = &dev_priv->device->mmu;
unsigned int memtype;
+ uint64_t flags;
/*
* If content protection is not enabled and secure buffer
@@ -2688,30 +2680,17 @@
* Note: CACHEMODE is ignored for this call. Caching should be
* determined by type of allocation being mapped.
*/
- param->flags &= KGSL_MEMFLAGS_GPUREADONLY
- | KGSL_MEMTYPE_MASK
- | KGSL_MEMALIGN_MASK
- | KGSL_MEMFLAGS_USE_CPU_MAP
- | KGSL_MEMFLAGS_SECURE
- | KGSL_MEMFLAGS_IOCOHERENT;
-
- /* Disable IO coherence if it is not supported on the chip */
- if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
- param->flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
-
- entry->memdesc.flags = (uint64_t) param->flags;
+ flags = param->flags & (KGSL_MEMFLAGS_GPUREADONLY
+ | KGSL_MEMTYPE_MASK
+ | KGSL_MEMALIGN_MASK
+ | KGSL_MEMFLAGS_USE_CPU_MAP
+ | KGSL_MEMFLAGS_SECURE
+ | KGSL_MEMFLAGS_IOCOHERENT);
if (kgsl_is_compat_task())
- entry->memdesc.flags |= KGSL_MEMFLAGS_FORCE_32BIT;
+ flags |= KGSL_MEMFLAGS_FORCE_32BIT;
- if (!kgsl_mmu_use_cpu_map(mmu))
- entry->memdesc.flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
-
- if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
- entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
-
- if (param->flags & KGSL_MEMFLAGS_SECURE)
- entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
+ kgsl_memdesc_init(dev_priv->device, &entry->memdesc, flags);
switch (memtype) {
case KGSL_MEM_ENTRY_USER:
@@ -3107,10 +3086,6 @@
| KGSL_MEMFLAGS_FORCE_32BIT
| KGSL_MEMFLAGS_IOCOHERENT;
- /* Turn off SVM if the system doesn't support it */
- if (!kgsl_mmu_use_cpu_map(mmu))
- flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
-
/* Return not supported error if secure memory isn't enabled */
if (!kgsl_mmu_is_secured(mmu) &&
(flags & KGSL_MEMFLAGS_SECURE)) {
@@ -3119,10 +3094,6 @@
return ERR_PTR(-EOPNOTSUPP);
}
- /* Secure memory disables advanced addressing modes */
- if (flags & KGSL_MEMFLAGS_SECURE)
- flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
-
/* Cap the alignment bits to the highest number we can handle */
align = MEMFLAGS(flags, KGSL_MEMALIGN_MASK, KGSL_MEMALIGN_SHIFT);
if (align >= ilog2(KGSL_MAX_ALIGN)) {
@@ -3141,20 +3112,10 @@
flags = kgsl_filter_cachemode(flags);
- /* Disable IO coherence if it is not supported on the chip */
- if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
- flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
-
entry = kgsl_mem_entry_create();
if (entry == NULL)
return ERR_PTR(-ENOMEM);
- if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
- entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
-
- if (flags & KGSL_MEMFLAGS_SECURE)
- entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
-
ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
size, flags);
if (ret != 0)
@@ -3338,6 +3299,7 @@
struct kgsl_process_private *process = dev_priv->process_priv;
struct kgsl_sparse_phys_alloc *param = data;
struct kgsl_mem_entry *entry;
+ uint64_t flags;
int ret;
int id;
@@ -3370,11 +3332,12 @@
entry->id = id;
entry->priv = process;
- entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_PHYS;
- kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));
+ flags = KGSL_MEMFLAGS_SPARSE_PHYS |
+ ((ilog2(param->pagesize) << KGSL_MEMALIGN_SHIFT) &
+ KGSL_MEMALIGN_MASK);
ret = kgsl_allocate_user(dev_priv->device, &entry->memdesc,
- param->size, entry->memdesc.flags);
+ param->size, flags);
if (ret)
goto err_remove_idr;
@@ -3463,7 +3426,8 @@
if (entry == NULL)
return -ENOMEM;
- entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_VIRT;
+ kgsl_memdesc_init(dev_priv->device, &entry->memdesc,
+ KGSL_MEMFLAGS_SPARSE_VIRT);
entry->memdesc.size = param->size;
entry->memdesc.cur_bindings = 0;
kgsl_memdesc_set_align(&entry->memdesc, ilog2(param->pagesize));
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 325d44a..3539cda 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -262,13 +262,12 @@
return;
}
- gpu_qdss_desc.flags = 0;
+ kgsl_memdesc_init(device, &gpu_qdss_desc, 0);
gpu_qdss_desc.priv = 0;
gpu_qdss_desc.physaddr = gpu_qdss_entry[0];
gpu_qdss_desc.size = gpu_qdss_entry[1];
gpu_qdss_desc.pagetable = NULL;
gpu_qdss_desc.ops = NULL;
- gpu_qdss_desc.dev = device->dev->parent;
gpu_qdss_desc.hostptr = NULL;
result = memdesc_sg_dma(&gpu_qdss_desc, gpu_qdss_desc.physaddr,
@@ -307,13 +306,12 @@
return;
}
- gpu_qtimer_desc.flags = 0;
+ kgsl_memdesc_init(device, &gpu_qtimer_desc, 0);
gpu_qtimer_desc.priv = 0;
gpu_qtimer_desc.physaddr = gpu_qtimer_entry[0];
gpu_qtimer_desc.size = gpu_qtimer_entry[1];
gpu_qtimer_desc.pagetable = NULL;
gpu_qtimer_desc.ops = NULL;
- gpu_qtimer_desc.dev = device->dev->parent;
gpu_qtimer_desc.hostptr = NULL;
result = memdesc_sg_dma(&gpu_qtimer_desc, gpu_qtimer_desc.physaddr,
@@ -1486,6 +1484,7 @@
{
int ret;
+ kgsl_memdesc_init(device, &iommu->setstate, 0);
ret = kgsl_sharedmem_alloc_contig(device, &iommu->setstate, PAGE_SIZE);
if (!ret) {
@@ -2630,7 +2629,7 @@
return -EINVAL;
}
iommu->protect.base = reg_val[0] / sizeof(u32);
- iommu->protect.range = ilog2(reg_val[1] / sizeof(u32));
+ iommu->protect.range = reg_val[1] / sizeof(u32);
of_property_for_each_string(node, "clock-names", prop, cname) {
struct clk *c = devm_clk_get(&pdev->dev, cname);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 14653ea..df88b9a 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -413,7 +413,7 @@
{
int ret;
- memdesc->flags = flags;
+ kgsl_memdesc_init(device, memdesc, flags);
if (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE)
ret = kgsl_sharedmem_alloc_contig(device, memdesc, size);
@@ -769,6 +769,40 @@
}
EXPORT_SYMBOL(kgsl_cache_range_op);
+void kgsl_memdesc_init(struct kgsl_device *device,
+ struct kgsl_memdesc *memdesc, uint64_t flags)
+{
+ struct kgsl_mmu *mmu = &device->mmu;
+ unsigned int align;
+
+ memset(memdesc, 0, sizeof(*memdesc));
+ /* Turn off SVM if the system doesn't support it */
+ if (!kgsl_mmu_use_cpu_map(mmu))
+ flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
+
+ /* Secure memory disables advanced addressing modes */
+ if (flags & KGSL_MEMFLAGS_SECURE)
+ flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
+
+ /* Disable IO coherence if it is not supported on the chip */
+ if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
+ flags &= ~((uint64_t) KGSL_MEMFLAGS_IOCOHERENT);
+
+ if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
+ memdesc->priv |= KGSL_MEMDESC_GUARD_PAGE;
+
+ if (flags & KGSL_MEMFLAGS_SECURE)
+ memdesc->priv |= KGSL_MEMDESC_SECURE;
+
+ memdesc->flags = flags;
+ memdesc->dev = device->dev->parent;
+
+ align = max_t(unsigned int,
+ (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT,
+ ilog2(PAGE_SIZE));
+ kgsl_memdesc_set_align(memdesc, align);
+}
+
int
kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
uint64_t size)
@@ -969,8 +1003,6 @@
if (memdesc->pages)
kgsl_free(memdesc->pages);
-
- memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 55bb34f..976752d 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -57,6 +57,9 @@
uint64_t offset, uint64_t size,
unsigned int op);
+void kgsl_memdesc_init(struct kgsl_device *device,
+ struct kgsl_memdesc *memdesc, uint64_t flags);
+
void kgsl_process_init_sysfs(struct kgsl_device *device,
struct kgsl_process_private *private);
void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
@@ -282,8 +285,8 @@
{
int ret;
- memdesc->flags = flags;
- memdesc->priv = priv;
+ kgsl_memdesc_init(device, memdesc, flags);
+ memdesc->priv |= priv;
if (((memdesc->priv & KGSL_MEMDESC_CONTIG) != 0) ||
(kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE))
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index e6fe21a..b32bf7e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -243,6 +243,7 @@
struct i2c_adapter adapter;
unsigned long smba;
unsigned char original_hstcfg;
+ unsigned char original_slvcmd;
struct pci_dev *pci_dev;
unsigned int features;
@@ -962,13 +963,24 @@
if (!priv->host_notify)
return -ENOMEM;
- outb_p(SMBSLVCMD_HST_NTFY_INTREN, SMBSLVCMD(priv));
+ if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
+ outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
+ SMBSLVCMD(priv));
+
/* clear Host Notify bit to allow a new notification */
outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
return 0;
}
+static void i801_disable_host_notify(struct i801_priv *priv)
+{
+ if (!(priv->features & FEATURE_HOST_NOTIFY))
+ return;
+
+ outb_p(priv->original_slvcmd, SMBSLVCMD(priv));
+}
+
static const struct i2c_algorithm smbus_algorithm = {
.smbus_xfer = i801_access,
.functionality = i801_func,
@@ -1589,6 +1601,10 @@
outb_p(inb_p(SMBAUXCTL(priv)) &
~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
+ /* Remember original Host Notify setting */
+ if (priv->features & FEATURE_HOST_NOTIFY)
+ priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
+
/* Default timeout in interrupt mode: 200 ms */
priv->adapter.timeout = HZ / 5;
@@ -1666,6 +1682,7 @@
pm_runtime_forbid(&dev->dev);
pm_runtime_get_noresume(&dev->dev);
+ i801_disable_host_notify(priv);
i801_del_mux(priv);
i2c_del_adapter(&priv->adapter);
i801_acpi_remove(priv);
@@ -1679,6 +1696,15 @@
*/
}
+static void i801_shutdown(struct pci_dev *dev)
+{
+ struct i801_priv *priv = pci_get_drvdata(dev);
+
+ /* Restore config registers to avoid hard hang on some systems */
+ i801_disable_host_notify(priv);
+ pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
+}
+
#ifdef CONFIG_PM
static int i801_suspend(struct device *dev)
{
@@ -1711,6 +1737,7 @@
.id_table = i801_ids,
.probe = i801_probe,
.remove = i801_remove,
+ .shutdown = i801_shutdown,
.driver = {
.pm = &i801_pm_ops,
},
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 760ef60..15f4bdf 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -999,8 +999,7 @@
return -ENOMEM;
ib_comp_wq = alloc_workqueue("ib-comp-wq",
- WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
- WQ_UNBOUND_MAX_ACTIVE);
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
if (!ib_comp_wq) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index cb79d17..f2f1c9f 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -676,7 +676,7 @@
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- if (!rdma_addr_size_in6(&cmd.src_addr) ||
+ if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
!rdma_addr_size_in6(&cmd.dst_addr))
return -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index b85a1a9..9e0f2cc 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -856,6 +856,11 @@
rdev->status_page->db_off = 0;
+ init_completion(&rdev->rqt_compl);
+ init_completion(&rdev->pbl_compl);
+ kref_init(&rdev->rqt_kref);
+ kref_init(&rdev->pbl_kref);
+
return 0;
err_free_status_page:
free_page((unsigned long)rdev->status_page);
@@ -872,12 +877,14 @@
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
- destroy_workqueue(rdev->free_workq);
kfree(rdev->wr_log);
free_page((unsigned long)rdev->status_page);
c4iw_pblpool_destroy(rdev);
c4iw_rqtpool_destroy(rdev);
+ wait_for_completion(&rdev->pbl_compl);
+ wait_for_completion(&rdev->rqt_compl);
c4iw_destroy_resource(&rdev->resource);
+ destroy_workqueue(rdev->free_workq);
}
static void c4iw_dealloc(struct uld_ctx *ctx)
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7d54066..896dff7 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -186,6 +186,10 @@
struct wr_log_entry *wr_log;
int wr_log_size;
struct workqueue_struct *free_workq;
+ struct completion rqt_compl;
+ struct completion pbl_compl;
+ struct kref rqt_kref;
+ struct kref pbl_kref;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 67df71a..803c677 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -260,12 +260,22 @@
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
rdev->stats.pbl.max = rdev->stats.pbl.cur;
+ kref_get(&rdev->pbl_kref);
} else
rdev->stats.pbl.fail++;
mutex_unlock(&rdev->stats.lock);
return (u32)addr;
}
+static void destroy_pblpool(struct kref *kref)
+{
+ struct c4iw_rdev *rdev;
+
+ rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
+ gen_pool_destroy(rdev->pbl_pool);
+ complete(&rdev->pbl_compl);
+}
+
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
@@ -273,6 +283,7 @@
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock);
gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+ kref_put(&rdev->pbl_kref, destroy_pblpool);
}
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
@@ -312,7 +323,7 @@
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
- gen_pool_destroy(rdev->pbl_pool);
+ kref_put(&rdev->pbl_kref, destroy_pblpool);
}
/*
@@ -333,12 +344,22 @@
rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
rdev->stats.rqt.max = rdev->stats.rqt.cur;
+ kref_get(&rdev->rqt_kref);
} else
rdev->stats.rqt.fail++;
mutex_unlock(&rdev->stats.lock);
return (u32)addr;
}
+static void destroy_rqtpool(struct kref *kref)
+{
+ struct c4iw_rdev *rdev;
+
+ rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
+ gen_pool_destroy(rdev->rqt_pool);
+ complete(&rdev->rqt_compl);
+}
+
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
@@ -346,6 +367,7 @@
rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
mutex_unlock(&rdev->stats.lock);
gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+ kref_put(&rdev->rqt_kref, destroy_rqtpool);
}
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
@@ -383,7 +405,7 @@
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
- gen_pool_destroy(rdev->rqt_pool);
+ kref_put(&rdev->rqt_kref, destroy_rqtpool);
}
/*
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 84a97f3..ae1f90d 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1049,6 +1049,8 @@
return ERR_PTR(-ENOMEM);
dd->num_pports = nports;
dd->pport = (struct hfi1_pportdata *)(dd + 1);
+ dd->pcidev = pdev;
+ pci_set_drvdata(pdev, dd);
INIT_LIST_HEAD(&dd->list);
idr_preload(GFP_KERNEL);
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 335613a1..7176260 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -162,9 +162,6 @@
unsigned long len;
resource_size_t addr;
- dd->pcidev = pdev;
- pci_set_drvdata(pdev, dd);
-
addr = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 403df35..3cdcbfb 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -253,7 +253,11 @@
} else {
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
+ if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
+ return -EINVAL;
qp->rq.wqe_shift = ucmd->rq_wqe_shift;
+ if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
+ return -EINVAL;
qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
} else {
@@ -2164,18 +2168,18 @@
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
- if (rate == IB_RATE_PORT_CURRENT) {
+ if (rate == IB_RATE_PORT_CURRENT)
return 0;
- } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
- return -EINVAL;
- } else {
- while (rate != IB_RATE_2_5_GBPS &&
- !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
- MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
- --rate;
- }
- return rate + MLX5_STAT_RATE_OFFSET;
+ if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS)
+ return -EINVAL;
+
+ while (rate != IB_RATE_PORT_CURRENT &&
+ !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
+ MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
+ --rate;
+
+ return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
}
static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
@@ -2848,7 +2852,8 @@
* If we moved a kernel QP to RESET, clean up all old CQ
* entries and reinitialize the QP.
*/
- if (new_state == IB_QPS_RESET && !ibqp->uobject) {
+ if (new_state == IB_QPS_RESET &&
+ !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
ibqp->srq ? to_msrq(ibqp->srq) : NULL);
if (send_cq != recv_cq)
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 766bf26..5f04b2d 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -88,6 +88,7 @@
const struct input_device_id *id)
{
struct input_leds *leds;
+ struct input_led *led;
unsigned int num_leds;
unsigned int led_code;
int led_no;
@@ -119,14 +120,13 @@
led_no = 0;
for_each_set_bit(led_code, dev->ledbit, LED_CNT) {
- struct input_led *led = &leds->leds[led_no];
-
- led->handle = &leds->handle;
- led->code = led_code;
-
if (!input_led_info[led_code].name)
continue;
+ led = &leds->leds[led_no];
+ led->handle = &leds->handle;
+ led->code = led_code;
+
led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
dev_name(&dev->dev),
input_led_info[led_code].name);
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index 930424e..251d64c 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -521,7 +521,7 @@
if (!haptics)
return -ENOMEM;
- haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
+ haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
if (pdata) {
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index e5d185f..2613240 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -3028,6 +3028,15 @@
.driver_data = samus_platform_data,
},
{
+ /* Samsung Chromebook Pro */
+ .ident = "Samsung Chromebook Pro",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"),
+ },
+ .driver_data = samus_platform_data,
+ },
+ {
/* Other Google Chromebooks */
.ident = "Chromebook",
.matches = {
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
index 21a9e8f..21236e9 100644
--- a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
@@ -113,6 +113,13 @@
#define F12_WAKEUP_GESTURE_MODE 0x02
#define F12_UDG_DETECT 0x0f
+#define PWR_VTG_MIN_UV 2700000
+#define PWR_VTG_MAX_UV 3600000
+#define PWR_ACTIVE_LOAD_UA 2000
+#define I2C_VTG_MIN_UV 1710000
+#define I2C_VTG_MAX_UV 2000000
+#define I2C_ACTIVE_LOAD_UA 7000
+
static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
bool *was_in_bl_mode);
static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data);
@@ -3407,6 +3414,66 @@
return retval;
}
+static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA)
+{
+ return (regulator_count_voltages(reg) > 0) ?
+ regulator_set_load(reg, load_uA) : 0;
+}
+
+static int synaptics_rmi4_configure_reg(struct synaptics_rmi4_data *rmi4_data,
+ bool on)
+{
+ int retval;
+
+ if (on == false)
+ goto hw_shutdown;
+
+ if (rmi4_data->pwr_reg) {
+ if (regulator_count_voltages(rmi4_data->pwr_reg) > 0) {
+ retval = regulator_set_voltage(rmi4_data->pwr_reg,
+ PWR_VTG_MIN_UV, PWR_VTG_MAX_UV);
+ if (retval) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "regulator set_vtg failed retval =%d\n",
+ retval);
+ goto err_set_vtg_pwr;
+ }
+ }
+ }
+
+ if (rmi4_data->bus_reg) {
+ if (regulator_count_voltages(rmi4_data->bus_reg) > 0) {
+ retval = regulator_set_voltage(rmi4_data->bus_reg,
+ I2C_VTG_MIN_UV, I2C_VTG_MAX_UV);
+ if (retval) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "regulator set_vtg failed retval =%d\n",
+ retval);
+ goto err_set_vtg_bus;
+ }
+ }
+ }
+
+ return 0;
+
+err_set_vtg_bus:
+ if (rmi4_data->pwr_reg &&
+ regulator_count_voltages(rmi4_data->pwr_reg) > 0)
+ regulator_set_voltage(rmi4_data->pwr_reg, 0, PWR_VTG_MAX_UV);
+err_set_vtg_pwr:
+ return retval;
+
+hw_shutdown:
+ if (rmi4_data->pwr_reg &&
+ regulator_count_voltages(rmi4_data->pwr_reg) > 0)
+ regulator_set_voltage(rmi4_data->pwr_reg, 0, PWR_VTG_MAX_UV);
+ if (rmi4_data->bus_reg &&
+ regulator_count_voltages(rmi4_data->bus_reg) > 0)
+ regulator_set_voltage(rmi4_data->bus_reg, 0, I2C_VTG_MAX_UV);
+
+ return 0;
+}
+
static int synaptics_rmi4_get_reg(struct synaptics_rmi4_data *rmi4_data,
bool get)
{
@@ -3472,37 +3539,66 @@
}
if (rmi4_data->bus_reg) {
+ retval = reg_set_optimum_mode_check(rmi4_data->bus_reg,
+ I2C_ACTIVE_LOAD_UA);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Regulator set_opt failed rc=%d\n",
+ __func__, retval);
+ return retval;
+ }
+
retval = regulator_enable(rmi4_data->bus_reg);
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to enable bus pullup regulator\n",
__func__);
- goto exit;
+ goto err_bus_reg_en;
}
}
if (rmi4_data->pwr_reg) {
+ retval = reg_set_optimum_mode_check(rmi4_data->pwr_reg,
+ PWR_ACTIVE_LOAD_UA);
+ if (retval < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "%s: Regulator set_opt failed rc=%d\n",
+ __func__, retval);
+ goto disable_bus_reg;
+ }
+
retval = regulator_enable(rmi4_data->pwr_reg);
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to enable power regulator\n",
__func__);
- goto disable_bus_reg;
+ goto err_pwr_reg_en;
}
msleep(bdata->power_delay_ms);
}
return 0;
+err_pwr_reg_en:
+ reg_set_optimum_mode_check(rmi4_data->pwr_reg, 0);
+ goto disable_bus_reg;
+err_bus_reg_en:
+ reg_set_optimum_mode_check(rmi4_data->bus_reg, 0);
+
+ return retval;
+
disable_pwr_reg:
- if (rmi4_data->pwr_reg)
+ if (rmi4_data->pwr_reg) {
+ reg_set_optimum_mode_check(rmi4_data->pwr_reg, 0);
regulator_disable(rmi4_data->pwr_reg);
+ }
disable_bus_reg:
- if (rmi4_data->bus_reg)
+ if (rmi4_data->bus_reg) {
+ reg_set_optimum_mode_check(rmi4_data->bus_reg, 0);
regulator_disable(rmi4_data->bus_reg);
+ }
-exit:
return retval;
}
@@ -3976,6 +4072,14 @@
goto err_get_reg;
}
+ retval = synaptics_rmi4_configure_reg(rmi4_data, true);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "%s: Failed to configure regulators\n",
+ __func__);
+ goto err_configure_reg;
+ }
+
retval = synaptics_rmi4_enable_reg(rmi4_data, true);
if (retval < 0) {
dev_err(&pdev->dev,
@@ -4202,6 +4306,8 @@
err_enable_reg:
synaptics_rmi4_get_reg(rmi4_data, false);
+err_configure_reg:
+ synaptics_rmi4_configure_reg(rmi4_data, false);
err_get_reg:
kfree(rmi4_data);
@@ -4284,6 +4390,7 @@
}
synaptics_rmi4_enable_reg(rmi4_data, false);
+ synaptics_rmi4_configure_reg(rmi4_data, false);
synaptics_rmi4_get_reg(rmi4_data, false);
kfree(rmi4_data);
@@ -4557,6 +4664,7 @@
struct synaptics_rmi4_exp_fhandler *exp_fhandler;
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
int retval;
+ int lpm_uA;
if (rmi4_data->stay_awake)
return 0;
@@ -4565,6 +4673,17 @@
if (rmi4_data->enable_wakeup_gesture) {
if (!rmi4_data->suspend) {
+ /* Set lpm current for bus regulator */
+ lpm_uA = rmi4_data->hw_if->board_data->bus_lpm_cur_uA;
+ if (lpm_uA) {
+ retval = reg_set_optimum_mode_check(
+ rmi4_data->bus_reg, lpm_uA);
+ if (retval < 0)
+ dev_err(dev,
+ "Bus Regulator set_opt failed rc=%d\n",
+ retval);
+ }
+
synaptics_rmi4_wakeup_gesture(rmi4_data, true);
enable_irq_wake(rmi4_data->irq);
}
@@ -4594,7 +4713,8 @@
}
mutex_unlock(&exp_data.mutex);
- if (!rmi4_data->suspend && !rmi4_data->enable_wakeup_gesture)
+ if (!rmi4_data->suspend && !rmi4_data->enable_wakeup_gesture &&
+ !rmi4_data->hw_if->board_data->dont_disable_regs)
synaptics_rmi4_enable_reg(rmi4_data, false);
rmi4_data->suspend = true;
@@ -4623,6 +4743,18 @@
if (rmi4_data->enable_wakeup_gesture) {
if (rmi4_data->suspend) {
+ /* Set active current for the bus regulator */
+ if (rmi4_data->hw_if->board_data->bus_lpm_cur_uA) {
+ retval = reg_set_optimum_mode_check(
+ rmi4_data->bus_reg,
+ I2C_ACTIVE_LOAD_UA);
+ if (retval < 0)
+ dev_err(dev,
+ "Pwr regulator set_opt failed rc=%d\n",
+ retval);
+ }
+
+
synaptics_rmi4_wakeup_gesture(rmi4_data, false);
disable_irq_wake(rmi4_data->irq);
}
@@ -4631,7 +4763,8 @@
rmi4_data->current_page = MASK_8BIT;
- if (rmi4_data->suspend)
+ if (rmi4_data->suspend &&
+ !rmi4_data->hw_if->board_data->dont_disable_regs)
synaptics_rmi4_enable_reg(rmi4_data, true);
synaptics_rmi4_sleep_enable(rmi4_data, false);
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c
index f634f17..e078853 100644
--- a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_i2c.c
@@ -84,6 +84,9 @@
bdata->wakeup_gesture_en = of_property_read_bool(np,
"synaptics,wakeup-gestures-en");
+ bdata->dont_disable_regs = of_property_read_bool(np,
+ "synaptics,do-not-disable-regulators");
+
retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
if (retval < 0)
bdata->pwr_reg_name = NULL;
@@ -184,6 +187,10 @@
bdata->max_y_for_2d = -1;
}
+ retval = of_property_read_u32(np, "synaptics,bus-lpm-cur-uA",
+ &value);
+ bdata->bus_lpm_cur_uA = retval < 0 ? 0 : value;
+
bdata->swap_axes = of_property_read_bool(np, "synaptics,swap-axes");
bdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
bdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
diff --git a/drivers/media/platform/msm/camera_v2/Kconfig b/drivers/media/platform/msm/camera_v2/Kconfig
index cabc612..e5439d8 100644
--- a/drivers/media/platform/msm/camera_v2/Kconfig
+++ b/drivers/media/platform/msm/camera_v2/Kconfig
@@ -95,13 +95,22 @@
and also provides support for writing data in case of FLASH ROM.
Currently supports I2C, CCI and SPI protocol
+config MSM_ISP_V1
+ bool "QTI MSM Image Signal Processing interface support"
+ depends on MSMB_CAMERA
+ ---help---
+ Enable support for Image Signal Processing interface module.
+ This module acts as a crossbar between CSID and VFE. Output
+ of any CID of CSID can be routed to of pix or raw
+ data interface in VFE.
+
config MSM_ISPIF
bool "QTI MSM Image Signal Processing interface support"
depends on MSMB_CAMERA
---help---
Enable support for Image Signal Processing interface module.
This module acts as a crossbar between CSID and VFE. Output
- of any CID of CSID can be routed to of of pix or raw
+ of any CID of CSID can be routed to of pix or raw
data interface in VFE.
config MSM_ISPIF_V1
diff --git a/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c b/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
index 22f4891..d32a0f2 100644
--- a/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
+++ b/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
@@ -20,8 +20,6 @@
#define EMPTY_QSEECOM_HANDLE NULL
#define QSEECOM_SBUFF_SIZE SZ_128K
-#define MSM_CAMERA_TZ_UTIL_VERBOSE
-
#define MSM_CAMERA_TZ_BOOT_PROTECTED (false)
/* Update version major number in case the HLOS-TA interface is changed*/
diff --git a/drivers/media/platform/msm/camera_v2/isp/Makefile b/drivers/media/platform/msm/camera_v2/isp/Makefile
index 621d81d..d36b1e2 100644
--- a/drivers/media/platform/msm/camera_v2/isp/Makefile
+++ b/drivers/media/platform/msm/camera_v2/isp/Makefile
@@ -1,5 +1,10 @@
ccflags-y += -Idrivers/media/platform/msm/camera_v2
ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
ccflags-y += -Idrivers/media/platform/msm/camera_v2/common/
+ifeq ($(CONFIG_MSM_ISP_V1),y)
+obj-$(CONFIG_MSMB_CAMERA) += msm_isp_32.o msm_buf_mgr.o msm_isp_util_32.o msm_isp_axi_util_32.o msm_isp_stats_util_32.o
+obj-$(CONFIG_MSMB_CAMERA) += msm_isp32.o
+else
obj-$(CONFIG_MSMB_CAMERA) += msm_buf_mgr.o msm_isp_util.o msm_isp_axi_util.o msm_isp_stats_util.o
obj-$(CONFIG_MSMB_CAMERA) += msm_isp48.o msm_isp47.o msm_isp46.o msm_isp44.o msm_isp40.o msm_isp.o
+endif
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index 691b492..6196a8c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -189,7 +189,9 @@
int i, rc = -1;
int ret;
struct msm_isp_buffer_mapped_info *mapped_info;
+#ifndef CONFIG_MSM_ISP_V1
uint32_t accu_length = 0;
+#endif
struct msm_isp_bufq *bufq = NULL;
bufq = msm_isp_get_bufq(buf_mgr, buf_info->bufq_handle);
@@ -228,8 +230,12 @@
goto get_phy_err;
}
+#ifdef CONFIG_MSM_ISP_V1
+ mapped_info->paddr += qbuf_buf->planes[i].offset;
+#else
mapped_info->paddr += accu_length;
accu_length += qbuf_buf->planes[i].length;
+#endif
CDBG("%s: plane: %d addr:%pK\n",
__func__, i, (void *)mapped_info->paddr);
@@ -732,10 +738,17 @@
spin_lock_irqsave(&bufq->bufq_lock, flags);
buf_info->frame_id = frame_id;
+#ifdef CONFIG_MSM_ISP_V1
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_DEQUEUED) {
+ buf_info->state = MSM_ISP_BUFFER_STATE_DIVERTED;
+ buf_info->tv = tv;
+ }
+#else
if (BUF_SRC(bufq->stream_id) == MSM_ISP_BUFFER_SRC_NATIVE) {
buf_info->state = MSM_ISP_BUFFER_STATE_DIVERTED;
buf_info->tv = tv;
}
+#endif
spin_unlock_irqrestore(&bufq->bufq_lock, flags);
return 0;
}
@@ -1077,7 +1090,6 @@
}
}
-
/**
* msm_isp_buf_put_scratch() - Release scratch buffers
* @buf_mgr: The buffer structure for h/w
@@ -1220,7 +1232,6 @@
return rc;
}
-
static int msm_isp_init_isp_buf_mgr(struct msm_isp_buf_mgr *buf_mgr,
const char *ctx_name)
{
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 6da1360..a95917c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -12,21 +12,15 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-
#include "msm_isp32.h"
-#include "msm_isp_util.h"
-#include "msm_isp_axi_util.h"
-#include "msm_isp_stats_util.h"
-#include "msm_isp.h"
+#include "msm_isp_util_32.h"
+#include "msm_isp_axi_util_32.h"
+#include "msm_isp_stats_util_32.h"
+#include "msm_isp_32.h"
#include "msm.h"
#include "msm_camera_io_util.h"
-static const struct platform_device_id msm_vfe32_dev_id[] = {
- {"msm_vfe32", (kernel_ulong_t) &vfe32_hw_info},
- {}
-};
-
-#define VFE32_BURST_LEN 2
+#define VFE32_BURST_LEN 3
#define VFE32_UB_SIZE 1024
#define VFE32_UB_SIZE_32KB 2048
#define VFE32_EQUAL_SLICE_UB 194
@@ -36,7 +30,7 @@
#define VFE32_XBAR_BASE(idx) (0x40 + 0x4 * (idx / 4))
#define VFE32_XBAR_SHIFT(idx) ((idx % 4) * 8)
#define VFE32_PING_PONG_BASE(wm, ping_pong) \
- (VFE32_WM_BASE(wm) + 0x4 * (1 + ((~ping_pong) & 0x1)))
+ (VFE32_WM_BASE(wm) + 0x4 * (1 + (~(ping_pong >> wm) & 0x1)))
static uint8_t stats_pingpong_offset_map[] = {
7, 8, 9, 10, 11, 12, 13};
@@ -60,16 +54,6 @@
{"csi_vfe_clk", -1},
};
-static uint32_t msm_vfe32_ub_reg_offset(struct vfe_device *vfe_dev, int idx)
-{
- return (VFE32_WM_BASE(idx) + 0x10);
-}
-
-static uint32_t msm_vfe32_get_ub_size(struct vfe_device *vfe_dev)
-{
- return MSM_ISP32_TOTAL_WM_UB;
-}
-
static int32_t msm_vfe32_init_qos_parms(struct vfe_device *vfe_dev,
struct msm_vfe_hw_init_parms *qos_parms,
struct msm_vfe_hw_init_parms *ds_parms)
@@ -284,8 +268,6 @@
pr_err("%s: vfe ioremap failed\n", __func__);
goto vfe_remap_failed;
}
- vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
- vfe_dev->vfe_base;
vfe_dev->vfe_vbif_base = ioremap(vfe_dev->vfe_vbif_mem->start,
resource_size(vfe_dev->vfe_vbif_mem));
@@ -330,14 +312,12 @@
static void msm_vfe32_release_hardware(struct vfe_device *vfe_dev)
{
- msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x1C);
- msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x20);
- disable_irq(vfe_dev->vfe_irq->start);
free_irq(vfe_dev->vfe_irq->start, vfe_dev);
tasklet_kill(&vfe_dev->vfe_tasklet);
- msm_isp_flush_tasklet(vfe_dev);
iounmap(vfe_dev->vfe_vbif_base);
vfe_dev->vfe_vbif_base = NULL;
+ iounmap(vfe_dev->vfe_base);
+ vfe_dev->vfe_base = NULL;
if (vfe_dev->vfe_clk_idx == 1)
msm_cam_clk_enable(&vfe_dev->pdev->dev,
msm_vfe32_1_clk_info, vfe_dev->vfe_clk,
@@ -346,9 +326,6 @@
msm_cam_clk_enable(&vfe_dev->pdev->dev,
msm_vfe32_2_clk_info, vfe_dev->vfe_clk,
ARRAY_SIZE(msm_vfe32_2_clk_info), 0);
- vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
- iounmap(vfe_dev->vfe_base);
- vfe_dev->vfe_base = NULL;
kfree(vfe_dev->vfe_clk);
regulator_disable(vfe_dev->fs_vfe);
msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
@@ -369,7 +346,6 @@
ds_parms.entries = "ds-entries";
ds_parms.regs = "ds-regs";
ds_parms.settings = "ds-settings";
-
msm_vfe32_init_qos_parms(vfe_dev, &qos_parms, &ds_parms);
msm_vfe32_init_vbif_parms(vfe_dev, &vbif_parms);
@@ -381,6 +357,11 @@
msm_camera_io_w_mb(0x1CFFFFFF, vfe_dev->vfe_base + 0x20);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x24);
msm_camera_io_w_mb(0x1FFFFFFF, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x6FC);
+ msm_camera_io_w(0x10000000, vfe_dev->vfe_base + VFE32_RDI_BASE(1));
+ msm_camera_io_w(0x10000000, vfe_dev->vfe_base + VFE32_RDI_BASE(2));
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + VFE32_XBAR_BASE(0));
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + VFE32_XBAR_BASE(4));
}
@@ -396,13 +377,30 @@
static void msm_vfe32_process_reset_irq(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1)
{
- if (irq_status1 & BIT(23))
+ if (irq_status1 & BIT(23)) {
+ if (vfe_dev->vfe_reset_timeout_processed == 1) {
+ pr_err("%s:vfe reset was processed.\n", __func__);
+ return;
+ }
complete(&vfe_dev->reset_complete);
+ }
}
static void msm_vfe32_process_halt_irq(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1)
{
+ if (irq_status1 & (1 << 24))
+ msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x1D8);
+}
+
+static void msm_vfe32_process_epoch_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ if (!(irq_status0 & 0x18))
+ return;
+ if (irq_status0 & (1 << 3))
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
}
static void msm_vfe32_process_camif_irq(struct vfe_device *vfe_dev,
@@ -416,10 +414,12 @@
ISP_DBG("%s: SOF IRQ\n", __func__);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
&& vfe_dev->axi_data.src_info[VFE_PIX_0].
- stream_count == 0) {
+ pix_stream_count == 0) {
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
- msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0, ts);
- msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
+ if (vfe_dev->axi_data.stream_update)
+ msm_isp_axi_stream_update(vfe_dev,
+ (1 << VFE_PIX_0));
+ msm_isp_update_framedrop_reg(vfe_dev, (1 << VFE_PIX_0));
}
}
}
@@ -485,7 +485,20 @@
static void msm_vfe32_get_overflow_mask(uint32_t *overflow_mask)
{
- *overflow_mask = 0x0;
+ *overflow_mask = 0x003FFF7E;
+}
+
+static void msm_vfe32_get_rdi_wm_mask(struct vfe_device *vfe_dev,
+ uint32_t *rdi_wm_mask)
+{
+ *rdi_wm_mask = vfe_dev->axi_data.rdi_wm_mask;
+}
+
+static void msm_vfe32_get_irq_mask(struct vfe_device *vfe_dev,
+ uint32_t *irq0_mask, uint32_t *irq1_mask)
+{
+ *irq0_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+ *irq1_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x20);
}
static void msm_vfe32_process_error_status(struct vfe_device *vfe_dev)
@@ -571,7 +584,7 @@
pr_err("%s: axi error\n", __func__);
}
-static void msm_vfe32_read_and_clear_irq_status(struct vfe_device *vfe_dev,
+static void msm_vfe32_read_irq_status(struct vfe_device *vfe_dev,
uint32_t *irq_status0, uint32_t *irq_status1)
{
*irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
@@ -589,122 +602,127 @@
msm_camera_io_r(vfe_dev->vfe_base + 0x7B4);
}
-static void msm_vfe32_read_irq_status(struct vfe_device *vfe_dev,
- uint32_t *irq_status0, uint32_t irq_status1)
-{
- *irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
- *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x30);
-}
-
static void msm_vfe32_process_reg_update(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
struct msm_isp_timestamp *ts)
{
- uint32_t rdi_status;
- enum msm_vfe_input_src i;
+ uint8_t input_src = 0x0;
if (!(irq_status0 & 0x20) && !(irq_status1 & 0x1C000000))
return;
if (irq_status0 & BIT(5)) {
- msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
- vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
- VFE_PIX_0);
- if (vfe_dev->axi_data.stream_update[VFE_PIX_0]) {
- rdi_status = msm_camera_io_r(vfe_dev->vfe_base +
- VFE32_XBAR_BASE(0));
- rdi_status |= msm_camera_io_r(vfe_dev->vfe_base +
- VFE32_XBAR_BASE(4));
-
- if ((rdi_status & BIT(7)) && (!(irq_status0 & 0x20)))
- return;
- }
- msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
- MSM_ISP_COMP_IRQ_REG_UPD);
+ msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE, VFE_PIX_0, ts);
+ input_src |= (1 << VFE_PIX_0);
+ }
+ if (irq_status1 & BIT(26)) {
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_RAW_0, ts);
+ input_src |= (1 << VFE_RAW_0);
+ }
+ if (irq_status1 & BIT(27)) {
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_RAW_1, ts);
+ input_src |= (1 << VFE_RAW_1);
+ }
+ if (irq_status1 & BIT(28)) {
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_RAW_2, ts);
+ input_src |= (1 << VFE_RAW_2);
}
- for (i = VFE_RAW_0; i <= VFE_RAW_2; i++) {
- if (irq_status1 & BIT(26 + (i - VFE_RAW_0))) {
- msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
- msm_isp_axi_stream_update(vfe_dev, i, ts);
- msm_isp_update_framedrop_reg(vfe_dev, i);
-
- vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
- i);
+ if (vfe_dev->axi_data.stream_update)
+ msm_isp_axi_stream_update(vfe_dev, input_src);
+ if (atomic_read(&vfe_dev->stats_data.stats_update))
+ msm_isp_stats_stream_update(vfe_dev);
+ if (vfe_dev->axi_data.stream_update ||
+ atomic_read(&vfe_dev->stats_data.stats_update)) {
+ if (input_src & (1 << VFE_PIX_0)) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, (1 << VFE_PIX_0));
}
}
-
+ msm_isp_update_framedrop_reg(vfe_dev, input_src);
+ msm_isp_update_stats_framedrop_reg(vfe_dev);
msm_isp_update_error_frame_count(vfe_dev);
-
-}
-
-static void msm_vfe32_process_epoch_irq(struct vfe_device *vfe_dev,
- uint32_t irq_status0, uint32_t irq_status1,
- struct msm_isp_timestamp *ts)
-{
- /* Not supported */
-}
-
-static void msm_vfe32_reg_update(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src)
-{
- if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) {
- msm_camera_io_w_mb(0xF,
- vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]
- + 0x260);
- msm_camera_io_w_mb(0xF, vfe_dev->vfe_base + 0x260);
- } else if (!vfe_dev->is_split) {
- msm_camera_io_w_mb(0xF, vfe_dev->vfe_base + 0x260);
+ if ((input_src & (1 << VFE_RAW_0)) ||
+ (input_src & (1 << VFE_RAW_1)) ||
+ (input_src & (1 << VFE_RAW_2))) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, input_src);
}
+
+ return;
+}
+
+static void msm_vfe32_reg_update(
+ struct vfe_device *vfe_dev, uint32_t input_src)
+{
+ msm_camera_io_w_mb(input_src, vfe_dev->vfe_base + 0x260);
}
static long msm_vfe32_reset_hardware(struct vfe_device *vfe_dev,
uint32_t first_start, uint32_t blocking)
{
- init_completion(&vfe_dev->reset_complete);
- msm_camera_io_w_mb(0x3FF, vfe_dev->vfe_base + 0x4);
- return wait_for_completion_timeout(
- &vfe_dev->reset_complete, msecs_to_jiffies(50));
+ long rc = 0;
+ uint32_t irq_status1;
+
+ if (blocking) {
+ init_completion(&vfe_dev->reset_complete);
+ msm_camera_io_w_mb(0x3FF, vfe_dev->vfe_base + 0x4);
+ vfe_dev->vfe_reset_timeout_processed = 0;
+ rc = wait_for_completion_timeout(
+ &vfe_dev->reset_complete, msecs_to_jiffies(500));
+ } else {
+ msm_camera_io_w_mb(0x3FF, vfe_dev->vfe_base + 0x4);
+ }
+
+ if (blocking && rc <= 0) {
+ /*read ISP status register*/
+ irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x30);
+ pr_err("%s: handling vfe reset time out error. irq_status1 0x%x\n",
+ __func__, irq_status1);
+ if (irq_status1 & BIT(23)) {
+ pr_err("%s: vfe reset has done actually\n", __func__);
+ vfe_dev->vfe_reset_timeout_processed = 1;
+ return 1;
+ }
+ }
+ return rc;
}
static void msm_vfe32_axi_reload_wm(
- struct vfe_device *vfe_dev, void __iomem *vfe_base,
- uint32_t reload_mask)
+ struct vfe_device *vfe_dev, uint32_t reload_mask)
{
if (!vfe_dev->pdev->dev.of_node) {
/*vfe32 A-family: 8960*/
- msm_camera_io_w_mb(reload_mask, vfe_base + 0x38);
+ msm_camera_io_w_mb(reload_mask, vfe_dev->vfe_base + 0x38);
} else {
/*vfe32 B-family: 8610*/
- msm_camera_io_w(0x0, vfe_base + 0x24);
- msm_camera_io_w(0x0, vfe_base + 0x28);
- msm_camera_io_w(0x0, vfe_base + 0x20);
- msm_camera_io_w_mb(0x1, vfe_base + 0x18);
- msm_camera_io_w(0x9AAAAAAA, vfe_base + 0x600);
- msm_camera_io_w(reload_mask, vfe_base + 0x38);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w(0x1C800000, vfe_dev->vfe_base + 0x20);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x18);
+ msm_camera_io_w(0x9AAAAAAA, vfe_dev->vfe_base + 0x600);
+ msm_camera_io_w(reload_mask, vfe_dev->vfe_base + 0x38);
}
}
-static void msm_vfe32_axi_enable_wm(void __iomem *vfe_base,
+static void msm_vfe32_axi_enable_wm(struct vfe_device *vfe_dev,
uint8_t wm_idx, uint8_t enable)
{
uint32_t val = msm_camera_io_r(
- vfe_base + VFE32_WM_BASE(wm_idx));
+ vfe_dev->vfe_base + VFE32_WM_BASE(wm_idx));
if (enable)
val |= 0x1;
else
val &= ~0x1;
msm_camera_io_w_mb(val,
- vfe_base + VFE32_WM_BASE(wm_idx));
+ vfe_dev->vfe_base + VFE32_WM_BASE(wm_idx));
}
static void msm_vfe32_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
uint32_t comp_mask, comp_mask_index =
- stream_info->comp_mask_index[vfe_idx];
+ stream_info->comp_mask_index;
uint32_t irq_mask;
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x34);
@@ -721,9 +739,7 @@
static void msm_vfe32_axi_clear_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
- uint32_t comp_mask, comp_mask_index =
- stream_info->comp_mask_index[vfe_idx];
+ uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
uint32_t irq_mask;
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x34);
@@ -739,10 +755,9 @@
struct msm_vfe_axi_stream *stream_info)
{
uint32_t irq_mask;
- int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
- irq_mask |= BIT(stream_info->wm[vfe_idx][0] + 6);
+ irq_mask |= BIT(stream_info->wm[0] + 6);
msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
}
@@ -750,31 +765,40 @@
struct msm_vfe_axi_stream *stream_info)
{
uint32_t irq_mask;
- int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
- irq_mask &= ~BIT(stream_info->wm[vfe_idx][0] + 6);
+ irq_mask &= ~BIT(stream_info->wm[0] + 6);
msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
}
static void msm_vfe32_cfg_framedrop(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
- uint32_t framedrop_period)
+ struct msm_vfe_axi_stream *stream_info)
{
- void __iomem *vfe_base = vfe_dev->vfe_base;
+ uint32_t framedrop_pattern = 0, framedrop_period = 0;
+
+ if (stream_info->runtime_init_frame_drop == 0) {
+ framedrop_pattern = stream_info->framedrop_pattern;
+ framedrop_period = stream_info->framedrop_period;
+ }
+
+ if (stream_info->stream_type == BURST_STREAM &&
+ stream_info->runtime_burst_frame_count == 0) {
+ framedrop_pattern = 0;
+ framedrop_period = 0;
+ }
if (stream_info->stream_src == PIX_ENCODER) {
- msm_camera_io_w(framedrop_period - 1, vfe_base + 0x504);
- msm_camera_io_w(framedrop_period - 1, vfe_base + 0x508);
- msm_camera_io_w(framedrop_pattern, vfe_base + 0x50C);
- msm_camera_io_w(framedrop_pattern, vfe_base + 0x510);
+ msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x504);
+ msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x508);
+ msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x50C);
+ msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x510);
} else if (stream_info->stream_src == PIX_VIEWFINDER) {
- msm_camera_io_w(framedrop_period - 1, vfe_base + 0x514);
- msm_camera_io_w(framedrop_period - 1, vfe_base + 0x518);
- msm_camera_io_w(framedrop_pattern, vfe_base + 0x51C);
- msm_camera_io_w(framedrop_pattern, vfe_base + 0x520);
+ msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x514);
+ msm_camera_io_w(framedrop_period, vfe_dev->vfe_base + 0x518);
+ msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x51C);
+ msm_camera_io_w(framedrop_pattern, vfe_dev->vfe_base + 0x520);
}
- msm_camera_io_w_mb(0x1, vfe_base + 0x260);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x260);
}
static void msm_vfe32_clear_framedrop(struct vfe_device *vfe_dev,
@@ -877,6 +901,7 @@
struct msm_vfe_pix_cfg *pix_cfg)
{
pr_err("%s: Fetch engine not supported\n", __func__);
+ return;
}
static void msm_vfe32_cfg_camif(struct vfe_device *vfe_dev,
@@ -924,6 +949,7 @@
pr_err("%s: Unsupported input mux %d\n",
__func__, pix_cfg->input_mux);
}
+ return;
}
static void msm_vfe32_update_camif_state(
@@ -938,19 +964,20 @@
if (update_state == ENABLE_CAMIF) {
val = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
- val |= 0x1;
+ val |= 0x19;
msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x1C);
-
+ msm_camera_io_w_mb(0xA, vfe_dev->vfe_base + 0x200);
val = msm_camera_io_r(vfe_dev->vfe_base + 0x1E4);
bus_en =
((vfe_dev->axi_data.src_info[
VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
vfe_en =
((vfe_dev->axi_data.src_info[
- VFE_PIX_0].stream_count > 0) ? 1 : 0);
+ VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
val &= 0xFFFFFF3F;
val = val | bus_en << 7 | vfe_en << 6;
msm_camera_io_w(val, vfe_dev->vfe_base + 0x1E4);
+ msm_camera_io_w_mb(0x4, vfe_dev->vfe_base + 0x1E0);
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1E0);
vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
} else if (update_state == DISABLE_CAMIF) {
@@ -990,17 +1017,16 @@
uint8_t plane_idx)
{
uint32_t val;
- int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
- uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
+ uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);
if (!stream_info->frame_based) {
/*WR_IMAGE_SIZE*/
val =
((msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[vfe_idx][plane_idx].
+ stream_info->plane_cfg[plane_idx].
output_width)+1)/2 - 1) << 16 |
- (stream_info->plane_cfg[vfe_idx][plane_idx].
+ (stream_info->plane_cfg[plane_idx].
output_height - 1);
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x10);
@@ -1008,9 +1034,9 @@
val =
msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[vfe_idx][plane_idx].
+ stream_info->plane_cfg[plane_idx].
output_stride) << 16 |
- (stream_info->plane_cfg[vfe_idx][plane_idx].
+ (stream_info->plane_cfg[plane_idx].
output_height - 1) << 4 | VFE32_BURST_LEN;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
} else {
@@ -1018,12 +1044,13 @@
val =
msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[vfe_idx][plane_idx].
+ stream_info->plane_cfg[plane_idx].
output_width) << 16 |
- (stream_info->plane_cfg[vfe_idx][plane_idx].
+ (stream_info->plane_cfg[plane_idx].
output_height - 1) << 4 | VFE32_BURST_LEN;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
}
+ return;
}
static void msm_vfe32_axi_clear_wm_reg(
@@ -1031,22 +1058,24 @@
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
uint32_t val = 0;
- int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
- uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
+ uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);
+
+ /* FRAME BASED */
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base);
/*WR_IMAGE_SIZE*/
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x10);
/*WR_BUFFER_CFG*/
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+ return;
}
static void msm_vfe32_axi_cfg_wm_xbar_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
- int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
struct msm_vfe_axi_plane_cfg *plane_cfg =
- &stream_info->plane_cfg[vfe_idx][plane_idx];
- uint8_t wm = stream_info->wm[vfe_idx][plane_idx];
+ &stream_info->plane_cfg[plane_idx];
+ uint8_t wm = stream_info->wm[plane_idx];
uint32_t xbar_cfg = 0;
uint32_t xbar_reg_cfg = 0;
@@ -1093,14 +1122,14 @@
xbar_reg_cfg &= ~(0xFF << VFE32_XBAR_SHIFT(wm));
xbar_reg_cfg |= (xbar_cfg << VFE32_XBAR_SHIFT(wm));
msm_camera_io_w(xbar_reg_cfg, vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
+ return;
}
static void msm_vfe32_axi_clear_wm_xbar_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
- int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
- uint8_t wm = stream_info->wm[vfe_idx][plane_idx];
+ uint8_t wm = stream_info->wm[plane_idx];
uint32_t xbar_reg_cfg = 0;
xbar_reg_cfg = msm_camera_io_r(vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
@@ -1108,21 +1137,95 @@
msm_camera_io_w(xbar_reg_cfg, vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
}
-static void msm_vfe32_update_ping_pong_addr(void __iomem *vfe_base,
- uint8_t wm_idx, uint32_t pingpong_bit, dma_addr_t paddr,
- int32_t buf_size)
+static void msm_vfe32_cfg_axi_ub_equal_default(struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t total_image_size = 0;
+ uint32_t num_used_wms = 0;
+ uint32_t prop_size = 0;
+ uint32_t wm_ub_size;
+ uint64_t delta;
+
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (axi_data->free_wm[i] > 0) {
+ num_used_wms++;
+ total_image_size += axi_data->wm_image_size[i];
+ }
+ }
+ prop_size = MSM_ISP32_TOTAL_WM_UB -
+ axi_data->hw_info->min_wm_ub * num_used_wms;
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (axi_data->free_wm[i]) {
+ delta =
+ (uint64_t)(axi_data->wm_image_size[i] *
+ prop_size);
+ do_div(delta, total_image_size);
+ wm_ub_size = axi_data->hw_info->min_wm_ub +
+ (uint32_t)delta;
+ msm_camera_io_w(ub_offset << 16 |
+ (wm_ub_size - 1), vfe_dev->vfe_base +
+ VFE32_WM_BASE(i) + 0xC);
+ ub_offset += wm_ub_size;
+ } else {
+ msm_camera_io_w(0,
+ vfe_dev->vfe_base + VFE32_WM_BASE(i) + 0xC);
+ }
+ }
+}
+
+static void msm_vfe32_cfg_axi_ub_equal_slicing(struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ uint32_t final_ub_slice_size;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (ub_offset + VFE32_EQUAL_SLICE_UB > VFE32_AXI_SLICE_UB) {
+ final_ub_slice_size = VFE32_AXI_SLICE_UB - ub_offset;
+ msm_camera_io_w(ub_offset << 16 |
+ (final_ub_slice_size - 1), vfe_dev->vfe_base +
+ VFE32_WM_BASE(i) + 0xC);
+ ub_offset += final_ub_slice_size;
+ } else {
+ msm_camera_io_w(ub_offset << 16 |
+ (VFE32_EQUAL_SLICE_UB - 1), vfe_dev->vfe_base +
+ VFE32_WM_BASE(i) + 0xC);
+ ub_offset += VFE32_EQUAL_SLICE_UB;
+ }
+ }
+}
+
+static void msm_vfe32_cfg_axi_ub(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ axi_data->wm_ub_cfg_policy = MSM_WM_UB_CFG_DEFAULT;
+ if (axi_data->wm_ub_cfg_policy == MSM_WM_UB_EQUAL_SLICING)
+ msm_vfe32_cfg_axi_ub_equal_slicing(vfe_dev);
+ else
+ msm_vfe32_cfg_axi_ub_equal_default(vfe_dev);
+}
+
+static void msm_vfe32_update_ping_pong_addr(struct vfe_device *vfe_dev,
+ uint8_t wm_idx, uint32_t pingpong_status, dma_addr_t paddr)
{
uint32_t paddr32 = (paddr & 0xFFFFFFFF);
- msm_camera_io_w(paddr32, vfe_base +
- VFE32_PING_PONG_BASE(wm_idx, pingpong_bit));
+ msm_camera_io_w(paddr32, vfe_dev->vfe_base +
+ VFE32_PING_PONG_BASE(wm_idx, pingpong_status));
}
static int msm_vfe32_axi_halt(struct vfe_device *vfe_dev, uint32_t blocking)
{
- uint32_t halt_mask;
uint32_t axi_busy_flag = true;
+ /* Keep only halt and restart mask */
+ msm_camera_io_w(0x01800000, vfe_dev->vfe_base + 0x20);
+ /*Clear IRQ Status */
+ msm_camera_io_w(0xFE7FFFFF, vfe_dev->vfe_base + 0x28);
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1D8);
while (axi_busy_flag) {
if (msm_camera_io_r(
@@ -1130,10 +1233,27 @@
axi_busy_flag = false;
}
msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x1D8);
- halt_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x20);
- halt_mask &= 0xFEFFFFFF;
- /* Disable AXI IRQ */
- msm_camera_io_w_mb(halt_mask, vfe_dev->vfe_base + 0x20);
+ return 0;
+}
+
+static int msm_vfe32_axi_restart(struct vfe_device *vfe_dev,
+ uint32_t blocking, uint32_t enable_camif)
+{
+ vfe_dev->hw_info->vfe_ops.core_ops.restore_irq_mask(vfe_dev);
+
+ /*Clear IRQ Status */
+ msm_camera_io_w(0xFE7FFFFF, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1D8);
+ msm_camera_io_w_mb(0xA, vfe_dev->vfe_base + 0x200);
+ /* Start AXI */
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x1D8);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, 0xF);
+ memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+ atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+ if (enable_camif) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, ENABLE_CAMIF);
+ }
return 0;
}
@@ -1187,20 +1307,36 @@
}
static void msm_vfe32_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
- uint32_t stats_mask, uint8_t comp_idx, uint8_t enable)
+ uint32_t stats_mask, uint8_t enable)
{
+ uint32_t i = 0;
+ atomic_t *stats_comp;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+ stats_mask = stats_mask & 0x7F;
+
+ for (i = 0;
+ i < vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask; i++) {
+ stats_comp = &stats_data->stats_comp_mask[i];
+ if (enable)
+ atomic_add(stats_mask, stats_comp);
+ else
+ atomic_sub(stats_mask, stats_comp);
+ ISP_DBG("%s: comp_mask: %x\n",
+ __func__, atomic_read(&stats_data->stats_comp_mask[i]));
+ }
+ return;
}
static void msm_vfe32_stats_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
- stream_info);
uint32_t irq_mask;
irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
- irq_mask |= BIT(STATS_IDX(stream_info->stream_handle[vfe_idx]) + 13);
+ irq_mask |= BIT(STATS_IDX(stream_info->stream_handle) + 13);
msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
+ return;
}
static void msm_vfe32_stats_clear_wm_irq_mask(struct vfe_device *vfe_dev,
@@ -1211,18 +1347,21 @@
irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
irq_mask &= ~(BIT(STATS_IDX(stream_info->stream_handle) + 13));
msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
+ return;
}
static void msm_vfe32_stats_cfg_wm_reg(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
/*Nothing to configure for VFE3.x*/
+ return;
}
static void msm_vfe32_stats_clear_wm_reg(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
/*Nothing to configure for VFE3.x*/
+ return;
}
static void msm_vfe32_stats_cfg_ub(struct vfe_device *vfe_dev)
@@ -1247,12 +1386,7 @@
msm_camera_io_w(ub_offset << 16 | (ub_size[i] - 1),
vfe_dev->vfe_base + VFE32_STATS_BASE(i) + 0x8);
}
-}
-
-static bool msm_vfe32_is_module_cfg_lock_needed(
- uint32_t reg_offset)
-{
- return false;
+ return;
}
static void msm_vfe32_stats_enable_module(struct vfe_device *vfe_dev,
@@ -1294,15 +1428,13 @@
static void msm_vfe32_stats_update_ping_pong_addr(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status,
- dma_addr_t paddr, uint32_t buf_sz)
+ dma_addr_t paddr)
{
- void __iomem *vfe_base = vfe_dev->vfe_base;
- int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
- stream_info);
uint32_t paddr32 = (paddr & 0xFFFFFFFF);
- int stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
- msm_camera_io_w(paddr32, vfe_base +
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+
+ msm_camera_io_w(paddr32, vfe_dev->vfe_base +
VFE32_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
}
@@ -1361,28 +1493,6 @@
goto vfe_no_resource;
}
- if (!vfe_dev->pdev->dev.of_node)
- vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe_imgwr");
- else
- vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe");
-
- if (!vfe_dev->iommu_ctx[0]) {
- pr_err("%s: no iommux ctx resource?\n", __func__);
- rc = -ENODEV;
- goto vfe_no_resource;
- }
-
- if (!vfe_dev->pdev->dev.of_node)
- vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe_misc");
- else
- vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe");
-
- if (!vfe_dev->iommu_ctx[1]) {
- pr_err("%s: no iommux ctx resource?\n", __func__);
- rc = -ENODEV;
- goto vfe_no_resource;
- }
-
vfe_no_resource:
return rc;
}
@@ -1394,13 +1504,27 @@
*error_mask1 = 0x007FFFFF;
}
-struct msm_vfe_axi_hardware_info msm_vfe32_axi_hw_info = {
- .num_wm = 5,
+
+static void msm_vfe32_restore_irq_mask(struct vfe_device *vfe_dev)
+{
+ msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask0,
+ vfe_dev->vfe_base + 0x1C);
+ msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask1,
+ vfe_dev->vfe_base + 0x20);
+}
+
+static void msm_vfe32_get_halt_restart_mask(uint32_t *irq0_mask,
+ uint32_t *irq1_mask)
+{
+ *irq1_mask = 0x01800000;
+}
+
+static struct msm_vfe_axi_hardware_info msm_vfe32_axi_hw_info = {
+ .num_wm = 6,
.num_comp_mask = 3,
.num_rdi = 3,
.num_rdi_master = 3,
.min_wm_ub = 64,
- .scratch_buf_range = SZ_32M,
};
static struct msm_vfe_stats_hardware_info msm_vfe32_stats_hw_info = {
@@ -1412,7 +1536,22 @@
1 << MSM_ISP_STATS_SKIN | 1 << MSM_ISP_STATS_BHIST,
.stats_ping_pong_offset = stats_pingpong_offset_map,
.num_stats_type = VFE32_NUM_STATS_TYPE,
- .num_stats_comp_mask = 0,
+ .num_stats_comp_mask = 1,
+};
+
+static struct v4l2_subdev_core_ops msm_vfe32_subdev_core_ops = {
+ .ioctl = msm_isp_ioctl,
+ .subscribe_event = msm_isp_subscribe_event,
+ .unsubscribe_event = msm_isp_unsubscribe_event,
+};
+
+static struct v4l2_subdev_ops msm_vfe32_subdev_ops = {
+ .core = &msm_vfe32_subdev_core_ops,
+};
+
+static struct v4l2_subdev_internal_ops msm_vfe32_internal_ops = {
+ .open = msm_isp_open_node,
+ .close = msm_isp_close_node,
};
struct msm_vfe_hardware_info vfe32_hw_info = {
@@ -1421,16 +1560,14 @@
.vfe_clk_idx = VFE32_CLK_IDX,
.vfe_ops = {
.irq_ops = {
- .read_and_clear_irq_status =
- msm_vfe32_read_and_clear_irq_status,
.read_irq_status = msm_vfe32_read_irq_status,
.process_camif_irq = msm_vfe32_process_camif_irq,
.process_reset_irq = msm_vfe32_process_reset_irq,
.process_halt_irq = msm_vfe32_process_halt_irq,
.process_reg_update = msm_vfe32_process_reg_update,
+ .process_epoch_irq = msm_vfe32_process_epoch_irq,
.process_axi_irq = msm_isp_process_axi_irq,
.process_stats_irq = msm_isp_process_stats_irq,
- .process_epoch_irq = msm_vfe32_process_epoch_irq,
},
.axi_ops = {
.reload_wm = msm_vfe32_axi_reload_wm,
@@ -1446,15 +1583,14 @@
.clear_wm_reg = msm_vfe32_axi_clear_wm_reg,
.cfg_wm_xbar_reg = msm_vfe32_axi_cfg_wm_xbar_reg,
.clear_wm_xbar_reg = msm_vfe32_axi_clear_wm_xbar_reg,
- .cfg_ub = msm_vfe47_cfg_axi_ub,
+ .cfg_ub = msm_vfe32_cfg_axi_ub,
.update_ping_pong_addr =
msm_vfe32_update_ping_pong_addr,
.get_comp_mask = msm_vfe32_get_comp_mask,
.get_wm_mask = msm_vfe32_get_wm_mask,
.get_pingpong_status = msm_vfe32_get_pingpong_status,
.halt = msm_vfe32_axi_halt,
- .ub_reg_offset = msm_vfe40_ub_reg_offset,
- .get_ub_size = msm_vfe40_get_ub_size,
+ .restart = msm_vfe32_axi_restart,
},
.core_ops = {
.reg_update = msm_vfe32_reg_update,
@@ -1469,13 +1605,13 @@
.release_hw = msm_vfe32_release_hardware,
.get_platform_data = msm_vfe32_get_platform_data,
.get_error_mask = msm_vfe32_get_error_mask,
- .process_error_status = msm_vfe32_process_error_status,
.get_overflow_mask = msm_vfe32_get_overflow_mask,
- .is_module_cfg_lock_needed =
- msm_vfe32_is_module_cfg_lock_needed,
- .ahb_clk_cfg = NULL,
- .set_bus_err_ign_mask = NULL,
- .get_bus_err_mask = NULL,
+ .get_rdi_wm_mask = msm_vfe32_get_rdi_wm_mask,
+ .get_irq_mask = msm_vfe32_get_irq_mask,
+ .restore_irq_mask = msm_vfe32_restore_irq_mask,
+ .get_halt_restart_mask =
+ msm_vfe32_get_halt_restart_mask,
+ .process_error_status = msm_vfe32_process_error_status,
},
.stats_ops = {
.get_stats_idx = msm_vfe32_get_stats_idx,
@@ -1493,47 +1629,12 @@
.get_wm_mask = msm_vfe32_stats_get_wm_mask,
.get_frame_id = msm_vfe32_stats_get_frame_id,
.get_pingpong_status = msm_vfe32_get_pingpong_status,
- .enable_stats_wm = NULL,
},
},
.dmi_reg_offset = 0x5A0,
.axi_hw_info = &msm_vfe32_axi_hw_info,
.stats_hw_info = &msm_vfe32_stats_hw_info,
+ .subdev_ops = &msm_vfe32_subdev_ops,
+ .subdev_internal_ops = &msm_vfe32_internal_ops,
};
EXPORT_SYMBOL(vfe32_hw_info);
-
-static const struct of_device_id msm_vfe32_dt_match[] = {
- {
- .compatible = "qcom,vfe32",
- .data = &vfe32_hw_info,
- },
- {}
-};
-
-MODULE_DEVICE_TABLE(of, msm_vfe32_dt_match);
-
-static struct platform_driver vfe32_driver = {
- .probe = vfe_hw_probe,
- .driver = {
- .name = "msm_vfe32",
- .owner = THIS_MODULE,
- .of_match_table = msm_vfe32_dt_match,
- },
- .id_table = msm_vfe32_dev_id,
-};
-
-static int __init msm_vfe32_init_module(void)
-{
- return platform_driver_register(&vfe32_driver);
-}
-
-static void __exit msm_vfe32_exit_module(void)
-{
- platform_driver_unregister(&vfe32_driver);
-}
-
-module_init(msm_vfe32_init_module);
-module_exit(msm_vfe32_exit_module);
-MODULE_DESCRIPTION("MSM VFE32 driver");
-MODULE_LICENSE("GPL v2");
-
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.c
new file mode 100644
index 0000000..eada5fa
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.c
@@ -0,0 +1,557 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/of_device.h>
+#include <linux/sched_clock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+
+#include "msm_isp_32.h"
+#include "msm_isp_util_32.h"
+#include "msm_isp_axi_util_32.h"
+#include "msm_isp_stats_util_32.h"
+#include "msm_sd.h"
+#include "msm_isp32.h"
+
+static struct msm_sd_req_vb2_q vfe_vb2_ops;
+
+static const struct of_device_id msm_vfe_dt_match[] = {
+ {
+ .compatible = "qcom,vfe32",
+ .data = &vfe32_hw_info,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vfe_dt_match);
+
+static const struct platform_device_id msm_vfe_dev_id[] = {
+ {"msm_vfe32", (kernel_ulong_t) &vfe32_hw_info},
+ {}
+};
+#define MAX_OVERFLOW_COUNTERS 29
+#define OVERFLOW_LENGTH 1024
+#define OVERFLOW_BUFFER_LENGTH 64
+static char stat_line[OVERFLOW_LENGTH];
+
+static struct msm_isp_buf_mgr vfe_buf_mgr;
+static int msm_isp_enable_debugfs(struct vfe_device *vfe_dev,
+ struct msm_isp_bw_req_info *isp_req_hist);
+static char *stats_str[MAX_OVERFLOW_COUNTERS] = {
+ "imgmaster0_overflow_cnt",
+ "imgmaster1_overflow_cnt",
+ "imgmaster2_overflow_cnt",
+ "imgmaster3_overflow_cnt",
+ "imgmaster4_overflow_cnt",
+ "imgmaster5_overflow_cnt",
+ "imgmaster6_overflow_cnt",
+ "be_overflow_cnt",
+ "bg_overflow_cnt",
+ "bf_overflow_cnt",
+ "awb_overflow_cnt",
+ "rs_overflow_cnt",
+ "cs_overflow_cnt",
+ "ihist_overflow_cnt",
+ "skinbhist_overflow_cnt",
+ "bfscale_overflow_cnt",
+ "ISP_VFE0_client_info.active",
+ "ISP_VFE0_client_info.ab",
+ "ISP_VFE0_client_info.ib",
+ "ISP_VFE1_client_info.active",
+ "ISP_VFE1_client_info.ab",
+ "ISP_VFE1_client_info.ib",
+ "ISP_CPP_client_info.active",
+ "ISP_CPP_client_info.ab",
+ "ISP_CPP_client_info.ib",
+ "ISP_last_overflow.ab",
+ "ISP_last_overflow.ib",
+ "ISP_VFE_CLK_RATE",
+ "ISP_CPP_CLK_RATE",
+};
+
+#define MAX_DEPTH_BW_REQ_HISTORY 25
+#define MAX_BW_HISTORY_BUFF_LEN 6144
+#define MAX_BW_HISTORY_LINE_BUFF_LEN 512
+
+#define MAX_UB_INFO_BUFF_LEN 1024
+#define MAX_UB_INFO_LINE_BUFF_LEN 256
+
+static struct msm_isp_bw_req_info
+ msm_isp_bw_request_history[MAX_DEPTH_BW_REQ_HISTORY];
+static int msm_isp_bw_request_history_idx;
+static char bw_request_history_buff[MAX_BW_HISTORY_BUFF_LEN];
+static char ub_info_buffer[MAX_UB_INFO_BUFF_LEN];
+static spinlock_t req_history_lock;
+static int vfe_debugfs_statistics_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t vfe_debugfs_statistics_read(struct file *t_file,
+ char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+ int i;
+ uint64_t *ptr;
+ char buffer[OVERFLOW_BUFFER_LENGTH] = {0};
+ struct vfe_device *vfe_dev = (struct vfe_device *)
+ t_file->private_data;
+ struct msm_isp_statistics *stats = vfe_dev->stats;
+
+ memset(stat_line, 0, sizeof(stat_line));
+ msm_isp_util_get_bandwidth_stats(vfe_dev, stats);
+ ptr = (uint64_t *)(stats);
+ for (i = 0; i < MAX_OVERFLOW_COUNTERS; i++) {
+ strlcat(stat_line, stats_str[i], sizeof(stat_line));
+ strlcat(stat_line, " ", sizeof(stat_line));
+ snprintf(buffer, sizeof(buffer), "%llu", ptr[i]);
+ strlcat(stat_line, buffer, sizeof(stat_line));
+ strlcat(stat_line, "\r\n", sizeof(stat_line));
+ }
+ return simple_read_from_buffer(t_char, t_size_t,
+ t_loff_t, stat_line, strlen(stat_line));
+}
+
+static ssize_t vfe_debugfs_statistics_write(struct file *t_file,
+ const char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+ struct vfe_device *vfe_dev = (struct vfe_device *)
+ t_file->private_data;
+ struct msm_isp_statistics *stats = vfe_dev->stats;
+
+ memset(stats, 0, sizeof(struct msm_isp_statistics));
+
+ return sizeof(struct msm_isp_statistics);
+}
+
+static int bw_history_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t bw_history_read(struct file *t_file, char __user *t_char,
+ size_t t_size_t, loff_t *t_loff_t)
+{
+ int i;
+ char *out_buffer = bw_request_history_buff;
+ char line_buffer[MAX_BW_HISTORY_LINE_BUFF_LEN] = {0};
+ struct msm_isp_bw_req_info *isp_req_hist =
+ (struct msm_isp_bw_req_info *) t_file->private_data;
+
+ memset(out_buffer, 0, MAX_BW_HISTORY_BUFF_LEN);
+
+ snprintf(line_buffer, sizeof(line_buffer),
+ "Bus bandwidth request history in chronological order:\n");
+ strlcat(out_buffer, line_buffer, sizeof(bw_request_history_buff));
+
+ snprintf(line_buffer, sizeof(line_buffer),
+ "MSM_ISP_MIN_AB = %u, MSM_ISP_MIN_IB = %u\n\n",
+ MSM_ISP_MIN_AB, MSM_ISP_MIN_IB);
+ strlcat(out_buffer, line_buffer, sizeof(bw_request_history_buff));
+
+ for (i = 0; i < MAX_DEPTH_BW_REQ_HISTORY; i++) {
+ snprintf(line_buffer, sizeof(line_buffer),
+ "idx = %d, client = %u, timestamp = %llu, ab = %llu, ib = %llu\n"
+ "ISP0.active = %x, ISP0.ab = %llu, ISP0.ib = %llu\n"
+ "ISP1.active = %x, ISP1.ab = %llu, ISP1.ib = %llu\n"
+ "CPP.active = %x, CPP.ab = %llu, CPP.ib = %llu\n\n",
+ i, isp_req_hist[i].client, isp_req_hist[i].timestamp,
+ isp_req_hist[i].total_ab, isp_req_hist[i].total_ib,
+ isp_req_hist[i].client_info[0].active,
+ isp_req_hist[i].client_info[0].ab,
+ isp_req_hist[i].client_info[0].ib,
+ isp_req_hist[i].client_info[1].active,
+ isp_req_hist[i].client_info[1].ab,
+ isp_req_hist[i].client_info[1].ib,
+ isp_req_hist[i].client_info[2].active,
+ isp_req_hist[i].client_info[2].ab,
+ isp_req_hist[i].client_info[2].ib);
+ strlcat(out_buffer, line_buffer,
+ sizeof(bw_request_history_buff));
+ }
+ return simple_read_from_buffer(t_char, t_size_t,
+ t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t bw_history_write(struct file *t_file,
+ const char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+ struct msm_isp_bw_req_info *isp_req_hist =
+ (struct msm_isp_bw_req_info *) t_file->private_data;
+
+ memset(isp_req_hist, 0, sizeof(msm_isp_bw_request_history));
+ msm_isp_bw_request_history_idx = 0;
+ return sizeof(msm_isp_bw_request_history);
+}
+
+static int ub_info_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t ub_info_read(struct file *t_file, char __user *t_char,
+ size_t t_size_t, loff_t *t_loff_t)
+{
+ int i;
+ char *out_buffer = ub_info_buffer;
+ char line_buffer[MAX_UB_INFO_LINE_BUFF_LEN] = {0};
+ struct vfe_device *vfe_dev =
+ (struct vfe_device *) t_file->private_data;
+ struct msm_isp_ub_info *ub_info = vfe_dev->ub_info;
+
+ memset(out_buffer, 0, MAX_UB_INFO_LINE_BUFF_LEN);
+ snprintf(line_buffer, sizeof(line_buffer),
+ "wm_ub_policy_type = %d\n"
+ "num_wm = %d\n"
+ "wm_ub = %d\n",
+ ub_info->policy, ub_info->num_wm, ub_info->wm_ub);
+ strlcat(out_buffer, line_buffer,
+ sizeof(ub_info_buffer));
+ for (i = 0; i < ub_info->num_wm; i++) {
+ snprintf(line_buffer, sizeof(line_buffer),
+ "data[%d] = 0x%x, addr[%d] = 0x%llx\n",
+ i, ub_info->data[i], i, ub_info->addr[i]);
+ strlcat(out_buffer, line_buffer,
+ sizeof(ub_info_buffer));
+ }
+
+ return simple_read_from_buffer(t_char, t_size_t,
+ t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t ub_info_write(struct file *t_file,
+ const char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+ struct vfe_device *vfe_dev =
+ (struct vfe_device *) t_file->private_data;
+ struct msm_isp_ub_info *ub_info = vfe_dev->ub_info;
+
+ memset(ub_info, 0, sizeof(struct msm_isp_ub_info));
+
+ return sizeof(struct msm_isp_ub_info);
+}
+
+static const struct file_operations vfe_debugfs_error = {
+ .open = vfe_debugfs_statistics_open,
+ .read = vfe_debugfs_statistics_read,
+ .write = vfe_debugfs_statistics_write,
+};
+
+static const struct file_operations bw_history_ops = {
+ .open = bw_history_open,
+ .read = bw_history_read,
+ .write = bw_history_write,
+};
+
+static const struct file_operations ub_info_ops = {
+ .open = ub_info_open,
+ .read = ub_info_read,
+ .write = ub_info_write,
+};
+
+static int msm_isp_enable_debugfs(struct vfe_device *vfe_dev,
+ struct msm_isp_bw_req_info *isp_req_hist)
+{
+ struct dentry *debugfs_base;
+ char dirname[32] = {0};
+
+ snprintf(dirname, sizeof(dirname), "msm_isp%d", vfe_dev->pdev->id);
+ debugfs_base = debugfs_create_dir(dirname, NULL);
+ if (!debugfs_base)
+ return -ENOMEM;
+ if (!debugfs_create_file("stats", 0644, debugfs_base,
+ vfe_dev, &vfe_debugfs_error))
+ return -ENOMEM;
+
+ if (!debugfs_create_file("bw_req_history", 0644,
+ debugfs_base, isp_req_hist, &bw_history_ops))
+ return -ENOMEM;
+
+ if (!debugfs_create_file("ub_info", 0644,
+ debugfs_base, vfe_dev, &ub_info_ops))
+ return -ENOMEM;
+
+ return 0;
+}
+
+void msm_isp_update_req_history(uint32_t client, uint64_t ab,
+ uint64_t ib,
+ struct msm_isp_bandwidth_info *client_info,
+ unsigned long long ts)
+{
+ int i;
+
+ spin_lock(&req_history_lock);
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].client =
+ client;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].timestamp =
+ ts;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].total_ab =
+ ab;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].total_ib =
+ ib;
+
+ for (i = 0; i < MAX_ISP_CLIENT; i++) {
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+ client_info[i].active = client_info[i].active;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+ client_info[i].ab = client_info[i].ab;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+ client_info[i].ib = client_info[i].ib;
+ }
+
+ msm_isp_bw_request_history_idx = (msm_isp_bw_request_history_idx + 1)
+ % MAX_DEPTH_BW_REQ_HISTORY;
+ spin_unlock(&req_history_lock);
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_isp_dqevent(struct file *file, struct v4l2_fh *vfh, void *arg)
+{
+ long rc;
+
+ if (is_compat_task()) {
+ struct msm_isp32_event_data32 *event_data32;
+ struct msm_isp32_event_data *event_data;
+ struct v4l2_event isp_event;
+ struct v4l2_event *isp_event_user;
+
+ memset(&isp_event, 0, sizeof(isp_event));
+ rc = v4l2_event_dequeue(vfh, &isp_event,
+ file->f_flags & O_NONBLOCK);
+ if (rc)
+ return rc;
+ event_data = (struct msm_isp32_event_data *)
+ isp_event.u.data;
+ isp_event_user = (struct v4l2_event *)arg;
+ memcpy(isp_event_user, &isp_event,
+ sizeof(*isp_event_user));
+ event_data32 = (struct msm_isp32_event_data32 *)
+ isp_event_user->u.data;
+ memset(event_data32, 0,
+ sizeof(struct msm_isp32_event_data32));
+ event_data32->timestamp.tv_sec =
+ event_data->timestamp.tv_sec;
+ event_data32->timestamp.tv_usec =
+ event_data->timestamp.tv_usec;
+ event_data32->mono_timestamp.tv_sec =
+ event_data->mono_timestamp.tv_sec;
+ event_data32->mono_timestamp.tv_usec =
+ event_data->mono_timestamp.tv_usec;
+ event_data32->input_intf = event_data->input_intf;
+ event_data32->frame_id = event_data->frame_id;
+ memcpy(&(event_data32->u), &(event_data->u),
+ sizeof(event_data32->u));
+ } else {
+ rc = v4l2_event_dequeue(vfh, arg,
+ file->f_flags & O_NONBLOCK);
+ }
+ return rc;
+}
+#else
+static long msm_isp_dqevent(struct file *file, struct v4l2_fh *vfh, void *arg)
+{
+ return v4l2_event_dequeue(vfh, arg,
+ file->f_flags & O_NONBLOCK);
+}
+#endif
+
+static long msm_isp_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *vfh = file->private_data;
+
+ switch (cmd) {
+ case VIDIOC_DQEVENT: {
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+ return -ENOIOCTLCMD;
+ return msm_isp_dqevent(file, vfh, arg);
+ }
+ break;
+ case VIDIOC_SUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+
+ default:
+ return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+ }
+}
+
+static long msm_isp_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_isp_subdev_do_ioctl);
+}
+
+static struct v4l2_file_operations msm_isp_v4l2_subdev_fops = {
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = msm_isp_subdev_fops_ioctl,
+#endif
+ .unlocked_ioctl = msm_isp_subdev_fops_ioctl
+};
+
+static int vfe_probe(struct platform_device *pdev)
+{
+ struct vfe_device *vfe_dev;
+ /*struct msm_cam_subdev_info sd_info;*/
+ const struct of_device_id *match_dev;
+ int rc = 0;
+
+ vfe_dev = kzalloc(sizeof(struct vfe_device), GFP_KERNEL);
+ if (!vfe_dev) {
+ rc = -ENOMEM;
+ goto end;
+ }
+ vfe_dev->stats = kzalloc(sizeof(struct msm_isp_statistics), GFP_KERNEL);
+ if (!vfe_dev->stats) {
+ rc = -ENOMEM;
+ goto probe_fail1;
+ }
+
+ vfe_dev->ub_info = kzalloc(sizeof(struct msm_isp_ub_info), GFP_KERNEL);
+ if (!vfe_dev->ub_info) {
+ rc = -ENOMEM;
+ goto probe_fail2;
+ }
+ if (pdev->dev.of_node) {
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+ match_dev = of_match_device(msm_vfe_dt_match, &pdev->dev);
+ if (!match_dev) {
+ pr_err("%s: No vfe hardware info\n", __func__);
+ rc = -EINVAL;
+ goto probe_fail3;
+ }
+ vfe_dev->hw_info =
+ (struct msm_vfe_hardware_info *) match_dev->data;
+ } else {
+ vfe_dev->hw_info = (struct msm_vfe_hardware_info *)
+ platform_get_device_id(pdev)->driver_data;
+ }
+
+ if (!vfe_dev->hw_info) {
+ pr_err("%s: No vfe hardware info\n", __func__);
+ rc = -EINVAL;
+ goto probe_fail3;
+ }
+ ISP_DBG("%s: device id = %d\n", __func__, pdev->id);
+
+ vfe_dev->pdev = pdev;
+ rc = vfe_dev->hw_info->vfe_ops.core_ops.get_platform_data(vfe_dev);
+ if (rc < 0) {
+ pr_err("%s: failed to get platform resources\n", __func__);
+ rc = -ENOMEM;
+ goto probe_fail3;
+ }
+
+ INIT_LIST_HEAD(&vfe_dev->tasklet_q);
+ tasklet_init(&vfe_dev->vfe_tasklet,
+ msm_isp_do_tasklet, (unsigned long)vfe_dev);
+
+ v4l2_subdev_init(&vfe_dev->subdev.sd, vfe_dev->hw_info->subdev_ops);
+ vfe_dev->subdev.sd.internal_ops =
+ vfe_dev->hw_info->subdev_internal_ops;
+ snprintf(vfe_dev->subdev.sd.name,
+ ARRAY_SIZE(vfe_dev->subdev.sd.name),
+ "vfe");
+ vfe_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ vfe_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+ v4l2_set_subdevdata(&vfe_dev->subdev.sd, vfe_dev);
+ platform_set_drvdata(pdev, &vfe_dev->subdev.sd);
+ mutex_init(&vfe_dev->realtime_mutex);
+ mutex_init(&vfe_dev->core_mutex);
+ spin_lock_init(&vfe_dev->tasklet_lock);
+ spin_lock_init(&vfe_dev->shared_data_lock);
+ spin_lock_init(&req_history_lock);
+ media_entity_pads_init(&vfe_dev->subdev.sd.entity, 0, NULL);
+ vfe_dev->subdev.sd.entity.function = MSM_CAMERA_SUBDEV_VFE;
+ vfe_dev->subdev.sd.entity.name = pdev->name;
+ vfe_dev->subdev.close_seq = MSM_SD_CLOSE_1ST_CATEGORY | 0x2;
+ rc = msm_sd_register(&vfe_dev->subdev);
+ if (rc != 0) {
+ pr_err("%s: msm_sd_register error = %d\n", __func__, rc);
+ goto probe_fail3;
+ }
+
+ msm_isp_v4l2_subdev_fops.owner = v4l2_subdev_fops.owner;
+ msm_isp_v4l2_subdev_fops.open = v4l2_subdev_fops.open;
+ msm_isp_v4l2_subdev_fops.release = v4l2_subdev_fops.release;
+ msm_isp_v4l2_subdev_fops.poll = v4l2_subdev_fops.poll;
+
+ vfe_dev->subdev.sd.devnode->fops = &msm_isp_v4l2_subdev_fops;
+
+ vfe_dev->buf_mgr = &vfe_buf_mgr;
+ v4l2_subdev_notify(&vfe_dev->subdev.sd,
+ MSM_SD_NOTIFY_REQ_CB, &vfe_vb2_ops);
+ rc = msm_isp_create_isp_buf_mgr(vfe_dev->buf_mgr,
+ &vfe_vb2_ops, &pdev->dev,
+ vfe_dev->hw_info->axi_hw_info->scratch_buf_range);
+ if (rc < 0) {
+ pr_err("%s: Unable to create buffer manager\n", __func__);
+ rc = -EINVAL;
+ goto probe_fail3;
+ }
+ msm_isp_enable_debugfs(vfe_dev, msm_isp_bw_request_history);
+
+ vfe_dev->buf_mgr->init_done = 1;
+ vfe_dev->vfe_open_cnt = 0;
+ return rc;
+
+probe_fail3:
+ kfree(vfe_dev->ub_info);
+probe_fail2:
+ kfree(vfe_dev->stats);
+probe_fail1:
+ kfree(vfe_dev);
+end:
+ return rc;
+}
+
+static struct platform_driver vfe_driver = {
+ .probe = vfe_probe,
+ .driver = {
+ .name = "msm_vfe",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_vfe_dt_match,
+ },
+ .id_table = msm_vfe_dev_id,
+};
+
+static int __init msm_vfe_init_module(void)
+{
+ return platform_driver_register(&vfe_driver);
+}
+
+static void __exit msm_vfe_exit_module(void)
+{
+ platform_driver_unregister(&vfe_driver);
+}
+
+module_init(msm_vfe_init_module);
+module_exit(msm_vfe_exit_module);
+MODULE_DESCRIPTION("MSM VFE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.h
new file mode 100644
index 0000000..cbe92fa
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_32.h
@@ -0,0 +1,599 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_VFE_H__
+#define __MSM_VFE_H__
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_isp.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+
+#include "msm_buf_mgr.h"
+
+#define VFE40_8974V1_VERSION 0x10000018
+#define VFE40_8974V2_VERSION 0x1001001A
+#define VFE40_8974V3_VERSION 0x1001001B
+#define VFE40_8x26_VERSION 0x20000013
+#define VFE40_8x26V2_VERSION 0x20010014
+#define VFE40_8916_VERSION 0x10030000
+#define VFE40_8939_VERSION 0x10040000
+#define VFE32_8909_VERSION 0x30600
+
+#define MAX_IOMMU_CTX 2
+#define MAX_NUM_WM 7
+#define MAX_NUM_RDI 3
+#define MAX_NUM_RDI_MASTER 3
+#define MAX_NUM_COMPOSITE_MASK 4
+#define MAX_NUM_STATS_COMP_MASK 2
+#define MAX_INIT_FRAME_DROP 31
+#define ISP_Q2 (1 << 2)
+#define ISP_Q10 (1 << 10)
+
+#define VFE_PING_FLAG 0xFFFFFFFF
+#define VFE_PONG_FLAG 0x0
+
+#define VFE_MAX_CFG_TIMEOUT 3000
+#define VFE_CLK_INFO_MAX 16
+#define STATS_COMP_BIT_MASK 0xFF0000
+
+#define MSM_ISP_MIN_AB 11000000
+#define MSM_ISP_MIN_IB 11000000
+
+struct vfe_device;
+struct msm_vfe_axi_stream;
+struct msm_vfe_stats_stream;
+
+struct vfe_subscribe_info {
+ struct v4l2_fh *vfh;
+ uint32_t active;
+};
+
+enum msm_isp_pack_fmt {
+ QCOM,
+ MIPI,
+ DPCM6,
+ DPCM8,
+ PLAIN8,
+ PLAIN16,
+ MAX_ISP_PACK_FMT,
+};
+
+enum msm_isp_camif_update_state {
+ NO_UPDATE,
+ ENABLE_CAMIF,
+ DISABLE_CAMIF,
+ DISABLE_CAMIF_IMMEDIATELY
+};
+
+struct msm_isp_timestamp {
+ /*Monotonic clock for v4l2 buffer*/
+ struct timeval buf_time;
+ /*Monotonic clock for VT */
+ struct timeval vt_time;
+ /*Wall clock for userspace event*/
+ struct timeval event_time;
+};
+
+struct msm_vfe_irq_ops {
+ void (*read_irq_status)(struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1);
+ void (*process_reg_update)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+ void (*process_epoch_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+ void (*process_reset_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1);
+ void (*process_halt_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1);
+ void (*process_camif_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+ void (*process_axi_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+ void (*process_stats_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+};
+
+struct msm_vfe_axi_ops {
+ void (*reload_wm)(struct vfe_device *vfe_dev,
+ uint32_t reload_mask);
+ void (*enable_wm)(struct vfe_device *vfe_dev,
+ uint8_t wm_idx, uint8_t enable);
+ int32_t (*cfg_io_format)(struct vfe_device *vfe_dev,
+ enum msm_vfe_axi_stream_src stream_src,
+ uint32_t io_format);
+ void (*cfg_framedrop)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+ void (*clear_framedrop)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+ void (*cfg_comp_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+ void (*clear_comp_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+ void (*cfg_wm_irq_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+ void (*clear_wm_irq_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+
+ void (*cfg_wm_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx);
+ void (*clear_wm_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx);
+
+ void (*cfg_wm_xbar_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx);
+ void (*clear_wm_xbar_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx);
+
+ void (*cfg_ub)(struct vfe_device *vfe_dev);
+
+ void (*update_ping_pong_addr)(struct vfe_device *vfe_dev,
+ uint8_t wm_idx, uint32_t pingpong_status, dma_addr_t paddr);
+
+ uint32_t (*get_wm_mask)(uint32_t irq_status0, uint32_t irq_status1);
+ uint32_t (*get_comp_mask)(uint32_t irq_status0, uint32_t irq_status1);
+ uint32_t (*get_pingpong_status)(struct vfe_device *vfe_dev);
+ int (*halt)(struct vfe_device *vfe_dev, uint32_t blocking);
+ int (*restart)(struct vfe_device *vfe_dev, uint32_t blocking,
+ uint32_t enable_camif);
+ void (*update_cgc_override)(struct vfe_device *vfe_dev,
+ uint8_t wm_idx, uint8_t cgc_override);
+};
+
+struct msm_vfe_core_ops {
+ void (*reg_update)(struct vfe_device *vfe_dev, uint32_t input_src);
+ long (*reset_hw)(struct vfe_device *vfe_dev, uint32_t first_start,
+ uint32_t blocking_call);
+ int (*init_hw)(struct vfe_device *vfe_dev);
+ void (*init_hw_reg)(struct vfe_device *vfe_dev);
+ void (*clear_status_reg)(struct vfe_device *vfe_dev);
+ void (*release_hw)(struct vfe_device *vfe_dev);
+ void (*cfg_input_mux)(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg);
+ int (*start_fetch_eng)(struct vfe_device *vfe_dev,
+ void *arg);
+ void (*update_camif_state)(struct vfe_device *vfe_dev,
+ enum msm_isp_camif_update_state update_state);
+ void (*cfg_rdi_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_rdi_cfg *rdi_cfg,
+ enum msm_vfe_input_src input_src);
+ int (*get_platform_data)(struct vfe_device *vfe_dev);
+ void (*get_error_mask)(uint32_t *error_mask0, uint32_t *error_mask1);
+ void (*process_error_status)(struct vfe_device *vfe_dev);
+ void (*get_overflow_mask)(uint32_t *overflow_mask);
+ void (*get_irq_mask)(struct vfe_device *vfe_dev,
+ uint32_t *irq0_mask, uint32_t *irq1_mask);
+ void (*restore_irq_mask)(struct vfe_device *vfe_dev);
+ void (*get_halt_restart_mask)(uint32_t *irq0_mask,
+ uint32_t *irq1_mask);
+ void (*get_rdi_wm_mask)(struct vfe_device *vfe_dev,
+ uint32_t *rdi_wm_mask);
+};
+struct msm_vfe_stats_ops {
+ int (*get_stats_idx)(enum msm_isp_stats_type stats_type);
+ int (*check_streams)(struct msm_vfe_stats_stream *stream_info);
+ void (*cfg_framedrop)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+ void (*clear_framedrop)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+ void (*cfg_comp_mask)(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable);
+ void (*cfg_wm_irq_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+ void (*clear_wm_irq_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+
+ void (*cfg_wm_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+ void (*clear_wm_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+
+ void (*cfg_ub)(struct vfe_device *vfe_dev);
+
+ void (*enable_module)(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable);
+
+ void (*update_ping_pong_addr)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info,
+ uint32_t pingpong_status, dma_addr_t paddr);
+
+ uint32_t (*get_frame_id)(struct vfe_device *vfe_dev);
+ uint32_t (*get_wm_mask)(uint32_t irq_status0, uint32_t irq_status1);
+ uint32_t (*get_comp_mask)(uint32_t irq_status0, uint32_t irq_status1);
+ uint32_t (*get_pingpong_status)(struct vfe_device *vfe_dev);
+
+ void (*update_cgc_override)(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable);
+};
+
+struct msm_vfe_ops {
+ struct msm_vfe_irq_ops irq_ops;
+ struct msm_vfe_axi_ops axi_ops;
+ struct msm_vfe_core_ops core_ops;
+ struct msm_vfe_stats_ops stats_ops;
+};
+
+struct msm_vfe_hardware_info {
+ int num_iommu_ctx;
+ /* secure iommu ctx nums */
+ int num_iommu_secure_ctx;
+ int vfe_clk_idx;
+ struct msm_vfe_ops vfe_ops;
+ struct msm_vfe_axi_hardware_info *axi_hw_info;
+ struct msm_vfe_stats_hardware_info *stats_hw_info;
+ struct v4l2_subdev_internal_ops *subdev_internal_ops;
+ struct v4l2_subdev_ops *subdev_ops;
+ uint32_t dmi_reg_offset;
+};
+
+struct msm_vfe_axi_hardware_info {
+ uint8_t num_wm;
+ uint8_t num_rdi;
+ uint8_t num_rdi_master;
+ uint8_t num_comp_mask;
+ uint32_t min_wm_ub;
+ uint32_t scratch_buf_range;
+};
+
+enum msm_vfe_axi_state {
+ AVAILABLE,
+ INACTIVE,
+ ACTIVE,
+ PAUSED,
+ START_PENDING,
+ STOP_PENDING,
+ PAUSE_PENDING,
+ RESUME_PENDING,
+ STARTING,
+ STOPPING,
+ PAUSING,
+ RESUMING,
+};
+
+enum msm_vfe_axi_cfg_update_state {
+ NO_AXI_CFG_UPDATE,
+ APPLYING_UPDATE_RESUME,
+ UPDATE_REQUESTED,
+};
+
+#define VFE_NO_DROP 0xFFFFFFFF
+#define VFE_DROP_EVERY_2FRAME 0x55555555
+#define VFE_DROP_EVERY_4FRAME 0x11111111
+#define VFE_DROP_EVERY_8FRAME 0x01010101
+#define VFE_DROP_EVERY_16FRAME 0x00010001
+#define VFE_DROP_EVERY_32FRAME 0x00000001
+
+enum msm_vfe_axi_stream_type {
+ CONTINUOUS_STREAM,
+ BURST_STREAM,
+};
+
+struct msm_vfe_axi_stream {
+ uint32_t frame_id;
+ enum msm_vfe_axi_state state;
+ enum msm_vfe_axi_stream_src stream_src;
+ uint8_t num_planes;
+ uint8_t wm[MAX_PLANES_PER_STREAM];
+ uint32_t output_format;/*Planar/RAW/Misc*/
+ struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+ uint8_t comp_mask_index;
+ struct msm_isp_buffer *buf[2];
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t bufq_handle;
+ uint32_t bufq_scratch_handle;
+ uint32_t controllable_output;
+ uint32_t stream_handle;
+ uint32_t request_frm_num;
+ uint8_t buf_divert;
+ enum msm_vfe_axi_stream_type stream_type;
+ uint32_t frame_based;
+ enum msm_vfe_frame_skip_pattern frame_skip_pattern;
+ uint32_t framedrop_period;
+ uint32_t framedrop_pattern;
+ uint32_t num_burst_capture;/*number of frame to capture*/
+ uint32_t init_frame_drop;
+ uint32_t burst_frame_count;/*number of sof before burst stop*/
+ uint8_t framedrop_update;
+ spinlock_t lock;
+
+ /*Bandwidth calculation info*/
+ uint32_t max_width;
+ /*Based on format plane size in Q2. e.g NV12 = 1.5*/
+ uint32_t format_factor;
+ uint32_t bandwidth;
+
+ /*Run time update variables*/
+ uint32_t runtime_init_frame_drop;
+ uint32_t runtime_burst_frame_count;/*number of sof before burst stop*/
+ uint32_t runtime_num_burst_capture;
+ uint8_t runtime_framedrop_update;
+ uint8_t runtime_framedrop_update_burst;
+ uint32_t runtime_output_format;
+ enum msm_stream_memory_input_t memory_input;
+};
+
+struct msm_vfe_axi_composite_info {
+ uint32_t stream_handle;
+ uint32_t stream_composite_mask;
+};
+
+struct msm_vfe_src_info {
+ uint32_t frame_id;
+ uint8_t active;
+ uint8_t pix_stream_count;
+ uint8_t raw_stream_count;
+ enum msm_vfe_inputmux input_mux;
+ uint32_t width;
+ long pixel_clock;
+ uint32_t input_format;/*V4L2 pix format with bayer pattern*/
+ uint32_t last_updt_frm_id;
+};
+
+struct msm_vfe_fetch_engine_info {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t bufq_handle;
+ uint32_t buf_idx;
+ uint8_t is_busy;
+};
+
+enum msm_wm_ub_cfg_type {
+ MSM_WM_UB_CFG_DEFAULT,
+ MSM_WM_UB_EQUAL_SLICING,
+ MSM_WM_UB_CFG_MAX_NUM
+};
+
+struct msm_vfe_axi_shared_data {
+ struct msm_vfe_axi_hardware_info *hw_info;
+ struct msm_vfe_axi_stream stream_info[VFE_AXI_SRC_MAX];
+ uint32_t free_wm[MAX_NUM_WM];
+ uint32_t wm_image_size[MAX_NUM_WM];
+ enum msm_wm_ub_cfg_type wm_ub_cfg_policy;
+ uint8_t num_used_wm;
+ uint8_t num_active_stream;
+ uint8_t num_rdi_stream;
+ uint8_t num_pix_stream;
+ uint32_t rdi_wm_mask;
+ struct msm_vfe_axi_composite_info
+ composite_info[MAX_NUM_COMPOSITE_MASK];
+ uint8_t num_used_composite_mask;
+ uint32_t stream_update;
+ atomic_t axi_cfg_update;
+ enum msm_isp_camif_update_state pipeline_update;
+ struct msm_vfe_src_info src_info[VFE_SRC_MAX];
+ uint16_t stream_handle_cnt;
+ uint32_t event_mask;
+};
+
+struct msm_vfe_stats_hardware_info {
+ uint32_t stats_capability_mask;
+ uint8_t *stats_ping_pong_offset;
+ uint8_t num_stats_type;
+ uint8_t num_stats_comp_mask;
+};
+
+enum msm_vfe_stats_state {
+ STATS_AVAILABLE,
+ STATS_INACTIVE,
+ STATS_ACTIVE,
+ STATS_START_PENDING,
+ STATS_STOP_PENDING,
+ STATS_STARTING,
+ STATS_STOPPING,
+};
+
+struct msm_vfe_stats_stream {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t stream_handle;
+ uint32_t composite_flag;
+ enum msm_isp_stats_type stats_type;
+ enum msm_vfe_stats_state state;
+ uint32_t framedrop_pattern;
+ uint32_t framedrop_period;
+ uint32_t irq_subsample_pattern;
+ uint32_t init_stats_frame_drop;
+
+ uint32_t buffer_offset;
+ struct msm_isp_buffer *buf[2];
+ uint32_t bufq_handle;
+};
+
+struct msm_vfe_stats_shared_data {
+ struct msm_vfe_stats_stream stream_info[MSM_ISP_STATS_MAX];
+ uint8_t num_active_stream;
+ atomic_t stats_comp_mask[MAX_NUM_STATS_COMP_MASK];
+ uint32_t reg_mask;
+ uint16_t stream_handle_cnt;
+ atomic_t stats_update;
+};
+
+struct msm_vfe_tasklet_queue_cmd {
+ struct list_head list;
+ uint32_t vfeInterruptStatus0;
+ uint32_t vfeInterruptStatus1;
+ struct msm_isp_timestamp ts;
+ uint8_t cmd_used;
+};
+
+#define MSM_VFE_TASKLETQ_SIZE 200
+
+enum msm_vfe_overflow_state {
+ NO_OVERFLOW,
+ OVERFLOW_DETECTED,
+ HALT_REQUESTED,
+ RESTART_REQUESTED,
+};
+
+struct msm_vfe_error_info {
+ atomic_t overflow_state;
+ uint32_t overflow_recover_irq_mask0;
+ uint32_t overflow_recover_irq_mask1;
+ uint32_t error_mask0;
+ uint32_t error_mask1;
+ uint32_t violation_status;
+ uint32_t camif_status;
+ uint32_t stream_framedrop_count[MAX_NUM_STREAM];
+ uint32_t stats_framedrop_count[MSM_ISP_STATS_MAX];
+ uint32_t info_dump_frame_count;
+ uint32_t error_count;
+};
+
+struct msm_isp_statistics {
+ int64_t imagemaster0_overflow;
+ int64_t imagemaster1_overflow;
+ int64_t imagemaster2_overflow;
+ int64_t imagemaster3_overflow;
+ int64_t imagemaster4_overflow;
+ int64_t imagemaster5_overflow;
+ int64_t imagemaster6_overflow;
+ int64_t be_overflow;
+ int64_t bg_overflow;
+ int64_t bf_overflow;
+ int64_t awb_overflow;
+ int64_t rs_overflow;
+ int64_t cs_overflow;
+ int64_t ihist_overflow;
+ int64_t skinbhist_overflow;
+ int64_t bfscale_overflow;
+
+ int64_t isp_vfe0_active;
+ int64_t isp_vfe0_ab;
+ int64_t isp_vfe0_ib;
+
+ int64_t isp_vfe1_active;
+ int64_t isp_vfe1_ab;
+ int64_t isp_vfe1_ib;
+
+ int64_t isp_cpp_active;
+ int64_t isp_cpp_ab;
+ int64_t isp_cpp_ib;
+
+ int64_t last_overflow_ab;
+ int64_t last_overflow_ib;
+
+ int64_t vfe_clk_rate;
+ int64_t cpp_clk_rate;
+};
+
+enum msm_isp_hw_client {
+ ISP_VFE0,
+ ISP_VFE1,
+ ISP_CPP,
+ MAX_ISP_CLIENT,
+};
+
+struct msm_isp_bandwidth_info {
+ uint32_t active;
+ uint64_t ab;
+ uint64_t ib;
+};
+
+struct msm_isp_bw_req_info {
+ uint32_t client;
+ unsigned long long timestamp;
+ uint64_t total_ab;
+ uint64_t total_ib;
+ struct msm_isp_bandwidth_info client_info[MAX_ISP_CLIENT];
+};
+
+#define MSM_ISP_MAX_WM 7
+struct msm_isp_ub_info {
+ enum msm_wm_ub_cfg_type policy;
+ uint8_t num_wm;
+ uint32_t wm_ub;
+ uint32_t data[MSM_ISP_MAX_WM];
+ uint64_t addr[MSM_ISP_MAX_WM];
+};
+
+struct msm_vfe_hw_init_parms {
+ const char *entries;
+ const char *regs;
+ const char *settings;
+};
+
+struct vfe_device {
+ struct platform_device *pdev;
+ struct msm_sd_subdev subdev;
+ struct resource *vfe_irq;
+ struct resource *vfe_mem;
+ struct resource *vfe_vbif_mem;
+ struct resource *vfe_io;
+ struct resource *vfe_vbif_io;
+ void __iomem *vfe_base;
+ void __iomem *vfe_vbif_base;
+
+ struct device *iommu_ctx[MAX_IOMMU_CTX];
+ /*Add secure context banks*/
+ struct device *iommu_secure_ctx[MAX_IOMMU_CTX];
+
+ struct regulator *fs_vfe;
+ struct clk **vfe_clk;
+ uint32_t num_clk;
+
+ uint32_t bus_perf_client;
+
+ struct completion reset_complete;
+ struct completion halt_complete;
+ struct completion stream_config_complete;
+ struct completion stats_config_complete;
+ struct mutex realtime_mutex;
+ struct mutex core_mutex;
+
+ atomic_t irq_cnt;
+ uint8_t taskletq_idx;
+ spinlock_t tasklet_lock;
+ spinlock_t shared_data_lock;
+ struct list_head tasklet_q;
+ struct tasklet_struct vfe_tasklet;
+ struct msm_vfe_tasklet_queue_cmd
+ tasklet_queue_cmd[MSM_VFE_TASKLETQ_SIZE];
+
+ uint32_t vfe_hw_version;
+ struct msm_vfe_hardware_info *hw_info;
+ struct msm_vfe_axi_shared_data axi_data;
+ struct msm_vfe_stats_shared_data stats_data;
+ struct msm_vfe_error_info error_info;
+ struct msm_isp_buf_mgr *buf_mgr;
+ int dump_reg;
+ int vfe_clk_idx;
+ uint32_t vfe_open_cnt;
+ uint8_t vt_enable;
+ uint8_t ignore_error;
+ struct msm_isp_statistics *stats;
+ struct msm_vfe_fetch_engine_info fetch_engine_info;
+ uint64_t msm_isp_last_overflow_ab;
+ uint64_t msm_isp_last_overflow_ib;
+ uint64_t msm_isp_vfe_clk_rate;
+ struct msm_isp_ub_info *ub_info;
+ uint32_t vfe_ub_policy;
+ uint32_t isp_sof_debug;
+ uint8_t reset_pending;
+ uint32_t bus_util_factor;
+ uint8_t vfe_reset_timeout_processed;
+};
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.c
new file mode 100644
index 0000000..55a10ca
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.c
@@ -0,0 +1,2095 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <asm/div64.h>
+#include "msm_isp_util_32.h"
+#include "msm_isp_axi_util_32.h"
+
+#define SRC_TO_INTF(src) \
+ ((src < RDI_INTF_0 || src == VFE_AXI_SRC_MAX) ? VFE_PIX_0 : \
+ (VFE_RAW_0 + src - RDI_INTF_0))
+
+#define HANDLE_TO_IDX(handle) (handle & 0xFF)
+
+int msm_isp_axi_create_stream(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd)
+{
+ uint32_t i = stream_cfg_cmd->stream_src;
+
+ if (i >= VFE_AXI_SRC_MAX) {
+ pr_err("%s:%d invalid stream_src %d\n", __func__, __LINE__,
+ stream_cfg_cmd->stream_src);
+ return -EINVAL;
+ }
+
+ if ((axi_data->stream_handle_cnt << 8) == 0)
+ axi_data->stream_handle_cnt++;
+
+ stream_cfg_cmd->axi_stream_handle =
+ (++axi_data->stream_handle_cnt) << 8 | i;
+
+ memset(&axi_data->stream_info[i], 0,
+ sizeof(struct msm_vfe_axi_stream));
+ spin_lock_init(&axi_data->stream_info[i].lock);
+ axi_data->stream_info[i].session_id = stream_cfg_cmd->session_id;
+ axi_data->stream_info[i].stream_id = stream_cfg_cmd->stream_id;
+ axi_data->stream_info[i].buf_divert = stream_cfg_cmd->buf_divert;
+ axi_data->stream_info[i].state = INACTIVE;
+ axi_data->stream_info[i].stream_handle =
+ stream_cfg_cmd->axi_stream_handle;
+ axi_data->stream_info[i].controllable_output =
+ stream_cfg_cmd->controllable_output;
+ if (stream_cfg_cmd->controllable_output)
+ stream_cfg_cmd->frame_skip_pattern = SKIP_ALL;
+ return 0;
+}
+
+void msm_isp_axi_destroy_stream(
+ struct msm_vfe_axi_shared_data *axi_data, int stream_idx)
+{
+ if (axi_data->stream_info[stream_idx].state != AVAILABLE) {
+ axi_data->stream_info[stream_idx].state = AVAILABLE;
+ axi_data->stream_info[stream_idx].stream_handle = 0;
+ } else {
+ pr_err("%s: stream does not exist\n", __func__);
+ }
+}
+
+int msm_isp_validate_axi_request(struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd)
+{
+ int rc = -1, i;
+ struct msm_vfe_axi_stream *stream_info = NULL;
+
+ if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) < MAX_NUM_STREAM) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+ } else {
+ pr_err("%s: Invalid axi_stream_handle\n", __func__);
+ return rc;
+ }
+
+ if (!stream_info) {
+ pr_err("%s: Stream info is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (stream_cfg_cmd->output_format) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_SBGGR14:
+ case V4L2_PIX_FMT_SGBRG14:
+ case V4L2_PIX_FMT_SGRBG14:
+ case V4L2_PIX_FMT_SRGGB14:
+ case V4L2_PIX_FMT_QBGGR8:
+ case V4L2_PIX_FMT_QGBRG8:
+ case V4L2_PIX_FMT_QGRBG8:
+ case V4L2_PIX_FMT_QRGGB8:
+ case V4L2_PIX_FMT_QBGGR10:
+ case V4L2_PIX_FMT_QGBRG10:
+ case V4L2_PIX_FMT_QGRBG10:
+ case V4L2_PIX_FMT_QRGGB10:
+ case V4L2_PIX_FMT_QBGGR12:
+ case V4L2_PIX_FMT_QGBRG12:
+ case V4L2_PIX_FMT_QGRBG12:
+ case V4L2_PIX_FMT_QRGGB12:
+ case V4L2_PIX_FMT_QBGGR14:
+ case V4L2_PIX_FMT_QGBRG14:
+ case V4L2_PIX_FMT_QGRBG14:
+ case V4L2_PIX_FMT_QRGGB14:
+ case V4L2_PIX_FMT_P16BGGR10:
+ case V4L2_PIX_FMT_P16GBRG10:
+ case V4L2_PIX_FMT_P16GRBG10:
+ case V4L2_PIX_FMT_P16RGGB10:
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_META:
+ stream_info->num_planes = 1;
+ stream_info->format_factor = ISP_Q2;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV41:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ stream_info->num_planes = 2;
+ stream_info->format_factor = 1.5 * ISP_Q2;
+ break;
+ /*TD: Add more image format*/
+ default:
+ msm_isp_print_fourcc_error(__func__,
+ stream_cfg_cmd->output_format);
+ return rc;
+ }
+
+ if (axi_data->hw_info->num_wm - axi_data->num_used_wm <
+ stream_info->num_planes) {
+ pr_err("%s: No free write masters\n", __func__);
+ return rc;
+ }
+
+ if ((stream_info->num_planes > 1) &&
+ (axi_data->hw_info->num_comp_mask -
+ axi_data->num_used_composite_mask < 1)) {
+ pr_err("%s: No free composite mask\n", __func__);
+ return rc;
+ }
+
+ if (stream_cfg_cmd->init_frame_drop >= MAX_INIT_FRAME_DROP) {
+		pr_err("%s: Invalid init frame drop\n", __func__);
+ return rc;
+ }
+
+ if (stream_cfg_cmd->frame_skip_pattern >= MAX_SKIP) {
+ pr_err("%s: Invalid skip pattern\n", __func__);
+ return rc;
+ }
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ stream_info->plane_cfg[i] = stream_cfg_cmd->plane_cfg[i];
+ stream_info->max_width = max(stream_info->max_width,
+ stream_cfg_cmd->plane_cfg[i].output_width);
+ }
+
+ stream_info->output_format = stream_cfg_cmd->output_format;
+ stream_info->runtime_output_format = stream_info->output_format;
+ stream_info->stream_src = stream_cfg_cmd->stream_src;
+ stream_info->frame_based = stream_cfg_cmd->frame_base;
+ return 0;
+}
+
+static uint32_t msm_isp_axi_get_plane_size(
+ struct msm_vfe_axi_stream *stream_info, int plane_idx)
+{
+ uint32_t size = 0;
+ struct msm_vfe_axi_plane_cfg *plane_cfg = stream_info->plane_cfg;
+
+ switch (stream_info->output_format) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_QBGGR8:
+ case V4L2_PIX_FMT_QGBRG8:
+ case V4L2_PIX_FMT_QGRBG8:
+ case V4L2_PIX_FMT_QRGGB8:
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_META:
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_QBGGR10:
+ case V4L2_PIX_FMT_QGBRG10:
+ case V4L2_PIX_FMT_QGRBG10:
+ case V4L2_PIX_FMT_QRGGB10:
+ /* TODO: fix me */
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_QBGGR12:
+ case V4L2_PIX_FMT_QGBRG12:
+ case V4L2_PIX_FMT_QGRBG12:
+ case V4L2_PIX_FMT_QRGGB12:
+ case V4L2_PIX_FMT_SBGGR14:
+ case V4L2_PIX_FMT_SGBRG14:
+ case V4L2_PIX_FMT_SGRBG14:
+ case V4L2_PIX_FMT_SRGGB14:
+ case V4L2_PIX_FMT_QBGGR14:
+ case V4L2_PIX_FMT_QGBRG14:
+ case V4L2_PIX_FMT_QGRBG14:
+ case V4L2_PIX_FMT_QRGGB14:
+ /* TODO: fix me */
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_P16BGGR10:
+ case V4L2_PIX_FMT_P16GBRG10:
+ case V4L2_PIX_FMT_P16GRBG10:
+ case V4L2_PIX_FMT_P16RGGB10:
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ if (plane_cfg[plane_idx].output_plane_format == Y_PLANE)
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ else
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV41:
+ if (plane_cfg[plane_idx].output_plane_format == Y_PLANE)
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ else
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ /*TD: Add more image format*/
+ default:
+ msm_isp_print_fourcc_error(__func__,
+ stream_info->output_format);
+ break;
+ }
+ return size;
+}
+
+void msm_isp_axi_reserve_wm(struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i, j;
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ for (j = 0; j < axi_data->hw_info->num_wm; j++) {
+ if (!axi_data->free_wm[j]) {
+ axi_data->free_wm[j] =
+ stream_info->stream_handle;
+ axi_data->wm_image_size[j] =
+ msm_isp_axi_get_plane_size(
+ stream_info, i);
+ axi_data->num_used_wm++;
+ break;
+ }
+ }
+ stream_info->wm[i] = j;
+ }
+}
+
+void msm_isp_axi_free_wm(struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i;
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ axi_data->free_wm[stream_info->wm[i]] = 0;
+ axi_data->num_used_wm--;
+ }
+ if (stream_info->stream_src <= IDEAL_RAW)
+ axi_data->num_pix_stream++;
+ else if (stream_info->stream_src < VFE_AXI_SRC_MAX)
+ axi_data->num_rdi_stream++;
+}
+
+void msm_isp_axi_reserve_comp_mask(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i;
+ uint8_t comp_mask = 0;
+
+ for (i = 0; i < stream_info->num_planes; i++)
+ comp_mask |= 1 << stream_info->wm[i];
+
+ for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
+ if (!axi_data->composite_info[i].stream_handle) {
+ axi_data->composite_info[i].stream_handle =
+ stream_info->stream_handle;
+ axi_data->composite_info[i].
+ stream_composite_mask = comp_mask;
+ axi_data->num_used_composite_mask++;
+ break;
+ }
+ }
+ stream_info->comp_mask_index = i;
+}
+
+static void msm_isp_axi_free_comp_mask(struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ axi_data->composite_info[stream_info->comp_mask_index].
+ stream_composite_mask = 0;
+ axi_data->composite_info[stream_info->comp_mask_index].
+ stream_handle = 0;
+ axi_data->num_used_composite_mask--;
+}
+
+static int msm_isp_axi_get_bufq_handles(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int rc = 0;
+
+ if (stream_info->stream_id & ISP_SCRATCH_BUF_BIT) {
+ stream_info->bufq_handle =
+ vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr, stream_info->session_id,
+ stream_info->stream_id & ~ISP_SCRATCH_BUF_BIT);
+ if (stream_info->bufq_handle == 0) {
+ pr_err("%s: Stream 0x%x has no valid buffer queue\n",
+ __func__, (unsigned int)stream_info->stream_id);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ stream_info->bufq_scratch_handle =
+ vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr, stream_info->session_id,
+ stream_info->stream_id);
+ if (stream_info->bufq_scratch_handle == 0) {
+ pr_err("%s: Stream 0x%x has no valid buffer queue\n",
+ __func__, (unsigned int)stream_info->stream_id);
+ rc = -EINVAL;
+ return rc;
+ }
+ } else {
+ stream_info->bufq_handle =
+ vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr, stream_info->session_id,
+ stream_info->stream_id);
+ if (stream_info->bufq_handle == 0) {
+ pr_err("%s: Stream 0x%x has no valid buffer queue\n",
+ __func__, (unsigned int)stream_info->stream_id);
+ rc = -EINVAL;
+ return rc;
+ }
+ }
+ return rc;
+}
+
+int msm_isp_axi_check_stream_state(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int rc = 0, i;
+ unsigned long flags;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *stream_info;
+ enum msm_vfe_axi_state valid_state =
+ (stream_cfg_cmd->cmd == START_STREAM) ? INACTIVE : ACTIVE;
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+ return -EINVAL;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ MAX_NUM_STREAM) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (stream_info->state != valid_state) {
+ if ((stream_info->state == PAUSING ||
+ stream_info->state == PAUSED ||
+ stream_info->state == RESUME_PENDING ||
+ stream_info->state == RESUMING) &&
+ (stream_cfg_cmd->cmd == STOP_STREAM ||
+ stream_cfg_cmd->cmd == STOP_IMMEDIATELY)) {
+ stream_info->state = ACTIVE;
+ } else {
+ pr_err("%s: Invalid stream state: %d\n",
+ __func__, stream_info->state);
+ spin_unlock_irqrestore(
+ &stream_info->lock, flags);
+ rc = -EINVAL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+
+ if (stream_cfg_cmd->cmd == START_STREAM) {
+ rc = msm_isp_axi_get_bufq_handles(vfe_dev, stream_info);
+ if (rc)
+ break;
+ }
+ }
+ return rc;
+}
+
+void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev,
+ uint8_t input_src)
+{
+ int i;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *stream_info;
+
+ for (i = 0; i < MAX_NUM_STREAM; i++) {
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->state != ACTIVE)
+ continue;
+
+ if (stream_info->runtime_framedrop_update) {
+ stream_info->runtime_init_frame_drop--;
+ if (stream_info->runtime_init_frame_drop == 0) {
+ stream_info->runtime_framedrop_update = 0;
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_framedrop(vfe_dev, stream_info);
+ }
+ }
+ if (stream_info->stream_type == BURST_STREAM &&
+ ((1 << SRC_TO_INTF(stream_info->stream_src)) &
+ input_src)) {
+ if (stream_info->runtime_framedrop_update_burst) {
+ stream_info->runtime_framedrop_update_burst = 0;
+ stream_info->runtime_burst_frame_count =
+ stream_info->runtime_init_frame_drop +
+ (stream_info->runtime_num_burst_capture -
+ 1) *
+ (stream_info->framedrop_period + 1) + 1;
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_framedrop(vfe_dev, stream_info);
+ } else {
+ stream_info->runtime_burst_frame_count--;
+ if (stream_info->
+ runtime_burst_frame_count == 0) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_framedrop(vfe_dev, stream_info);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Re-arm the runtime framedrop counters from the user-configured values
+ * and program the resulting framedrop pattern into hardware.
+ */
+void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ stream_info->runtime_init_frame_drop = stream_info->init_frame_drop;
+ stream_info->runtime_burst_frame_count =
+ stream_info->burst_frame_count;
+ stream_info->runtime_num_burst_capture =
+ stream_info->num_burst_capture;
+ stream_info->runtime_framedrop_update = stream_info->framedrop_update;
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(vfe_dev, stream_info);
+}
+
+/*
+ * Forward an ISP event to userspace. On SOF the per-interface frame_id
+ * is advanced first (skipping 0 on wraparound, since 0 is treated as
+ * "no frame"); on REG_UPDATE the last-update frame id is cleared.
+ */
+void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
+ enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts)
+{
+ struct msm_isp32_event_data event_data;
+
+ memset(&event_data, 0, sizeof(event_data));
+ switch (event_type) {
+ case ISP_EVENT_SOF:
+ /* Log only the first 5 PIX0 SOFs for bring-up debugging. */
+ if ((frame_src == VFE_PIX_0) && (vfe_dev->isp_sof_debug < 5)) {
+ pr_err("%s: PIX0 frame id: %u\n", __func__,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+ vfe_dev->isp_sof_debug++;
+ }
+ vfe_dev->axi_data.src_info[frame_src].frame_id++;
+ if (vfe_dev->axi_data.src_info[frame_src].frame_id == 0)
+ vfe_dev->axi_data.src_info[frame_src].frame_id = 1;
+ ISP_DBG("%s: frame_src %d frame id: %u\n", __func__,
+ frame_src,
+ vfe_dev->axi_data.src_info[frame_src].frame_id);
+ break;
+ case ISP_EVENT_REG_UPDATE:
+ vfe_dev->axi_data.src_info[frame_src].last_updt_frm_id = 0;
+ break;
+ default:
+ break;
+ }
+
+ event_data.input_intf = frame_src;
+ event_data.frame_id = vfe_dev->axi_data.src_info[frame_src].frame_id;
+ event_data.timestamp = ts->event_time;
+ event_data.mono_timestamp = ts->buf_time;
+ msm_isp_send_event(vfe_dev, event_type | frame_src, &event_data);
+}
+
+/*
+ * Derive a stream's framedrop pattern/period and burst parameters from
+ * the user's stream request: SKIP_ALL maps to an all-drop pattern, any
+ * other skip pattern to a single-frame pattern repeated every
+ * framedrop_period frames. A small init_frame_drop (< period) is folded
+ * directly into the pattern shift; a larger one is counted down at
+ * runtime (framedrop_update = 1). burst_count > 0 selects BURST_STREAM.
+ */
+void msm_isp_calculate_framedrop(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd)
+{
+ uint32_t framedrop_period = 0;
+ struct msm_vfe_axi_stream *stream_info = NULL;
+
+ if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) < MAX_NUM_STREAM) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+ } else {
+ pr_err("%s: Invalid stream handle", __func__);
+ return;
+ }
+ /* Defensive only: the address of an array element is never NULL. */
+ if (!stream_info) {
+ pr_err("%s: Stream info is NULL\n", __func__);
+ return;
+ }
+
+ framedrop_period = msm_isp_get_framedrop_period(
+ stream_cfg_cmd->frame_skip_pattern);
+ stream_info->frame_skip_pattern =
+ stream_cfg_cmd->frame_skip_pattern;
+ if (stream_cfg_cmd->frame_skip_pattern == SKIP_ALL)
+ stream_info->framedrop_pattern = 0x0;
+ else
+ stream_info->framedrop_pattern = 0x1;
+ stream_info->framedrop_period = framedrop_period - 1;
+
+ if (stream_cfg_cmd->init_frame_drop < framedrop_period) {
+ stream_info->framedrop_pattern <<=
+ stream_cfg_cmd->init_frame_drop;
+ stream_info->init_frame_drop = 0;
+ stream_info->framedrop_update = 0;
+ } else {
+ stream_info->init_frame_drop = stream_cfg_cmd->init_frame_drop;
+ stream_info->framedrop_update = 1;
+ }
+
+ if (stream_cfg_cmd->burst_count > 0) {
+ stream_info->stream_type = BURST_STREAM;
+ stream_info->num_burst_capture =
+ stream_cfg_cmd->burst_count;
+ /* Total frames in burst window:
+ * init drop + (captures - 1) * period + 1.
+ */
+ stream_info->burst_frame_count =
+ stream_cfg_cmd->init_frame_drop +
+ (stream_cfg_cmd->burst_count - 1) *
+ framedrop_period + 1;
+ } else {
+ stream_info->stream_type = CONTINUOUS_STREAM;
+ stream_info->burst_frame_count = 0;
+ stream_info->num_burst_capture = 0;
+ }
+}
+
+/*
+ * Estimate a stream's bus bandwidth demand. PIX streams scale the CAMIF
+ * pixel clock by output width and a Q2 format factor; RDI streams use
+ * pixel clock / 8 * bits-per-pixel (i.e. bytes/sec for that format).
+ * NOTE(review): assumes src_info[VFE_PIX_0].width is non-zero when a
+ * PIX stream is configured — TODO confirm against the camif cfg path.
+ */
+static void msm_isp_calculate_bandwidth(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int bpp = 0;
+
+ if (stream_info->stream_src < RDI_INTF_0) {
+ stream_info->bandwidth =
+ (axi_data->src_info[VFE_PIX_0].pixel_clock /
+ axi_data->src_info[VFE_PIX_0].width) *
+ stream_info->max_width;
+ stream_info->bandwidth = (unsigned long)stream_info->bandwidth *
+ stream_info->format_factor / ISP_Q2;
+ } else {
+ int rdi = SRC_TO_INTF(stream_info->stream_src);
+
+ bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
+ if (rdi < VFE_SRC_MAX)
+ stream_info->bandwidth =
+ (axi_data->src_info[rdi].pixel_clock / 8) * bpp;
+ else
+ pr_err("%s: Invalid rdi interface\n", __func__);
+ }
+}
+
+#ifdef CONFIG_MSM_AVTIMER
+/* Open the AVCS core and keep the AVTimer powered (no power collapse). */
+void msm_isp_start_avtimer(void)
+{
+ avcs_core_open();
+ avcs_core_disable_power_collapse(1);
+}
+
+/*
+ * Read the current AVTimer tick and convert it into a sec/usec
+ * timestamp; on query failure the timestamp is zeroed.
+ */
+static inline void msm_isp_get_avtimer_ts(
+ struct msm_isp_timestamp *time_stamp)
+{
+ int rc = 0;
+ uint32_t avtimer_usec = 0;
+ uint64_t avtimer_tick = 0;
+
+ rc = avcs_core_query_timer(&avtimer_tick);
+ if (rc < 0) {
+ pr_err("%s: Error: Invalid AVTimer Tick, rc=%d\n",
+ __func__, rc);
+ /* In case of error return zero AVTimer Tick Value */
+ time_stamp->vt_time.tv_sec = 0;
+ time_stamp->vt_time.tv_usec = 0;
+ } else {
+ /* do_div() divides avtimer_tick in place and returns the
+ * remainder, i.e. the sub-second microseconds.
+ */
+ avtimer_usec = do_div(avtimer_tick, USEC_PER_SEC);
+ time_stamp->vt_time.tv_sec = (uint32_t)(avtimer_tick);
+ time_stamp->vt_time.tv_usec = avtimer_usec;
+ pr_debug("%s: AVTimer TS = %u:%u\n", __func__,
+ (uint32_t)(avtimer_tick), avtimer_usec);
+ }
+}
+#else
+/* AVTimer stub when CONFIG_MSM_AVTIMER is not built in. */
+void msm_isp_start_avtimer(void)
+{
+ pr_err("AV Timer is not supported\n");
+}
+
+/* Stub: report unavailability and return a zero timestamp. */
+inline void msm_isp_get_avtimer_ts(
+ struct msm_isp_timestamp *time_stamp)
+{
+ pr_err_ratelimited("%s: Error: AVTimer driver not available\n",
+ __func__);
+ time_stamp->vt_time.tv_sec = 0;
+ time_stamp->vt_time.tv_usec = 0;
+}
+#endif
+
+/*
+ * Handle a userspace AXI stream request: create and validate the stream,
+ * reserve write masters, configure I/O format for PIX-path streams,
+ * derive framedrop settings, optionally start the AVTimer, and program
+ * composite/IRQ masks plus per-plane WM and XBAR registers.
+ * Returns 0 on success; on failure all partially-acquired resources
+ * (stream slot, write masters) are released before returning.
+ */
+int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, i;
+ uint32_t io_format = 0;
+ struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd = arg;
+ struct msm_vfe_axi_stream *stream_info;
+
+ rc = msm_isp_axi_create_stream(
+ &vfe_dev->axi_data, stream_cfg_cmd);
+ if (rc) {
+ pr_err("%s: create stream failed\n", __func__);
+ return rc;
+ }
+
+ rc = msm_isp_validate_axi_request(
+ &vfe_dev->axi_data, stream_cfg_cmd);
+ if (rc) {
+ pr_err("%s: Request validation failed\n", __func__);
+ if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) <
+ MAX_NUM_STREAM)
+ msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
+ return rc;
+ }
+ stream_info = &vfe_dev->axi_data.
+ stream_info[HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+ /* Defensive only: the address of an array element is never NULL. */
+ if (!stream_info) {
+ pr_err("%s: can not find stream handle %x\n", __func__,
+ stream_cfg_cmd->axi_stream_handle);
+ return -EINVAL;
+ }
+
+ stream_info->memory_input = stream_cfg_cmd->memory_input;
+
+ msm_isp_axi_reserve_wm(&vfe_dev->axi_data, stream_info);
+
+ if (stream_info->stream_src < RDI_INTF_0) {
+ io_format = vfe_dev->axi_data.src_info[VFE_PIX_0].input_format;
+ /* RAW taps use the stream's own output format instead of
+ * the CAMIF input format.
+ */
+ if (stream_info->stream_src == CAMIF_RAW ||
+ stream_info->stream_src == IDEAL_RAW) {
+ if (stream_info->stream_src == CAMIF_RAW &&
+ io_format != stream_info->output_format)
+ pr_debug("%s: Overriding input format\n",
+ __func__);
+
+ io_format = stream_info->output_format;
+ }
+ rc = vfe_dev->hw_info->vfe_ops.axi_ops.cfg_io_format(
+ vfe_dev, stream_info->stream_src, io_format);
+ if (rc) {
+ pr_err("%s: cfg io format failed\n", __func__);
+ msm_isp_axi_free_wm(&vfe_dev->axi_data,
+ stream_info);
+ msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+ HANDLE_TO_IDX(
+ stream_cfg_cmd->axi_stream_handle));
+ return rc;
+ }
+ }
+
+ msm_isp_calculate_framedrop(&vfe_dev->axi_data, stream_cfg_cmd);
+ if (stream_cfg_cmd->vt_enable && !vfe_dev->vt_enable) {
+ vfe_dev->vt_enable = stream_cfg_cmd->vt_enable;
+ msm_isp_start_avtimer();
+ }
+ /* Multi-plane streams complete via a composite mask; single-plane
+ * streams take a per-WM IRQ instead.
+ */
+ if (stream_info->num_planes > 1) {
+ msm_isp_axi_reserve_comp_mask(
+ &vfe_dev->axi_data, stream_info);
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_comp_mask(vfe_dev, stream_info);
+ } else {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_irq_mask(vfe_dev, stream_info);
+ }
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, stream_info, i);
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_xbar_reg(vfe_dev, stream_info, i);
+ }
+ return rc;
+}
+
+/*
+ * Tear down an AXI stream: stop it first if still running, then clear
+ * per-plane WM/XBAR registers, release the composite or per-WM IRQ
+ * mask, clear the framedrop config, free the write masters and finally
+ * destroy the stream slot.
+ */
+int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, i;
+ struct msm_vfe_axi_stream_release_cmd *stream_release_cmd = arg;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_stream_cfg_cmd stream_cfg;
+
+
+ if (HANDLE_TO_IDX(stream_release_cmd->stream_handle) >=
+ MAX_NUM_STREAM) {
+ pr_err("%s: Invalid stream handle\n", __func__);
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_release_cmd->stream_handle)];
+ if (stream_info->state == AVAILABLE) {
+ pr_err("%s: Stream already released\n", __func__);
+ return -EINVAL;
+ } else if (stream_info->state != INACTIVE) {
+ /* Stream still running: issue an internal STOP first. */
+ stream_cfg.cmd = STOP_STREAM;
+ stream_cfg.num_streams = 1;
+ stream_cfg.stream_handle[0] = stream_release_cmd->stream_handle;
+ msm_isp_cfg_axi_stream(vfe_dev, (void *) &stream_cfg);
+ }
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_wm_reg(vfe_dev, stream_info, i);
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_wm_xbar_reg(vfe_dev, stream_info, i);
+ }
+
+ if (stream_info->num_planes > 1) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_comp_mask(vfe_dev, stream_info);
+ msm_isp_axi_free_comp_mask(&vfe_dev->axi_data, stream_info);
+ } else {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_wm_irq_mask(vfe_dev, stream_info);
+ }
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.clear_framedrop(vfe_dev, stream_info);
+ msm_isp_axi_free_wm(axi_data, stream_info);
+
+ msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+ HANDLE_TO_IDX(stream_release_cmd->stream_handle));
+
+ return rc;
+}
+
+/*
+ * Enable or disable a stream's write masters according to its pending
+ * state: START/RESUME_PENDING enables every WM, any other state
+ * disables them. Also maintains the active-stream count on START/STOP.
+ */
+static void msm_isp_axi_stream_enable_cfg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ if (stream_info->state == INACTIVE)
+ return;
+ for (i = 0; i < stream_info->num_planes; i++) {
+ if (stream_info->state == START_PENDING ||
+ stream_info->state == RESUME_PENDING) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ enable_wm(vfe_dev, stream_info->wm[i], 1);
+ } else {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ enable_wm(vfe_dev, stream_info->wm[i], 0);
+ /* Issue a reg update for Raw Snapshot Case
+ * since we dont have reg update ack
+ */
+ if (stream_info->stream_src == CAMIF_RAW ||
+ stream_info->stream_src == IDEAL_RAW) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, (1 << VFE_PIX_0));
+ }
+ }
+ }
+
+ if (stream_info->state == START_PENDING)
+ axi_data->num_active_stream++;
+ else if (stream_info->state == STOP_PENDING)
+ axi_data->num_active_stream--;
+}
+
+/*
+ * Advance the start/stop state machine of every stream attached to the
+ * interfaces in @input_src (bitmask), on reg-update ack:
+ *   START_PENDING/STOP_PENDING -> apply WM enable cfg -> STARTING/STOPPING
+ *   STARTING/STOPPING          -> ACTIVE/INACTIVE, decrement stream_update
+ * Also applies a deferred CAMIF-disable stats shutdown and signals
+ * stream_config_complete once every pending update has drained.
+ */
+void msm_isp_axi_stream_update(struct vfe_device *vfe_dev, uint8_t input_src)
+{
+ int i;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ for (i = 0; i < MAX_NUM_STREAM; i++) {
+ if (axi_data->stream_info[i].state == START_PENDING ||
+ axi_data->stream_info[i].state ==
+ STOP_PENDING) {
+ if ((1 <<
+ SRC_TO_INTF(axi_data->stream_info[i].
+ stream_src)) &
+ input_src) {
+ msm_isp_axi_stream_enable_cfg(
+ vfe_dev, &axi_data->stream_info[i]);
+ axi_data->stream_info[i].state =
+ axi_data->stream_info[i].state ==
+ START_PENDING ? STARTING : STOPPING;
+ }
+ } else if (axi_data->stream_info[i].state == STARTING ||
+ axi_data->stream_info[i].state == STOPPING) {
+ if ((1 <<
+ SRC_TO_INTF(axi_data->stream_info[i].
+ stream_src)) &
+ input_src) {
+ axi_data->stream_info[i].state =
+ axi_data->stream_info[i].state == STARTING ?
+ ACTIVE : INACTIVE;
+ vfe_dev->axi_data.stream_update--;
+ }
+ }
+ }
+
+ /* Deferred pipeline update: turn off all stats modules once the
+ * CAMIF disable has taken effect.
+ */
+ if (vfe_dev->axi_data.pipeline_update == DISABLE_CAMIF ||
+ (vfe_dev->axi_data.pipeline_update ==
+ DISABLE_CAMIF_IMMEDIATELY)) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ enable_module(vfe_dev, 0xFF, 0);
+ vfe_dev->axi_data.pipeline_update = NO_UPDATE;
+ }
+
+ if (vfe_dev->axi_data.stream_update == 0)
+ complete(&vfe_dev->stream_config_complete);
+}
+
+/*
+ * Re-write the ping (buf[0]) and pong (buf[1]) addresses of every plane
+ * from the currently-held buffers, e.g. after a WM register change, so
+ * hardware keeps writing into the same buffers at the new offsets.
+ */
+static void msm_isp_reload_ping_pong_offset(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i, j;
+ uint32_t flag;
+ struct msm_isp_buffer *buf;
+
+ for (i = 0; i < 2; i++) {
+ buf = stream_info->buf[i];
+ if (!buf)
+ continue;
+ flag = i ? VFE_PONG_FLAG : VFE_PING_FLAG;
+ for (j = 0; j < stream_info->num_planes; j++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
+ vfe_dev, stream_info->wm[j], flag,
+ buf->mapped_info[j].paddr +
+ stream_info->plane_cfg[j].plane_addr_offset);
+ }
+ }
+}
+
+/*
+ * Apply a pending AXI reconfiguration to each continuous stream under
+ * its lock: PAUSING streams have their WM registers and ping/pong
+ * addresses re-applied while paused and are then resumed (-> RESUMING);
+ * RESUMING streams latch the new output format and go ACTIVE. Finally
+ * the shared axi_cfg_update counter is decremented.
+ */
+void msm_isp_axi_cfg_update(struct vfe_device *vfe_dev)
+{
+ int i, j;
+ uint32_t update_state;
+ unsigned long flags;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *stream_info;
+
+ for (i = 0; i < MAX_NUM_STREAM; i++) {
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->stream_type == BURST_STREAM ||
+ stream_info->state == AVAILABLE)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (stream_info->state == PAUSING) {
+ /*AXI Stopped, apply update*/
+ stream_info->state = PAUSED;
+ msm_isp_reload_ping_pong_offset(vfe_dev, stream_info);
+ for (j = 0; j < stream_info->num_planes; j++)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, stream_info, j);
+ /*Resume AXI*/
+ stream_info->state = RESUME_PENDING;
+ msm_isp_axi_stream_enable_cfg(
+ vfe_dev, &axi_data->stream_info[i]);
+ stream_info->state = RESUMING;
+ } else if (stream_info->state == RESUMING) {
+ stream_info->runtime_output_format =
+ stream_info->output_format;
+ stream_info->state = ACTIVE;
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+
+ update_state = atomic_dec_return(&axi_data->axi_cfg_update);
+}
+
+/*
+ * Duplicate the ping buffer (buf[0]) into the pong registers and slot
+ * buf[1], used for single-capture bursts where only one buffer exists
+ * so hardware always has a valid write address.
+ */
+static void msm_isp_cfg_pong_address(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i;
+ struct msm_isp_buffer *buf = stream_info->buf[0];
+
+ for (i = 0; i < stream_info->num_planes; i++)
+ vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
+ vfe_dev, stream_info->wm[i],
+ VFE_PONG_FLAG, buf->mapped_info[i].paddr +
+ stream_info->plane_cfg[i].plane_addr_offset);
+ stream_info->buf[1] = buf;
+}
+
+/*
+ * Pick the just-completed buffer from the ping/pong pair. The done side
+ * is the inverse of the WM's current ping/pong status bit; a mismatch
+ * across planes is only logged. For controllable-output streams the
+ * slot is cleared and the outstanding request count decremented.
+ */
+static void msm_isp_get_done_buf(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
+ struct msm_isp_buffer **done_buf)
+{
+ uint32_t pingpong_bit = 0, i;
+
+ pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+ for (i = 0; i < stream_info->num_planes; i++) {
+ if (pingpong_bit !=
+ (~(pingpong_status >> stream_info->wm[i]) & 0x1)) {
+ pr_debug("%s: Write master ping pong mismatch. Status: 0x%x\n",
+ __func__, pingpong_status);
+ }
+ }
+
+ *done_buf = stream_info->buf[pingpong_bit];
+
+ if (stream_info->controllable_output) {
+ stream_info->buf[pingpong_bit] = NULL;
+ stream_info->request_frm_num--;
+ }
+}
+
+/*
+ * Fetch a buffer from the buffer manager and program its per-plane
+ * addresses into the WM side selected by @pingpong_status, storing the
+ * buffer in stream_info->buf[pingpong_bit]. Rejects duplicate frames
+ * (same frame id seen twice, e.g. delayed reg-update ack) and counts
+ * them as framedrops. Returns 0 on success, negative on failure.
+ */
+static int msm_isp_cfg_ping_pong_address(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
+ uint32_t pingpong_bit)
+{
+ int i, rc = -1;
+ struct msm_isp_buffer *buf = NULL;
+ uint32_t bufq_handle = 0, frame_id = 0;
+ uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+
+ if (stream_idx >= MAX_NUM_STREAM) {
+ pr_err("%s: Invalid stream_idx", __func__);
+ return rc;
+ }
+
+ /* Controllable output only fills buffers on explicit request. */
+ if (stream_info->controllable_output && !stream_info->request_frm_num)
+ return 0;
+
+ frame_id = vfe_dev->axi_data.
+ src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
+ if (frame_id && stream_info->frame_id &&
+ stream_info->frame_id == frame_id) {
+ /* This could happen if reg update ack is delayed */
+ pr_err("%s: Duplicate frame streamId:%d stream_fid:%d frame_id:%d\n",
+ __func__, stream_info->stream_id, stream_info->frame_id,
+ frame_id);
+ vfe_dev->error_info.stream_framedrop_count[stream_idx]++;
+ return rc;
+ }
+
+ bufq_handle = stream_info->bufq_handle;
+ if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
+ rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
+ vfe_dev->pdev->id, bufq_handle,
+ MSM_ISP_INVALID_BUF_INDEX, &buf);
+ else {
+ pr_err("%s: Invalid stream index\n", __func__);
+ rc = -1;
+ }
+
+ if (rc < 0) {
+ vfe_dev->error_info.stream_framedrop_count[stream_idx]++;
+ return rc;
+ }
+
+ if (buf->num_planes != stream_info->num_planes) {
+ pr_err("%s: Invalid buffer\n", __func__);
+ rc = -EINVAL;
+ goto buf_error;
+ }
+
+ for (i = 0; i < stream_info->num_planes; i++)
+ vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
+ vfe_dev, stream_info->wm[i],
+ pingpong_status, buf->mapped_info[i].paddr +
+ stream_info->plane_cfg[i].plane_addr_offset);
+
+ stream_info->buf[pingpong_bit] = buf;
+
+ return 0;
+buf_error:
+ vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ return rc;
+}
+
+/*
+ * Deliver a completed buffer to userspace. Depending on buf_divert and
+ * the buffer source, the buffer is either diverted (BUF_DIVERT event,
+ * only when no other ISP is still processing it, i.e. divert rc == 0)
+ * or returned to the buffer manager with a BUF_DONE event. AVTimer
+ * timestamps are used instead of the monotonic buffer time when
+ * vt_enable is set.
+ * NOTE(review): buf is dereferenced on the invalid-stream-index path
+ * before the `buf && ts` null check below — callers appear to always
+ * pass a valid buf; confirm at the call sites.
+ */
+static void msm_isp_process_done_buf(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, struct msm_isp_buffer *buf,
+ struct msm_isp_timestamp *ts)
+{
+ int rc;
+ struct msm_isp32_event_data buf_event;
+ struct timeval *time_stamp;
+ uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ uint32_t frame_id;
+ uint32_t buf_src;
+
+ memset(&buf_event, 0, sizeof(buf_event));
+
+ if (stream_idx >= MAX_NUM_STREAM) {
+ pr_err("%s: Invalid stream_idx", __func__);
+ return;
+ }
+
+ if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
+ frame_id = vfe_dev->axi_data.
+ src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
+ else {
+ pr_err("%s: Invalid stream index, put buf back to vb2 queue\n",
+ __func__);
+ vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ return;
+ }
+
+ if (buf && ts) {
+ if (vfe_dev->vt_enable) {
+ msm_isp_get_avtimer_ts(ts);
+ time_stamp = &ts->vt_time;
+ } else
+ time_stamp = &ts->buf_time;
+
+ rc = vfe_dev->buf_mgr->ops->get_buf_src(vfe_dev->buf_mgr,
+ buf->bufq_handle, &buf_src);
+ if (stream_info->buf_divert && rc == 0 &&
+ buf_src != MSM_ISP_BUFFER_SRC_SCRATCH) {
+ rc = vfe_dev->buf_mgr->ops->buf_divert(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx,
+ time_stamp, frame_id);
+ /* Buf divert return value represent whether the buf
+ * can be diverted. A positive return value means
+ * other ISP hardware is still processing the frame.
+ */
+ if (rc == 0) {
+ buf_event.input_intf =
+ SRC_TO_INTF(stream_info->stream_src);
+ buf_event.frame_id = frame_id;
+ buf_event.timestamp = *time_stamp;
+ buf_event.u.buf_done.session_id =
+ stream_info->session_id;
+ buf_event.u.buf_done.stream_id =
+ stream_info->stream_id;
+ buf_event.u.buf_done.handle =
+ stream_info->bufq_handle;
+ buf_event.u.buf_done.buf_idx = buf->buf_idx;
+ buf_event.u.buf_done.output_format =
+ stream_info->runtime_output_format;
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_BUF_DIVERT + stream_idx,
+ &buf_event);
+ }
+ } else {
+ buf_event.input_intf =
+ SRC_TO_INTF(stream_info->stream_src);
+ buf_event.frame_id = frame_id;
+ buf_event.timestamp = ts->buf_time;
+ buf_event.u.buf_done.session_id =
+ stream_info->session_id;
+ buf_event.u.buf_done.stream_id =
+ stream_info->stream_id;
+ buf_event.u.buf_done.output_format =
+ stream_info->runtime_output_format;
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_BUF_DONE, &buf_event);
+ vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx,
+ time_stamp, frame_id,
+ stream_info->runtime_output_format);
+ }
+ }
+}
+
+/*
+ * Decide whether this stream cfg command requires a CAMIF transition:
+ * ENABLE_CAMIF when the first PIX-path streams start, DISABLE_CAMIF /
+ * DISABLE_CAMIF_IMMEDIATELY when the command stops every currently
+ * active PIX stream. External-read input never touches CAMIF.
+ */
+static enum msm_isp_camif_update_state
+ msm_isp_get_camif_update_state(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint8_t pix_stream_cnt = 0, cur_pix_stream_cnt;
+
+ cur_pix_stream_cnt =
+ axi_data->src_info[VFE_PIX_0].pix_stream_count +
+ axi_data->src_info[VFE_PIX_0].raw_stream_count;
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ stream_info =
+ &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ if (stream_info->stream_src < RDI_INTF_0)
+ pix_stream_cnt++;
+ }
+
+ if ((pix_stream_cnt) &&
+ (axi_data->src_info[VFE_PIX_0].input_mux != EXTERNAL_READ)) {
+
+ if (cur_pix_stream_cnt == 0 && pix_stream_cnt &&
+ stream_cfg_cmd->cmd == START_STREAM)
+ return ENABLE_CAMIF;
+ /* (cur - cmd) == 0: this command stops the last PIX streams */
+ else if (cur_pix_stream_cnt &&
+ (cur_pix_stream_cnt - pix_stream_cnt) == 0 &&
+ stream_cfg_cmd->cmd == STOP_STREAM)
+ return DISABLE_CAMIF;
+ else if (cur_pix_stream_cnt &&
+ (cur_pix_stream_cnt - pix_stream_cnt) == 0 &&
+ stream_cfg_cmd->cmd == STOP_IMMEDIATELY)
+ return DISABLE_CAMIF_IMMEDIATELY;
+ }
+
+ return NO_UPDATE;
+}
+
+/*
+ * Maintain the PIX-interface stream counters: increment/decrement
+ * pix_stream_count for encoder/viewfinder/video/ideal-raw streams and
+ * raw_stream_count for CAMIF_RAW, according to START/STOP commands.
+ * Out-of-range handles abort the whole update silently.
+ */
+static void msm_isp_update_camif_output_count(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+ return;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ MAX_NUM_STREAM) {
+ return;
+ }
+ stream_info =
+ &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ if (stream_info->stream_src >= RDI_INTF_0)
+ continue;
+ if (stream_info->stream_src == PIX_ENCODER ||
+ stream_info->stream_src == PIX_VIEWFINDER ||
+ stream_info->stream_src == PIX_VIDEO ||
+ stream_info->stream_src == IDEAL_RAW) {
+ if (stream_cfg_cmd->cmd == START_STREAM)
+ vfe_dev->axi_data.src_info[VFE_PIX_0].
+ pix_stream_count++;
+ else
+ vfe_dev->axi_data.src_info[VFE_PIX_0].
+ pix_stream_count--;
+ } else if (stream_info->stream_src == CAMIF_RAW) {
+ if (stream_cfg_cmd->cmd == START_STREAM)
+ vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count++;
+ else
+ vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count--;
+ }
+ }
+}
+
+
+/*
+ * Maintain the per-RDI raw_stream_count counters: increment on
+ * START_STREAM and decrement otherwise for streams on RDI_INTF_0/1/2.
+ * Out-of-range handles abort the whole update silently.
+ *
+ * Fix: the handle bound check used ">", allowing index MAX_NUM_STREAM —
+ * one past the end of stream_info[]. Use ">=" like the matching check
+ * in msm_isp_update_camif_output_count().
+ */
+static void msm_isp_update_rdi_output_count(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+ return;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])
+ >= MAX_NUM_STREAM)
+ return;
+ stream_info =
+ &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ if (stream_info->stream_src < RDI_INTF_0)
+ continue;
+ if (stream_info->stream_src == RDI_INTF_0) {
+ if (stream_cfg_cmd->cmd == START_STREAM)
+ vfe_dev->axi_data.src_info[VFE_RAW_0].
+ raw_stream_count++;
+ else
+ vfe_dev->axi_data.src_info[VFE_RAW_0].
+ raw_stream_count--;
+ } else if (stream_info->stream_src == RDI_INTF_1) {
+ if (stream_cfg_cmd->cmd == START_STREAM)
+ vfe_dev->axi_data.src_info[VFE_RAW_1].
+ raw_stream_count++;
+ else
+ vfe_dev->axi_data.src_info[VFE_RAW_1].
+ raw_stream_count--;
+ } else if (stream_info->stream_src == RDI_INTF_2) {
+ if (stream_cfg_cmd->cmd == START_STREAM)
+ vfe_dev->axi_data.src_info[VFE_RAW_2].
+ raw_stream_count++;
+ else
+ vfe_dev->axi_data.src_info[VFE_RAW_2].
+ raw_stream_count--;
+ }
+
+ }
+}
+
+/*
+ * Total number of currently-counted streams: the three RDI raw counts
+ * plus the PIX-path pix and raw counts.
+ */
+static uint8_t msm_isp_get_curr_stream_cnt(
+ struct vfe_device *vfe_dev)
+{
+ uint8_t curr_stream_cnt = 0;
+
+ curr_stream_cnt = vfe_dev->axi_data.src_info[VFE_RAW_0].
+ raw_stream_count +
+ vfe_dev->axi_data.src_info[VFE_RAW_1].
+ raw_stream_count +
+ vfe_dev->axi_data.src_info[VFE_RAW_2].
+ raw_stream_count +
+ vfe_dev->axi_data.src_info[VFE_PIX_0].
+ pix_stream_count +
+ vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count;
+ return curr_stream_cnt;
+}
+
+/*Factor in Q2 format*/
+#define ISP_DEFAULT_FORMAT_FACTOR 6
+#define ISP_BUS_UTILIZATION_FACTOR 1536 /* 1.5 in Q10 format */
+/*
+ * Sum the bandwidth of every ACTIVE/START_PENDING stream and vote the
+ * result with the bus driver. The IB vote applies a Q10 utilization
+ * factor (device-tree overridable via bus_util_factor); when more than
+ * one stream is up, minimum AB/IB floors are added on top.
+ */
+static int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev)
+{
+ int i, rc = 0;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint64_t total_pix_bandwidth = 0, total_rdi_bandwidth = 0;
+ uint32_t num_pix_streams = 0;
+ uint32_t num_rdi_streams = 0;
+ uint32_t total_streams = 0;
+ uint64_t total_bandwidth = 0;
+ uint32_t bus_util_factor = ISP_BUS_UTILIZATION_FACTOR;
+
+ for (i = 0; i < MAX_NUM_STREAM; i++) {
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->state == ACTIVE ||
+ stream_info->state == START_PENDING) {
+ if (stream_info->stream_src < RDI_INTF_0) {
+ total_pix_bandwidth += stream_info->bandwidth;
+ num_pix_streams++;
+ } else {
+ total_rdi_bandwidth += stream_info->bandwidth;
+ num_rdi_streams++;
+ }
+ }
+ }
+ total_bandwidth = total_pix_bandwidth + total_rdi_bandwidth;
+ total_streams = num_pix_streams + num_rdi_streams;
+ if (vfe_dev->bus_util_factor)
+ bus_util_factor = vfe_dev->bus_util_factor;
+ ISP_DBG("%s: bus_util_factor = %u\n", __func__, bus_util_factor);
+
+ if (total_streams == 1)
+ rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
+ total_bandwidth,
+ (total_bandwidth * bus_util_factor / ISP_Q10));
+ else
+ rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
+ (total_bandwidth + MSM_ISP_MIN_AB), (total_bandwidth *
+ bus_util_factor / ISP_Q10 + MSM_ISP_MIN_IB));
+ if (rc < 0)
+ pr_err("%s: update failed\n", __func__);
+
+ return rc;
+}
+
+/*
+ * Block until the IRQ path signals stream_config_complete (pending
+ * start/stop updates drained by msm_isp_axi_stream_update), publishing
+ * the requested CAMIF transition first. Returns 0 on completion, -1 on
+ * timeout (VFE_MAX_CFG_TIMEOUT ms).
+ */
+static int msm_isp_axi_wait_for_cfg_done(struct vfe_device *vfe_dev,
+ enum msm_isp_camif_update_state camif_update)
+{
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
+ init_completion(&vfe_dev->stream_config_complete);
+ vfe_dev->axi_data.pipeline_update = camif_update;
+ spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
+ rc = wait_for_completion_timeout(
+ &vfe_dev->stream_config_complete,
+ msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
+ if (rc == 0) {
+ /* wait_for_completion_timeout() returns 0 on timeout. */
+ pr_err("%s: wait timeout\n", __func__);
+ rc = -1;
+ } else {
+ rc = 0;
+ }
+ return rc;
+}
+
+/*
+ * Prime both ping and pong buffer addresses before streaming starts.
+ * Single-capture bursts get the ping buffer duplicated into pong so
+ * hardware never writes to an unprogrammed address. Returns negative
+ * when no free buffer is available.
+ */
+static int msm_isp_init_stream_ping_pong_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int rc = 0;
+ /*Set address for both PING & PONG register */
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PING_FLAG, 0);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for ping\n",
+ __func__);
+ return rc;
+ }
+
+ /* For burst stream of one capture, only one buffer
+ * is allocated. Duplicate ping buffer address to pong
+ * buffer to ensure hardware write to a valid address
+ */
+ if (stream_info->stream_type == BURST_STREAM &&
+ stream_info->runtime_num_burst_capture <= 1) {
+ msm_isp_cfg_pong_address(vfe_dev, stream_info);
+ } else {
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PONG_FLAG, 1);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for pong\n",
+ __func__);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+/*
+ * Return any buffers still held in the stream's ping/pong slots to the
+ * buffer manager (slots themselves are not cleared here).
+ */
+static void msm_isp_deinit_stream_ping_pong_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct msm_isp_buffer *buf;
+
+ buf = stream_info->buf[i];
+ if (buf) {
+ vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ }
+ }
+}
+
+/*
+ * OR the stream's write-master bits into *wm_reload_mask (accumulator;
+ * caller initializes it and may aggregate several streams).
+ */
+static void msm_isp_get_stream_wm_mask(
+ struct msm_vfe_axi_stream *stream_info,
+ uint32_t *wm_reload_mask)
+{
+ int i;
+
+ for (i = 0; i < stream_info->num_planes; i++)
+ *wm_reload_mask |= (1 << stream_info->wm[i]);
+}
+
+/*
+ * Halt the AXI bridge (blocking). On overflow the current IRQ masks are
+ * saved once for later recovery and the overflow state is latched;
+ * optionally CAMIF is disabled immediately as well.
+ */
+int msm_isp_axi_halt(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_halt_cmd *halt_cmd)
+{
+ int rc = 0;
+
+ if (halt_cmd->overflow_detected) {
+ /*Store current IRQ mask*/
+ if (vfe_dev->error_info.overflow_recover_irq_mask0 == 0) {
+ vfe_dev->hw_info->vfe_ops.core_ops.get_irq_mask(vfe_dev,
+ &vfe_dev->error_info.overflow_recover_irq_mask0,
+ &vfe_dev->error_info.overflow_recover_irq_mask1);
+ }
+ atomic_set(&vfe_dev->error_info.overflow_state,
+ OVERFLOW_DETECTED);
+ }
+
+ rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+
+ if (halt_cmd->stop_camif) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
+ }
+
+ return rc;
+}
+
+/*
+ * Reset the VFE core and re-initialize every ACTIVE stream: shared
+ * buffer queues are flushed while private queues have their held
+ * ping/pong buffers returned; each affected interface's frame id is
+ * reset to the requested value and burst/framedrop counters re-armed.
+ *
+ * Fix: restore "&timestamp" where HTML-entity mangling had turned the
+ * argument into the invalid token "×tamp" (U+00D7 from "&times;").
+ */
+int msm_isp_axi_reset(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_reset_cmd *reset_cmd)
+{
+ int rc = 0, i, j;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_timestamp timestamp;
+
+ if (!reset_cmd) {
+ pr_err("%s: NULL pointer reset cmd %pK\n", __func__, reset_cmd);
+ rc = -1;
+ return rc;
+ }
+
+ msm_isp_get_timestamp(&timestamp, vfe_dev);
+ rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
+ 0, reset_cmd->blocking);
+
+ /* j counts ACTIVE streams visited so the scan stops early once
+ * every active stream has been handled.
+ */
+ for (i = 0, j = 0; j < axi_data->num_active_stream &&
+ i < MAX_NUM_STREAM; i++, j++) {
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
+ rc = -1;
+ pr_err("%s invalid stream src = %d\n", __func__,
+ stream_info->stream_src);
+ break;
+ }
+ if (stream_info->state != ACTIVE) {
+ j--;
+ continue;
+ }
+
+ bufq = vfe_dev->buf_mgr->ops->get_bufq(vfe_dev->buf_mgr,
+ stream_info->bufq_handle);
+ if (!bufq) {
+ pr_err("%s: bufq null %pK by handle %x\n", __func__,
+ bufq, stream_info->bufq_handle);
+ continue;
+ }
+
+ if (bufq->buf_type != ISP_SHARE_BUF) {
+ msm_isp_deinit_stream_ping_pong_reg(vfe_dev,
+ stream_info);
+ } else {
+ vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+ stream_info->bufq_handle,
+ MSM_ISP_BUFFER_FLUSH_ALL,
+ &timestamp.buf_time,
+ reset_cmd->frame_id);
+ }
+ axi_data->src_info[SRC_TO_INTF(stream_info->stream_src)].
+ frame_id = reset_cmd->frame_id;
+ msm_isp_reset_burst_count_and_frame_drop(vfe_dev, stream_info);
+ }
+
+ if (rc < 0)
+ pr_err("%s Error! reset hw Timed out\n", __func__);
+
+ return rc;
+}
+
+/*
+ * Restart the AXI bridge after a reset: re-prime the ping/pong buffers
+ * of every ACTIVE stream, reload their write masters, then restart the
+ * AXI hardware (optionally re-enabling CAMIF).
+ */
+int msm_isp_axi_restart(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_restart_cmd *restart_cmd)
+{
+ int rc = 0, i, j;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t wm_reload_mask = 0x0;
+
+ /* j counts ACTIVE streams visited; stops once all are handled. */
+ for (i = 0, j = 0; j < axi_data->num_active_stream &&
+ i < MAX_NUM_STREAM; i++, j++) {
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->state != ACTIVE) {
+ j--;
+ continue;
+ }
+ msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
+ msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
+ }
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev, wm_reload_mask);
+ rc = vfe_dev->hw_info->vfe_ops.axi_ops.restart(vfe_dev, 0,
+ restart_cmd->enable_camif);
+ if (rc < 0)
+ pr_err("%s Error restarting HW\n", __func__);
+
+ return rc;
+}
+
+/*
+ * Apply the clock-gating (CGC) override to every write master of every
+ * stream named in the cfg command; a no-op when the hardware variant
+ * does not provide the update_cgc_override op. Returns -EINVAL on an
+ * out-of-range stream count or handle.
+ */
+static int msm_isp_axi_update_cgc_override(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+ uint8_t cgc_override)
+{
+ int i = 0, j = 0;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+ return -EINVAL;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ MAX_NUM_STREAM) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ for (j = 0; j < stream_info->num_planes; j++) {
+ if (vfe_dev->hw_info->vfe_ops.axi_ops.
+ update_cgc_override)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ update_cgc_override(vfe_dev,
+ stream_info->wm[j], cgc_override);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Start every stream in the cfg command: compute bandwidth, re-arm
+ * framedrop, prime ping/pong buffers, then either enable the write
+ * masters immediately (input interface idle) or mark START_PENDING and
+ * wait for the reg-update-driven state machine to complete. Bandwidth
+ * votes, WM reloads, stream counters and an optional CAMIF enable are
+ * applied once for the whole batch.
+ */
+static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+ enum msm_isp_camif_update_state camif_update)
+{
+ int i, rc = 0;
+ uint8_t src_state, wait_for_complete = 0;
+ uint32_t wm_reload_mask = 0x0;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint8_t init_frm_drop = 0;
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+ return -EINVAL;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ MAX_NUM_STREAM) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
+ src_state = axi_data->src_info[
+ SRC_TO_INTF(stream_info->stream_src)].active;
+ else {
+ pr_err("%s: invalid src info index\n", __func__);
+ return -EINVAL;
+ }
+
+ msm_isp_calculate_bandwidth(axi_data, stream_info);
+ msm_isp_reset_framedrop(vfe_dev, stream_info);
+ init_frm_drop = stream_info->init_frame_drop;
+ msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
+ rc = msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
+ if (rc < 0) {
+ pr_err("%s: No buffer for stream %d\n",
+ __func__,
+ HANDLE_TO_IDX(
+ stream_cfg_cmd->stream_handle[i]));
+ return rc;
+ }
+
+ stream_info->state = START_PENDING;
+ /* If the source is already active the start must wait for a
+ * reg-update ack; otherwise enable the WMs right away.
+ */
+ if (src_state) {
+ wait_for_complete = 1;
+ } else {
+ if (vfe_dev->dump_reg)
+ msm_camera_io_dump_2(vfe_dev->vfe_base, 0x900);
+
+ /*Configure AXI start bits to start immediately*/
+ msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info);
+ stream_info->state = ACTIVE;
+ }
+ /* Pre-bias RDI frame ids by the initial frame-drop count. */
+ if (SRC_TO_INTF(stream_info->stream_src) != VFE_PIX_0 &&
+ stream_info->stream_src < VFE_AXI_SRC_MAX) {
+ vfe_dev->axi_data.src_info[SRC_TO_INTF(
+ stream_info->stream_src)].frame_id =
+ init_frm_drop;
+ }
+ }
+ msm_isp_update_stream_bandwidth(vfe_dev);
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev, wm_reload_mask);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, 0xF);
+ msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
+ msm_isp_update_rdi_output_count(vfe_dev, stream_cfg_cmd);
+ if (camif_update == ENABLE_CAMIF) {
+ atomic_set(&vfe_dev->error_info.overflow_state,
+ NO_OVERFLOW);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id = 0;
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, camif_update);
+ }
+
+ if (wait_for_complete) {
+ vfe_dev->axi_data.stream_update = stream_cfg_cmd->num_streams;
+ rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
+ }
+
+ return rc;
+}
+
+/*
+ * msm_isp_stop_axi_stream - stop the AXI streams listed in
+ * stream_cfg_cmd, optionally disabling CAMIF.
+ *
+ * Streams that are expected to receive a reg-update IRQ are put in
+ * STOP_PENDING and waited on; raw-snapshot, drained-burst and
+ * immediate-disable cases are stopped synchronously.  When the last
+ * stream goes away the AXI bridge is halted and the VFE core is reset
+ * and re-initialised.  Ping/pong buffers are released for every
+ * stream at the end.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+ enum msm_isp_camif_update_state camif_update)
+{
+ int i, rc = 0;
+ uint8_t wait_for_complete_for_this_stream = 0, cur_stream_cnt = 0;
+ uint8_t wait_for_complete = 0;
+ struct msm_vfe_axi_stream *stream_info = NULL;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int ext_read =
+ axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ;
+
+ /* num_streams is user-supplied; reject empty or oversized lists */
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM ||
+ stream_cfg_cmd->num_streams == 0)
+ return -EINVAL;
+
+ msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
+ msm_isp_update_rdi_output_count(vfe_dev, stream_cfg_cmd);
+ cur_stream_cnt = msm_isp_get_curr_stream_cnt(vfe_dev);
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ MAX_NUM_STREAM) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ wait_for_complete_for_this_stream = 0;
+ stream_info->state = STOP_PENDING;
+ if (stream_info->stream_src == CAMIF_RAW ||
+ stream_info->stream_src == IDEAL_RAW) {
+ /* We dont get reg update IRQ for raw snapshot
+ * so frame skip cant be configured
+ */
+ if ((camif_update != DISABLE_CAMIF_IMMEDIATELY) &&
+ (!ext_read))
+ wait_for_complete_for_this_stream = 1;
+ } else if (stream_info->stream_type == BURST_STREAM &&
+ stream_info->runtime_num_burst_capture == 0) {
+ /* Configure AXI writemasters to stop immediately
+ * since for burst case, write masters already skip
+ * all frames.
+ */
+ if (stream_info->stream_src == RDI_INTF_0 ||
+ stream_info->stream_src == RDI_INTF_1 ||
+ stream_info->stream_src == RDI_INTF_2)
+ wait_for_complete_for_this_stream = 1;
+ } else {
+ if ((camif_update != DISABLE_CAMIF_IMMEDIATELY) &&
+ (!ext_read) &&
+ !(stream_info->stream_src == RDI_INTF_0 ||
+ stream_info->stream_src == RDI_INTF_1 ||
+ stream_info->stream_src == RDI_INTF_2))
+ wait_for_complete_for_this_stream = 1;
+ }
+ /* No IRQ expected for this stream: disable it synchronously */
+ if (!wait_for_complete_for_this_stream) {
+ msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info);
+ stream_info->state = INACTIVE;
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, 0xF);
+ }
+ wait_for_complete |= wait_for_complete_for_this_stream;
+ }
+ if (wait_for_complete) {
+ vfe_dev->axi_data.stream_update = stream_cfg_cmd->num_streams;
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, 0xF);
+ rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
+ if (rc < 0) {
+ pr_err("%s: wait for config done failed\n", __func__);
+ /*
+ * Timed out: force every stream through an immediate
+ * disable so software state converges to INACTIVE.
+ */
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(
+ stream_cfg_cmd->stream_handle[i])];
+ stream_info->state = STOP_PENDING;
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, 0xF);
+ msm_isp_axi_stream_enable_cfg(
+ vfe_dev, stream_info);
+ stream_info->state = INACTIVE;
+ }
+ }
+ }
+ if (camif_update == DISABLE_CAMIF) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, DISABLE_CAMIF);
+ } else if ((camif_update == DISABLE_CAMIF_IMMEDIATELY) ||
+ (ext_read)) {
+ /*during stop immediately, stop output then stop input*/
+ if (!ext_read)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev,
+ DISABLE_CAMIF_IMMEDIATELY);
+ }
+ /* Last stream gone: halt AXI and reset/re-init the VFE core,
+ * suppressing error reporting while the hardware is down.
+ */
+ if (cur_stream_cnt == 0) {
+ vfe_dev->ignore_error = 1;
+ vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+ vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1);
+ vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
+ vfe_dev->ignore_error = 0;
+ }
+ msm_isp_update_stream_bandwidth(vfe_dev);
+
+ /* Release ping/pong buffers (handles validated in first loop) */
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ msm_isp_deinit_stream_ping_pong_reg(vfe_dev, stream_info);
+ }
+
+ return rc;
+}
+
+
+/*
+ * msm_isp_cfg_axi_stream - entry point for the START_STREAM /
+ * STOP_STREAM command.
+ *
+ * Validates the requested state transition, programs the UB when the
+ * first stream comes up, derives the required CAMIF transition, then
+ * dispatches to the start or stop helper.  The CGC override is
+ * enabled before starting and disabled after stopping.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0;
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd = arg;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ enum msm_isp_camif_update_state camif_update;
+
+ rc = msm_isp_axi_check_stream_state(vfe_dev, stream_cfg_cmd);
+ if (rc < 0) {
+ pr_err("%s: Invalid stream state\n", __func__);
+ return rc;
+ }
+
+ if (axi_data->num_active_stream == 0) {
+ /*Configure UB*/
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_ub(vfe_dev);
+ }
+ camif_update = msm_isp_get_camif_update_state(vfe_dev, stream_cfg_cmd);
+
+ if (stream_cfg_cmd->cmd == START_STREAM) {
+ /* Raise CGC override before the streams start */
+ msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 1);
+
+ rc = msm_isp_start_axi_stream(
+ vfe_dev, stream_cfg_cmd, camif_update);
+ } else {
+ rc = msm_isp_stop_axi_stream(
+ vfe_dev, stream_cfg_cmd, camif_update);
+
+ /* Drop CGC override once the streams have stopped */
+ msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 0);
+ }
+
+ if (rc < 0)
+ pr_err("%s: start/stop stream failed\n", __func__);
+ return rc;
+}
+
+/*
+ * msm_isp_request_frame - queue request_frm_num additional frames on a
+ * user-controlled (controllable_output) stream.
+ *
+ * Adds the request to the stream's pending count, reprograms the
+ * framedrop pattern as a burst of that size, then fills whichever
+ * ping/pong buffer slots are empty.  If requests were already pending
+ * the current hardware ping/pong status selects the slot; otherwise
+ * ping/pong are programmed explicitly and the write masters reloaded.
+ *
+ * Returns 0 on success (no-op for non-controllable streams) or a
+ * negative errno.
+ */
+static int msm_isp_request_frame(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t request_frm_num)
+{
+ /* Local cmd only reused as input to the framedrop calculator */
+ struct msm_vfe32_axi_stream_request_cmd stream_cfg_cmd;
+ int rc = 0;
+ uint32_t pingpong_status, pingpong_bit, wm_reload_mask = 0x0;
+
+ if (!stream_info->controllable_output)
+ return 0;
+
+ if (!request_frm_num) {
+ pr_err("%s: Invalid frame request.\n", __func__);
+ return -EINVAL;
+ }
+
+ stream_info->request_frm_num += request_frm_num;
+
+ stream_cfg_cmd.axi_stream_handle = stream_info->stream_handle;
+ stream_cfg_cmd.frame_skip_pattern = NO_SKIP;
+ stream_cfg_cmd.init_frame_drop = 0;
+ stream_cfg_cmd.burst_count = stream_info->request_frm_num;
+ msm_isp_calculate_framedrop(&vfe_dev->axi_data, &stream_cfg_cmd);
+ msm_isp_reset_framedrop(vfe_dev, stream_info);
+
+ /* Requests already pending: follow the live ping/pong status */
+ if (stream_info->request_frm_num != request_frm_num) {
+ pingpong_status =
+ vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(
+ vfe_dev);
+ pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+
+ if (!stream_info->buf[pingpong_bit]) {
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+ pingpong_status, pingpong_bit);
+ if (rc) {
+ pr_err("%s:%d fail to set ping pong address\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ }
+
+ if (!stream_info->buf[!pingpong_bit] && request_frm_num > 1) {
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+ ~pingpong_status, !pingpong_bit);
+ if (rc) {
+ pr_err("%s:%d fail to set ping pong address\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ }
+ } else {
+ /* First request: program ping (and pong if >1) explicitly */
+ if (!stream_info->buf[0]) {
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+ VFE_PING_FLAG, 0);
+ if (rc) {
+ pr_err("%s:%d fail to set ping pong address\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ }
+
+ if (!stream_info->buf[1] && request_frm_num > 1) {
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+ VFE_PONG_FLAG, 1);
+ if (rc) {
+ pr_err("%s:%d fail to set ping pong address\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ }
+
+ msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ wm_reload_mask);
+ }
+
+ return rc;
+}
+
+/*
+ * msm_isp_update_axi_stream - handler for the runtime stream update
+ * command (buf divert on/off, framedrop pattern, AXI plane config,
+ * frame requests).
+ *
+ * All stream handles are validated (index bound and ACTIVE/INACTIVE
+ * state) in a first pass before any update is applied.
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: the address-of expressions on the DISABLE_STREAM_BUF_DIVERT
+ * path had been corrupted to the mojibake token "×tamp"; restored to
+ * "&timestamp" / "&timestamp.buf_time".
+ */
+int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, i, j;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
+ struct msm_vfe_axi_stream_cfg_update_info *update_info;
+ uint32_t frame_id;
+ struct msm_isp_timestamp timestamp;
+
+ /* Reject AXI reconfig while a previous one is still in flight */
+ if (update_cmd->update_type == UPDATE_STREAM_AXI_CONFIG &&
+ atomic_read(&axi_data->axi_cfg_update)) {
+ pr_err("%s: AXI stream config updating\n", __func__);
+ return -EBUSY;
+ }
+
+ /*num_stream is uint32 and update_info[] bound by MAX_NUM_STREAM*/
+ if (update_cmd->num_streams > MAX_NUM_STREAM)
+ return -EINVAL;
+
+ /* First pass: validate every handle before touching anything */
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info = &update_cmd->update_info[i];
+ /*check array reference bounds*/
+ if (HANDLE_TO_IDX(update_info->stream_handle) >=
+ MAX_NUM_STREAM) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(update_info->stream_handle)];
+ if (stream_info->state != ACTIVE &&
+ stream_info->state != INACTIVE) {
+ pr_err("%s: Invalid stream state\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info = &update_cmd->update_info[i];
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(update_info->stream_handle)];
+
+ switch (update_cmd->update_type) {
+ case ENABLE_STREAM_BUF_DIVERT:
+ stream_info->buf_divert = 1;
+ break;
+ case DISABLE_STREAM_BUF_DIVERT:
+ /* Flush any diverted buffers back to the buf mgr */
+ msm_isp_get_timestamp(&timestamp, vfe_dev);
+ stream_info->buf_divert = 0;
+ frame_id = vfe_dev->axi_data.
+ src_info[SRC_TO_INTF(
+ stream_info->stream_src)].
+ frame_id;
+ vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+ stream_info->bufq_handle,
+ MSM_ISP_BUFFER_FLUSH_DIVERTED,
+ &timestamp.buf_time, frame_id);
+ break;
+ case UPDATE_STREAM_FRAMEDROP_PATTERN: {
+ uint32_t framedrop_period =
+ msm_isp_get_framedrop_period(
+ update_info->skip_pattern);
+ if (update_info->skip_pattern == SKIP_ALL)
+ stream_info->framedrop_pattern = 0x0;
+ else
+ stream_info->framedrop_pattern = 0x1;
+ stream_info->framedrop_period = framedrop_period - 1;
+ /* Burst streams defer the reg write to the IRQ path */
+ if (stream_info->stream_type == BURST_STREAM) {
+ stream_info->runtime_framedrop_update_burst = 1;
+ } else {
+ stream_info->runtime_init_frame_drop = 0;
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_framedrop(vfe_dev, stream_info);
+ }
+ break;
+ }
+ case UPDATE_STREAM_AXI_CONFIG: {
+ for (j = 0; j < stream_info->num_planes; j++) {
+ stream_info->plane_cfg[j] =
+ update_info->plane_cfg[j];
+ }
+ stream_info->output_format = update_info->output_format;
+ if (stream_info->state == ACTIVE) {
+ /* Pause the stream; resume happens after the
+ * deferred cfg update completes.
+ */
+ stream_info->state = PAUSE_PENDING;
+ msm_isp_axi_stream_enable_cfg(
+ vfe_dev, stream_info);
+ stream_info->state = PAUSING;
+ atomic_set(&axi_data->axi_cfg_update,
+ UPDATE_REQUESTED);
+ } else {
+ for (j = 0; j < stream_info->num_planes; j++)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, stream_info, j);
+ stream_info->runtime_output_format =
+ stream_info->output_format;
+ }
+ break;
+ }
+ case UPDATE_STREAM_REQUEST_FRAMES: {
+ /* NOTE(review): the frame_id field carries the number
+ * of frames requested for this update type -- confirm
+ * against the UAPI definition.
+ */
+ rc = msm_isp_request_frame(vfe_dev, stream_info,
+ update_info->frame_id);
+ if (rc)
+ pr_err("%s failed to request frame!\n",
+ __func__);
+ break;
+ }
+ default:
+ pr_err("%s: Invalid update type\n", __func__);
+ return -EINVAL;
+ }
+ }
+ return rc;
+}
+
+/*
+ * msm_isp_process_axi_irq - AXI write-master / composite done-IRQ
+ * handler.
+ *
+ * First services composite IRQs (multi-WM streams), then individual
+ * write-master IRQs for WMs not covered by a composite mask.  For each
+ * completed stream: decrement the burst count, collect the done
+ * buffer, re-arm the finished ping/pong slot (continuous streams or
+ * bursts with frames remaining), and deliver the buffer.
+ */
+void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ int i, rc = 0;
+ struct msm_isp_buffer *done_buf = NULL;
+ uint32_t comp_mask = 0, wm_mask = 0;
+ uint32_t pingpong_status, stream_idx;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_composite_info *comp_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t pingpong_bit = 0, frame_id = 0;
+
+ comp_mask = vfe_dev->hw_info->vfe_ops.axi_ops.
+ get_comp_mask(irq_status0, irq_status1);
+ wm_mask = vfe_dev->hw_info->vfe_ops.axi_ops.
+ get_wm_mask(irq_status0, irq_status1);
+ /* Nothing to service */
+ if (!(comp_mask || wm_mask))
+ return;
+
+ ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0);
+ pingpong_status =
+ vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
+ /* Composite IRQs: one IRQ covers all WMs of a stream */
+ for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
+ rc = 0;
+ comp_info = &axi_data->composite_info[i];
+ if (comp_mask & (1 << i)) {
+ stream_idx = HANDLE_TO_IDX(comp_info->stream_handle);
+ if ((!comp_info->stream_handle) ||
+ (stream_idx >= MAX_NUM_STREAM)) {
+ pr_err("%s: Invalid handle for composite irq\n",
+ __func__);
+ } else {
+ stream_idx =
+ HANDLE_TO_IDX(comp_info->stream_handle);
+ stream_info =
+ &axi_data->stream_info[stream_idx];
+
+ /* Completed slot is the inverse of status */
+ pingpong_bit = (~(pingpong_status >>
+ stream_info->wm[0]) & 0x1);
+
+ if (stream_info->stream_type == BURST_STREAM)
+ stream_info->
+ runtime_num_burst_capture--;
+
+ msm_isp_get_done_buf(vfe_dev, stream_info,
+ pingpong_status, &done_buf);
+ /* Re-arm the slot unless the burst is done */
+ if (stream_info->stream_type ==
+ CONTINUOUS_STREAM ||
+ stream_info->
+ runtime_num_burst_capture > 1) {
+ rc = msm_isp_cfg_ping_pong_address(
+ vfe_dev, stream_info,
+ pingpong_status,
+ pingpong_bit);
+ }
+ frame_id = vfe_dev->axi_data.
+ src_info[SRC_TO_INTF(
+ stream_info->stream_src)].
+ frame_id;
+ stream_info->frame_id = frame_id;
+ ISP_DBG("%s: stream id:%d frame id:%d\n",
+ __func__, stream_info->stream_id,
+ stream_info->frame_id);
+ if (done_buf && !rc)
+ msm_isp_process_done_buf(vfe_dev,
+ stream_info, done_buf, ts);
+ }
+ }
+ /* WMs handled by the composite need no individual service */
+ wm_mask &= ~(comp_info->stream_composite_mask);
+ }
+
+ /* Remaining individual write-master IRQs */
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (wm_mask & (1 << i)) {
+ stream_idx = HANDLE_TO_IDX(axi_data->free_wm[i]);
+ if ((!axi_data->free_wm[i]) ||
+ (stream_idx >= MAX_NUM_STREAM)) {
+ pr_err("%s: Invalid handle for wm irq\n",
+ __func__);
+ continue;
+ }
+ stream_info = &axi_data->stream_info[stream_idx];
+
+ pingpong_bit = (~(pingpong_status >>
+ stream_info->wm[0]) & 0x1);
+
+ if (stream_info->stream_type == BURST_STREAM)
+ stream_info->runtime_num_burst_capture--;
+
+ msm_isp_get_done_buf(vfe_dev, stream_info,
+ pingpong_status, &done_buf);
+ if (stream_info->stream_type == CONTINUOUS_STREAM ||
+ stream_info->runtime_num_burst_capture > 1) {
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, pingpong_status,
+ pingpong_bit);
+ }
+ /* NOTE(review): frame_id here is whatever the
+ * composite loop last computed (0 if none) -- it is
+ * never refreshed from src_info in this loop; verify
+ * this is intentional.
+ */
+ stream_info->frame_id = frame_id;
+ ISP_DBG("%s: stream id:%d frame id:%d\n",
+ __func__, stream_info->stream_id,
+ stream_info->frame_id);
+ if (done_buf && !rc)
+ msm_isp_process_done_buf(vfe_dev,
+ stream_info, done_buf, ts);
+ }
+ }
+}
+/*
+ * msm_isp_user_buf_done - userspace-initiated buffer-done path.
+ *
+ * Copies the relevant fields out of the user command into a local
+ * event and forwards the buffer to the buffer manager's buf_done op.
+ * Always returns 0.
+ */
+int msm_isp_user_buf_done(struct vfe_device *vfe_dev,
+ struct msm_isp32_event_data *buf_cmd)
+{
+ int rc = 0;
+ struct msm_isp32_event_data buf_event;
+
+ memset(&buf_event, 0, sizeof(buf_event));
+ buf_event.input_intf = buf_cmd->input_intf;
+ buf_event.frame_id = buf_cmd->frame_id;
+ buf_event.timestamp = buf_cmd->timestamp;
+ buf_event.u.buf_done.session_id =
+ buf_cmd->u.buf_done.session_id;
+ buf_event.u.buf_done.stream_id =
+ buf_cmd->u.buf_done.stream_id;
+ buf_event.u.buf_done.output_format =
+ buf_cmd->u.buf_done.output_format;
+ buf_event.u.buf_done.buf_idx =
+ buf_cmd->u.buf_done.buf_idx;
+ buf_event.u.buf_done.handle =
+ buf_cmd->u.buf_done.handle;
+
+ vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+ buf_event.u.buf_done.handle,
+ buf_event.u.buf_done.buf_idx,
+ &buf_event.timestamp, buf_event.frame_id,
+ buf_event.u.buf_done.output_format);
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.h
new file mode 100644
index 0000000..5d89a84
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util_32.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_ISP_AXI_UTIL_H__
+#define __MSM_ISP_AXI_UTIL_H__
+
+#include "msm_isp_32.h"
+
+/* AXI stream helpers for the 32-bit ISP driver. */
+
+/* Stream bookkeeping: create/destroy and request validation */
+int msm_isp_axi_create_stream(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd);
+
+void msm_isp_axi_destroy_stream(
+ struct msm_vfe_axi_shared_data *axi_data, int stream_idx);
+
+int msm_isp_validate_axi_request(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd);
+
+/* Write-master / composite-mask reservation */
+void msm_isp_axi_reserve_wm(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info);
+
+void msm_isp_axi_reserve_comp_mask(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info);
+
+int msm_isp_axi_check_stream_state(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd);
+
+/* Framedrop configuration */
+void msm_isp_calculate_framedrop(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe32_axi_stream_request_cmd *stream_cfg_cmd);
+void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+
+/* ioctl-level stream control entry points */
+void msm_isp_start_avtimer(void);
+int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_axi_cfg_update(struct vfe_device *vfe_dev);
+int msm_isp_axi_halt(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_halt_cmd *halt_cmd);
+int msm_isp_axi_reset(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_reset_cmd *reset_cmd);
+int msm_isp_axi_restart(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_restart_cmd *restart_cmd);
+int msm_isp_user_buf_done(struct vfe_device *vfe_dev,
+ struct msm_isp32_event_data *buf_cmd);
+void msm_isp_axi_stream_update(struct vfe_device *vfe_dev,
+ uint8_t input_src);
+
+/* IRQ-context helpers */
+void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev,
+ uint8_t input_src);
+void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
+ enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts);
+void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+void msm_isp_axi_free_wm(struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info);
+#endif /* __MSM_ISP_AXI_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.c
new file mode 100644
index 0000000..8273298
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.c
@@ -0,0 +1,709 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_isp.h>
+#include "msm_isp_util_32.h"
+#include "msm_isp_stats_util_32.h"
+
+/*
+ * msm_isp_stats_cfg_ping_pong_address - fetch a free stats buffer and
+ * program it into the inactive ping/pong slot for this stats stream.
+ *
+ * The buffer previously occupying that slot, if any, is returned
+ * through @done_buf (may be NULL if the caller does not want it).
+ *
+ * Returns 0 on success; negative errno on bad index, no free buffer
+ * (counted as a framedrop), or a multi-plane buffer.
+ */
+static int msm_isp_stats_cfg_ping_pong_address(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status,
+ struct msm_isp_buffer **done_buf)
+{
+ int rc = -1;
+ struct msm_isp_buffer *buf;
+ uint32_t pingpong_bit = 0;
+ uint32_t bufq_handle = stream_info->bufq_handle;
+ uint32_t stats_pingpong_offset;
+ uint32_t stats_idx = STATS_IDX(stream_info->stream_handle);
+
+ if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type ||
+ stats_idx >= MSM_ISP_STATS_MAX) {
+ pr_err("%s Invalid stats index %d", __func__, stats_idx);
+ return -EINVAL;
+ }
+
+ stats_pingpong_offset =
+ vfe_dev->hw_info->stats_hw_info->stats_ping_pong_offset[
+ stats_idx];
+
+ /* Slot to fill is the inverse of the current status bit */
+ pingpong_bit = (~(pingpong_status >> stats_pingpong_offset) & 0x1);
+ rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
+ vfe_dev->pdev->id, bufq_handle,
+ MSM_ISP_INVALID_BUF_INDEX, &buf);
+ if (rc < 0) {
+ /* No buffer available: account it as a stats framedrop */
+ vfe_dev->error_info.stats_framedrop_count[stats_idx]++;
+ return rc;
+ }
+
+ if (buf->num_planes != 1) {
+ pr_err("%s: Invalid buffer\n", __func__);
+ rc = -EINVAL;
+ goto buf_error;
+ }
+
+ vfe_dev->hw_info->vfe_ops.stats_ops.update_ping_pong_addr(
+ vfe_dev, stream_info,
+ pingpong_status, buf->mapped_info[0].paddr +
+ stream_info->buffer_offset);
+
+ /* Hand the displaced buffer back to the caller */
+ if (stream_info->buf[pingpong_bit] && done_buf)
+ *done_buf = stream_info->buf[pingpong_bit];
+
+ stream_info->buf[pingpong_bit] = buf;
+ return 0;
+buf_error:
+ vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ return rc;
+}
+
+/*
+ * msm_isp_process_stats_irq - stats done-IRQ handler.
+ *
+ * Reconciles individual stats IRQ bits with the composite masks (an
+ * IRQ bit covered by a composite mask is only serviced when that
+ * composite fires), rotates the ping/pong buffer for each completed
+ * stats stream, diverts the done buffer to userspace and emits either
+ * a per-type STATS_NOTIFY event or one composite COMP_STATS_NOTIFY
+ * event per composite mask.
+ */
+void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ int i, j, rc;
+ struct msm_isp32_event_data buf_event;
+ struct msm_isp_stats_event *stats_event = &buf_event.u.stats;
+ struct msm_isp_buffer *done_buf;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ uint32_t pingpong_status;
+ uint32_t comp_stats_type_mask = 0, atomic_stats_mask = 0;
+ uint32_t stats_comp_mask = 0, stats_irq_mask = 0;
+ uint32_t num_stats_comp_mask =
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+ stats_comp_mask = vfe_dev->hw_info->vfe_ops.stats_ops.
+ get_comp_mask(irq_status0, irq_status1);
+ stats_irq_mask = vfe_dev->hw_info->vfe_ops.stats_ops.
+ get_wm_mask(irq_status0, irq_status1);
+ if (!(stats_comp_mask || stats_irq_mask))
+ return;
+ ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0);
+
+ /*
+ * If any of composite mask is set, clear irq bits from mask,
+ * they will be restored by comp mask
+ */
+ if (stats_comp_mask) {
+ for (j = 0; j < num_stats_comp_mask; j++) {
+ stats_irq_mask &= ~atomic_read(
+ &vfe_dev->stats_data.stats_comp_mask[j]);
+ }
+ }
+
+ for (j = 0; j < num_stats_comp_mask; j++) {
+ atomic_stats_mask = atomic_read(
+ &vfe_dev->stats_data.stats_comp_mask[j]);
+ if (!stats_comp_mask) {
+ stats_irq_mask &= ~atomic_stats_mask;
+ } else {
+ /* restore irq bits from composite mask */
+ if (stats_comp_mask & (1 << j))
+ stats_irq_mask |= atomic_stats_mask;
+ }
+ /* if no irq bits set from this composite mask continue*/
+ if (!stats_irq_mask)
+ continue;
+ memset(&buf_event, 0, sizeof(struct msm_isp32_event_data));
+ buf_event.timestamp = ts->event_time;
+ buf_event.frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ buf_event.input_intf = VFE_PIX_0;
+ pingpong_status = vfe_dev->hw_info->
+ vfe_ops.stats_ops.get_pingpong_status(vfe_dev);
+
+ /* Service every stats type pending in this pass */
+ for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type;
+ i++) {
+ if (!(stats_irq_mask & (1 << i)))
+ continue;
+
+ stats_irq_mask &= ~(1 << i);
+ stream_info = &vfe_dev->stats_data.stream_info[i];
+ done_buf = NULL;
+ /* Rotate ping/pong; done_buf is the finished buffer */
+ msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+ stream_info, pingpong_status, &done_buf);
+ if (done_buf) {
+ rc = vfe_dev->buf_mgr->ops->buf_divert(
+ vfe_dev->buf_mgr, done_buf->bufq_handle,
+ done_buf->buf_idx, &ts->buf_time,
+ vfe_dev->axi_data.
+ src_info[VFE_PIX_0].frame_id);
+ if (rc != 0)
+ continue;
+
+ stats_event->stats_buf_idxs
+ [stream_info->stats_type] =
+ done_buf->buf_idx;
+ if (!stream_info->composite_flag) {
+ /* Standalone stream: notify now */
+ stats_event->stats_mask =
+ 1 << stream_info->stats_type;
+ ISP_DBG("%s: stats frameid: 0x%x %d\n",
+ __func__, buf_event.frame_id,
+ stream_info->stats_type);
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_STATS_NOTIFY +
+ stream_info->stats_type,
+ &buf_event);
+ } else {
+ /* Composite member: batch for below */
+ comp_stats_type_mask |=
+ 1 << stream_info->stats_type;
+ }
+ }
+ }
+
+ if (comp_stats_type_mask) {
+ ISP_DBG("%s: comp_stats frameid: 0x%x, 0x%x\n",
+ __func__, buf_event.frame_id,
+ comp_stats_type_mask);
+ stats_event->stats_mask = comp_stats_type_mask;
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_COMP_STATS_NOTIFY, &buf_event);
+ comp_stats_type_mask = 0;
+ }
+ }
+}
+
+/*
+ * msm_isp_stats_create_stream - validate a stats stream request and
+ * populate the per-type stream slot.
+ *
+ * The returned stream handle encodes a rolling 24-bit counter in the
+ * upper bits and the stats index in the low byte
+ * ((count << 8) | stats_idx), so STATS_IDX() can recover the slot.
+ *
+ * Returns 0 on success; -EINVAL on a bad index; -1 on an unsupported
+ * type, busy slot or bad framedrop/subsample pattern.
+ */
+int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream_request_cmd *stream_req_cmd)
+{
+ int rc = -1;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ uint32_t stats_idx;
+
+ if (!(vfe_dev->hw_info->stats_hw_info->stats_capability_mask &
+ (1 << stream_req_cmd->stats_type))) {
+ pr_err("%s: Stats type not supported\n", __func__);
+ return rc;
+ }
+
+ stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
+ get_stats_idx(stream_req_cmd->stats_type);
+
+ if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+ pr_err("%s Invalid stats index %d", __func__, stats_idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[stats_idx];
+ if (stream_info->state != STATS_AVAILABLE) {
+ pr_err("%s: Stats already requested\n", __func__);
+ return rc;
+ }
+
+ if (stream_req_cmd->framedrop_pattern >= MAX_SKIP) {
+ pr_err("%s: Invalid framedrop pattern\n", __func__);
+ return rc;
+ }
+
+ if (stream_req_cmd->irq_subsample_pattern >= MAX_SKIP) {
+ pr_err("%s: Invalid irq subsample pattern\n", __func__);
+ return rc;
+ }
+
+ stream_info->session_id = stream_req_cmd->session_id;
+ stream_info->stream_id = stream_req_cmd->stream_id;
+ stream_info->composite_flag = stream_req_cmd->composite_flag;
+ stream_info->stats_type = stream_req_cmd->stats_type;
+ stream_info->buffer_offset = stream_req_cmd->buffer_offset;
+ stream_info->framedrop_pattern = stream_req_cmd->framedrop_pattern;
+ stream_info->init_stats_frame_drop = stream_req_cmd->init_frame_drop;
+ stream_info->irq_subsample_pattern =
+ stream_req_cmd->irq_subsample_pattern;
+ stream_info->state = STATS_INACTIVE;
+
+ /* Skip a counter value that would make the handle's high bits 0 */
+ if ((vfe_dev->stats_data.stream_handle_cnt << 8) == 0)
+ vfe_dev->stats_data.stream_handle_cnt++;
+
+ stream_req_cmd->stream_handle =
+ (++vfe_dev->stats_data.stream_handle_cnt) << 8 | stats_idx;
+
+ stream_info->stream_handle = stream_req_cmd->stream_handle;
+ return 0;
+}
+
+/*
+ * msm_isp_request_stats_stream - REQUEST_STATS_STREAM handler.
+ *
+ * Creates the stream, programs the framedrop pattern/period, enables
+ * the per-WM IRQ for non-composite streams, and writes the WM
+ * registers immediately when no initial frame drop is requested
+ * (otherwise the IRQ path writes them once the drop count expires).
+ *
+ * Returns 0 on success or a negative/-1 error from creation.
+ */
+int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = -1;
+ struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ uint32_t framedrop_period;
+ uint32_t stats_idx;
+
+ rc = msm_isp_stats_create_stream(vfe_dev, stream_req_cmd);
+ if (rc < 0) {
+ pr_err("%s: create stream failed\n", __func__);
+ return rc;
+ }
+
+ stats_idx = STATS_IDX(stream_req_cmd->stream_handle);
+
+ if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+ pr_err("%s Invalid stats index %d", __func__, stats_idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[stats_idx];
+
+ framedrop_period = msm_isp_get_framedrop_period(
+ stream_req_cmd->framedrop_pattern);
+
+ /* SKIP_ALL disables the pattern entirely */
+ if (stream_req_cmd->framedrop_pattern == SKIP_ALL)
+ stream_info->framedrop_pattern = 0x0;
+ else
+ stream_info->framedrop_pattern = 0x1;
+ stream_info->framedrop_period = framedrop_period - 1;
+
+ if (!stream_info->composite_flag)
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ cfg_wm_irq_mask(vfe_dev, stream_info);
+
+ /* With an init frame drop the WM regs are written later, from
+ * msm_isp_update_stats_framedrop_reg().
+ */
+ if (stream_info->init_stats_frame_drop == 0)
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(vfe_dev,
+ stream_info);
+
+ return rc;
+}
+
+/*
+ * msm_isp_release_stats_stream - RELEASE_STATS_STREAM handler.
+ *
+ * Stops the stream first if it is still running, clears its IRQ mask
+ * and WM registers, then zeroes the slot so it reads STATS_AVAILABLE
+ * again.
+ *
+ * Returns 0 on success, -EINVAL on a bad index, -1 if already
+ * released.
+ */
+int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = -1;
+ struct msm_vfe_stats_stream_cfg_cmd stream_cfg_cmd;
+ struct msm_vfe_stats_stream_release_cmd *stream_release_cmd = arg;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ int stats_idx = STATS_IDX(stream_release_cmd->stream_handle);
+ struct msm_vfe_stats_stream *stream_info = NULL;
+
+ if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+ pr_err("%s Invalid stats index %d", __func__, stats_idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[stats_idx];
+ if (stream_info->state == STATS_AVAILABLE) {
+ pr_err("%s: stream already release\n", __func__);
+ return rc;
+ } else if (stream_info->state != STATS_INACTIVE) {
+ /* Still running: synthesize a disable command first */
+ stream_cfg_cmd.enable = 0;
+ stream_cfg_cmd.num_streams = 1;
+ stream_cfg_cmd.stream_handle[0] =
+ stream_release_cmd->stream_handle;
+ rc = msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
+ }
+
+ if (!stream_info->composite_flag)
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ clear_wm_irq_mask(vfe_dev, stream_info);
+
+ vfe_dev->hw_info->vfe_ops.stats_ops.clear_wm_reg(vfe_dev, stream_info);
+ /* Zeroing resets state to STATS_AVAILABLE (assumed value 0) */
+ memset(stream_info, 0, sizeof(struct msm_vfe_stats_stream));
+ return 0;
+}
+
+/*
+ * msm_isp_init_stats_ping_pong_reg - resolve the stream's buffer queue
+ * and program an initial buffer into both the ping and pong slots.
+ *
+ * Returns 0 on success; -EINVAL if no buffer queue is configured;
+ * negative errno if either slot cannot get a free buffer.
+ */
+static int msm_isp_init_stats_ping_pong_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int rc = 0;
+
+ stream_info->bufq_handle =
+ vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr, stream_info->session_id,
+ stream_info->stream_id);
+ if (stream_info->bufq_handle == 0) {
+ pr_err("%s: no buf configured for stream: 0x%x\n",
+ __func__, stream_info->stream_handle);
+ return -EINVAL;
+ }
+
+ rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PING_FLAG, NULL);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for ping\n", __func__);
+ return rc;
+ }
+ rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PONG_FLAG, NULL);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for pong\n", __func__);
+ return rc;
+ }
+ return rc;
+}
+
+/*
+ * msm_isp_deinit_stats_ping_pong_reg - return any buffers still held
+ * in the stream's ping/pong slots to the buffer manager.
+ */
+static void msm_isp_deinit_stats_ping_pong_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int i;
+ struct msm_isp_buffer *buf;
+
+ for (i = 0; i < 2; i++) {
+ buf = stream_info->buf[i];
+ if (buf)
+ vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ }
+}
+
+/*
+ * msm_isp_update_stats_framedrop_reg - per-frame tick for the initial
+ * stats frame-drop countdown.
+ *
+ * For each ACTIVE stats stream still counting down, decrement the
+ * counter and write the WM registers once it reaches zero (the write
+ * deferred by msm_isp_request_stats_stream()).
+ */
+void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev)
+{
+ int i;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+
+ for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+ stream_info = &stats_data->stream_info[i];
+ if (stream_info->state != STATS_ACTIVE)
+ continue;
+
+ if (stream_info->init_stats_frame_drop) {
+ stream_info->init_stats_frame_drop--;
+ if (stream_info->init_stats_frame_drop == 0) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(
+ vfe_dev, stream_info);
+ }
+ }
+ }
+}
+
+/*
+ * msm_isp_stats_stream_update - advance pending stats stream state
+ * machines (called from the update IRQ path).
+ *
+ * START/STOP_PENDING streams have their module enable and composite
+ * mask programmed and move to STARTING/STOPPING; STARTING/STOPPING
+ * streams settle to ACTIVE/INACTIVE.  When the stats_update counter
+ * reaches zero, waiters on stats_config_complete are released.
+ */
+void msm_isp_stats_stream_update(struct vfe_device *vfe_dev)
+{
+ int i;
+ /*
+ * NOTE(review): stats_mask/comp_stats_mask are built with "|= i"
+ * (index, not BIT(i)) and are never read afterwards -- they look
+ * vestigial; confirm before relying on them.
+ */
+ uint32_t stats_mask = 0, comp_stats_mask = 0;
+ uint32_t enable = 0;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+ for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+ if (stats_data->stream_info[i].state == STATS_START_PENDING ||
+ stats_data->stream_info[i].state ==
+ STATS_STOP_PENDING) {
+ stats_mask |= i;
+ enable = stats_data->stream_info[i].state ==
+ STATS_START_PENDING ? 1 : 0;
+ stats_data->stream_info[i].state =
+ stats_data->stream_info[i].state ==
+ STATS_START_PENDING ?
+ STATS_STARTING : STATS_STOPPING;
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+ vfe_dev, BIT(i), enable);
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+ vfe_dev, BIT(i), enable);
+ } else if (stats_data->stream_info[i].state == STATS_STARTING ||
+ stats_data->stream_info[i].state == STATS_STOPPING) {
+ if (stats_data->stream_info[i].composite_flag)
+ comp_stats_mask |= i;
+ stats_data->stream_info[i].state =
+ stats_data->stream_info[i].state ==
+ STATS_STARTING ? STATS_ACTIVE : STATS_INACTIVE;
+ }
+ }
+ /* Release the config waiter once the countdown hits zero */
+ atomic_sub(1, &stats_data->stats_update);
+ if (!atomic_read(&stats_data->stats_update))
+ complete(&vfe_dev->stats_config_complete);
+}
+
+/*
+ * msm_isp_stats_wait_for_cfg_done - Block until the stats stream state
+ * machine has applied a pending start/stop request.
+ *
+ * Arms stats_update to 2 (two reg-update cycles are needed for the
+ * PENDING -> STARTING/STOPPING -> ACTIVE/INACTIVE transition driven by
+ * msm_isp_stats_stream_update) and waits on stats_config_complete.
+ *
+ * Returns 0 on completion, -1 on timeout (VFE_MAX_CFG_TIMEOUT ms).
+ */
+static int msm_isp_stats_wait_for_cfg_done(struct vfe_device *vfe_dev)
+{
+	int rc;
+
+	init_completion(&vfe_dev->stats_config_complete);
+	atomic_set(&vfe_dev->stats_data.stats_update, 2);
+	rc = wait_for_completion_timeout(
+		&vfe_dev->stats_config_complete,
+		msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
+	if (rc == 0) {
+		pr_err("%s: wait timeout\n", __func__);
+		rc = -1;
+	} else {
+		rc = 0;
+	}
+	return rc;
+}
+
+/*
+ * msm_isp_stats_update_cgc_override - Apply/remove the clock-gating
+ * override for every stats stream named in @stream_cfg_cmd.
+ *
+ * Validates stream count and each handle's index, builds a bitmask of
+ * affected stats types and forwards it to the HW-specific
+ * update_cgc_override op (optional; skipped when not implemented).
+ *
+ * Returns 0 on success, -EINVAL on a bad count or index.
+ */
+static int msm_isp_stats_update_cgc_override(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i;
+	uint32_t stats_mask = 0, idx;
+
+	if (stream_cfg_cmd->num_streams > MSM_ISP_STATS_MAX) {
+		pr_err("%s invalid num_streams %d\n", __func__,
+			stream_cfg_cmd->num_streams);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+		if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s Invalid stats index %d", __func__, idx);
+			return -EINVAL;
+		}
+		stats_mask |= 1 << idx;
+	}
+
+	if (vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override) {
+		vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override(
+			vfe_dev, stats_mask, stream_cfg_cmd->enable);
+	}
+	return 0;
+}
+
+/*
+ * msm_isp_start_stats_stream - Start the stats streams listed in
+ * @stream_cfg_cmd.
+ *
+ * For each valid handle: primes the ping/pong buffers, marks the stream
+ * START_PENDING (if the PIX interface is streaming, so the change is
+ * applied at a reg-update boundary) or ACTIVE (idle HW, programmed
+ * immediately below), bumps num_active_stream and records the stream in
+ * the per-composite-group masks.
+ *
+ * NOTE(review): error returns taken mid-loop leave earlier streams
+ * started and num_active_stream incremented - callers appear to rely on
+ * a subsequent stop/release to unwind; confirm.
+ *
+ * Returns 0 on success, -EINVAL on validation failure, or the error
+ * from buffer priming / the cfg-done wait.
+ */
+static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i, rc = 0;
+	uint32_t stats_mask = 0, idx;
+	uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
+	uint32_t num_stats_comp_mask = 0;
+	struct msm_vfe_stats_stream *stream_info;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+	if (stream_cfg_cmd->num_streams > MSM_ISP_STATS_MAX) {
+		pr_err("%s invalid num_streams %d\n", __func__,
+			stream_cfg_cmd->num_streams);
+		return -EINVAL;
+	}
+
+	num_stats_comp_mask =
+		vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+	rc = vfe_dev->hw_info->vfe_ops.stats_ops.check_streams(
+		stats_data->stream_info);
+	if (rc < 0)
+		return rc;
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+		if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s Invalid stats index %d", __func__, idx);
+			return -EINVAL;
+		}
+
+		stream_info = &stats_data->stream_info[idx];
+		/* Stale/foreign handle: skip this entry, keep going. */
+		if (stream_info->stream_handle !=
+				stream_cfg_cmd->stream_handle[i]) {
+			pr_err("%s: Invalid stream handle: 0x%x received\n",
+				__func__, stream_cfg_cmd->stream_handle[i]);
+			continue;
+		}
+
+		if (stream_info->composite_flag > num_stats_comp_mask) {
+			pr_err("%s: comp grp %d exceed max %d\n",
+				__func__, stream_info->composite_flag,
+				num_stats_comp_mask);
+			return -EINVAL;
+		}
+		/* Queue ping and pong buffers before the stream can run. */
+		rc = msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
+		if (rc < 0) {
+			pr_err("%s: No buffer for stream%d type:%d stmID:0x%x\n",
+				__func__, idx, stream_info->stats_type,
+				stream_info->stream_id);
+			return rc;
+		}
+
+		if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
+			stream_info->state = STATS_START_PENDING;
+		else
+			stream_info->state = STATS_ACTIVE;
+
+		stats_data->num_active_stream++;
+		stats_mask |= 1 << idx;
+
+		/* composite_flag is 1-based; 0 means non-composite. */
+		if (stream_info->composite_flag > 0)
+			comp_stats_mask[stream_info->composite_flag-1] |=
+				1 << idx;
+
+		ISP_DBG("%s: stats_mask %x %x active streams %d\n",
+			__func__, comp_stats_mask[0],
+			comp_stats_mask[1],
+			stats_data->num_active_stream);
+
+	}
+
+	if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+		/* Streaming: let the reg-update state machine apply it. */
+		rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
+	} else {
+		/* Idle: program the module and composite masks directly. */
+		vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+			vfe_dev, stats_mask, stream_cfg_cmd->enable);
+		for (i = 0; i < num_stats_comp_mask; i++) {
+			vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+				vfe_dev, comp_stats_mask[i], 1);
+		}
+	}
+	return rc;
+}
+
+/*
+ * msm_isp_stop_stats_stream - Stop the stats streams listed in
+ * @stream_cfg_cmd and release their queued buffers.
+ *
+ * Mirror of msm_isp_start_stats_stream: marks each stream
+ * STOP_PENDING (PIX streaming) or INACTIVE (idle, HW written directly),
+ * decrements num_active_stream, then returns every ping/pong buffer via
+ * msm_isp_deinit_stats_ping_pong_reg.
+ *
+ * NOTE(review): num_streams is not validated against MSM_ISP_STATS_MAX
+ * here (the start path does validate) - the caller's check in
+ * msm_isp_cfg_stats_stream covers it; confirm no other callers exist.
+ *
+ * Returns 0 on success, -EINVAL on a bad index/composite group, or the
+ * error from the cfg-done wait.
+ */
+static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+	int i, rc = 0;
+	uint32_t stats_mask = 0, idx;
+	uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
+	uint32_t num_stats_comp_mask = 0;
+	struct msm_vfe_stats_stream *stream_info;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+	num_stats_comp_mask =
+		vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+
+		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+		if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s Invalid stats index %d", __func__, idx);
+			return -EINVAL;
+		}
+
+		stream_info = &stats_data->stream_info[idx];
+		/* Stale/foreign handle: skip this entry, keep going. */
+		if (stream_info->stream_handle !=
+				stream_cfg_cmd->stream_handle[i]) {
+			pr_err("%s: Invalid stream handle: 0x%x received\n",
+				__func__, stream_cfg_cmd->stream_handle[i]);
+			continue;
+		}
+
+		if (stream_info->composite_flag > num_stats_comp_mask) {
+			pr_err("%s: comp grp %d exceed max %d\n",
+				__func__, stream_info->composite_flag,
+				num_stats_comp_mask);
+			return -EINVAL;
+		}
+
+		if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
+			stream_info->state = STATS_STOP_PENDING;
+		else
+			stream_info->state = STATS_INACTIVE;
+
+		stats_data->num_active_stream--;
+		stats_mask |= 1 << idx;
+
+		/* composite_flag is 1-based; 0 means non-composite. */
+		if (stream_info->composite_flag > 0)
+			comp_stats_mask[stream_info->composite_flag-1] |=
+				1 << idx;
+
+		ISP_DBG("%s: stats_mask %x %x active streams %d\n",
+			__func__, comp_stats_mask[0],
+			comp_stats_mask[1],
+			stats_data->num_active_stream);
+	}
+
+	if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+		/* Streaming: wait for the reg-update state machine. */
+		rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
+	} else {
+		/* Idle: disable module and composite masks directly. */
+		vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+			vfe_dev, stats_mask, stream_cfg_cmd->enable);
+		for (i = 0; i < num_stats_comp_mask; i++) {
+			vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+				vfe_dev, comp_stats_mask[i], 0);
+		}
+	}
+
+	/* Second pass: return any buffers still held by the streams. */
+	for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+		idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+		if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s Invalid stats index %d", __func__, idx);
+			return -EINVAL;
+		}
+
+		stream_info = &stats_data->stream_info[idx];
+		msm_isp_deinit_stats_ping_pong_reg(vfe_dev, stream_info);
+	}
+	return rc;
+}
+
+/*
+ * msm_isp_cfg_stats_stream - IOCTL backend: start or stop a set of
+ * stats streams.
+ *
+ * On an enable request the CGC override is applied before starting;
+ * on disable the streams are stopped first and the override removed
+ * afterwards, so clocks are gated on around any HW access.
+ *
+ * Fix: validate num_streams *before* touching hardware. The original
+ * programmed the stats UB (cfg_ub) first, so a malformed request could
+ * still reconfigure the HW before being rejected.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0;
+	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg;
+
+	if (stream_cfg_cmd->num_streams > MSM_ISP_STATS_MAX) {
+		pr_err("%s invalid num_streams %d\n", __func__,
+			stream_cfg_cmd->num_streams);
+		return -EINVAL;
+	}
+
+	/* First stream being configured: (re)program the stats UB split. */
+	if (vfe_dev->stats_data.num_active_stream == 0)
+		vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
+
+	if (stream_cfg_cmd->enable) {
+		msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd);
+
+		rc = msm_isp_start_stats_stream(vfe_dev, stream_cfg_cmd);
+	} else {
+		rc = msm_isp_stop_stats_stream(vfe_dev, stream_cfg_cmd);
+
+		msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd);
+	}
+
+	return rc;
+}
+
+/*
+ * msm_isp_update_stats_stream - IOCTL backend: update runtime
+ * parameters (currently only the framedrop pattern) of stats streams.
+ *
+ * Pass 1 validates every handle's index so the request is rejected as a
+ * whole before any stream is modified; pass 2 applies the update,
+ * skipping entries whose handle no longer matches the stored stream.
+ *
+ * Fix: the bounds check used '>' so an index equal to num_stats_type
+ * slipped through and read/wrote one element past stream_info[] -
+ * changed to '>='.
+ *
+ * Returns 0 on success, -EINVAL on a bad index or update type.
+ */
+int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0, i;
+	struct msm_vfe_stats_stream *stream_info;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
+	struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL;
+
+	/*validate request*/
+	for (i = 0; i < update_cmd->num_streams; i++) {
+		update_info = &update_cmd->update_info[i];
+		/*check array reference bounds (index must be < count)*/
+		if (STATS_IDX(update_info->stream_handle)
+			>= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s: stats idx %d out of bound!", __func__,
+				STATS_IDX(update_info->stream_handle));
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < update_cmd->num_streams; i++) {
+		update_info = &update_cmd->update_info[i];
+		stream_info = &stats_data->stream_info[
+				STATS_IDX(update_info->stream_handle)];
+		/* Stale/foreign handle: skip this entry, keep going. */
+		if (stream_info->stream_handle !=
+			update_info->stream_handle) {
+			pr_err("%s: stats stream handle %x %x mismatch!\n",
+				__func__, stream_info->stream_handle,
+				update_info->stream_handle);
+			continue;
+		}
+
+		switch (update_cmd->update_type) {
+		case UPDATE_STREAM_STATS_FRAMEDROP_PATTERN: {
+			uint32_t framedrop_period =
+				msm_isp_get_framedrop_period(
+				   update_info->skip_pattern);
+			if (update_info->skip_pattern == SKIP_ALL)
+				stream_info->framedrop_pattern = 0x0;
+			else
+				stream_info->framedrop_pattern = 0x1;
+			stream_info->framedrop_period = framedrop_period - 1;
+			/* Only touch HW once the initial drop window ended. */
+			if (stream_info->init_stats_frame_drop == 0)
+				vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(
+					vfe_dev, stream_info);
+			break;
+		}
+
+		default:
+			pr_err("%s: Invalid update type\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.h
new file mode 100644
index 0000000..11c0c28
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util_32.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_ISP_STATS_UTIL_H__
+#define __MSM_ISP_STATS_UTIL_H__
+
+#include "msm_isp_32.h"
+/*
+ * Extract the stats-stream array index from a stream handle (low byte).
+ * Fix: parenthesize the macro argument so expressions such as
+ * STATS_IDX(a | b) expand correctly.
+ */
+#define STATS_IDX(idx) ((idx) & 0xFF)
+
+/* IRQ-time handler: dispatch completed stats buffers for both irq words. */
+void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
+	uint32_t irq_status0, uint32_t irq_status1,
+	struct msm_isp_timestamp *ts);
+/* Allocate and initialize one stats stream from a userspace request. */
+int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
+	struct msm_vfe_stats_stream_request_cmd *stream_req_cmd);
+/* Advance the start/stop state machine at a reg-update boundary. */
+void msm_isp_stats_stream_update(struct vfe_device *vfe_dev);
+/* IOCTL backends (arg points at the corresponding cmd struct). */
+int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg);
+/* Per-frame tick of the initial frame-drop countdown. */
+void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev);
+#endif /* __MSM_ISP_STATS_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.c
new file mode 100644
index 0000000..f6ca41c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.c
@@ -0,0 +1,1939 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <linux/ratelimit.h>
+
+#include "msm.h"
+#include "msm_isp_util_32.h"
+#include "msm_isp_axi_util_32.h"
+#include "msm_isp_stats_util_32.h"
+#include "msm_camera_io_util.h"
+#include "cam_smmu_api.h"
+
+#ifndef UINT16_MAX
+#define UINT16_MAX (65535U)
+#endif
+
+#define MAX_ISP_V4l2_EVENTS 100
+#define MAX_ISP_REG_LIST 100
+/* Serializes all access to isp_bandwidth_mgr and the bus vectors below. */
+static DEFINE_MUTEX(bandwidth_mgr_mutex);
+static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
+
+static uint64_t msm_isp_cpp_clk_rate;
+
+#define VFE40_8974V2_VERSION 0x1001001A
+/* Usecase 0: idle vote (no bandwidth). */
+static struct msm_bus_vectors msm_isp_init_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_VFE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 0,
+		.ib  = 0,
+	},
+};
+
+/*
+ * Usecases 1/2: two identical "active" slots; updates alternate between
+ * them (ALT_VECTOR_IDX) so a new vote never edits the live vector.
+ */
+static struct msm_bus_vectors msm_isp_ping_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_VFE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = MSM_ISP_MIN_AB,
+		.ib  = MSM_ISP_MIN_IB,
+	},
+};
+
+static struct msm_bus_vectors msm_isp_pong_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_VFE,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = MSM_ISP_MIN_AB,
+		.ib  = MSM_ISP_MIN_IB,
+	},
+};
+
+static struct msm_bus_paths msm_isp_bus_client_config[] = {
+	{
+		ARRAY_SIZE(msm_isp_init_vectors),
+		msm_isp_init_vectors,
+	},
+	{
+		ARRAY_SIZE(msm_isp_ping_vectors),
+		msm_isp_ping_vectors,
+	},
+	{
+		ARRAY_SIZE(msm_isp_pong_vectors),
+		msm_isp_pong_vectors,
+	},
+};
+
+/* NOTE(review): mixes positional and designated initializers - works,
+ * but field order must match struct msm_bus_scale_pdata; confirm.
+ */
+static struct msm_bus_scale_pdata msm_isp_bus_client_pdata = {
+	msm_isp_bus_client_config,
+	NULL,
+	ARRAY_SIZE(msm_isp_bus_client_config),
+	.name = "msm_camera_isp",
+	0,
+};
+
+
+/*
+ * msm_camera_io_dump_2 - Hex-dump @size bytes of register space at
+ * @addr to the kernel log, four 32-bit words per line.
+ *
+ * Lines are assembled in line_str with fixed-width snprintf fields
+ * (address prefix 18/10 chars depending on CONFIG_COMPAT, 9 chars per
+ * word); the p_str increments must stay in sync with those widths.
+ */
+void msm_camera_io_dump_2(void __iomem *addr, int size)
+{
+	char line_str[128], *p_str;
+	int i;
+	u32 __iomem *p = (u32 __iomem *) addr;
+	u32 data;
+
+	pr_err("%s: %pK %d\n", __func__, addr, size);
+	line_str[0] = '\0';
+	p_str = line_str;
+	for (i = 0; i < size/4; i++) {
+		if (i % 4 == 0) {
+#ifdef CONFIG_COMPAT
+			snprintf(p_str, 20, "%016lx: ", (unsigned long) p);
+			p_str += 18;
+#else
+			snprintf(p_str, 12, "%08lx: ", (unsigned long) p);
+			p_str += 10;
+#endif
+		}
+		data = readl_relaxed(p++);
+		snprintf(p_str, 12, "%08x ", data);
+		p_str += 9;
+		/* Flush a full line of four words. */
+		if ((i + 1) % 4 == 0) {
+			pr_err("%s\n", line_str);
+			line_str[0] = '\0';
+			p_str = line_str;
+		}
+	}
+	/* Flush the trailing partial line, if any. */
+	if (line_str[0] != '\0')
+		pr_err("%s\n", line_str);
+}
+
+/*
+ * msm_isp_print_fourcc_error - Log an invalid fourcc pixel format.
+ *
+ * Decodes the four bytes of @fourcc_format (LSB first) into ASCII; if
+ * every byte falls in the printable '0'..'z' range the fourcc is logged
+ * as text, otherwise the raw numeric value is logged instead.
+ */
+void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format)
+{
+	char text[5] = {'\0', '\0', '\0', '\0', '\0'};
+	int byte_idx = 0;
+
+	while (byte_idx < 4) {
+		char c = (char)((fourcc_format >> (byte_idx * 8)) & 0xFF);
+
+		if (c < '0' || c > 'z') {
+			pr_err("%s: Invalid output format %d (unprintable)\n",
+				origin, fourcc_format);
+			return;
+		}
+		text[byte_idx] = c;
+		byte_idx++;
+	}
+	pr_err("%s: Invalid output format %s\n",
+		origin, text);
+}
+
+/*
+ * msm_isp_init_bandwidth_mgr - Mark @client active and, on first use,
+ * register the shared bus-scale client and place an initial vote.
+ *
+ * Refcounted via use_count: only the first caller registers with the
+ * bus driver. NOTE(review): @client is not range-checked here (the
+ * deinit path does check) - callers appear to pass only valid enum
+ * values; confirm.
+ *
+ * Returns 0 on success, -EINVAL if bus client registration fails.
+ */
+int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client)
+{
+	int rc = 0;
+
+	mutex_lock(&bandwidth_mgr_mutex);
+	isp_bandwidth_mgr.client_info[client].active = 1;
+	/* Already registered by an earlier client: nothing more to do. */
+	if (isp_bandwidth_mgr.use_count++) {
+		mutex_unlock(&bandwidth_mgr_mutex);
+		return rc;
+	}
+	isp_bandwidth_mgr.bus_client =
+		msm_bus_scale_register_client(&msm_isp_bus_client_pdata);
+	if (!isp_bandwidth_mgr.bus_client) {
+		pr_err("%s: client register failed\n", __func__);
+		mutex_unlock(&bandwidth_mgr_mutex);
+		return -EINVAL;
+	}
+
+	/* Start on usecase 1 with the minimum active vote. */
+	isp_bandwidth_mgr.bus_vector_active_idx = 1;
+	msm_bus_scale_client_update_request(
+	   isp_bandwidth_mgr.bus_client,
+	   isp_bandwidth_mgr.bus_vector_active_idx);
+
+	mutex_unlock(&bandwidth_mgr_mutex);
+	return 0;
+}
+
+/*
+ * msm_isp_update_bandwidth - Re-vote the aggregate ISP bus bandwidth.
+ * @client: ISP HW client (VFE0/VFE1/CPP) updating its vote
+ * @ab:     arbitrated bandwidth requested by @client
+ * @ib:     instantaneous bandwidth requested by @client
+ *
+ * Stores the client's ab/ib, sums the votes of all active clients into
+ * the alternate bus-vector slot (so the live vector is never edited)
+ * and issues the new request; the request is also logged to the debug
+ * history ring.
+ *
+ * Fix: the inactive-manager error path returned without releasing
+ * bandwidth_mgr_mutex, deadlocking every later bandwidth call - unlock
+ * before returning.
+ *
+ * Returns 0 on success, -EINVAL if the manager is not initialized.
+ */
+int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
+	uint64_t ab, uint64_t ib)
+{
+	int i;
+	struct msm_bus_paths *path;
+
+	mutex_lock(&bandwidth_mgr_mutex);
+	if (!isp_bandwidth_mgr.use_count ||
+		!isp_bandwidth_mgr.bus_client) {
+		pr_err("%s:error bandwidth manager inactive use_cnt:%d bus_clnt:%d\n",
+			__func__, isp_bandwidth_mgr.use_count,
+			isp_bandwidth_mgr.bus_client);
+		mutex_unlock(&bandwidth_mgr_mutex);
+		return -EINVAL;
+	}
+
+	isp_bandwidth_mgr.client_info[client].ab = ab;
+	isp_bandwidth_mgr.client_info[client].ib = ib;
+	/* Flip to the alternate usecase slot before rebuilding the vote. */
+	ALT_VECTOR_IDX(isp_bandwidth_mgr.bus_vector_active_idx);
+	path =
+		&(msm_isp_bus_client_pdata.usecase[
+			isp_bandwidth_mgr.bus_vector_active_idx]);
+	path->vectors[0].ab = 0;
+	path->vectors[0].ib = 0;
+	for (i = 0; i < MAX_ISP_CLIENT; i++) {
+		if (isp_bandwidth_mgr.client_info[i].active) {
+			path->vectors[0].ab +=
+				isp_bandwidth_mgr.client_info[i].ab;
+			path->vectors[0].ib +=
+				isp_bandwidth_mgr.client_info[i].ib;
+		}
+	}
+	ISP_DBG("%s: Total AB = %llu IB = %llu\n", __func__,
+		path->vectors[0].ab, path->vectors[0].ib);
+	msm_bus_scale_client_update_request(isp_bandwidth_mgr.bus_client,
+		isp_bandwidth_mgr.bus_vector_active_idx);
+	/* Insert into circular buffer */
+	msm_isp_update_req_history(isp_bandwidth_mgr.bus_client,
+		path->vectors[0].ab,
+		path->vectors[0].ib,
+		isp_bandwidth_mgr.client_info,
+		sched_clock());
+	mutex_unlock(&bandwidth_mgr_mutex);
+	return 0;
+}
+
+/*
+ * msm_isp_deinit_bandwidth_mgr - Drop @client's vote; when the last
+ * client leaves, zero the bus request and unregister the bus client.
+ *
+ * Counterpart of msm_isp_init_bandwidth_mgr (refcounted via use_count).
+ */
+void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client)
+{
+	if (client >= MAX_ISP_CLIENT) {
+		pr_err("invalid Client id %d", client);
+		return;
+	}
+	mutex_lock(&bandwidth_mgr_mutex);
+	memset(&isp_bandwidth_mgr.client_info[client], 0,
+		sizeof(struct msm_isp_bandwidth_info));
+	/* Other clients still active: keep the bus client registered. */
+	if (--isp_bandwidth_mgr.use_count) {
+		mutex_unlock(&bandwidth_mgr_mutex);
+		return;
+	}
+
+	if (!isp_bandwidth_mgr.bus_client) {
+		pr_err("%s:%d error: bus client invalid\n", __func__, __LINE__);
+		mutex_unlock(&bandwidth_mgr_mutex);
+		return;
+	}
+
+	/* Vote usecase 0 (idle) before unregistering. */
+	msm_bus_scale_client_update_request(
+	   isp_bandwidth_mgr.bus_client, 0);
+	msm_bus_scale_unregister_client(isp_bandwidth_mgr.bus_client);
+	isp_bandwidth_mgr.bus_client = 0;
+	mutex_unlock(&bandwidth_mgr_mutex);
+}
+
+/*
+ * msm_isp_util_get_bandwidth_stats - Copy the current per-client
+ * bandwidth votes and clock rates into @stats for debug/monitor use.
+ *
+ * NOTE(review): reads isp_bandwidth_mgr without taking
+ * bandwidth_mgr_mutex - values may be torn if a vote update races;
+ * presumably acceptable for a stats snapshot, confirm.
+ */
+void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
+				      struct msm_isp_statistics *stats)
+{
+	stats->isp_vfe0_active = isp_bandwidth_mgr.client_info[ISP_VFE0].active;
+	stats->isp_vfe0_ab = isp_bandwidth_mgr.client_info[ISP_VFE0].ab;
+	stats->isp_vfe0_ib = isp_bandwidth_mgr.client_info[ISP_VFE0].ib;
+
+	stats->isp_vfe1_active = isp_bandwidth_mgr.client_info[ISP_VFE1].active;
+	stats->isp_vfe1_ab = isp_bandwidth_mgr.client_info[ISP_VFE1].ab;
+	stats->isp_vfe1_ib = isp_bandwidth_mgr.client_info[ISP_VFE1].ib;
+
+	stats->isp_cpp_active = isp_bandwidth_mgr.client_info[ISP_CPP].active;
+	stats->isp_cpp_ab = isp_bandwidth_mgr.client_info[ISP_CPP].ab;
+	stats->isp_cpp_ib = isp_bandwidth_mgr.client_info[ISP_CPP].ib;
+	stats->last_overflow_ab = vfe_dev->msm_isp_last_overflow_ab;
+	stats->last_overflow_ib = vfe_dev->msm_isp_last_overflow_ib;
+	stats->vfe_clk_rate = vfe_dev->msm_isp_vfe_clk_rate;
+	stats->cpp_clk_rate = msm_isp_cpp_clk_rate;
+}
+
+/*
+ * msm_isp_util_update_last_overflow_ab_ib - Snapshot the currently
+ * voted ab/ib into the VFE device for post-mortem of a bus overflow.
+ */
+void msm_isp_util_update_last_overflow_ab_ib(struct vfe_device *vfe_dev)
+{
+	struct msm_bus_paths *active_path =
+		&(msm_isp_bus_client_pdata.usecase[
+			isp_bandwidth_mgr.bus_vector_active_idx]);
+
+	vfe_dev->msm_isp_last_overflow_ab = active_path->vectors[0].ab;
+	vfe_dev->msm_isp_last_overflow_ib = active_path->vectors[0].ib;
+}
+
+/* Record the CPP clock rate for reporting via get_bandwidth_stats. */
+void msm_isp_util_update_clk_rate(long clock_rate)
+{
+	msm_isp_cpp_clk_rate = clock_rate;
+}
+
+/*
+ * msm_isp_get_framedrop_period - Convert a frame-skip pattern enum into
+ * its period in frames.
+ *
+ * EVERY_nFRAME patterns map to n (the enum values are n-1); SKIP_ALL
+ * and unknown patterns map to 1 (callers store period-1, so 1 yields a
+ * zero register value).
+ *
+ * Fix: removed the unreachable trailing 'return 1;' - every switch
+ * path, including default, already returns.
+ */
+uint32_t msm_isp_get_framedrop_period(
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern)
+{
+	switch (frame_skip_pattern) {
+	case NO_SKIP:
+	case EVERY_2FRAME:
+	case EVERY_3FRAME:
+	case EVERY_4FRAME:
+	case EVERY_5FRAME:
+	case EVERY_6FRAME:
+	case EVERY_7FRAME:
+	case EVERY_8FRAME:
+		return frame_skip_pattern + 1;
+	case EVERY_16FRAME:
+		return 16;
+	case EVERY_32FRAME:
+		return 32;
+	case SKIP_ALL:
+		return 1;
+	default:
+		return 1;
+	}
+}
+
+/*
+ * msm_isp_get_clk_info - Read clock names and rates for this VFE from
+ * the device tree into @vfe_clk_info and set vfe_dev->num_clk.
+ *
+ * "clock-names" supplies the names; "qcom,clock-rates" supplies one
+ * rate per clock, with 0 meaning "no fixed rate" (stored as -1).
+ *
+ * Returns 0 on success (including the no-clocks case), -EINVAL if more
+ * than VFE_CLK_INFO_MAX clocks are listed, or a DT read error.
+ */
+int msm_isp_get_clk_info(struct vfe_device *vfe_dev,
+	struct platform_device *pdev, struct msm_cam_clk_info *vfe_clk_info)
+{
+	int i, count, rc;
+	uint32_t rates[VFE_CLK_INFO_MAX];
+
+	struct device_node *of_node;
+
+	of_node = pdev->dev.of_node;
+
+	count = of_property_count_strings(of_node, "clock-names");
+
+	ISP_DBG("count = %d\n", count);
+	/* No clocks in DT is not an error for this driver. */
+	if (count <= 0) {
+		pr_err("no clocks found in device tree, count=%d", count);
+		return 0;
+	}
+
+	if (count > VFE_CLK_INFO_MAX) {
+		pr_err("invalid count=%d, max is %d\n", count,
+			VFE_CLK_INFO_MAX);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "clock-names",
+				i, &(vfe_clk_info[i].clk_name));
+		ISP_DBG("clock-names[%d] = %s\n", i, vfe_clk_info[i].clk_name);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			return rc;
+		}
+	}
+	rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+		rates, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return rc;
+	}
+	for (i = 0; i < count; i++) {
+		/* Rate 0 in DT means "leave rate unmanaged" (-1). */
+		vfe_clk_info[i].clk_rate =
+			(rates[i] == 0) ? (long)-1 : rates[i];
+		ISP_DBG("clk_rate[%d] = %ld\n", i, vfe_clk_info[i].clk_rate);
+	}
+	vfe_dev->num_clk = count;
+	return 0;
+}
+
+/*
+ * msm_isp_get_timestamp - Fill @time_stamp with the event wall-clock
+ * time plus a buffer timestamp.
+ *
+ * The buffer timestamp comes from the AV timer when vt_enable is set
+ * (for A/V sync), otherwise from monotonic boottime.
+ */
+void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
+	struct vfe_device *vfe_dev)
+{
+	struct timespec ts;
+
+	do_gettimeofday(&(time_stamp->event_time));
+	if (vfe_dev->vt_enable) {
+		msm_isp_get_avtimer_ts(time_stamp);
+		time_stamp->buf_time.tv_sec    = time_stamp->vt_time.tv_sec;
+		time_stamp->buf_time.tv_usec   = time_stamp->vt_time.tv_usec;
+	} else	{
+		get_monotonic_boottime(&ts);
+		time_stamp->buf_time.tv_sec    = ts.tv_sec;
+		time_stamp->buf_time.tv_usec   = ts.tv_nsec/1000;
+	}
+}
+
+/*
+ * msm_isp_subscribe_event - v4l2 subdev event-subscribe hook.
+ *
+ * On success, mirrors the subscription into axi_data.event_mask:
+ * V4L2_EVENT_ALL sets every ISP event bit, otherwise only the bit for
+ * (sub->type - ISP_EVENT_BASE).
+ *
+ * Returns the v4l2_event_subscribe result.
+ */
+int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+	int rc = 0;
+
+	rc = v4l2_event_subscribe(fh, sub, MAX_ISP_V4l2_EVENTS, NULL);
+	if (rc == 0) {
+		if (sub->type == V4L2_EVENT_ALL) {
+			int i;
+
+			vfe_dev->axi_data.event_mask = 0;
+			for (i = 0; i < ISP_EVENT_MAX; i++)
+				vfe_dev->axi_data.event_mask |= (1 << i);
+		} else {
+			int event_idx = sub->type - ISP_EVENT_BASE;
+
+			vfe_dev->axi_data.event_mask |= (1 << event_idx);
+		}
+	}
+	return rc;
+}
+
+/*
+ * msm_isp_unsubscribe_event - v4l2 subdev event-unsubscribe hook.
+ *
+ * Clears the corresponding bit(s) in axi_data.event_mask regardless of
+ * the v4l2_event_unsubscribe result, mirroring the subscribe path.
+ */
+int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+	int rc = 0;
+
+	rc = v4l2_event_unsubscribe(fh, sub);
+	if (sub->type == V4L2_EVENT_ALL) {
+		vfe_dev->axi_data.event_mask = 0;
+	} else {
+		int event_idx = sub->type - ISP_EVENT_BASE;
+
+		vfe_dev->axi_data.event_mask &= ~(1 << event_idx);
+	}
+	return rc;
+}
+
+/*
+ * msm_isp_get_max_clk_rate - Query the maximum achievable VFE core
+ * clock rate via clk_round_rate(ULONG_MAX) and store it in *@rate.
+ *
+ * Returns 0 on success, -EINVAL on bad arguments, a missing hw_info, a
+ * vfe_clk_idx out of range, or a negative round_rate result.
+ */
+static int msm_isp_get_max_clk_rate(struct vfe_device *vfe_dev, long *rate)
+{
+	int           clk_idx = 0;
+	unsigned long max_value = ~0;
+	long          round_rate = 0;
+
+	if (!vfe_dev || !rate) {
+		pr_err("%s:%d failed: vfe_dev %pK rate %pK\n",
+			__func__, __LINE__, vfe_dev, rate);
+		return -EINVAL;
+	}
+
+	*rate = 0;
+	if (!vfe_dev->hw_info) {
+		pr_err("%s:%d failed: vfe_dev->hw_info %pK\n", __func__,
+			__LINE__, vfe_dev->hw_info);
+		return -EINVAL;
+	}
+
+	/* The VFE core clock index is fixed per HW revision. */
+	clk_idx = vfe_dev->hw_info->vfe_clk_idx;
+	if (clk_idx >= vfe_dev->num_clk) {
+		pr_err("%s:%d failed: clk_idx %d max array size %d\n",
+			__func__, __LINE__, clk_idx,
+			vfe_dev->num_clk);
+		return -EINVAL;
+	}
+
+	round_rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], max_value);
+	if (round_rate < 0) {
+		pr_err("%s: Invalid vfe clock rate\n", __func__);
+		return -EINVAL;
+	}
+
+	*rate = round_rate;
+	return 0;
+}
+
+/*
+ * msm_isp_set_clk_rate - Round *@rate to a supported VFE core clock
+ * rate, apply it, and write the achieved rate back through @rate (also
+ * cached in vfe_dev->msm_isp_vfe_clk_rate).
+ *
+ * Returns 0 on success or the clk framework error.
+ */
+static int msm_isp_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
+{
+	int rc = 0;
+	int clk_idx = vfe_dev->hw_info->vfe_clk_idx;
+	long round_rate =
+		clk_round_rate(vfe_dev->vfe_clk[clk_idx], *rate);
+	if (round_rate < 0) {
+		pr_err("%s: Invalid vfe clock rate\n", __func__);
+		return round_rate;
+	}
+
+	rc = clk_set_rate(vfe_dev->vfe_clk[clk_idx], round_rate);
+	if (rc < 0) {
+		pr_err("%s: Vfe set rate error\n", __func__);
+		return rc;
+	}
+	*rate = round_rate;
+	vfe_dev->msm_isp_vfe_clk_rate = round_rate;
+	return 0;
+}
+
+/*
+ * msm_isp_fetch_engine_done_notify - Send ISP_EVENT_FE_READ_DONE to
+ * userspace when a fetch-engine read completes.
+ *
+ * No-op unless the engine was marked busy; clears is_busy before
+ * sending so a re-trigger cannot double-report.
+ */
+void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
+	struct msm_vfe_fetch_engine_info *fetch_engine_info)
+{
+	struct msm_isp32_event_data fe_rd_done_event;
+
+	if (!fetch_engine_info->is_busy)
+		return;
+	memset(&fe_rd_done_event, 0, sizeof(struct msm_isp32_event_data));
+	fe_rd_done_event.frame_id =
+		vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+	fe_rd_done_event.u.buf_done.session_id = fetch_engine_info->session_id;
+	fe_rd_done_event.u.buf_done.stream_id = fetch_engine_info->stream_id;
+	fe_rd_done_event.u.buf_done.handle = fetch_engine_info->bufq_handle;
+	fe_rd_done_event.u.buf_done.buf_idx = fetch_engine_info->buf_idx;
+	ISP_DBG("%s:VFE%d ISP_EVENT_FE_READ_DONE buf_idx %d\n",
+		__func__, vfe_dev->pdev->id, fetch_engine_info->buf_idx);
+	fetch_engine_info->is_busy = 0;
+	msm_isp_send_event(vfe_dev, ISP_EVENT_FE_READ_DONE, &fe_rd_done_event);
+}
+
+/*
+ * msm_isp_cfg_pix - Configure the PIX (camera interface) input path.
+ *
+ * Rejects the request if VFE_PIX_0 is already streaming. Records pixel
+ * clock / mux / format, sets the VFE core clock to match the requested
+ * pixel clock, derives the line width from the mux source (CAMIF
+ * pixels-per-line or fetch-engine buffer stride) and programs the input
+ * mux through the HW ops.
+ *
+ * Returns 0 on success, -EINVAL if active, or the clock error.
+ */
+static int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
+	struct msm_vfe_input_cfg *input_cfg)
+{
+	int rc = 0;
+
+	if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+		pr_err("%s: src %d path is active\n", __func__, VFE_PIX_0);
+		return -EINVAL;
+	}
+
+	vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock =
+		input_cfg->input_pix_clk;
+	vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
+		input_cfg->d.pix_cfg.input_mux;
+	vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
+		input_cfg->d.pix_cfg.input_format;
+
+	/* Core clock must keep up with the requested pixel clock. */
+	rc = msm_isp_set_clk_rate(vfe_dev,
+		&vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock);
+	if (rc < 0) {
+		pr_err("%s: clock set rate failed\n", __func__);
+		return rc;
+	}
+
+	ISP_DBG("%s: input mux is %d CAMIF %d io_format 0x%x\n", __func__,
+		input_cfg->d.pix_cfg.input_mux, CAMIF,
+		input_cfg->d.pix_cfg.input_format);
+
+	if (input_cfg->d.pix_cfg.input_mux == CAMIF) {
+		vfe_dev->axi_data.src_info[VFE_PIX_0].width =
+			input_cfg->d.pix_cfg.camif_cfg.pixels_per_line;
+	} else if (input_cfg->d.pix_cfg.input_mux == EXTERNAL_READ) {
+		vfe_dev->axi_data.src_info[VFE_PIX_0].width =
+			input_cfg->d.pix_cfg.fetch_engine_cfg.buf_stride;
+	}
+	vfe_dev->hw_info->vfe_ops.core_ops.cfg_input_mux(
+			vfe_dev, &input_cfg->d.pix_cfg);
+	return rc;
+}
+
+/*
+ * msm_isp_cfg_rdi - Configure one RDI (raw dump) input path.
+ *
+ * Rejects the request if the chosen RAW path is already streaming;
+ * otherwise records the pixel clock and programs the RDI registers via
+ * the HW ops. Unlike the PIX path, no core-clock change is made here.
+ *
+ * Returns 0 on success, -EINVAL if the path is active.
+ */
+static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
+	struct msm_vfe_input_cfg *input_cfg)
+{
+	int rc = 0;
+
+	if (vfe_dev->axi_data.src_info[input_cfg->input_src].active) {
+		pr_err("%s: RAW%d path is active\n", __func__,
+			   input_cfg->input_src - VFE_RAW_0);
+		return -EINVAL;
+	}
+
+	vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock =
+		input_cfg->input_pix_clk;
+	vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg(
+		vfe_dev, &input_cfg->d.rdi_cfg, input_cfg->input_src);
+	return rc;
+}
+
+/*
+ * msm_isp_cfg_input - IOCTL backend: configure one VFE input source.
+ *
+ * Dispatches to the PIX path for VFE_PIX_0, the RDI path for
+ * VFE_RAW_0..2, and rejects anything else with -EINVAL.
+ */
+int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
+{
+	struct msm_vfe_input_cfg *input_cfg = arg;
+
+	if (input_cfg->input_src == VFE_PIX_0)
+		return msm_isp_cfg_pix(vfe_dev, input_cfg);
+
+	if (input_cfg->input_src == VFE_RAW_0 ||
+		input_cfg->input_src == VFE_RAW_1 ||
+		input_cfg->input_src == VFE_RAW_2)
+		return msm_isp_cfg_rdi(vfe_dev, input_cfg);
+
+	pr_err("%s: Invalid input source\n", __func__);
+	return -EINVAL;
+}
+
+/*
+ * msm_isp_proc_cmd_list_unlocked - Process a userspace-supplied linked
+ * list of register-config commands (native ABI).
+ *
+ * Executes the head command, then walks the user-space 'next' chain,
+ * copying each node in and executing it. The walk stops on a bad
+ * next_size or after MAX_ISP_REG_LIST nodes (guards against malicious
+ * or cyclic lists).
+ *
+ * NOTE(review): on copy_from_user failure the loop 'continue's with
+ * the same cmd.next, so it retries the same node until the
+ * MAX_ISP_REG_LIST counter trips - 'break' would be cheaper; confirm
+ * intent.
+ *
+ * Returns the last command's status (individual failures are logged
+ * but do not stop the walk), or -EFAULT on list errors.
+ */
+static int msm_isp_proc_cmd_list_unlocked(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0;
+	uint32_t count = 0;
+	struct msm_vfe_cfg_cmd_list *proc_cmd =
+		(struct msm_vfe_cfg_cmd_list *)arg;
+	struct msm_vfe_cfg_cmd_list cmd, cmd_next;
+	struct msm_vfe_cfg_cmd2 cfg_cmd;
+
+	if (!vfe_dev || !arg) {
+		pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
+			vfe_dev, arg);
+		return -EINVAL;
+	}
+
+	rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd->cfg_cmd);
+	if (rc < 0)
+		pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+	cmd = *proc_cmd;
+
+	while (cmd.next) {
+		if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list)) {
+			pr_err("%s:%d failed: next size %u != expected %zu\n",
+				__func__, __LINE__, cmd.next_size,
+				sizeof(struct msm_vfe_cfg_cmd_list));
+			break;
+		}
+		if (++count >= MAX_ISP_REG_LIST) {
+			pr_err("%s:%d Error exceeding the max register count:%u\n",
+				__func__, __LINE__, count);
+			rc = -EFAULT;
+			break;
+		}
+		if (copy_from_user(&cmd_next, (void __user *)cmd.next,
+			sizeof(struct msm_vfe_cfg_cmd_list))) {
+			rc = -EFAULT;
+			continue;
+		}
+
+		cfg_cmd = cmd_next.cfg_cmd;
+
+		rc = msm_isp_proc_cmd(vfe_dev, &cfg_cmd);
+		if (rc < 0)
+			pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+		cmd = cmd_next;
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+/* 32-bit userspace layouts of the reg-config command structures:
+ * pointers are carried as compat_caddr_t and widened with compat_ptr().
+ */
+struct msm_vfe_cfg_cmd2_32 {
+	uint16_t num_cfg;
+	uint16_t cmd_len;
+	compat_caddr_t cfg_data;
+	compat_caddr_t cfg_cmd;
+};
+
+struct msm_vfe_cfg_cmd_list_32 {
+	struct msm_vfe_cfg_cmd2_32 cfg_cmd;
+	compat_caddr_t next;
+	uint32_t next_size;
+};
+
+/* Compat ioctl numbers: same command codes, 32-bit payload structs. */
+#define VIDIOC_MSM_VFE_REG_CFG_COMPAT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_vfe_cfg_cmd2_32)
+#define VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT \
+	_IOWR('V', BASE_VIDIOC_PRIVATE+14, struct msm_vfe_cfg_cmd_list_32)
+
+/* Widen a 32-bit cfg command into the native representation. */
+static void msm_isp_compat_to_proc_cmd(struct msm_vfe_cfg_cmd2 *proc_cmd,
+	struct msm_vfe_cfg_cmd2_32 *proc_cmd_ptr32)
+{
+	proc_cmd->num_cfg = proc_cmd_ptr32->num_cfg;
+	proc_cmd->cmd_len = proc_cmd_ptr32->cmd_len;
+	proc_cmd->cfg_data = compat_ptr(proc_cmd_ptr32->cfg_data);
+	proc_cmd->cfg_cmd = compat_ptr(proc_cmd_ptr32->cfg_cmd);
+}
+
+/*
+ * msm_isp_proc_cmd_list_compat - 32-bit-ABI twin of
+ * msm_isp_proc_cmd_list_unlocked: each node is copied in as
+ * msm_vfe_cfg_cmd_list_32 and widened before execution.
+ *
+ * Same list-walk guards (next_size check, MAX_ISP_REG_LIST cap) and
+ * the same retry-on-copy-failure caveat as the native version.
+ */
+static int msm_isp_proc_cmd_list_compat(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0;
+	uint32_t count = 0;
+	struct msm_vfe_cfg_cmd_list_32 *proc_cmd =
+		(struct msm_vfe_cfg_cmd_list_32 *)arg;
+	struct msm_vfe_cfg_cmd_list_32 cmd, cmd_next;
+	struct msm_vfe_cfg_cmd2 current_cmd;
+
+	if (!vfe_dev || !arg) {
+		pr_err("%s:%d failed: vfe_dev %pK arg %pK", __func__, __LINE__,
+			vfe_dev, arg);
+		return -EINVAL;
+	}
+	msm_isp_compat_to_proc_cmd(&current_cmd, &proc_cmd->cfg_cmd);
+	rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
+	if (rc < 0)
+		pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+	cmd = *proc_cmd;
+
+	while (compat_ptr(cmd.next) != NULL) {
+		/* NOTE(review): the error message prints the native struct
+		 * size but compares against the _32 size - message only.
+		 */
+		if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list_32)) {
+			pr_err("%s:%d failed: next size %u != expected %zu\n",
+				__func__, __LINE__, cmd.next_size,
+				sizeof(struct msm_vfe_cfg_cmd_list));
+			break;
+		}
+		if (++count >= MAX_ISP_REG_LIST) {
+			pr_err("%s:%d Error exceeding the max register count:%u\n",
+				__func__, __LINE__, count);
+			rc = -EFAULT;
+			break;
+		}
+		if (copy_from_user(&cmd_next, compat_ptr(cmd.next),
+			sizeof(struct msm_vfe_cfg_cmd_list_32))) {
+			rc = -EFAULT;
+			continue;
+		}
+
+		msm_isp_compat_to_proc_cmd(&current_cmd, &cmd_next.cfg_cmd);
+		rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
+		if (rc < 0)
+			pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+		cmd = cmd_next;
+	}
+	return rc;
+}
+
+/*
+ * msm_isp_proc_cmd_list - ABI-dispatching wrapper: route to the compat
+ * walker for 32-bit callers, the native walker otherwise. Without
+ * CONFIG_COMPAT only the native path exists.
+ */
+static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
+{
+	if (is_compat_task())
+		return msm_isp_proc_cmd_list_compat(vfe_dev, arg);
+	else
+		return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
+}
+#else /* CONFIG_COMPAT */
+static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
+{
+	return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
+}
+#endif /* CONFIG_COMPAT */
+
+
+static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ long rc = 0;
+ struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+
+ if (!vfe_dev || !vfe_dev->vfe_base) {
+ pr_err("%s:%d failed: invalid params %pK\n",
+ __func__, __LINE__, vfe_dev);
+ if (vfe_dev)
+ pr_err("%s:%d failed %pK\n", __func__,
+ __LINE__, vfe_dev->vfe_base);
+ return -EINVAL;
+ }
+
+ /* use real time mutex for hard real-time ioctls such as
+ * buffer operations and register updates.
+ * Use core mutex for other ioctls that could take
+ * longer time to complete such as start/stop ISP streams
+ * which blocks until the hardware start/stop streaming
+ */
+ ISP_DBG("%s cmd: %d\n", __func__, _IOC_TYPE(cmd));
+ switch (cmd) {
+ case VIDIOC_MSM_VFE_REG_CFG: {
+ mutex_lock(&vfe_dev->realtime_mutex);
+ rc = msm_isp_proc_cmd(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ break;
+ }
+ case VIDIOC_MSM_VFE_REG_LIST_CFG: {
+ mutex_lock(&vfe_dev->realtime_mutex);
+ rc = msm_isp_proc_cmd_list(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ break;
+ }
+ case VIDIOC_MSM_ISP_REQUEST_BUF:
+ case VIDIOC_MSM_ISP_ENQUEUE_BUF:
+ case VIDIOC_MSM_ISP_RELEASE_BUF: {
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ }
+ case VIDIOC_MSM_ISP32_REQUEST_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_request_axi_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_RELEASE_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_release_axi_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_CFG_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_cfg_axi_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_AXI_HALT:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_axi_halt(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_AXI_RESET:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_axi_reset(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_AXI_RESTART:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_axi_restart(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_INPUT_CFG:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_cfg_input(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_FETCH_ENG_START:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = vfe_dev->hw_info->vfe_ops.core_ops.
+ start_fetch_eng(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_REG_UPDATE_CMD:
+ if (arg) {
+ enum msm_vfe_input_src frame_src =
+ *((enum msm_vfe_input_src *)arg);
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, (1 << frame_src));
+ vfe_dev->axi_data.src_info[frame_src].last_updt_frm_id =
+ vfe_dev->axi_data.src_info[frame_src].frame_id;
+ }
+ break;
+ case VIDIOC_MSM_ISP_SET_SRC_STATE:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_set_src_state(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_REQUEST_STATS_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_request_stats_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_RELEASE_STATS_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_release_stats_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_CFG_STATS_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_update_stats_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_UPDATE_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_update_axi_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_SMMU_ATTACH:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_smmu_attach(vfe_dev->buf_mgr, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ vfe_dev->isp_sof_debug = 0;
+ break;
+ case VIDIOC_MSM_ISP_BUF_DONE:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_user_buf_done(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case MSM_SD_SHUTDOWN:
+ while (vfe_dev->vfe_open_cnt != 0)
+ msm_isp_close_node(sd, NULL);
+ break;
+
+ default:
+ pr_err_ratelimited("%s: Invalid ISP command\n", __func__);
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+
+#ifdef CONFIG_COMPAT
+/*
+ * Compat ioctl path for 32-bit userspace on a 64-bit kernel.
+ * Translates the REG_CFG payload from the 32-bit layout to the native
+ * struct msm_vfe_cfg_cmd2 before processing; all other commands fall
+ * through to msm_isp_ioctl_unlocked().
+ */
+static long msm_isp_ioctl_compat(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+	long rc = 0;
+
+	if (!vfe_dev || !vfe_dev->vfe_base) {
+		pr_err("%s:%d failed: invalid params %pK\n",
+			__func__, __LINE__, vfe_dev);
+		if (vfe_dev)
+			pr_err("%s:%d failed %pK\n", __func__,
+				__LINE__, vfe_dev->vfe_base);
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case VIDIOC_MSM_VFE_REG_CFG_COMPAT: {
+		struct msm_vfe_cfg_cmd2 proc_cmd;
+
+		mutex_lock(&vfe_dev->realtime_mutex);
+		msm_isp_compat_to_proc_cmd(&proc_cmd,
+			(struct msm_vfe_cfg_cmd2_32 *) arg);
+		rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		break;
+	}
+	case VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT: {
+		/*
+		 * NOTE(review): unlike REG_CFG_COMPAT above, the list
+		 * payload is handed through without a 32->64 layout
+		 * conversion — confirm the compat struct matches the
+		 * native layout for this command.
+		 */
+		mutex_lock(&vfe_dev->realtime_mutex);
+		rc = msm_isp_proc_cmd_list(vfe_dev, arg);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		break;
+	}
+	default:
+		return msm_isp_ioctl_unlocked(sd, cmd, arg);
+	}
+
+	return rc;
+}
+
+/* Subdev ioctl entry point; routes through the compat translator. */
+long msm_isp_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	return msm_isp_ioctl_compat(sd, cmd, arg);
+}
+#else /* CONFIG_COMPAT */
+/* Subdev ioctl entry point; no compat translation configured. */
+long msm_isp_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	return msm_isp_ioctl_unlocked(sd, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
+
+/*
+ * msm_isp_send_hw_cmd - validate and execute one register-config
+ * sub-command from userspace against the VFE register space.
+ *
+ * @vfe_dev:     target VFE device (provides vfe_base / vfe_mem).
+ * @reg_cfg_cmd: one command, already copied into kernel memory.
+ * @cfg_data:    kernel copy of the payload buffer; may be NULL only
+ *               for VFE_CFG_MASK.
+ * @cmd_len:     length of @cfg_data in bytes.
+ *
+ * Every offset/length pair is bounds-checked (including unsigned
+ * overflow of offset+len) against the register resource and @cmd_len
+ * before any MMIO is performed. Returns 0 on success, -EINVAL on any
+ * validation failure.
+ */
+static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
+	struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd,
+	uint32_t *cfg_data, uint32_t cmd_len)
+{
+	if (!vfe_dev || !reg_cfg_cmd) {
+		pr_err("%s:%d failed: vfe_dev %pK reg_cfg_cmd %pK\n", __func__,
+			__LINE__, vfe_dev, reg_cfg_cmd);
+		return -EINVAL;
+	}
+	if ((reg_cfg_cmd->cmd_type != VFE_CFG_MASK) &&
+		(!cfg_data || !cmd_len)) {
+		pr_err("%s:%d failed: cmd type %d cfg_data %pK cmd_len %d\n",
+			__func__, __LINE__, reg_cfg_cmd->cmd_type, cfg_data,
+			cmd_len);
+		return -EINVAL;
+	}
+
+	/* Validate input parameters */
+	switch (reg_cfg_cmd->cmd_type) {
+	case VFE_WRITE:
+	case VFE_READ:
+	case VFE_WRITE_MB: {
+		if ((reg_cfg_cmd->u.rw_info.reg_offset >
+			(UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
+			((reg_cfg_cmd->u.rw_info.reg_offset +
+			reg_cfg_cmd->u.rw_info.len) >
+			resource_size(vfe_dev->vfe_mem)) ||
+			(reg_cfg_cmd->u.rw_info.reg_offset & 0x3)) {
+			pr_err("%s:%d reg_offset %d len %d res %d\n",
+				__func__, __LINE__,
+				reg_cfg_cmd->u.rw_info.reg_offset,
+				reg_cfg_cmd->u.rw_info.len,
+				(uint32_t)resource_size(vfe_dev->vfe_mem));
+			return -EINVAL;
+		}
+
+		if ((reg_cfg_cmd->u.rw_info.cmd_data_offset >
+			(UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
+			((reg_cfg_cmd->u.rw_info.cmd_data_offset +
+			reg_cfg_cmd->u.rw_info.len) > cmd_len)) {
+			pr_err("%s:%d cmd_data_offset %d len %d cmd_len %d\n",
+				__func__, __LINE__,
+				reg_cfg_cmd->u.rw_info.cmd_data_offset,
+				reg_cfg_cmd->u.rw_info.len, cmd_len);
+			return -EINVAL;
+		}
+		break;
+	}
+
+	case VFE_WRITE_DMI_16BIT:
+	case VFE_WRITE_DMI_32BIT:
+	case VFE_WRITE_DMI_64BIT:
+	case VFE_READ_DMI_16BIT:
+	case VFE_READ_DMI_32BIT:
+	case VFE_READ_DMI_64BIT: {
+		if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT ||
+			reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
+			/* 64-bit DMI expects hi word exactly one uint32_t
+			 * after the lo word in the payload.
+			 */
+			if ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset <=
+				reg_cfg_cmd->u.dmi_info.lo_tbl_offset) ||
+				(reg_cfg_cmd->u.dmi_info.hi_tbl_offset -
+				reg_cfg_cmd->u.dmi_info.lo_tbl_offset !=
+				(sizeof(uint32_t)))) {
+				/* Fix: second argument previously printed
+				 * hi_tbl_offset twice; report lo as labeled.
+				 */
+				pr_err("%s:%d hi %d lo %d\n",
+					__func__, __LINE__,
+					reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
+					reg_cfg_cmd->u.dmi_info.lo_tbl_offset);
+				return -EINVAL;
+			}
+			if (reg_cfg_cmd->u.dmi_info.len <= sizeof(uint32_t)) {
+				pr_err("%s:%d len %d\n",
+					__func__, __LINE__,
+					reg_cfg_cmd->u.dmi_info.len);
+				return -EINVAL;
+			}
+			if (((UINT_MAX -
+				reg_cfg_cmd->u.dmi_info.hi_tbl_offset) <
+				(reg_cfg_cmd->u.dmi_info.len -
+				sizeof(uint32_t))) ||
+				((reg_cfg_cmd->u.dmi_info.hi_tbl_offset +
+				reg_cfg_cmd->u.dmi_info.len -
+				sizeof(uint32_t)) > cmd_len)) {
+				pr_err("%s:%d hi_tbl_offset %d len %d cmd %d\n",
+					__func__, __LINE__,
+					reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
+					reg_cfg_cmd->u.dmi_info.len, cmd_len);
+				return -EINVAL;
+			}
+		}
+		if ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset >
+			(UINT_MAX - reg_cfg_cmd->u.dmi_info.len)) ||
+			((reg_cfg_cmd->u.dmi_info.lo_tbl_offset +
+			reg_cfg_cmd->u.dmi_info.len) > cmd_len)) {
+			pr_err("%s:%d lo_tbl_offset %d len %d cmd_len %d\n",
+				__func__, __LINE__,
+				reg_cfg_cmd->u.dmi_info.lo_tbl_offset,
+				reg_cfg_cmd->u.dmi_info.len, cmd_len);
+			return -EINVAL;
+		}
+		break;
+	}
+
+	default:
+		break;
+	}
+
+	/* Parameters validated; execute the command. */
+	switch (reg_cfg_cmd->cmd_type) {
+	case VFE_WRITE: {
+		msm_camera_io_memcpy(vfe_dev->vfe_base +
+			reg_cfg_cmd->u.rw_info.reg_offset,
+			cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
+			reg_cfg_cmd->u.rw_info.len);
+		break;
+	}
+	case VFE_WRITE_MB: {
+		msm_camera_io_memcpy_mb(vfe_dev->vfe_base +
+			reg_cfg_cmd->u.rw_info.reg_offset,
+			cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
+			reg_cfg_cmd->u.rw_info.len);
+		break;
+	}
+	case VFE_CFG_MASK: {
+		uint32_t temp;
+
+		/* cfg_data may be NULL here, so bounds are checked inline. */
+		if ((UINT_MAX - sizeof(temp) <
+			reg_cfg_cmd->u.mask_info.reg_offset) ||
+			(resource_size(vfe_dev->vfe_mem) <
+			reg_cfg_cmd->u.mask_info.reg_offset +
+			sizeof(temp)) ||
+			(reg_cfg_cmd->u.mask_info.reg_offset & 0x3)) {
+			pr_err("%s: VFE_CFG_MASK: Invalid length\n", __func__);
+			return -EINVAL;
+		}
+		temp = msm_camera_io_r(vfe_dev->vfe_base +
+			reg_cfg_cmd->u.mask_info.reg_offset);
+
+		temp &= ~reg_cfg_cmd->u.mask_info.mask;
+		temp |= reg_cfg_cmd->u.mask_info.val;
+		msm_camera_io_w(temp, vfe_dev->vfe_base +
+			reg_cfg_cmd->u.mask_info.reg_offset);
+		break;
+	}
+	case VFE_WRITE_DMI_16BIT:
+	case VFE_WRITE_DMI_32BIT:
+	case VFE_WRITE_DMI_64BIT: {
+		int i;
+		uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
+		uint32_t hi_val, lo_val, lo_val1;
+
+		if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) {
+			hi_tbl_ptr = cfg_data +
+				reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
+		}
+		lo_tbl_ptr = cfg_data +
+			reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
+		if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT)
+			reg_cfg_cmd->u.dmi_info.len =
+				reg_cfg_cmd->u.dmi_info.len / 2;
+		for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
+			lo_val = *lo_tbl_ptr++;
+			if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) {
+				lo_val1 = lo_val & 0x0000FFFF;
+				lo_val = (lo_val & 0xFFFF0000)>>16;
+				msm_camera_io_w(lo_val1, vfe_dev->vfe_base +
+					vfe_dev->hw_info->dmi_reg_offset + 0x4);
+			} else if (reg_cfg_cmd->cmd_type ==
+					   VFE_WRITE_DMI_64BIT) {
+				/* lo/hi words are interleaved in pairs;
+				 * both pointers stride by two entries.
+				 */
+				lo_tbl_ptr++;
+				hi_val = *hi_tbl_ptr;
+				hi_tbl_ptr = hi_tbl_ptr + 2;
+				msm_camera_io_w(hi_val, vfe_dev->vfe_base +
+					vfe_dev->hw_info->dmi_reg_offset);
+			}
+			msm_camera_io_w(lo_val, vfe_dev->vfe_base +
+				vfe_dev->hw_info->dmi_reg_offset + 0x4);
+		}
+		break;
+	}
+	case VFE_READ_DMI_16BIT:
+	case VFE_READ_DMI_32BIT:
+	case VFE_READ_DMI_64BIT: {
+		int i;
+		uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
+		uint32_t hi_val, lo_val, lo_val1;
+
+		if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
+			hi_tbl_ptr = cfg_data +
+				reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
+		}
+
+		lo_tbl_ptr = cfg_data +
+			reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
+
+		if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT)
+			reg_cfg_cmd->u.dmi_info.len =
+				reg_cfg_cmd->u.dmi_info.len / 2;
+
+		for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
+			lo_val = msm_camera_io_r(vfe_dev->vfe_base +
+				vfe_dev->hw_info->dmi_reg_offset + 0x4);
+
+			if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_16BIT) {
+				lo_val1 = msm_camera_io_r(vfe_dev->vfe_base +
+					vfe_dev->hw_info->dmi_reg_offset + 0x4);
+				lo_val |= lo_val1 << 16;
+			}
+			*lo_tbl_ptr++ = lo_val;
+			if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
+				hi_val = msm_camera_io_r(vfe_dev->vfe_base +
+					vfe_dev->hw_info->dmi_reg_offset);
+				*hi_tbl_ptr = hi_val;
+				hi_tbl_ptr += 2;
+				lo_tbl_ptr++;
+			}
+		}
+		break;
+	}
+	case VFE_HW_UPDATE_LOCK: {
+		uint32_t update_id =
+			vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id;
+		if (update_id) {
+			ISP_DBG("%s hw_update_lock fail cur_id %u,last_id %u\n",
+				__func__,
+				vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
+				update_id);
+			return -EINVAL;
+		}
+		break;
+	}
+	case VFE_HW_UPDATE_UNLOCK: {
+		if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id
+			!= *cfg_data) {
+			ISP_DBG("hw_updt over frm bound,strt_id %u end_id %d\n",
+				*cfg_data,
+				vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+		}
+		vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id =
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+		break;
+	}
+	case VFE_READ: {
+		int i;
+		uint32_t *data_ptr = cfg_data +
+			reg_cfg_cmd->u.rw_info.cmd_data_offset/4;
+		for (i = 0; i < reg_cfg_cmd->u.rw_info.len/4; i++) {
+			if ((data_ptr < cfg_data) ||
+				(UINT_MAX / sizeof(*data_ptr) <
+				 (data_ptr - cfg_data)) ||
+				(sizeof(*data_ptr) * (data_ptr - cfg_data) >=
+				 cmd_len))
+				return -EINVAL;
+			*data_ptr++ = msm_camera_io_r(vfe_dev->vfe_base +
+				reg_cfg_cmd->u.rw_info.reg_offset);
+			reg_cfg_cmd->u.rw_info.reg_offset += 4;
+		}
+		break;
+	}
+	case GET_MAX_CLK_RATE: {
+		int rc = 0;
+		unsigned long rate;
+
+		if (cmd_len != sizeof(__u32)) {
+			pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+				__func__, __LINE__, cmd_len,
+				sizeof(__u32));
+			return -EINVAL;
+		}
+		rc = msm_isp_get_max_clk_rate(vfe_dev, &rate);
+		if (rc < 0) {
+			pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
+			return -EINVAL;
+		}
+
+		*(__u32 *)cfg_data = (__u32)rate;
+
+		break;
+	}
+	case GET_ISP_ID: {
+		uint32_t *isp_id = NULL;
+
+		if (cmd_len < sizeof(uint32_t)) {
+			pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+				__func__, __LINE__, cmd_len,
+				sizeof(uint32_t));
+			return -EINVAL;
+		}
+
+		isp_id = (uint32_t *)cfg_data;
+		*isp_id = vfe_dev->pdev->id;
+		break;
+	}
+	case SET_WM_UB_SIZE:
+		break;
+	case SET_UB_POLICY: {
+
+		if (cmd_len < sizeof(vfe_dev->vfe_ub_policy)) {
+			pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+				__func__, __LINE__, cmd_len,
+				sizeof(vfe_dev->vfe_ub_policy));
+			return -EINVAL;
+		}
+		vfe_dev->vfe_ub_policy = *cfg_data;
+		break;
+	}
+	default:
+		break;
+	}
+	return 0;
+}
+
+/*
+ * msm_isp_proc_cmd - handle VIDIOC_MSM_VFE_REG_CFG: copy the command
+ * array and payload from userspace, run each sub-command through
+ * msm_isp_send_hw_cmd(), and copy the (possibly updated) payload back.
+ *
+ * Note: only the status of the last sub-command is propagated in rc.
+ * Returns 0 on success or a negative errno.
+ */
+int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg)
+{
+	int rc = 0, i;
+	struct msm_vfe_cfg_cmd2 *proc_cmd = arg;
+	struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd;
+	uint32_t *cfg_data = NULL;
+
+	if (!proc_cmd->num_cfg) {
+		pr_err("%s: Passed num_cfg as 0\n", __func__);
+		return -EINVAL;
+	}
+
+	/* kcalloc guards num_cfg * sizeof() against unsigned overflow
+	 * (num_cfg is user-controlled).
+	 */
+	reg_cfg_cmd = kcalloc(proc_cmd->num_cfg,
+		sizeof(struct msm_vfe_reg_cfg_cmd), GFP_KERNEL);
+	if (!reg_cfg_cmd) {
+		rc = -ENOMEM;
+		goto reg_cfg_failed;
+	}
+
+	if (copy_from_user(reg_cfg_cmd,
+		(void __user *)(proc_cmd->cfg_cmd),
+		sizeof(struct msm_vfe_reg_cfg_cmd) * proc_cmd->num_cfg)) {
+		rc = -EFAULT;
+		goto copy_cmd_failed;
+	}
+
+	if (proc_cmd->cmd_len > 0 &&
+		proc_cmd->cmd_len < UINT16_MAX) {
+		cfg_data = kzalloc(proc_cmd->cmd_len, GFP_KERNEL);
+		if (!cfg_data) {
+			pr_err("%s: cfg_data alloc failed\n", __func__);
+			rc = -ENOMEM;
+			goto cfg_data_failed;
+		}
+
+		if (copy_from_user(cfg_data,
+			(void __user *)(proc_cmd->cfg_data),
+			proc_cmd->cmd_len)) {
+			rc = -EFAULT;
+			goto copy_cmd_failed;
+		}
+	}
+
+	/* Fix: '&reg_cfg_cmd[i]' was corrupted to '(R)_cfg_cmd[i]' by a
+	 * character-encoding mangle; restored the address-of expression.
+	 */
+	for (i = 0; i < proc_cmd->num_cfg; i++)
+		rc = msm_isp_send_hw_cmd(vfe_dev, &reg_cfg_cmd[i],
+			cfg_data, proc_cmd->cmd_len);
+
+	if (copy_to_user(proc_cmd->cfg_data,
+			cfg_data, proc_cmd->cmd_len)) {
+		rc = -EFAULT;
+		goto copy_cmd_failed;
+	}
+
+copy_cmd_failed:
+	kfree(cfg_data);
+cfg_data_failed:
+	kfree(reg_cfg_cmd);
+reg_cfg_failed:
+	return rc;
+}
+
+/*
+ * msm_isp_send_event - queue an ISP v4l2 event toward userspace.
+ * The event payload is a full struct msm_isp32_event_data copied into
+ * the fixed-size v4l2_event data area. Always returns 0.
+ */
+int msm_isp_send_event(struct vfe_device *vfe_dev,
+	uint32_t event_type,
+	struct msm_isp32_event_data *event_data)
+{
+	struct v4l2_event isp_event;
+
+	memset(&isp_event, 0, sizeof(struct v4l2_event));
+	isp_event.id = 0;
+	isp_event.type = event_type;
+
+	memcpy(&isp_event.u.data[0], event_data,
+		sizeof(struct msm_isp32_event_data));
+	v4l2_event_queue(vfe_dev->subdev.sd.devnode, &isp_event);
+	return 0;
+}
+
+/* Words per line = ceil(width * M / N) — M/N encodes bits-per-pixel
+ * packed into 32-bit bus words for each format family.
+ */
+#define CAL_WORD(width, M, N) ((width * M + N - 1) / N)
+
+/*
+ * msm_isp_cal_word_per_line - bus words needed for one line of
+ * @pixel_per_line pixels in the given V4L2 fourcc @output_format.
+ * Returns -1 for unsupported formats.
+ */
+int msm_isp_cal_word_per_line(uint32_t output_format,
+	uint32_t pixel_per_line)
+{
+	int val = -1;
+
+	switch (output_format) {
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_QBGGR8:
+	case V4L2_PIX_FMT_QGBRG8:
+	case V4L2_PIX_FMT_QGRBG8:
+	case V4L2_PIX_FMT_QRGGB8:
+	case V4L2_PIX_FMT_JPEG:
+	case V4L2_PIX_FMT_META:
+		val = CAL_WORD(pixel_per_line, 1, 8);
+		break;
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+		val = CAL_WORD(pixel_per_line, 5, 32);
+		break;
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+		val = CAL_WORD(pixel_per_line, 3, 16);
+		break;
+	case V4L2_PIX_FMT_SBGGR14:
+	case V4L2_PIX_FMT_SGBRG14:
+	case V4L2_PIX_FMT_SGRBG14:
+	case V4L2_PIX_FMT_SRGGB14:
+		val = CAL_WORD(pixel_per_line, 7, 32);
+		break;
+	case V4L2_PIX_FMT_QBGGR10:
+	case V4L2_PIX_FMT_QGBRG10:
+	case V4L2_PIX_FMT_QGRBG10:
+	case V4L2_PIX_FMT_QRGGB10:
+		val = CAL_WORD(pixel_per_line, 1, 6);
+		break;
+	case V4L2_PIX_FMT_QBGGR12:
+	case V4L2_PIX_FMT_QGBRG12:
+	case V4L2_PIX_FMT_QGRBG12:
+	case V4L2_PIX_FMT_QRGGB12:
+		val = CAL_WORD(pixel_per_line, 1, 5);
+		break;
+	case V4L2_PIX_FMT_QBGGR14:
+	case V4L2_PIX_FMT_QGBRG14:
+	case V4L2_PIX_FMT_QGRBG14:
+	case V4L2_PIX_FMT_QRGGB14:
+		val = CAL_WORD(pixel_per_line, 1, 4);
+		break;
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV14:
+	case V4L2_PIX_FMT_NV41:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		val = CAL_WORD(pixel_per_line, 1, 8);
+		break;
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+		val = CAL_WORD(pixel_per_line, 2, 8);
+		break;
+	case V4L2_PIX_FMT_P16BGGR10:
+	case V4L2_PIX_FMT_P16GBRG10:
+	case V4L2_PIX_FMT_P16GRBG10:
+	case V4L2_PIX_FMT_P16RGGB10:
+		val = CAL_WORD(pixel_per_line, 1, 4);
+		break;
+	/*TD: Add more image format*/
+	default:
+		msm_isp_print_fourcc_error(__func__, output_format);
+		break;
+	}
+	return val;
+}
+
+/*
+ * msm_isp_get_pack_format - map a V4L2 fourcc to the VFE packing mode:
+ * S* Bayer -> MIPI, Q* Bayer -> QCOM, P16* -> PLAIN16.
+ * Returns -EINVAL (cast into the enum) for unsupported formats.
+ */
+enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format)
+{
+	switch (output_format) {
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+	case V4L2_PIX_FMT_SBGGR14:
+	case V4L2_PIX_FMT_SGBRG14:
+	case V4L2_PIX_FMT_SGRBG14:
+	case V4L2_PIX_FMT_SRGGB14:
+		return MIPI;
+	case V4L2_PIX_FMT_QBGGR8:
+	case V4L2_PIX_FMT_QGBRG8:
+	case V4L2_PIX_FMT_QGRBG8:
+	case V4L2_PIX_FMT_QRGGB8:
+	case V4L2_PIX_FMT_QBGGR10:
+	case V4L2_PIX_FMT_QGBRG10:
+	case V4L2_PIX_FMT_QGRBG10:
+	case V4L2_PIX_FMT_QRGGB10:
+	case V4L2_PIX_FMT_QBGGR12:
+	case V4L2_PIX_FMT_QGBRG12:
+	case V4L2_PIX_FMT_QGRBG12:
+	case V4L2_PIX_FMT_QRGGB12:
+	case V4L2_PIX_FMT_QBGGR14:
+	case V4L2_PIX_FMT_QGBRG14:
+	case V4L2_PIX_FMT_QGRBG14:
+	case V4L2_PIX_FMT_QRGGB14:
+		return QCOM;
+	case V4L2_PIX_FMT_P16BGGR10:
+	case V4L2_PIX_FMT_P16GBRG10:
+	case V4L2_PIX_FMT_P16GRBG10:
+	case V4L2_PIX_FMT_P16RGGB10:
+		return PLAIN16;
+	default:
+		msm_isp_print_fourcc_error(__func__, output_format);
+		break;
+	}
+	return -EINVAL;
+}
+
+/*
+ * msm_isp_get_bit_per_pixel - bits per pixel for a V4L2 fourcc.
+ * Returns the bit depth (4..16) or -EINVAL for unsupported formats.
+ */
+int msm_isp_get_bit_per_pixel(uint32_t output_format)
+{
+	switch (output_format) {
+	case V4L2_PIX_FMT_Y4:
+		return 4;
+	case V4L2_PIX_FMT_Y6:
+		return 6;
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_QBGGR8:
+	case V4L2_PIX_FMT_QGBRG8:
+	case V4L2_PIX_FMT_QGRBG8:
+	case V4L2_PIX_FMT_QRGGB8:
+	case V4L2_PIX_FMT_JPEG:
+	case V4L2_PIX_FMT_META:
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV14:
+	case V4L2_PIX_FMT_NV41:
+	case V4L2_PIX_FMT_YVU410:
+	case V4L2_PIX_FMT_YVU420:
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_YYUV:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+	case V4L2_PIX_FMT_YUV422P:
+	case V4L2_PIX_FMT_YUV411P:
+	case V4L2_PIX_FMT_Y41P:
+	case V4L2_PIX_FMT_YUV444:
+	case V4L2_PIX_FMT_YUV555:
+	case V4L2_PIX_FMT_YUV565:
+	case V4L2_PIX_FMT_YUV32:
+	case V4L2_PIX_FMT_YUV410:
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_GREY:
+	case V4L2_PIX_FMT_PAL8:
+	case V4L2_PIX_FMT_UV8:
+	case MSM_V4L2_PIX_FMT_META:
+		return 8;
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+	case V4L2_PIX_FMT_QBGGR10:
+	case V4L2_PIX_FMT_QGBRG10:
+	case V4L2_PIX_FMT_QGRBG10:
+	case V4L2_PIX_FMT_QRGGB10:
+	case V4L2_PIX_FMT_Y10:
+	case V4L2_PIX_FMT_Y10BPACK:
+	case V4L2_PIX_FMT_P16BGGR10:
+	case V4L2_PIX_FMT_P16GBRG10:
+	case V4L2_PIX_FMT_P16GRBG10:
+	case V4L2_PIX_FMT_P16RGGB10:
+		return 10;
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+	case V4L2_PIX_FMT_QBGGR12:
+	case V4L2_PIX_FMT_QGBRG12:
+	case V4L2_PIX_FMT_QGRBG12:
+	case V4L2_PIX_FMT_QRGGB12:
+	case V4L2_PIX_FMT_Y12:
+		return 12;
+	case V4L2_PIX_FMT_SBGGR14:
+	case V4L2_PIX_FMT_SGBRG14:
+	case V4L2_PIX_FMT_SGRBG14:
+	case V4L2_PIX_FMT_SRGGB14:
+	case V4L2_PIX_FMT_QBGGR14:
+	case V4L2_PIX_FMT_QGBRG14:
+	case V4L2_PIX_FMT_QGRBG14:
+	case V4L2_PIX_FMT_QRGGB14:
+		return 14;
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+	case V4L2_PIX_FMT_Y16:
+		return 16;
+	/*TD: Add more image format*/
+	default:
+		msm_isp_print_fourcc_error(__func__, output_format);
+		pr_err("%s: Invalid output format %x\n",
+			__func__, output_format);
+		return -EINVAL;
+	}
+}
+
+/* Bump the per-frame error-dump counter; msm_isp_process_error_info()
+ * uses it to rate-limit its periodic dump (every 100 frames).
+ */
+void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev)
+{
+	struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
+
+	error_info->info_dump_frame_count++;
+}
+
+/*
+ * msm_isp_process_error_info - on the first error, and every 100
+ * frames thereafter, dump hardware error status, clear the latched
+ * masks, and report (ratelimited) per-stream frame-drop counters.
+ */
+void msm_isp_process_error_info(struct vfe_device *vfe_dev)
+{
+	int i;
+	uint8_t num_stats_type =
+		vfe_dev->hw_info->stats_hw_info->num_stats_type;
+	struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
+	/* static ratelimit state is shared across all VFE devices */
+	static DEFINE_RATELIMIT_STATE(rs,
+		DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+	static DEFINE_RATELIMIT_STATE(rs_stats,
+		DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+
+	if (error_info->error_count == 1 ||
+		!(error_info->info_dump_frame_count % 100)) {
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			process_error_status(vfe_dev);
+		error_info->error_mask0 = 0;
+		error_info->error_mask1 = 0;
+		error_info->camif_status = 0;
+		error_info->violation_status = 0;
+		for (i = 0; i < MAX_NUM_STREAM; i++) {
+			/* drop counter is only reset when the message
+			 * actually printed (ratelimit passed)
+			 */
+			if (error_info->stream_framedrop_count[i] != 0 &&
+				__ratelimit(&rs)) {
+				pr_err("%s: Stream[%d]: dropped %d frames\n",
+					__func__, i,
+					error_info->stream_framedrop_count[i]);
+				error_info->stream_framedrop_count[i] = 0;
+			}
+		}
+		for (i = 0; i < num_stats_type; i++) {
+			if (error_info->stats_framedrop_count[i] != 0 &&
+				__ratelimit(&rs_stats)) {
+				pr_err("%s: Stats stream[%d]: dropped %d frames\n",
+					__func__, i,
+					error_info->stats_framedrop_count[i]);
+				error_info->stats_framedrop_count[i] = 0;
+			}
+		}
+	}
+}
+
+/* Accumulate latched error-IRQ bits and count occurrences; consumed
+ * and cleared by msm_isp_process_error_info().
+ */
+static inline void msm_isp_update_error_info(struct vfe_device *vfe_dev,
+	uint32_t error_mask0, uint32_t error_mask1)
+{
+	vfe_dev->error_info.error_mask0 |= error_mask0;
+	vfe_dev->error_info.error_mask1 |= error_mask1;
+	vfe_dev->error_info.error_count++;
+}
+
+/*
+ * msm_isp_process_overflow_irq - bus-overflow detection/recovery entry.
+ *
+ * If recovery is already running, masks *irq_status0/1 down to the
+ * halt/restart bits. Otherwise, if an overflow bit is set: saves the
+ * current IRQ masks, halts the AXI bus, stops CAMIF immediately,
+ * zeroes both status words, and notifies userspace with an
+ * ISP_EVENT_WM_BUS_OVERFLOW event. Called from hard-IRQ context.
+ */
+static void msm_isp_process_overflow_irq(
+	struct vfe_device *vfe_dev,
+	uint32_t *irq_status0, uint32_t *irq_status1)
+{
+	uint32_t overflow_mask;
+
+	/* if there are no active streams - do not start recovery */
+	if (!vfe_dev->axi_data.num_active_stream)
+		return;
+
+	/*Mask out all other irqs if recovery is started*/
+	if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) {
+		uint32_t halt_restart_mask0, halt_restart_mask1;
+
+		vfe_dev->hw_info->vfe_ops.core_ops.
+		get_halt_restart_mask(&halt_restart_mask0,
+			&halt_restart_mask1);
+		*irq_status0 &= halt_restart_mask0;
+		*irq_status1 &= halt_restart_mask1;
+
+		return;
+	}
+
+	/*Check if any overflow bit is set*/
+	vfe_dev->hw_info->vfe_ops.core_ops.
+		get_overflow_mask(&overflow_mask);
+	overflow_mask &= *irq_status1;
+
+	if (overflow_mask) {
+		struct msm_isp32_event_data error_event;
+
+		if (vfe_dev->reset_pending == 1) {
+			pr_err("%s:%d failed: overflow %x during reset\n",
+				__func__, __LINE__, overflow_mask);
+			/* Clear overflow bits since reset is pending */
+			*irq_status1 &= ~overflow_mask;
+			return;
+		}
+
+		ISP_DBG("%s: Bus overflow detected: 0x%x, start recovery!\n",
+			__func__, overflow_mask);
+		atomic_set(&vfe_dev->error_info.overflow_state,
+			OVERFLOW_DETECTED);
+		/*Store current IRQ mask*/
+		vfe_dev->hw_info->vfe_ops.core_ops.get_irq_mask(vfe_dev,
+			&vfe_dev->error_info.overflow_recover_irq_mask0,
+			&vfe_dev->error_info.overflow_recover_irq_mask1);
+
+		/*Halt the hardware & Clear all other IRQ mask*/
+		vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 0);
+
+		/*Stop CAMIF Immediately*/
+		vfe_dev->hw_info->vfe_ops.core_ops.
+			update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
+
+		/*Update overflow state*/
+		*irq_status0 = 0;
+		*irq_status1 = 0;
+
+		memset(&error_event, 0, sizeof(error_event));
+		error_event.frame_id =
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+		error_event.u.error_info.error_mask = 1 << ISP_WM_BUS_OVERFLOW;
+		msm_isp_send_event(vfe_dev,
+			ISP_EVENT_WM_BUS_OVERFLOW, &error_event);
+	}
+}
+
+/*
+ * msm_isp_reset_burst_count_and_frame_drop - recompute the burst frame
+ * budget for an ACTIVE burst stream from its skip pattern and re-arm
+ * the frame-drop pattern. No-op for non-burst or inactive streams, or
+ * when num_burst_capture is 0.
+ */
+void msm_isp_reset_burst_count_and_frame_drop(
+	struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info)
+{
+	uint32_t framedrop_period = 0;
+
+	if (stream_info->state != ACTIVE ||
+		stream_info->stream_type != BURST_STREAM) {
+		return;
+	}
+	if (stream_info->num_burst_capture != 0) {
+		framedrop_period = msm_isp_get_framedrop_period(
+		   stream_info->frame_skip_pattern);
+		/* budget = init drop + (captures-1) spaced by the skip
+		 * period, plus the first capture frame itself
+		 */
+		stream_info->burst_frame_count =
+			stream_info->init_frame_drop +
+			(stream_info->num_burst_capture - 1) *
+			framedrop_period + 1;
+		msm_isp_reset_framedrop(vfe_dev, stream_info);
+	}
+}
+
+/*
+ * msm_isp_process_irq - hard-IRQ handler. Reads and filters the two
+ * VFE status words (overflow recovery first, then error bits), then
+ * enqueues a snapshot onto the tasklet ring for bottom-half handling
+ * by msm_isp_do_tasklet(). Always returns IRQ_HANDLED.
+ */
+irqreturn_t msm_isp_process_irq(int irq_num, void *data)
+{
+	unsigned long flags;
+	struct msm_vfe_tasklet_queue_cmd *queue_cmd;
+	struct vfe_device *vfe_dev = (struct vfe_device *) data;
+	uint32_t irq_status0, irq_status1;
+	uint32_t error_mask0, error_mask1;
+
+	vfe_dev->hw_info->vfe_ops.irq_ops.
+		read_irq_status(vfe_dev, &irq_status0, &irq_status1);
+
+	if ((irq_status0 == 0) && (irq_status1 == 0)) {
+		pr_err_ratelimited("%s:VFE%d irq_status0 & 1 are both 0\n",
+			__func__, vfe_dev->pdev->id);
+		return IRQ_HANDLED;
+	}
+
+	msm_isp_process_overflow_irq(vfe_dev,
+		&irq_status0, &irq_status1);
+
+	/* split error bits out of the status words; latch them for the
+	 * periodic error dump instead of handling them inline
+	 */
+	vfe_dev->hw_info->vfe_ops.core_ops.
+		get_error_mask(&error_mask0, &error_mask1);
+	error_mask0 &= irq_status0;
+	error_mask1 &= irq_status1;
+	irq_status0 &= ~error_mask0;
+	irq_status1 &= ~error_mask1;
+	if (!vfe_dev->ignore_error &&
+		((error_mask0 != 0) || (error_mask1 != 0)))
+		msm_isp_update_error_info(vfe_dev, error_mask0, error_mask1);
+
+	if ((irq_status0 == 0) && (irq_status1 == 0) &&
+		(!(((error_mask0 != 0) || (error_mask1 != 0)) &&
+		 vfe_dev->error_info.error_count == 1))) {
+		ISP_DBG("%s: error_mask0/1 & error_count are set!\n", __func__);
+		return IRQ_HANDLED;
+	}
+
+	spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+	queue_cmd = &vfe_dev->tasklet_queue_cmd[vfe_dev->taskletq_idx];
+	if (queue_cmd->cmd_used) {
+		/* ring full: overwrite the oldest pending entry in place.
+		 * NOTE(review): the overwritten IRQ snapshot is silently
+		 * lost — confirm this is the intended drop policy.
+		 */
+		pr_err_ratelimited("%s: Tasklet queue overflow: %d\n",
+			__func__, vfe_dev->pdev->id);
+		list_del(&queue_cmd->list);
+	} else {
+		atomic_add(1, &vfe_dev->irq_cnt);
+	}
+	queue_cmd->vfeInterruptStatus0 = irq_status0;
+	queue_cmd->vfeInterruptStatus1 = irq_status1;
+	msm_isp_get_timestamp(&queue_cmd->ts, vfe_dev);
+	queue_cmd->cmd_used = 1;
+	vfe_dev->taskletq_idx =
+		(vfe_dev->taskletq_idx + 1) % MSM_VFE_TASKLETQ_SIZE;
+	list_add_tail(&queue_cmd->list, &vfe_dev->tasklet_q);
+	spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+	tasklet_schedule(&vfe_dev->vfe_tasklet);
+	return IRQ_HANDLED;
+}
+
+/*
+ * msm_isp_do_tasklet - bottom half for msm_isp_process_irq(). Drains
+ * the tasklet queue and dispatches each saved IRQ-status snapshot to
+ * the per-category hw irq_ops handlers. While overflow recovery is in
+ * progress, only the reset/halt handlers run; all other IRQs are
+ * dropped until recovery completes.
+ */
+void msm_isp_do_tasklet(unsigned long data)
+{
+	unsigned long flags;
+	struct vfe_device *vfe_dev = (struct vfe_device *) data;
+	struct msm_vfe_irq_ops *irq_ops = &vfe_dev->hw_info->vfe_ops.irq_ops;
+	struct msm_vfe_tasklet_queue_cmd *queue_cmd;
+	struct msm_isp_timestamp ts;
+	uint32_t irq_status0, irq_status1;
+
+	while (atomic_read(&vfe_dev->irq_cnt)) {
+		spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+		/* Fix: list_first_entry() never returns NULL, so the old
+		 * '!queue_cmd' guard was dead code; check emptiness under
+		 * the lock instead before dereferencing the head.
+		 */
+		if (list_empty(&vfe_dev->tasklet_q)) {
+			atomic_set(&vfe_dev->irq_cnt, 0);
+			spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+			return;
+		}
+		queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
+			struct msm_vfe_tasklet_queue_cmd, list);
+		atomic_sub(1, &vfe_dev->irq_cnt);
+		list_del(&queue_cmd->list);
+		queue_cmd->cmd_used = 0;
+		/* copy the snapshot out before dropping the lock */
+		irq_status0 = queue_cmd->vfeInterruptStatus0;
+		irq_status1 = queue_cmd->vfeInterruptStatus1;
+		ts = queue_cmd->ts;
+		spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+		ISP_DBG("%s: status0: 0x%x status1: 0x%x\n",
+			__func__, irq_status0, irq_status1);
+		irq_ops->process_reset_irq(vfe_dev,
+			irq_status0, irq_status1);
+		irq_ops->process_halt_irq(vfe_dev,
+			irq_status0, irq_status1);
+		if (atomic_read(&vfe_dev->error_info.overflow_state)
+			!= NO_OVERFLOW) {
+			pr_err("%s: Recovery in processing, Ignore IRQs!!!\n",
+				__func__);
+			continue;
+		}
+		msm_isp_process_error_info(vfe_dev);
+		irq_ops->process_camif_irq(vfe_dev,
+			irq_status0, irq_status1, &ts);
+		irq_ops->process_axi_irq(vfe_dev,
+			irq_status0, irq_status1, &ts);
+		irq_ops->process_stats_irq(vfe_dev,
+			irq_status0, irq_status1, &ts);
+		irq_ops->process_reg_update(vfe_dev,
+			irq_status0, irq_status1, &ts);
+		irq_ops->process_epoch_irq(vfe_dev,
+			irq_status0, irq_status1, &ts);
+	}
+}
+
+/*
+ * msm_isp_set_src_state - ioctl backend: set active flag and frame id
+ * for one input source. Returns -EINVAL for an out-of-range source.
+ */
+int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg)
+{
+	struct msm_vfe_axi_src_state *src_state = arg;
+
+	if (src_state->input_src >= VFE_SRC_MAX)
+		return -EINVAL;
+	vfe_dev->axi_data.src_info[src_state->input_src].active =
+	src_state->src_active;
+	vfe_dev->axi_data.src_info[src_state->input_src].frame_id =
+	src_state->src_frame_id;
+	return 0;
+}
+
+/*
+ * msm_vfe_iommu_fault_handler - SMMU page-fault callback registered in
+ * msm_isp_open_node(). @token is the vfe_device; dumps buffer-manager
+ * state for the faulting @iova unless pagefault debug is disabled.
+ * Diagnostic only — it does not resolve the fault.
+ */
+static void msm_vfe_iommu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova, int flags, void *token)
+{
+	struct vfe_device *vfe_dev = NULL;
+
+	if (token) {
+		vfe_dev = (struct vfe_device *)token;
+		if (!vfe_dev->buf_mgr || !vfe_dev->buf_mgr->ops) {
+			pr_err("%s:%d] buf_mgr %pK\n", __func__,
+				__LINE__, vfe_dev->buf_mgr);
+			goto end;
+		}
+		if (!vfe_dev->buf_mgr->pagefault_debug_disable) {
+			pr_err("%s:%d] vfe_dev %pK id %d\n", __func__,
+				__LINE__, vfe_dev, vfe_dev->pdev->id);
+			vfe_dev->buf_mgr->ops->buf_mgr_debug(vfe_dev->buf_mgr,
+				iova);
+		}
+	} else {
+		ISP_DBG("%s:%d] no token received: %pK\n",
+			__func__, __LINE__, token);
+		goto end;
+	}
+end:
+	return;
+}
+
+/*
+ * msm_isp_open_node - subdev open. First opener powers up and resets
+ * the VFE, initializes shared state and the buffer manager, and
+ * registers the SMMU page-fault handler; later openers only bump
+ * vfe_open_cnt. Lock order (realtime then core) matches the other
+ * entry points in this file. Returns 0, -EBUSY, or -EINVAL.
+ */
+int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+	long rc = 0;
+
+	ISP_DBG("%s\n", __func__);
+
+	mutex_lock(&vfe_dev->realtime_mutex);
+	mutex_lock(&vfe_dev->core_mutex);
+
+	/* already open and mapped: just account the extra opener */
+	if (vfe_dev->vfe_open_cnt++ && vfe_dev->vfe_base) {
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return 0;
+	}
+
+	/* stale mapping with open count 0 — clear it before re-init */
+	if (vfe_dev->vfe_base) {
+		pr_err("%s:%d invalid params cnt %d base %pK\n", __func__,
+			__LINE__, vfe_dev->vfe_open_cnt, vfe_dev->vfe_base);
+		vfe_dev->vfe_base = NULL;
+	}
+
+	vfe_dev->reset_pending = 0;
+	vfe_dev->isp_sof_debug = 0;
+
+	if (vfe_dev->hw_info->vfe_ops.core_ops.init_hw(vfe_dev) < 0) {
+		pr_err("%s: init hardware failed\n", __func__);
+		vfe_dev->vfe_open_cnt--;
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return -EBUSY;
+	}
+
+	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+	atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+
+	vfe_dev->hw_info->vfe_ops.core_ops.clear_status_reg(vfe_dev);
+
+	rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1);
+	if (rc <= 0) {
+		pr_err("%s: reset timeout\n", __func__);
+		vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
+		vfe_dev->vfe_open_cnt--;
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return -EINVAL;
+	}
+	vfe_dev->vfe_hw_version = msm_camera_io_r(vfe_dev->vfe_base);
+	ISP_DBG("%s: HW Version: 0x%x\n", __func__, vfe_dev->vfe_hw_version);
+
+	vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
+
+	vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr, "msm_isp");
+
+	memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data));
+	memset(&vfe_dev->stats_data, 0,
+		sizeof(struct msm_vfe_stats_shared_data));
+	memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+	memset(&vfe_dev->fetch_engine_info, 0,
+		sizeof(vfe_dev->fetch_engine_info));
+	vfe_dev->axi_data.hw_info = vfe_dev->hw_info->axi_hw_info;
+	vfe_dev->taskletq_idx = 0;
+	vfe_dev->vt_enable = 0;
+	vfe_dev->bus_util_factor = 0;
+	rc = of_property_read_u32(vfe_dev->pdev->dev.of_node,
+			"bus-util-factor", &vfe_dev->bus_util_factor);
+	if (rc < 0)
+		ISP_DBG("%s: Use default bus utilization factor\n", __func__);
+
+	cam_smmu_reg_client_page_fault_handler(
+			vfe_dev->buf_mgr->iommu_hdl,
+			msm_vfe_iommu_fault_handler,
+			NULL,
+			vfe_dev);
+
+	mutex_unlock(&vfe_dev->core_mutex);
+	mutex_unlock(&vfe_dev->realtime_mutex);
+	return 0;
+}
+
+#ifdef CONFIG_MSM_AVTIMER
+/* Release the AV timer power-collapse vote taken while vt was enabled. */
+static void msm_isp_end_avtimer(void)
+{
+	avcs_core_disable_power_collapse(0);
+}
+#else
+/* Stub when the AV timer driver is not configured. */
+static void msm_isp_end_avtimer(void)
+{
+	pr_err("AV Timer is not supported\n");
+}
+#endif
+
+/*
+ * msm_isp_close_node - subdev close, counterpart of
+ * msm_isp_open_node(). Last closer unregisters the page-fault
+ * handler, halts the AXI bus, and tears down buffer manager, hardware
+ * and the AV timer vote. Earlier closers only decrement the count.
+ * Returns 0, or -EINVAL if called with the count already at 0.
+ */
+int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+	long rc = 0;
+	struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+
+	ISP_DBG("%s E\n", __func__);
+	mutex_lock(&vfe_dev->realtime_mutex);
+	mutex_lock(&vfe_dev->core_mutex);
+
+	if (!vfe_dev->vfe_open_cnt) {
+		pr_err("%s invalid state open cnt %d\n", __func__,
+			vfe_dev->vfe_open_cnt);
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return -EINVAL;
+	}
+
+	if (--vfe_dev->vfe_open_cnt) {
+		mutex_unlock(&vfe_dev->core_mutex);
+		mutex_unlock(&vfe_dev->realtime_mutex);
+		return 0;
+	}
+
+	/* Unregister page fault handler */
+	cam_smmu_reg_client_page_fault_handler(
+		vfe_dev->buf_mgr->iommu_hdl,
+		NULL, NULL, vfe_dev);
+
+	/* blocking halt so no DMA is in flight during teardown */
+	rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+	if (rc < 0)
+		pr_err("%s: halt timeout rc=%ld\n", __func__, rc);
+
+	vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr);
+	vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
+	if (vfe_dev->vt_enable) {
+		msm_isp_end_avtimer();
+		vfe_dev->vt_enable = 0;
+	}
+	mutex_unlock(&vfe_dev->core_mutex);
+	mutex_unlock(&vfe_dev->realtime_mutex);
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.h
new file mode 100644
index 0000000..f2268c3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util_32.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_ISP_UTIL_H__
+#define __MSM_ISP_UTIL_H__
+
+#include "msm_isp_32.h"
+#include <soc/qcom/camera2.h>
+
+/* #define CONFIG_MSM_ISP_DBG 1 */
+
+#ifdef CONFIG_MSM_ISP_DBG
+#define ISP_DBG(fmt, args...) printk(fmt, ##args)
+#else
+#define ISP_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define ALT_VECTOR_IDX(x) {x = 3 - x; }
+
+/* Per-HW-client bookkeeping for ISP bus-bandwidth voting. */
+struct msm_isp_bandwidth_mgr {
+	uint32_t bus_client;		/* handle returned by the bus driver */
+	uint32_t bus_vector_active_idx;	/* currently-voted bus vector index */
+	uint32_t use_count;		/* number of active clients */
+	struct msm_isp_bandwidth_info client_info[MAX_ISP_CLIENT];
+};
+
+uint32_t msm_isp_get_framedrop_period(
+ enum msm_vfe_frame_skip_pattern frame_skip_pattern);
+void msm_isp_reset_burst_count_and_frame_drop(
+ struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info);
+
+int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client);
+int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
+ uint64_t ab, uint64_t ib);
+void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
+ struct msm_isp_statistics *stats);
+void msm_isp_util_update_last_overflow_ab_ib(struct vfe_device *vfe_dev);
+void msm_isp_util_update_clk_rate(long clock_rate);
+void msm_isp_update_req_history(uint32_t client, uint64_t ab,
+ uint64_t ib,
+ struct msm_isp_bandwidth_info *client_info,
+ unsigned long long ts);
+void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client);
+
+int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
+
+int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
+
+int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_send_event(struct vfe_device *vfe_dev,
+ uint32_t type, struct msm_isp32_event_data *event_data);
+int msm_isp_cal_word_per_line(uint32_t output_format,
+ uint32_t pixel_per_line);
+int msm_isp_get_bit_per_pixel(uint32_t output_format);
+enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format);
+irqreturn_t msm_isp_process_irq(int irq_num, void *data);
+int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_do_tasklet(unsigned long data);
+void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev);
+void msm_isp_process_error_info(struct vfe_device *vfe_dev);
+int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+long msm_isp_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
+int msm_isp_get_clk_info(struct vfe_device *vfe_dev,
+ struct platform_device *pdev, struct msm_cam_clk_info *vfe_clk_info);
+void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
+ struct msm_vfe_fetch_engine_info *fetch_engine_info);
+void msm_camera_io_dump_2(void __iomem *addr, int size);
+void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format);
+void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
+ struct vfe_device *vfe_dev);
+void msm_isp_get_avtimer_ts(struct msm_isp_timestamp *time_stamp);
+int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg);
+#endif /* __MSM_ISP_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/ispif/Makefile b/drivers/media/platform/msm/camera_v2/ispif/Makefile
index 236ec73..d56332d 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/Makefile
+++ b/drivers/media/platform/msm/camera_v2/ispif/Makefile
@@ -1,4 +1,8 @@
ccflags-y += -Idrivers/media/platform/msm/camera_v2
ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ifeq ($(CONFIG_MSM_ISP_V1),y)
+obj-$(CONFIG_MSM_CSID) += msm_ispif_32.o
+else
obj-$(CONFIG_MSM_CSID) += msm_ispif.o
+endif
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.c
new file mode 100644
index 0000000..e9b2a1d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.c
@@ -0,0 +1,1581 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/of.h>
+#include <linux/videodev2.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/iopoll.h>
+#include <linux/compat.h>
+#include <media/msmb_isp.h>
+
+#include "msm_ispif_32.h"
+#include "msm.h"
+#include "msm_sd.h"
+#include "msm_camera_io_util.h"
+
+#ifdef CONFIG_MSM_ISPIF_V1
+#include "msm_ispif_hwreg_v1.h"
+#else
+#include "msm_ispif_hwreg_v2.h"
+#endif
+
+#define V4L2_IDENT_ISPIF 50001
+#define MSM_ISPIF_DRV_NAME "msm_ispif"
+
+#define ISPIF_INTF_CMD_DISABLE_FRAME_BOUNDARY 0x00
+#define ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY 0x01
+#define ISPIF_INTF_CMD_DISABLE_IMMEDIATELY 0x02
+
+#define ISPIF_TIMEOUT_SLEEP_US 1000
+#define ISPIF_TIMEOUT_ALL_US 1000000
+
+#undef CDBG
+#ifdef CONFIG_MSMB_CAMERA_DEBUG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#else
+#define CDBG(fmt, args...) do { } while (0)
+#endif
+
+/*
+ * msm_ispif_io_dump_reg - debug helper; dump the first 0x250 bytes of the
+ * ISPIF register space when register dumping is enabled (enb_dump_reg).
+ * No-op when disabled or when the device is not mapped.
+ */
+static void msm_ispif_io_dump_reg(struct ispif_device *ispif)
+{
+	if (!ispif->enb_dump_reg)
+		return;
+
+	if (!ispif->base) {
+		pr_err("%s: null pointer for the ispif base\n", __func__);
+		return;
+	}
+
+	msm_camera_io_dump(ispif->base, 0x250, 1);
+}
+
+
+/*
+ * msm_ispif_is_intf_valid - validate a VFE interface id against the CSID
+ * version: CSID v2.2 and older support only VFE0; any id >= VFE_MAX is
+ * always invalid. Returns true/false.
+ */
+static inline int msm_ispif_is_intf_valid(uint32_t csid_version,
+	uint8_t intf_type)
+{
+	return (((csid_version <= CSID_VERSION_V22
+					&& intf_type != VFE0) ||
+				(intf_type >= VFE_MAX))
+			? false : true);
+}
+
+/* Clocks that must be running for an ISPIF reset on 8x26-class targets.
+ * All are enabled at their current rate (NO_SET_RATE).
+ */
+static struct msm_cam_clk_info ispif_8626_reset_clk_info[] = {
+	{"ispif_ahb_clk", NO_SET_RATE},
+	{"csi0_src_clk", NO_SET_RATE},
+	{"csi0_clk", NO_SET_RATE},
+	{"csi0_pix_clk", NO_SET_RATE},
+	{"csi0_rdi_clk", NO_SET_RATE},
+	{"csi1_src_clk", NO_SET_RATE},
+	{"csi1_clk", NO_SET_RATE},
+	{"csi1_pix_clk", NO_SET_RATE},
+	{"csi1_rdi_clk", NO_SET_RATE},
+	{"camss_vfe_vfe0_clk", NO_SET_RATE},
+	{"camss_csi_vfe0_clk", NO_SET_RATE},
+};
+
+/* AHB clock table filled at runtime from device tree by
+ * msm_ispif_get_ahb_clk_info().
+ */
+static struct msm_cam_clk_info ispif_8974_ahb_clk_info[ISPIF_CLK_INFO_MAX];
+
+/* Clocks required for an ISPIF reset on 8974-class targets; source clocks
+ * are brought up at INIT_RATE, branch clocks at their current rate.
+ */
+static struct msm_cam_clk_info ispif_8974_reset_clk_info[] = {
+	{"csi0_src_clk", INIT_RATE},
+	{"csi0_clk", NO_SET_RATE},
+	{"csi0_pix_clk", NO_SET_RATE},
+	{"csi0_rdi_clk", NO_SET_RATE},
+	{"csi1_src_clk", INIT_RATE},
+	{"csi1_clk", NO_SET_RATE},
+	{"csi1_pix_clk", NO_SET_RATE},
+	{"csi1_rdi_clk", NO_SET_RATE},
+	{"csi2_src_clk", INIT_RATE},
+	{"csi2_clk", NO_SET_RATE},
+	{"csi2_pix_clk", NO_SET_RATE},
+	{"csi2_rdi_clk", NO_SET_RATE},
+	{"csi3_src_clk", INIT_RATE},
+	{"csi3_clk", NO_SET_RATE},
+	{"csi3_pix_clk", NO_SET_RATE},
+	{"csi3_rdi_clk", NO_SET_RATE},
+	{"vfe0_clk_src", INIT_RATE},
+	{"camss_vfe_vfe0_clk", NO_SET_RATE},
+	{"camss_csi_vfe0_clk", NO_SET_RATE},
+	{"vfe1_clk_src", INIT_RATE},
+	{"camss_vfe_vfe1_clk", NO_SET_RATE},
+	{"camss_csi_vfe1_clk", NO_SET_RATE},
+};
+
+/*
+ * msm_ispif_reset_hw - hard-reset the ISPIF block.
+ *
+ * Enables the reset-required clocks (tries the 8974 table first, falls
+ * back to the 8x26 table; clk_idx records which set succeeded: 1 = 8974,
+ * 2 = 8x26), writes the reset command register(s), waits up to 500 ms per
+ * VFE for the reset-done IRQ completion, then disables the clocks again.
+ *
+ * Returns 0 on success, -ETIMEDOUT if a reset completion never arrives,
+ * or a clock enable/disable error code.
+ */
+static int msm_ispif_reset_hw(struct ispif_device *ispif)
+{
+	int rc = 0;
+	long timeout = 0;
+	struct clk *reset_clk[ARRAY_SIZE(ispif_8974_reset_clk_info)];
+	struct clk *reset_clk1[ARRAY_SIZE(ispif_8626_reset_clk_info)];
+
+	ispif->clk_idx = 0;
+
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_reset_clk_info, reset_clk,
+		ARRAY_SIZE(ispif_8974_reset_clk_info), 1);
+	if (rc < 0) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 1);
+		if (rc < 0) {
+			pr_err("%s: cannot enable clock, error = %d",
+				__func__, rc);
+		} else {
+			/* This is set when device is 8x26 */
+			ispif->clk_idx = 2;
+		}
+	} else {
+		/* This is set when device is 8974 */
+		ispif->clk_idx = 1;
+	}
+
+	/* Completions are signalled from the ISPIF reset-done interrupt. */
+	init_completion(&ispif->reset_complete[VFE0]);
+	if (ispif->hw_num_isps > 1)
+		init_completion(&ispif->reset_complete[VFE1]);
+
+	/* initiate reset of ISPIF */
+	msm_camera_io_w(ISPIF_RST_CMD_MASK,
+				ispif->base + ISPIF_RST_CMD_ADDR);
+	if (ispif->hw_num_isps > 1)
+		msm_camera_io_w(ISPIF_RST_CMD_1_MASK,
+					ispif->base + ISPIF_RST_CMD_1_ADDR);
+
+	timeout = wait_for_completion_timeout(
+			&ispif->reset_complete[VFE0], msecs_to_jiffies(500));
+	CDBG("%s: VFE0 done\n", __func__);
+
+	if (timeout <= 0) {
+		/* Timed out: best-effort disable of whichever clock table
+		 * was enabled above, then report -ETIMEDOUT.
+		 */
+		pr_err("%s: VFE0 reset wait timeout\n", __func__);
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8974_reset_clk_info, reset_clk,
+			ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+		if (rc < 0) {
+			rc = msm_cam_clk_enable(&ispif->pdev->dev,
+				ispif_8626_reset_clk_info, reset_clk1,
+				ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+			if (rc < 0)
+				pr_err("%s: VFE0 reset wait timeout\n",
+					 __func__);
+		}
+		return -ETIMEDOUT;
+	}
+
+	if (ispif->hw_num_isps > 1) {
+		timeout = wait_for_completion_timeout(
+				&ispif->reset_complete[VFE1],
+				msecs_to_jiffies(500));
+		CDBG("%s: VFE1 done\n", __func__);
+		if (timeout <= 0) {
+			pr_err("%s: VFE1 reset wait timeout\n", __func__);
+			msm_cam_clk_enable(&ispif->pdev->dev,
+				ispif_8974_reset_clk_info, reset_clk,
+				ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+			return -ETIMEDOUT;
+		}
+	}
+
+	/* Success: drop the clock votes taken for the reset. */
+	if (ispif->clk_idx == 1) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8974_reset_clk_info, reset_clk,
+			ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+		if (rc < 0) {
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+		}
+	}
+
+	if (ispif->clk_idx == 2) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+		if (rc < 0) {
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * msm_ispif_get_ahb_clk_info - collect AHB clocks from device tree.
+ *
+ * Walks the "clock-names"/"qcom,clock-rates" properties and copies every
+ * entry whose name contains "ahb" into ahb_clk_info, translating a DT
+ * rate of 0 into -1 (meaning "leave rate unchanged"). Stores the number
+ * of AHB clocks found in ispif_dev->num_ahb_clk.
+ *
+ * Returns 0 on success (including "no clocks in DT"), -EINVAL if the DT
+ * lists more clocks than ISPIF_CLK_INFO_MAX, or a DT read error.
+ */
+static int msm_ispif_get_ahb_clk_info(struct ispif_device *ispif_dev,
+	struct platform_device *pdev,
+	struct msm_cam_clk_info *ahb_clk_info)
+{
+	uint32_t num_ahb_clk = 0;
+	int i, count, rc;
+	uint32_t rates[ISPIF_CLK_INFO_MAX];
+
+	struct device_node *of_node;
+
+	of_node = pdev->dev.of_node;
+
+	count = of_property_count_strings(of_node, "clock-names");
+
+	CDBG("count = %d\n", count);
+	if (count <= 0) {
+		pr_err("no clocks found in device tree, count=%d", count);
+		return 0;
+	}
+
+	if (count > ISPIF_CLK_INFO_MAX) {
+		pr_err("invalid count=%d, max is %d\n", count,
+			ISPIF_CLK_INFO_MAX);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+		rates, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return rc;
+	}
+	for (i = 0; i < count; i++) {
+		/* NOTE(review): the name is stored at [num_ahb_clk] but the
+		 * debug prints below read [i]; these differ once a non-AHB
+		 * clock has been skipped. The CDBG also runs before rc is
+		 * checked, so it may print a stale name - confirm intent.
+		 */
+		rc = of_property_read_string_index(of_node, "clock-names",
+				i, &(ahb_clk_info[num_ahb_clk].clk_name));
+		CDBG("clock-names[%d] = %s\n",
+			 i, ahb_clk_info[i].clk_name);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			return rc;
+		}
+		if (strnstr(ahb_clk_info[num_ahb_clk].clk_name, "ahb",
+			sizeof(ahb_clk_info[num_ahb_clk].clk_name))) {
+			ahb_clk_info[num_ahb_clk].clk_rate =
+				(rates[i] == 0) ? (long)-1 : rates[i];
+			CDBG("clk_rate[%d] = %ld\n", i,
+				ahb_clk_info[i].clk_rate);
+			num_ahb_clk++;
+		}
+	}
+	ispif_dev->num_ahb_clk = num_ahb_clk;
+	return 0;
+}
+
+/*
+ * msm_ispif_clk_ahb_enable - enable or disable the ISPIF AHB clocks.
+ *
+ * @enable: 1 to enable, 0 to disable.
+ *
+ * Re-reads the AHB clock list from device tree on every call, then toggles
+ * the clocks. Returns 0 on success (or on pre-v3.0 CSID where no AHB clock
+ * is needed), -EFAULT if the DT lookup fails, or a clock error code.
+ */
+static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable)
+{
+	int rc = 0;
+
+	if (ispif->csid_version < CSID_VERSION_V30) {
+		/* Older ISPIF versions don't need the AHB clock */
+		return 0;
+	}
+
+	rc = msm_ispif_get_ahb_clk_info(ispif, ispif->pdev,
+		ispif_8974_ahb_clk_info);
+	if (rc < 0) {
+		pr_err("%s: msm_isp_get_clk_info() failed", __func__);
+		return -EFAULT;
+	}
+
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_ahb_clk_info, ispif->ahb_clk,
+		ispif->num_ahb_clk, enable);
+	if (rc < 0) {
+		pr_err("%s: cannot enable clock, error = %d",
+			__func__, rc);
+	}
+
+	return rc;
+}
+
+/*
+ * msm_ispif_reset - software-reset the ISPIF register state for every VFE.
+ *
+ * Clears SOF counters, masks and clears all three IRQ groups, resets input
+ * selection, stops both interface command registers immediately, zeroes all
+ * CID masks and PIX crop windows, then issues a global IRQ clear command.
+ * Always returns 0.
+ */
+static int msm_ispif_reset(struct ispif_device *ispif)
+{
+	int rc = 0;
+	int i;
+
+	if (WARN_ON(!ispif))
+		return -EINVAL;
+
+	memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
+	for (i = 0; i < ispif->vfe_info.num_vfe; i++) {
+
+		msm_camera_io_w(1 << PIX0_LINE_BUF_EN_BIT,
+			ispif->base + ISPIF_VFE_m_CTRL_0(i));
+		msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(i));
+		msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(i));
+		msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(i));
+		msm_camera_io_w(0xFFFFFFFF, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_0(i));
+		msm_camera_io_w(0xFFFFFFFF, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_1(i));
+		msm_camera_io_w(0xFFFFFFFF, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_2(i));
+
+		msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_INPUT_SEL(i));
+
+		msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
+			ispif->base + ISPIF_VFE_m_INTF_CMD_0(i));
+		msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
+			ispif->base + ISPIF_VFE_m_INTF_CMD_1(i));
+		pr_debug("%s: base %pK", __func__, ispif->base);
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 0));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 1));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 0));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 1));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 2));
+
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_CROP(i, 0));
+		msm_camera_io_w(0, ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_CROP(i, 1));
+	}
+
+	msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+		ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+	return rc;
+}
+
+/*
+ * msm_ispif_sel_csid_core - route a CSID core to one ISPIF interface.
+ *
+ * Read-modify-writes the 2-bit CSID select field for the given interface
+ * in the VFE input-select register (PIX0 at bits 1:0, RDI0 at 5:4, PIX1 at
+ * 9:8, RDI1 at 13:12, RDI2 at 21:20). Unknown intftypes silently leave the
+ * register unchanged (rewritten as read).
+ */
+static void msm_ispif_sel_csid_core(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
+{
+	uint32_t data;
+
+	if (WARN_ON(!ispif))
+		return;
+
+	if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+		pr_err("%s: invalid interface type\n", __func__);
+		return;
+	}
+
+	data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_INPUT_SEL(vfe_intf));
+	switch (intftype) {
+	case PIX0:
+		data &= ~(BIT(1) | BIT(0));
+		data |= (uint32_t)csid;
+		break;
+	case RDI0:
+		data &= ~(BIT(5) | BIT(4));
+		data |= (uint32_t)(csid << 4);
+		break;
+	case PIX1:
+		data &= ~(BIT(9) | BIT(8));
+		data |= (uint32_t)(csid << 8);
+		break;
+	case RDI1:
+		data &= ~(BIT(13) | BIT(12));
+		data |= (uint32_t)(csid << 12);
+		break;
+	case RDI2:
+		data &= ~(BIT(21) | BIT(20));
+		data |= (uint32_t)(csid << 20);
+		break;
+	}
+
+	msm_camera_io_w_mb(data, ispif->base +
+		ISPIF_VFE_m_INPUT_SEL(vfe_intf));
+}
+
+/*
+ * msm_ispif_enable_crop - enable horizontal crop on a PIX interface.
+ *
+ * Sets the crop-enable bit for the interface in CTRL_0 (bit intftype+7,
+ * plus the PIX0 line-buffer enable for PIX0), then programs the crop
+ * window as start_pixel | (end_pixel << 16). Only PIX0/PIX1 are valid;
+ * any other intftype is logged and triggers a WARN (note: the CTRL_0 bit
+ * has already been written at that point).
+ */
+static void msm_ispif_enable_crop(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t vfe_intf, uint16_t start_pixel,
+	uint16_t end_pixel)
+{
+	uint32_t data;
+
+	if (WARN_ON(!ispif))
+		return;
+
+	if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+		pr_err("%s: invalid interface type\n", __func__);
+		return;
+	}
+
+	data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_CTRL_0(vfe_intf));
+	data |= (1 << (intftype + 7));
+	if (intftype == PIX0)
+		data |= 1 << PIX0_LINE_BUF_EN_BIT;
+	msm_camera_io_w(data,
+		ispif->base + ISPIF_VFE_m_CTRL_0(vfe_intf));
+
+	if (intftype == PIX0)
+		msm_camera_io_w_mb(start_pixel | (end_pixel << 16),
+			ispif->base + ISPIF_VFE_m_PIX_INTF_n_CROP(vfe_intf, 0));
+	else if (intftype == PIX1)
+		msm_camera_io_w_mb(start_pixel | (end_pixel << 16),
+			ispif->base + ISPIF_VFE_m_PIX_INTF_n_CROP(vfe_intf, 1));
+	else {
+		pr_err("%s: invalid intftype=%d\n", __func__, intftype);
+		WARN_ON(1);
+		return;
+	}
+}
+
+/*
+ * msm_ispif_enable_intf_cids - set or clear CID bits for one interface.
+ *
+ * @cid_mask: bitmask of channel IDs to enable (@enable != 0) or disable
+ *            (@enable == 0) in the interface's CID_MASK register.
+ *
+ * Performs a read-modify-write so CIDs belonging to other streams on the
+ * same interface are preserved.
+ */
+static void msm_ispif_enable_intf_cids(struct ispif_device *ispif,
+	uint8_t intftype, uint16_t cid_mask, uint8_t vfe_intf, uint8_t enable)
+{
+	uint32_t intf_addr, data;
+
+	if (WARN_ON((!ispif)))
+		return;
+
+	if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+		pr_err("%s: invalid interface type\n", __func__);
+		return;
+	}
+
+	switch (intftype) {
+	case PIX0:
+		intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 0);
+		break;
+	case RDI0:
+		intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 0);
+		break;
+	case PIX1:
+		intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 1);
+		break;
+	case RDI1:
+		intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 1);
+		break;
+	case RDI2:
+		intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 2);
+		break;
+	default:
+		pr_err("%s: invalid intftype=%d\n", __func__, intftype);
+		WARN_ON(1);
+		return;
+	}
+
+	data = msm_camera_io_r(ispif->base + intf_addr);
+	if (enable)
+		data |= (uint32_t)cid_mask;
+	else
+		data &= ~((uint32_t)cid_mask);
+	msm_camera_io_w_mb(data, ispif->base + intf_addr);
+}
+
+/*
+ * msm_ispif_validate_intf_status - check that an interface is idle.
+ *
+ * Reads the interface's STATUS register; the low nibble reads 0xf when the
+ * interface is idle. Returns 0 if idle, -EBUSY if active, -ENODEV/-EINVAL
+ * on bad arguments. Note: an unknown intftype leaves data at 0 and is
+ * therefore reported as -EBUSY.
+ */
+static int msm_ispif_validate_intf_status(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t vfe_intf)
+{
+	int rc = 0;
+	uint32_t data = 0;
+
+	if (WARN_ON((!ispif)))
+		return -ENODEV;
+
+	if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+		pr_err("%s: invalid interface type\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (intftype) {
+	case PIX0:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0));
+		break;
+	case RDI0:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0));
+		break;
+	case PIX1:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1));
+		break;
+	case RDI1:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1));
+		break;
+	case RDI2:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2));
+		break;
+	}
+	if ((data & 0xf) != 0xf)
+		rc = -EBUSY;
+	return rc;
+}
+
+/*
+ * msm_ispif_select_clk_mux - select the CSID clock source for an interface.
+ *
+ * Programs the per-VFE 4-bit CSID field in the clock-mux register block:
+ * PIX muxes live at the clk_mux base (8 bits per VFE, PIX0 low nibble,
+ * PIX1 high nibble); RDI muxes live at ISPIF_RDI_CLK_MUX_SEL_ADDR (12 bits
+ * per VFE, one nibble per RDI). Ends with a full barrier so the mux write
+ * lands before any dependent register traffic.
+ */
+static void msm_ispif_select_clk_mux(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
+{
+	uint32_t data = 0;
+
+	switch (intftype) {
+	case PIX0:
+		data = msm_camera_io_r(ispif->clk_mux_base);
+		data &= ~(0xf << (vfe_intf * 8));
+		data |= (csid << (vfe_intf * 8));
+		msm_camera_io_w(data, ispif->clk_mux_base);
+		break;
+
+	case RDI0:
+		data = msm_camera_io_r(ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		data &= ~(0xf << (vfe_intf * 12));
+		data |= (csid << (vfe_intf * 12));
+		msm_camera_io_w(data, ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		break;
+
+	case PIX1:
+		data = msm_camera_io_r(ispif->clk_mux_base);
+		data &= ~(0xf0 << (vfe_intf * 8));
+		data |= (csid << (4 + (vfe_intf * 8)));
+		msm_camera_io_w(data, ispif->clk_mux_base);
+		break;
+
+	case RDI1:
+		data = msm_camera_io_r(ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		data &= ~(0xf << (4 + (vfe_intf * 12)));
+		data |= (csid << (4 + (vfe_intf * 12)));
+		msm_camera_io_w(data, ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		break;
+
+	case RDI2:
+		data = msm_camera_io_r(ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		data &= ~(0xf << (8 + (vfe_intf * 12)));
+		data |= (csid << (8 + (vfe_intf * 12)));
+		msm_camera_io_w(data, ispif->clk_mux_base +
+			ISPIF_RDI_CLK_MUX_SEL_ADDR);
+		break;
+	}
+	CDBG("%s intftype %d data %x\n", __func__, intftype, data);
+	/* ensure clk mux is enabled */
+	mb();
+}
+
+/*
+ * msm_ispif_get_cids_mask_from_cfg - fold an entry's CID list into a
+ * bitmask (bit N set for CID N). Reads at most MAX_CID_CH_PARAM_ENTRY
+ * CIDs regardless of num_cids, guarding against out-of-bounds access.
+ * Returns 0 for a NULL entry.
+ */
+static uint16_t msm_ispif_get_cids_mask_from_cfg(
+	struct msm_ispif_params_entry *entry)
+{
+	int i;
+	uint16_t cids_mask = 0;
+
+	if (WARN_ON(!entry)) {
+		pr_err("%s: invalid entry", __func__);
+		return cids_mask;
+	}
+
+	for (i = 0; i < entry->num_cids && i < MAX_CID_CH_PARAM_ENTRY; i++)
+		cids_mask |= (1 << entry->cids[i]);
+
+	return cids_mask;
+}
+
+/*
+ * msm_ispif_config - apply a userspace-supplied ISPIF routing config.
+ *
+ * For each entry: validates the VFE interface, masks its IRQs, selects the
+ * CSID clock mux (CSID v3.0+), checks the interface is idle, routes the
+ * CSID core, enables the entry's CIDs and the optional PIX crop window.
+ * Finally re-arms the IRQ masks and issues a global IRQ clear.
+ *
+ * Fix: "&params" had been mojibake-corrupted to "\xb6ms" (HTML-entity
+ * damage), which cannot compile; restored the address-of expression.
+ *
+ * Returns 0 on success, -EPERM if the ISPIF is not powered up, -EINVAL on
+ * malformed parameters, or the interface-status validation error.
+ */
+static int msm_ispif_config(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int rc = 0, i = 0;
+	uint16_t cid_mask;
+	enum msm_ispif_intftype intftype;
+	enum msm_ispif_vfe_intf vfe_intf;
+
+	if (WARN_ON(!ispif) || WARN_ON(!params))
+		return -EINVAL;
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	/* First pass: validate interfaces and quiesce their IRQ masks. */
+	for (i = 0; i < params->num; i++) {
+		vfe_intf = params->entries[i].vfe_intf;
+		if (vfe_intf >= VFE_MAX) {
+			pr_err("%s: %d invalid i %d vfe_intf %d\n", __func__,
+				__LINE__, i, vfe_intf);
+			return -EINVAL;
+		}
+		if (!msm_ispif_is_intf_valid(ispif->csid_version,
+				vfe_intf)) {
+			pr_err("%s: invalid interface type\n", __func__);
+			return -EINVAL;
+		}
+		msm_camera_io_w(0x0, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_0(vfe_intf));
+		msm_camera_io_w(0x0, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_1(vfe_intf));
+		msm_camera_io_w_mb(0x0, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_2(vfe_intf));
+	}
+
+	/* Second pass: program routing, CIDs and crop for every entry. */
+	for (i = 0; i < params->num; i++) {
+		intftype = params->entries[i].intftype;
+
+		vfe_intf = params->entries[i].vfe_intf;
+
+		CDBG("%s intftype %x, vfe_intf %d, csid %d\n", __func__,
+			intftype, vfe_intf, params->entries[i].csid);
+
+		if ((intftype >= INTF_MAX) ||
+			(vfe_intf >= ispif->vfe_info.num_vfe) ||
+			(ispif->csid_version <= CSID_VERSION_V22 &&
+			(vfe_intf > VFE0))) {
+			pr_err("%s: VFEID %d and CSID version %d mismatch\n",
+				__func__, vfe_intf, ispif->csid_version);
+			return -EINVAL;
+		}
+
+		if (ispif->csid_version >= CSID_VERSION_V30)
+			msm_ispif_select_clk_mux(ispif, intftype,
+				params->entries[i].csid, vfe_intf);
+
+		rc = msm_ispif_validate_intf_status(ispif, intftype, vfe_intf);
+		if (rc) {
+			pr_err("%s:validate_intf_status failed, rc = %d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		msm_ispif_sel_csid_core(ispif, intftype,
+			params->entries[i].csid, vfe_intf);
+		cid_mask = msm_ispif_get_cids_mask_from_cfg(
+				&params->entries[i]);
+		msm_ispif_enable_intf_cids(ispif, intftype,
+			cid_mask, vfe_intf, 1);
+		if (params->entries[i].crop_enable)
+			msm_ispif_enable_crop(ispif, intftype, vfe_intf,
+				params->entries[i].crop_start_pixel,
+				params->entries[i].crop_end_pixel);
+	}
+
+	/* NOTE(review): hard-codes 2 VFEs here; presumably should honor
+	 * vfe_info.num_vfe like the loops above - confirm on single-VFE HW.
+	 */
+	for (vfe_intf = 0; vfe_intf < 2; vfe_intf++) {
+		msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_0(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_0(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_1(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_1(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_MASK_2(vfe_intf));
+
+		msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base +
+			ISPIF_VFE_m_IRQ_CLEAR_2(vfe_intf));
+	}
+
+	msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+		ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+	return rc;
+}
+
+/*
+ * msm_ispif_intf_cmd - write a 2-bit interface command for every CID of
+ * every entry.
+ *
+ * Maintains a cached copy of the two INTF_CMD registers per VFE
+ * (applied_intf_cmd): each CID's virtual channel (cid / 4) owns a 2-bit
+ * field, offset by intf_type * 8 in CMD_0 (PIX0/RDI0/PIX1/RDI1) or by 8
+ * in CMD_1 (RDI2). The register is written only when the cached value is
+ * not the 0xFFFFFFFF "untouched" sentinel. Returns 0, or -EINVAL on bad
+ * arguments.
+ */
+static int msm_ispif_intf_cmd(struct ispif_device *ispif, uint32_t cmd_bits,
+	struct msm_ispif_param_data *params)
+{
+	uint8_t vc;
+	int i, k;
+	enum msm_ispif_intftype intf_type;
+	enum msm_ispif_cid cid;
+	enum msm_ispif_vfe_intf vfe_intf;
+
+	if (WARN_ON(!ispif) || WARN_ON(!params))
+		return -EINVAL;
+
+	/* Validate every entry before touching any register state. */
+	for (i = 0; i < params->num; i++) {
+		vfe_intf = params->entries[i].vfe_intf;
+		if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+			pr_err("%s: invalid interface type\n", __func__);
+			return -EINVAL;
+		}
+		if (params->entries[i].num_cids > MAX_CID_CH_PARAM_ENTRY) {
+			pr_err("%s: out of range of cid_num %d\n",
+				__func__, params->entries[i].num_cids);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < params->num; i++) {
+		intf_type = params->entries[i].intftype;
+		vfe_intf = params->entries[i].vfe_intf;
+		for (k = 0; k < params->entries[i].num_cids; k++) {
+			cid = params->entries[i].cids[k];
+			vc = cid / 4;	/* virtual channel of this CID */
+			if (intf_type == RDI2) {
+				/* zero out two bits */
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd1 &=
+					~(0x3 << (vc * 2 + 8));
+				/* set cmd bits */
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd1 |=
+					(cmd_bits << (vc * 2 + 8));
+			} else {
+				/* zero 2 bits */
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd &=
+					~(0x3 << (vc * 2 + intf_type * 8));
+				/* set cmd bits */
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd |=
+					(cmd_bits << (vc * 2 + intf_type * 8));
+			}
+		}
+		/* cmd for PIX0, PIX1, RDI0, RDI1 */
+		if (ispif->applied_intf_cmd[vfe_intf].intf_cmd != 0xFFFFFFFF)
+			msm_camera_io_w_mb(
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd,
+				ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe_intf));
+
+		/* cmd for RDI2 */
+		if (ispif->applied_intf_cmd[vfe_intf].intf_cmd1 != 0xFFFFFFFF)
+			msm_camera_io_w_mb(
+				ispif->applied_intf_cmd[vfe_intf].intf_cmd1,
+				ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe_intf));
+	}
+	return 0;
+}
+
+/*
+ * msm_ispif_stop_immediately - stop interfaces without waiting for a
+ * frame boundary.
+ *
+ * Issues ISPIF_INTF_CMD_DISABLE_IMMEDIATELY for all entries, then clears
+ * each entry's CID enable bits.
+ *
+ * Fix: "&params" had been mojibake-corrupted to "\xb6ms" (HTML-entity
+ * damage), which cannot compile; restored the address-of expression.
+ *
+ * Returns 0 on success, -EPERM when the ISPIF is not powered up, or
+ * -EINVAL on bad arguments / parameter count.
+ */
+static int msm_ispif_stop_immediately(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int i, rc = 0;
+	uint16_t cid_mask = 0;
+
+	if (WARN_ON(!ispif) || WARN_ON(!params))
+		return -EINVAL;
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+	msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_DISABLE_IMMEDIATELY, params);
+
+	/* after stop the interface we need to unmask the CID enable bits */
+	for (i = 0; i < params->num; i++) {
+		cid_mask = msm_ispif_get_cids_mask_from_cfg(
+			&params->entries[i]);
+		msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
+			cid_mask, params->entries[i].vfe_intf, 0);
+	}
+
+	return rc;
+}
+
+/*
+ * msm_ispif_start_frame_boundary - start streaming at the next frame
+ * boundary by issuing ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY for every
+ * entry. Returns 0 on success, -EPERM when the ISPIF is not powered up,
+ * or -EINVAL on a bad parameter count.
+ */
+static int msm_ispif_start_frame_boundary(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int rc = 0;
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
+
+	return rc;
+}
+
+/*
+ * msm_ispif_restart_frame_boundary - error-recovery restart: reset the
+ * ISPIF for the affected VFEs, then re-enable frame-boundary streaming
+ * and the CID masks for every entry.
+ *
+ * Fixes in this revision:
+ *  - "&params" had been mojibake-corrupted to "\xb6ms"; restored.
+ *  - The per-entry switch dispatched on entries[0].intftype instead of
+ *    entries[i].intftype, so entries after the first were classified by
+ *    the first entry's type (matches upstream msm_ispif.c behavior).
+ *  - The disable_clk error path logged "cannot enable clock" while
+ *    disabling clocks; message corrected.
+ *
+ * Returns 0 on success, -EPERM/-EINVAL on bad state or parameters, or
+ * -ETIMEDOUT if a reset completion never arrives.
+ */
+static int msm_ispif_restart_frame_boundary(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int rc = 0, i;
+	long timeout = 0;
+	uint16_t cid_mask;
+	enum msm_ispif_intftype intftype;
+	enum msm_ispif_vfe_intf vfe_intf;
+	uint32_t vfe_mask = 0;
+	uint32_t intf_addr;
+	struct clk *reset_clk[ARRAY_SIZE(ispif_8974_reset_clk_info)];
+	struct clk *reset_clk1[ARRAY_SIZE(ispif_8626_reset_clk_info)];
+
+	ispif->clk_idx = 0;
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	/* Collect the set of VFEs touched by this request. */
+	for (i = 0; i < params->num; i++) {
+		vfe_intf = params->entries[i].vfe_intf;
+		if (vfe_intf >= VFE_MAX) {
+			pr_err("%s: %d invalid i %d vfe_intf %d\n", __func__,
+				__LINE__, i, vfe_intf);
+			return -EINVAL;
+		}
+		vfe_mask |= (1 << vfe_intf);
+	}
+
+	/* Enable reset clocks: try the 8974 table, fall back to 8x26;
+	 * clk_idx remembers which one succeeded (1 = 8974, 2 = 8x26).
+	 */
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_reset_clk_info, reset_clk,
+		ARRAY_SIZE(ispif_8974_reset_clk_info), 1);
+	if (rc < 0) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 1);
+		if (rc < 0) {
+			pr_err("%s: cannot enable clock, error = %d",
+				__func__, rc);
+		} else {
+			/* This is set when device is 8x26 */
+			ispif->clk_idx = 2;
+		}
+	} else {
+		/* This is set when device is 8974 */
+		ispif->clk_idx = 1;
+	}
+
+	if (vfe_mask & (1 << VFE0)) {
+		init_completion(&ispif->reset_complete[VFE0]);
+		pr_err("%s Init completion VFE0\n", __func__);
+		/* initiate reset of ISPIF */
+		msm_camera_io_w(0x00001FF9,
+				ispif->base + ISPIF_RST_CMD_ADDR);
+	}
+	if (ispif->hw_num_isps > 1 && (vfe_mask & (1 << VFE1))) {
+		init_completion(&ispif->reset_complete[VFE1]);
+		pr_err("%s Init completion VFE1\n", __func__);
+		msm_camera_io_w(0x00001FF9,
+				ispif->base + ISPIF_RST_CMD_1_ADDR);
+	}
+
+	if (vfe_mask & (1 << VFE0)) {
+		timeout = wait_for_completion_timeout(
+			&ispif->reset_complete[VFE0], msecs_to_jiffies(500));
+		if (timeout <= 0) {
+			pr_err("%s: VFE0 reset wait timeout\n", __func__);
+			rc = -ETIMEDOUT;
+			goto disable_clk;
+		}
+	}
+
+	if (ispif->hw_num_isps > 1 && (vfe_mask & (1 << VFE1))) {
+		timeout = wait_for_completion_timeout(
+				&ispif->reset_complete[VFE1],
+				msecs_to_jiffies(500));
+		if (timeout <= 0) {
+			pr_err("%s: VFE1 reset wait timeout\n", __func__);
+			rc = -ETIMEDOUT;
+			goto disable_clk;
+		}
+	}
+
+	pr_info("%s: ISPIF reset hw done", __func__);
+
+	/* Reset done: drop the clock votes taken above. */
+	if (ispif->clk_idx == 1) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8974_reset_clk_info, reset_clk,
+			ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+		if (rc < 0) {
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+			goto end;
+		}
+	}
+
+	if (ispif->clk_idx == 2) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+		if (rc < 0) {
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+			goto end;
+		}
+	}
+
+	/* Re-arm streaming. NOTE(review): intf_addr is computed but never
+	 * used here, and msm_ispif_intf_cmd is invoked once per entry even
+	 * though it already walks all entries - confirm intent upstream.
+	 */
+	for (i = 0; i < params->num; i++) {
+		intftype = params->entries[i].intftype;
+		vfe_intf = params->entries[i].vfe_intf;
+
+		switch (params->entries[i].intftype) {
+		case PIX0:
+			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0);
+			break;
+		case RDI0:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0);
+			break;
+		case PIX1:
+			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1);
+			break;
+		case RDI1:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1);
+			break;
+		case RDI2:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2);
+			break;
+		default:
+			pr_err("%s: invalid intftype=%d\n", __func__,
+				params->entries[i].intftype);
+			rc = -EPERM;
+			goto end;
+		}
+
+		msm_ispif_intf_cmd(ispif,
+			ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
+	}
+
+	/* Re-enable each entry's CID mask. */
+	for (i = 0; i < params->num; i++) {
+		intftype = params->entries[i].intftype;
+
+		vfe_intf = params->entries[i].vfe_intf;
+
+
+		cid_mask = msm_ispif_get_cids_mask_from_cfg(
+			&params->entries[i]);
+
+		msm_ispif_enable_intf_cids(ispif, intftype,
+			cid_mask, vfe_intf, 1);
+	}
+
+end:
+	return rc;
+disable_clk:
+	rc = msm_cam_clk_enable(&ispif->pdev->dev,
+		ispif_8974_reset_clk_info, reset_clk,
+		ARRAY_SIZE(ispif_8974_reset_clk_info), 0);
+	if (rc < 0) {
+		rc = msm_cam_clk_enable(&ispif->pdev->dev,
+			ispif_8626_reset_clk_info, reset_clk1,
+			ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+		if (rc < 0)
+			pr_err("%s: cannot disable clock, error = %d",
+				__func__, rc);
+	}
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * msm_ispif_stop_frame_boundary - stop streaming at the next frame
+ * boundary and wait for each interface to drain.
+ *
+ * Issues ISPIF_INTF_CMD_DISABLE_FRAME_BOUNDARY for all entries, then for
+ * each entry polls its STATUS register until the low nibble reads 0xF
+ * (idle; up to ISPIF_TIMEOUT_ALL_US) before clearing its CID mask.
+ *
+ * Fix: "&params" had been mojibake-corrupted to "\xb6ms" (HTML-entity
+ * damage), which cannot compile; restored the address-of expression.
+ *
+ * Returns 0 on success, -EPERM when not powered up, -EINVAL on bad
+ * arguments, or the readl_poll_timeout error (-ETIMEDOUT) on drain
+ * failure.
+ */
+static int msm_ispif_stop_frame_boundary(struct ispif_device *ispif,
+	struct msm_ispif_param_data *params)
+{
+	int i, rc = 0;
+	uint16_t cid_mask = 0;
+	uint32_t intf_addr;
+	enum msm_ispif_vfe_intf vfe_intf;
+	uint32_t stop_flag = 0;
+
+	if (WARN_ON(!ispif) || WARN_ON(!params))
+		return -EINVAL;
+
+
+	if (ispif->ispif_state != ISPIF_POWER_UP) {
+		pr_err("%s: ispif invalid state %d\n", __func__,
+			ispif->ispif_state);
+		rc = -EPERM;
+		return rc;
+	}
+
+	if (params->num > MAX_PARAM_ENTRIES) {
+		pr_err("%s: invalid param entries %d\n", __func__,
+			params->num);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	for (i = 0; i < params->num; i++) {
+		if (!msm_ispif_is_intf_valid(ispif->csid_version,
+				params->entries[i].vfe_intf)) {
+			pr_err("%s: invalid interface type\n", __func__);
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+
+	msm_ispif_intf_cmd(ispif,
+		ISPIF_INTF_CMD_DISABLE_FRAME_BOUNDARY, params);
+
+	for (i = 0; i < params->num; i++) {
+		cid_mask =
+			msm_ispif_get_cids_mask_from_cfg(&params->entries[i]);
+		vfe_intf = params->entries[i].vfe_intf;
+
+		switch (params->entries[i].intftype) {
+		case PIX0:
+			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0);
+			break;
+		case RDI0:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0);
+			break;
+		case PIX1:
+			intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1);
+			break;
+		case RDI1:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1);
+			break;
+		case RDI2:
+			intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2);
+			break;
+		default:
+			pr_err("%s: invalid intftype=%d\n", __func__,
+				params->entries[i].intftype);
+			rc = -EPERM;
+			goto end;
+		}
+
+		/* Wait for the interface to report idle (low nibble 0xF). */
+		rc = readl_poll_timeout(ispif->base + intf_addr, stop_flag,
+					(stop_flag & 0xF) == 0xF,
+					ISPIF_TIMEOUT_SLEEP_US,
+					ISPIF_TIMEOUT_ALL_US);
+		if (rc < 0)
+			goto end;
+
+		/* disable CIDs in CID_MASK register */
+		msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
+			cid_mask, vfe_intf, 0);
+	}
+
+end:
+	return rc;
+}
+
+static void ispif_process_irq(struct ispif_device *ispif,
+ struct ispif_irq_status *out, enum msm_ispif_vfe_intf vfe_id)
+{
+ if (WARN_ON(!ispif) || WARN_ON(!out))
+ return;
+
+ if (out[vfe_id].ispifIrqStatus0 &
+ ISPIF_IRQ_STATUS_PIX_SOF_MASK) {
+ if (ispif->ispif_sof_debug < 5)
+ pr_err("%s: PIX0 frame id: %u\n", __func__,
+ ispif->sof_count[vfe_id].sof_cnt[PIX0]);
+ ispif->sof_count[vfe_id].sof_cnt[PIX0]++;
+ ispif->ispif_sof_debug++;
+ }
+ if (out[vfe_id].ispifIrqStatus0 &
+ ISPIF_IRQ_STATUS_RDI0_SOF_MASK) {
+ ispif->sof_count[vfe_id].sof_cnt[RDI0]++;
+ }
+ if (out[vfe_id].ispifIrqStatus1 &
+ ISPIF_IRQ_STATUS_RDI1_SOF_MASK) {
+ ispif->sof_count[vfe_id].sof_cnt[RDI1]++;
+ }
+ if (out[vfe_id].ispifIrqStatus2 &
+ ISPIF_IRQ_STATUS_RDI2_SOF_MASK) {
+ ispif->sof_count[vfe_id].sof_cnt[RDI2]++;
+ }
+}
+
+static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out,
+ void *data)
+{
+ struct ispif_device *ispif = (struct ispif_device *)data;
+
+ if (WARN_ON(!ispif) || WARN_ON(!out))
+ return;
+
+ out[VFE0].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_0(VFE0));
+ msm_camera_io_w(out[VFE0].ispifIrqStatus0,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE0));
+
+ out[VFE0].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_1(VFE0));
+ msm_camera_io_w(out[VFE0].ispifIrqStatus1,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE0));
+
+ out[VFE0].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_2(VFE0));
+ msm_camera_io_w_mb(out[VFE0].ispifIrqStatus2,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE0));
+
+ if (ispif->vfe_info.num_vfe > 1) {
+ out[VFE1].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_0(VFE1));
+ msm_camera_io_w(out[VFE1].ispifIrqStatus0,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE1));
+
+ out[VFE1].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_1(VFE1));
+ msm_camera_io_w(out[VFE1].ispifIrqStatus1,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE1));
+
+ out[VFE1].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_2(VFE1));
+ msm_camera_io_w_mb(out[VFE1].ispifIrqStatus2,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE1));
+ }
+ msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+ ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+ if (out[VFE0].ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
+ if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ)
+ complete(&ispif->reset_complete[VFE0]);
+
+ if (out[VFE0].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
+ pr_err("%s: VFE0 pix0 overflow.\n", __func__);
+
+ if (out[VFE0].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ)
+ pr_err("%s: VFE0 rdi0 overflow.\n", __func__);
+
+ if (out[VFE0].ispifIrqStatus1 & RAW_INTF_1_OVERFLOW_IRQ)
+ pr_err("%s: VFE0 rdi1 overflow.\n", __func__);
+
+ if (out[VFE0].ispifIrqStatus2 & RAW_INTF_2_OVERFLOW_IRQ)
+ pr_err("%s: VFE0 rdi2 overflow.\n", __func__);
+
+ ispif_process_irq(ispif, out, VFE0);
+ }
+ if (ispif->hw_num_isps > 1) {
+ if (out[VFE1].ispifIrqStatus0 & RESET_DONE_IRQ)
+ complete(&ispif->reset_complete[VFE1]);
+
+ if (out[VFE1].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
+ pr_err("%s: VFE1 pix0 overflow.\n", __func__);
+
+ if (out[VFE1].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ)
+ pr_err("%s: VFE1 rdi0 overflow.\n", __func__);
+
+ if (out[VFE1].ispifIrqStatus1 & RAW_INTF_1_OVERFLOW_IRQ)
+ pr_err("%s: VFE1 rdi1 overflow.\n", __func__);
+
+ if (out[VFE1].ispifIrqStatus2 & RAW_INTF_2_OVERFLOW_IRQ)
+ pr_err("%s: VFE1 rdi2 overflow.\n", __func__);
+
+ ispif_process_irq(ispif, out, VFE1);
+ }
+}
+
+static irqreturn_t msm_io_ispif_irq(int irq_num, void *data)
+{
+ struct ispif_irq_status irq[VFE_MAX];
+
+ msm_ispif_read_irq_status(irq, data);
+ return IRQ_HANDLED;
+}
+
+static int msm_ispif_set_vfe_info(struct ispif_device *ispif,
+ struct msm_ispif_vfe_info *vfe_info)
+{
+ if (!vfe_info || (vfe_info->num_vfe <= 0) ||
+ ((uint32_t)(vfe_info->num_vfe) > ispif->hw_num_isps)) {
+ pr_err("Invalid VFE info: %pK %d\n", vfe_info,
+ (vfe_info ? vfe_info->num_vfe:0));
+ return -EINVAL;
+ }
+
+ memcpy(&ispif->vfe_info, vfe_info, sizeof(struct msm_ispif_vfe_info));
+
+ return 0;
+}
+
+static int msm_ispif_init(struct ispif_device *ispif,
+ uint32_t csid_version)
+{
+ int rc = 0;
+
+ if (WARN_ON(!ispif)) {
+ pr_err("%s: invalid ispif params", __func__);
+ return -EINVAL;
+ }
+
+ if (ispif->ispif_state == ISPIF_POWER_UP) {
+ pr_err("%s: ispif already initted state = %d\n", __func__,
+ ispif->ispif_state);
+ rc = -EPERM;
+ return rc;
+ }
+
+	/* initialize to an invalid value so the first applied command always
+	 * differs; unclear whether zero would be a safe initial value here
+	 */
+ ispif->applied_intf_cmd[VFE0].intf_cmd = 0xFFFFFFFF;
+ ispif->applied_intf_cmd[VFE0].intf_cmd1 = 0xFFFFFFFF;
+ ispif->applied_intf_cmd[VFE1].intf_cmd = 0xFFFFFFFF;
+ ispif->applied_intf_cmd[VFE1].intf_cmd1 = 0xFFFFFFFF;
+ memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
+
+ ispif->csid_version = csid_version;
+
+ if (ispif->csid_version >= CSID_VERSION_V30) {
+ if (!ispif->clk_mux_mem || !ispif->clk_mux_io) {
+ pr_err("%s csi clk mux mem %pK io %pK\n", __func__,
+ ispif->clk_mux_mem, ispif->clk_mux_io);
+ rc = -ENOMEM;
+ return rc;
+ }
+ ispif->clk_mux_base = ioremap(ispif->clk_mux_mem->start,
+ resource_size(ispif->clk_mux_mem));
+ if (!ispif->clk_mux_base) {
+ pr_err("%s: clk_mux_mem ioremap failed\n", __func__);
+ rc = -ENOMEM;
+ return rc;
+ }
+ }
+
+ ispif->base = ioremap(ispif->mem->start,
+ resource_size(ispif->mem));
+ if (!ispif->base) {
+ rc = -ENOMEM;
+ pr_err("%s: nomem\n", __func__);
+ goto end;
+ }
+ rc = request_irq(ispif->irq->start, msm_io_ispif_irq,
+ IRQF_TRIGGER_RISING, "ispif", ispif);
+ if (rc) {
+ pr_err("%s: request_irq error = %d\n", __func__, rc);
+ goto error_irq;
+ }
+
+ rc = msm_ispif_clk_ahb_enable(ispif, 1);
+ if (rc) {
+ pr_err("%s: ahb_clk enable failed", __func__);
+ goto error_ahb;
+ }
+
+ msm_ispif_reset_hw(ispif);
+
+ rc = msm_ispif_reset(ispif);
+ if (rc == 0) {
+ ispif->ispif_state = ISPIF_POWER_UP;
+ CDBG("%s: power up done\n", __func__);
+ goto end;
+ }
+
+error_ahb:
+ free_irq(ispif->irq->start, ispif);
+error_irq:
+ iounmap(ispif->base);
+
+end:
+ return rc;
+}
+
+static void msm_ispif_release(struct ispif_device *ispif)
+{
+ if (WARN_ON(!ispif)) {
+ pr_err("%s: invalid ispif params", __func__);
+ return;
+ }
+
+ if (!ispif->base) {
+ pr_err("%s: ispif base is NULL\n", __func__);
+ return;
+ }
+
+ if (ispif->ispif_state != ISPIF_POWER_UP) {
+ pr_err("%s: ispif invalid state %d\n", __func__,
+ ispif->ispif_state);
+ return;
+ }
+
+	/* make sure no streaming is going on */
+ msm_ispif_reset(ispif);
+
+ msm_ispif_clk_ahb_enable(ispif, 0);
+
+ free_irq(ispif->irq->start, ispif);
+
+ iounmap(ispif->base);
+
+ iounmap(ispif->clk_mux_base);
+
+ ispif->ispif_state = ISPIF_POWER_DOWN;
+}
+
+static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg)
+{
+ long rc = 0;
+ struct ispif_cfg_data *pcdata = (struct ispif_cfg_data *)arg;
+ struct ispif_device *ispif =
+ (struct ispif_device *)v4l2_get_subdevdata(sd);
+
+ if (WARN_ON(!sd) || WARN_ON(!pcdata))
+ return -EINVAL;
+
+ mutex_lock(&ispif->mutex);
+ switch (pcdata->cfg_type) {
+ case ISPIF_ENABLE_REG_DUMP:
+ ispif->enb_dump_reg = pcdata->reg_dump; /* save dump config */
+ break;
+ case ISPIF_INIT:
+ rc = msm_ispif_init(ispif, pcdata->csid_version);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_CFG:
+ rc = msm_ispif_config(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_START_FRAME_BOUNDARY:
+ rc = msm_ispif_start_frame_boundary(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_RESTART_FRAME_BOUNDARY:
+ rc = msm_ispif_restart_frame_boundary(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+
+ case ISPIF_STOP_FRAME_BOUNDARY:
+ rc = msm_ispif_stop_frame_boundary(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_STOP_IMMEDIATELY:
+ rc = msm_ispif_stop_immediately(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_RELEASE:
+ msm_ispif_release(ispif);
+ break;
+ case ISPIF_SET_VFE_INFO:
+ rc = msm_ispif_set_vfe_info(ispif, &pcdata->vfe_info);
+ break;
+ default:
+ pr_err("%s: invalid cfg_type\n", __func__);
+ rc = -EINVAL;
+ break;
+ }
+ mutex_unlock(&ispif->mutex);
+ return rc;
+}
+static struct v4l2_file_operations msm_ispif_v4l2_subdev_fops;
+
+static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct ispif_device *ispif =
+ (struct ispif_device *)v4l2_get_subdevdata(sd);
+
+ switch (cmd) {
+ case VIDIOC_MSM_ISPIF_CFG:
+ return msm_ispif_cmd(sd, arg);
+ case MSM_SD_NOTIFY_FREEZE: {
+ ispif->ispif_sof_debug = 0;
+ return 0;
+ }
+ case MSM_SD_SHUTDOWN: {
+ struct ispif_device *ispif =
+ (struct ispif_device *)v4l2_get_subdevdata(sd);
+ if (ispif && ispif->base) {
+ mutex_lock(&ispif->mutex);
+ msm_ispif_release(ispif);
+ mutex_unlock(&ispif->mutex);
+ }
+ return 0;
+ }
+ default:
+ pr_err_ratelimited("%s: invalid cmd 0x%x received\n",
+ __func__, cmd);
+ return -ENOIOCTLCMD;
+ }
+}
+
+static long msm_ispif_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+ return msm_ispif_subdev_ioctl(sd, cmd, arg);
+}
+
+static long msm_ispif_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_ispif_subdev_do_ioctl);
+}
+
+static int ispif_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct ispif_device *ispif = v4l2_get_subdevdata(sd);
+
+ mutex_lock(&ispif->mutex);
+ /* mem remap is done in init when the clock is on */
+ ispif->open_cnt++;
+ mutex_unlock(&ispif->mutex);
+ return 0;
+}
+
+static int ispif_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct ispif_device *ispif = v4l2_get_subdevdata(sd);
+
+ if (!ispif) {
+ pr_err("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ispif->mutex);
+ if (ispif->open_cnt == 0) {
+ pr_err("%s: Invalid close\n", __func__);
+ rc = -ENODEV;
+ goto end;
+ }
+ ispif->open_cnt--;
+ if (ispif->open_cnt == 0)
+ msm_ispif_release(ispif);
+end:
+ mutex_unlock(&ispif->mutex);
+ return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_ispif_subdev_core_ops = {
+ /* .g_chip_ident = &msm_ispif_subdev_g_chip_ident, */
+ .ioctl = &msm_ispif_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_ispif_subdev_ops = {
+ .core = &msm_ispif_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_ispif_internal_ops = {
+ .open = ispif_open_node,
+ .close = ispif_close_node,
+};
+
+static int ispif_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct ispif_device *ispif;
+
+ ispif = kzalloc(sizeof(struct ispif_device), GFP_KERNEL);
+ if (!ispif)
+ return -ENOMEM;
+
+ if (pdev->dev.of_node) {
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+ rc = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,num-isps", &ispif->hw_num_isps);
+ if (rc)
+ /* backward compatibility */
+ ispif->hw_num_isps = 1;
+ /* not an error condition */
+ rc = 0;
+ }
+
+ mutex_init(&ispif->mutex);
+ ispif->mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "ispif");
+ if (!ispif->mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto error;
+ }
+ ispif->irq = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "ispif");
+ if (!ispif->irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto error;
+ }
+ ispif->io = request_mem_region(ispif->mem->start,
+ resource_size(ispif->mem), pdev->name);
+ if (!ispif->io) {
+ pr_err("%s: no valid mem region\n", __func__);
+ rc = -EBUSY;
+ goto error;
+ }
+ ispif->clk_mux_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "csi_clk_mux");
+ if (ispif->clk_mux_mem) {
+ ispif->clk_mux_io = request_mem_region(
+ ispif->clk_mux_mem->start,
+ resource_size(ispif->clk_mux_mem),
+ ispif->clk_mux_mem->name);
+ if (!ispif->clk_mux_io)
+ pr_err("%s: no valid csi_mux region\n", __func__);
+ }
+
+ v4l2_subdev_init(&ispif->msm_sd.sd, &msm_ispif_subdev_ops);
+ ispif->msm_sd.sd.internal_ops = &msm_ispif_internal_ops;
+ ispif->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ snprintf(ispif->msm_sd.sd.name,
+ ARRAY_SIZE(ispif->msm_sd.sd.name), MSM_ISPIF_DRV_NAME);
+ v4l2_set_subdevdata(&ispif->msm_sd.sd, ispif);
+
+ platform_set_drvdata(pdev, &ispif->msm_sd.sd);
+
+ media_entity_pads_init(&ispif->msm_sd.sd.entity, 0, NULL);
+ ispif->msm_sd.sd.entity.function = MSM_CAMERA_SUBDEV_ISPIF;
+ ispif->msm_sd.sd.entity.name = pdev->name;
+ ispif->msm_sd.close_seq = MSM_SD_CLOSE_1ST_CATEGORY | 0x1;
+ rc = msm_sd_register(&ispif->msm_sd);
+ if (rc) {
+ pr_err("%s: msm_sd_register error = %d\n", __func__, rc);
+ goto error;
+ }
+ msm_ispif_v4l2_subdev_fops.owner = v4l2_subdev_fops.owner;
+ msm_ispif_v4l2_subdev_fops.open = v4l2_subdev_fops.open;
+ msm_ispif_v4l2_subdev_fops.unlocked_ioctl = msm_ispif_subdev_fops_ioctl;
+ msm_ispif_v4l2_subdev_fops.release = v4l2_subdev_fops.release;
+ msm_ispif_v4l2_subdev_fops.poll = v4l2_subdev_fops.poll;
+#ifdef CONFIG_COMPAT
+ msm_ispif_v4l2_subdev_fops.compat_ioctl32 = msm_ispif_subdev_fops_ioctl;
+#endif
+ ispif->msm_sd.sd.devnode->fops = &msm_ispif_v4l2_subdev_fops;
+ ispif->pdev = pdev;
+ ispif->ispif_state = ISPIF_POWER_DOWN;
+ ispif->open_cnt = 0;
+ return 0;
+
+error:
+ mutex_destroy(&ispif->mutex);
+ kfree(ispif);
+ return rc;
+}
+
+static const struct of_device_id msm_ispif_dt_match[] = {
+ {.compatible = "qcom,ispif"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_ispif_dt_match);
+
+static struct platform_driver ispif_driver = {
+ .probe = ispif_probe,
+ .driver = {
+ .name = MSM_ISPIF_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ispif_dt_match,
+ },
+};
+
+static int __init msm_ispif_init_module(void)
+{
+ return platform_driver_register(&ispif_driver);
+}
+
+static void __exit msm_ispif_exit_module(void)
+{
+ platform_driver_unregister(&ispif_driver);
+}
+
+module_init(msm_ispif_init_module);
+module_exit(msm_ispif_exit_module);
+MODULE_DESCRIPTION("MSM ISP Interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.h
new file mode 100644
index 0000000..6217fba
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_32.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_ISPIF_H
+#define MSM_ISPIF_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_ispif.h>
+#include "msm_sd.h"
+
+#define ISPIF_CLK_INFO_MAX 24
+
+struct ispif_irq_status {
+ uint32_t ispifIrqStatus0;
+ uint32_t ispifIrqStatus1;
+ uint32_t ispifIrqStatus2;
+};
+
+enum msm_ispif_state_t {
+ ISPIF_POWER_UP,
+ ISPIF_POWER_DOWN,
+};
+struct ispif_sof_count {
+ uint32_t sof_cnt[INTF_MAX];
+};
+
+struct ispif_intf_cmd {
+ uint32_t intf_cmd;
+ uint32_t intf_cmd1;
+};
+
+struct ispif_device {
+ struct platform_device *pdev;
+ struct msm_sd_subdev msm_sd;
+ struct resource *mem;
+ struct resource *clk_mux_mem;
+ struct resource *irq;
+ struct resource *io;
+ struct resource *clk_mux_io;
+ void __iomem *base;
+ void __iomem *clk_mux_base;
+ struct mutex mutex;
+ uint8_t start_ack_pending;
+ uint32_t csid_version;
+ int enb_dump_reg;
+ uint32_t open_cnt;
+ struct ispif_sof_count sof_count[VFE_MAX];
+ struct ispif_intf_cmd applied_intf_cmd[VFE_MAX];
+ enum msm_ispif_state_t ispif_state;
+ struct msm_ispif_vfe_info vfe_info;
+ struct clk *ahb_clk[ISPIF_CLK_INFO_MAX];
+ struct completion reset_complete[VFE_MAX];
+ uint32_t hw_num_isps;
+ uint32_t num_ahb_clk;
+ uint32_t clk_idx;
+ uint32_t ispif_sof_debug;
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
index 625a0db..c045eda 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
@@ -2077,14 +2077,8 @@
static int __init msm_actuator_init_module(void)
{
- int32_t rc = 0;
-
CDBG("Enter\n");
- rc = platform_driver_register(&msm_actuator_platform_driver);
- if (!rc)
- return rc;
-
- CDBG("%s:%d rc %d\n", __func__, __LINE__, rc);
+ platform_driver_register(&msm_actuator_platform_driver);
return i2c_add_driver(&msm_actuator_i2c_driver);
}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
index dfcb73a..d5e7989 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -323,8 +323,7 @@
if (!msm_csid_find_max_clk_rate(csid_dev))
pr_err("msm_csid_find_max_clk_rate failed\n");
- clk_rate = (csid_params->csi_clk > 0) ?
- (csid_params->csi_clk) : csid_dev->csid_max_clk;
+ clk_rate = csid_dev->csid_max_clk;
clk_rate = msm_camera_clk_set_rate(&csid_dev->pdev->dev,
csid_dev->csid_clk[csid_dev->csid_clk_index], clk_rate);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index 6fc8e1e..5f56676 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -29,7 +29,6 @@
#include "include/msm_csiphy_5_0_hwreg.h"
#include "include/msm_csiphy_5_0_1_hwreg.h"
#include "include/msm_csiphy_10_0_0_hwreg.h"
-
#include "cam_hw_ops.h"
#define DBG_CSIPHY 0
@@ -218,6 +217,8 @@
}
}
+ csiphy_dev->snps_programmed_data_rate = csiphy_params->data_rate;
+
if (mode == TWO_LANE_PHY_A) {
msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg.
mipi_csiphy_sys_ctrl.data,
@@ -322,8 +323,11 @@
mode = AGGREGATE_MODE;
num_lanes = 4;
if (csiphy_dev->snps_state != NOT_CONFIGURED) {
- pr_err("%s: invalid request\n", __func__);
- return -EINVAL;
+ if (csiphy_dev->snps_programmed_data_rate !=
+ csiphy_params->data_rate)
+ pr_err("reconfiguring snps phy");
+ else
+ return 0;
}
csiphy_dev->snps_state = CONFIGURED_AGGREGATE_MODE;
clk_mux_reg &= ~0xff;
@@ -332,34 +336,38 @@
} else if (lane_mask == LANE_MASK_PHY_A) { /* PHY A */
/* 2 lane config */
num_lanes = 2;
+ mode = TWO_LANE_PHY_A;
if (csiphy_dev->snps_state == NOT_CONFIGURED) {
- mode = TWO_LANE_PHY_A;
csiphy_dev->snps_state = CONFIGURED_TWO_LANE_PHY_A;
} else if (csiphy_dev->snps_state ==
CONFIGURED_TWO_LANE_PHY_B) {
/* 2 lane + 2 lane config */
- mode = TWO_LANE_PHY_A;
csiphy_dev->snps_state = CONFIGURED_COMBO_MODE;
} else {
- pr_err("%s: invalid request\n", __func__);
- return -EINVAL;
+ if (csiphy_dev->snps_programmed_data_rate !=
+ csiphy_params->data_rate)
+ pr_err("reconfiguring snps phy");
+ else
+ return 0;
}
clk_mux_reg &= ~0xf;
clk_mux_reg |= (uint32_t)csiphy_params->csid_core;
} else if (lane_mask == LANE_MASK_PHY_B) { /* PHY B */
/* 2 lane config */
num_lanes = 2;
+ mode = TWO_LANE_PHY_B;
if (csiphy_dev->snps_state == NOT_CONFIGURED) {
- mode = TWO_LANE_PHY_B;
csiphy_dev->snps_state = CONFIGURED_TWO_LANE_PHY_B;
} else if (csiphy_dev->snps_state ==
CONFIGURED_TWO_LANE_PHY_A) {
/* 2 lane + 2 lane config */
- mode = TWO_LANE_PHY_B;
csiphy_dev->snps_state = CONFIGURED_COMBO_MODE;
} else {
- pr_err("%s: invalid request\n", __func__);
- return -EINVAL;
+ if (csiphy_dev->snps_programmed_data_rate !=
+ csiphy_params->data_rate)
+ pr_err("reconfiguring snps phy");
+ else
+ return 0;
}
clk_mux_reg &= ~0xf0;
clk_mux_reg |= csiphy_params->csid_core << 4;
@@ -1239,9 +1247,7 @@
return rc;
}
- clk_rate = (csiphy_params->csiphy_clk > 0)
- ? csiphy_params->csiphy_clk :
- csiphy_dev->csiphy_max_clk;
+ clk_rate = csiphy_dev->csiphy_max_clk;
clk_rate = msm_camera_clk_set_rate(&csiphy_dev->pdev->dev,
csiphy_dev->csiphy_clk[csiphy_dev->csiphy_clk_index],
clk_rate);
@@ -1660,6 +1666,7 @@
csiphy_dev->hw_version);
csiphy_dev->csiphy_state = CSIPHY_POWER_UP;
csiphy_dev->snps_state = NOT_CONFIGURED;
+ csiphy_dev->snps_programmed_data_rate = 0;
return 0;
csiphy_enable_clk_fail:
@@ -1766,6 +1773,7 @@
csiphy_dev->hw_version);
csiphy_dev->csiphy_state = CSIPHY_POWER_UP;
csiphy_dev->snps_state = NOT_CONFIGURED;
+ csiphy_dev->snps_programmed_data_rate = 0;
return 0;
csiphy_enable_clk_fail:
@@ -1915,6 +1923,7 @@
csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
csiphy_dev->snps_state = NOT_CONFIGURED;
+ csiphy_dev->snps_programmed_data_rate = 0;
if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
CAM_AHB_SUSPEND_VOTE) < 0)
@@ -2047,6 +2056,7 @@
csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
csiphy_dev->snps_state = NOT_CONFIGURED;
+ csiphy_dev->snps_programmed_data_rate = 0;
if (cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
CAM_AHB_SUSPEND_VOTE) < 0)
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
index 79baf3c..41d2034 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
@@ -250,6 +250,7 @@
uint8_t is_snps_phy;
enum snps_csiphy_state snps_state;
uint8_t num_clk_irq_registers;
+ uint64_t snps_programmed_data_rate;
};
#define VIDIOC_MSM_CSIPHY_RELEASE \
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
index df22d84..3590e15 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
@@ -20,7 +20,6 @@
#include "msm_sensor.h"
#undef CDBG
-#define MSM_CAMERA_TZ_I2C_VERBOSE
#ifdef CONFIG_MSM_SEC_CCI_DEBUG
#define TZ_I2C_FN_RETURN(ret, i2c_fn, ...) \
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
index 0b0f98a..f80de3a 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
@@ -10,7 +10,7 @@
* GNU General Public License for more details.
*/
-#define SENSOR_DRIVER_I2C "i2c_camera"
+#define SENSOR_DRIVER_I2C "camera"
/* Header file declaration */
#include "msm_sensor.h"
#include "msm_sd.h"
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 3679c59..ee643b1 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -738,7 +738,7 @@
(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI) |
(1 << V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS)|
(1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP)|
- (1UL << V4L2_MPEG_VIDC_EXTRADATA_ENC_FRAME_QP)
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO)
),
.qmenu = mpeg_video_vidc_extradata,
},
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index f9bc79c..40c9862 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -2406,8 +2406,9 @@
}
empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
- if ((get_hal_codec(inst->fmts[CAPTURE_PORT].fourcc) ==
- HAL_VIDEO_CODEC_HEVC) &&
+ if (inst->session_type == MSM_VIDC_ENCODER &&
+ (get_hal_codec(inst->fmts[CAPTURE_PORT].fourcc) ==
+ HAL_VIDEO_CODEC_HEVC) &&
(inst->img_grid_dimension > 0) &&
(empty_buf_done->input_tag < inst->tinfo.count - 1)) {
dprintk(VIDC_DBG, "Wait for last tile. Current tile no: %d\n",
@@ -4424,8 +4425,9 @@
for (c = 0; c < etbs.count; ++c) {
struct vidc_frame_data *frame_data = &etbs.data[c];
- if (get_hal_codec(inst->fmts[CAPTURE_PORT].fourcc) ==
- HAL_VIDEO_CODEC_HEVC &&
+ if (inst->session_type == MSM_VIDC_ENCODER &&
+ get_hal_codec(inst->fmts[CAPTURE_PORT].fourcc)
+ == HAL_VIDEO_CODEC_HEVC &&
(inst->img_grid_dimension > 0)) {
rc = msm_comm_qbuf_heic_tiles(inst, frame_data);
if (rc) {
diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c
index e546b01..2dcc8d0 100644
--- a/drivers/media/usb/stkwebcam/stk-sensor.c
+++ b/drivers/media/usb/stkwebcam/stk-sensor.c
@@ -228,7 +228,7 @@
static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
{
int i = 0;
- int tmpval = 0;
+ u8 tmpval = 0;
if (stk_camera_write_reg(dev, STK_IIC_TX_INDEX, reg))
return 1;
@@ -253,7 +253,7 @@
static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
{
int i = 0;
- int tmpval = 0;
+ u8 tmpval = 0;
if (stk_camera_write_reg(dev, STK_IIC_RX_INDEX, reg))
return 1;
@@ -274,7 +274,7 @@
if (stk_camera_read_reg(dev, STK_IIC_RX_VALUE, &tmpval))
return 1;
- *val = (u8) tmpval;
+ *val = tmpval;
return 0;
}
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 22a9aae..1c48f2f 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -144,7 +144,7 @@
return 0;
}
-int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
+int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value)
{
struct usb_device *udev = dev->udev;
unsigned char *buf;
@@ -163,7 +163,7 @@
sizeof(u8),
500);
if (ret >= 0)
- memcpy(value, buf, sizeof(u8));
+ *value = *buf;
kfree(buf);
return ret;
@@ -171,9 +171,10 @@
static int stk_start_stream(struct stk_camera *dev)
{
- int value;
+ u8 value;
int i, ret;
- int value_116, value_117;
+ u8 value_116, value_117;
+
if (!is_present(dev))
return -ENODEV;
@@ -213,7 +214,7 @@
static int stk_stop_stream(struct stk_camera *dev)
{
- int value;
+ u8 value;
int i;
if (is_present(dev)) {
stk_camera_read_reg(dev, 0x0100, &value);
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h
index 9bbfa3d..92bb48e 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.h
+++ b/drivers/media/usb/stkwebcam/stk-webcam.h
@@ -129,7 +129,7 @@
#define vdev_to_camera(d) container_of(d, struct stk_camera, vdev)
int stk_camera_write_reg(struct stk_camera *, u16, u8);
-int stk_camera_read_reg(struct stk_camera *, u16, int *);
+int stk_camera_read_reg(struct stk_camera *, u16, u8 *);
int stk_sensor_init(struct stk_camera *);
int stk_sensor_configure(struct stk_camera *);
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7ee1667..00dff9b 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1994,6 +1994,7 @@
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
+ .no_write_same = 1,
};
static int mptsas_get_linkerrors(struct sas_phy *phy)
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 2b31ed3..d61f20e 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -6979,19 +6979,6 @@
break;
}
- case QSEECOM_IOCTL_SET_ENCDEC_INFO: {
- struct qseecom_encdec_conf_t conf;
-
- ret = copy_from_user(&conf, argp, sizeof(conf));
- if (ret) {
- pr_err("copy_from_user failed\n");
- return -EFAULT;
- }
- ret = qcom_ice_set_fde_conf(conf.start_sector, conf.fs_size,
- conf.index, conf.mode);
- break;
- }
-
case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
if ((data->listener.id == 0) ||
(data->type != QSEECOM_LISTENER_SERVICE)) {
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2405ae3..e1cced6 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -3630,7 +3630,7 @@
* or disable state so cannot receive any completion of
* other requests.
*/
- BUG_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+ WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
/* clear pending request */
BUG_ON(!test_and_clear_bit(cmdq_req->tag,
@@ -3664,7 +3664,7 @@
out:
mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
- if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+ if (!(err || cmdq_req->resp_err)) {
mmc_host_clk_release(host);
wake_up(&ctx_info->wait);
mmc_put_card(host->card);
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 5e1b68c..e1b603c 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -45,6 +45,7 @@
#define I82802AB 0x00ad
#define I82802AC 0x00ac
#define PF38F4476 0x881c
+#define M28F00AP30 0x8963
/* STMicroelectronics chips */
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
@@ -375,6 +376,17 @@
extp->MinorVersion = '1';
}
+static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
+{
+ /*
+	 * Micron (formerly Numonyx) 1Gbit bottom-boot parts are buggy w.r.t.
+	 * Erase Suspend for their small erase blocks (0x8000)
+ */
+ if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
+ return 1;
+ return 0;
+}
+
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
@@ -831,21 +843,30 @@
(mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
goto sleep;
+	/* Do not allow suspend if there is a read/write to the EB address */
+ if ((adr & chip->in_progress_block_mask) ==
+ chip->in_progress_block_addr)
+ goto sleep;
+
+	/* do not suspend small EBs on buggy Micron chips */
+ if (cfi_is_micron_28F00AP30(cfi, chip) &&
+ (chip->in_progress_block_mask == ~(0x8000-1)))
+ goto sleep;
/* Erase suspend */
- map_write(map, CMD(0xB0), adr);
+ map_write(map, CMD(0xB0), chip->in_progress_block_addr);
/* If the flash has finished erasing, then 'erase suspend'
* appears to make some (28F320) flash devices switch to
* 'read' mode. Make sure that we switch to 'read status'
* mode so we get the right data. --rmk
*/
- map_write(map, CMD(0x70), adr);
+ map_write(map, CMD(0x70), chip->in_progress_block_addr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
chip->erase_suspended = 1;
for (;;) {
- status = map_read(map, adr);
+ status = map_read(map, chip->in_progress_block_addr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
@@ -1041,8 +1062,8 @@
sending the 0x70 (Read Status) command to an erasing
chip and expecting it to be ignored, that's what we
do. */
- map_write(map, CMD(0xd0), adr);
- map_write(map, CMD(0x70), adr);
+ map_write(map, CMD(0xd0), chip->in_progress_block_addr);
+ map_write(map, CMD(0x70), chip->in_progress_block_addr);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
break;
@@ -1933,6 +1954,8 @@
map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
chip->erase_suspended = 0;
+ chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(len - 1);
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, len,
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 9dca881..107c05b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -812,9 +812,10 @@
(mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
goto sleep;
- /* We could check to see if we're trying to access the sector
- * that is currently being erased. However, no user will try
- * anything like that so we just wait for the timeout. */
+ /* Do not allow suspend if the read/write targets the erase-block address */
+ if ((adr & chip->in_progress_block_mask) ==
+ chip->in_progress_block_addr)
+ goto sleep;
/* Erase suspend */
/* It's harmless to issue the Erase-Suspend and Erase-Resume
@@ -2263,6 +2264,7 @@
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(map->size - 1);
INVALIDATE_CACHE_UDELAY(map, chip,
adr, map->size,
@@ -2352,6 +2354,7 @@
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(len - 1);
INVALIDATE_CACHE_UDELAY(map, chip,
adr, len,
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 551f0f8..91d8a48 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -450,7 +450,7 @@
{
int i;
- if (!client_info->slave)
+ if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
return;
for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
@@ -944,6 +944,10 @@
skb->priority = TC_PRIO_CONTROL;
skb->dev = slave->dev;
+ netdev_dbg(slave->bond->dev,
+ "Send learning packet: dev %s mac %pM vlan %d\n",
+ slave->dev->name, mac_addr, vid);
+
if (vid)
__vlan_hwaccel_put_tag(skb, vlan_proto, vid);
@@ -966,14 +970,13 @@
*/
rcu_read_lock();
netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
- if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
- if (strict_match &&
- ether_addr_equal_64bits(mac_addr,
- upper->dev_addr)) {
+ if (is_vlan_dev(upper) &&
+ bond->nest_level == vlan_get_encap_level(upper) - 1) {
+ if (upper->addr_assign_type == NET_ADDR_STOLEN) {
alb_send_lp_vid(slave, mac_addr,
vlan_dev_vlan_proto(upper),
vlan_dev_vlan_id(upper));
- } else if (!strict_match) {
+ } else {
alb_send_lp_vid(slave, upper->dev_addr,
vlan_dev_vlan_proto(upper),
vlan_dev_vlan_id(upper));
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 513457a..1a139d0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1654,8 +1654,7 @@
} /* switch(bond_mode) */
#ifdef CONFIG_NET_POLL_CONTROLLER
- slave_dev->npinfo = bond->dev->npinfo;
- if (slave_dev->npinfo) {
+ if (bond->dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
res = -EBUSY;
@@ -1733,6 +1732,8 @@
if (bond_mode_uses_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);
+ bond->nest_level = dev_get_nest_level(bond_dev);
+
netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
slave_dev->name,
bond_is_active_slave(new_slave) ? "an active" : "a backup",
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index c9d61a6..3a75352 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1179,7 +1179,7 @@
skb = alloc_can_skb(priv->netdev, &cf);
if (!skb) {
- stats->tx_dropped++;
+ stats->rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 795a133..4ffbe85 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8720,14 +8720,15 @@
tg3_mem_rx_release(tp);
tg3_mem_tx_release(tp);
- /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
- tg3_full_lock(tp, 0);
+ /* tp->hw_stats can be referenced safely:
+ * 1. under rtnl_lock
+ * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
+ */
if (tp->hw_stats) {
dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
- tg3_full_unlock(tp);
}
/*
@@ -14161,7 +14162,7 @@
struct tg3 *tp = netdev_priv(dev);
spin_lock_bh(&tp->lock);
- if (!tp->hw_stats) {
+ if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
*stats = tp->net_stats_prev;
spin_unlock_bh(&tp->lock);
return stats;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 24977cc..9a4c4f8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -970,6 +970,22 @@
if (!coal->tx_max_coalesced_frames_irq)
return -EINVAL;
+ if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
+ netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
+ __func__, MLX4_EN_MAX_COAL_TIME);
+ return -ERANGE;
+ }
+
+ if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
+ coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
+ netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
+ __func__, MLX4_EN_MAX_COAL_PKTS);
+ return -ERANGE;
+ }
+
priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TARGET :
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 18f221d..247d340 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -141,6 +141,9 @@
#define MLX4_EN_TX_COAL_PKTS 16
#define MLX4_EN_TX_COAL_TIME 0x10
+#define MLX4_EN_MAX_COAL_PKTS U16_MAX
+#define MLX4_EN_MAX_COAL_TIME U16_MAX
+
#define MLX4_EN_RX_RATE_LOW 400000
#define MLX4_EN_RX_COAL_TIME_LOW 0
#define MLX4_EN_RX_RATE_HIGH 450000
@@ -543,8 +546,8 @@
u16 rx_usecs_low;
u32 pkt_rate_high;
u16 rx_usecs_high;
- u16 sample_interval;
- u16 adaptive_rx_coal;
+ u32 sample_interval;
+ u32 adaptive_rx_coal;
u32 msg_enable;
u32 loopback_ok;
u32 validate_loopback;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index a8966e6..5d6eab1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1924,26 +1924,35 @@
memset(vf_stats, 0, sizeof(*vf_stats));
vf_stats->rx_packets =
MLX5_GET_CTR(out, received_eth_unicast.packets) +
+ MLX5_GET_CTR(out, received_ib_unicast.packets) +
MLX5_GET_CTR(out, received_eth_multicast.packets) +
+ MLX5_GET_CTR(out, received_ib_multicast.packets) +
MLX5_GET_CTR(out, received_eth_broadcast.packets);
vf_stats->rx_bytes =
MLX5_GET_CTR(out, received_eth_unicast.octets) +
+ MLX5_GET_CTR(out, received_ib_unicast.octets) +
MLX5_GET_CTR(out, received_eth_multicast.octets) +
+ MLX5_GET_CTR(out, received_ib_multicast.octets) +
MLX5_GET_CTR(out, received_eth_broadcast.octets);
vf_stats->tx_packets =
MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
+ MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
+ MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
vf_stats->tx_bytes =
MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
+ MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
+ MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
vf_stats->multicast =
- MLX5_GET_CTR(out, received_eth_multicast.packets);
+ MLX5_GET_CTR(out, received_eth_multicast.packets) +
+ MLX5_GET_CTR(out, received_ib_multicast.packets);
vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 331a6ca..5f3402b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -153,6 +153,7 @@
static void del_flow_table(struct fs_node *node);
static void del_flow_group(struct fs_node *node);
static void del_fte(struct fs_node *node);
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static void tree_init_node(struct fs_node *node,
unsigned int refcount,
@@ -1690,24 +1691,28 @@
static int init_root_ns(struct mlx5_flow_steering *steering)
{
+ int err;
steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
if (!steering->root_ns)
- goto cleanup;
+ return -ENOMEM;
- if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
- goto cleanup;
+ err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
+ if (err)
+ goto out_err;
set_prio_attrs(steering->root_ns);
- if (create_anchor_flow_table(steering))
- goto cleanup;
+ err = create_anchor_flow_table(steering);
+ if (err)
+ goto out_err;
return 0;
-cleanup:
- mlx5_cleanup_fs(steering->dev);
- return -ENOMEM;
+out_err:
+ cleanup_root_ns(steering->root_ns);
+ steering->root_ns = NULL;
+ return err;
}
static void clean_tree(struct fs_node *node)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 4ca82bd..eee6e59 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -854,6 +854,8 @@
netdev_tx_sent_queue(nd_q, txbuf->real_len);
+ skb_tx_timestamp(skb);
+
tx_ring->wr_p += nr_frags + 1;
if (nfp_net_tx_ring_should_stop(tx_ring))
nfp_net_tx_ring_stop(nd_q, tx_ring);
@@ -866,8 +868,6 @@
tx_ring->wr_ptr_add = 0;
}
- skb_tx_timestamp(skb);
-
return NETDEV_TX_OK;
err_unmap:
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index da4c2d8..1420dfb 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2233,7 +2233,7 @@
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;
- disable_irq(irq);
+ disable_irq_nosync(irq);
rtl8139_interrupt(irq, dev);
enable_irq(irq);
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index dbb6364..59b932d 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4861,6 +4861,9 @@
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
rtl_generic_op(tp, tp->pll_power_ops.up);
+
+ /* give MAC/PHY some time to resume */
+ msleep(20);
}
static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index a2371aa..e45e2f1 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3442,7 +3442,7 @@
len = (val & RCR_ENTRY_L2_LEN) >>
RCR_ENTRY_L2_LEN_SHIFT;
- len -= ETH_FCS_LEN;
+ append_size = len + ETH_HLEN + ETH_FCS_LEN;
addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
@@ -3452,7 +3452,6 @@
RCR_ENTRY_PKTBUFSZ_SHIFT];
off = addr & ~PAGE_MASK;
- append_size = rcr_size;
if (num_rcr == 1) {
int ptype;
@@ -3465,7 +3464,7 @@
else
skb_checksum_none_assert(skb);
} else if (!(val & RCR_ENTRY_MULTI))
- append_size = len - skb->len;
+ append_size = append_size - skb->len;
niu_rx_skb_append(skb, page, off, append_size, rcr_size);
if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 552de9c..d7cb205 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -124,7 +124,7 @@
#define RX_PRIORITY_MAPPING 0x76543210
#define TX_PRIORITY_MAPPING 0x33221100
-#define CPDMA_TX_PRIORITY_MAP 0x01234567
+#define CPDMA_TX_PRIORITY_MAP 0x76543210
#define CPSW_VLAN_AWARE BIT(1)
#define CPSW_ALE_VLAN_AWARE 1
@@ -1141,6 +1141,8 @@
cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN |
ALE_SECURE, slave->port_vlan);
+ cpsw_ale_control_set(cpsw->ale, slave_port,
+ ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}
static void soft_reset_slave(struct cpsw_slave *slave)
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index dc36c2e..fa2c7bd 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -620,6 +620,10 @@
lock_sock(sk);
error = -EINVAL;
+
+ if (sockaddr_len != sizeof(struct sockaddr_pppox))
+ goto end;
+
if (sp->sa_protocol != PX_PROTO_OE)
goto end;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 8673ef3..3696368 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -261,6 +261,17 @@
}
}
+static bool __team_option_inst_tmp_find(const struct list_head *opts,
+ const struct team_option_inst *needle)
+{
+ struct team_option_inst *opt_inst;
+
+ list_for_each_entry(opt_inst, opts, tmp_list)
+ if (opt_inst == needle)
+ return true;
+ return false;
+}
+
static int __team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
@@ -1067,14 +1078,11 @@
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int __team_port_enable_netpoll(struct team_port *port)
{
struct netpoll *np;
int err;
- if (!team->dev->npinfo)
- return 0;
-
np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
return -ENOMEM;
@@ -1088,6 +1096,14 @@
return err;
}
+static int team_port_enable_netpoll(struct team_port *port)
+{
+ if (!port->team->dev->npinfo)
+ return 0;
+
+ return __team_port_enable_netpoll(port);
+}
+
static void team_port_disable_netpoll(struct team_port *port)
{
struct netpoll *np = port->np;
@@ -1102,7 +1118,7 @@
kfree(np);
}
#else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int team_port_enable_netpoll(struct team_port *port)
{
return 0;
}
@@ -1210,7 +1226,7 @@
goto err_vids_add;
}
- err = team_port_enable_netpoll(team, port);
+ err = team_port_enable_netpoll(port);
if (err) {
netdev_err(dev, "Failed to enable netpoll on device %s\n",
portname);
@@ -1908,7 +1924,7 @@
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
- err = team_port_enable_netpoll(team, port);
+ err = __team_port_enable_netpoll(port);
if (err) {
__team_netpoll_cleanup(team);
break;
@@ -2569,6 +2585,14 @@
if (err)
goto team_put;
opt_inst->changed = true;
+
+ /* dumb/evil user-space can send us duplicate opt,
+ * keep only the last one
+ */
+ if (__team_option_inst_tmp_find(&opt_inst_list,
+ opt_inst))
+ continue;
+
list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 4fb4686..99424c8 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -530,6 +530,7 @@
#define REALTEK_VENDOR_ID 0x0bda
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
+#define LINKSYS_VENDOR_ID 0x13b1
#define NVIDIA_VENDOR_ID 0x0955
#define HP_VENDOR_ID 0x03f0
@@ -719,6 +720,15 @@
.driver_info = 0,
},
+#if IS_ENABLED(CONFIG_USB_RTL8152)
+/* Linksys USB3GIGV1 Ethernet Adapter */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+#endif
+
/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 973e90f..3e893fe 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -803,6 +803,7 @@
{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
@@ -1038,6 +1039,18 @@
id->driver_info = (unsigned long)&qmi_wwan_info;
}
+ /* There are devices where the same interface number can be
+ * configured as different functions. We should only bind to
+ * vendor specific functions when matching on interface number
+ */
+ if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
+ desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
+ dev_dbg(&intf->dev,
+ "Rejecting interface number match for class %02x\n",
+ desc->bInterfaceClass);
+ return -ENODEV;
+ }
+
/* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index b2d7c7e..3cdfa24 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -519,6 +519,7 @@
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_SAMSUNG 0x04e8
#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_LINKSYS 0x13b1
#define VENDOR_ID_NVIDIA 0x0955
#define MCU_TYPE_PLA 0x0100
@@ -4506,6 +4507,7 @@
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{}
};
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a497bf3..5aa5df2 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5819,9 +5819,8 @@
sta->addr, smps, err);
}
- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
- changed & IEEE80211_RC_NSS_CHANGED) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
sta->addr);
err = ath10k_station_assoc(ar, arvif->vif, sta, true);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index a35f78b..acef4ec9 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1603,6 +1603,10 @@
int count = 50;
u32 reg, last_val;
+ /* Check if chip failed to wake up */
+ if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
+ return false;
+
if (AR_SREV_9300(ah))
return !ath9k_hw_detect_mac_hang(ah);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 54354a3..538457e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2776,7 +2776,6 @@
struct brcmf_bss_info_le *bi)
{
struct wiphy *wiphy = cfg_to_wiphy(cfg);
- struct ieee80211_channel *notify_channel;
struct cfg80211_bss *bss;
struct ieee80211_supported_band *band;
struct brcmu_chan ch;
@@ -2786,7 +2785,7 @@
u16 notify_interval;
u8 *notify_ie;
size_t notify_ielen;
- s32 notify_signal;
+ struct cfg80211_inform_bss bss_data = {};
if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) {
brcmf_err("Bss info is larger than buffer. Discarding\n");
@@ -2806,27 +2805,28 @@
band = wiphy->bands[NL80211_BAND_5GHZ];
freq = ieee80211_channel_to_frequency(channel, band->band);
- notify_channel = ieee80211_get_channel(wiphy, freq);
+ bss_data.chan = ieee80211_get_channel(wiphy, freq);
+ bss_data.scan_width = NL80211_BSS_CHAN_WIDTH_20;
+ bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime());
notify_capability = le16_to_cpu(bi->capability);
notify_interval = le16_to_cpu(bi->beacon_period);
notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset);
notify_ielen = le32_to_cpu(bi->ie_length);
- notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
+ bss_data.signal = (s16)le16_to_cpu(bi->RSSI) * 100;
brcmf_dbg(CONN, "bssid: %pM\n", bi->BSSID);
brcmf_dbg(CONN, "Channel: %d(%d)\n", channel, freq);
brcmf_dbg(CONN, "Capability: %X\n", notify_capability);
brcmf_dbg(CONN, "Beacon interval: %d\n", notify_interval);
- brcmf_dbg(CONN, "Signal: %d\n", notify_signal);
+ brcmf_dbg(CONN, "Signal: %d\n", bss_data.signal);
- bss = cfg80211_inform_bss(wiphy, notify_channel,
- CFG80211_BSS_FTYPE_UNKNOWN,
- (const u8 *)bi->BSSID,
- 0, notify_capability,
- notify_interval, notify_ie,
- notify_ielen, notify_signal,
- GFP_KERNEL);
+ bss = cfg80211_inform_bss_data(wiphy, &bss_data,
+ CFG80211_BSS_FTYPE_UNKNOWN,
+ (const u8 *)bi->BSSID,
+ 0, notify_capability,
+ notify_interval, notify_ie,
+ notify_ielen, GFP_KERNEL);
if (!bss)
return -ENOMEM;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4182c37..2681b533 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3346,8 +3346,11 @@
continue;
list_del(&data->list);
- INIT_WORK(&data->destroy_work, destroy_radio);
- schedule_work(&data->destroy_work);
+ spin_unlock_bh(&hwsim_radio_lock);
+ mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
+ NULL);
+ spin_lock_bh(&hwsim_radio_lock);
+
}
spin_unlock_bh(&hwsim_radio_lock);
}
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 0dc31cf..9a6fad6 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -2085,7 +2085,7 @@
continue;
/* Allocate an alias_prop with enough space for the stem */
- ap = dt_alloc(sizeof(*ap) + len + 1, 4);
+ ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
if (!ap)
continue;
memset(ap, 0, sizeof(*ap) + len + 1);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 755b386..744f625 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -937,7 +937,7 @@
int offset;
const char *p, *q, *options = NULL;
int l;
- const struct earlycon_id *match;
+ const struct earlycon_id **p_match;
const void *fdt = initial_boot_params;
offset = fdt_path_offset(fdt, "/chosen");
@@ -964,7 +964,10 @@
return 0;
}
- for (match = __earlycon_table; match < __earlycon_table_end; match++) {
+ for (p_match = __earlycon_table; p_match < __earlycon_table_end;
+ p_match++) {
+ const struct earlycon_id *match = *p_match;
+
if (!match->compatible[0])
continue;
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 4fce494..11bad82 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -32,6 +32,7 @@
#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
+#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
#define PCIE_CORE_LINK_TRAINING BIT(5)
@@ -175,8 +176,6 @@
#define PCIE_CONFIG_WR_TYPE0 0xa
#define PCIE_CONFIG_WR_TYPE1 0xb
-/* PCI_BDF shifts 8bit, so we need extra 4bit shift */
-#define PCIE_BDF(dev) (dev << 4)
#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20)
#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15)
#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12)
@@ -298,7 +297,8 @@
reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
(7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
- PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT;
+ (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
+ PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
/* Program PCIe Control 2 to disable strict ordering */
@@ -439,7 +439,7 @@
u32 reg;
int ret;
- if (PCI_SLOT(devfn) != 0) {
+ if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@@ -458,7 +458,7 @@
advk_writel(pcie, reg, PIO_CTRL);
/* Program the address registers */
- reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where);
+ reg = PCIE_CONF_ADDR(bus->number, devfn, where);
advk_writel(pcie, reg, PIO_ADDR_LS);
advk_writel(pcie, 0, PIO_ADDR_MS);
@@ -493,7 +493,7 @@
int offset;
int ret;
- if (PCI_SLOT(devfn) != 0)
+ if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
if (where % size)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a87c8e1..9c13aee 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3756,27 +3756,49 @@
}
EXPORT_SYMBOL(pci_wait_for_pending_transaction);
-/*
- * We should only need to wait 100ms after FLR, but some devices take longer.
- * Wait for up to 1000ms for config space to return something other than -1.
- * Intel IGD requires this when an LCD panel is attached. We read the 2nd
- * dword because VFs don't implement the 1st dword.
- */
static void pci_flr_wait(struct pci_dev *dev)
{
- int i = 0;
+ int delay = 1, timeout = 60000;
u32 id;
- do {
- msleep(100);
- pci_read_config_dword(dev, PCI_COMMAND, &id);
- } while (i++ < 10 && id == ~0);
+ /*
+ * Per PCIe r3.1, sec 6.6.2, a device must complete an FLR within
+ * 100ms, but may silently discard requests while the FLR is in
+ * progress. Wait 100ms before trying to access the device.
+ */
+ msleep(100);
- if (id == ~0)
- dev_warn(&dev->dev, "Failed to return from FLR\n");
- else if (i > 1)
- dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
- (i - 1) * 100);
+ /*
+ * After 100ms, the device should not silently discard config
+ * requests, but it may still indicate that it needs more time by
+ * responding to them with CRS completions. The Root Port will
+ * generally synthesize ~0 data to complete the read (except when
+ * CRS SV is enabled and the read was for the Vendor ID; in that
+ * case it synthesizes 0x0001 data).
+ *
+ * Wait for the device to return a non-CRS completion. Read the
+ * Command register instead of Vendor ID so we don't have to
+ * contend with the CRS SV value.
+ */
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ while (id == ~0) {
+ if (delay > timeout) {
+ dev_warn(&dev->dev, "not ready %dms after FLR; giving up\n",
+ 100 + delay - 1);
+ return;
+ }
+
+ if (delay > 1000)
+ dev_info(&dev->dev, "not ready %dms after FLR; waiting\n",
+ 100 + delay - 1);
+
+ msleep(delay);
+ delay *= 2;
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ }
+
+ if (delay > 1000)
+ dev_info(&dev->dev, "ready %dms after FLR\n", 100 + delay - 1);
}
static int pcie_flr(struct pci_dev *dev, int probe)
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index df63b7d..b40a074 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -368,18 +368,6 @@
writel(value, padcfg0);
}
-static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
-{
- u32 value;
-
- /* Put the pad into GPIO mode */
- value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
- /* Disable SCI/SMI/NMI generation */
- value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
- value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
- writel(value, padcfg0);
-}
-
static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned pin)
@@ -387,6 +375,7 @@
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
unsigned long flags;
+ u32 value;
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -396,7 +385,13 @@
}
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
- intel_gpio_set_gpio_mode(padcfg0);
+ /* Put the pad into GPIO mode */
+ value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
+ /* Disable SCI/SMI/NMI generation */
+ value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
+ value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
+ writel(value, padcfg0);
+
/* Disable TX buffer and enable RX (this will be input) */
__intel_gpio_set_direction(padcfg0, true);
@@ -775,8 +770,6 @@
raw_spin_lock_irqsave(&pctrl->lock, flags);
- intel_gpio_set_gpio_mode(reg);
-
value = readl(reg);
value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 953469a..c2f7aae 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -89,6 +89,8 @@
__stringify(DEL_L2TP_VLAN_MAPPING),
__stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
__stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
+ __stringify(ADD_BRIDGE_VLAN_MAPPING),
+ __stringify(DEL_BRIDGE_VLAN_MAPPING),
};
const char *ipa_hdr_l2_type_name[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 3241257..61c3a71 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -1249,8 +1249,9 @@
struct ipa_hdr_offset_entry *off_next;
struct ipa_hdr_proc_ctx_offset_entry *ctx_off_entry;
struct ipa_hdr_proc_ctx_offset_entry *ctx_off_next;
- int i, end = 0;
- bool user_rule = false;
+ struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+ struct ipa_hdr_proc_ctx_tbl *htbl_proc = &ipa_ctx->hdr_proc_ctx_tbl;
+ int i;
/*
* issue a reset on the routing module since routing rules point to
@@ -1288,9 +1289,6 @@
return -EFAULT;
}
- if (entry->ipacm_installed)
- user_rule = true;
-
if (!user_only || entry->ipacm_installed) {
if (entry->is_hdr_proc_ctx) {
dma_unmap_single(ipa_ctx->pdev,
@@ -1298,9 +1296,15 @@
entry->hdr_len,
DMA_TO_DEVICE);
entry->proc_ctx = NULL;
+ } else {
+ /* move the offset entry to free list */
+ entry->offset_entry->ipacm_installed = 0;
+ list_move(&entry->offset_entry->link,
+ &htbl->head_free_offset_list[
+ entry->offset_entry->bin]);
}
list_del(&entry->link);
- ipa_ctx->hdr_tbl.hdr_cnt--;
+ htbl->hdr_cnt--;
entry->ref_cnt = 0;
entry->cookie = 0;
@@ -1309,53 +1313,37 @@
kmem_cache_free(ipa_ctx->hdr_cache, entry);
}
}
- for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
- list_for_each_entry_safe(off_entry, off_next,
- &ipa_ctx->hdr_tbl.head_offset_list[i],
- link) {
- /*
- * do not remove the default exception header which is
- * at offset 0
- */
- if (off_entry->offset == 0)
- continue;
-
- if (!user_only ||
- off_entry->ipacm_installed) {
+ /* only clean up offset_list and free_offset_list on global reset */
+ if (!user_only) {
+ for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+ list_for_each_entry_safe(off_entry, off_next,
+ &ipa_ctx->hdr_tbl.head_offset_list[i],
+ link) {
+ /**
+ * do not remove the default exception
+ * header which is at offset 0
+ */
+ if (off_entry->offset == 0)
+ continue;
list_del(&off_entry->link);
kmem_cache_free(ipa_ctx->hdr_offset_cache,
off_entry);
- } else {
- if (off_entry->offset +
- ipa_hdr_bin_sz[off_entry->bin] > end) {
- end = off_entry->offset +
- ipa_hdr_bin_sz[off_entry->bin];
- IPADBG("replace end = %d\n", end);
- }
}
- }
- list_for_each_entry_safe(off_entry, off_next,
+ list_for_each_entry_safe(off_entry, off_next,
&ipa_ctx->hdr_tbl.head_free_offset_list[i],
link) {
-
- if (!user_only ||
- off_entry->ipacm_installed) {
list_del(&off_entry->link);
kmem_cache_free(ipa_ctx->hdr_offset_cache,
off_entry);
}
}
+ /* there is one header of size 8 */
+ ipa_ctx->hdr_tbl.end = 8;
+ ipa_ctx->hdr_tbl.hdr_cnt = 1;
}
- IPADBG("hdr_tbl.end = %d\n", end);
- if (user_rule) {
- ipa_ctx->hdr_tbl.end = end;
- IPADBG("hdr_tbl.end = %d\n", end);
- }
IPADBG("reset hdr proc ctx\n");
- user_rule = false;
- end = 0;
list_for_each_entry_safe(
ctx_entry,
ctx_next,
@@ -1364,17 +1352,18 @@
if (ipa_id_find(ctx_entry->id) == NULL) {
mutex_unlock(&ipa_ctx->lock);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EFAULT;
}
- if (entry->ipacm_installed)
- user_rule = true;
-
if (!user_only ||
ctx_entry->ipacm_installed) {
+ /* move the offset entry to appropriate free list */
+ list_move(&ctx_entry->offset_entry->link,
+ &htbl_proc->head_free_offset_list[
+ ctx_entry->offset_entry->bin]);
list_del(&ctx_entry->link);
- ipa_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt--;
+ htbl_proc->proc_ctx_cnt--;
ctx_entry->ref_cnt = 0;
ctx_entry->cookie = 0;
@@ -1384,48 +1373,30 @@
ctx_entry);
}
}
- for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
- list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+ /* only clean up offset_list and free_offset_list on global reset */
+ if (!user_only) {
+ for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+ list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
&ipa_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
link) {
-
- if (!user_only ||
- ctx_off_entry->ipacm_installed) {
list_del(&ctx_off_entry->link);
kmem_cache_free(
ipa_ctx->hdr_proc_ctx_offset_cache,
ctx_off_entry);
- } else {
- if (ctx_off_entry->offset +
- ipa_hdr_bin_sz[ctx_off_entry->bin]
- > end) {
- end = ctx_off_entry->offset +
- ipa_hdr_bin_sz[ctx_off_entry->bin];
- IPADBG("replace hdr_proc as %d\n", end);
- }
}
- }
- list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
- &ipa_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
- link) {
-
- if (!user_only ||
- ctx_off_entry->ipacm_installed) {
+ list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+ &ipa_ctx->hdr_proc_ctx_tbl.
+ head_free_offset_list[i], link) {
list_del(&ctx_off_entry->link);
kmem_cache_free(
ipa_ctx->hdr_proc_ctx_offset_cache,
ctx_off_entry);
}
}
+ ipa_ctx->hdr_proc_ctx_tbl.end = 0;
+ ipa_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
}
- IPADBG("hdr_proc_tbl.end = %d\n", end);
- if (user_rule) {
- ipa_ctx->hdr_proc_ctx_tbl.end = end;
- IPADBG("hdr_proc_tbl.end = %d\n", end);
- }
- mutex_unlock(&ipa_ctx->lock);
-
/* commit the change to IPA-HW */
if (ipa_ctx->ctrl->ipa_commit_hdr()) {
IPAERR_RL("fail to commit hdr\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index d64f89b..82c8709 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -499,10 +499,15 @@
return;
}
- if (type != ADD_VLAN_IFACE &&
- type != DEL_VLAN_IFACE &&
- type != ADD_L2TP_VLAN_MAPPING &&
- type != DEL_L2TP_VLAN_MAPPING) {
+ switch (type) {
+ case ADD_VLAN_IFACE:
+ case DEL_VLAN_IFACE:
+ case ADD_L2TP_VLAN_MAPPING:
+ case DEL_L2TP_VLAN_MAPPING:
+ case ADD_BRIDGE_VLAN_MAPPING:
+ case DEL_BRIDGE_VLAN_MAPPING:
+ break;
+ default:
IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
return;
}
@@ -515,10 +520,17 @@
int retval;
struct ipa_ioc_vlan_iface_info *vlan_info;
struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info;
+ struct ipa_ioc_bridge_vlan_mapping_info *bridge_vlan_info;
struct ipa_msg_meta msg_meta;
+ void *buff;
- if (msg_type == ADD_VLAN_IFACE ||
- msg_type == DEL_VLAN_IFACE) {
+ IPADBG("type %d\n", msg_type);
+
+ memset(&msg_meta, 0, sizeof(msg_meta));
+ msg_meta.msg_type = msg_type;
+
+ if ((msg_type == ADD_VLAN_IFACE) ||
+ (msg_type == DEL_VLAN_IFACE)) {
vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info),
GFP_KERNEL);
if (!vlan_info) {
@@ -532,18 +544,10 @@
return -EFAULT;
}
- memset(&msg_meta, 0, sizeof(msg_meta));
- msg_meta.msg_type = msg_type;
msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info);
- retval = ipa3_send_msg(&msg_meta, vlan_info,
- ipa3_vlan_l2tp_msg_free_cb);
- if (retval) {
- IPAERR("ipa3_send_msg failed: %d\n", retval);
- kfree(vlan_info);
- return retval;
- }
- } else if (msg_type == ADD_L2TP_VLAN_MAPPING ||
- msg_type == DEL_L2TP_VLAN_MAPPING) {
+ buff = vlan_info;
+ } else if ((msg_type == ADD_L2TP_VLAN_MAPPING) ||
+ (msg_type == DEL_L2TP_VLAN_MAPPING)) {
mapping_info = kzalloc(sizeof(struct
ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL);
if (!mapping_info) {
@@ -558,22 +562,46 @@
return -EFAULT;
}
- memset(&msg_meta, 0, sizeof(msg_meta));
- msg_meta.msg_type = msg_type;
msg_meta.msg_len = sizeof(struct
ipa_ioc_l2tp_vlan_mapping_info);
- retval = ipa3_send_msg(&msg_meta, mapping_info,
- ipa3_vlan_l2tp_msg_free_cb);
- if (retval) {
- IPAERR("ipa3_send_msg failed: %d\n", retval);
- kfree(mapping_info);
- return retval;
+ buff = mapping_info;
+ } else if ((msg_type == ADD_BRIDGE_VLAN_MAPPING) ||
+ (msg_type == DEL_BRIDGE_VLAN_MAPPING)) {
+ bridge_vlan_info = kzalloc(
+ sizeof(struct ipa_ioc_bridge_vlan_mapping_info),
+ GFP_KERNEL);
+ if (!bridge_vlan_info) {
+ IPAERR("no memory\n");
+ return -ENOMEM;
}
+
+ if (copy_from_user((u8 *)bridge_vlan_info,
+ (void __user *)usr_param,
+ sizeof(struct ipa_ioc_bridge_vlan_mapping_info))) {
+ kfree(bridge_vlan_info);
+ IPAERR("copy from user failed\n");
+ return -EFAULT;
+ }
+
+ msg_meta.msg_len = sizeof(struct
+ ipa_ioc_bridge_vlan_mapping_info);
+ buff = bridge_vlan_info;
} else {
IPAERR("Unexpected event\n");
return -EFAULT;
}
+ retval = ipa3_send_msg(&msg_meta, buff,
+ ipa3_vlan_l2tp_msg_free_cb);
+ if (retval) {
+ IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
+ retval,
+ msg_type);
+ kfree(buff);
+ return retval;
+ }
+ IPADBG("exit\n");
+
return 0;
}
@@ -1726,7 +1754,18 @@
break;
}
break;
-
+ case IPA_IOC_ADD_BRIDGE_VLAN_MAPPING:
+ if (ipa3_send_vlan_l2tp_msg(arg, ADD_BRIDGE_VLAN_MAPPING)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case IPA_IOC_DEL_BRIDGE_VLAN_MAPPING:
+ if (ipa3_send_vlan_l2tp_msg(arg, DEL_BRIDGE_VLAN_MAPPING)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
case IPA_IOC_ADD_L2TP_VLAN_MAPPING:
if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) {
retval = -EFAULT;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 4751c75..e10383c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -73,6 +73,8 @@
__stringify(DEL_L2TP_VLAN_MAPPING),
__stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
__stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
+ __stringify(ADD_BRIDGE_VLAN_MAPPING),
+ __stringify(DEL_BRIDGE_VLAN_MAPPING),
};
const char *ipa3_hdr_l2_type_name[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 4196539..4eb8edb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -996,8 +996,9 @@
struct ipa_hdr_offset_entry *off_next;
struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
- int i, end = 0;
- bool user_rule = false;
+ struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
+ struct ipa3_hdr_proc_ctx_tbl *htbl_proc = &ipa3_ctx->hdr_proc_ctx_tbl;
+ int i;
/*
* issue a reset on the routing module since routing rules point to
@@ -1035,9 +1036,6 @@
return -EFAULT;
}
- if (entry->ipacm_installed)
- user_rule = true;
-
if (!user_only || entry->ipacm_installed) {
if (entry->is_hdr_proc_ctx) {
dma_unmap_single(ipa3_ctx->pdev,
@@ -1045,9 +1043,15 @@
entry->hdr_len,
DMA_TO_DEVICE);
entry->proc_ctx = NULL;
+ } else {
+ /* move the offset entry to free list */
+ entry->offset_entry->ipacm_installed = 0;
+ list_move(&entry->offset_entry->link,
+ &htbl->head_free_offset_list[
+ entry->offset_entry->bin]);
}
list_del(&entry->link);
- ipa3_ctx->hdr_tbl.hdr_cnt--;
+ htbl->hdr_cnt--;
entry->ref_cnt = 0;
entry->cookie = 0;
@@ -1056,53 +1060,37 @@
kmem_cache_free(ipa3_ctx->hdr_cache, entry);
}
}
- for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
- list_for_each_entry_safe(off_entry, off_next,
+
+ /* only clean up offset_list and free_offset_list on global reset */
+ if (!user_only) {
+ for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+ list_for_each_entry_safe(off_entry, off_next,
&ipa3_ctx->hdr_tbl.head_offset_list[i],
link) {
-
- /*
- * do not remove the default exception header which is
- * at offset 0
- */
- if (off_entry->offset == 0)
- continue;
-
- if (!user_only ||
- off_entry->ipacm_installed) {
+ /**
+ * do not remove the default exception
+ * header which is at offset 0
+ */
+ if (off_entry->offset == 0)
+ continue;
list_del(&off_entry->link);
kmem_cache_free(ipa3_ctx->hdr_offset_cache,
off_entry);
- } else {
- if (off_entry->offset +
- ipa_hdr_bin_sz[off_entry->bin] > end) {
- end = off_entry->offset +
- ipa_hdr_bin_sz[off_entry->bin];
- IPADBG("replace end = %d\n", end);
- }
}
- }
- list_for_each_entry_safe(off_entry, off_next,
+ list_for_each_entry_safe(off_entry, off_next,
&ipa3_ctx->hdr_tbl.head_free_offset_list[i],
link) {
-
- if (!user_only ||
- off_entry->ipacm_installed) {
list_del(&off_entry->link);
kmem_cache_free(ipa3_ctx->hdr_offset_cache,
off_entry);
}
}
+ /* there is one header of size 8 */
+ ipa3_ctx->hdr_tbl.end = 8;
+ ipa3_ctx->hdr_tbl.hdr_cnt = 1;
}
- IPADBG("hdr_tbl.end = %d\n", end);
- if (user_rule) {
- ipa3_ctx->hdr_tbl.end = end;
- IPADBG("hdr_tbl.end = %d\n", end);
- }
IPADBG("reset hdr proc ctx\n");
- user_rule = false;
- end = 0;
list_for_each_entry_safe(
ctx_entry,
ctx_next,
@@ -1115,13 +1103,14 @@
return -EFAULT;
}
- if (entry->ipacm_installed)
- user_rule = true;
-
if (!user_only ||
ctx_entry->ipacm_installed) {
+ /* move the offset entry to appropriate free list */
+ list_move(&ctx_entry->offset_entry->link,
+ &htbl_proc->head_free_offset_list[
+ ctx_entry->offset_entry->bin]);
list_del(&ctx_entry->link);
- ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt--;
+ htbl_proc->proc_ctx_cnt--;
ctx_entry->ref_cnt = 0;
ctx_entry->cookie = 0;
@@ -1131,45 +1120,28 @@
ctx_entry);
}
}
- for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
- list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+ /* only clean up offset_list and free_offset_list on global reset */
+ if (!user_only) {
+ for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+ list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
link) {
-
- if (!user_only ||
- ctx_off_entry->ipacm_installed) {
list_del(&ctx_off_entry->link);
kmem_cache_free(
ipa3_ctx->hdr_proc_ctx_offset_cache,
ctx_off_entry);
- } else {
- if (ctx_off_entry->offset +
- ipa_hdr_bin_sz[ctx_off_entry->bin]
- > end) {
- end = ctx_off_entry->offset +
- ipa_hdr_bin_sz[ctx_off_entry->bin];
- IPADBG("replace hdr_proc as %d\n", end);
- }
}
- }
- list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
- &ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
- link) {
-
- if (!user_only ||
- ctx_off_entry->ipacm_installed) {
+ list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+ &ipa3_ctx->hdr_proc_ctx_tbl.
+ head_free_offset_list[i], link) {
list_del(&ctx_off_entry->link);
kmem_cache_free(
ipa3_ctx->hdr_proc_ctx_offset_cache,
ctx_off_entry);
}
}
- }
-
- IPADBG("hdr_proc_tbl.end = %d\n", end);
- if (user_rule) {
- ipa3_ctx->hdr_proc_ctx_tbl.end = end;
- IPADBG("hdr_proc_tbl.end = %d\n", end);
+ ipa3_ctx->hdr_proc_ctx_tbl.end = 0;
+ ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
}
/* commit the change to IPA-HW */
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index 1871602..c098328 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -145,8 +145,10 @@
{
struct asus_wireless_data *data = acpi_driver_data(adev);
- if (data->wq)
+ if (data->wq) {
+ devm_led_classdev_unregister(&adev->dev, &data->led);
destroy_workqueue(data->wq);
+ }
return 0;
}
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 73e2f0b..c4770a9 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -1569,6 +1569,11 @@
acpi_id =
acpi_match_device(client->dev.driver->acpi_match_table,
&client->dev);
+ if (!acpi_id) {
+ dev_err(&client->dev, "failed to match device name\n");
+ ret = -ENODEV;
+ goto error_1;
+ }
name = kasprintf(GFP_KERNEL, "%s-%d", acpi_id->id, num);
}
if (!name) {
diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h
index a1aeac2..b2c8b89 100644
--- a/drivers/power/supply/qcom/qg-core.h
+++ b/drivers/power/supply/qcom/qg-core.h
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include "fg-alg.h"
+#include "qg-defs.h"
struct qg_batt_props {
const char *batt_type_str;
@@ -50,10 +51,24 @@
int rbat_conn_mohm;
int ignore_shutdown_soc_secs;
int cold_temp_threshold;
+ int esr_qual_i_ua;
+ int esr_qual_v_uv;
+ int esr_disable_soc;
bool hold_soc_while_full;
bool linearize_soc;
bool cl_disable;
bool cl_feedback_on;
+ bool esr_disable;
+ bool esr_discharge_enable;
+};
+
+struct qg_esr_data {
+ u32 pre_esr_v;
+ u32 pre_esr_i;
+ u32 post_esr_v;
+ u32 post_esr_i;
+ u32 esr;
+ bool valid;
};
struct qpnp_qg {
@@ -87,6 +102,7 @@
struct power_supply *batt_psy;
struct power_supply *usb_psy;
struct power_supply *parallel_psy;
+ struct qg_esr_data esr_data[QG_MAX_ESR_COUNT];
/* status variable */
u32 *debug_mask;
@@ -103,9 +119,12 @@
int charge_status;
int charge_type;
int next_wakeup_ms;
+ u32 fifo_done_count;
u32 wa_flags;
u32 seq_no;
u32 charge_counter_uah;
+ u32 esr_avg;
+ u32 esr_last;
ktime_t last_user_update_time;
ktime_t last_fifo_update_time;
@@ -147,6 +166,7 @@
QG_DEBUG_BUS_READ = BIT(8),
QG_DEBUG_BUS_WRITE = BIT(9),
QG_DEBUG_ALG_CL = BIT(10),
+ QG_DEBUG_ESR = BIT(11),
};
enum qg_irq {
diff --git a/drivers/power/supply/qcom/qg-defs.h b/drivers/power/supply/qcom/qg-defs.h
index 2061208..02a193f 100644
--- a/drivers/power/supply/qcom/qg-defs.h
+++ b/drivers/power/supply/qcom/qg-defs.h
@@ -44,6 +44,9 @@
#define UV_TO_DECIUV(a) (a / 100)
#define DECIUV_TO_UV(a) (a * 100)
+#define QG_MAX_ESR_COUNT 10
+#define QG_MIN_ESR_COUNT 2
+
#define CAP(min, max, value) \
((min > value) ? min : ((value > max) ? max : value))
diff --git a/drivers/power/supply/qcom/qg-reg.h b/drivers/power/supply/qcom/qg-reg.h
index 66f9be1..d586a72 100644
--- a/drivers/power/supply/qcom/qg-reg.h
+++ b/drivers/power/supply/qcom/qg-reg.h
@@ -18,6 +18,7 @@
#define QG_STATUS1_REG 0x08
#define BATTERY_PRESENT_BIT BIT(0)
+#define ESR_MEAS_DONE_BIT BIT(4)
#define QG_STATUS2_REG 0x09
#define GOOD_OCV_BIT BIT(1)
@@ -25,6 +26,9 @@
#define QG_STATUS3_REG 0x0A
#define COUNT_FIFO_RT_MASK GENMASK(3, 0)
+#define QG_STATUS4_REG 0x0B
+#define ESR_MEAS_IN_PROGRESS_BIT BIT(4)
+
#define QG_INT_RT_STS_REG 0x10
#define FIFO_UPDATE_DONE_RT_STS_BIT BIT(3)
#define VBAT_LOW_INT_RT_STS_BIT BIT(1)
@@ -60,11 +64,19 @@
#define QG_S3_ENTRY_IBAT_THRESHOLD_REG 0x5E
#define QG_S3_EXIT_IBAT_THRESHOLD_REG 0x5F
+#define QG_ESR_MEAS_TRIG_REG 0x68
+#define HW_ESR_MEAS_START_BIT BIT(0)
+
#define QG_S7_PON_OCV_V_DATA0_REG 0x70
#define QG_S7_PON_OCV_I_DATA0_REG 0x72
#define QG_S3_GOOD_OCV_V_DATA0_REG 0x74
#define QG_S3_GOOD_OCV_I_DATA0_REG 0x76
+#define QG_PRE_ESR_V_DATA0_REG 0x78
+#define QG_PRE_ESR_I_DATA0_REG 0x7A
+#define QG_POST_ESR_V_DATA0_REG 0x7C
+#define QG_POST_ESR_I_DATA0_REG 0x7E
+
#define QG_V_ACCUM_DATA0_RT_REG 0x88
#define QG_I_ACCUM_DATA0_RT_REG 0x8B
#define QG_ACCUM_CNT_RT_REG 0x8E
@@ -80,15 +92,19 @@
#define QG_LAST_S3_SLEEP_V_DATA0_REG 0xCC
/* SDAM offsets */
-#define QG_SDAM_VALID_OFFSET 0x46
-#define QG_SDAM_SOC_OFFSET 0x47
-#define QG_SDAM_TEMP_OFFSET 0x48
-#define QG_SDAM_RBAT_OFFSET 0x4A
-#define QG_SDAM_OCV_OFFSET 0x4C
-#define QG_SDAM_IBAT_OFFSET 0x50
-#define QG_SDAM_TIME_OFFSET 0x54
-#define QG_SDAM_CYCLE_COUNT_OFFSET 0x58
-#define QG_SDAM_LEARNED_CAPACITY_OFFSET 0x68
-#define QG_SDAM_PON_OCV_OFFSET 0x7C
+#define QG_SDAM_VALID_OFFSET 0x46 /* 1-byte 0x46 */
+#define QG_SDAM_SOC_OFFSET 0x47 /* 1-byte 0x47 */
+#define QG_SDAM_TEMP_OFFSET 0x48 /* 2-byte 0x48-0x49 */
+#define QG_SDAM_RBAT_OFFSET 0x4A /* 2-byte 0x4A-0x4B */
+#define QG_SDAM_OCV_OFFSET 0x4C /* 4-byte 0x4C-0x4F */
+#define QG_SDAM_IBAT_OFFSET 0x50 /* 4-byte 0x50-0x53 */
+#define QG_SDAM_TIME_OFFSET 0x54 /* 4-byte 0x54-0x57 */
+#define QG_SDAM_CYCLE_COUNT_OFFSET 0x58 /* 16-byte 0x58-0x67 */
+#define QG_SDAM_LEARNED_CAPACITY_OFFSET 0x68 /* 2-byte 0x68-0x69 */
+#define QG_SDAM_ESR_CHARGE_DELTA_OFFSET 0x6A /* 4-byte 0x6A-0x6D */
+#define QG_SDAM_ESR_DISCHARGE_DELTA_OFFSET 0x6E /* 4-byte 0x6E-0x71 */
+#define QG_SDAM_ESR_CHARGE_SF_OFFSET 0x72 /* 2-byte 0x72-0x73 */
+#define QG_SDAM_ESR_DISCHARGE_SF_OFFSET 0x74 /* 2-byte 0x74-0x75 */
+#define QG_SDAM_PON_OCV_OFFSET 0x7C /* 2-byte 0x7C-0x7D */
#endif
diff --git a/drivers/power/supply/qcom/qg-sdam.c b/drivers/power/supply/qcom/qg-sdam.c
index 7bc4afa..e7ffcb5 100644
--- a/drivers/power/supply/qcom/qg-sdam.c
+++ b/drivers/power/supply/qcom/qg-sdam.c
@@ -68,6 +68,26 @@
.offset = QG_SDAM_PON_OCV_OFFSET,
.length = 2,
},
+ [SDAM_ESR_CHARGE_DELTA] = {
+ .name = "SDAM_ESR_CHARGE_DELTA",
+ .offset = QG_SDAM_ESR_CHARGE_DELTA_OFFSET,
+ .length = 4,
+ },
+ [SDAM_ESR_DISCHARGE_DELTA] = {
+ .name = "SDAM_ESR_DISCHARGE_DELTA",
+ .offset = QG_SDAM_ESR_DISCHARGE_DELTA_OFFSET,
+ .length = 4,
+ },
+ [SDAM_ESR_CHARGE_SF] = {
+ .name = "SDAM_ESR_CHARGE_SF_OFFSET",
+ .offset = QG_SDAM_ESR_CHARGE_SF_OFFSET,
+ .length = 2,
+ },
+ [SDAM_ESR_DISCHARGE_SF] = {
+ .name = "SDAM_ESR_DISCHARGE_SF_OFFSET",
+ .offset = QG_SDAM_ESR_DISCHARGE_SF_OFFSET,
+ .length = 2,
+ },
};
int qg_sdam_write(u8 param, u32 data)
diff --git a/drivers/power/supply/qcom/qg-sdam.h b/drivers/power/supply/qcom/qg-sdam.h
index 10e684f..45218a8 100644
--- a/drivers/power/supply/qcom/qg-sdam.h
+++ b/drivers/power/supply/qcom/qg-sdam.h
@@ -24,6 +24,10 @@
SDAM_IBAT_UA,
SDAM_TIME_SEC,
SDAM_PON_OCV_UV,
+ SDAM_ESR_CHARGE_DELTA,
+ SDAM_ESR_DISCHARGE_DELTA,
+ SDAM_ESR_CHARGE_SF,
+ SDAM_ESR_DISCHARGE_SF,
SDAM_MAX,
};
diff --git a/drivers/power/supply/qcom/qg-util.c b/drivers/power/supply/qcom/qg-util.c
index d354799..824d914 100644
--- a/drivers/power/supply/qcom/qg-util.c
+++ b/drivers/power/supply/qcom/qg-util.c
@@ -111,6 +111,22 @@
return rc;
}
+int qg_read_raw_data(struct qpnp_qg *chip, int addr, u32 *data)
+{
+ int rc;
+ u8 reg[2] = {0};
+
+ rc = qg_read(chip, chip->qg_base + addr, ®[0], 2);
+ if (rc < 0) {
+ pr_err("Failed to read QG addr %d rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ *data = reg[0] | (reg[1] << 8);
+
+ return rc;
+}
+
int get_fifo_length(struct qpnp_qg *chip, u32 *fifo_length, bool rt)
{
int rc;
diff --git a/drivers/power/supply/qcom/qg-util.h b/drivers/power/supply/qcom/qg-util.h
index 385c9e0..bb17afb 100644
--- a/drivers/power/supply/qcom/qg-util.h
+++ b/drivers/power/supply/qcom/qg-util.h
@@ -15,6 +15,7 @@
int qg_read(struct qpnp_qg *chip, u32 addr, u8 *val, int len);
int qg_write(struct qpnp_qg *chip, u32 addr, u8 *val, int len);
int qg_masked_write(struct qpnp_qg *chip, int addr, u32 mask, u32 val);
+int qg_read_raw_data(struct qpnp_qg *chip, int addr, u32 *data);
int get_fifo_length(struct qpnp_qg *chip, u32 *fifo_length, bool rt);
int get_sample_count(struct qpnp_qg *chip, u32 *sample_count);
int get_sample_interval(struct qpnp_qg *chip, u32 *sample_interval);
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 5a0682c..0ee5c30 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -43,6 +43,16 @@
debug_mask, qg_debug_mask, int, 0600
);
+static int qg_esr_mod_count = 10;
+module_param_named(
+ esr_mod_count, qg_esr_mod_count, int, 0600
+);
+
+static int qg_esr_count = 5;
+module_param_named(
+ esr_count, qg_esr_count, int, 0600
+);
+
static bool is_battery_present(struct qpnp_qg *chip)
{
u8 reg = 0;
@@ -228,7 +238,7 @@
return true;
}
-static int qg_update_sdam_params(struct qpnp_qg *chip)
+static int qg_store_soc_params(struct qpnp_qg *chip)
{
int rc, batt_temp = 0, i;
unsigned long rtc_sec = 0;
@@ -245,13 +255,11 @@
else
chip->sdam_data[SDAM_TEMP] = (u32)batt_temp;
- rc = qg_sdam_write_all(chip->sdam_data);
- if (rc < 0)
- pr_err("Failed to write to SDAM rc=%d\n", rc);
-
- for (i = 0; i < SDAM_MAX; i++)
+ for (i = 0; i <= SDAM_TIME_SEC; i++) {
+ rc |= qg_sdam_write(i, chip->sdam_data[i]);
qg_dbg(chip, QG_DEBUG_STATUS, "SDAM write param %d value=%d\n",
i, chip->sdam_data[i]);
+ }
return rc;
}
@@ -433,6 +441,87 @@
return rc;
}
+#define MIN_FIFO_FULL_TIME_MS 12000
+static int process_rt_fifo_data(struct qpnp_qg *chip,
+ bool vbat_low, bool update_smb)
+{
+ int rc = 0;
+ ktime_t now = ktime_get();
+ s64 time_delta;
+
+ /*
+ * Reject the FIFO read event if there are back-to-back requests
+ * This is done to gaurantee that there is always a minimum FIFO
+ * data to be processed, ignore this if vbat_low is set.
+ */
+ time_delta = ktime_ms_delta(now, chip->last_user_update_time);
+
+ qg_dbg(chip, QG_DEBUG_FIFO, "time_delta=%lld ms vbat_low=%d\n",
+ time_delta, vbat_low);
+
+ if (time_delta > MIN_FIFO_FULL_TIME_MS || vbat_low || update_smb) {
+ rc = qg_master_hold(chip, true);
+ if (rc < 0) {
+ pr_err("Failed to hold master, rc=%d\n", rc);
+ goto done;
+ }
+
+ rc = qg_process_rt_fifo(chip);
+ if (rc < 0) {
+ pr_err("Failed to process FIFO real-time, rc=%d\n", rc);
+ goto done;
+ }
+
+ if (vbat_low) {
+ /* change FIFO length */
+ rc = qg_update_fifo_length(chip,
+ chip->dt.s2_vbat_low_fifo_length);
+ if (rc < 0)
+ goto done;
+
+ qg_dbg(chip, QG_DEBUG_STATUS,
+ "FIFO length updated to %d vbat_low=%d\n",
+ chip->dt.s2_vbat_low_fifo_length,
+ vbat_low);
+ }
+
+ if (update_smb) {
+ rc = qg_masked_write(chip, chip->qg_base +
+ QG_MODE_CTL1_REG, PARALLEL_IBAT_SENSE_EN_BIT,
+ chip->parallel_enabled ?
+ PARALLEL_IBAT_SENSE_EN_BIT : 0);
+ if (rc < 0) {
+ pr_err("Failed to update SMB_EN, rc=%d\n", rc);
+ goto done;
+ }
+ qg_dbg(chip, QG_DEBUG_STATUS, "Parallel SENSE %d\n",
+ chip->parallel_enabled);
+ }
+
+ rc = qg_master_hold(chip, false);
+ if (rc < 0) {
+ pr_err("Failed to release master, rc=%d\n", rc);
+ goto done;
+ }
+ /* FIFOs restarted */
+ chip->last_fifo_update_time = ktime_get();
+
+ /* signal the read thread */
+ chip->data_ready = true;
+ wake_up_interruptible(&chip->qg_wait_q);
+ chip->last_user_update_time = now;
+
+ /* vote to stay awake until userspace reads data */
+ vote(chip->awake_votable, FIFO_RT_DONE_VOTER, true, 0);
+ } else {
+ qg_dbg(chip, QG_DEBUG_FIFO, "FIFO processing too early time_delta=%lld\n",
+ time_delta);
+ }
+done:
+ qg_master_hold(chip, false);
+ return rc;
+}
+
#define VBAT_LOW_HYST_UV 50000 /* 50mV */
static int qg_vbat_low_wa(struct qpnp_qg *chip)
{
@@ -561,82 +650,353 @@
return rc;
}
-#define MIN_FIFO_FULL_TIME_MS 12000
-static int process_rt_fifo_data(struct qpnp_qg *chip,
- bool vbat_low, bool update_smb)
+static void qg_retrieve_esr_params(struct qpnp_qg *chip)
{
- int rc = 0;
- ktime_t now = ktime_get();
- s64 time_delta;
+ u32 data = 0;
+ int rc;
+
+ rc = qg_sdam_read(SDAM_ESR_CHARGE_DELTA, &data);
+ if (!rc && data) {
+ chip->kdata.param[QG_ESR_CHARGE_DELTA].data = data;
+ chip->kdata.param[QG_ESR_CHARGE_DELTA].valid = true;
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "ESR_CHARGE_DELTA SDAM=%d\n", data);
+ } else if (rc < 0) {
+ pr_err("Failed to read ESR_CHARGE_DELTA rc=%d\n", rc);
+ }
+
+ rc = qg_sdam_read(SDAM_ESR_DISCHARGE_DELTA, &data);
+ if (!rc && data) {
+ chip->kdata.param[QG_ESR_DISCHARGE_DELTA].data = data;
+ chip->kdata.param[QG_ESR_DISCHARGE_DELTA].valid = true;
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "ESR_DISCHARGE_DELTA SDAM=%d\n", data);
+ } else if (rc < 0) {
+ pr_err("Failed to read ESR_DISCHARGE_DELTA rc=%d\n", rc);
+ }
+
+ rc = qg_sdam_read(SDAM_ESR_CHARGE_SF, &data);
+ if (!rc && data) {
+ chip->kdata.param[QG_ESR_CHARGE_SF].data = data;
+ chip->kdata.param[QG_ESR_CHARGE_SF].valid = true;
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "ESR_CHARGE_SF SDAM=%d\n", data);
+ } else if (rc < 0) {
+ pr_err("Failed to read ESR_CHARGE_SF rc=%d\n", rc);
+ }
+
+ rc = qg_sdam_read(SDAM_ESR_DISCHARGE_SF, &data);
+ if (!rc && data) {
+ chip->kdata.param[QG_ESR_DISCHARGE_SF].data = data;
+ chip->kdata.param[QG_ESR_DISCHARGE_SF].valid = true;
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "ESR_DISCHARGE_SF SDAM=%d\n", data);
+ } else if (rc < 0) {
+ pr_err("Failed to read ESR_DISCHARGE_SF rc=%d\n", rc);
+ }
+}
+
+static void qg_store_esr_params(struct qpnp_qg *chip)
+{
+ unsigned int esr;
+
+ if (chip->udata.param[QG_ESR_CHARGE_DELTA].valid) {
+ esr = chip->udata.param[QG_ESR_CHARGE_DELTA].data;
+ qg_sdam_write(SDAM_ESR_CHARGE_DELTA, esr);
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "SDAM store ESR_CHARGE_DELTA=%d\n", esr);
+ }
+
+ if (chip->udata.param[QG_ESR_DISCHARGE_DELTA].valid) {
+ esr = chip->udata.param[QG_ESR_DISCHARGE_DELTA].data;
+ qg_sdam_write(SDAM_ESR_DISCHARGE_DELTA, esr);
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "SDAM store ESR_DISCHARGE_DELTA=%d\n", esr);
+ }
+
+ if (chip->udata.param[QG_ESR_CHARGE_SF].valid) {
+ esr = chip->udata.param[QG_ESR_CHARGE_SF].data;
+ qg_sdam_write(SDAM_ESR_CHARGE_SF, esr);
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "SDAM store ESR_CHARGE_SF=%d\n", esr);
+ }
+
+ if (chip->udata.param[QG_ESR_DISCHARGE_SF].valid) {
+ esr = chip->udata.param[QG_ESR_DISCHARGE_SF].data;
+ qg_sdam_write(SDAM_ESR_DISCHARGE_SF, esr);
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "SDAM store ESR_DISCHARGE_SF=%d\n", esr);
+ }
+}
+
+#define MAX_ESR_RETRY_COUNT 10
+#define ESR_SD_PERCENT 10
+static int qg_process_esr_data(struct qpnp_qg *chip)
+{
+ int i;
+ int pre_i, post_i, pre_v, post_v, first_pre_i = 0;
+ int diff_v, diff_i, esr_avg = 0, count = 0;
+
+ for (i = 0; i < qg_esr_count; i++) {
+ if (!chip->esr_data[i].valid)
+ continue;
+
+ pre_i = chip->esr_data[i].pre_esr_i;
+ pre_v = chip->esr_data[i].pre_esr_v;
+ post_i = chip->esr_data[i].post_esr_i;
+ post_v = chip->esr_data[i].post_esr_v;
+
+ /*
+ * Check if any of the pre/post readings have changed
+ * signs by comparing it with the first valid
+ * pre_i value.
+ */
+ if (!first_pre_i)
+ first_pre_i = pre_i;
+
+ if ((first_pre_i < 0 && pre_i > 0) ||
+ (first_pre_i > 0 && post_i < 0) ||
+ (first_pre_i < 0 && post_i > 0)) {
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "ESR-sign mismatch %d reject all data\n", i);
+ esr_avg = count = 0;
+ break;
+ }
+
+ /* calculate ESR */
+ diff_v = abs(post_v - pre_v);
+ diff_i = abs(post_i - pre_i);
+
+ if (!diff_v || !diff_i ||
+ (diff_i < chip->dt.esr_qual_i_ua) ||
+ (diff_v < chip->dt.esr_qual_v_uv)) {
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "ESR (%d) V/I %duA %duV fails qualification\n",
+ i, diff_i, diff_v);
+ chip->esr_data[i].valid = false;
+ continue;
+ }
+
+ chip->esr_data[i].esr =
+ DIV_ROUND_CLOSEST(diff_v * 1000, diff_i);
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "ESR qualified: i=%d pre_i=%d pre_v=%d post_i=%d post_v=%d esr_diff_v=%d esr_diff_i=%d esr=%d\n",
+ i, pre_i, pre_v, post_i, post_v,
+ diff_v, diff_i, chip->esr_data[i].esr);
+
+ esr_avg += chip->esr_data[i].esr;
+ count++;
+ }
+
+ if (!count) {
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "No ESR samples qualified, ESR not found\n");
+ chip->esr_avg = 0;
+ return 0;
+ }
+
+ esr_avg /= count;
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "ESR all sample average=%d count=%d apply_SD=%d\n",
+ esr_avg, count, (esr_avg * ESR_SD_PERCENT) / 100);
/*
- * Reject the FIFO read event if there are back-to-back requests
- * This is done to gaurantee that there is always a minimum FIFO
- * data to be processed, ignore this if vbat_low is set.
+ * Reject ESR samples which do not fall in
+ * 10% the standard-deviation
*/
- time_delta = ktime_ms_delta(now, chip->last_user_update_time);
+ count = 0;
+ for (i = 0; i < qg_esr_count; i++) {
+ if (!chip->esr_data[i].valid)
+ continue;
- qg_dbg(chip, QG_DEBUG_FIFO, "time_delta=%lld ms vbat_low=%d\n",
- time_delta, vbat_low);
+ if ((abs(chip->esr_data[i].esr - esr_avg) <=
+ (esr_avg * ESR_SD_PERCENT) / 100)) {
+ /* valid ESR */
+ chip->esr_avg += chip->esr_data[i].esr;
+ count++;
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "Valid ESR after SD (%d) %d mOhm\n",
+ i, chip->esr_data[i].esr);
+ } else {
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "ESR (%d) %d falls-out of SD(%d)\n",
+ i, chip->esr_data[i].esr, ESR_SD_PERCENT);
+ }
+ }
- if (time_delta > MIN_FIFO_FULL_TIME_MS || vbat_low || update_smb) {
- rc = qg_master_hold(chip, true);
+ if (count >= QG_MIN_ESR_COUNT) {
+ chip->esr_avg /= count;
+ qg_dbg(chip, QG_DEBUG_ESR, "Average estimated ESR %d mOhm\n",
+ chip->esr_avg);
+ } else {
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "Not enough ESR samples, ESR not found\n");
+ chip->esr_avg = 0;
+ }
+
+ return 0;
+}
+
+static int qg_esr_estimate(struct qpnp_qg *chip)
+{
+ int rc, i, ibat;
+ u8 esr_done_count, reg0 = 0, reg1 = 0;
+ bool is_charging = false;
+
+ if (chip->dt.esr_disable)
+ return 0;
+
+ /*
+ * Charge - enable ESR estimation only during fast-charging.
+ * Discharge - enable ESR estimation only if enabled via DT.
+ */
+ if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+ chip->charge_type != POWER_SUPPLY_CHARGE_TYPE_FAST) {
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "Skip ESR, Not in fast-charge (CC)\n");
+ return 0;
+ }
+
+ if (chip->charge_status != POWER_SUPPLY_STATUS_CHARGING &&
+ !chip->dt.esr_discharge_enable)
+ return 0;
+
+ if (chip->batt_soc != INT_MIN && (chip->batt_soc <
+ chip->dt.esr_disable_soc)) {
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "Skip ESR, batt-soc below %d\n",
+ chip->dt.esr_disable_soc);
+ return 0;
+ }
+
+ qg_dbg(chip, QG_DEBUG_ESR, "FIFO done count=%d ESR mod count=%d\n",
+ chip->fifo_done_count, qg_esr_mod_count);
+
+ if ((chip->fifo_done_count % qg_esr_mod_count) != 0)
+ return 0;
+
+ if (qg_esr_count > QG_MAX_ESR_COUNT)
+ qg_esr_count = QG_MAX_ESR_COUNT;
+
+ if (qg_esr_count < QG_MIN_ESR_COUNT)
+ qg_esr_count = QG_MIN_ESR_COUNT;
+
+ /* clear all data */
+ chip->esr_avg = 0;
+ memset(&chip->esr_data, 0, sizeof(chip->esr_data));
+
+ rc = qg_master_hold(chip, true);
+ if (rc < 0) {
+ pr_err("Failed to hold master, rc=%d\n", rc);
+ goto done;
+ }
+
+ for (i = 0; i < qg_esr_count; i++) {
+ /* Fire ESR measurement */
+ rc = qg_masked_write(chip,
+ chip->qg_base + QG_ESR_MEAS_TRIG_REG,
+ HW_ESR_MEAS_START_BIT, HW_ESR_MEAS_START_BIT);
if (rc < 0) {
- pr_err("Failed to hold master, rc=%d\n", rc);
- goto done;
+ pr_err("Failed to start ESR rc=%d\n", rc);
+ continue;
}
- rc = qg_process_rt_fifo(chip);
- if (rc < 0) {
- pr_err("Failed to process FIFO real-time, rc=%d\n", rc);
- goto done;
- }
+ esr_done_count = reg0 = reg1 = 0;
+ do {
+ /* delay for ESR processing to complete */
+ msleep(50);
- if (vbat_low) {
- /* change FIFO length */
- rc = qg_update_fifo_length(chip,
- chip->dt.s2_vbat_low_fifo_length);
+ esr_done_count++;
+
+ rc = qg_read(chip,
+ chip->qg_base + QG_STATUS1_REG, ®0, 1);
+ if (rc < 0)
+ continue;
+
+ rc = qg_read(chip,
+ chip->qg_base + QG_STATUS4_REG, ®1, 1);
+ if (rc < 0)
+ continue;
+
+ /* check ESR-done status */
+ if (!(reg1 & ESR_MEAS_IN_PROGRESS_BIT) &&
+ (reg0 & ESR_MEAS_DONE_BIT)) {
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "ESR measurement done %d count %d\n",
+ i, esr_done_count);
+ break;
+ }
+ } while (esr_done_count < MAX_ESR_RETRY_COUNT);
+
+ if (esr_done_count == MAX_ESR_RETRY_COUNT) {
+ pr_err("Failed to get ESR done for %d iteration\n", i);
+ continue;
+ } else {
+ /* found a valid ESR, read pre-post data */
+ rc = qg_read_raw_data(chip, QG_PRE_ESR_V_DATA0_REG,
+ &chip->esr_data[i].pre_esr_v);
if (rc < 0)
goto done;
- qg_dbg(chip, QG_DEBUG_STATUS,
- "FIFO length updated to %d vbat_low=%d\n",
- chip->dt.s2_vbat_low_fifo_length,
- vbat_low);
- }
-
- if (update_smb) {
- rc = qg_masked_write(chip, chip->qg_base +
- QG_MODE_CTL1_REG, PARALLEL_IBAT_SENSE_EN_BIT,
- chip->parallel_enabled ?
- PARALLEL_IBAT_SENSE_EN_BIT : 0);
- if (rc < 0) {
- pr_err("Failed to update SMB_EN, rc=%d\n", rc);
+ rc = qg_read_raw_data(chip, QG_PRE_ESR_I_DATA0_REG,
+ &chip->esr_data[i].pre_esr_i);
+ if (rc < 0)
goto done;
- }
- qg_dbg(chip, QG_DEBUG_STATUS, "Parallel SENSE %d\n",
- chip->parallel_enabled);
+
+ rc = qg_read_raw_data(chip, QG_POST_ESR_V_DATA0_REG,
+ &chip->esr_data[i].post_esr_v);
+ if (rc < 0)
+ goto done;
+
+ rc = qg_read_raw_data(chip, QG_POST_ESR_I_DATA0_REG,
+ &chip->esr_data[i].post_esr_i);
+ if (rc < 0)
+ goto done;
+
+ chip->esr_data[i].pre_esr_v =
+ V_RAW_TO_UV(chip->esr_data[i].pre_esr_v);
+ ibat = sign_extend32(chip->esr_data[i].pre_esr_i, 15);
+ chip->esr_data[i].pre_esr_i = I_RAW_TO_UA(ibat);
+ chip->esr_data[i].post_esr_v =
+ V_RAW_TO_UV(chip->esr_data[i].post_esr_v);
+ ibat = sign_extend32(chip->esr_data[i].post_esr_i, 15);
+ chip->esr_data[i].post_esr_i = I_RAW_TO_UA(ibat);
+
+ chip->esr_data[i].valid = true;
+
+ if ((int)chip->esr_data[i].pre_esr_i < 0)
+ is_charging = true;
+
+ qg_dbg(chip, QG_DEBUG_ESR,
+ "ESR values for %d iteration pre_v=%d pre_i=%d post_v=%d post_i=%d\n",
+ i, chip->esr_data[i].pre_esr_v,
+ (int)chip->esr_data[i].pre_esr_i,
+ chip->esr_data[i].post_esr_v,
+ (int)chip->esr_data[i].post_esr_i);
}
-
- rc = qg_master_hold(chip, false);
- if (rc < 0) {
- pr_err("Failed to release master, rc=%d\n", rc);
- goto done;
- }
- /* FIFOs restarted */
- chip->last_fifo_update_time = ktime_get();
-
- /* signal the read thread */
- chip->data_ready = true;
- wake_up_interruptible(&chip->qg_wait_q);
- chip->last_user_update_time = now;
-
- /* vote to stay awake until userspace reads data */
- vote(chip->awake_votable, FIFO_RT_DONE_VOTER, true, 0);
- } else {
- qg_dbg(chip, QG_DEBUG_FIFO, "FIFO processing too early time_delta=%lld\n",
- time_delta);
+ /* delay before the next ESR measurement */
+ msleep(200);
}
+
+ rc = qg_process_esr_data(chip);
+ if (rc < 0)
+ pr_err("Failed to process ESR data rc=%d\n", rc);
+
+ rc = qg_master_hold(chip, false);
+ if (rc < 0) {
+ pr_err("Failed to release master, rc=%d\n", rc);
+ goto done;
+ }
+
+ if (chip->esr_avg) {
+ chip->kdata.param[QG_ESR].data = chip->esr_avg;
+ chip->kdata.param[QG_ESR].valid = true;
+ qg_dbg(chip, QG_DEBUG_ESR, "ESR_SW=%d during %s\n",
+ chip->esr_avg, is_charging ? "CHARGE" : "DISCHARGE");
+ qg_retrieve_esr_params(chip);
+ }
+
+ return 0;
done:
qg_master_hold(chip, false);
return rc;
@@ -669,7 +1029,7 @@
chip->udata.param[QG_RBAT_MOHM].data;
chip->sdam_data[SDAM_VALID] = 1;
- rc = qg_update_sdam_params(chip);
+ rc = qg_store_soc_params(chip);
if (rc < 0)
pr_err("Failed to update SDAM params, rc=%d\n", rc);
}
@@ -678,6 +1038,12 @@
chip->charge_counter_uah =
chip->udata.param[QG_CHARGE_COUNTER].data;
+ if (chip->udata.param[QG_ESR].valid)
+ chip->esr_last = chip->udata.param[QG_ESR].data;
+
+ if (!chip->dt.esr_disable)
+ qg_store_esr_params(chip);
+
vote(chip->awake_votable, UDATA_READY_VOTER, false, 0);
}
@@ -717,6 +1083,9 @@
goto done;
}
+ if (++chip->fifo_done_count == U32_MAX)
+ chip->fifo_done_count = 0;
+
rc = qg_vbat_thresholds_config(chip);
if (rc < 0)
pr_err("Failed to apply VBAT EMPTY config rc=%d\n", rc);
@@ -727,6 +1096,12 @@
goto done;
}
+ rc = qg_esr_estimate(chip);
+ if (rc < 0) {
+ pr_err("Failed to estimate ESR, rc=%d\n", rc);
+ goto done;
+ }
+
rc = get_fifo_done_time(chip, false, &hw_delta_ms);
if (rc < 0)
hw_delta_ms = 0;
@@ -794,7 +1169,7 @@
chip->sdam_data[SDAM_OCV_UV] = ocv_uv;
chip->sdam_data[SDAM_VALID] = 1;
- qg_update_sdam_params(chip);
+ qg_store_soc_params(chip);
if (chip->qg_psy)
power_supply_changed(chip->qg_psy);
@@ -1265,6 +1640,9 @@
if (!rc)
pval->intval *= 1000;
break;
+ case POWER_SUPPLY_PROP_RESISTANCE_NOW:
+ pval->intval = chip->esr_last;
+ break;
case POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE:
pval->intval = chip->dt.rbat_conn_mohm;
break;
@@ -1336,6 +1714,7 @@
POWER_SUPPLY_PROP_CHARGE_COUNTER,
POWER_SUPPLY_PROP_RESISTANCE,
POWER_SUPPLY_PROP_RESISTANCE_ID,
+ POWER_SUPPLY_PROP_RESISTANCE_NOW,
POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE,
POWER_SUPPLY_PROP_DEBUG_BATTERY,
POWER_SUPPLY_PROP_BATTERY_TYPE,
@@ -1481,6 +1860,13 @@
}
rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
+ if (rc < 0)
+ pr_err("Failed to get charge-type, rc=%d\n", rc);
+ else
+ chip->charge_type = prop.intval;
+
+ rc = power_supply_get_property(chip->batt_psy,
POWER_SUPPLY_PROP_STATUS, &prop);
if (rc < 0)
pr_err("Failed to get charger status, rc=%d\n", rc);
@@ -1996,7 +2382,7 @@
if (rc < 0)
pr_err("Failed to update MSOC register rc=%d\n", rc);
- rc = qg_update_sdam_params(chip);
+ rc = qg_store_soc_params(chip);
if (rc < 0)
pr_err("Failed to update sdam params rc=%d\n", rc);
@@ -2224,6 +2610,10 @@
QG_INIT_STATE_IRQ_DISABLE, true, 0);
}
+ /* restore ESR data */
+ if (!chip->dt.esr_disable)
+ qg_retrieve_esr_params(chip);
+
return 0;
}
@@ -2373,10 +2763,13 @@
#define DEFAULT_CL_MAX_START_SOC 15
#define DEFAULT_CL_MIN_TEMP_DECIDEGC 150
#define DEFAULT_CL_MAX_TEMP_DECIDEGC 500
-#define DEFAULT_CL_MAX_INC_DECIPERC 5
-#define DEFAULT_CL_MAX_DEC_DECIPERC 100
-#define DEFAULT_CL_MIN_LIM_DECIPERC 0
-#define DEFAULT_CL_MAX_LIM_DECIPERC 0
+#define DEFAULT_CL_MAX_INC_DECIPERC 10
+#define DEFAULT_CL_MAX_DEC_DECIPERC 20
+#define DEFAULT_CL_MIN_LIM_DECIPERC 500
+#define DEFAULT_CL_MAX_LIM_DECIPERC 100
+#define DEFAULT_ESR_QUAL_CURRENT_UA 130000
+#define DEFAULT_ESR_QUAL_VBAT_UV 7000
+#define DEFAULT_ESR_DISABLE_SOC 1000
static int qg_parse_dt(struct qpnp_qg *chip)
{
int rc = 0;
@@ -2570,6 +2963,31 @@
else
chip->dt.rbat_conn_mohm = temp;
+ /* esr */
+ chip->dt.esr_disable = of_property_read_bool(node,
+ "qcom,esr-disable");
+
+ chip->dt.esr_discharge_enable = of_property_read_bool(node,
+ "qcom,esr-discharge-enable");
+
+ rc = of_property_read_u32(node, "qcom,esr-qual-current-ua", &temp);
+ if (rc < 0)
+ chip->dt.esr_qual_i_ua = DEFAULT_ESR_QUAL_CURRENT_UA;
+ else
+ chip->dt.esr_qual_i_ua = temp;
+
+ rc = of_property_read_u32(node, "qcom,esr-qual-vbatt-uv", &temp);
+ if (rc < 0)
+ chip->dt.esr_qual_v_uv = DEFAULT_ESR_QUAL_VBAT_UV;
+ else
+ chip->dt.esr_qual_v_uv = temp;
+
+ rc = of_property_read_u32(node, "qcom,esr-disable-soc", &temp);
+ if (rc < 0)
+ chip->dt.esr_disable_soc = DEFAULT_ESR_DISABLE_SOC;
+ else
+ chip->dt.esr_disable_soc = temp * 100;
+
/* Capacity learning params*/
if (!chip->dt.cl_disable) {
chip->dt.cl_feedback_on = of_property_read_bool(node,
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 5df241f..617ef62 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -331,6 +331,9 @@
if (rc < 0)
chg->otg_delay_ms = OTG_DEFAULT_DEGLITCH_TIME_MS;
+ chg->disable_stat_sw_override = of_property_read_bool(node,
+ "qcom,disable-stat-sw-override");
+
return 0;
}
@@ -1837,6 +1840,16 @@
}
}
+ if (chg->disable_stat_sw_override) {
+ rc = smblib_masked_write(chg, STAT_CFG_REG,
+ STAT_SW_OVERRIDE_CFG_BIT, 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't disable STAT SW override rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
return rc;
}
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 3129861..097b24a 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -353,6 +353,7 @@
bool use_extcon;
bool otg_present;
bool is_audio_adapter;
+ bool disable_stat_sw_override;
/* workaround flag */
u32 wa_flags;
diff --git a/drivers/power/supply/qcom/smb1390-charger.c b/drivers/power/supply/qcom/smb1390-charger.c
index 91d215e..a92c975 100644
--- a/drivers/power/supply/qcom/smb1390-charger.c
+++ b/drivers/power/supply/qcom/smb1390-charger.c
@@ -82,6 +82,7 @@
#define FCC_VOTER "FCC_VOTER"
#define ICL_VOTER "ICL_VOTER"
#define USB_VOTER "USB_VOTER"
+#define SWITCHER_TOGGLE_VOTER "SWITCHER_TOGGLE_VOTER"
enum {
SWITCHER_OFF_WINDOW_IRQ = 0,
@@ -126,6 +127,7 @@
bool status_change_running;
bool taper_work_running;
int adc_channel;
+ int irq_status;
};
struct smb_irq {
@@ -206,6 +208,18 @@
return true;
}
+static void cp_toggle_switcher(struct smb1390 *chip)
+{
+ vote(chip->disable_votable, SWITCHER_TOGGLE_VOTER, true, 0);
+
+ /* Delay for toggling switcher */
+ usleep_range(20, 30);
+
+ vote(chip->disable_votable, SWITCHER_TOGGLE_VOTER, false, 0);
+
+ return;
+}
+
static irqreturn_t default_irq_handler(int irq, void *data)
{
struct smb1390 *chip = data;
@@ -214,40 +228,13 @@
for (i = 0; i < NUM_IRQS; ++i) {
if (irq == chip->irqs[i])
pr_debug("%s IRQ triggered\n", smb_irqs[i].name);
+ chip->irq_status |= 1 << i;
}
kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
return IRQ_HANDLED;
}
-static irqreturn_t irev_irq_handler(int irq, void *data)
-{
- struct smb1390 *chip = data;
- int rc;
-
- pr_debug("IREV IRQ triggered\n");
-
- rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
- CMD_EN_SWITCHER_BIT, 0);
- if (rc < 0) {
- pr_err("Couldn't disable switcher by command mode, rc=%d\n",
- rc);
- goto out;
- }
-
- rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
- CMD_EN_SWITCHER_BIT, CMD_EN_SWITCHER_BIT);
- if (rc < 0) {
- pr_err("Couldn't enable switcher by command mode, rc=%d\n",
- rc);
- goto out;
- }
-
-out:
- kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
- return IRQ_HANDLED;
-}
-
static const struct smb_irq smb_irqs[] = {
[SWITCHER_OFF_WINDOW_IRQ] = {
.name = "switcher-off-window",
@@ -266,7 +253,7 @@
},
[IREV_IRQ] = {
.name = "irev-fault",
- .handler = irev_irq_handler,
+ .handler = default_irq_handler,
.wake = true,
},
[VPH_OV_HARD_IRQ] = {
@@ -340,6 +327,38 @@
return count;
}
+static ssize_t cp_irq_show(struct class *c, struct class_attribute *attr,
+ char *buf)
+{
+ struct smb1390 *chip = container_of(c, struct smb1390, cp_class);
+ int rc, val;
+
+ rc = smb1390_read(chip, CORE_INT_RT_STS_REG, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ val |= chip->irq_status;
+ chip->irq_status = 0;
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", val);
+}
+
+static ssize_t toggle_switcher_store(struct class *c,
+ struct class_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct smb1390 *chip = container_of(c, struct smb1390, cp_class);
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val)
+ cp_toggle_switcher(chip);
+
+ return count;
+}
+
static ssize_t die_temp_show(struct class *c, struct class_attribute *attr,
char *buf)
{
@@ -360,6 +379,8 @@
__ATTR_RO(stat1),
__ATTR_RO(stat2),
__ATTR_RW(enable),
+ __ATTR_RO(cp_irq),
+ __ATTR_WO(toggle_switcher),
__ATTR_RO(die_temp),
__ATTR_NULL,
};
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index aa53fce..180ec1f 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -57,7 +57,7 @@
static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
{
- long rc = OPAL_BUSY;
+ s64 rc = OPAL_BUSY;
int retries = 10;
u32 y_m_d;
u64 h_m_s_ms;
@@ -66,13 +66,17 @@
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
- if (rc == OPAL_BUSY_EVENT)
+ if (rc == OPAL_BUSY_EVENT) {
+ msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
- else if (retries-- && (rc == OPAL_HARDWARE
- || rc == OPAL_INTERNAL_ERROR))
- msleep(10);
- else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
- break;
+ } else if (rc == OPAL_BUSY) {
+ msleep(OPAL_BUSY_DELAY_MS);
+ } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
+ if (retries--) {
+ msleep(10); /* Wait 10ms before retry */
+ rc = OPAL_BUSY; /* go around again */
+ }
+ }
}
if (rc != OPAL_SUCCESS)
@@ -87,21 +91,26 @@
static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
{
- long rc = OPAL_BUSY;
+ s64 rc = OPAL_BUSY;
int retries = 10;
u32 y_m_d = 0;
u64 h_m_s_ms = 0;
tm_to_opal(tm, &y_m_d, &h_m_s_ms);
+
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_rtc_write(y_m_d, h_m_s_ms);
- if (rc == OPAL_BUSY_EVENT)
+ if (rc == OPAL_BUSY_EVENT) {
+ msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
- else if (retries-- && (rc == OPAL_HARDWARE
- || rc == OPAL_INTERNAL_ERROR))
- msleep(10);
- else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
- break;
+ } else if (rc == OPAL_BUSY) {
+ msleep(OPAL_BUSY_DELAY_MS);
+ } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
+ if (retries--) {
+ msleep(10); /* Wait 10ms before retry */
+ rc = OPAL_BUSY; /* go around again */
+ }
+ }
}
return rc == OPAL_SUCCESS ? 0 : -EIO;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 1e56018..e453d2a 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -591,13 +591,22 @@
int dasd_alias_add_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
- struct alias_lcu *lcu;
+ __u8 uaddr = private->uid.real_unit_addr;
+ struct alias_lcu *lcu = private->lcu;
unsigned long flags;
int rc;
- lcu = private->lcu;
rc = 0;
spin_lock_irqsave(&lcu->lock, flags);
+ /*
+ * Check if device and lcu type differ. If so, the uac data may be
+ * outdated and needs to be updated.
+ */
+ if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
+ lcu->flags |= UPDATE_PENDING;
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "uid type mismatch - trigger rescan");
+ }
if (!(lcu->flags & UPDATE_PENDING)) {
rc = _add_device_to_lcu(lcu, device, device);
if (rc)
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 41e28b2..8ac27ef 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -2,6 +2,8 @@
# S/390 character devices
#
+CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
+
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1167469..67903c9 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -451,6 +451,7 @@
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
+ struct channel_path *chp;
struct chp_link link;
struct chp_id chpid;
int status;
@@ -463,10 +464,17 @@
chpid.id = sei_area->rsid;
/* allocate a new channel path structure, if needed */
status = chp_get_status(chpid);
- if (status < 0)
- chp_new(chpid);
- else if (!status)
+ if (!status)
return;
+
+ if (status < 0) {
+ chp_new(chpid);
+ } else {
+ chp = chpid_to_chp(chpid);
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+ }
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
if ((sei_area->vf & 0xc0) != 0) {
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4fb494a..b20cd8f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1860,6 +1860,8 @@
break; /* standby */
if (sshdr.asc == 4 && sshdr.ascq == 0xc)
break; /* unavailable */
+ if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
+ break; /* sanitize in progress */
/*
* Issue command to spin up drive when not ready
*/
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 8d867a2..93c3af0 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -2749,7 +2749,7 @@
* the regulators.
*/
if (of_property_read_bool(np, "non-removable") &&
- strlen(android_boot_dev) &&
+ !of_property_read_bool(np, "force-ufshc-probe") &&
strcmp(android_boot_dev, dev_name(dev)))
return -ENODEV;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index d427fb3..aeb09f3 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7090,7 +7090,10 @@
* To avoid these unnecessary/illegal step we skip to the last error
* handling stage: reset and restore.
*/
- if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
+ if ((lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) ||
+ (lrbp->lun == UFS_UPIU_REPORT_LUNS_WLUN) ||
+ (lrbp->lun == UFS_UPIU_BOOT_WLUN) ||
+ (lrbp->lun == UFS_UPIU_RPMB_WLUN))
return ufshcd_eh_host_reset_handler(cmd);
ufshcd_hold_all(hba);
@@ -8093,9 +8096,16 @@
/*
* If we failed to initialize the device or the device is not
* present, turn off the power/clocks etc.
+ * In cases when there's both ufs and emmc present and regulators
+ * are shared b/w the two, this shouldn't turn-off the regulators
+ * w/o giving emmc a chance to send PON.
+ * Hence schedule a delayed suspend, thus giving enough time to
+ * emmc to vote for the shared regulator.
*/
- if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress)
- pm_runtime_put_sync(hba->dev);
+ if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+ pm_runtime_put_noidle(hba->dev);
+ pm_schedule_suspend(hba->dev, MSEC_PER_SEC * 10);
+ }
trace_ufshcd_init(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
diff --git a/drivers/soc/qcom/jtagv8-etm.c b/drivers/soc/qcom/jtagv8-etm.c
index 3f4b8bc..23cbb7b 100644
--- a/drivers/soc/qcom/jtagv8-etm.c
+++ b/drivers/soc/qcom/jtagv8-etm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -345,7 +345,7 @@
TRCCNTVRn(j));
}
/* resource selection registers */
- for (j = 0; j < etmdata->nr_resource; j++)
+ for (j = 0; j < etmdata->nr_resource * 2; j++)
etmdata->state[i++] = etm_readl(etmdata, TRCRSCTLRn(j));
/* comparator registers */
for (j = 0; j < etmdata->nr_addr_cmp * 2; j++) {
@@ -448,7 +448,7 @@
etm_writel(etmdata, etmdata->state[i++], TRCCNTVRn(j));
}
/* resource selection registers */
- for (j = 0; j < etmdata->nr_resource; j++)
+ for (j = 0; j < etmdata->nr_resource * 2; j++)
etm_writel(etmdata, etmdata->state[i++], TRCRSCTLRn(j));
/* comparator registers */
for (j = 0; j < etmdata->nr_addr_cmp * 2; j++) {
@@ -932,7 +932,7 @@
for (j = 0; j < etmdata->nr_cntr; j++)
i = etm_read_crxr(etmdata->state, i, j);
/* resource selection registers */
- for (j = 0; j < etmdata->nr_resource; j++)
+ for (j = 0; j < etmdata->nr_resource * 2; j++)
i = etm_read_rsxr(etmdata->state, i, j + 2);
/* comparator registers */
for (j = 0; j < etmdata->nr_addr_cmp * 2; j++)
@@ -1387,7 +1387,7 @@
for (j = 0; j < etmdata->nr_cntr; j++)
i = etm_write_crxr(etmdata->state, i, j);
/* resource selection registers */
- for (j = 0; j < etmdata->nr_resource; j++)
+ for (j = 0; j < etmdata->nr_resource * 2; j++)
i = etm_write_rsxr(etmdata->state, i, j + 2);
/* comparator registers */
for (j = 0; j < etmdata->nr_addr_cmp * 2; j++)
@@ -1496,7 +1496,7 @@
val = etm_readl(etmdata, TRCIDR4);
etmdata->nr_addr_cmp = BMVAL(val, 0, 3);
etmdata->nr_data_cmp = BMVAL(val, 4, 7);
- etmdata->nr_resource = BMVAL(val, 16, 19);
+ etmdata->nr_resource = BMVAL(val, 16, 19) + 1;
etmdata->nr_ss_cmp = BMVAL(val, 20, 23);
etmdata->nr_ctxid_cmp = BMVAL(val, 24, 27);
etmdata->nr_vmid_cmp = BMVAL(val, 28, 31);
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index dd64148..edfb680 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -39,7 +39,6 @@
waiting threads. We should eventually use multiple queues and select the
queue based on the region.
- Add debugfs support for examining the permissions of regions.
- - Use ioremap_wc instead of ioremap_nocache.
- Remove VSOC_WAIT_FOR_INCOMING_INTERRUPT ioctl. This functionality has been
superseded by the futex and is there for legacy reasons.
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index c1103c7..6000707 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1003,7 +1003,6 @@
struct ion_device *dev = client->dev;
struct rb_node *n;
- pr_debug("%s: %d\n", __func__, __LINE__);
down_write(&dev->lock);
rb_erase(&client->node, &dev->clients);
up_write(&dev->lock);
@@ -1213,9 +1212,6 @@
int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
int i;
- pr_debug("%s: syncing for device %s\n", __func__,
- dev ? dev_name(dev) : "null");
-
if (!ion_buffer_fault_user_mappings(buffer))
return;
@@ -1269,7 +1265,6 @@
mutex_lock(&buffer->lock);
list_add(&vma_list->list, &buffer->vmas);
mutex_unlock(&buffer->lock);
- pr_debug("%s: adding %pK\n", __func__, vma);
}
static void ion_vm_close(struct vm_area_struct *vma)
@@ -1277,14 +1272,12 @@
struct ion_buffer *buffer = vma->vm_private_data;
struct ion_vma_list *vma_list, *tmp;
- pr_debug("%s\n", __func__);
mutex_lock(&buffer->lock);
list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
if (vma_list->vma != vma)
continue;
list_del(&vma_list->list);
kfree(vma_list);
- pr_debug("%s: deleting %pK\n", __func__, vma);
break;
}
mutex_unlock(&buffer->lock);
@@ -1680,7 +1673,6 @@
{
struct ion_client *client = file->private_data;
- pr_debug("%s: %d\n", __func__, __LINE__);
ion_client_destroy(client);
return 0;
}
@@ -1692,7 +1684,6 @@
struct ion_client *client;
char debug_name[64];
- pr_debug("%s: %d\n", __func__, __LINE__);
snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
client = ion_client_create(dev, debug_name);
if (IS_ERR(client))
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 72f2b6a..d991b02 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -81,8 +81,6 @@
struct device *dev = heap->priv;
struct ion_cma_buffer_info *info;
- dev_dbg(dev, "Request buffer allocation len %ld\n", len);
-
info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
if (!info)
return ION_CMA_ALLOCATE_FAILED;
@@ -123,7 +121,6 @@
/* keep this for memory release */
buffer->priv_virt = info;
- dev_dbg(dev, "Allocate buffer %pK\n", buffer);
return 0;
err:
@@ -137,7 +134,6 @@
struct ion_cma_buffer_info *info = buffer->priv_virt;
unsigned long attrs = 0;
- dev_dbg(dev, "Release buffer %pK\n", buffer);
/* release memory */
if (info->is_cached)
attrs |= DMA_ATTR_FORCE_COHERENT;
diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
index 587c66d7..954ed2c 100644
--- a/drivers/staging/android/vsoc.c
+++ b/drivers/staging/android/vsoc.c
@@ -81,8 +81,8 @@
atomic_t *incoming_signalled;
/* Flag indicating the guest has signalled the host. */
atomic_t *outgoing_signalled;
- int irq_requested;
- int device_created;
+ bool irq_requested;
+ bool device_created;
};
struct vsoc_device {
@@ -91,7 +91,7 @@
/* Physical address of SHARED_MEMORY_BAR. */
phys_addr_t shm_phys_start;
/* Kernel virtual address of SHARED_MEMORY_BAR. */
- void *kernel_mapped_shm;
+ void __iomem *kernel_mapped_shm;
/* Size of the entire shared memory window in bytes. */
size_t shm_size;
/*
@@ -116,22 +116,23 @@
* vsoc_region_data because the kernel deals with them as an array.
*/
struct msix_entry *msix_entries;
- /*
- * Flags that indicate what we've initialzied. These are used to do an
- * orderly cleanup of the device.
- */
- char enabled_device;
- char requested_regions;
- char cdev_added;
- char class_added;
- char msix_enabled;
/* Mutex that protectes the permission list */
struct mutex mtx;
/* Major number assigned by the kernel */
int major;
-
+ /* Character device assigned by the kernel */
struct cdev cdev;
+ /* Device class assigned by the kernel */
struct class *class;
+ /*
+ * Flags that indicate what we've initialized. These are used to do an
+ * orderly cleanup of the device.
+ */
+ bool enabled_device;
+ bool requested_regions;
+ bool cdev_added;
+ bool class_added;
+ bool msix_enabled;
};
static struct vsoc_device vsoc_dev;
@@ -153,13 +154,13 @@
static int vsoc_mmap(struct file *, struct vm_area_struct *);
static int vsoc_open(struct inode *, struct file *);
static int vsoc_release(struct inode *, struct file *);
-static ssize_t vsoc_read(struct file *, char *, size_t, loff_t *);
-static ssize_t vsoc_write(struct file *, const char *, size_t, loff_t *);
+static ssize_t vsoc_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t vsoc_write(struct file *, const char __user *, size_t, loff_t *);
static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin);
static int do_create_fd_scoped_permission(
struct vsoc_device_region *region_p,
struct fd_scoped_permission_node *np,
- struct fd_scoped_permission_arg *__user arg);
+ struct fd_scoped_permission_arg __user *arg);
static void do_destroy_fd_scoped_permission(
struct vsoc_device_region *owner_region_p,
struct fd_scoped_permission *perm);
@@ -198,7 +199,7 @@
/* Converts from shared memory offset to virtual address */
static inline void *shm_off_to_virtual_addr(__u32 offset)
{
- return vsoc_dev.kernel_mapped_shm + offset;
+ return (void __force *)vsoc_dev.kernel_mapped_shm + offset;
}
/* Converts from shared memory offset to physical address */
@@ -261,7 +262,7 @@
static int do_create_fd_scoped_permission(
struct vsoc_device_region *region_p,
struct fd_scoped_permission_node *np,
- struct fd_scoped_permission_arg *__user arg)
+ struct fd_scoped_permission_arg __user *arg)
{
struct file *managed_filp;
s32 managed_fd;
@@ -632,11 +633,11 @@
return 0;
}
-static ssize_t vsoc_read(struct file *filp, char *buffer, size_t len,
+static ssize_t vsoc_read(struct file *filp, char __user *buffer, size_t len,
loff_t *poffset)
{
__u32 area_off;
- void *area_p;
+ const void *area_p;
ssize_t area_len;
int retval = vsoc_validate_filep(filp);
@@ -706,7 +707,7 @@
return offset;
}
-static ssize_t vsoc_write(struct file *filp, const char *buffer,
+static ssize_t vsoc_write(struct file *filp, const char __user *buffer,
size_t len, loff_t *poffset)
{
__u32 area_off;
@@ -772,14 +773,14 @@
pci_name(pdev), result);
return result;
}
- vsoc_dev.enabled_device = 1;
+ vsoc_dev.enabled_device = true;
result = pci_request_regions(pdev, "vsoc");
if (result < 0) {
dev_err(&pdev->dev, "pci_request_regions failed\n");
vsoc_remove_device(pdev);
return -EBUSY;
}
- vsoc_dev.requested_regions = 1;
+ vsoc_dev.requested_regions = true;
/* Set up the control registers in BAR 0 */
reg_size = pci_resource_len(pdev, REGISTER_BAR);
if (reg_size > MAX_REGISTER_BAR_LEN)
@@ -790,7 +791,7 @@
if (!vsoc_dev.regs) {
dev_err(&pdev->dev,
- "cannot ioremap registers of size %zu\n",
+ "cannot map registers of size %zu\n",
(size_t)reg_size);
vsoc_remove_device(pdev);
return -EBUSY;
@@ -800,19 +801,17 @@
vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR);
vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR);
- dev_info(&pdev->dev, "shared memory @ DMA %p size=0x%zx\n",
- (void *)vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
- /* TODO(ghartman): ioremap_wc should work here */
- vsoc_dev.kernel_mapped_shm = ioremap_nocache(
- vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
+ dev_info(&pdev->dev, "shared memory @ DMA %pa size=0x%zx\n",
+ &vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
+ vsoc_dev.kernel_mapped_shm = pci_iomap_wc(pdev, SHARED_MEMORY_BAR, 0);
if (!vsoc_dev.kernel_mapped_shm) {
dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n");
vsoc_remove_device(pdev);
return -EBUSY;
}
- vsoc_dev.layout =
- (struct vsoc_shm_layout_descriptor *)vsoc_dev.kernel_mapped_shm;
+ vsoc_dev.layout = (struct vsoc_shm_layout_descriptor __force *)
+ vsoc_dev.kernel_mapped_shm;
dev_info(&pdev->dev, "major_version: %d\n",
vsoc_dev.layout->major_version);
dev_info(&pdev->dev, "minor_version: %d\n",
@@ -843,16 +842,16 @@
vsoc_remove_device(pdev);
return -EBUSY;
}
- vsoc_dev.cdev_added = 1;
+ vsoc_dev.cdev_added = true;
vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME);
if (IS_ERR(vsoc_dev.class)) {
dev_err(&vsoc_dev.dev->dev, "class_create failed\n");
vsoc_remove_device(pdev);
return PTR_ERR(vsoc_dev.class);
}
- vsoc_dev.class_added = 1;
- vsoc_dev.regions = (struct vsoc_device_region *)
- (vsoc_dev.kernel_mapped_shm +
+ vsoc_dev.class_added = true;
+ vsoc_dev.regions = (struct vsoc_device_region __force *)
+ ((void *)vsoc_dev.layout +
vsoc_dev.layout->vsoc_region_desc_offset);
vsoc_dev.msix_entries = kcalloc(
vsoc_dev.layout->region_count,
@@ -912,7 +911,7 @@
return -EFAULT;
}
}
- vsoc_dev.msix_enabled = 1;
+ vsoc_dev.msix_enabled = true;
for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
const struct vsoc_device_region *region = vsoc_dev.regions + i;
size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1;
@@ -930,14 +929,11 @@
&vsoc_dev.regions_data[i].interrupt_wait_queue);
init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue);
vsoc_dev.regions_data[i].incoming_signalled =
- vsoc_dev.kernel_mapped_shm +
- region->region_begin_offset +
+ shm_off_to_virtual_addr(region->region_begin_offset) +
h_to_g_signal_table->interrupt_signalled_offset;
vsoc_dev.regions_data[i].outgoing_signalled =
- vsoc_dev.kernel_mapped_shm +
- region->region_begin_offset +
+ shm_off_to_virtual_addr(region->region_begin_offset) +
g_to_h_signal_table->interrupt_signalled_offset;
-
result = request_irq(
vsoc_dev.msix_entries[i].vector,
vsoc_interrupt, 0,
@@ -950,7 +946,7 @@
vsoc_remove_device(pdev);
return -ENOSPC;
}
- vsoc_dev.regions_data[i].irq_requested = 1;
+ vsoc_dev.regions_data[i].irq_requested = true;
if (!device_create(vsoc_dev.class, NULL,
MKDEV(vsoc_dev.major, i),
NULL, vsoc_dev.regions_data[i].name)) {
@@ -958,7 +954,7 @@
vsoc_remove_device(pdev);
return -EBUSY;
}
- vsoc_dev.regions_data[i].device_created = 1;
+ vsoc_dev.regions_data[i].device_created = true;
}
return 0;
}
@@ -990,51 +986,51 @@
if (vsoc_dev.regions_data[i].device_created) {
device_destroy(vsoc_dev.class,
MKDEV(vsoc_dev.major, i));
- vsoc_dev.regions_data[i].device_created = 0;
+ vsoc_dev.regions_data[i].device_created = false;
}
if (vsoc_dev.regions_data[i].irq_requested)
free_irq(vsoc_dev.msix_entries[i].vector, NULL);
- vsoc_dev.regions_data[i].irq_requested = 0;
+ vsoc_dev.regions_data[i].irq_requested = false;
}
kfree(vsoc_dev.regions_data);
- vsoc_dev.regions_data = 0;
+ vsoc_dev.regions_data = NULL;
}
if (vsoc_dev.msix_enabled) {
pci_disable_msix(pdev);
- vsoc_dev.msix_enabled = 0;
+ vsoc_dev.msix_enabled = false;
}
kfree(vsoc_dev.msix_entries);
- vsoc_dev.msix_entries = 0;
- vsoc_dev.regions = 0;
+ vsoc_dev.msix_entries = NULL;
+ vsoc_dev.regions = NULL;
if (vsoc_dev.class_added) {
class_destroy(vsoc_dev.class);
- vsoc_dev.class_added = 0;
+ vsoc_dev.class_added = false;
}
if (vsoc_dev.cdev_added) {
cdev_del(&vsoc_dev.cdev);
- vsoc_dev.cdev_added = 0;
+ vsoc_dev.cdev_added = false;
}
if (vsoc_dev.major && vsoc_dev.layout) {
unregister_chrdev_region(MKDEV(vsoc_dev.major, 0),
vsoc_dev.layout->region_count);
vsoc_dev.major = 0;
}
- vsoc_dev.layout = 0;
+ vsoc_dev.layout = NULL;
if (vsoc_dev.kernel_mapped_shm) {
pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm);
- vsoc_dev.kernel_mapped_shm = 0;
+ vsoc_dev.kernel_mapped_shm = NULL;
}
if (vsoc_dev.regs) {
pci_iounmap(pdev, vsoc_dev.regs);
- vsoc_dev.regs = 0;
+ vsoc_dev.regs = NULL;
}
if (vsoc_dev.requested_regions) {
pci_release_regions(pdev);
- vsoc_dev.requested_regions = 0;
+ vsoc_dev.requested_regions = false;
}
if (vsoc_dev.enabled_device) {
pci_disable_device(pdev);
- vsoc_dev.enabled_device = 0;
+ vsoc_dev.enabled_device = false;
}
/* Do this last: it indicates that the device is not initialized. */
vsoc_dev.dev = NULL;
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index ad1186d..a45810b 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -185,6 +185,7 @@
* @regulator: pointer to the TMU regulator structure.
* @reg_conf: pointer to structure to register with core thermal.
* @ntrip: number of supported trip points.
+ * @enabled: current status of TMU device
* @tmu_initialize: SoC specific TMU initialization method
* @tmu_control: SoC specific TMU control method
* @tmu_read: SoC specific TMU temperature read method
@@ -205,6 +206,7 @@
struct regulator *regulator;
struct thermal_zone_device *tzd;
unsigned int ntrip;
+ bool enabled;
int (*tmu_initialize)(struct platform_device *pdev);
void (*tmu_control)(struct platform_device *pdev, bool on);
@@ -398,6 +400,7 @@
mutex_lock(&data->lock);
clk_enable(data->clk);
data->tmu_control(pdev, on);
+ data->enabled = on;
clk_disable(data->clk);
mutex_unlock(&data->lock);
}
@@ -889,19 +892,24 @@
static int exynos_get_temp(void *p, int *temp)
{
struct exynos_tmu_data *data = p;
+ int value, ret = 0;
- if (!data || !data->tmu_read)
+ if (!data || !data->tmu_read || !data->enabled)
return -EINVAL;
mutex_lock(&data->lock);
clk_enable(data->clk);
- *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
+ value = data->tmu_read(data);
+ if (value < 0)
+ ret = value;
+ else
+ *temp = code_to_temp(data, value) * MCELSIUS;
clk_disable(data->clk);
mutex_unlock(&data->lock);
- return 0;
+ return ret;
}
#ifdef CONFIG_THERMAL_EMULATION
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index fe22917..9e9016e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -137,6 +137,9 @@
struct mutex mutex;
/* Link layer */
+ int mode;
+#define DLCI_MODE_ABM 0 /* Normal Asynchronous Balanced Mode */
+#define DLCI_MODE_ADM 1 /* Asynchronous Disconnected Mode */
spinlock_t lock; /* Protects the internal state */
struct timer_list t1; /* Retransmit timer for SABM and UA */
int retries;
@@ -1380,7 +1383,13 @@
ctrl->data = data;
ctrl->len = clen;
gsm->pending_cmd = ctrl;
- gsm->cretries = gsm->n2;
+
+ /* If DLCI0 is in ADM mode skip retries, it won't respond */
+ if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
+ gsm->cretries = 1;
+ else
+ gsm->cretries = gsm->n2;
+
mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
gsm_control_transmit(gsm, ctrl);
spin_unlock_irqrestore(&gsm->control_lock, flags);
@@ -1488,6 +1497,7 @@
if (debug & 8)
pr_info("DLCI %d opening in ADM mode.\n",
dlci->addr);
+ dlci->mode = DLCI_MODE_ADM;
gsm_dlci_open(dlci);
} else {
gsm_dlci_close(dlci);
@@ -2865,11 +2875,22 @@
static int gsm_carrier_raised(struct tty_port *port)
{
struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+ struct gsm_mux *gsm = dlci->gsm;
+
/* Not yet open so no carrier info */
if (dlci->state != DLCI_OPEN)
return 0;
if (debug & 2)
return 1;
+
+ /*
+ * Basic mode with control channel in ADM mode may not respond
+ * to CMD_MSC at all and modem_rx is empty.
+ */
+ if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
+ !dlci->modem_rx)
+ return 1;
+
return dlci->modem_rx & TIOCM_CD;
}
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 3b31fd8..b9a4625 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -172,7 +172,7 @@
*/
int __init setup_earlycon(char *buf)
{
- const struct earlycon_id *match;
+ const struct earlycon_id **p_match;
if (!buf || !buf[0])
return -EINVAL;
@@ -180,7 +180,9 @@
if (early_con.flags & CON_ENABLED)
return -EALREADY;
- for (match = __earlycon_table; match < __earlycon_table_end; match++) {
+ for (p_match = __earlycon_table; p_match < __earlycon_table_end;
+ p_match++) {
+ const struct earlycon_id *match = *p_match;
size_t len = strlen(match->name);
if (strncmp(buf, match->name, len))
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index f575a33..ecadc27 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2145,7 +2145,9 @@
* and DCD (when they are outputs) or enables the respective
* irqs. So set this bit early, i.e. before requesting irqs.
*/
- writel(UFCR_DCEDTE, sport->port.membase + UFCR);
+ reg = readl(sport->port.membase + UFCR);
+ if (!(reg & UFCR_DCEDTE))
+ writel(reg | UFCR_DCEDTE, sport->port.membase + UFCR);
/*
* Disable UCR3_RI and UCR3_DCD irqs. They are also not
@@ -2156,7 +2158,15 @@
sport->port.membase + UCR3);
} else {
- writel(0, sport->port.membase + UFCR);
+ unsigned long ucr3 = UCR3_DSR;
+
+ reg = readl(sport->port.membase + UFCR);
+ if (reg & UFCR_DCEDTE)
+ writel(reg & ~UFCR_DCEDTE, sport->port.membase + UFCR);
+
+ if (!is_imx1_uart(sport))
+ ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
+ writel(ucr3, sport->port.membase + UCR3);
}
clk_disable_unprepare(sport->clk_ipg);
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index cdd2f94..b9c7a90 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -889,7 +889,16 @@
goto err_out;
uartclk = 0;
} else {
- clk_prepare_enable(clk);
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto err_out;
+
+ ret = devm_add_action_or_reset(&pdev->dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
+ if (ret)
+ goto err_out;
+
uartclk = clk_get_rate(clk);
}
@@ -988,7 +997,7 @@
uart_unregister_driver(&s->uart);
err_out:
if (!IS_ERR(s->regulator))
- return regulator_disable(s->regulator);
+ regulator_disable(s->regulator);
return ret;
}
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 4ee0a9d..789c814 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -3170,7 +3170,10 @@
kref_init(&tty->kref);
tty->magic = TTY_MAGIC;
- tty_ldisc_init(tty);
+ if (tty_ldisc_init(tty)) {
+ kfree(tty);
+ return NULL;
+ }
tty->session = NULL;
tty->pgrp = NULL;
mutex_init(&tty->legacy_mutex);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 3a9e2a2..4ab518d 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -175,12 +175,11 @@
return ERR_CAST(ldops);
}
- ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
- if (ld == NULL) {
- put_ldops(ldops);
- return ERR_PTR(-ENOMEM);
- }
-
+ /*
+ * There is no way to handle allocation failure of only 16 bytes.
+ * Let's simplify error handling and save more memory.
+ */
+ ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
ld->ops = ldops;
ld->tty = tty;
@@ -753,12 +752,13 @@
* the tty structure is not completely set up when this call is made.
*/
-void tty_ldisc_init(struct tty_struct *tty)
+int tty_ldisc_init(struct tty_struct *tty)
{
struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
if (IS_ERR(ld))
- panic("n_tty: init_tty");
+ return PTR_ERR(ld);
tty->ldisc = ld;
+ return 0;
}
/**
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 0dce6ab..876679a 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -186,7 +186,9 @@
static const unsigned short high_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_CONTROL] = 64,
[USB_ENDPOINT_XFER_ISOC] = 1024,
- [USB_ENDPOINT_XFER_BULK] = 512,
+
+ /* Bulk should be 512, but some devices use 1024: we will warn below */
+ [USB_ENDPOINT_XFER_BULK] = 1024,
[USB_ENDPOINT_XFER_INT] = 1024,
};
static const unsigned short super_speed_maxpacket_maxes[4] = {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index e4b39a7..6a4ea98 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2430,6 +2430,7 @@
spin_lock_irqsave (&hcd_root_hub_lock, flags);
if (hcd->rh_registered) {
+ pm_wakeup_event(&hcd->self.root_hub->dev, 0);
set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
queue_work(pm_wq, &hcd->wakeup_work);
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a9117ee..a9b3bbd 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -659,12 +659,17 @@
unsigned int portnum)
{
struct usb_hub *hub;
+ struct usb_port *port_dev;
if (!hdev)
return;
hub = usb_hub_to_struct_hub(hdev);
if (hub) {
+ port_dev = hub->ports[portnum - 1];
+ if (port_dev && port_dev->child)
+ pm_wakeup_event(&port_dev->child->dev, 0);
+
set_bit(portnum, hub->wakeup_bits);
kick_hub_wq(hub);
}
@@ -3428,8 +3433,11 @@
/* Skip the initial Clear-Suspend step for a remote wakeup */
status = hub_port_status(hub, port1, &portstatus, &portchange);
- if (status == 0 && !port_is_suspended(hub, portstatus))
+ if (status == 0 && !port_is_suspended(hub, portstatus)) {
+ if (portchange & USB_PORT_STAT_C_SUSPEND)
+ pm_wakeup_event(&udev->dev, 0);
goto SuspendCleared;
+ }
/* see 7.1.7.7; affects power usage, but not budgeting */
if (hub_is_superspeed(hub->hdev))
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 4f1c6f8..40ce175 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -45,6 +45,9 @@
{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* HP v222w 16GB Mini USB Drive */
+ { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index d4c243c..42936f1 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -70,18 +70,11 @@
struct usb_phy *phy = udc->transceiver;
if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP)) {
- u32 temp;
-
usb_phy_io_write(phy,
ULPI_MISC_A_VBUSVLDEXT |
ULPI_MISC_A_VBUSVLDEXTSEL,
ULPI_CLR(ULPI_MISC_A));
- /* Notify LINK of VBUS LOW */
- temp = readl_relaxed(USB_USBCMD);
- temp &= ~USBCMD_SESS_VLD_CTRL;
- writel_relaxed(temp, USB_USBCMD);
-
/*
* Add memory barrier as it is must to complete
* above USB PHY and Link register writes before
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 814b4a3e..a75e5d3 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -818,6 +818,27 @@
rndis_data_intf.bInterfaceNumber = status;
rndis_union_desc.bSlaveInterface0 = status;
+ if (rndis_opts->wceis) {
+ /* "Wireless" RNDIS; auto-detected by Windows */
+ rndis_iad_descriptor.bFunctionClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ rndis_iad_descriptor.bFunctionSubClass = 0x01;
+ rndis_iad_descriptor.bFunctionProtocol = 0x03;
+ rndis_control_intf.bInterfaceClass =
+ USB_CLASS_WIRELESS_CONTROLLER;
+ rndis_control_intf.bInterfaceSubClass = 0x01;
+ rndis_control_intf.bInterfaceProtocol = 0x03;
+ } else {
+ rndis_iad_descriptor.bFunctionClass = USB_CLASS_COMM;
+ rndis_iad_descriptor.bFunctionSubClass =
+ USB_CDC_SUBCLASS_ETHERNET;
+ rndis_iad_descriptor.bFunctionProtocol = USB_CDC_PROTO_NONE;
+ rndis_control_intf.bInterfaceClass = USB_CLASS_COMM;
+ rndis_control_intf.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM;
+ rndis_control_intf.bInterfaceProtocol =
+ USB_CDC_ACM_PROTO_VENDOR;
+ }
+
status = -ENODEV;
/* allocate instance-specific endpoints */
@@ -950,11 +971,15 @@
/* f_rndis_opts_ifname */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(rndis);
+/* f_rndis_opts_wceis */
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_WCEIS(rndis);
+
static struct configfs_attribute *rndis_attrs[] = {
&rndis_opts_attr_dev_addr,
&rndis_opts_attr_host_addr,
&rndis_opts_attr_qmult,
&rndis_opts_attr_ifname,
+ &rndis_opts_attr_wceis,
NULL,
};
@@ -1008,6 +1033,9 @@
}
opts->rndis_interf_group = rndis_interf_group;
+ /* Enable "Wireless" RNDIS by default */
+ opts->wceis = true;
+
return &opts->func_inst;
}
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index 0468459..1dd2ff4 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -188,4 +188,50 @@
\
CONFIGFS_ATTR_RO(_f_##_opts_, ifname)
+#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_WCEIS(_f_) \
+ static ssize_t _f_##_opts_wceis_show(struct config_item *item, \
+ char *page) \
+ { \
+ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
+ bool wceis; \
+ \
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
+ mutex_lock(&opts->lock); \
+ wceis = opts->wceis; \
+ mutex_unlock(&opts->lock); \
+ return snprintf(page, PAGE_SIZE, "%d", wceis); \
+ } \
+ \
+ static ssize_t _f_##_opts_wceis_store(struct config_item *item, \
+ const char *page, size_t len)\
+ { \
+ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
+ bool wceis; \
+ int ret; \
+ \
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
+ mutex_lock(&opts->lock); \
+ \
+ ret = kstrtobool(page, &wceis); \
+ if (ret) \
+ goto out; \
+ \
+ opts->wceis = wceis; \
+ ret = len; \
+out: \
+ mutex_unlock(&opts->lock); \
+ \
+ return ret; \
+ } \
+ \
+ CONFIGFS_ATTR(_f_##_opts_, wceis)
+
#endif /* __U_ETHER_CONFIGFS_H */
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
index 4e2ad04..f829a5e 100644
--- a/drivers/usb/gadget/function/u_rndis.h
+++ b/drivers/usb/gadget/function/u_rndis.h
@@ -38,6 +38,9 @@
*/
struct mutex lock;
int refcnt;
+
+ /* "Wireless" RNDIS; auto-detected by Windows */
+ bool wceis;
};
void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 2d9a806..579aa9a 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1774,6 +1774,7 @@
int vbus;
u8 devctl;
+ pm_runtime_get_sync(dev);
spin_lock_irqsave(&musb->lock, flags);
val = musb->a_wait_bcon;
vbus = musb_platform_get_vbus_status(musb);
@@ -1787,6 +1788,7 @@
vbus = 0;
}
spin_unlock_irqrestore(&musb->lock, flags);
+ pm_runtime_put_sync(dev);
return sprintf(buf, "Vbus %s, timeout %lu msec\n",
vbus ? "on" : "off", val);
@@ -2483,10 +2485,11 @@
musb_generic_disable(musb);
spin_unlock_irqrestore(&musb->lock, flags);
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_platform_exit(musb);
+
pm_runtime_dont_use_autosuspend(musb->controller);
pm_runtime_put_sync(musb->controller);
pm_runtime_disable(musb->controller);
- musb_platform_exit(musb);
musb_phy_callback = NULL;
if (musb->dma_controller)
musb_dma_controller_destroy(musb->dma_controller);
@@ -2710,7 +2713,8 @@
if ((devctl & mask) != (musb->context.devctl & mask))
musb->port1_status = 0;
- musb_start(musb);
+ musb_enable_interrupts(musb);
+ musb_platform_enable(musb);
spin_lock_irqsave(&musb->lock, flags);
error = musb_run_resume_work(musb);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index a55173c..f1219f6 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -442,7 +442,6 @@
req = next_request(musb_ep);
request = &req->request;
- trace_musb_req_tx(req);
csr = musb_readw(epio, MUSB_TXCSR);
musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
@@ -481,6 +480,8 @@
u8 is_dma = 0;
bool short_packet = false;
+ trace_musb_req_tx(req);
+
if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
is_dma = 1;
csr |= MUSB_TXCSR_P_WZC_BITS;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 4303389..e2bc915 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1023,7 +1023,9 @@
/* set tx_reinit and schedule the next qh */
ep->tx_reinit = 1;
}
- musb_start_urb(musb, is_in, next_qh);
+
+ if (next_qh)
+ musb_start_urb(musb, is_in, next_qh);
}
}
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 1550cae..a6326d5 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -273,6 +273,11 @@
module_param(dcp_max_current, int, 0644);
MODULE_PARM_DESC(dcp_max_current, "max current drawn for DCP charger");
+static bool chg_detection_for_float_charger;
+module_param(chg_detection_for_float_charger, bool, 0644);
+MODULE_PARM_DESC(chg_detection_for_float_charger,
+ "Whether to do PHY based charger detection for float chargers");
+
static struct msm_otg *the_msm_otg;
static bool debug_bus_voting_enabled;
@@ -2974,10 +2979,20 @@
set_bit(ID, &motg->inputs);
}
- if (test_bit(B_SESS_VLD, &motg->inputs) &&
- get_psy_type(motg) == POWER_SUPPLY_TYPE_UNKNOWN &&
- !motg->chg_detection)
- motg->chg_detection = true;
+ /*
+ * Enable PHY based charger detection in 2 cases:
+ * 1. PMI not capable of doing charger detection and provides VBUS
+ * notification with UNKNOWN psy type.
+ * 2. Data lines have been cut off from PMI, in which case it provides
+ * VBUS notification with FLOAT psy type and we want to do PHY based
+ * charger detection by setting 'chg_detection_for_float_charger'.
+ */
+ if (test_bit(B_SESS_VLD, &motg->inputs) && !motg->chg_detection) {
+ if ((get_psy_type(motg) == POWER_SUPPLY_TYPE_UNKNOWN) ||
+ (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_FLOAT &&
+ chg_detection_for_float_charger))
+ motg->chg_detection = true;
+ }
if (motg->chg_detection)
queue_delayed_work(motg->otg_wq, &motg->chg_work, 0);
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 584ae8c..77c3ebe 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -62,6 +62,7 @@
- Fundamental Software dongle.
- Google USB serial devices
- HP4x calculators
+ - Libtransistor USB console
- a number of Motorola phones
- Motorola Tetra devices
- Novatel Wireless GPS receivers
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index cab80ac..d985318 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -211,6 +211,7 @@
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+ { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 71cbc68..2e2f7363 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1911,7 +1911,8 @@
return ftdi_jtag_probe(serial);
if (udev->product &&
- (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
+ (!strcmp(udev->product, "Arrow USB Blaster") ||
+ !strcmp(udev->product, "BeagleBone/XDS100V2") ||
!strcmp(udev->product, "SNAP Connect E10")))
return ftdi_jtag_probe(serial);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1799aa0..d982c45 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -236,6 +236,8 @@
/* These Quectel products use Qualcomm's vendor ID */
#define QUECTEL_PRODUCT_UC20 0x9003
#define QUECTEL_PRODUCT_UC15 0x9090
+/* These u-blox products use Qualcomm's vendor ID */
+#define UBLOX_PRODUCT_R410M 0x90b2
/* These Yuga products use Qualcomm's vendor ID */
#define YUGA_PRODUCT_CLM920_NC5 0x9625
@@ -244,6 +246,7 @@
#define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_BG96 0x0296
+#define QUECTEL_PRODUCT_EP06 0x0306
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -550,147 +553,15 @@
#define WETELECOM_PRODUCT_6802 0x6802
#define WETELECOM_PRODUCT_WMD300 0x6803
-struct option_blacklist_info {
- /* bitmask of interface numbers blacklisted for send_setup */
- const unsigned long sendsetup;
- /* bitmask of interface numbers that are reserved */
- const unsigned long reserved;
-};
-static const struct option_blacklist_info four_g_w14_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
-};
+/* Device flags */
-static const struct option_blacklist_info four_g_w100_blacklist = {
- .sendsetup = BIT(1) | BIT(2),
- .reserved = BIT(3),
-};
+/* Interface does not support modem-control requests */
+#define NCTRL(ifnum) ((BIT(ifnum) & 0xff) << 8)
-static const struct option_blacklist_info alcatel_x200_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
- .reserved = BIT(4),
-};
+/* Interface is reserved */
+#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)
-static const struct option_blacklist_info zte_0037_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
-};
-
-static const struct option_blacklist_info zte_k3765_z_blacklist = {
- .sendsetup = BIT(0) | BIT(1) | BIT(2),
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_ad3812_z_blacklist = {
- .sendsetup = BIT(0) | BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info zte_mc2718_z_blacklist = {
- .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info zte_mc2716_z_blacklist = {
- .sendsetup = BIT(1) | BIT(2) | BIT(3),
-};
-
-static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
- .reserved = BIT(2) | BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info zte_me3620_xl_blacklist = {
- .reserved = BIT(3) | BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info zte_zm8620_x_blacklist = {
- .reserved = BIT(3) | BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info huawei_cdc12_blacklist = {
- .reserved = BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info net_intf0_blacklist = {
- .reserved = BIT(0),
-};
-
-static const struct option_blacklist_info net_intf1_blacklist = {
- .reserved = BIT(1),
-};
-
-static const struct option_blacklist_info net_intf2_blacklist = {
- .reserved = BIT(2),
-};
-
-static const struct option_blacklist_info net_intf3_blacklist = {
- .reserved = BIT(3),
-};
-
-static const struct option_blacklist_info net_intf4_blacklist = {
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info net_intf5_blacklist = {
- .reserved = BIT(5),
-};
-
-static const struct option_blacklist_info net_intf6_blacklist = {
- .reserved = BIT(6),
-};
-
-static const struct option_blacklist_info zte_mf626_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_1255_blacklist = {
- .reserved = BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info simcom_sim7100e_blacklist = {
- .reserved = BIT(5) | BIT(6),
-};
-
-static const struct option_blacklist_info telit_me910_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(3),
-};
-
-static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(3),
-};
-
-static const struct option_blacklist_info telit_le910_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info telit_le920_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(5),
-};
-
-static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
- .sendsetup = BIT(0),
- .reserved = BIT(1),
-};
-
-static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
- .sendsetup = BIT(2),
- .reserved = BIT(0) | BIT(1) | BIT(3),
-};
-
-static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(2) | BIT(3),
-};
-
-static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
- .reserved = BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
- .reserved = BIT(1) | BIT(4),
-};
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -724,26 +595,26 @@
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
{ USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ .driver_info = RSVD(1) | RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ .driver_info = RSVD(1) | RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */
- .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ .driver_info = RSVD(1) | RSVD(2) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
@@ -1188,65 +1059,70 @@
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
/* Quectel products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
/* Yuga products use Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
- .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
+ .driver_info = RSVD(1) | RSVD(4) },
+ /* u-blox products using Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
+ .driver_info = RSVD(1) | RSVD(3) },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
+ .driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
@@ -1254,38 +1130,38 @@
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
- .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
- .driver_info = (kernel_ulong_t)&telit_me910_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
- .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
+ .driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
- .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
- .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(5) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
- .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
- .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
@@ -1301,58 +1177,58 @@
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff,
- 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mf626_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff),
+ .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_0037_blacklist },
+ .driver_info = NCTRL(0) | NCTRL(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
@@ -1377,26 +1253,26 @@
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) },
@@ -1412,50 +1288,50 @@
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
@@ -1572,23 +1448,23 @@
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
@@ -1603,7 +1479,7 @@
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
@@ -1639,17 +1515,17 @@
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1667,8 +1543,8 @@
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
- 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff),
+ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
@@ -1679,20 +1555,20 @@
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
@@ -1844,19 +1720,19 @@
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
+ .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) | NCTRL(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+ .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
- .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
- .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+ .driver_info = RSVD(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
- .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
- .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
@@ -1876,37 +1752,34 @@
{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
- .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
+ .driver_info = RSVD(5) | RSVD(6) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
- .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
- },
+ .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
- .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
- },
+ .driver_info = NCTRL(0) | NCTRL(1) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
- .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
- },
+ .driver_info = NCTRL(1) | NCTRL(2) | RSVD(3) },
{USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist},
+ .driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -1932,14 +1805,14 @@
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
- .driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
+ .driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
@@ -1949,20 +1822,20 @@
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
@@ -2039,9 +1912,9 @@
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
{ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
@@ -2052,9 +1925,9 @@
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
@@ -2114,7 +1987,7 @@
struct usb_interface_descriptor *iface_desc =
&serial->interface->cur_altsetting->desc;
struct usb_device_descriptor *dev_desc = &serial->dev->descriptor;
- const struct option_blacklist_info *blacklist;
+ unsigned long device_flags = id->driver_info;
/* Never bind to the CD-Rom emulation interface */
if (iface_desc->bInterfaceClass == 0x08)
@@ -2125,9 +1998,7 @@
* the same class/subclass/protocol as the serial interfaces. Look at
* the Windows driver .INF files for reserved interface numbers.
*/
- blacklist = (void *)id->driver_info;
- if (blacklist && test_bit(iface_desc->bInterfaceNumber,
- &blacklist->reserved))
+ if (device_flags & RSVD(iface_desc->bInterfaceNumber))
return -ENODEV;
/*
* Don't bind network interface on Samsung GT-B3730, it is handled by
@@ -2138,8 +2009,8 @@
iface_desc->bInterfaceClass != USB_CLASS_CDC_DATA)
return -ENODEV;
- /* Store the blacklist info so we can use it during attach. */
- usb_set_serial_data(serial, (void *)blacklist);
+ /* Store the device flags so we can use them during attach. */
+ usb_set_serial_data(serial, (void *)device_flags);
return 0;
}
@@ -2147,22 +2018,21 @@
static int option_attach(struct usb_serial *serial)
{
struct usb_interface_descriptor *iface_desc;
- const struct option_blacklist_info *blacklist;
struct usb_wwan_intf_private *data;
+ unsigned long device_flags;
data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
if (!data)
return -ENOMEM;
- /* Retrieve blacklist info stored at probe. */
- blacklist = usb_get_serial_data(serial);
+ /* Retrieve device flags stored at probe. */
+ device_flags = (unsigned long)usb_get_serial_data(serial);
iface_desc = &serial->interface->cur_altsetting->desc;
- if (!blacklist || !test_bit(iface_desc->bInterfaceNumber,
- &blacklist->sendsetup)) {
+ if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
data->use_send_setup = 1;
- }
+
spin_lock_init(&data->susp_lock);
usb_set_serial_data(serial, data);
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 6aa7ff2..2674da4 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -66,6 +66,11 @@
0x01) }
DEVICE(google, GOOGLE_IDS);
+/* Libtransistor USB console */
+#define LIBTRANSISTOR_IDS() \
+ { USB_DEVICE(0x1209, 0x8b00) }
+DEVICE(libtransistor, LIBTRANSISTOR_IDS);
+
/* ViVOpay USB Serial Driver */
#define VIVOPAY_IDS() \
{ USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
@@ -113,6 +118,7 @@
&funsoft_device,
&flashloader_device,
&google_device,
+ &libtransistor_device,
&vivopay_device,
&moto_modem_device,
&motorola_tetra_device,
@@ -129,6 +135,7 @@
FUNSOFT_IDS(),
FLASHLOADER_IDS(),
GOOGLE_IDS(),
+ LIBTRANSISTOR_IDS(),
VIVOPAY_IDS(),
MOTO_IDS(),
MOTOROLA_TETRA_IDS(),
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 337a0be..dbc3801 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -338,47 +338,48 @@
goto exit;
}
- if (retval == sizeof(*connection_info)) {
- connection_info = (struct visor_connection_info *)
- transfer_buffer;
-
- num_ports = le16_to_cpu(connection_info->num_ports);
- for (i = 0; i < num_ports; ++i) {
- switch (
- connection_info->connections[i].port_function_id) {
- case VISOR_FUNCTION_GENERIC:
- string = "Generic";
- break;
- case VISOR_FUNCTION_DEBUGGER:
- string = "Debugger";
- break;
- case VISOR_FUNCTION_HOTSYNC:
- string = "HotSync";
- break;
- case VISOR_FUNCTION_CONSOLE:
- string = "Console";
- break;
- case VISOR_FUNCTION_REMOTE_FILE_SYS:
- string = "Remote File System";
- break;
- default:
- string = "unknown";
- break;
- }
- dev_info(dev, "%s: port %d, is for %s use\n",
- serial->type->description,
- connection_info->connections[i].port, string);
- }
+ if (retval != sizeof(*connection_info)) {
+ dev_err(dev, "Invalid connection information received from device\n");
+ retval = -ENODEV;
+ goto exit;
}
- /*
- * Handle devices that report invalid stuff here.
- */
+
+ connection_info = (struct visor_connection_info *)transfer_buffer;
+
+ num_ports = le16_to_cpu(connection_info->num_ports);
+
+ /* Handle devices that report invalid stuff here. */
if (num_ports == 0 || num_ports > 2) {
dev_warn(dev, "%s: No valid connect info available\n",
serial->type->description);
num_ports = 2;
}
+ for (i = 0; i < num_ports; ++i) {
+ switch (connection_info->connections[i].port_function_id) {
+ case VISOR_FUNCTION_GENERIC:
+ string = "Generic";
+ break;
+ case VISOR_FUNCTION_DEBUGGER:
+ string = "Debugger";
+ break;
+ case VISOR_FUNCTION_HOTSYNC:
+ string = "HotSync";
+ break;
+ case VISOR_FUNCTION_CONSOLE:
+ string = "Console";
+ break;
+ case VISOR_FUNCTION_REMOTE_FILE_SYS:
+ string = "Remote File System";
+ break;
+ default:
+ string = "unknown";
+ break;
+ }
+ dev_info(dev, "%s: port %d, is for %s use\n",
+ serial->type->description,
+ connection_info->connections[i].port, string);
+ }
dev_info(dev, "%s: Number of ports: %d\n", serial->type->description,
num_ports);
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 325b4c0..f761e02 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -201,7 +201,12 @@
if (!bid)
return -ENODEV;
+ /* device_attach() callers should hold parent lock for USB */
+ if (bid->udev->dev.parent)
+ device_lock(bid->udev->dev.parent);
ret = device_attach(&bid->udev->dev);
+ if (bid->udev->dev.parent)
+ device_unlock(bid->udev->dev.parent);
if (ret < 0) {
dev_err(&bid->udev->dev, "rebind failed\n");
return ret;
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index f0b955f..109e65b 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -258,7 +258,7 @@
#define VUDC_EVENT_ERROR_USB (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
#define VUDC_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
-#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
+#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
#define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index f163566..f8f7f38 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -105,10 +105,6 @@
unset_event(ud, USBIP_EH_UNUSABLE);
}
- /* Stop the error handler. */
- if (ud->event & USBIP_EH_BYE)
- usbip_dbg_eh("removed %p\n", ud);
-
wake_up(&ud->eh_waitq);
}
}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index d9cbda2..331ddd0 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -673,6 +673,9 @@
goto mknod_out;
}
+ if (!S_ISCHR(mode) && !S_ISBLK(mode))
+ goto mknod_out;
+
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
goto mknod_out;
@@ -681,10 +684,8 @@
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (buf == NULL) {
- kfree(full_path);
rc = -ENOMEM;
- free_xid(xid);
- return rc;
+ goto mknod_out;
}
if (backup_cred(cifs_sb))
@@ -731,7 +732,7 @@
pdev->minor = cpu_to_le64(MINOR(device_number));
rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
&bytes_written, iov, 1);
- } /* else if (S_ISFIFO) */
+ }
tcon->ses->server->ops->close(xid, tcon, &fid);
d_drop(direntry);
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 176b4b2..6776f4a 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -320,6 +320,7 @@
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_grpblk_t offset;
ext4_grpblk_t next_zero_bit;
+ ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
ext4_fsblk_t blk;
ext4_fsblk_t group_first_block;
@@ -337,20 +338,25 @@
/* check whether block bitmap block number is set */
blk = ext4_block_bitmap(sb, desc);
offset = blk - group_first_block;
- if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
/* bad block bitmap */
return blk;
/* check whether the inode bitmap block number is set */
blk = ext4_inode_bitmap(sb, desc);
offset = blk - group_first_block;
- if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
/* bad block bitmap */
return blk;
/* check whether the inode table block number is set */
blk = ext4_inode_table(sb, desc);
offset = blk - group_first_block;
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
+ return blk;
next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
EXT4_B2C(sbi, offset));
@@ -416,6 +422,7 @@
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
struct ext4_group_desc *desc;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
struct buffer_head *bh;
ext4_fsblk_t bitmap_blk;
int err;
@@ -424,6 +431,12 @@
if (!desc)
return ERR_PTR(-EFSCORRUPTED);
bitmap_blk = ext4_block_bitmap(sb, desc);
+ if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+ (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
+ ext4_error(sb, "Invalid block bitmap block %llu in "
+ "block_group %u", bitmap_blk, block_group);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
ext4_error(sb, "Cannot get buffer for block bitmap - "
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index a8573fa..fb9ae76 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5358,8 +5358,9 @@
stop = le32_to_cpu(extent->ee_block);
/*
- * In case of left shift, Don't start shifting extents until we make
- * sure the hole is big enough to accommodate the shift.
+ * For left shifts, make sure the hole on the left is big enough to
+ * accommodate the shift. For right shifts, make sure the last extent
+ * won't be shifted beyond EXT_MAX_BLOCKS.
*/
if (SHIFT == SHIFT_LEFT) {
path = ext4_find_extent(inode, start - 1, &path,
@@ -5379,9 +5380,14 @@
if ((start == ex_start && shift > ex_start) ||
(shift > start - ex_end)) {
- ext4_ext_drop_refs(path);
- kfree(path);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ if (shift > EXT_MAX_BLOCKS -
+ (stop + ext4_ext_get_actual_len(extent))) {
+ ret = -EINVAL;
+ goto out;
}
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index ef76b83..1ee26da 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -119,6 +119,7 @@
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
struct ext4_group_desc *desc;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
struct buffer_head *bh = NULL;
ext4_fsblk_t bitmap_blk;
int err;
@@ -128,6 +129,12 @@
return ERR_PTR(-EFSCORRUPTED);
bitmap_blk = ext4_inode_bitmap(sb, desc);
+ if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+ (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
+ ext4_error(sb, "Invalid inode bitmap blk %llu in "
+ "block_group %u", bitmap_blk, block_group);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
ext4_error(sb, "Cannot read inode bitmap - "
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 66b069b..9efe77e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1938,7 +1938,13 @@
redirty_out:
redirty_page_for_writepage(wbc, page);
- if (!err)
+ /*
* pageout() in MM translates EAGAIN, so calls handle_write_error()
+ * -> mapping_set_error() -> set_bit(AS_EIO, ...).
+ * file_write_and_wait_range() will see EIO error, which is critical
+ * to return value of fsync() followed by atomic_write failure to user.
+ */
+ if (!err || wbc->for_reclaim)
return AOP_WRITEPAGE_ACTIVATE;
unlock_page(page);
return err;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 604d3fc..66044fa 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -693,6 +693,7 @@
dec_page_count(fio.sbi, F2FS_DIRTY_META);
set_page_writeback(fio.encrypted_page);
+ ClearPageError(page);
/* allocate block address */
f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 26832e7..156ac4f 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -157,6 +157,7 @@
/* write data page to try to make data consistent */
set_page_writeback(page);
+ ClearPageError(page);
fio.old_blkaddr = dn->data_blkaddr;
set_inode_flag(dn->inode, FI_HOT_DATA);
write_data_page(dn, &fio);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index ccf410a..803a010 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1398,6 +1398,7 @@
fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
set_page_writeback(page);
+ ClearPageError(page);
fio.old_blkaddr = ni.blk_addr;
write_node_page(nid, &fio);
set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index fc4ee38..bdf567a 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -2758,6 +2758,7 @@
fio.op_flags &= ~REQ_META;
set_page_writeback(page);
+ ClearPageError(page);
f2fs_submit_page_write(&fio);
f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 5af226f..17ad41d 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1942,7 +1942,7 @@
}
if (!list_empty(&wb->work_list))
- mod_delayed_work(bdi_wq, &wb->dwork, 0);
+ wb_wakeup(wb);
else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
wb_wakeup_delayed(wb);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 4e5c610..9e9e093 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -528,6 +528,7 @@
*/
ret = start_this_handle(journal, handle, GFP_NOFS);
if (ret < 0) {
+ handle->h_journal = journal;
jbd2_journal_free_reserved(handle);
return ret;
}
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 4d51259..d484c63 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -274,6 +274,8 @@
if (ln->nlmsvc_users) {
if (--ln->nlmsvc_users == 0) {
nlm_shutdown_hosts_net(net);
+ cancel_delayed_work_sync(&ln->grace_period_end);
+ locks_end_grace(&ln->lockd_manager);
svc_shutdown_net(serv, net);
dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index abe157a5..5b2d1ea 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -253,7 +253,7 @@
* Inherently racy -- command line shares address space
* with code and data.
*/
- rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
+ rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON);
if (rv <= 0)
goto out_free_page;
@@ -271,7 +271,7 @@
int nr_read;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
+ nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -306,7 +306,7 @@
bool final;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
+ nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -355,7 +355,7 @@
bool final;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
+ nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -971,7 +971,7 @@
max_len = min_t(size_t, PAGE_SIZE, count);
this_len = min(max_len, this_len);
- retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
+ retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);
if (retval <= 0) {
ret = retval;
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index e9426a6..776d549 100644
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -51,7 +51,6 @@
* whether the base obbpath has been changed or not
*/
if (is_obbpath_invalid(dentry)) {
- d_drop(dentry);
return 0;
}
@@ -65,7 +64,6 @@
if ((lower_dentry->d_flags & DCACHE_OP_REVALIDATE)) {
err = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
if (err == 0) {
- d_drop(dentry);
goto out;
}
}
@@ -73,14 +71,12 @@
spin_lock(&lower_dentry->d_lock);
if (d_unhashed(lower_dentry)) {
spin_unlock(&lower_dentry->d_lock);
- d_drop(dentry);
err = 0;
goto out;
}
spin_unlock(&lower_dentry->d_lock);
if (parent_lower_dentry != lower_cur_parent_dentry) {
- d_drop(dentry);
err = 0;
goto out;
}
@@ -94,7 +90,6 @@
}
if (!qstr_case_eq(&dentry->d_name, &lower_dentry->d_name)) {
- __d_drop(dentry);
err = 0;
}
@@ -113,7 +108,6 @@
if (inode) {
data = top_data_get(SDCARDFS_I(inode));
if (!data || data->abandoned) {
- d_drop(dentry);
err = 0;
}
if (data)
@@ -131,6 +125,8 @@
static void sdcardfs_d_release(struct dentry *dentry)
{
+ if (!dentry || !dentry->d_fsdata)
+ return;
/* release and reset the lower paths */
if (has_graft_path(dentry))
sdcardfs_put_reset_orig_path(dentry);
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 843fcd2..98051996 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -41,8 +41,6 @@
void free_dentry_private_data(struct dentry *dentry)
{
- if (!dentry || !dentry->d_fsdata)
- return;
kmem_cache_free(sdcardfs_dentry_cachep, dentry->d_fsdata);
dentry->d_fsdata = NULL;
}
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index e4fd3fb..30e0c43 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -316,7 +316,7 @@
sb->s_root = d_make_root(inode);
if (!sb->s_root) {
err = -ENOMEM;
- goto out_iput;
+ goto out_sput;
}
d_set_d_op(sb->s_root, &sdcardfs_ci_dops);
@@ -361,8 +361,7 @@
/* no longer needed: free_dentry_private_data(sb->s_root); */
out_freeroot:
dput(sb->s_root);
-out_iput:
- iput(inode);
+ sb->s_root = NULL;
out_sput:
/* drop refs we took earlier */
atomic_dec(&lower_sb->s_active);
@@ -422,7 +421,7 @@
{
struct sdcardfs_sb_info *sbi;
- if (sb->s_magic == SDCARDFS_SUPER_MAGIC) {
+ if (sb->s_magic == SDCARDFS_SUPER_MAGIC && sb->s_fs_info) {
sbi = SDCARDFS_SB(sb);
mutex_lock(&sdcardfs_super_list_lock);
list_del(&sbi->list);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 362c6b4..1410835 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -846,22 +846,26 @@
if (error)
goto out_unlock;
} else if (mode & FALLOC_FL_INSERT_RANGE) {
- unsigned int blksize_mask = i_blocksize(inode) - 1;
+ unsigned int blksize_mask = i_blocksize(inode) - 1;
+ loff_t isize = i_size_read(inode);
- new_size = i_size_read(inode) + len;
if (offset & blksize_mask || len & blksize_mask) {
error = -EINVAL;
goto out_unlock;
}
- /* check the new inode size does not wrap through zero */
- if (new_size > inode->i_sb->s_maxbytes) {
+ /*
+ * New inode size must not exceed ->s_maxbytes, accounting for
+ * possible signed overflow.
+ */
+ if (inode->i_sb->s_maxbytes - isize < len) {
error = -EFBIG;
goto out_unlock;
}
+ new_size = isize + len;
/* Offset should be less than i_size */
- if (offset >= i_size_read(inode)) {
+ if (offset >= isize) {
error = -EINVAL;
goto out_unlock;
}
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index bf2d34c..f0d8b1c 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -13,7 +13,7 @@
*/
/**
- * futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
* argument and comparison of the previous
* futex value with another constant.
*
@@ -25,18 +25,11 @@
* <0 - On error
*/
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval, ret;
u32 tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
preempt_disable();
pagefault_disable();
@@ -74,17 +67,9 @@
pagefault_enable();
preempt_enable();
- if (ret == 0) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (ret == 0)
+ *oval = oldval;
+
return ret;
}
@@ -126,18 +111,9 @@
#else
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -153,17 +129,9 @@
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8236dbd..3c3519b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -172,7 +172,7 @@
#endif
#ifdef CONFIG_SERIAL_EARLYCON
-#define EARLYCON_TABLE() STRUCT_ALIGN(); \
+#define EARLYCON_TABLE() . = ALIGN(8); \
VMLINUX_SYMBOL(__earlycon_table) = .; \
*(__earlycon_table) \
VMLINUX_SYMBOL(__earlycon_table_end) = .;
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index c8696df..6da90d0 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,205 +14,213 @@
#ifndef _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
#define _DT_BINDINGS_CLK_MSM_GCC_SDM845_H
+/* Dummy clocks for rate measurement */
+#define MEASURE_ONLY_SNOC_CLK 0
+#define MEASURE_ONLY_CNOC_CLK 1
+#define MEASURE_ONLY_BIMC_CLK 2
+#define MEASURE_ONLY_IPA_2X_CLK 3
+#define UFS_PHY_AXI_EMMC_VOTE_CLK 4
+#define UFS_PHY_AXI_UFS_VOTE_CLK 5
+
/* GCC clock registers */
-#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0
-#define GCC_AGGRE_UFS_CARD_AXI_CLK 1
-#define GCC_AGGRE_UFS_PHY_AXI_CLK 2
-#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
-#define GCC_AGGRE_USB3_SEC_AXI_CLK 4
-#define GCC_BOOT_ROM_AHB_CLK 5
-#define GCC_CAMERA_AHB_CLK 6
-#define GCC_CAMERA_AXI_CLK 7
-#define GCC_CAMERA_XO_CLK 8
-#define GCC_CE1_AHB_CLK 9
-#define GCC_CE1_AXI_CLK 10
-#define GCC_CE1_CLK 11
-#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12
-#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13
-#define GCC_CPUSS_AHB_CLK 14
-#define GCC_CPUSS_AHB_CLK_SRC 15
-#define GCC_CPUSS_DVM_BUS_CLK 16
-#define GCC_CPUSS_GNOC_CLK 17
-#define GCC_CPUSS_RBCPR_CLK 18
-#define GCC_CPUSS_RBCPR_CLK_SRC 19
-#define GCC_DDRSS_GPU_AXI_CLK 20
-#define GCC_DISP_AHB_CLK 21
-#define GCC_DISP_AXI_CLK 22
-#define GCC_DISP_GPLL0_CLK_SRC 23
-#define GCC_DISP_GPLL0_DIV_CLK_SRC 24
-#define GCC_DISP_XO_CLK 25
-#define GCC_GP1_CLK 26
-#define GCC_GP1_CLK_SRC 27
-#define GCC_GP2_CLK 28
-#define GCC_GP2_CLK_SRC 29
-#define GCC_GP3_CLK 30
-#define GCC_GP3_CLK_SRC 31
-#define GCC_GPU_CFG_AHB_CLK 32
-#define GCC_GPU_GPLL0_CLK_SRC 33
-#define GCC_GPU_GPLL0_DIV_CLK_SRC 34
-#define GCC_GPU_MEMNOC_GFX_CLK 35
-#define GCC_GPU_SNOC_DVM_GFX_CLK 36
-#define GCC_MSS_AXIS2_CLK 37
-#define GCC_MSS_CFG_AHB_CLK 38
-#define GCC_MSS_GPLL0_DIV_CLK_SRC 39
-#define GCC_MSS_MFAB_AXIS_CLK 40
-#define GCC_MSS_Q6_MEMNOC_AXI_CLK 41
-#define GCC_MSS_SNOC_AXI_CLK 42
-#define GCC_PCIE_0_AUX_CLK 43
-#define GCC_PCIE_0_AUX_CLK_SRC 44
-#define GCC_PCIE_0_CFG_AHB_CLK 45
-#define GCC_PCIE_0_CLKREF_CLK 46
-#define GCC_PCIE_0_MSTR_AXI_CLK 47
-#define GCC_PCIE_0_PIPE_CLK 48
-#define GCC_PCIE_0_SLV_AXI_CLK 49
-#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 50
-#define GCC_PCIE_1_AUX_CLK 51
-#define GCC_PCIE_1_AUX_CLK_SRC 52
-#define GCC_PCIE_1_CFG_AHB_CLK 53
-#define GCC_PCIE_1_CLKREF_CLK 54
-#define GCC_PCIE_1_MSTR_AXI_CLK 55
-#define GCC_PCIE_1_PIPE_CLK 56
-#define GCC_PCIE_1_SLV_AXI_CLK 57
-#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 58
-#define GCC_PCIE_PHY_AUX_CLK 59
-#define GCC_PCIE_PHY_REFGEN_CLK 60
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC 61
-#define GCC_PDM2_CLK 62
-#define GCC_PDM2_CLK_SRC 63
-#define GCC_PDM_AHB_CLK 64
-#define GCC_PDM_XO4_CLK 65
-#define GCC_PRNG_AHB_CLK 66
-#define GCC_QMIP_CAMERA_AHB_CLK 67
-#define GCC_QMIP_DISP_AHB_CLK 68
-#define GCC_QMIP_VIDEO_AHB_CLK 69
-#define GCC_QUPV3_WRAP0_S0_CLK 70
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC 71
-#define GCC_QUPV3_WRAP0_S1_CLK 72
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC 73
-#define GCC_QUPV3_WRAP0_S2_CLK 74
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC 75
-#define GCC_QUPV3_WRAP0_S3_CLK 76
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC 77
-#define GCC_QUPV3_WRAP0_S4_CLK 78
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC 79
-#define GCC_QUPV3_WRAP0_S5_CLK 80
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC 81
-#define GCC_QUPV3_WRAP0_S6_CLK 82
-#define GCC_QUPV3_WRAP0_S6_CLK_SRC 83
-#define GCC_QUPV3_WRAP0_S7_CLK 84
-#define GCC_QUPV3_WRAP0_S7_CLK_SRC 85
-#define GCC_QUPV3_WRAP1_S0_CLK 86
-#define GCC_QUPV3_WRAP1_S0_CLK_SRC 87
-#define GCC_QUPV3_WRAP1_S1_CLK 88
-#define GCC_QUPV3_WRAP1_S1_CLK_SRC 89
-#define GCC_QUPV3_WRAP1_S2_CLK 90
-#define GCC_QUPV3_WRAP1_S2_CLK_SRC 91
-#define GCC_QUPV3_WRAP1_S3_CLK 92
-#define GCC_QUPV3_WRAP1_S3_CLK_SRC 93
-#define GCC_QUPV3_WRAP1_S4_CLK 94
-#define GCC_QUPV3_WRAP1_S4_CLK_SRC 95
-#define GCC_QUPV3_WRAP1_S5_CLK 96
-#define GCC_QUPV3_WRAP1_S5_CLK_SRC 97
-#define GCC_QUPV3_WRAP1_S6_CLK 98
-#define GCC_QUPV3_WRAP1_S6_CLK_SRC 99
-#define GCC_QUPV3_WRAP1_S7_CLK 100
-#define GCC_QUPV3_WRAP1_S7_CLK_SRC 101
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK 102
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK 103
-#define GCC_QUPV3_WRAP_1_M_AHB_CLK 104
-#define GCC_QUPV3_WRAP_1_S_AHB_CLK 105
-#define GCC_SDCC2_AHB_CLK 106
-#define GCC_SDCC2_APPS_CLK 107
-#define GCC_SDCC2_APPS_CLK_SRC 108
-#define GCC_SDCC4_AHB_CLK 109
-#define GCC_SDCC4_APPS_CLK 110
-#define GCC_SDCC4_APPS_CLK_SRC 111
-#define GCC_SYS_NOC_CPUSS_AHB_CLK 112
-#define GCC_TSIF_AHB_CLK 113
-#define GCC_TSIF_INACTIVITY_TIMERS_CLK 114
-#define GCC_TSIF_REF_CLK 115
-#define GCC_TSIF_REF_CLK_SRC 116
-#define GCC_UFS_CARD_AHB_CLK 117
-#define GCC_UFS_CARD_AXI_CLK 118
-#define GCC_UFS_CARD_AXI_CLK_SRC 119
-#define GCC_UFS_CARD_CLKREF_CLK 120
-#define GCC_UFS_CARD_ICE_CORE_CLK 121
-#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 122
-#define GCC_UFS_CARD_PHY_AUX_CLK 123
-#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 124
-#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 125
-#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 126
-#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 127
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK 128
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 129
-#define GCC_UFS_MEM_CLKREF_CLK 130
-#define GCC_UFS_PHY_AHB_CLK 131
-#define GCC_UFS_PHY_AXI_CLK 132
-#define GCC_UFS_PHY_AXI_CLK_SRC 133
-#define GCC_UFS_PHY_ICE_CORE_CLK 134
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 135
-#define GCC_UFS_PHY_PHY_AUX_CLK 136
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 137
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 138
-#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 139
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 140
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK 141
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 142
-#define GCC_USB30_PRIM_MASTER_CLK 143
-#define GCC_USB30_PRIM_MASTER_CLK_SRC 144
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK 145
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 146
-#define GCC_USB30_PRIM_SLEEP_CLK 147
-#define GCC_USB30_SEC_MASTER_CLK 148
-#define GCC_USB30_SEC_MASTER_CLK_SRC 149
-#define GCC_USB30_SEC_MOCK_UTMI_CLK 150
-#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 151
-#define GCC_USB30_SEC_SLEEP_CLK 152
-#define GCC_USB3_PRIM_CLKREF_CLK 153
-#define GCC_USB3_PRIM_PHY_AUX_CLK 154
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 155
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 156
-#define GCC_USB3_PRIM_PHY_PIPE_CLK 157
-#define GCC_USB3_SEC_CLKREF_CLK 158
-#define GCC_USB3_SEC_PHY_AUX_CLK 159
-#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 160
-#define GCC_USB3_SEC_PHY_COM_AUX_CLK 161
-#define GCC_USB3_SEC_PHY_PIPE_CLK 162
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK 163
-#define GCC_VIDEO_AHB_CLK 164
-#define GCC_VIDEO_AXI_CLK 165
-#define GCC_VIDEO_XO_CLK 166
-#define GPLL0 167
-#define GPLL0_OUT_EVEN 168
-#define GPLL0_OUT_MAIN 169
-#define GCC_UFS_CARD_AXI_HW_CTL_CLK 170
-#define GCC_UFS_PHY_AXI_HW_CTL_CLK 171
-#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 172
-#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 173
-#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 174
-#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 175
-#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 176
-#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 177
-#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 178
-#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 179
-#define GCC_GPU_IREF_CLK 180
-#define GCC_SDCC1_AHB_CLK 181
-#define GCC_SDCC1_APPS_CLK 182
-#define GCC_SDCC1_ICE_CORE_CLK 183
-#define GCC_SDCC1_APPS_CLK_SRC 184
-#define GCC_SDCC1_ICE_CORE_CLK_SRC 185
-#define GCC_APC_VS_CLK 186
-#define GCC_GPU_VS_CLK 187
-#define GCC_MSS_VS_CLK 188
-#define GCC_VDDA_VS_CLK 189
-#define GCC_VDDCX_VS_CLK 190
-#define GCC_VDDMX_VS_CLK 191
-#define GCC_VS_CTRL_AHB_CLK 192
-#define GCC_VS_CTRL_CLK 193
-#define GCC_VS_CTRL_CLK_SRC 194
-#define GCC_VSENSOR_CLK_SRC 195
-#define GPLL4 196
-#define GPLL6 197
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 6
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 7
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 8
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 9
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 10
+#define GCC_BOOT_ROM_AHB_CLK 11
+#define GCC_CAMERA_AHB_CLK 12
+#define GCC_CAMERA_AXI_CLK 13
+#define GCC_CAMERA_XO_CLK 14
+#define GCC_CE1_AHB_CLK 15
+#define GCC_CE1_AXI_CLK 16
+#define GCC_CE1_CLK 17
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 18
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 19
+#define GCC_CPUSS_AHB_CLK 20
+#define GCC_CPUSS_AHB_CLK_SRC 21
+#define GCC_CPUSS_DVM_BUS_CLK 22
+#define GCC_CPUSS_GNOC_CLK 23
+#define GCC_CPUSS_RBCPR_CLK 24
+#define GCC_CPUSS_RBCPR_CLK_SRC 25
+#define GCC_DDRSS_GPU_AXI_CLK 26
+#define GCC_DISP_AHB_CLK 27
+#define GCC_DISP_AXI_CLK 28
+#define GCC_DISP_GPLL0_CLK_SRC 29
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 30
+#define GCC_DISP_XO_CLK 31
+#define GCC_GP1_CLK 32
+#define GCC_GP1_CLK_SRC 33
+#define GCC_GP2_CLK 34
+#define GCC_GP2_CLK_SRC 35
+#define GCC_GP3_CLK 36
+#define GCC_GP3_CLK_SRC 37
+#define GCC_GPU_CFG_AHB_CLK 38
+#define GCC_GPU_GPLL0_CLK_SRC 39
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 40
+#define GCC_GPU_MEMNOC_GFX_CLK 41
+#define GCC_GPU_SNOC_DVM_GFX_CLK 42
+#define GCC_MSS_AXIS2_CLK 43
+#define GCC_MSS_CFG_AHB_CLK 44
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 45
+#define GCC_MSS_MFAB_AXIS_CLK 46
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK 47
+#define GCC_MSS_SNOC_AXI_CLK 48
+#define GCC_PCIE_0_AUX_CLK 49
+#define GCC_PCIE_0_AUX_CLK_SRC 50
+#define GCC_PCIE_0_CFG_AHB_CLK 51
+#define GCC_PCIE_0_CLKREF_CLK 52
+#define GCC_PCIE_0_MSTR_AXI_CLK 53
+#define GCC_PCIE_0_PIPE_CLK 54
+#define GCC_PCIE_0_SLV_AXI_CLK 55
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 56
+#define GCC_PCIE_1_AUX_CLK 57
+#define GCC_PCIE_1_AUX_CLK_SRC 58
+#define GCC_PCIE_1_CFG_AHB_CLK 59
+#define GCC_PCIE_1_CLKREF_CLK 60
+#define GCC_PCIE_1_MSTR_AXI_CLK 61
+#define GCC_PCIE_1_PIPE_CLK 62
+#define GCC_PCIE_1_SLV_AXI_CLK 63
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 64
+#define GCC_PCIE_PHY_AUX_CLK 65
+#define GCC_PCIE_PHY_REFGEN_CLK 66
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 67
+#define GCC_PDM2_CLK 68
+#define GCC_PDM2_CLK_SRC 69
+#define GCC_PDM_AHB_CLK 70
+#define GCC_PDM_XO4_CLK 71
+#define GCC_PRNG_AHB_CLK 72
+#define GCC_QMIP_CAMERA_AHB_CLK 73
+#define GCC_QMIP_DISP_AHB_CLK 74
+#define GCC_QMIP_VIDEO_AHB_CLK 75
+#define GCC_QUPV3_WRAP0_S0_CLK 76
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S1_CLK 78
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S2_CLK 80
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S3_CLK 82
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 83
+#define GCC_QUPV3_WRAP0_S4_CLK 84
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 85
+#define GCC_QUPV3_WRAP0_S5_CLK 86
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 87
+#define GCC_QUPV3_WRAP0_S6_CLK 88
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 89
+#define GCC_QUPV3_WRAP0_S7_CLK 90
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 91
+#define GCC_QUPV3_WRAP1_S0_CLK 92
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_S1_CLK 94
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 95
+#define GCC_QUPV3_WRAP1_S2_CLK 96
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S3_CLK 98
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 99
+#define GCC_QUPV3_WRAP1_S4_CLK 100
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 101
+#define GCC_QUPV3_WRAP1_S5_CLK 102
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 103
+#define GCC_QUPV3_WRAP1_S6_CLK 104
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 105
+#define GCC_QUPV3_WRAP1_S7_CLK 106
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 107
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 108
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 109
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 110
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 111
+#define GCC_SDCC2_AHB_CLK 112
+#define GCC_SDCC2_APPS_CLK 113
+#define GCC_SDCC2_APPS_CLK_SRC 114
+#define GCC_SDCC4_AHB_CLK 115
+#define GCC_SDCC4_APPS_CLK 116
+#define GCC_SDCC4_APPS_CLK_SRC 117
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 118
+#define GCC_TSIF_AHB_CLK 119
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 120
+#define GCC_TSIF_REF_CLK 121
+#define GCC_TSIF_REF_CLK_SRC 122
+#define GCC_UFS_CARD_AHB_CLK 123
+#define GCC_UFS_CARD_AXI_CLK 124
+#define GCC_UFS_CARD_AXI_CLK_SRC 125
+#define GCC_UFS_CARD_CLKREF_CLK 126
+#define GCC_UFS_CARD_ICE_CORE_CLK 127
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 128
+#define GCC_UFS_CARD_PHY_AUX_CLK 129
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 130
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 131
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 132
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 133
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 134
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 135
+#define GCC_UFS_MEM_CLKREF_CLK 136
+#define GCC_UFS_PHY_AHB_CLK 137
+#define GCC_UFS_PHY_AXI_CLK 138
+#define GCC_UFS_PHY_AXI_CLK_SRC 139
+#define GCC_UFS_PHY_ICE_CORE_CLK 140
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 141
+#define GCC_UFS_PHY_PHY_AUX_CLK 142
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 143
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 144
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 145
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 146
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 147
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 148
+#define GCC_USB30_PRIM_MASTER_CLK 149
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 150
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 151
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 152
+#define GCC_USB30_PRIM_SLEEP_CLK 153
+#define GCC_USB30_SEC_MASTER_CLK 154
+#define GCC_USB30_SEC_MASTER_CLK_SRC 155
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 156
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 157
+#define GCC_USB30_SEC_SLEEP_CLK 158
+#define GCC_USB3_PRIM_CLKREF_CLK 159
+#define GCC_USB3_PRIM_PHY_AUX_CLK 160
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 161
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 162
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 163
+#define GCC_USB3_SEC_CLKREF_CLK 164
+#define GCC_USB3_SEC_PHY_AUX_CLK 165
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 166
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 167
+#define GCC_USB3_SEC_PHY_PIPE_CLK 168
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 169
+#define GCC_VIDEO_AHB_CLK 170
+#define GCC_VIDEO_AXI_CLK 171
+#define GCC_VIDEO_XO_CLK 172
+#define GPLL0 173
+#define GPLL0_OUT_EVEN 174
+#define GPLL0_OUT_MAIN 175
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK 176
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 177
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 178
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 179
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 180
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 181
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 182
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 183
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 184
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 185
+#define GCC_GPU_IREF_CLK 186
+#define GCC_SDCC1_AHB_CLK 187
+#define GCC_SDCC1_APPS_CLK 188
+#define GCC_SDCC1_ICE_CORE_CLK 189
+#define GCC_SDCC1_APPS_CLK_SRC 190
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 191
+#define GCC_APC_VS_CLK 192
+#define GCC_GPU_VS_CLK 193
+#define GCC_MSS_VS_CLK 194
+#define GCC_VDDA_VS_CLK 195
+#define GCC_VDDCX_VS_CLK 196
+#define GCC_VDDMX_VS_CLK 197
+#define GCC_VS_CTRL_AHB_CLK 198
+#define GCC_VS_CTRL_CLK 199
+#define GCC_VS_CTRL_CLK_SRC 200
+#define GCC_VSENSOR_CLK_SRC 201
+#define GPLL4 202
+#define GPLL6 203
/* GCC reset clocks */
#define GCC_MMSS_BCR 0
@@ -243,10 +251,4 @@
#define GCC_PCIE_1_PHY_BCR 25
#define GCC_SDCC1_BCR 26
-/* Dummy clocks for rate measurement */
-#define MEASURE_ONLY_SNOC_CLK 0
-#define MEASURE_ONLY_CNOC_CLK 1
-#define MEASURE_ONLY_BIMC_CLK 2
-#define MEASURE_ONLY_IPA_2X_CLK 3
-
#endif
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index e518e4e..4b15481 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -37,10 +37,15 @@
* Our PSCI implementation stays the same across versions from
* v0.2 onward, only adding the few mandatory functions (such
* as FEATURES with 1.0) that are required by newer
- * revisions. It is thus safe to return the latest.
+ * revisions. It is thus safe to return the latest, unless
+ * userspace has instructed us otherwise.
*/
- if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+ if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
+ if (vcpu->kvm->arch.psci_version)
+ return vcpu->kvm->arch.psci_version;
+
return KVM_ARM_PSCI_LATEST;
+ }
return KVM_ARM_PSCI_0_1;
}
@@ -48,4 +53,11 @@
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+struct kvm_one_reg;
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+
#endif /* __KVM_ARM_PSCI_H__ */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 3a4f264..a8b4284 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -117,7 +117,7 @@
#define CLOCK_SOURCE_RESELECT 0x100
/* simplify initialization of mask field */
-#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
{
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index d9912e0..62f4fa8 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -146,7 +146,7 @@
* a new RANGE of SSIDs to the msg_mask_tbl.
*/
#define MSG_MASK_TBL_CNT 26
-#define APPS_EVENT_LAST_ID 0x0C5B
+#define APPS_EVENT_LAST_ID 0xC7A
#define MSG_SSID_0 0
#define MSG_SSID_0_LAST 125
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f4c0d36..ab7938a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -244,8 +244,16 @@
return *this_cpu_ptr(ops->disabled);
}
+#ifdef CONFIG_CFI_CLANG
+/* Use a C stub with the correct type for CFI */
+static inline void ftrace_stub(unsigned long a0, unsigned long a1,
+ struct ftrace_ops *op, struct pt_regs *regs)
+{
+}
+#else
extern void ftrace_stub(unsigned long a0, unsigned long a1,
struct ftrace_ops *op, struct pt_regs *regs);
+#endif
#else /* !CONFIG_FUNCTION_TRACER */
/*
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 8feecd5..7e39719 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -600,7 +600,7 @@
* Returns true if the skb is tagged with multiple vlan headers, regardless
* of whether it is hardware accelerated or not.
*/
-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
@@ -610,6 +610,9 @@
if (likely(!eth_type_vlan(protocol)))
return false;
+ if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
+ return false;
+
veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
}
@@ -627,7 +630,7 @@
*
* Returns features without unsafe ones if the skb has multiple tags.
*/
-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
netdev_features_t features)
{
if (skb_vlan_tagged_multi(skb)) {
diff --git a/include/linux/input/synaptics_dsx_v2_6.h b/include/linux/input/synaptics_dsx_v2_6.h
index 5cd26bb..75d4b77 100644
--- a/include/linux/input/synaptics_dsx_v2_6.h
+++ b/include/linux/input/synaptics_dsx_v2_6.h
@@ -59,6 +59,7 @@
* @y_flip: y flip flag
* @swap_axes: swap axes flag
* @resume_in_workqueue: defer resume function to workqueue
+ * @dont_disable_regs: do not disable/enable regulators in suspend/resume
* @irq_gpio: attention interrupt GPIO
* @irq_on_state: attention interrupt active state
* @power_gpio: power switch GPIO
@@ -75,6 +76,7 @@
* @power_delay_ms: delay time to wait after powering up device
* @reset_delay_ms: delay time to wait after resetting device
* @reset_active_ms: reset active time
+ * @bus_lpm_cur_uA: low power mode current setting for bus
* @byte_delay_us: delay time between two bytes of SPI data
* @block_delay_us: delay time between two SPI transfers
* @pwr_reg_name: pointer to name of regulator for power control
@@ -88,12 +90,14 @@
bool swap_axes;
bool resume_in_workqueue;
bool wakeup_gesture_en;
+ bool dont_disable_regs;
int irq_gpio;
int irq_on_state;
int power_gpio;
int power_on_state;
int reset_gpio;
int reset_on_state;
+ int bus_lpm_cur_uA;
int max_y_for_2d;
unsigned long irq_flags;
unsigned short i2c_addr;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 270f032..b328cca 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2246,6 +2246,7 @@
#define FOLL_MLOCK 0x1000 /* lock present pages */
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
#define FOLL_COW 0x4000 /* internal GUP flag */
+#define FOLL_ANON 0x8000 /* don't do file mappings */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 41d376e..e030a68 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -50,6 +50,13 @@
list_add(&page->lru, &lruvec->lists[lru]);
}
+static __always_inline void add_page_to_lru_list_tail(struct page *page,
+ struct lruvec *lruvec, enum lru_list lru)
+{
+ update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+ list_add_tail(&page->lru, &lruvec->lists[lru]);
+}
+
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0c28c28..815d0f4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -245,8 +245,6 @@
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
-/* Isolate clean file */
-#define ISOLATE_CLEAN ((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index b63fa45..3529683 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -85,6 +85,7 @@
unsigned int write_suspended:1;
unsigned int erase_suspended:1;
unsigned long in_progress_block_addr;
+ unsigned long in_progress_block_mask;
struct mutex mutex;
wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 1a94397..42e3f06 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -348,10 +348,10 @@
char name[16];
char compatible[128];
int (*setup)(struct earlycon_device *, const char *options);
-} __aligned(32);
+};
-extern const struct earlycon_id __earlycon_table[];
-extern const struct earlycon_id __earlycon_table_end[];
+extern const struct earlycon_id *__earlycon_table[];
+extern const struct earlycon_id *__earlycon_table_end[];
#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
#define EARLYCON_USED_OR_UNUSED __used
@@ -359,12 +359,19 @@
#define EARLYCON_USED_OR_UNUSED __maybe_unused
#endif
-#define OF_EARLYCON_DECLARE(_name, compat, fn) \
- static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
- EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \
+#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \
+ static const struct earlycon_id unique_id \
+ EARLYCON_USED_OR_UNUSED __initconst \
= { .name = __stringify(_name), \
.compatible = compat, \
- .setup = fn }
+ .setup = fn }; \
+ static const struct earlycon_id EARLYCON_USED_OR_UNUSED \
+ __section(__earlycon_table) \
+ * const __PASTE(__p, unique_id) = &unique_id
+
+#define OF_EARLYCON_DECLARE(_name, compat, fn) \
+ _OF_EARLYCON_DECLARE(_name, compat, fn, \
+ __UNIQUE_ID(__earlycon_##_name))
#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 00a1f33..9c452f6d 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -518,7 +518,7 @@
}
static inline struct kernfs_node *sysfs_get_dirent(struct kernfs_node *parent,
- const unsigned char *name)
+ const char *name)
{
return kernfs_find_and_get(parent, name);
}
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 6f1ee85..fe1b862 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -657,7 +657,7 @@
extern int tty_set_ldisc(struct tty_struct *tty, int disc);
extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
extern void tty_ldisc_release(struct tty_struct *tty);
-extern void tty_ldisc_init(struct tty_struct *tty);
+extern int __must_check tty_ldisc_init(struct tty_struct *tty);
extern void tty_ldisc_deinit(struct tty_struct *tty);
extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
char *f, int count);
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index d5eb547..3f8f3505 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -143,6 +143,9 @@
int virtio_device_restore(struct virtio_device *dev);
#endif
+#define virtio_device_for_each_vq(vdev, vq) \
+ list_for_each_entry(vq, &vdev->vqs, list)
+
/**
* virtio_driver - operations for a virtio I/O driver
* @driver: underlying device driver (populate name and owner).
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 9a8eb83..3eed4f1 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -43,7 +43,7 @@
*/
enum wb_reason {
WB_REASON_BACKGROUND,
- WB_REASON_TRY_TO_FREE_PAGES,
+ WB_REASON_VMSCAN,
WB_REASON_SYNC,
WB_REASON_PERIODIC,
WB_REASON_LAPTOP_TIMER,
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 95679cb..176dd2e 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -28,6 +28,19 @@
struct msm_isp_sof_info sof_info;
} u;
};
+
+struct msm_isp32_event_data32 {
+ struct compat_timeval timestamp;
+ struct compat_timeval mono_timestamp;
+ enum msm_vfe_input_src input_intf;
+ uint32_t frame_id;
+ union {
+ struct msm_isp_stats_event stats;
+ struct msm_isp_buf_event buf_done;
+ struct msm_isp32_error_info error_info;
+ } u;
+};
+
#endif
#ifdef CONFIG_MSM_AVTIMER
struct avtimer_fptr_t {
diff --git a/include/net/bonding.h b/include/net/bonding.h
index f32f7ef..7734cc9 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -197,6 +197,7 @@
struct slave __rcu *primary_slave;
struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
bool force_primary;
+ u32 nest_level;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
int (*recv_probe)(const struct sk_buff *, struct bonding *,
struct slave *);
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index c9b3eb7..567017b 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -55,6 +55,7 @@
#define tw_family __tw_common.skc_family
#define tw_state __tw_common.skc_state
#define tw_reuse __tw_common.skc_reuse
+#define tw_reuseport __tw_common.skc_reuseport
#define tw_ipv6only __tw_common.skc_ipv6only
#define tw_bound_dev_if __tw_common.skc_bound_dev_if
#define tw_node __tw_common.skc_nulls_node
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index fe994d2..ea985aa 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -97,6 +97,7 @@
struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
struct proto *prot, int kern);
+void llc_sk_stop_all_timers(struct sock *sk, bool sync);
void llc_sk_free(struct sock *sk);
void llc_sk_reset(struct sock *sk);
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 3334dbf..7fc7866 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -6,7 +6,7 @@
static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
{
- return remaining >= sizeof(*rtnh) &&
+ return remaining >= (int)sizeof(*rtnh) &&
rtnh->rtnh_len >= sizeof(*rtnh) &&
rtnh->rtnh_len <= remaining;
}
diff --git a/include/soc/qcom/clock-pll.h b/include/soc/qcom/clock-pll.h
index 1865e3c..dd7e186 100644
--- a/include/soc/qcom/clock-pll.h
+++ b/include/soc/qcom/clock-pll.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -174,6 +175,7 @@
extern const struct clk_ops clk_ops_local_pll;
extern const struct clk_ops clk_ops_sr2_pll;
+extern const struct clk_ops clk_ops_acpu_pll;
extern const struct clk_ops clk_ops_variable_rate_pll;
extern const struct clk_ops clk_ops_variable_rate_pll_hwfsm;
diff --git a/include/sound/control.h b/include/sound/control.h
index 21d047f..4142757 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -22,6 +22,7 @@
*
*/
+#include <linux/nospec.h>
#include <sound/asound.h>
#define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data)
@@ -147,12 +148,14 @@
static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
{
- return id->numid - kctl->id.numid;
+ unsigned int ioff = id->numid - kctl->id.numid;
+ return array_index_nospec(ioff, kctl->count);
}
static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
{
- return id->index - kctl->id.index;
+ unsigned int ioff = id->index - kctl->id.index;
+ return array_index_nospec(ioff, kctl->count);
}
static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 2ccd9cc..7bd8783 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -31,7 +31,7 @@
#define WB_WORK_REASON \
EM( WB_REASON_BACKGROUND, "background") \
- EM( WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages") \
+ EM( WB_REASON_VMSCAN, "vmscan") \
EM( WB_REASON_SYNC, "sync") \
EM( WB_REASON_PERIODIC, "periodic") \
EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index d0341cd..4b26cba 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -180,8 +180,12 @@
*/
struct drm_msm_gem_submit_reloc {
__u32 submit_offset; /* in, offset from submit_bo */
- __u32 or; /* in, value OR'd with result */
- __s32 shift; /* in, amount of left shift (can be negative) */
+#ifdef __cplusplus
+ __u32 or_val;
+#else
+ __u32 or; /* in, value OR'd with result */
+#endif
+ __s32 shift; /* in, amount of left shift (can be negative) */
__u32 reloc_idx; /* in, index of reloc_bo buffer */
__u64 reloc_offset; /* in, offset from start of reloc_bo */
};
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index dfcf371..fb1ec56 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -226,7 +226,6 @@
#define BLKSECDISCARD _IO(0x12,125)
#define BLKROTATIONAL _IO(0x12,126)
#define BLKZEROOUT _IO(0x12,127)
-#define BLKGETSTPART _IO(0x12, 128)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 4ee67cb..05b9bb6 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -870,6 +870,7 @@
#define KVM_CAP_S390_USER_INSTR0 130
#define KVM_CAP_MSI_DEVID 131
#define KVM_CAP_PPC_HTM 132
+#define KVM_CAP_S390_BPB 152
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 1c39daf..be4cb02 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -98,6 +98,8 @@
#define IPA_IOCTL_CLEANUP 56
#define IPA_IOCTL_QUERY_WLAN_CLIENT 57
#define IPA_IOCTL_GET_VLAN_MODE 58
+#define IPA_IOCTL_ADD_BRIDGE_VLAN_MAPPING 59
+#define IPA_IOCTL_DEL_BRIDGE_VLAN_MAPPING 60
/**
* max size of the header to be inserted
@@ -512,7 +514,13 @@
IPA_PER_CLIENT_STATS_EVENT_MAX
};
-#define IPA_EVENT_MAX_NUM (IPA_PER_CLIENT_STATS_EVENT_MAX)
+enum ipa_vlan_bridge_event {
+ ADD_BRIDGE_VLAN_MAPPING = IPA_PER_CLIENT_STATS_EVENT_MAX,
+ DEL_BRIDGE_VLAN_MAPPING,
+ BRIDGE_VLAN_MAPPING_MAX
+};
+
+#define IPA_EVENT_MAX_NUM (BRIDGE_VLAN_MAPPING_MAX)
#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
/**
@@ -1872,6 +1880,20 @@
};
/**
+ * struct ipa_ioc_bridge_vlan_mapping_info - vlan to bridge mapping info
+ * @bridge_name: bridge interface name
+ * @vlan_id: vlan ID bridge is mapped to
+ * @bridge_ipv4: bridge interface ipv4 address
+ * @subnet_mask: bridge interface subnet mask
+ */
+struct ipa_ioc_bridge_vlan_mapping_info {
+ char bridge_name[IPA_RESOURCE_NAME_MAX];
+ uint16_t vlan_id;
+ uint32_t bridge_ipv4;
+ uint32_t subnet_mask;
+};
+
+/**
* actual IOCTLs supported by IPA driver
*/
#define IPA_IOC_ADD_HDR _IOWR(IPA_IOC_MAGIC, \
@@ -2056,6 +2078,15 @@
#define IPA_IOC_GET_VLAN_MODE _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_GET_VLAN_MODE, \
struct ipa_ioc_get_vlan_mode *)
+
+#define IPA_IOC_ADD_BRIDGE_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_BRIDGE_VLAN_MAPPING, \
+ struct ipa_ioc_bridge_vlan_mapping_info)
+
+#define IPA_IOC_DEL_BRIDGE_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_BRIDGE_VLAN_MAPPING, \
+ struct ipa_ioc_bridge_vlan_mapping_info)
+
/*
* unique magic number of the Tethering bridge ioctls
*/
diff --git a/include/uapi/linux/qg.h b/include/uapi/linux/qg.h
index 2c7b49a..d338db9 100644
--- a/include/uapi/linux/qg.h
+++ b/include/uapi/linux/qg.h
@@ -14,10 +14,10 @@
QG_FIFO_TIME_DELTA,
QG_BATT_SOC,
QG_CC_SOC,
- QG_RESERVED_3,
- QG_RESERVED_4,
- QG_RESERVED_5,
- QG_RESERVED_6,
+ QG_ESR_CHARGE_DELTA,
+ QG_ESR_DISCHARGE_DELTA,
+ QG_ESR_CHARGE_SF,
+ QG_ESR_DISCHARGE_SF,
QG_RESERVED_7,
QG_RESERVED_8,
QG_RESERVED_9,
@@ -27,6 +27,10 @@
#define QG_BATT_SOC QG_BATT_SOC
#define QG_CC_SOC QG_CC_SOC
+#define QG_ESR_CHARGE_DELTA QG_ESR_CHARGE_DELTA
+#define QG_ESR_DISCHARGE_DELTA QG_ESR_DISCHARGE_DELTA
+#define QG_ESR_CHARGE_SF QG_ESR_CHARGE_SF
+#define QG_ESR_DISCHARGE_SF QG_ESR_DISCHARGE_SF
struct fifo_data {
unsigned int v;
diff --git a/include/uapi/linux/qseecom.h b/include/uapi/linux/qseecom.h
index f0a26b2..6de4c76 100644
--- a/include/uapi/linux/qseecom.h
+++ b/include/uapi/linux/qseecom.h
@@ -281,13 +281,6 @@
int flag;
};
-struct qseecom_encdec_conf_t {
- __le64 start_sector;
- size_t fs_size;
- int index;
- int mode;
-};
-
#define SG_ENTRY_SZ sizeof(struct qseecom_sg_entry)
#define SG_ENTRY_SZ_64BIT sizeof(struct qseecom_sg_entry_64bit)
@@ -399,7 +392,4 @@
#define QSEECOM_IOCTL_SET_ICE_INFO \
_IOWR(QSEECOM_IOC_MAGIC, 43, struct qseecom_ice_data_t)
-#define QSEECOM_IOCTL_SET_ENCDEC_INFO \
- _IOWR(QSEECOM_IOC_MAGIC, 44, struct qseecom_encdec_conf_t)
-
#endif /* _UAPI_QSEECOM_H_ */
diff --git a/include/uapi/media/msmb_isp.h b/include/uapi/media/msmb_isp.h
index 053fa76..74a8d93 100644
--- a/include/uapi/media/msmb_isp.h
+++ b/include/uapi/media/msmb_isp.h
@@ -18,6 +18,7 @@
#define ISP1_BIT (0x10000 << 2)
#define ISP_META_CHANNEL_BIT (0x10000 << 3)
#define ISP_SCRATCH_BUF_BIT (0x10000 << 4)
+#define ISP_PDAF_CHANNEL_BIT (0x10000 << 5)
#define ISP_OFFLINE_STATS_BIT (0x10000 << 5)
#define ISP_SVHDR_IN_BIT (0x10000 << 6) /* RDI hw stream for SVHDR */
#define ISP_SVHDR_OUT_BIT (0x10000 << 7) /* SVHDR output bufq stream*/
@@ -295,6 +296,11 @@
uint8_t rdi_cid;/*CID 1-16*/
};
+enum msm_stream_memory_input_t {
+ MEMORY_INPUT_DISABLED,
+ MEMORY_INPUT_ENABLED
+};
+
enum msm_stream_rdi_input_type {
MSM_CAMERA_RDI_MIN,
MSM_CAMERA_RDI_PDAF,
@@ -324,6 +330,29 @@
enum msm_stream_rdi_input_type rdi_input_type;
};
+struct msm_vfe32_axi_stream_request_cmd {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t vt_enable;
+ uint32_t output_format;/*Planar/RAW/Misc*/
+ enum msm_vfe_axi_stream_src stream_src; /*CAMIF/IDEAL/RDIs*/
+ struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+
+ uint32_t burst_count;
+ uint32_t hfr_mode;
+ uint8_t frame_base;
+
+ uint32_t init_frame_drop; /*MAX 31 Frames*/
+ enum msm_vfe_frame_skip_pattern frame_skip_pattern;
+ uint8_t buf_divert; /* if TRUE no vb2 buf done. */
+ /*Return values*/
+ uint32_t axi_stream_handle;
+ uint32_t controllable_output;
+ uint32_t burst_len;
+ /* Flag indicating memory input stream */
+ enum msm_stream_memory_input_t memory_input;
+};
+
struct msm_vfe_axi_stream_release_cmd {
uint32_t stream_handle;
};
@@ -680,7 +709,9 @@
ISP_PING_PONG_MISMATCH = 12,
ISP_REG_UPDATE_MISSING = 13,
ISP_BUF_FATAL_ERROR = 14,
- ISP_EVENT_MAX = 15
+ ISP_EVENT_MAX = 15,
+ ISP_WM_BUS_OVERFLOW = 16,
+ ISP_CAMIF_ERROR = 17,
};
#define ISP_EVENT_OFFSET 8
@@ -710,6 +741,7 @@
#define ISP_EVENT_REG_UPDATE_MISSING (ISP_EVENT_BASE + ISP_REG_UPDATE_MISSING)
#define ISP_EVENT_BUF_FATAL_ERROR (ISP_EVENT_BASE + ISP_BUF_FATAL_ERROR)
#define ISP_EVENT_STREAM_UPDATE_DONE (ISP_STREAM_EVENT_BASE)
+#define ISP_EVENT_WM_BUS_OVERFLOW (ISP_EVENT_BASE + ISP_WM_BUS_OVERFLOW)
/* The msm_v4l2_event_data structure should match the
* v4l2_event.u.data field.
@@ -759,6 +791,11 @@
uint32_t stream_id_mask;
};
+struct msm_isp32_error_info {
+ /* 1 << msm_isp_event_idx */
+ uint32_t error_mask;
+};
+
/* This structure reports delta between master and slave */
struct msm_isp_ms_delta_info {
uint8_t num_delta_info;
@@ -827,6 +864,25 @@
} u; /* union can have max 52 bytes */
};
+struct msm_isp32_event_data {
+ /*Wall clock except for buffer divert events
+ *which use monotonic clock
+ */
+ struct timeval timestamp;
+ /* Monotonic timestamp since bootup */
+ struct timeval mono_timestamp;
+ enum msm_vfe_input_src input_intf;
+ uint32_t frame_id;
+ union {
+ /* Sent for Stats_Done event */
+ struct msm_isp_stats_event stats;
+ /* Sent for Buf_Divert event */
+ struct msm_isp_buf_event buf_done;
+ struct msm_isp32_error_info error_info;
+ } u; /* union can have max 52 bytes */
+ uint32_t is_skip_pproc;
+};
+
enum msm_vfe_ahb_clk_vote {
MSM_ISP_CAMERA_AHB_SVS_VOTE = 1,
MSM_ISP_CAMERA_AHB_TURBO_VOTE = 2,
@@ -919,6 +975,7 @@
MSM_ISP_MAP_BUF_START_MULTI_PASS_FE,
MSM_ISP_REQUEST_BUF_VER2,
MSM_ISP_DUAL_HW_LPM_MODE,
+ MSM_ISP32_REQUEST_STREAM,
};
#define VIDIOC_MSM_VFE_REG_CFG \
@@ -941,6 +998,10 @@
_IOWR('V', MSM_ISP_REQUEST_STREAM, \
struct msm_vfe_axi_stream_request_cmd)
+#define VIDIOC_MSM_ISP32_REQUEST_STREAM \
+ _IOWR('V', MSM_ISP32_REQUEST_STREAM, \
+ struct msm_vfe32_axi_stream_request_cmd)
+
#define VIDIOC_MSM_ISP_CFG_STREAM \
_IOWR('V', MSM_ISP_CFG_STREAM, \
struct msm_vfe_axi_stream_cfg_cmd)
@@ -1038,6 +1099,8 @@
#define VIDIOC_MSM_ISP_REQUEST_BUF_VER2 \
_IOWR('V', MSM_ISP_REQUEST_BUF_VER2, struct msm_isp_buf_request_ver2)
+#define VIDIOC_MSM_ISP_BUF_DONE \
+ _IOWR('V', BASE_VIDIOC_PRIVATE+21, struct msm_isp32_event_data)
#define VIDIOC_MSM_ISP_DUAL_HW_LPM_MODE \
_IOWR('V', MSM_ISP_DUAL_HW_LPM_MODE, \
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 4db6a67..b30ca0f 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -194,7 +194,7 @@
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
- u32 index = *(u32 *)key;
+ u32 index = key ? *(u32 *)key : U32_MAX;
u32 *next = (u32 *)next_key;
if (index >= array->map.max_entries) {
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 27f4f2c..9c86d5d 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -328,12 +328,15 @@
struct hlist_head *head;
struct htab_elem *l, *next_l;
u32 hash, key_size;
- int i;
+ int i = 0;
WARN_ON_ONCE(!rcu_read_lock_held());
key_size = map->key_size;
+ if (!key)
+ goto find_first_elem;
+
hash = htab_map_hash(key, key_size);
head = select_bucket(htab, hash);
@@ -341,10 +344,8 @@
/* lookup the key */
l = lookup_elem_raw(head, hash, key, key_size);
- if (!l) {
- i = 0;
+ if (!l)
goto find_first_elem;
- }
/* key was found, get next key in the same bucket */
next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 41aa664..85ea598 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -579,14 +579,18 @@
goto err_put;
}
- err = -ENOMEM;
- key = kmalloc(map->key_size, GFP_USER);
- if (!key)
- goto err_put;
+ if (ukey) {
+ err = -ENOMEM;
+ key = kmalloc(map->key_size, GFP_USER);
+ if (!key)
+ goto err_put;
- err = -EFAULT;
- if (copy_from_user(key, ukey, map->key_size) != 0)
- goto free_key;
+ err = -EFAULT;
+ if (copy_from_user(key, ukey, map->key_size) != 0)
+ goto free_key;
+ } else {
+ key = NULL;
+ }
err = -ENOMEM;
next_key = kmalloc(map->key_size, GFP_USER);
diff --git a/kernel/cfi.c b/kernel/cfi.c
index 87053e2..6951c25 100644
--- a/kernel/cfi.c
+++ b/kernel/cfi.c
@@ -23,12 +23,12 @@
#define cfi_slowpath_handler __cfi_slowpath
#endif /* CONFIG_CFI_PERMISSIVE */
-static inline void handle_cfi_failure()
+static inline void handle_cfi_failure(void *ptr)
{
#ifdef CONFIG_CFI_PERMISSIVE
- WARN_RATELIMIT(1, "CFI failure:\n");
+ WARN_RATELIMIT(1, "CFI failure (target: [<%px>] %pF):\n", ptr, ptr);
#else
- pr_err("CFI failure:\n");
+ pr_err("CFI failure (target: [<%px>] %pF):\n", ptr, ptr);
BUG();
#endif
}
@@ -282,18 +282,18 @@
if (likely(check))
check(id, ptr, diag);
else /* Don't allow unchecked modules */
- handle_cfi_failure();
+ handle_cfi_failure(ptr);
}
EXPORT_SYMBOL(cfi_slowpath_handler);
#endif /* CONFIG_MODULES */
-void cfi_failure_handler(void *data, void *value, void *vtable)
+void cfi_failure_handler(void *data, void *ptr, void *vtable)
{
- handle_cfi_failure();
+ handle_cfi_failure(ptr);
}
EXPORT_SYMBOL(cfi_failure_handler);
-void __cfi_check_fail(void *data, void *value)
+void __cfi_check_fail(void *data, void *ptr)
{
- handle_cfi_failure();
+ handle_cfi_failure(ptr);
}
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 411226b..c265f1c 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -117,23 +117,20 @@
goto exit;
}
- if (count > 1) {
- /* If the allocation failed, give up */
- if (!callchain_cpus_entries)
- err = -ENOMEM;
- /*
- * If requesting per event more than the global cap,
- * return a different error to help userspace figure
- * this out.
- *
- * And also do it here so that we have &callchain_mutex held.
- */
- if (event_max_stack > sysctl_perf_event_max_stack)
- err = -EOVERFLOW;
+ /*
+ * If requesting per event more than the global cap,
+ * return a different error to help userspace figure
+ * this out.
+ *
+ * And also do it here so that we have &callchain_mutex held.
+ */
+ if (event_max_stack > sysctl_perf_event_max_stack) {
+ err = -EOVERFLOW;
goto exit;
}
- err = alloc_callchain_buffers();
+ if (count == 1)
+ err = alloc_callchain_buffers();
exit:
if (err)
atomic_dec(&nr_callchain_events);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2ea4eb1..349bc92 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -486,7 +486,7 @@
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
- int ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
@@ -9708,9 +9708,9 @@
* __u16 sample size limit.
*/
if (attr->sample_stack_user >= USHRT_MAX)
- ret = -EINVAL;
+ return -EINVAL;
else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
- ret = -EINVAL;
+ return -EINVAL;
}
if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 257fa46..017f793 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
+#include <linux/nospec.h>
#include "internal.h"
@@ -844,8 +845,10 @@
return NULL;
/* AUX space */
- if (pgoff >= rb->aux_pgoff)
- return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
+ if (pgoff >= rb->aux_pgoff) {
+ int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
+ return virt_to_page(rb->aux_pages[aux_pgoff]);
+ }
}
return __perf_mmap_to_page(rb, pgoff);
diff --git a/kernel/exit.c b/kernel/exit.c
index 4b4f03a..2c18194 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1680,6 +1680,10 @@
__WNOTHREAD|__WCLONE|__WALL))
return -EINVAL;
+ /* -INT_MIN is not defined */
+ if (upid == INT_MIN)
+ return -ESRCH;
+
if (upid == -1)
type = PIDTYPE_MAX;
else if (upid < 0) {
diff --git a/kernel/futex.c b/kernel/futex.c
index bb2265a..c3ea6f2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1458,6 +1458,45 @@
return ret;
}
+static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
+{
+ unsigned int op = (encoded_op & 0x70000000) >> 28;
+ unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
+ int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
+ int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
+ int oldval, ret;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
+ if (oparg < 0 || oparg > 31)
+ return -EINVAL;
+ oparg = 1 << oparg;
+ }
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
+ if (ret)
+ return ret;
+
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ:
+ return oldval == cmparg;
+ case FUTEX_OP_CMP_NE:
+ return oldval != cmparg;
+ case FUTEX_OP_CMP_LT:
+ return oldval < cmparg;
+ case FUTEX_OP_CMP_GE:
+ return oldval >= cmparg;
+ case FUTEX_OP_CMP_LE:
+ return oldval <= cmparg;
+ case FUTEX_OP_CMP_GT:
+ return oldval > cmparg;
+ default:
+ return -ENOSYS;
+ }
+}
+
/*
* Wake up all waiters hashed on the physical page that is mapped
* to this virtual address:
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fff4170..5ad731a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5710,6 +5710,9 @@
for_each_cpu(i, sched_group_cpus(sg))
state = min(state, idle_get_state_idx(cpu_rq(i)));
+ if (unlikely(state == INT_MAX))
+ return -EINVAL;
+
/* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
state++;
@@ -5776,7 +5779,7 @@
* The required scaling will be performed just one time, by the calling
* functions, once we accumulated the contributons for all the SGs.
*/
-static void calc_sg_energy(struct energy_env *eenv)
+static int calc_sg_energy(struct energy_env *eenv)
{
struct sched_group *sg = eenv->sg;
int busy_energy, idle_energy;
@@ -5805,6 +5808,11 @@
/* Compute IDLE energy */
idle_idx = group_idle_state(eenv, cpu_idx);
+ if (unlikely(idle_idx < 0))
+ return idle_idx;
+ if (idle_idx > sg->sge->nr_idle_states - 1)
+ idle_idx = sg->sge->nr_idle_states - 1;
+
idle_power = sg->sge->idle_states[idle_idx].power;
idle_energy = SCHED_CAPACITY_SCALE - sg_util;
@@ -5813,6 +5821,7 @@
total_energy = busy_energy + idle_energy;
eenv->cpu[cpu_idx].energy += total_energy;
}
+ return 0;
}
/*
@@ -5874,7 +5883,8 @@
* CPUs in the current visited SG.
*/
eenv->sg = sg;
- calc_sg_energy(eenv);
+ if (calc_sg_energy(eenv))
+ return -EINVAL;
/* remove CPUs we have just visited */
if (!sd->child) {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 063dd22..5534be1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -120,8 +120,9 @@
struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
-static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
-#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs);
+#define ftrace_ops_list_func ftrace_ops_no_ops
#endif
/*
@@ -5309,7 +5310,8 @@
__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
}
#else
-static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs)
{
__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
@@ -5735,14 +5737,17 @@
fgraph_graph_time = enable;
}
+void ftrace_graph_return_stub(struct ftrace_graph_ret *trace)
+{
+}
+
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
return 0;
}
/* The callbacks that hook a function */
-trace_func_graph_ret_t ftrace_graph_return =
- (trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ret_t ftrace_graph_return = ftrace_graph_return_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
@@ -5970,7 +5975,7 @@
goto out;
ftrace_graph_active--;
- ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+ ftrace_graph_return = ftrace_graph_return_stub;
ftrace_graph_entry = ftrace_graph_entry_stub;
__ftrace_graph_entry = ftrace_graph_entry_stub;
ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 046abb0..de11b81 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3078,13 +3078,14 @@
if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
return;
- if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
+ if (cpumask_available(iter->started) &&
+ cpumask_test_cpu(iter->cpu, iter->started))
return;
if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
return;
- if (iter->started)
+ if (cpumask_available(iter->started))
cpumask_set_cpu(iter->cpu, iter->started);
/* Don't print started cpu buffer for the first entry of the trace */
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0193f58..e35a411 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -322,6 +322,9 @@
static int regex_match_front(char *str, struct regex *r, int len)
{
+ if (len < r->len)
+ return 0;
+
if (strncmp(str, r->pattern, r->len) == 0)
return 1;
return 0;
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index bc6c6ec..83afbf2 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -149,6 +149,8 @@
return;
ret = strncpy_from_user(dst, src, maxlen);
+ if (ret == maxlen)
+ dst[--ret] = '\0';
if (ret < 0) { /* Failed to fetch string */
((u8 *)get_rloc_data(dest))[0] = '\0';
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index d0639d9..c8e7cc0 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -202,7 +202,7 @@
lockdep_is_held(&tracepoints_mutex));
old = func_add(&tp_funcs, func, prio);
if (IS_ERR(old)) {
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
return PTR_ERR(old);
}
@@ -235,7 +235,7 @@
lockdep_is_held(&tracepoints_mutex));
old = func_remove(&tp_funcs, func);
if (IS_ERR(old)) {
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
return PTR_ERR(old);
}
diff --git a/lib/kobject.c b/lib/kobject.c
index 763d70a..34f8472 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -234,14 +234,12 @@
/* be noisy on error issues */
if (error == -EEXIST)
- WARN(1, "%s failed for %s with "
- "-EEXIST, don't try to register things with "
- "the same name in the same directory.\n",
- __func__, kobject_name(kobj));
+ pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
+ __func__, kobject_name(kobj));
else
- WARN(1, "%s failed for %s (error: %d parent: %s)\n",
- __func__, kobject_name(kobj), error,
- parent ? kobject_name(parent) : "'none'");
+ pr_err("%s failed for %s (error: %d parent: %s)\n",
+ __func__, kobject_name(kobj), error,
+ parent ? kobject_name(parent) : "'none'");
} else
kobj->state_in_sysfs = 1;
diff --git a/mm/gup.c b/mm/gup.c
index 6c3b4e8..be4ccdd 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -430,6 +430,9 @@
if (vm_flags & (VM_IO | VM_PFNMAP))
return -EFAULT;
+ if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
+ return -EFAULT;
+
if (write) {
if (!(vm_flags & VM_WRITE)) {
if (!(gup_flags & FOLL_FORCE))
diff --git a/mm/percpu.c b/mm/percpu.c
index f014ceb..3794cfc 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -70,6 +70,7 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
+#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
diff --git a/mm/swap.c b/mm/swap.c
index 4dcf852..6f22754 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -208,9 +208,10 @@
{
int *pgmoved = arg;
- if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- enum lru_list lru = page_lru_base_type(page);
- list_move_tail(&page->lru, &lruvec->lists[lru]);
+ if (PageLRU(page) && !PageUnevictable(page)) {
+ del_page_from_lru_list(page, lruvec, page_lru(page));
+ ClearPageActive(page);
+ add_page_to_lru_list_tail(page, lruvec, page_lru(page));
(*pgmoved)++;
}
}
@@ -234,7 +235,7 @@
*/
void rotate_reclaimable_page(struct page *page)
{
- if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
+ if (!PageLocked(page) && !PageDirty(page) &&
!PageUnevictable(page) && PageLRU(page)) {
struct pagevec *pvec;
unsigned long flags;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c5b94d61..c8e300c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -87,6 +87,7 @@
/* The highest zone to isolate pages for reclaim from */
enum zone_type reclaim_idx;
+ /* Writepage batching in laptop mode; RECLAIM_WRITE */
unsigned int may_writepage:1;
/* Can mapped pages be reclaimed? */
@@ -1057,6 +1058,15 @@
* throttling so we could easily OOM just because too many
* pages are in writeback and there is nothing else to
* reclaim. Wait for the writeback to complete.
+ *
+ * In cases 1) and 2) we activate the pages to get them out of
+ * the way while we continue scanning for clean pages on the
+ * inactive list and refilling from the active list. The
+ * observation here is that waiting for disk writes is more
+ * expensive than potentially causing reloads down the line.
+ * Since they're marked for immediate reclaim, they won't put
+ * memory pressure on the cache working set any longer than it
+ * takes to write them to disk.
*/
if (PageWriteback(page)) {
/* Case 1 above */
@@ -1064,7 +1074,7 @@
PageReclaim(page) &&
(pgdat && test_bit(PGDAT_WRITEBACK, &pgdat->flags))) {
nr_immediate++;
- goto keep_locked;
+ goto activate_locked;
/* Case 2 above */
} else if (sane_reclaim(sc) ||
@@ -1082,7 +1092,7 @@
*/
SetPageReclaim(page);
nr_writeback++;
- goto keep_locked;
+ goto activate_locked;
/* Case 3 above */
} else {
@@ -1153,14 +1163,18 @@
if (PageDirty(page)) {
/*
- * Only kswapd can writeback filesystem pages to
- * avoid risk of stack overflow but only writeback
- * if many dirty pages have been encountered.
+ * Only kswapd can writeback filesystem pages
+ * to avoid risk of stack overflow. But avoid
+ * injecting inefficient single-page IO into
+ * flusher writeback as much as possible: only
+ * write pages when we've encountered many
+ * dirty pages, and when we've already scanned
+ * the rest of the LRU for clean pages and see
+ * the same dirty pages again (PageReclaim).
*/
if (page_is_file_cache(page) &&
- (!current_is_kswapd() ||
- (pgdat &&
- !test_bit(PGDAT_DIRTY, &pgdat->flags)))) {
+ (!current_is_kswapd() || !PageReclaim(page) ||
+ !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
/*
* Immediately reclaim when written back.
* Similar in principal to deactivate_page()
@@ -1170,7 +1184,7 @@
inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
SetPageReclaim(page);
- goto keep_locked;
+ goto activate_locked;
}
if (references == PAGEREF_RECLAIM_CLEAN)
@@ -1416,13 +1430,10 @@
* wants to isolate pages it will be able to operate on without
* blocking - clean pages for the most part.
*
- * ISOLATE_CLEAN means that only clean pages should be isolated. This
- * is used by reclaim when it is cannot write to backing storage
- *
* ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
* that it is possible to migrate without blocking
*/
- if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
+ if (mode & ISOLATE_ASYNC_MIGRATE) {
/* All the caller can do on PageWriteback is block */
if (PageWriteback(page))
return ret;
@@ -1430,10 +1441,6 @@
if (PageDirty(page)) {
struct address_space *mapping;
- /* ISOLATE_CLEAN means only clean pages */
- if (mode & ISOLATE_CLEAN)
- return ret;
-
/*
* Only pages without mappings or that have a
* ->migratepage callback are possible to migrate
@@ -1831,8 +1838,6 @@
if (!sc->may_unmap)
isolate_mode |= ISOLATE_UNMAPPED;
- if (!sc->may_writepage)
- isolate_mode |= ISOLATE_CLEAN;
spin_lock_irq(&pgdat->lru_lock);
@@ -1894,6 +1899,20 @@
set_bit(PGDAT_WRITEBACK, &pgdat->flags);
/*
+ * If dirty pages are scanned that are not queued for IO, it
+ * implies that flushers are not doing their job. This can
+ * happen when memory pressure pushes dirty pages to the end of
+ * the LRU before the dirty limits are breached and the dirty
+ * data has expired. It can also happen when the proportion of
+ * dirty pages grows not through writes but through memory
+ * pressure reclaiming all the clean cache. And in some cases,
+ * the flushers simply cannot keep up with the allocation
+ * rate. Nudge the flusher threads in case they are asleep.
+ */
+ if (nr_unqueued_dirty == nr_taken)
+ wakeup_flusher_threads(0, WB_REASON_VMSCAN);
+
+ /*
* Legacy memcg will stall in page writeback so avoid forcibly
* stalling here.
*/
@@ -1905,12 +1924,7 @@
if (nr_dirty && nr_dirty == nr_congested)
set_bit(PGDAT_CONGESTED, &pgdat->flags);
- /*
- * If dirty pages are scanned that are not queued for IO, it
- * implies that flushers are not keeping up. In this case, flag
- * the pgdat PGDAT_DIRTY and kswapd will start writing pages from
- * reclaim context.
- */
+ /* Allow kswapd to start writing pages during reclaim. */
if (nr_unqueued_dirty == nr_taken)
set_bit(PGDAT_DIRTY, &pgdat->flags);
@@ -2020,8 +2034,6 @@
if (!sc->may_unmap)
isolate_mode |= ISOLATE_UNMAPPED;
- if (!sc->may_writepage)
- isolate_mode |= ISOLATE_CLEAN;
spin_lock_irq(&pgdat->lru_lock);
@@ -2823,8 +2835,6 @@
struct scan_control *sc)
{
int initial_priority = sc->priority;
- unsigned long total_scanned = 0;
- unsigned long writeback_threshold;
retry:
delayacct_freepages_start();
@@ -2837,7 +2847,6 @@
sc->nr_scanned = 0;
shrink_zones(zonelist, sc);
- total_scanned += sc->nr_scanned;
if (sc->nr_reclaimed >= sc->nr_to_reclaim)
break;
@@ -2850,20 +2859,6 @@
*/
if (sc->priority < DEF_PRIORITY - 2)
sc->may_writepage = 1;
-
- /*
- * Try to write back as many pages as we just scanned. This
- * tends to cause slow streaming writers to write data to the
- * disk smoothly, at the dirtying rate, which is nice. But
- * that's undesirable in laptop mode, where we *want* lumpy
- * writeout. So in laptop mode, write out the whole world.
- */
- writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
- if (total_scanned > writeback_threshold) {
- wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
- WB_REASON_TRY_TO_FREE_PAGES);
- sc->may_writepage = 1;
- }
} while (--sc->priority >= 0);
delayacct_freepages_end();
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5d26938..1e84c52 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -41,6 +41,9 @@
#include <linux/module.h>
#include <linux/init.h>
+/* Hardening for Spectre-v1 */
+#include <linux/nospec.h>
+
#include "lec.h"
#include "lec_arpc.h"
#include "resources.h"
@@ -697,8 +700,10 @@
bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
if (bytes_left != 0)
pr_info("copy from user failed for %d bytes\n", bytes_left);
- if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
- !dev_lec[ioc_data.dev_num])
+ if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
+ return -EINVAL;
+ ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF);
+ if (!dev_lec[ioc_data.dev_num])
return -EINVAL;
vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
if (!vpriv)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 9218931..f57de0a 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -504,8 +504,8 @@
if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
return -ELOOP;
- /* Device is already being bridged */
- if (br_port_exists(dev))
+ /* Device has master upper dev */
+ if (netdev_master_upper_dev_get(dev))
return -EBUSY;
/* No bridging devices that dislike that (e.g. wireless) */
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 25a30be..98ea28d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2512,6 +2512,11 @@
int ret = 1;
dout("try_write start %p state %lu\n", con, con->state);
+ if (con->state != CON_STATE_PREOPEN &&
+ con->state != CON_STATE_CONNECTING &&
+ con->state != CON_STATE_NEGOTIATING &&
+ con->state != CON_STATE_OPEN)
+ return 0;
more:
dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
@@ -2537,6 +2542,8 @@
}
more_kvec:
+ BUG_ON(!con->sock);
+
/* kvec data queued? */
if (con->out_kvec_left) {
ret = write_partial_kvec(con);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index a8effc8..5004810 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -209,6 +209,14 @@
__open_session(monc);
}
+static void un_backoff(struct ceph_mon_client *monc)
+{
+ monc->hunt_mult /= 2; /* reduce by 50% */
+ if (monc->hunt_mult < 1)
+ monc->hunt_mult = 1;
+ dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult);
+}
+
/*
* Reschedule delayed work timer.
*/
@@ -955,6 +963,7 @@
if (!monc->hunting) {
ceph_con_keepalive(&monc->con);
__validate_auth(monc);
+ un_backoff(monc);
}
if (is_auth) {
@@ -1114,9 +1123,8 @@
dout("%s found mon%d\n", __func__, monc->cur_mon);
monc->hunting = false;
monc->had_a_connection = true;
- monc->hunt_mult /= 2; /* reduce by 50% */
- if (monc->hunt_mult < 1)
- monc->hunt_mult = 1;
+ un_backoff(monc);
+ __schedule_delayed(monc);
}
}
diff --git a/net/compat.c b/net/compat.c
index a96fd2f..73671e6 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -372,7 +372,8 @@
optname == SO_ATTACH_REUSEPORT_CBPF)
return do_set_attach_filter(sock, level, optname,
optval, optlen);
- if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+ if (!COMPAT_USE_64BIT_TIME &&
+ (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
return do_set_sock_timeout(sock, level, optname, optval, optlen);
return sock_setsockopt(sock, level, optname, optval, optlen);
@@ -437,7 +438,8 @@
static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
- if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+ if (!COMPAT_USE_64BIT_TIME &&
+ (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
return do_get_sock_timeout(sock, level, optname, optval, optlen);
return sock_getsockopt(sock, level, optname, optval, optlen);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 802b3fa..c6a8932 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2873,7 +2873,7 @@
}
EXPORT_SYMBOL(passthru_features_check);
-static netdev_features_t dflt_features_check(const struct sk_buff *skb,
+static netdev_features_t dflt_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index c0548d2..e3e6a3e 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -57,8 +57,8 @@
return -EINVAL;
list_for_each_entry(ha, &list->list, list) {
- if (!memcmp(ha->addr, addr, addr_len) &&
- ha->type == addr_type) {
+ if (ha->type == addr_type &&
+ !memcmp(ha->addr, addr, addr_len)) {
if (global) {
/* check if addr is already used as global */
if (ha->global_use)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index cb9a16b..340a3db 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -54,7 +54,8 @@
static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+ struct net_device *dev);
static unsigned int neigh_probe_enable;
#ifdef CONFIG_PROC_FS
@@ -255,8 +256,7 @@
{
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev);
- pneigh_ifdown(tbl, dev);
- write_unlock_bh(&tbl->lock);
+ pneigh_ifdown_and_unlock(tbl, dev);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
@@ -646,9 +646,10 @@
return -ENOENT;
}
-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+ struct net_device *dev)
{
- struct pneigh_entry *n, **np;
+ struct pneigh_entry *n, **np, *freelist = NULL;
u32 h;
for (h = 0; h <= PNEIGH_HASHMASK; h++) {
@@ -656,16 +657,23 @@
while ((n = *np) != NULL) {
if (!dev || n->dev == dev) {
*np = n->next;
- if (tbl->pdestructor)
- tbl->pdestructor(n);
- if (n->dev)
- dev_put(n->dev);
- kfree(n);
+ n->next = freelist;
+ freelist = n;
continue;
}
np = &n->next;
}
}
+ write_unlock_bh(&tbl->lock);
+ while ((n = freelist)) {
+ freelist = n->next;
+ n->next = NULL;
+ if (tbl->pdestructor)
+ tbl->pdestructor(n);
+ if (n->dev)
+ dev_put(n->dev);
+ kfree(n);
+ }
return -ENOENT;
}
@@ -2299,12 +2307,16 @@
err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
if (!err) {
- if (tb[NDA_IFINDEX])
+ if (tb[NDA_IFINDEX]) {
+ if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
+ return -EINVAL;
filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
-
- if (tb[NDA_MASTER])
+ }
+ if (tb[NDA_MASTER]) {
+ if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
+ return -EINVAL;
filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
-
+ }
if (filter_idx || filter_master_idx)
flags |= NLM_F_DUMP_FILTERED;
}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 457f882..9b2d611 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -666,7 +666,7 @@
int err;
rtnl_lock();
- if (np->dev_name) {
+ if (np->dev_name[0]) {
struct net *net = current->nsproxy->net_ns;
ndev = __dev_get_by_name(net, np->dev_name);
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e2136eb..9c2e60e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -909,6 +909,7 @@
n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
n->cloned = 1;
n->nohdr = 0;
+ n->peeked = 0;
n->destructor = NULL;
C(tail);
C(end);
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 7753681..86a2ed0 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -126,6 +126,16 @@
DCCPF_SEQ_WMAX));
}
+static void dccp_tasklet_schedule(struct sock *sk)
+{
+ struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
+
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+ sock_hold(sk);
+ __tasklet_schedule(t);
+ }
+}
+
static void ccid2_hc_tx_rto_expire(unsigned long data)
{
struct sock *sk = (struct sock *)data;
@@ -166,7 +176,7 @@
/* if we were blocked before, we may now send cwnd=1 packet */
if (sender_was_blocked)
- tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+ dccp_tasklet_schedule(sk);
/* restart backed-off timer */
sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
out:
@@ -706,7 +716,7 @@
done:
/* check if incoming Acks allow pending packets to be sent */
if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
- tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+ dccp_tasklet_schedule(sk);
dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 8c7799cd..6697b18 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -620,6 +620,7 @@
ireq = inet_rsk(req);
sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+ ireq->ir_mark = inet_request_mark(sk, skb);
ireq->ireq_family = AF_INET;
ireq->ir_iif = sk->sk_bound_dev_if;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 28e8252..6cbcf39 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -349,6 +349,7 @@
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
ireq->ireq_family = AF_INET6;
+ ireq->ir_mark = inet_request_mark(sk, skb);
if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 3a2c340..2a952cb 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -230,12 +230,12 @@
else
dccp_write_xmit(sk);
bh_unlock_sock(sk);
+ sock_put(sk);
}
static void dccp_write_xmit_timer(unsigned long data)
{
dccp_write_xmitlet(data);
- sock_put((struct sock *)data);
}
void dccp_init_xmit_timers(struct sock *sk)
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index e1d4d89..f025276 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -25,6 +25,7 @@
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/ratelimit.h>
#include <linux/kernel.h>
#include <linux/keyctl.h>
#include <linux/err.h>
@@ -91,9 +92,9 @@
next_opt = memchr(opt, '#', end - opt) ?: end;
opt_len = next_opt - opt;
- if (!opt_len) {
- printk(KERN_WARNING
- "Empty option to dns_resolver key\n");
+ if (opt_len <= 0 || opt_len > 128) {
+ pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
+ opt_len);
return -EINVAL;
}
@@ -127,10 +128,8 @@
}
bad_option_value:
- printk(KERN_WARNING
- "Option '%*.*s' to dns_resolver key:"
- " bad/missing value\n",
- opt_nlen, opt_nlen, opt);
+ pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
+ opt_nlen, opt_nlen, opt);
return -EINVAL;
} while (opt = next_opt + 1, opt < end);
}
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index ddcd56c..a6b34ac 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -182,6 +182,7 @@
tw->tw_dport = inet->inet_dport;
tw->tw_family = sk->sk_family;
tw->tw_reuse = sk->sk_reuse;
+ tw->tw_reuseport = sk->sk_reuseport;
tw->tw_hash = sk->sk_hash;
tw->tw_ipv6only = 0;
tw->tw_transparent = inet->transparent;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 93bfadf..8fa153c 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -775,8 +775,10 @@
ipc.addr = faddr = daddr;
if (ipc.opt && ipc.opt->opt.srr) {
- if (!daddr)
- return -EINVAL;
+ if (!daddr) {
+ err = -EINVAL;
+ goto out_free;
+ }
faddr = ipc.opt->opt.faddr;
}
tos = get_rttos(&ipc, inet);
@@ -842,6 +844,7 @@
out:
ip_rt_put(rt);
+out_free:
if (free)
kfree(ipc.opt);
if (!err) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index af0b324..fdfaaf0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1146,7 +1146,8 @@
lock_sock(sk);
flags = msg->msg_flags;
- if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
+ if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
+ !tp->repair) {
err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
if (err == -EINPROGRESS && copied_syn > 0)
goto out;
@@ -2558,7 +2559,7 @@
case TCP_REPAIR_QUEUE:
if (!tp->repair)
err = -EPERM;
- else if (val < TCP_QUEUES_NR)
+ else if ((unsigned int)val < TCP_QUEUES_NR)
tp->repair_queue = val;
else
err = -EINVAL;
@@ -2697,8 +2698,10 @@
#ifdef CONFIG_TCP_MD5SIG
case TCP_MD5SIG:
- /* Read the IP->Key mappings from userspace */
- err = tp->af_specific->md5_parse(sk, optval, optlen);
+ if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
+ err = tp->af_specific->md5_parse(sk, optval, optlen);
+ else
+ err = -EINVAL;
break;
#endif
case TCP_USER_TIMEOUT:
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 8ec6053..91698595 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -773,7 +773,9 @@
}
}
}
- bbr->idle_restart = 0;
+ /* Restart after idle ends only once we process a new S/ACK for data */
+ if (rs->delivered > 0)
+ bbr->idle_restart = 0;
}
static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 16d3619..c2ad59d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3944,11 +3944,8 @@
int length = (th->doff << 2) - sizeof(*th);
const u8 *ptr = (const u8 *)(th + 1);
- /* If the TCP option is too short, we can short cut */
- if (length < TCPOLEN_MD5SIG)
- return NULL;
-
- while (length > 0) {
+ /* If not enough data remaining, we can short cut */
+ while (length >= TCPOLEN_MD5SIG) {
int opcode = *ptr++;
int opsize;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5af27b9..885cc39 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -987,8 +987,10 @@
sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
if (ipc.opt && ipc.opt->opt.srr) {
- if (!daddr)
- return -EINVAL;
+ if (!daddr) {
+ err = -EINVAL;
+ goto out_free;
+ }
faddr = ipc.opt->opt.faddr;
connected = 0;
}
@@ -1096,6 +1098,7 @@
out:
ip_rt_put(rt);
+out_free:
if (free)
kfree(ipc.opt);
if (!err)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 96be019..0fbc5ba 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2776,6 +2776,7 @@
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
[RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
+ [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
[RTA_OIF] = { .type = NLA_U32 },
[RTA_IIF] = { .type = NLA_U32 },
[RTA_PRIORITY] = { .type = NLA_U32 },
@@ -2786,6 +2787,7 @@
[RTA_ENCAP] = { .type = NLA_NESTED },
[RTA_EXPIRES] = { .type = NLA_U32 },
[RTA_UID] = { .type = NLA_U32 },
+ [RTA_TABLE] = { .type = NLA_U32 },
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 63e6d08..cc306de 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1424,6 +1424,7 @@
*/
if (csk->sk_user_data) {
write_unlock_bh(&csk->sk_callback_lock);
+ strp_stop(&psock->strp);
strp_done(&psock->strp);
kmem_cache_free(kcm_psockp, psock);
err = -EALREADY;
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index ce12384..ee03bc8 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -750,8 +750,6 @@
if ((session->ifname[0] &&
nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
- (session->offset &&
- nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) ||
(session->cookie_len &&
nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
&session->cookie[0])) ||
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 163f1fa..9b214f3 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -590,6 +590,13 @@
lock_sock(sk);
error = -EINVAL;
+
+ if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
+ sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
+ sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
+ sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
+ goto end;
+
if (sp->sa_protocol != PX_PROTO_OL2TP)
goto end;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index f7caf0f..85aae8c 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -197,9 +197,19 @@
llc->laddr.lsap, llc->daddr.lsap);
if (!llc_send_disc(sk))
llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
- if (!sock_flag(sk, SOCK_ZAPPED))
+ if (!sock_flag(sk, SOCK_ZAPPED)) {
+ struct llc_sap *sap = llc->sap;
+
+ /* Hold this for release_sock(), so that llc_backlog_rcv()
+ * could still use it.
+ */
+ llc_sap_hold(sap);
llc_sap_remove_socket(llc->sap, sk);
- release_sock(sk);
+ release_sock(sk);
+ llc_sap_put(sap);
+ } else {
+ release_sock(sk);
+ }
if (llc->dev)
dev_put(llc->dev);
sock_put(sk);
@@ -916,6 +926,9 @@
if (size > llc->dev->mtu)
size = llc->dev->mtu;
copied = size - hdrlen;
+ rc = -EINVAL;
+ if (copied < 0)
+ goto release;
release_sock(sk);
skb = sock_alloc_send_skb(sk, size, noblock, &rc);
lock_sock(sk);
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index ea225bd..f8d4ab8 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1096,14 +1096,7 @@
int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
{
- struct llc_sock *llc = llc_sk(sk);
-
- del_timer(&llc->pf_cycle_timer.timer);
- del_timer(&llc->ack_timer.timer);
- del_timer(&llc->rej_sent_timer.timer);
- del_timer(&llc->busy_state_timer.timer);
- llc->ack_must_be_send = 0;
- llc->ack_pf = 0;
+ llc_sk_stop_all_timers(sk, false);
return 0;
}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 8bc5a1b..d861b74 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -951,6 +951,26 @@
return sk;
}
+void llc_sk_stop_all_timers(struct sock *sk, bool sync)
+{
+ struct llc_sock *llc = llc_sk(sk);
+
+ if (sync) {
+ del_timer_sync(&llc->pf_cycle_timer.timer);
+ del_timer_sync(&llc->ack_timer.timer);
+ del_timer_sync(&llc->rej_sent_timer.timer);
+ del_timer_sync(&llc->busy_state_timer.timer);
+ } else {
+ del_timer(&llc->pf_cycle_timer.timer);
+ del_timer(&llc->ack_timer.timer);
+ del_timer(&llc->rej_sent_timer.timer);
+ del_timer(&llc->busy_state_timer.timer);
+ }
+
+ llc->ack_must_be_send = 0;
+ llc->ack_pf = 0;
+}
+
/**
* llc_sk_free - Frees a LLC socket
* @sk - socket to free
@@ -963,7 +983,7 @@
llc->state = LLC_CONN_OUT_OF_SVC;
/* Stop all (possibly) running timers */
- llc_conn_ac_stop_all_timers(sk, NULL);
+ llc_sk_stop_all_timers(sk, true);
#ifdef DEBUG_LLC_CONN_ALLOC
printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
skb_queue_len(&llc->pdu_unack_q),
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 74d1195..c5f2350 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2393,11 +2393,7 @@
strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
sizeof(cfg.mcast_ifn));
cfg.syncid = dm->syncid;
- rtnl_lock();
- mutex_lock(&ipvs->sync_mutex);
ret = start_sync_thread(ipvs, &cfg, dm->state);
- mutex_unlock(&ipvs->sync_mutex);
- rtnl_unlock();
} else {
mutex_lock(&ipvs->sync_mutex);
ret = stop_sync_thread(ipvs, dm->state);
@@ -3495,12 +3491,8 @@
if (ipvs->mixed_address_family_dests > 0)
return -EINVAL;
- rtnl_lock();
- mutex_lock(&ipvs->sync_mutex);
ret = start_sync_thread(ipvs, &c,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
- mutex_unlock(&ipvs->sync_mutex);
- rtnl_unlock();
return ret;
}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 9350530..5fbf4b2 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -48,6 +48,7 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */
@@ -1359,15 +1360,9 @@
/*
* Specifiy default interface for outgoing multicasts
*/
-static int set_mcast_if(struct sock *sk, char *ifname)
+static int set_mcast_if(struct sock *sk, struct net_device *dev)
{
- struct net_device *dev;
struct inet_sock *inet = inet_sk(sk);
- struct net *net = sock_net(sk);
-
- dev = __dev_get_by_name(net, ifname);
- if (!dev)
- return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
@@ -1395,19 +1390,14 @@
* in the in_addr structure passed in as a parameter.
*/
static int
-join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
+join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
{
- struct net *net = sock_net(sk);
struct ip_mreqn mreq;
- struct net_device *dev;
int ret;
memset(&mreq, 0, sizeof(mreq));
memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
- dev = __dev_get_by_name(net, ifname);
- if (!dev)
- return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
@@ -1422,15 +1412,10 @@
#ifdef CONFIG_IP_VS_IPV6
static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
- char *ifname)
+ struct net_device *dev)
{
- struct net *net = sock_net(sk);
- struct net_device *dev;
int ret;
- dev = __dev_get_by_name(net, ifname);
- if (!dev)
- return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
@@ -1442,24 +1427,18 @@
}
#endif
-static int bind_mcastif_addr(struct socket *sock, char *ifname)
+static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
{
- struct net *net = sock_net(sock->sk);
- struct net_device *dev;
__be32 addr;
struct sockaddr_in sin;
- dev = __dev_get_by_name(net, ifname);
- if (!dev)
- return -ENODEV;
-
addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
if (!addr)
pr_err("You probably need to specify IP address on "
"multicast interface.\n");
IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
- ifname, &addr);
+ dev->name, &addr);
/* Now bind the socket with the address of multicast interface */
sin.sin_family = AF_INET;
@@ -1492,7 +1471,8 @@
/*
* Set up sending multicast socket over UDP
*/
-static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
+static int make_send_sock(struct netns_ipvs *ipvs, int id,
+ struct net_device *dev, struct socket **sock_ret)
{
/* multicast addr */
union ipvs_sockaddr mcast_addr;
@@ -1504,9 +1484,10 @@
IPPROTO_UDP, &sock);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
- return ERR_PTR(result);
+ goto error;
}
- result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
+ *sock_ret = sock;
+ result = set_mcast_if(sock->sk, dev);
if (result < 0) {
pr_err("Error setting outbound mcast interface\n");
goto error;
@@ -1521,7 +1502,7 @@
set_sock_size(sock->sk, 1, result);
if (AF_INET == ipvs->mcfg.mcast_af)
- result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
+ result = bind_mcastif_addr(sock, dev);
else
result = 0;
if (result < 0) {
@@ -1537,19 +1518,18 @@
goto error;
}
- return sock;
+ return 0;
error:
- sock_release(sock);
- return ERR_PTR(result);
+ return result;
}
/*
* Set up receiving multicast socket over UDP
*/
-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
- int ifindex)
+static int make_receive_sock(struct netns_ipvs *ipvs, int id,
+ struct net_device *dev, struct socket **sock_ret)
{
/* multicast addr */
union ipvs_sockaddr mcast_addr;
@@ -1561,8 +1541,9 @@
IPPROTO_UDP, &sock);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
- return ERR_PTR(result);
+ goto error;
}
+ *sock_ret = sock;
/* it is equivalent to the REUSEADDR option in user-space */
sock->sk->sk_reuse = SK_CAN_REUSE;
result = sysctl_sync_sock_size(ipvs);
@@ -1570,7 +1551,7 @@
set_sock_size(sock->sk, 0, result);
get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
- sock->sk->sk_bound_dev_if = ifindex;
+ sock->sk->sk_bound_dev_if = dev->ifindex;
result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
if (result < 0) {
pr_err("Error binding to the multicast addr\n");
@@ -1581,21 +1562,20 @@
#ifdef CONFIG_IP_VS_IPV6
if (ipvs->bcfg.mcast_af == AF_INET6)
result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
- ipvs->bcfg.mcast_ifn);
+ dev);
else
#endif
result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
- ipvs->bcfg.mcast_ifn);
+ dev);
if (result < 0) {
pr_err("Error joining to the multicast group\n");
goto error;
}
- return sock;
+ return 0;
error:
- sock_release(sock);
- return ERR_PTR(result);
+ return result;
}
@@ -1780,13 +1760,12 @@
int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
int state)
{
- struct ip_vs_sync_thread_data *tinfo;
+ struct ip_vs_sync_thread_data *tinfo = NULL;
struct task_struct **array = NULL, *task;
- struct socket *sock;
struct net_device *dev;
char *name;
int (*threadfn)(void *data);
- int id, count, hlen;
+ int id = 0, count, hlen;
int result = -ENOMEM;
u16 mtu, min_mtu;
@@ -1794,6 +1773,18 @@
IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
sizeof(struct ip_vs_sync_conn_v0));
+ /* Do not hold one mutex and then to block on another */
+ for (;;) {
+ rtnl_lock();
+ if (mutex_trylock(&ipvs->sync_mutex))
+ break;
+ rtnl_unlock();
+ mutex_lock(&ipvs->sync_mutex);
+ if (rtnl_trylock())
+ break;
+ mutex_unlock(&ipvs->sync_mutex);
+ }
+
if (!ipvs->sync_state) {
count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
ipvs->threads_mask = count - 1;
@@ -1812,7 +1803,8 @@
dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
if (!dev) {
pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
- return -ENODEV;
+ result = -ENODEV;
+ goto out_early;
}
hlen = (AF_INET6 == c->mcast_af) ?
sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
@@ -1829,26 +1821,30 @@
c->sync_maxlen = mtu - hlen;
if (state == IP_VS_STATE_MASTER) {
+ result = -EEXIST;
if (ipvs->ms)
- return -EEXIST;
+ goto out_early;
ipvs->mcfg = *c;
name = "ipvs-m:%d:%d";
threadfn = sync_thread_master;
} else if (state == IP_VS_STATE_BACKUP) {
+ result = -EEXIST;
if (ipvs->backup_threads)
- return -EEXIST;
+ goto out_early;
ipvs->bcfg = *c;
name = "ipvs-b:%d:%d";
threadfn = sync_thread_backup;
} else {
- return -EINVAL;
+ result = -EINVAL;
+ goto out_early;
}
if (state == IP_VS_STATE_MASTER) {
struct ipvs_master_sync_state *ms;
+ result = -ENOMEM;
ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL);
if (!ipvs->ms)
goto out;
@@ -1864,39 +1860,38 @@
} else {
array = kzalloc(count * sizeof(struct task_struct *),
GFP_KERNEL);
+ result = -ENOMEM;
if (!array)
goto out;
}
- tinfo = NULL;
for (id = 0; id < count; id++) {
- if (state == IP_VS_STATE_MASTER)
- sock = make_send_sock(ipvs, id);
- else
- sock = make_receive_sock(ipvs, id, dev->ifindex);
- if (IS_ERR(sock)) {
- result = PTR_ERR(sock);
- goto outtinfo;
- }
+ result = -ENOMEM;
tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
if (!tinfo)
- goto outsocket;
+ goto out;
tinfo->ipvs = ipvs;
- tinfo->sock = sock;
+ tinfo->sock = NULL;
if (state == IP_VS_STATE_BACKUP) {
tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
GFP_KERNEL);
if (!tinfo->buf)
- goto outtinfo;
+ goto out;
} else {
tinfo->buf = NULL;
}
tinfo->id = id;
+ if (state == IP_VS_STATE_MASTER)
+ result = make_send_sock(ipvs, id, dev, &tinfo->sock);
+ else
+ result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
+ if (result < 0)
+ goto out;
task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
if (IS_ERR(task)) {
result = PTR_ERR(task);
- goto outtinfo;
+ goto out;
}
tinfo = NULL;
if (state == IP_VS_STATE_MASTER)
@@ -1913,20 +1908,20 @@
ipvs->sync_state |= state;
spin_unlock_bh(&ipvs->sync_buff_lock);
+ mutex_unlock(&ipvs->sync_mutex);
+ rtnl_unlock();
+
/* increase the module use count */
ip_vs_use_count_inc();
return 0;
-outsocket:
- sock_release(sock);
-
-outtinfo:
- if (tinfo) {
- sock_release(tinfo->sock);
- kfree(tinfo->buf);
- kfree(tinfo);
- }
+out:
+ /* We do not need RTNL lock anymore, release it here so that
+ * sock_release below and in the kthreads can use rtnl_lock
+ * to leave the mcast group.
+ */
+ rtnl_unlock();
count = id;
while (count-- > 0) {
if (state == IP_VS_STATE_MASTER)
@@ -1934,13 +1929,23 @@
else
kthread_stop(array[count]);
}
- kfree(array);
-
-out:
if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
kfree(ipvs->ms);
ipvs->ms = NULL;
}
+ mutex_unlock(&ipvs->sync_mutex);
+ if (tinfo) {
+ if (tinfo->sock)
+ sock_release(tinfo->sock);
+ kfree(tinfo->buf);
+ kfree(tinfo);
+ }
+ kfree(array);
+ return result;
+
+out_early:
+ mutex_unlock(&ipvs->sync_mutex);
+ rtnl_unlock();
return result;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1e97b8d..15e6e7b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1795,6 +1795,8 @@
if (msg->msg_namelen) {
err = -EINVAL;
+ if (msg->msg_namelen < sizeof(struct sockaddr_nl))
+ goto out;
if (addr->nl_family != AF_NETLINK)
goto out;
dst_portid = addr->nl_pid;
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 1668916..326945d 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1296,13 +1296,10 @@
/* The nlattr stream should already have been validated */
nla_for_each_nested(nla, attr, rem) {
- if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
- if (tbl[nla_type(nla)].next)
- tbl = tbl[nla_type(nla)].next;
- nlattr_set(nla, val, tbl);
- } else {
+ if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
+ nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
+ else
memset(nla_data(nla), val, nla_len(nla));
- }
if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
*(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f15f08b..8f78d14 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -333,11 +333,11 @@
skb_set_queue_mapping(skb, queue_index);
}
-/* register_prot_hook must be invoked with the po->bind_lock held,
+/* __register_prot_hook must be invoked through register_prot_hook
* or from a context in which asynchronous accesses to the packet
* socket is not possible (packet_create()).
*/
-static void register_prot_hook(struct sock *sk)
+static void __register_prot_hook(struct sock *sk)
{
struct packet_sock *po = pkt_sk(sk);
@@ -352,8 +352,13 @@
}
}
-/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
- * held. If the sync parameter is true, we will temporarily drop
+static void register_prot_hook(struct sock *sk)
+{
+ lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
+ __register_prot_hook(sk);
+}
+
+/* If the sync parameter is true, we will temporarily drop
* the po->bind_lock and do a synchronize_net to make sure no
* asynchronous packet processing paths still refer to the elements
* of po->prot_hook. If the sync parameter is false, it is the
@@ -363,6 +368,8 @@
{
struct packet_sock *po = pkt_sk(sk);
+ lockdep_assert_held_once(&po->bind_lock);
+
po->running = 0;
if (po->fanout)
@@ -3017,6 +3024,7 @@
packet_flush_mclist(sk);
+ lock_sock(sk);
if (po->rx_ring.pg_vec) {
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 0);
@@ -3026,6 +3034,7 @@
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 1);
}
+ release_sock(sk);
f = fanout_release(sk);
@@ -3259,7 +3268,7 @@
if (proto) {
po->prot_hook.type = proto;
- register_prot_hook(sk);
+ __register_prot_hook(sk);
}
mutex_lock(&net->packet.sklist_lock);
@@ -3654,6 +3663,7 @@
union tpacket_req_u req_u;
int len;
+ lock_sock(sk);
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
@@ -3664,12 +3674,17 @@
len = sizeof(req_u.req3);
break;
}
- if (optlen < len)
- return -EINVAL;
- if (copy_from_user(&req_u.req, optval, len))
- return -EFAULT;
- return packet_set_ring(sk, &req_u, 0,
- optname == PACKET_TX_RING);
+ if (optlen < len) {
+ ret = -EINVAL;
+ } else {
+ if (copy_from_user(&req_u.req, optval, len))
+ ret = -EFAULT;
+ else
+ ret = packet_set_ring(sk, &req_u, 0,
+ optname == PACKET_TX_RING);
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_COPY_THRESH:
{
@@ -3735,12 +3750,18 @@
if (optlen != sizeof(val))
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->tp_loss = !!val;
- return 0;
+
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->tp_loss = !!val;
+ ret = 0;
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_AUXDATA:
{
@@ -3751,7 +3772,9 @@
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
+ lock_sock(sk);
po->auxdata = !!val;
+ release_sock(sk);
return 0;
}
case PACKET_ORIGDEV:
@@ -3763,7 +3786,9 @@
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
+ lock_sock(sk);
po->origdev = !!val;
+ release_sock(sk);
return 0;
}
case PACKET_VNET_HDR:
@@ -3772,15 +3797,20 @@
if (sock->type != SOCK_RAW)
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (optlen < sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->has_vnet_hdr = !!val;
- return 0;
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->has_vnet_hdr = !!val;
+ ret = 0;
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_TIMESTAMP:
{
@@ -3818,11 +3848,17 @@
if (optlen != sizeof(val))
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->tp_tx_has_off = !!val;
+
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->tp_tx_has_off = !!val;
+ ret = 0;
+ }
+ release_sock(sk);
return 0;
}
case PACKET_QDISC_BYPASS:
@@ -4219,7 +4255,6 @@
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;
- lock_sock(sk);
/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
net_warn_ratelimited("Tx-ring is not supported.\n");
@@ -4355,7 +4390,6 @@
if (pg_vec)
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
- release_sock(sk);
return err;
}
diff --git a/net/packet/internal.h b/net/packet/internal.h
index e76042f..b8d5618 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -111,10 +111,12 @@
int copy_thresh;
spinlock_t bind_lock;
struct mutex pg_vec_lock;
- unsigned int running:1, /* prot_hook is attached*/
- auxdata:1,
+ unsigned int running; /* bind_lock must be held */
+ unsigned int auxdata:1, /* writer must hold sock lock */
origdev:1,
- has_vnet_hdr:1;
+ has_vnet_hdr:1,
+ tp_loss:1,
+ tp_tx_has_off:1;
int pressure;
int ifindex; /* bound device */
__be16 num;
@@ -124,8 +126,6 @@
enum tpacket_versions tp_version;
unsigned int tp_hdrlen;
unsigned int tp_reserve;
- unsigned int tp_loss:1;
- unsigned int tp_tx_has_off:1;
unsigned int tp_tstamp;
struct net_device __rcu *cached_dev;
int (*xmit)(struct sk_buff *skb);
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 76c01cb..d6d8b34 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -138,13 +138,18 @@
ret = rfkill_register(rfkill->rfkill_dev);
if (ret < 0)
- return ret;
+ goto err_destroy;
platform_set_drvdata(pdev, rfkill);
dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
return 0;
+
+err_destroy:
+ rfkill_destroy(rfkill->rfkill_dev);
+
+ return ret;
}
static int rfkill_gpio_remove(struct platform_device *pdev)
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 95c463c..235db2c 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -634,7 +634,7 @@
}
}
- return 0;
+ return -ENOENT;
}
struct ifeheadr {
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 18e7524..b57b4de 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -128,6 +128,28 @@
return f->next == &detached;
}
+static bool fq_flow_is_throttled(const struct fq_flow *f)
+{
+ return f->next == &throttled;
+}
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+ if (head->first)
+ head->last->next = flow;
+ else
+ head->first = flow;
+ head->last = flow;
+ flow->next = NULL;
+}
+
+static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+ rb_erase(&f->rate_node, &q->delayed);
+ q->throttled_flows--;
+ fq_flow_add_tail(&q->old_flows, f);
+}
+
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -155,15 +177,6 @@
static struct kmem_cache *fq_flow_cachep __read_mostly;
-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
-{
- if (head->first)
- head->last->next = flow;
- else
- head->first = flow;
- head->last = flow;
- flow->next = NULL;
-}
/* limit number of collected flows per round */
#define FQ_GC_MAX 8
@@ -267,6 +280,8 @@
f->socket_hash != sk->sk_hash)) {
f->credit = q->initial_quantum;
f->socket_hash = sk->sk_hash;
+ if (fq_flow_is_throttled(f))
+ fq_flow_unset_throttled(q, f);
f->time_next_packet = 0ULL;
}
return f;
@@ -430,9 +445,7 @@
q->time_next_delayed_flow = f->time_next_packet;
break;
}
- rb_erase(p, &q->delayed);
- q->throttled_flows--;
- fq_flow_add_tail(&q->old_flows, f);
+ fq_flow_unset_throttled(q, f);
}
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f10d339..738c55e 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1006,9 +1006,10 @@
struct sctp_endpoint *ep;
struct sctp_chunk *chunk;
struct sctp_inq *inqueue;
- int state;
sctp_subtype_t subtype;
+ int first_time = 1; /* is this the first time through the loop */
int error = 0;
+ int state;
/* The association should be held so we should be safe. */
ep = asoc->ep;
@@ -1019,6 +1020,30 @@
state = asoc->state;
subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
+ /* If the first chunk in the packet is AUTH, do special
+ * processing specified in Section 6.3 of SCTP-AUTH spec
+ */
+ if (first_time && subtype.chunk == SCTP_CID_AUTH) {
+ struct sctp_chunkhdr *next_hdr;
+
+ next_hdr = sctp_inq_peek(inqueue);
+ if (!next_hdr)
+ goto normal;
+
+ /* If the next chunk is COOKIE-ECHO, skip the AUTH
+ * chunk while saving a pointer to it so we can do
+ * Authentication later (during cookie-echo
+ * processing).
+ */
+ if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
+ chunk->auth_chunk = skb_clone(chunk->skb,
+ GFP_ATOMIC);
+ chunk->auth = 1;
+ continue;
+ }
+ }
+
+normal:
/* SCTP-AUTH, Section 6.3:
* The receiver has a list of chunk types which it expects
* to be received only after an AUTH-chunk. This list has
@@ -1057,6 +1082,9 @@
/* If there is an error on chunk, discard this packet. */
if (error && chunk)
chunk->pdiscard = 1;
+
+ if (first_time)
+ first_time = 0;
}
sctp_association_put(asoc);
}
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index f731de3..e06083c 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -217,7 +217,7 @@
skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
chunk->subh.v = NULL; /* Subheader is no longer valid. */
- if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
+ if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <=
skb_tail_pointer(chunk->skb)) {
/* This is not a singleton */
chunk->singleton = 0;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 355d95a7..f4d5efb 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -521,44 +521,47 @@
addr->v6.sin6_scope_id = 0;
}
+static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
+ const union sctp_addr *addr2)
+{
+ if (addr1->sa.sa_family != addr2->sa.sa_family) {
+ if (addr1->sa.sa_family == AF_INET &&
+ addr2->sa.sa_family == AF_INET6 &&
+ ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
+ addr2->v6.sin6_addr.s6_addr32[3] ==
+ addr1->v4.sin_addr.s_addr)
+ return 1;
+
+ if (addr2->sa.sa_family == AF_INET &&
+ addr1->sa.sa_family == AF_INET6 &&
+ ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
+ addr1->v6.sin6_addr.s6_addr32[3] ==
+ addr2->v4.sin_addr.s_addr)
+ return 1;
+
+ return 0;
+ }
+
+ if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
+ return 0;
+
+ /* If this is a linklocal address, compare the scope_id. */
+ if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
+ addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
+ addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
+ return 0;
+
+ return 1;
+}
+
/* Compare addresses exactly.
* v4-mapped-v6 is also in consideration.
*/
static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
const union sctp_addr *addr2)
{
- if (addr1->sa.sa_family != addr2->sa.sa_family) {
- if (addr1->sa.sa_family == AF_INET &&
- addr2->sa.sa_family == AF_INET6 &&
- ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
- if (addr2->v6.sin6_port == addr1->v4.sin_port &&
- addr2->v6.sin6_addr.s6_addr32[3] ==
- addr1->v4.sin_addr.s_addr)
- return 1;
- }
- if (addr2->sa.sa_family == AF_INET &&
- addr1->sa.sa_family == AF_INET6 &&
- ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
- if (addr1->v6.sin6_port == addr2->v4.sin_port &&
- addr1->v6.sin6_addr.s6_addr32[3] ==
- addr2->v4.sin_addr.s_addr)
- return 1;
- }
- return 0;
- }
- if (addr1->v6.sin6_port != addr2->v6.sin6_port)
- return 0;
- if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
- return 0;
- /* If this is a linklocal address, compare the scope_id. */
- if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
- if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
- (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
- return 0;
- }
- }
-
- return 1;
+ return __sctp_v6_cmp_addr(addr1, addr2) &&
+ addr1->v6.sin6_port == addr2->v6.sin6_port;
}
/* Initialize addr struct to INADDR_ANY. */
@@ -844,8 +847,8 @@
const union sctp_addr *addr2,
struct sctp_sock *opt)
{
- struct sctp_af *af1, *af2;
struct sock *sk = sctp_opt2sk(opt);
+ struct sctp_af *af1, *af2;
af1 = sctp_get_af_specific(addr1->sa.sa_family);
af2 = sctp_get_af_specific(addr2->sa.sa_family);
@@ -861,10 +864,10 @@
if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
return 1;
- if (addr1->sa.sa_family != addr2->sa.sa_family)
- return 0;
+ if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
+ return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
- return af1->cmp_addr(addr1, addr2);
+ return __sctp_v6_cmp_addr(addr1, addr2);
}
/* Verify that the provided sockaddr looks bindable. Common verification,
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 8ec20a6..bfd0686 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -144,10 +144,8 @@
void *arg,
sctp_cmd_seq_t *commands);
-static sctp_ierror_t sctp_sf_authenticate(struct net *net,
- const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(
const struct sctp_association *asoc,
- const sctp_subtype_t type,
struct sctp_chunk *chunk);
static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
@@ -615,6 +613,38 @@
return SCTP_DISPOSITION_CONSUME;
}
+static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
+ const struct sctp_association *asoc)
+{
+ struct sctp_chunk auth;
+
+ if (!chunk->auth_chunk)
+ return true;
+
+ /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
+ * is supposed to be authenticated and we have to do delayed
+ * authentication. We've just recreated the association using
+ * the information in the cookie and now it's much easier to
+ * do the authentication.
+ */
+
+ /* Make sure that we and the peer are AUTH capable */
+ if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
+ return false;
+
+ /* set-up our fake chunk so that we can process it */
+ auth.skb = chunk->auth_chunk;
+ auth.asoc = chunk->asoc;
+ auth.sctp_hdr = chunk->sctp_hdr;
+ auth.chunk_hdr = (struct sctp_chunkhdr *)
+ skb_push(chunk->auth_chunk,
+ sizeof(struct sctp_chunkhdr));
+ skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
+ auth.transport = chunk->transport;
+
+ return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
+}
+
/*
* Respond to a normal COOKIE ECHO chunk.
* We are the side that is being asked for an association.
@@ -751,36 +781,9 @@
if (error)
goto nomem_init;
- /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
- * is supposed to be authenticated and we have to do delayed
- * authentication. We've just recreated the association using
- * the information in the cookie and now it's much easier to
- * do the authentication.
- */
- if (chunk->auth_chunk) {
- struct sctp_chunk auth;
- sctp_ierror_t ret;
-
- /* Make sure that we and the peer are AUTH capable */
- if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
- sctp_association_free(new_asoc);
- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
- }
-
- /* set-up our fake chunk so that we can process it */
- auth.skb = chunk->auth_chunk;
- auth.asoc = chunk->asoc;
- auth.sctp_hdr = chunk->sctp_hdr;
- auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk,
- sizeof(sctp_chunkhdr_t));
- skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
- auth.transport = chunk->transport;
-
- ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
- if (ret != SCTP_IERROR_NO_ERROR) {
- sctp_association_free(new_asoc);
- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
- }
+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1717,13 +1720,15 @@
GFP_ATOMIC))
goto nomem;
+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
/* Make sure no new addresses are being added during the
* restart. Though this is a pretty complicated attack
* since you'd have to get inside the cookie.
*/
- if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
+ if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
return SCTP_DISPOSITION_CONSUME;
- }
/* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
* the peer has restarted (Action A), it MUST NOT setup a new
@@ -1828,6 +1833,9 @@
GFP_ATOMIC))
goto nomem;
+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
/* Update the content of current association. */
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@@ -1920,6 +1928,9 @@
* a COOKIE ACK.
*/
+ if (!sctp_auth_chunk_verify(net, chunk, asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
/* Don't accidentally move back into established state. */
if (asoc->state < SCTP_STATE_ESTABLISHED) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -1959,7 +1970,7 @@
}
}
- repl = sctp_make_cookie_ack(new_asoc, chunk);
+ repl = sctp_make_cookie_ack(asoc, chunk);
if (!repl)
goto nomem;
@@ -3981,10 +3992,8 @@
*
* The return value is the disposition of the chunk.
*/
-static sctp_ierror_t sctp_sf_authenticate(struct net *net,
- const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(
const struct sctp_association *asoc,
- const sctp_subtype_t type,
struct sctp_chunk *chunk)
{
struct sctp_authhdr *auth_hdr;
@@ -4083,7 +4092,7 @@
commands);
auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
- error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
+ error = sctp_sf_authenticate(asoc, chunk);
switch (error) {
case SCTP_IERROR_AUTH_BAD_HMAC:
/* Generate the ERROR chunk and discard the rest
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index bea0005..6825e05 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -723,7 +723,6 @@
return event;
fail_mark:
- sctp_chunk_put(chunk);
kfree_skb(skb);
fail:
return NULL;
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 6cbc935..bbee334 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -285,9 +285,9 @@
strp_start_rx_timer(strp);
}
+ rxm->accum_len += cand_len;
strp->rx_need_bytes = rxm->strp.full_len -
rxm->accum_len;
- rxm->accum_len += cand_len;
rxm->early_eaten = cand_len;
STRP_STATS_ADD(strp->stats.rx_bytes, cand_len);
desc->count = 0; /* Stop reading socket */
@@ -310,6 +310,7 @@
/* Hurray, we have a new message! */
del_timer(&strp->rx_msg_timer);
strp->rx_skb_head = NULL;
+ strp->rx_need_bytes = 0;
STRP_STATS_INCR(strp->stats.rx_msgs);
/* Give skb to upper layer */
@@ -374,9 +375,7 @@
return;
if (strp->rx_need_bytes) {
- if (strp_peek_len(strp) >= strp->rx_need_bytes)
- strp->rx_need_bytes = 0;
- else
+ if (strp_peek_len(strp) < strp->rx_need_bytes)
return;
}
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 3200059..9ba3c46 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -79,7 +79,8 @@
const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
[TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_NET_ID] = { .type = NLA_U32 }
+ [TIPC_NLA_NET_ID] = { .type = NLA_U32 },
+ [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 },
};
const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d869b1d..3fbe584 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1197,6 +1197,7 @@
if (orig->aead) {
x->aead = xfrm_algo_aead_clone(orig->aead);
+ x->geniv = orig->geniv;
if (!x->aead)
goto error;
}
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index e1512ae..0c81e26 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -426,6 +426,8 @@
return -ENOTTY;
if (substream->stream != dir)
return -EINVAL;
+ if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
+ return -EBADFD;
if ((ch = substream->runtime->channels) > 128)
return -EINVAL;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 019f60b..2df7e6b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2765,6 +2765,7 @@
sync_ptr.s.status.hw_ptr = status->hw_ptr;
sync_ptr.s.status.tstamp = status->tstamp;
sync_ptr.s.status.suspended_state = status->suspended_state;
+ sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
snd_pcm_stream_unlock_irq(substream);
if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
return -EFAULT;
diff --git a/sound/core/seq/oss/seq_oss_event.c b/sound/core/seq/oss/seq_oss_event.c
index c390886..86ca584 100644
--- a/sound/core/seq/oss/seq_oss_event.c
+++ b/sound/core/seq/oss/seq_oss_event.c
@@ -26,6 +26,7 @@
#include <sound/seq_oss_legacy.h>
#include "seq_oss_readq.h"
#include "seq_oss_writeq.h"
+#include <linux/nospec.h>
/*
@@ -287,10 +288,10 @@
{
struct seq_oss_synthinfo *info;
- if (!snd_seq_oss_synth_is_valid(dp, dev))
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info)
return -ENXIO;
- info = &dp->synths[dev];
switch (info->arg.event_passing) {
case SNDRV_SEQ_OSS_PROCESS_EVENTS:
if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -298,6 +299,7 @@
return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
}
+ ch = array_index_nospec(ch, info->nr_voices);
if (note == 255 && info->ch[ch].note >= 0) {
/* volume control */
int type;
@@ -347,10 +349,10 @@
{
struct seq_oss_synthinfo *info;
- if (!snd_seq_oss_synth_is_valid(dp, dev))
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info)
return -ENXIO;
- info = &dp->synths[dev];
switch (info->arg.event_passing) {
case SNDRV_SEQ_OSS_PROCESS_EVENTS:
if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -358,6 +360,7 @@
return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
}
+ ch = array_index_nospec(ch, info->nr_voices);
if (info->ch[ch].note >= 0) {
note = info->ch[ch].note;
info->ch[ch].vel = 0;
@@ -381,7 +384,7 @@
static int
set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ if (!snd_seq_oss_synth_info(dp, dev))
return -ENXIO;
ev->type = type;
@@ -399,7 +402,7 @@
static int
set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ if (!snd_seq_oss_synth_info(dp, dev))
return -ENXIO;
ev->type = type;
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index b30b213..9debd1b 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -29,6 +29,7 @@
#include "../seq_lock.h"
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
/*
@@ -315,6 +316,7 @@
{
if (dev < 0 || dev >= dp->max_mididev)
return NULL;
+ dev = array_index_nospec(dev, dp->max_mididev);
return get_mdev(dev);
}
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index cd0e0eb..278ebb9 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
/*
* constants
@@ -339,17 +340,13 @@
dp->max_synthdev = 0;
}
-/*
- * check if the specified device is MIDI mapped device
- */
-static int
-is_midi_dev(struct seq_oss_devinfo *dp, int dev)
+static struct seq_oss_synthinfo *
+get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev)
{
if (dev < 0 || dev >= dp->max_synthdev)
- return 0;
- if (dp->synths[dev].is_midi)
- return 1;
- return 0;
+ return NULL;
+ dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS);
+ return &dp->synths[dev];
}
/*
@@ -359,14 +356,20 @@
get_synthdev(struct seq_oss_devinfo *dp, int dev)
{
struct seq_oss_synth *rec;
- if (dev < 0 || dev >= dp->max_synthdev)
+ struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
+
+ if (!info)
return NULL;
- if (! dp->synths[dev].opened)
+ if (!info->opened)
return NULL;
- if (dp->synths[dev].is_midi)
- return &midi_synth_dev;
- if ((rec = get_sdev(dev)) == NULL)
- return NULL;
+ if (info->is_midi) {
+ rec = &midi_synth_dev;
+ snd_use_lock_use(&rec->use_lock);
+ } else {
+ rec = get_sdev(dev);
+ if (!rec)
+ return NULL;
+ }
if (! rec->opened) {
snd_use_lock_free(&rec->use_lock);
return NULL;
@@ -402,10 +405,8 @@
struct seq_oss_synth *rec;
struct seq_oss_synthinfo *info;
- if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev))
- return;
- info = &dp->synths[dev];
- if (! info->opened)
+ info = get_synthinfo_nospec(dp, dev);
+ if (!info || !info->opened)
return;
if (info->sysex)
info->sysex->len = 0; /* reset sysex */
@@ -454,12 +455,14 @@
const char __user *buf, int p, int c)
{
struct seq_oss_synth *rec;
+ struct seq_oss_synthinfo *info;
int rc;
- if (dev < 0 || dev >= dp->max_synthdev)
+ info = get_synthinfo_nospec(dp, dev);
+ if (!info)
return -ENXIO;
- if (is_midi_dev(dp, dev))
+ if (info->is_midi)
return 0;
if ((rec = get_synthdev(dp, dev)) == NULL)
return -ENXIO;
@@ -467,24 +470,25 @@
if (rec->oper.load_patch == NULL)
rc = -ENXIO;
else
- rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c);
+ rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c);
snd_use_lock_free(&rec->use_lock);
return rc;
}
/*
- * check if the device is valid synth device
+ * check if the device is valid synth device and return the synth info
*/
-int
-snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev)
+struct seq_oss_synthinfo *
+snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev)
{
struct seq_oss_synth *rec;
+
rec = get_synthdev(dp, dev);
if (rec) {
snd_use_lock_free(&rec->use_lock);
- return 1;
+ return get_synthinfo_nospec(dp, dev);
}
- return 0;
+ return NULL;
}
@@ -499,16 +503,18 @@
int i, send;
unsigned char *dest;
struct seq_oss_synth_sysex *sysex;
+ struct seq_oss_synthinfo *info;
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info)
return -ENXIO;
- sysex = dp->synths[dev].sysex;
+ sysex = info->sysex;
if (sysex == NULL) {
sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
if (sysex == NULL)
return -ENOMEM;
- dp->synths[dev].sysex = sysex;
+ info->sysex = sysex;
}
send = 0;
@@ -553,10 +559,12 @@
int
snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev);
+
+ if (!info)
return -EINVAL;
- snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client,
- dp->synths[dev].arg.addr.port);
+ snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client,
+ info->arg.addr.port);
return 0;
}
@@ -568,16 +576,18 @@
snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr)
{
struct seq_oss_synth *rec;
+ struct seq_oss_synthinfo *info;
int rc;
- if (is_midi_dev(dp, dev))
+ info = get_synthinfo_nospec(dp, dev);
+ if (!info || info->is_midi)
return -ENXIO;
if ((rec = get_synthdev(dp, dev)) == NULL)
return -ENXIO;
if (rec->oper.ioctl == NULL)
rc = -ENXIO;
else
- rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr);
+ rc = rec->oper.ioctl(&info->arg, cmd, addr);
snd_use_lock_free(&rec->use_lock);
return rc;
}
@@ -589,7 +599,10 @@
int
snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev))
+ struct seq_oss_synthinfo *info;
+
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info || info->is_midi)
return -ENXIO;
ev->type = SNDRV_SEQ_EVENT_OSS;
memcpy(ev->data.raw8.d, data, 8);
diff --git a/sound/core/seq/oss/seq_oss_synth.h b/sound/core/seq/oss/seq_oss_synth.h
index 74ac55f..a63f9e2 100644
--- a/sound/core/seq/oss/seq_oss_synth.h
+++ b/sound/core/seq/oss/seq_oss_synth.h
@@ -37,7 +37,8 @@
void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev);
int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
const char __user *buf, int p, int c);
-int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev);
+struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp,
+ int dev);
int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
struct snd_seq_event *ev);
int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev);
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 2007649..8bdc4c9 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -174,12 +174,12 @@
}
return;
}
+ spin_lock_irqsave(&substream->runtime->lock, flags);
if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
- return;
+ goto out;
vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
}
- spin_lock_irqsave(&substream->runtime->lock, flags);
while (1) {
count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
if (count <= 0)
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index dc91002..847f703 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -296,6 +296,8 @@
cable->pause |= stream;
loopback_timer_stop(dpcm);
spin_unlock(&cable->lock);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ loopback_active_notify(dpcm);
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
@@ -304,6 +306,8 @@
cable->pause &= ~stream;
loopback_timer_start(dpcm);
spin_unlock(&cable->lock);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ loopback_active_notify(dpcm);
break;
default:
return -EINVAL;
@@ -828,9 +832,11 @@
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&loopback->cable_lock);
ucontrol->value.integer.value[0] =
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].rate_shift;
+ mutex_unlock(&loopback->cable_lock);
return 0;
}
@@ -862,9 +868,11 @@
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&loopback->cable_lock);
ucontrol->value.integer.value[0] =
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].notify;
+ mutex_unlock(&loopback->cable_lock);
return 0;
}
@@ -876,12 +884,14 @@
int change = 0;
val = ucontrol->value.integer.value[0] ? 1 : 0;
+ mutex_lock(&loopback->cable_lock);
if (val != loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].notify) {
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].notify = val;
change = 1;
}
+ mutex_unlock(&loopback->cable_lock);
return change;
}
@@ -889,13 +899,18 @@
struct snd_ctl_elem_value *ucontrol)
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
- struct loopback_cable *cable = loopback->cables
- [kcontrol->id.subdevice][kcontrol->id.device ^ 1];
+ struct loopback_cable *cable;
+
unsigned int val = 0;
- if (cable != NULL)
- val = (cable->running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ?
- 1 : 0;
+ mutex_lock(&loopback->cable_lock);
+ cable = loopback->cables[kcontrol->id.subdevice][kcontrol->id.device ^ 1];
+ if (cable != NULL) {
+ unsigned int running = cable->running ^ cable->pause;
+
+ val = (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ? 1 : 0;
+ }
+ mutex_unlock(&loopback->cable_lock);
ucontrol->value.integer.value[0] = val;
return 0;
}
@@ -938,9 +953,11 @@
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&loopback->cable_lock);
ucontrol->value.integer.value[0] =
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].rate;
+ mutex_unlock(&loopback->cable_lock);
return 0;
}
@@ -960,9 +977,11 @@
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&loopback->cable_lock);
ucontrol->value.integer.value[0] =
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].channels;
+ mutex_unlock(&loopback->cable_lock);
return 0;
}
diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
index ddcc1a3..42920a2 100644
--- a/sound/drivers/opl3/opl3_synth.c
+++ b/sound/drivers/opl3/opl3_synth.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/nospec.h>
#include <sound/opl3.h>
#include <sound/asound_fm.h>
@@ -448,7 +449,7 @@
{
unsigned short reg_side;
unsigned char op_offset;
- unsigned char voice_offset;
+ unsigned char voice_offset, voice_op;
unsigned short opl3_reg;
unsigned char reg_val;
@@ -473,7 +474,9 @@
voice_offset = voice->voice - MAX_OPL2_VOICES;
}
/* Get register offset of operator */
- op_offset = snd_opl3_regmap[voice_offset][voice->op];
+ voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES);
+ voice_op = array_index_nospec(voice->op, 4);
+ op_offset = snd_opl3_regmap[voice_offset][voice_op];
reg_val = 0x00;
/* Set amplitude modulation (tremolo) effect */
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index ec4db3a..257cfbf 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -425,7 +425,7 @@
err = init_stream(dice, AMDTP_IN_STREAM, i);
if (err < 0) {
for (; i >= 0; i--)
- destroy_stream(dice, AMDTP_OUT_STREAM, i);
+ destroy_stream(dice, AMDTP_IN_STREAM, i);
goto end;
}
}
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 25e9f77..0d3d36f 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -14,7 +14,7 @@
#define OUI_WEISS 0x001c6a
#define OUI_LOUD 0x000ff2
#define OUI_FOCUSRITE 0x00130e
-#define OUI_TCELECTRONIC 0x001486
+#define OUI_TCELECTRONIC 0x000166
#define DICE_CATEGORY_ID 0x04
#define WEISS_CATEGORY_ID 0x00
diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c
index 7eb6171..a31a70d 100644
--- a/sound/pci/asihpi/hpimsginit.c
+++ b/sound/pci/asihpi/hpimsginit.c
@@ -23,6 +23,7 @@
#include "hpi_internal.h"
#include "hpimsginit.h"
+#include <linux/nospec.h>
/* The actual message size for each object type */
static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT;
@@ -39,10 +40,12 @@
{
u16 size;
- if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
+ if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
+ object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
size = msg_size[object];
- else
+ } else {
size = sizeof(*phm);
+ }
memset(phm, 0, size);
phm->size = size;
@@ -66,10 +69,12 @@
{
u16 size;
- if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
+ if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
+ object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
size = res_size[object];
- else
+ } else {
size = sizeof(*phr);
+ }
memset(phr, 0, sizeof(*phr));
phr->size = size;
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 7e3aa50..3ef9af5 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -33,6 +33,7 @@
#include <linux/stringify.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <linux/nospec.h>
#ifdef MODULE_FIRMWARE
MODULE_FIRMWARE("asihpi/dsp5000.bin");
@@ -182,7 +183,8 @@
struct hpi_adapter *pa = NULL;
if (hm->h.adapter_index < ARRAY_SIZE(adapters))
- pa = &adapters[hm->h.adapter_index];
+ pa = &adapters[array_index_nospec(hm->h.adapter_index,
+ ARRAY_SIZE(adapters))];
if (!pa || !pa->adapter || !pa->adapter->type) {
hpi_init_response(&hr->r0, hm->h.object,
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 57df06e..cc009a4 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include "hda_codec.h"
#include "hda_local.h"
@@ -51,7 +52,16 @@
if (get_user(verb, &arg->verb))
return -EFAULT;
- res = get_wcaps(codec, verb >> 24);
+ /* open-code get_wcaps(verb>>24) with nospec */
+ verb >>= 24;
+ if (verb < codec->core.start_nid ||
+ verb >= codec->core.start_nid + codec->core.num_nodes) {
+ res = 0;
+ } else {
+ verb -= codec->core.start_nid;
+ verb = array_index_nospec(verb, codec->core.num_nodes);
+ res = codec->wcaps[verb];
+ }
if (put_user(res, &arg->res))
return -EFAULT;
return 0;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e2230be..7ece1ab 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -329,6 +329,7 @@
break;
case 0x10ec0225:
case 0x10ec0233:
+ case 0x10ec0235:
case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
@@ -6359,6 +6360,7 @@
case 0x10ec0298:
spec->codec_variant = ALC269_TYPE_ALC298;
break;
+ case 0x10ec0235:
case 0x10ec0255:
spec->codec_variant = ALC269_TYPE_ALC255;
break;
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 14bbf55..9899ef4 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -137,6 +137,7 @@
#include <linux/pci.h>
#include <linux/math64.h>
#include <linux/io.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -5692,40 +5693,43 @@
struct snd_pcm_channel_info *info)
{
struct hdspm *hdspm = snd_pcm_substream_chip(substream);
+ unsigned int channel = info->channel;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) {
+ if (snd_BUG_ON(channel >= hdspm->max_channels_out)) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: output channel out of range (%d)\n",
- info->channel);
+ channel);
return -EINVAL;
}
- if (hdspm->channel_map_out[info->channel] < 0) {
+ channel = array_index_nospec(channel, hdspm->max_channels_out);
+ if (hdspm->channel_map_out[channel] < 0) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: output channel %d mapped out\n",
- info->channel);
+ channel);
return -EINVAL;
}
- info->offset = hdspm->channel_map_out[info->channel] *
+ info->offset = hdspm->channel_map_out[channel] *
HDSPM_CHANNEL_BUFFER_BYTES;
} else {
- if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) {
+ if (snd_BUG_ON(channel >= hdspm->max_channels_in)) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: input channel out of range (%d)\n",
- info->channel);
+ channel);
return -EINVAL;
}
- if (hdspm->channel_map_in[info->channel] < 0) {
+ channel = array_index_nospec(channel, hdspm->max_channels_in);
+ if (hdspm->channel_map_in[channel] < 0) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: input channel %d mapped out\n",
- info->channel);
+ channel);
return -EINVAL;
}
- info->offset = hdspm->channel_map_in[info->channel] *
+ info->offset = hdspm->channel_map_in[channel] *
HDSPM_CHANNEL_BUFFER_BYTES;
}
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index 55172c6..a76b1f1 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -2036,9 +2037,10 @@
if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS))
return -EINVAL;
- if ((chn = rme9652->channel_map[info->channel]) < 0) {
+ chn = rme9652->channel_map[array_index_nospec(info->channel,
+ RME9652_NCHANNELS)];
+ if (chn < 0)
return -EINVAL;
- }
info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES;
info->first = 0;
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 38bfd46..3ef1745 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -145,6 +145,13 @@
psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8;
+ /* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */
+ if (ratio <= 256) {
+ pm = ratio;
+ fp = 1;
+ goto out;
+ }
+
/* Set the max fluctuation -- 0.1% of the max devisor */
savesub = (psr ? 1 : 8) * 256 * maxfp / 1000;
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 9038b2e..eaa03ac 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -353,8 +353,11 @@
/*
* Dell usb dock with ALC4020 codec had a firmware problem where it got
* screwed up when zero volume is passed; just skip it as a workaround
+ *
+ * Also the extension unit gives an access error, so skip it as well.
*/
static const struct usbmix_name_map dell_alc4020_map[] = {
+ { 4, NULL }, /* extension unit */
{ 16, NULL },
{ 19, NULL },
{ 0 }
diff --git a/tools/lib/str_error_r.c b/tools/lib/str_error_r.c
index 503ae07..9ab2d0a 100644
--- a/tools/lib/str_error_r.c
+++ b/tools/lib/str_error_r.c
@@ -21,6 +21,6 @@
{
int err = strerror_r(errnum, buf, buflen);
if (err)
- snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, %p, %zd)=%d", errnum, buf, buflen, err);
+ snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, [buf], %zd)=%d", errnum, buflen, err);
return buf;
}
diff --git a/tools/lib/subcmd/pager.c b/tools/lib/subcmd/pager.c
index 6518bea..68af60f 100644
--- a/tools/lib/subcmd/pager.c
+++ b/tools/lib/subcmd/pager.c
@@ -29,10 +29,13 @@
* have real input
*/
fd_set in;
+ fd_set exception;
FD_ZERO(&in);
+ FD_ZERO(&exception);
FD_SET(0, &in);
- select(1, &in, NULL, &in, NULL);
+ FD_SET(0, &exception);
+ select(1, &in, NULL, &exception, NULL);
setenv("LESS", "FRSX", 0);
}
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 4bc5882..d2c6cdd 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -366,23 +366,7 @@
if (!is_regular_file(name))
return -EINVAL;
- if (dso__needs_decompress(dso)) {
- char newpath[KMOD_DECOMP_LEN];
- size_t len = sizeof(newpath);
-
- if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
- free(name);
- return -dso->load_errno;
- }
-
- strcpy(name, newpath);
- }
-
fd = do_open(name);
-
- if (dso__needs_decompress(dso))
- unlink(name);
-
free(name);
return fd;
}
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index 17e16fc..99d7f13 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -29,9 +29,11 @@
echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
fi
if [ "$OLD_FWPATH" = "" ]; then
- OLD_FWPATH=" "
+ # A zero-length write won't work; write a null byte
+ printf '\000' >/sys/module/firmware_class/parameters/path
+ else
+ echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
fi
- echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
rm -f "$FW"
rmdir "$FWPATH"
}